
Compare commits


102 Commits

SHA1 Message Date
e1c1c65578 cleanup 2021-10-31 14:28:01 +01:00
2154631e85 cleanups 2021-10-31 14:20:04 +01:00
71b7e59e28 remove print 2021-10-31 14:09:16 +01:00
7106fe299e cleanup 2021-10-31 14:06:48 +01:00
4b8e52bdbc add timeit include 2021-10-31 13:50:05 +01:00
462d3bde6e fix 2021-10-31 13:36:24 +01:00
e354ff5b86 cleanup 2021-10-31 13:27:43 +01:00
27da814444 add test 2021-10-31 13:08:01 +01:00
493571c3c3 add some noexcepts 2021-10-31 13:02:49 +01:00
a6eb04cce4 cleanups 2021-10-31 12:39:46 +01:00
139f69b3b9 Merge branch 'master' into virtual-array-value-type 2021-10-31 12:33:45 +01:00
8a1cc2f291 Merge branch 'master' into virtual-array-value-type 2021-10-30 23:04:24 +02:00
572fa82cc9 cleanup 2021-10-30 23:04:04 +02:00
c8ab4dcce1 cleanup 2021-10-30 23:02:05 +02:00
55a6af1c1e cleanup 2021-10-30 22:52:28 +02:00
0827245a91 cleanup 2021-10-30 22:49:26 +02:00
2f284f42b6 progress 2021-10-30 22:38:35 +02:00
1131bf6ec6 progress 2021-10-30 22:32:48 +02:00
2b0e890a55 comments 2021-10-30 22:20:04 +02:00
c4d3380d2e progress 2021-10-30 21:36:11 +02:00
05cef9da88 cleanup 2021-10-30 21:20:15 +02:00
43a53f97c4 cleanup tests 2021-10-30 21:17:24 +02:00
1d7a893d12 cleanup 2021-10-30 21:07:33 +02:00
5e60ba93f7 progress 2021-10-30 21:05:56 +02:00
65258dc98f progress 2021-10-30 21:02:49 +02:00
d449e66c1d cleanup 2021-10-30 20:59:26 +02:00
01fe332b9d progress 2021-10-30 20:54:00 +02:00
d6b0f3a3a4 progress 2021-10-30 20:42:37 +02:00
928b7b5340 cleanup 2021-10-30 20:35:26 +02:00
aa22839aa1 progress 2021-10-30 20:31:53 +02:00
78daf1b697 progress 2021-10-30 20:24:20 +02:00
dcd9b21d07 progress 2021-10-30 20:21:48 +02:00
42b350ed7f progress 2021-10-30 20:14:43 +02:00
e79c7e088e progress 2021-10-30 19:59:35 +02:00
99c5b788eb progress 2021-10-30 19:55:53 +02:00
a2b5a74d49 progress 2021-10-30 19:46:17 +02:00
84626dfa16 progress 2021-10-30 19:40:58 +02:00
3d0abb3be9 progress 2021-10-30 19:31:19 +02:00
9b6c13e66c progress 2021-10-30 19:10:44 +02:00
7f4273d373 progress 2021-10-30 18:50:28 +02:00
d1b8d6acd3 progress 2021-10-30 18:39:34 +02:00
2267f19486 progress 2021-10-30 18:18:27 +02:00
acd8874205 progress 2021-10-30 18:13:52 +02:00
a69bd34fad fix 2021-10-30 16:56:05 +02:00
2516fc953a Merge branch 'blender-v3.0-release' 2021-10-30 16:54:06 +02:00
50f93cbf52 cleanup 2021-10-25 00:35:58 +02:00
8af541dcfd Geometry Nodes: Refactor virtual array system. Differential Revision: https://developer.blender.org/D12986 2021-10-25 00:16:40 +02:00
f8c0682a67 progress 2021-10-24 23:58:16 +02:00
b40e753c81 fix 2021-10-24 23:20:32 +02:00
2cff0676af cleanup 2021-10-24 23:13:25 +02:00
b0444a347b cleanup 2021-10-24 23:11:47 +02:00
becfc547b9 cleanup 2021-10-24 23:08:22 +02:00
6a558a18f9 progress 2021-10-24 23:02:51 +02:00
c066102f30 progress 2021-10-24 22:55:05 +02:00
ba49545060 progress 2021-10-24 22:50:52 +02:00
ca70c0521a progress 2021-10-24 22:44:54 +02:00
4323b3f592 progress 2021-10-24 22:28:43 +02:00
d1e6606d4d progress 2021-10-24 22:10:47 +02:00
b64ad0b30a cleanup 2021-10-24 22:00:32 +02:00
ed4b15ecd3 cleanup 2021-10-24 21:57:56 +02:00
f79a673bf1 cleanup 2021-10-24 21:44:08 +02:00
1438f16663 fix 2021-10-24 21:09:58 +02:00
d613451c3a Merge branch 'master' into virtual-array-value-type 2021-10-24 20:56:50 +02:00
3f8ac0e5ef Merge branch 'master' into virtual-array-value-type 2021-10-24 17:27:01 +02:00
56bd1f46bd fixes 2021-10-24 17:20:54 +02:00
1e4cbec720 fix 2021-10-24 17:09:24 +02:00
f9b669c588 fixes 2021-10-24 16:45:02 +02:00
1ac0a2db11 cleanup 2021-10-24 16:38:23 +02:00
771cce22f1 cleanup 2021-10-24 16:34:50 +02:00
c1a1046ed7 cleanup 2021-10-24 16:29:33 +02:00
f430e7850a cleanup 2021-10-24 16:24:58 +02:00
116eb09681 fix 2021-10-24 16:23:35 +02:00
841d393c15 cleanup 2021-10-24 16:23:17 +02:00
bc1acbcc5a cleanup 2021-10-24 16:15:11 +02:00
cd49f68db6 cleanup 2021-10-24 16:08:02 +02:00
5b51065347 cleanup 2021-10-24 16:05:15 +02:00
e736fe67c9 cleanup 2021-10-24 15:59:12 +02:00
1d20f60616 fixes 2021-10-24 15:27:21 +02:00
91b58c95ac fixes 2021-10-24 14:57:51 +02:00
955b01e359 fix assignment operators 2021-10-24 13:59:19 +02:00
1b3758f205 cleanup 2021-10-24 13:59:11 +02:00
33870b21f3 fix output attribute 2021-10-24 13:05:45 +02:00
e49f70745b fix dst fields 2021-10-24 12:56:11 +02:00
c48a99542d compiling works again 2021-10-24 02:26:08 +02:00
68b5670268 more renaming 2021-10-23 22:50:37 +02:00
dea268de72 progress 2021-10-23 22:42:30 +02:00
399463c548 progress 2021-10-23 22:35:12 +02:00
1d94de4ac3 progress 2021-10-23 20:22:49 +02:00
0453361d20 progress 2021-10-23 20:11:19 +02:00
b21a3ae6ad rename to *Impl 2021-10-23 19:15:29 +02:00
edb8ccf31f support converting VMutableArray to VArray 2021-10-23 19:14:14 +02:00
7af712c343 add mutable virtual array 2021-10-23 19:06:53 +02:00
a9e0cb6ec8 rename to VMutableArrayImpl 2021-10-23 18:53:34 +02:00
5a7b30f8f9 cleanup 2021-10-23 18:52:44 +02:00
024c07af84 more virtual array tests 2021-10-23 18:10:11 +02:00
c256298924 rename VArray to VArrayImpl 2021-10-23 16:48:22 +02:00
a11950531f add comments 2021-10-23 16:36:28 +02:00
ae01d1db98 add extra type info 2021-10-23 16:08:20 +02:00
f3a5b31196 fix assign any 2021-10-23 15:54:56 +02:00
8d1a5fcfea add tests 2021-10-23 15:45:49 +02:00
39c4103f9e move methods 2021-10-23 15:22:28 +02:00
4e03ac6b25 initial Any type 2021-10-23 14:50:11 +02:00
4176 changed files with 143033 additions and 206225 deletions
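
The commit messages above outline the shape of the refactor: the abstract class is renamed from VArray to VArrayImpl (and VMutableArrayImpl for the writable variant), owning VArray/VMutableArray wrappers are layered on top, a VMutableArray gains a conversion to a read-only VArray, and an initial Any type (presumably for type-erased storage) is added. The following is a minimal sketch of how such a split can look; the names mirror the commit messages, but the code is an illustration only, not Blender's actual blenlib implementation, and the Any-based storage is omitted.

#include <cstdint>
#include <memory>
#include <utility>

// Illustration only -- simplified stand-in for the split described by the
// commits ("rename VArray to VArrayImpl", "add mutable virtual array",
// "support converting VMutableArray to VArray").

template<typename T> class VArrayImpl {
 public:
  virtual ~VArrayImpl() = default;
  virtual int64_t size() const = 0;
  virtual T get(int64_t index) const = 0;
};

template<typename T> class VMutableArrayImpl : public VArrayImpl<T> {
 public:
  virtual void set(int64_t index, T value) = 0;
};

// Owning wrappers with value semantics; virtual dispatch stays in the impl.
template<typename T> class VArray {
 public:
  explicit VArray(std::shared_ptr<const VArrayImpl<T>> impl) : impl_(std::move(impl)) {}
  int64_t size() const { return impl_->size(); }
  T operator[](const int64_t index) const { return impl_->get(index); }

 private:
  std::shared_ptr<const VArrayImpl<T>> impl_;
};

template<typename T> class VMutableArray {
 public:
  explicit VMutableArray(std::shared_ptr<VMutableArrayImpl<T>> impl) : impl_(std::move(impl)) {}
  int64_t size() const { return impl_->size(); }
  void set(const int64_t index, T value) { impl_->set(index, std::move(value)); }
  // A writable virtual array can always be viewed as a read-only one.
  operator VArray<T>() const { return VArray<T>(impl_); }

 private:
  std::shared_ptr<VMutableArrayImpl<T>> impl_;
};

The design point such a rename enables is that callers pass VArray<T> around by value while the polymorphic implementation is shared behind the wrapper, which is what makes the VMutableArray-to-VArray conversion in the commit list cheap.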

View File

@@ -264,9 +264,6 @@ ForEachMacros:
- SET_SLOT_PROBING_BEGIN
- MAP_SLOT_PROBING_BEGIN
- VECTOR_SET_SLOT_PROBING_BEGIN
- LIGHT_FOREACH_BEGIN_DIRECTIONAL
- LIGHT_FOREACH_BEGIN_LOCAL
- LIGHT_FOREACH_BEGIN_LOCAL_NO_CULL
StatementMacros:
- PyObject_HEAD

View File

@@ -12,8 +12,6 @@ Checks: >
-readability-avoid-const-params-in-decls,
-readability-simplify-boolean-expr,
-readability-make-member-function-const,
-readability-suspicious-call-argument,
-readability-redundant-member-init,
-readability-misleading-indentation,
@@ -27,8 +25,6 @@ Checks: >
-bugprone-branch-clone,
-bugprone-macro-parentheses,
-bugprone-reserved-identifier,
-bugprone-easily-swappable-parameters,
-bugprone-implicit-widening-of-multiplication-result,
-bugprone-sizeof-expression,
-bugprone-integer-division,
@@ -44,8 +40,7 @@ Checks: >
-modernize-pass-by-value,
# Cannot be enabled yet, because using raw string literals in tests breaks
# the windows compiler currently.
-modernize-raw-string-literal,
-modernize-return-braced-init-list
-modernize-raw-string-literal
CheckOptions:
- key: modernize-use-default-member-init.UseAssignment

View File

@@ -157,9 +157,8 @@ option(WITH_BLENDER "Build blender (disable to build only the blender player)" O
mark_as_advanced(WITH_BLENDER)
if(APPLE)
# In future, can be used with `quicklookthumbnailing/qlthumbnailreply` to create file
# thumbnails for say Finder. Turn it off for now.
option(WITH_BLENDER_THUMBNAILER "Build \"blender-thumbnailer\" thumbnail extraction utility" OFF)
# Currently this causes a build error linking, disable.
set(WITH_BLENDER_THUMBNAILER OFF)
elseif(WIN32)
option(WITH_BLENDER_THUMBNAILER "Build \"BlendThumb.dll\" helper for Windows explorer integration" ON)
else()
@@ -188,13 +187,6 @@ mark_as_advanced(CPACK_OVERRIDE_PACKAGENAME)
mark_as_advanced(BUILDINFO_OVERRIDE_DATE)
mark_as_advanced(BUILDINFO_OVERRIDE_TIME)
if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.16")
option(WITH_UNITY_BUILD "Enable unity build for modules that support it to improve compile times" ON)
mark_as_advanced(WITH_UNITY_BUILD)
else()
set(WITH_UNITY_BUILD OFF)
endif()
option(WITH_IK_ITASC "Enable ITASC IK solver (only disable for development & for incompatible C++ compilers)" ON)
option(WITH_IK_SOLVER "Enable Legacy IK solver (only disable for development)" ON)
option(WITH_FFTW3 "Enable FFTW3 support (Used for smoke, ocean sim, and audio effects)" ON)
@@ -274,13 +266,11 @@ endif()
if(UNIX AND NOT APPLE)
option(WITH_SYSTEM_GLEW "Use GLEW OpenGL wrapper library provided by the operating system" OFF)
option(WITH_SYSTEM_GLEW "Use GLEW OpenGL wrapper library provided by the operating system" OFF)
option(WITH_SYSTEM_FREETYPE "Use the freetype library provided by the operating system" OFF)
option(WITH_SYSTEM_GLES "Use OpenGL ES library provided by the operating system" ON)
else()
# not an option for other OS's
set(WITH_SYSTEM_GLEW OFF)
set(WITH_SYSTEM_GLES OFF)
set(WITH_SYSTEM_FREETYPE OFF)
endif()
@@ -421,7 +411,6 @@ option(WITH_CYCLES "Enable Cycles Render Engine" ON)
option(WITH_CYCLES_OSL "Build Cycles with OpenShadingLanguage support" ON)
option(WITH_CYCLES_EMBREE "Build Cycles with Embree support" ON)
option(WITH_CYCLES_LOGGING "Build Cycles with logging support" ON)
option(WITH_CYCLES_DEBUG "Build Cycles with options useful for debugging (e.g., MIS)" OFF)
option(WITH_CYCLES_STANDALONE "Build Cycles standalone application" OFF)
option(WITH_CYCLES_STANDALONE_GUI "Build Cycles standalone with GUI" OFF)
@@ -436,40 +425,26 @@ mark_as_advanced(WITH_CYCLES_DEBUG_NAN)
mark_as_advanced(WITH_CYCLES_NATIVE_ONLY)
# NVIDIA CUDA & OptiX
if(NOT APPLE)
option(WITH_CYCLES_DEVICE_CUDA "Enable Cycles NVIDIA CUDA compute support" ON)
option(WITH_CYCLES_DEVICE_OPTIX "Enable Cycles NVIDIA OptiX support" ON)
mark_as_advanced(WITH_CYCLES_DEVICE_CUDA)
option(WITH_CYCLES_DEVICE_CUDA "Enable Cycles NVIDIA CUDA compute support" ON)
option(WITH_CYCLES_DEVICE_OPTIX "Enable Cycles NVIDIA OptiX support" ON)
mark_as_advanced(WITH_CYCLES_DEVICE_CUDA)
option(WITH_CYCLES_CUDA_BINARIES "Build Cycles NVIDIA CUDA binaries" OFF)
set(CYCLES_CUDA_BINARIES_ARCH sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 sm_70 sm_75 sm_86 compute_75 CACHE STRING "CUDA architectures to build binaries for")
option(WITH_CYCLES_CUBIN_COMPILER "Build cubins with nvrtc based compiler instead of nvcc" OFF)
option(WITH_CYCLES_CUDA_BUILD_SERIAL "Build cubins one after another (useful on machines with limited RAM)" OFF)
option(WITH_CUDA_DYNLOAD "Dynamically load CUDA libraries at runtime (for developers, makes cuda-gdb work)" ON)
mark_as_advanced(CYCLES_CUDA_BINARIES_ARCH)
mark_as_advanced(WITH_CYCLES_CUBIN_COMPILER)
mark_as_advanced(WITH_CYCLES_CUDA_BUILD_SERIAL)
mark_as_advanced(WITH_CUDA_DYNLOAD)
endif()
option(WITH_CYCLES_CUDA_BINARIES "Build Cycles NVIDIA CUDA binaries" OFF)
set(CYCLES_CUDA_BINARIES_ARCH sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 sm_70 sm_75 sm_86 compute_75 CACHE STRING "CUDA architectures to build binaries for")
option(WITH_CYCLES_CUBIN_COMPILER "Build cubins with nvrtc based compiler instead of nvcc" OFF)
option(WITH_CYCLES_CUDA_BUILD_SERIAL "Build cubins one after another (useful on machines with limited RAM)" OFF)
option(WITH_CUDA_DYNLOAD "Dynamically load CUDA libraries at runtime (for developers, makes cuda-gdb work)" ON)
mark_as_advanced(CYCLES_CUDA_BINARIES_ARCH)
mark_as_advanced(WITH_CYCLES_CUBIN_COMPILER)
mark_as_advanced(WITH_CYCLES_CUDA_BUILD_SERIAL)
mark_as_advanced(WITH_CUDA_DYNLOAD)
# AMD HIP
if(NOT APPLE)
if(WIN32)
option(WITH_CYCLES_DEVICE_HIP "Enable Cycles AMD HIP support" ON)
else()
option(WITH_CYCLES_DEVICE_HIP "Enable Cycles AMD HIP support" OFF)
endif()
option(WITH_CYCLES_HIP_BINARIES "Build Cycles AMD HIP binaries" OFF)
set(CYCLES_HIP_BINARIES_ARCH gfx1010 gfx1011 gfx1012 gfx1030 gfx1031 gfx1032 gfx1034 CACHE STRING "AMD HIP architectures to build binaries for")
mark_as_advanced(WITH_CYCLES_DEVICE_HIP)
mark_as_advanced(CYCLES_HIP_BINARIES_ARCH)
endif()
# Apple Metal
if(APPLE)
option(WITH_CYCLES_DEVICE_METAL "Enable Cycles Apple Metal compute support" ON)
endif()
option(WITH_CYCLES_DEVICE_HIP "Enable Cycles AMD HIP support" OFF)
option(WITH_CYCLES_HIP_BINARIES "Build Cycles AMD HIP binaries" OFF)
set(CYCLES_HIP_BINARIES_ARCH gfx1010 gfx1011 gfx1012 gfx1030 gfx1031 gfx1032 gfx1034 CACHE STRING "AMD HIP architectures to build binaries for")
mark_as_advanced(WITH_CYCLES_DEVICE_HIP)
mark_as_advanced(CYCLES_HIP_BINARIES_ARCH)
# Draw Manager
option(WITH_DRAW_DEBUG "Add extra debug capabilities to Draw Manager" OFF)
@@ -514,10 +489,11 @@ if(WIN32)
endif()
# This should be turned off when Blender enter beta/rc/release
if("${BLENDER_VERSION_CYCLE}" STREQUAL "alpha")
set(WITH_EXPERIMENTAL_FEATURES ON)
else()
if("${BLENDER_VERSION_CYCLE}" STREQUAL "release" OR
"${BLENDER_VERSION_CYCLE}" STREQUAL "rc")
set(WITH_EXPERIMENTAL_FEATURES OFF)
else()
set(WITH_EXPERIMENTAL_FEATURES ON)
endif()
# Unit testsing
@@ -539,14 +515,12 @@ option(WITH_OPENGL "When off limits visibility of the opengl header
option(WITH_GLEW_ES "Switches to experimental copy of GLEW that has support for OpenGL ES. (temporary option for development purposes)" OFF)
option(WITH_GL_EGL "Use the EGL OpenGL system library instead of the platform specific OpenGL system library (CGL, glX, or WGL)" OFF)
option(WITH_GL_PROFILE_ES20 "Support using OpenGL ES 2.0. (through either EGL or the AGL/WGL/XGL 'es20' profile)" OFF)
option(WITH_GPU_SHADER_BUILDER "Shader builder is a developer option enabling linting on GLSL during compilation" OFF)
mark_as_advanced(
WITH_OPENGL
WITH_GLEW_ES
WITH_GL_EGL
WITH_GL_PROFILE_ES20
WITH_GPU_SHADER_BUILDER
)
if(WIN32)
@@ -564,18 +538,12 @@ if(WIN32)
set(CPACK_INSTALL_PREFIX ${CMAKE_GENERIC_PROGRAM_FILES}/${})
endif()
# Compiler tool-chain.
if(UNIX AND NOT APPLE)
if(CMAKE_COMPILER_IS_GNUCC)
option(WITH_LINKER_GOLD "Use ld.gold linker which is usually faster than ld.bfd" ON)
mark_as_advanced(WITH_LINKER_GOLD)
option(WITH_LINKER_LLD "Use ld.lld linker which is usually faster than ld.gold" OFF)
mark_as_advanced(WITH_LINKER_LLD)
endif()
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_C_COMPILER_ID MATCHES "Clang")
option(WITH_LINKER_MOLD "Use ld.mold linker which is usually faster than ld.gold & ld.lld." OFF)
mark_as_advanced(WITH_LINKER_MOLD)
endif()
# Compiler toolchain
if(CMAKE_COMPILER_IS_GNUCC)
option(WITH_LINKER_GOLD "Use ld.gold linker which is usually faster than ld.bfd" ON)
mark_as_advanced(WITH_LINKER_GOLD)
option(WITH_LINKER_LLD "Use ld.lld linker which is usually faster than ld.gold" OFF)
mark_as_advanced(WITH_LINKER_LLD)
endif()
option(WITH_COMPILER_ASAN "Build and link against address sanitizer (only for Debug & RelWithDebInfo targets)." OFF)
@@ -674,7 +642,7 @@ if(WIN32)
option(WITH_WINDOWS_PDB "Generate a pdb file for client side stacktraces" ON)
mark_as_advanced(WITH_WINDOWS_PDB)
option(WITH_WINDOWS_STRIPPED_PDB "Use a stripped PDB file" ON)
option(WITH_WINDOWS_STRIPPED_PDB "Use a stripped PDB file" On)
mark_as_advanced(WITH_WINDOWS_STRIPPED_PDB)
endif()
@@ -686,7 +654,7 @@ if(WIN32 OR XCODE)
option(IDE_GROUP_PROJECTS_IN_FOLDERS "Organize the projects according to source folder structure." ON)
mark_as_advanced(IDE_GROUP_PROJECTS_IN_FOLDERS)
if(IDE_GROUP_PROJECTS_IN_FOLDERS)
if (IDE_GROUP_PROJECTS_IN_FOLDERS)
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
endif()
endif()
@@ -711,12 +679,9 @@ if(UNIX AND NOT APPLE)
endif()
# Installation process.
set(POSTINSTALL_SCRIPT "" CACHE FILEPATH "Run given CMake script after installation process")
option(POSTINSTALL_SCRIPT "Run given CMake script after installation process" OFF)
mark_as_advanced(POSTINSTALL_SCRIPT)
set(POSTCONFIGURE_SCRIPT "" CACHE FILEPATH "Run given CMake script as the last step of CMake configuration")
mark_as_advanced(POSTCONFIGURE_SCRIPT)
# end option(...)
@@ -870,7 +835,7 @@ if(WITH_AUDASPACE)
endif()
# Auto-enable CUDA dynload if toolkit is not found.
if(WITH_CYCLES AND WITH_CYCLES_DEVICE_CUDA AND NOT WITH_CUDA_DYNLOAD)
if(NOT WITH_CUDA_DYNLOAD)
find_package(CUDA)
if(NOT CUDA_FOUND)
message(STATUS "CUDA toolkit not found, using dynamic runtime loading of libraries (WITH_CUDA_DYNLOAD) instead")
@@ -1100,7 +1065,7 @@ if(MSVC)
add_definitions(-D__LITTLE_ENDIAN__)
# OSX-Note: as we do cross-compiling with specific set architecture,
# endianness-detection and auto-setting is counterproductive
# endianess-detection and auto-setting is counterproductive
# so we just set endianness according CMAKE_OSX_ARCHITECTURES
elseif(CMAKE_OSX_ARCHITECTURES MATCHES i386 OR CMAKE_OSX_ARCHITECTURES MATCHES x86_64 OR CMAKE_OSX_ARCHITECTURES MATCHES arm64)
@@ -1790,7 +1755,7 @@ endif()
set(CMAKE_CXX_STANDARD 17)
# If C++17 is not available, downgrading to an earlier standard is NOT OK.
set(CMAKE_CXX_STANDARD_REQUIRED ON)
# Do not enable compiler specific language extensions.
# Do not enable compiler specific language extentions.
set(CMAKE_CXX_EXTENSIONS OFF)
# Make MSVC properly report the value of the __cplusplus preprocessor macro
@@ -2080,8 +2045,3 @@ endif()
if(0)
print_all_vars()
endif()
# Should be the last step of configuration.
if(POSTCONFIGURE_SCRIPT)
include(${POSTCONFIGURE_SCRIPT})
endif()

View File

@@ -51,7 +51,7 @@ Other Convenience Targets
* config: Run cmake configuration tool to set build options.
* deps: Build library dependencies (intended only for platform maintainers).
The existance of locally build dependencies overrides the pre-built dependencies from subversion.
The existance of locally build dependancies overrides the pre-built dependencies from subversion.
These must be manually removed from '../lib/' to go back to using the pre-compiled libraries.
Project Files

View File

@@ -63,7 +63,6 @@ include(cmake/jpeg.cmake)
include(cmake/blosc.cmake)
include(cmake/pthreads.cmake)
include(cmake/openexr.cmake)
include(cmake/brotli.cmake)
include(cmake/freetype.cmake)
include(cmake/freeglut.cmake)
include(cmake/glew.cmake)

View File

@@ -25,13 +25,8 @@ else()
endif()
if(WIN32)
if(MSVC_VERSION GREATER_EQUAL 1920) # 2019
set(BOOST_TOOLSET toolset=msvc-14.2)
set(BOOST_COMPILER_STRING -vc142)
else() # 2017
set(BOOST_TOOLSET toolset=msvc-14.1)
set(BOOST_COMPILER_STRING -vc141)
endif()
set(BOOST_TOOLSET toolset=msvc-14.1)
set(BOOST_COMPILER_STRING -vc141)
set(BOOST_CONFIGURE_COMMAND bootstrap.bat)
set(BOOST_BUILD_COMMAND b2)

View File

@@ -1,38 +0,0 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
set(BROTLI_EXTRA_ARGS
)
ExternalProject_Add(external_brotli
URL file://${PACKAGE_DIR}/${BROTLI_FILE}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH ${BROTLI_HASH_TYPE}=${BROTLI_HASH}
PREFIX ${BUILD_DIR}/brotli
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${LIBDIR}/brotli ${DEFAULT_CMAKE_FLAGS} ${BROTLI_EXTRA_ARGS}
INSTALL_DIR ${LIBDIR}/brotli
)
if(BUILD_MODE STREQUAL Release AND WIN32)
ExternalProject_Add_Step(external_brotli after_install
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/brotli/include ${HARVEST_TARGET}/brotli/include
COMMAND ${CMAKE_COMMAND} -E copy ${LIBDIR}/brotli/lib/brotlidec-static${LIBEXT} ${HARVEST_TARGET}/brotli/lib/brotlidec-static${LIBEXT}
COMMAND ${CMAKE_COMMAND} -E copy ${LIBDIR}/brotli/lib/brotlicommon-static${LIBEXT} ${HARVEST_TARGET}/brotli/lib/brotlicommon-static${LIBEXT}
DEPENDEES install
)
endif()

View File

@@ -94,4 +94,3 @@ download_source(POTRACE)
download_source(HARU)
download_source(ZSTD)
download_source(FLEX)
download_source(BROTLI)

View File

@@ -19,13 +19,13 @@
set(FREETYPE_EXTRA_ARGS
-DCMAKE_RELEASE_POSTFIX:STRING=2ST
-DCMAKE_DEBUG_POSTFIX:STRING=2ST_d
-DFT_DISABLE_BZIP2=ON
-DFT_DISABLE_HARFBUZZ=ON
-DFT_DISABLE_PNG=ON
-DFT_REQUIRE_BROTLI=ON
-DPC_BROTLIDEC_INCLUDEDIR=${LIBDIR}/brotli/include
-DPC_BROTLIDEC_LIBDIR=${LIBDIR}/brotli/lib
)
-DWITH_BZip2=OFF
-DWITH_HarfBuzz=OFF
-DFT_WITH_HARFBUZZ=OFF
-DFT_WITH_BZIP2=OFF
-DCMAKE_DISABLE_FIND_PACKAGE_HarfBuzz=TRUE
-DCMAKE_DISABLE_FIND_PACKAGE_BZip2=TRUE
-DCMAKE_DISABLE_FIND_PACKAGE_BrotliDec=TRUE)
ExternalProject_Add(external_freetype
URL file://${PACKAGE_DIR}/${FREETYPE_FILE}
@@ -36,11 +36,6 @@ ExternalProject_Add(external_freetype
INSTALL_DIR ${LIBDIR}/freetype
)
add_dependencies(
external_freetype
external_brotli
)
if(BUILD_MODE STREQUAL Release AND WIN32)
ExternalProject_Add_Step(external_freetype after_install
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/freetype ${HARVEST_TARGET}/freetype

View File

@@ -38,6 +38,13 @@ elseif(UNIX AND NOT APPLE)
)
endif()
if(BLENDER_PLATFORM_ARM)
set(GMP_OPTIONS
${GMP_OPTIONS}
--disable-assembly
)
endif()
ExternalProject_Add(external_gmp
URL file://${PACKAGE_DIR}/${GMP_FILE}
DOWNLOAD_DIR ${DOWNLOAD_DIR}

View File

@@ -17,7 +17,7 @@
# ***** END GPL LICENSE BLOCK *****
########################################################################
# Copy all generated files to the proper structure as blender prefers
# Copy all generated files to the proper strucure as blender prefers
########################################################################
if(NOT DEFINED HARVEST_TARGET)
@@ -79,8 +79,6 @@ endfunction()
harvest(alembic/include alembic/include "*.h")
harvest(alembic/lib/libAlembic.a alembic/lib/libAlembic.a)
harvest(alembic/bin alembic/bin "*")
harvest(brotli/include brotli/include "*.h")
harvest(brotli/lib brotli/lib "*.a")
harvest(boost/include boost/include "*")
harvest(boost/lib boost/lib "*.a")
harvest(ffmpeg/include ffmpeg/include "*.h")

View File

@@ -42,7 +42,6 @@ ExternalProject_Add(nanovdb
URL_HASH ${NANOVDB_HASH_TYPE}=${NANOVDB_HASH}
PREFIX ${BUILD_DIR}/nanovdb
SOURCE_SUBDIR nanovdb
PATCH_COMMAND ${PATCH_CMD} -p 1 -d ${BUILD_DIR}/nanovdb/src/nanovdb < ${PATCH_DIR}/nanovdb.diff
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${LIBDIR}/nanovdb ${DEFAULT_CMAKE_FLAGS} ${NANOVDB_EXTRA_ARGS}
INSTALL_DIR ${LIBDIR}/nanovdb
)

View File

@@ -39,7 +39,7 @@ endif()
set(DOWNLOAD_DIR "${CMAKE_CURRENT_BINARY_DIR}/downloads" CACHE STRING "Path for downloaded files")
# This path must be hard-coded like this, so that the GNUmakefile knows where it is and can pass it to make_source_archive.py:
set(PACKAGE_DIR "${CMAKE_CURRENT_BINARY_DIR}/packages")
option(PACKAGE_USE_UPSTREAM_SOURCES "Use sources upstream to download the package sources, when OFF the blender mirror will be used" ON)
option(PACKAGE_USE_UPSTREAM_SOURCES "Use soures upstream to download the package sources, when OFF the blender mirror will be used" ON)
file(TO_CMAKE_PATH ${DOWNLOAD_DIR} DOWNLOAD_DIR)
file(TO_CMAKE_PATH ${PACKAGE_DIR} PACKAGE_DIR)

View File

@@ -24,7 +24,7 @@ if(MSVC)
add_custom_command(
OUTPUT ${PYTARGET}/bin/python${PYTHON_POSTFIX}.exe
COMMAND echo packaging python
COMMAND echo this should output at ${PYTARGET}/bin/python${PYTHON_POSTFIX}.exe
COMMAND echo this should ouput at ${PYTARGET}/bin/python${PYTHON_POSTFIX}.exe
COMMAND ${CMAKE_COMMAND} -E make_directory ${PYTARGET}/libs
COMMAND ${CMAKE_COMMAND} -E copy ${PYSRC}/libs/python${PYTHON_SHORT_VERSION_NO_DOTS}.lib ${PYTARGET}/libs/python${PYTHON_SHORT_VERSION_NO_DOTS}.lib
COMMAND ${CMAKE_COMMAND} -E copy ${PYSRC}/python.exe ${PYTARGET}/bin/python.exe
@@ -43,7 +43,7 @@ if(MSVC)
add_custom_command(
OUTPUT ${PYTARGET}/bin/python${PYTHON_POSTFIX}.exe
COMMAND echo packaging python
COMMAND echo this should output at ${PYTARGET}/bin/python${PYTHON_POSTFIX}.exe
COMMAND echo this should ouput at ${PYTARGET}/bin/python${PYTHON_POSTFIX}.exe
COMMAND ${CMAKE_COMMAND} -E make_directory ${PYTARGET}/libs
COMMAND ${CMAKE_COMMAND} -E copy ${PYSRC}/libs/python${PYTHON_SHORT_VERSION_NO_DOTS}${PYTHON_POSTFIX}.lib ${PYTARGET}/libs/python${PYTHON_SHORT_VERSION_NO_DOTS}${PYTHON_POSTFIX}.lib
COMMAND ${CMAKE_COMMAND} -E copy ${PYSRC}/python${PYTHON_POSTFIX}.exe ${PYTARGET}/bin/python${PYTHON_POSTFIX}.exe

View File

@@ -31,7 +31,7 @@ ExternalProject_Add(external_python_site_packages
CONFIGURE_COMMAND ${PIP_CONFIGURE_COMMAND}
BUILD_COMMAND ""
PREFIX ${BUILD_DIR}/site_packages
INSTALL_COMMAND ${PYTHON_BINARY} -m pip install --no-cache-dir ${SITE_PACKAGES_EXTRA} cython==${CYTHON_VERSION} idna==${IDNA_VERSION} charset-normalizer==${CHARSET_NORMALIZER_VERSION} urllib3==${URLLIB3_VERSION} certifi==${CERTIFI_VERSION} requests==${REQUESTS_VERSION} zstandard==${ZSTANDARD_VERSION} --no-binary :all:
INSTALL_COMMAND ${PYTHON_BINARY} -m pip install ${SITE_PACKAGES_EXTRA} cython==${CYTHON_VERSION} idna==${IDNA_VERSION} charset-normalizer==${CHARSET_NORMALIZER_VERSION} urllib3==${URLLIB3_VERSION} certifi==${CERTIFI_VERSION} requests==${REQUESTS_VERSION} zstandard==${ZSTANDARD_VERSION} --no-binary :all:
)
if(USE_PIP_NUMPY)

View File

@@ -83,9 +83,9 @@ else()
set(OPENEXR_VERSION_POSTFIX)
endif()
set(FREETYPE_VERSION 2.11.1)
set(FREETYPE_VERSION 2.10.2)
set(FREETYPE_URI http://prdownloads.sourceforge.net/freetype/freetype-${FREETYPE_VERSION}.tar.gz)
set(FREETYPE_HASH bd4e3b007474319909a6b79d50908e85)
set(FREETYPE_HASH b1cb620e4c875cd4d1bfa04945400945)
set(FREETYPE_HASH_TYPE MD5)
set(FREETYPE_FILE freetype-${FREETYPE_VERSION}.tar.gz)
@@ -189,11 +189,11 @@ set(OSL_HASH 1abd7ce40481771a9fa937f19595d2f2)
set(OSL_HASH_TYPE MD5)
set(OSL_FILE OpenShadingLanguage-${OSL_VERSION}.tar.gz)
set(PYTHON_VERSION 3.10.2)
set(PYTHON_SHORT_VERSION 3.10)
set(PYTHON_SHORT_VERSION_NO_DOTS 310)
set(PYTHON_VERSION 3.9.7)
set(PYTHON_SHORT_VERSION 3.9)
set(PYTHON_SHORT_VERSION_NO_DOTS 39)
set(PYTHON_URI https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tar.xz)
set(PYTHON_HASH 14e8c22458ed7779a1957b26cde01db9)
set(PYTHON_HASH fddb060b483bc01850a3f412eea1d954)
set(PYTHON_HASH_TYPE MD5)
set(PYTHON_FILE Python-${PYTHON_VERSION}.tar.xz)
@@ -215,20 +215,18 @@ set(NANOVDB_HASH e7b9e863ec2f3b04ead171dec2322807)
set(NANOVDB_HASH_TYPE MD5)
set(NANOVDB_FILE nano-vdb-${NANOVDB_GIT_UID}.tar.gz)
set(IDNA_VERSION 3.3)
set(CHARSET_NORMALIZER_VERSION 2.0.10)
set(URLLIB3_VERSION 1.26.8)
set(IDNA_VERSION 3.2)
set(CHARSET_NORMALIZER_VERSION 2.0.6)
set(URLLIB3_VERSION 1.26.7)
set(CERTIFI_VERSION 2021.10.8)
set(REQUESTS_VERSION 2.27.1)
set(CYTHON_VERSION 0.29.26)
# The version of the zstd library used to build the Python package should match ZSTD_VERSION defined below.
# At this time of writing, 0.17.0 was already released, but built against zstd 1.5.1, while we use 1.5.0.
set(ZSTANDARD_VERSION 0.16.0)
set(REQUESTS_VERSION 2.26.0)
set(CYTHON_VERSION 0.29.24)
set(ZSTANDARD_VERSION 0.15.2 )
set(NUMPY_VERSION 1.22.0)
set(NUMPY_SHORT_VERSION 1.22)
set(NUMPY_VERSION 1.21.2)
set(NUMPY_SHORT_VERSION 1.21)
set(NUMPY_URI https://github.com/numpy/numpy/releases/download/v${NUMPY_VERSION}/numpy-${NUMPY_VERSION}.zip)
set(NUMPY_HASH 252de134862a27bd66705d29622edbfe)
set(NUMPY_HASH 5638d5dae3ca387be562912312db842e)
set(NUMPY_HASH_TYPE MD5)
set(NUMPY_FILE numpy-${NUMPY_VERSION}.zip)
@@ -476,9 +474,9 @@ set(ISPC_HASH 2e3abedbc0ea9aaec17d6562c632454d)
set(ISPC_HASH_TYPE MD5)
set(ISPC_FILE ispc-${ISPC_VERSION}.tar.gz)
set(GMP_VERSION 6.2.1)
set(GMP_VERSION 6.2.0)
set(GMP_URI https://gmplib.org/download/gmp/gmp-${GMP_VERSION}.tar.xz)
set(GMP_HASH 0b82665c4a92fd2ade7440c13fcaa42b)
set(GMP_HASH a325e3f09e6d91e62101e59f9bda3ec1)
set(GMP_HASH_TYPE MD5)
set(GMP_FILE gmp-${GMP_VERSION}.tar.xz)
@@ -502,9 +500,3 @@ set(ZSTD_FILE zstd-${ZSTD_VERSION}.tar.gz)
set(SSE2NEON_GIT https://github.com/DLTcollab/sse2neon.git)
set(SSE2NEON_GIT_HASH fe5ff00bb8d19b327714a3c290f3e2ce81ba3525)
set(BROTLI_VERSION v1.0.9)
set(BROTLI_URI https://github.com/google/brotli/archive/refs/tags/${BROTLI_VERSION}.tar.gz)
set(BROTLI_HASH f9e8d81d0405ba66d181529af42a3354f838c939095ff99930da6aa9cdf6fe46)
set(BROTLI_HASH_TYPE SHA256)
set(BROTLI_FILE brotli-${BROTLI_VERSION}.tar.gz)

View File

@@ -379,27 +379,27 @@ USE_CXX11=true
CLANG_FORMAT_VERSION_MIN="6.0"
CLANG_FORMAT_VERSION_MEX="10.0"
PYTHON_VERSION="3.10.2"
PYTHON_VERSION_SHORT="3.10"
PYTHON_VERSION_MIN="3.9"
PYTHON_VERSION_MEX="3.12"
PYTHON_VERSION="3.9.7"
PYTHON_VERSION_SHORT="3.9"
PYTHON_VERSION_MIN="3.7"
PYTHON_VERSION_MEX="3.11"
PYTHON_VERSION_INSTALLED=$PYTHON_VERSION_SHORT
PYTHON_FORCE_BUILD=false
PYTHON_FORCE_REBUILD=false
PYTHON_SKIP=false
# Additional Python modules.
PYTHON_IDNA_VERSION="3.3"
PYTHON_IDNA_VERSION="3.2"
PYTHON_IDNA_VERSION_MIN="2.0"
PYTHON_IDNA_VERSION_MEX="4.0"
PYTHON_IDNA_NAME="idna"
PYTHON_CHARSET_NORMALIZER_VERSION="2.0.10"
PYTHON_CHARSET_NORMALIZER_VERSION="2.0.6"
PYTHON_CHARSET_NORMALIZER_VERSION_MIN="2.0.6"
PYTHON_CHARSET_NORMALIZER_VERSION_MEX="2.1.0" # requests uses `charset_normalizer~=2.0.0`
PYTHON_CHARSET_NORMALIZER_NAME="charset-normalizer"
PYTHON_URLLIB3_VERSION="1.26.8"
PYTHON_URLLIB3_VERSION="1.26.7"
PYTHON_URLLIB3_VERSION_MIN="1.0"
PYTHON_URLLIB3_VERSION_MEX="2.0"
PYTHON_URLLIB3_NAME="urllib3"
@@ -409,17 +409,17 @@ PYTHON_CERTIFI_VERSION_MIN="2021.0"
PYTHON_CERTIFI_VERSION_MEX="2023.0"
PYTHON_CERTIFI_NAME="certifi"
PYTHON_REQUESTS_VERSION="2.27.1"
PYTHON_REQUESTS_VERSION="2.23.0"
PYTHON_REQUESTS_VERSION_MIN="2.0"
PYTHON_REQUESTS_VERSION_MEX="3.0"
PYTHON_REQUESTS_NAME="requests"
PYTHON_ZSTANDARD_VERSION="0.16.0"
PYTHON_ZSTANDARD_VERSION="0.15.2"
PYTHON_ZSTANDARD_VERSION_MIN="0.15.2"
PYTHON_ZSTANDARD_VERSION_MEX="0.20.0"
PYTHON_ZSTANDARD_VERSION_MEX="0.16.0"
PYTHON_ZSTANDARD_NAME="zstandard"
PYTHON_NUMPY_VERSION="1.22.0"
PYTHON_NUMPY_VERSION="1.21.2"
PYTHON_NUMPY_VERSION_MIN="1.14"
PYTHON_NUMPY_VERSION_MEX="2.0"
PYTHON_NUMPY_NAME="numpy"
@@ -492,14 +492,14 @@ OIIO_SKIP=false
LLVM_VERSION="12.0.0"
LLVM_VERSION_SHORT="12.0"
LLVM_VERSION_MIN="11.0"
LLVM_VERSION_MEX="14.0"
LLVM_VERSION_MEX="13.0"
LLVM_VERSION_FOUND=""
LLVM_FORCE_BUILD=false
LLVM_FORCE_REBUILD=false
LLVM_SKIP=false
# OSL needs to be compiled for now!
OSL_VERSION="1.11.17.0"
OSL_VERSION="1.11.14.1"
OSL_VERSION_SHORT="1.11"
OSL_VERSION_MIN="1.11"
OSL_VERSION_MEX="2.0"
@@ -1826,7 +1826,7 @@ compile_OCIO() {
# Force linking against static libs
#rm -f $_inst/lib/*.so*
# Additional dependencies
# Additional depencencies
#cp ext/dist/lib/libtinyxml.a $_inst/lib
#cp ext/dist/lib/libyaml-cpp.a $_inst/lib
@@ -2083,9 +2083,9 @@ compile_OIIO() {
cmake_d="$cmake_d -D OPENEXR_VERSION=$OPENEXR_VERSION"
if [ "$_with_built_openexr" = true ]; then
cmake_d="$cmake_d -D ILMBASE_ROOT=$INST/openexr"
cmake_d="$cmake_d -D OPENEXR_ROOT=$INST/openexr"
INFO "Ilmbase_ROOT=$INST/openexr"
cmake_d="$cmake_d -D ILMBASE_HOME=$INST/openexr"
cmake_d="$cmake_d -D OPENEXR_HOME=$INST/openexr"
INFO "ILMBASE_HOME=$INST/openexr"
fi
# ptex is only needed when nicholas bishop is ready
@@ -2374,9 +2374,9 @@ compile_OSL() {
#~ cmake_d="$cmake_d -D ILMBASE_VERSION=$ILMBASE_VERSION"
if [ "$_with_built_openexr" = true ]; then
cmake_d="$cmake_d -D ILMBASE_ROOT=$INST/openexr"
cmake_d="$cmake_d -D OPENEXR_ROOT=$INST/openexr"
INFO "Ilmbase_ROOT=$INST/openexr"
INFO "ILMBASE_HOME=$INST/openexr"
cmake_d="$cmake_d -D OPENEXR_ROOT_DIR=$INST/openexr"
cmake_d="$cmake_d -D ILMBASE_ROOT_DIR=$INST/openexr"
# XXX Temp workaround... sigh, ILMBase really messed the things up by defining their custom names ON by default :(
fi
@@ -3620,8 +3620,8 @@ compile_FFmpeg() {
fi
./configure --cc="gcc -Wl,--as-needed" \
--extra-ldflags="-pthread" \
--prefix=$_inst --enable-shared \
--extra-ldflags="-pthread -static-libgcc" \
--prefix=$_inst --enable-static \
--disable-ffplay --disable-doc \
--enable-gray \
--enable-avfilter --disable-vdpau \
@@ -4036,14 +4036,14 @@ install_DEB() {
INFO "Forced Python building, as requested..."
_do_compile_python=true
else
check_package_version_ge_lt_DEB python${PYTHON_VERSION_SHORT}-dev $PYTHON_VERSION_MIN $PYTHON_VERSION_MEX
check_package_version_ge_lt_DEB python3-dev $PYTHON_VERSION_MIN $PYTHON_VERSION_MEX
if [ $? -eq 0 ]; then
install_packages_DEB python${PYTHON_VERSION_SHORT}-dev
PYTHON_VERSION_INSTALLED=$(echo `get_package_version_DEB python3-dev` | sed -r 's/^([0-9]+\.[0-9]+).*/\1/')
install_packages_DEB python3-dev
clean_Python
PRINT ""
PYTHON_VERSION_INSTALLED=$(echo `get_package_version_DEB python${PYTHON_VERSION_SHORT}-dev` | sed -r 's/^([0-9]+\.[0-9]+).*/\1/')
for module in "${PYTHON_MODULES_PACKAGES[@]}"
do
module=($module)
@@ -4681,11 +4681,11 @@ install_RPM() {
else
check_package_version_ge_lt_RPM python3-devel $PYTHON_VERSION_MIN $PYTHON_VERSION_MEX
if [ $? -eq 0 ]; then
PYTHON_VERSION_INSTALLED=$(echo `get_package_version_RPM python3-devel` | sed -r 's/^([0-9]+\.[0-9]+).*/\1/')
install_packages_RPM python3-devel
clean_Python
PYTHON_VERSION_INSTALLED=$(echo `get_package_version_RPM python3-devel` | sed -r 's/^([0-9]+\.[0-9]+).*/\1/')
for module in "${PYTHON_MODULES_PACKAGES[@]}"
do
module=($module)
@@ -5224,12 +5224,12 @@ install_ARCH() {
else
check_package_version_ge_lt_ARCH python $PYTHON_VERSION_MIN $PYTHON_VERSION_MEX
if [ $? -eq 0 ]; then
PYTHON_VERSION_INSTALLED=$(echo `get_package_version_ARCH python` | sed -r 's/^([0-9]+\.[0-9]+).*/\1/')
install_packages_ARCH python
clean_Python
PRINT ""
PYTHON_VERSION_INSTALLED=$(echo `get_package_version_ARCH python` | sed -r 's/^([0-9]+\.[0-9]+).*/\1/')
for module in "${PYTHON_MODULES_PACKAGES[@]}"
do
module=($module)
@@ -5721,6 +5721,76 @@ install_OTHER() {
# ----------------------------------------------------------------------------
# Printing User Info
print_info_ffmpeglink_DEB() {
dpkg -L $_packages | grep -e ".*\/lib[^\/]\+\.so" | gawk '{ printf(nlines ? "'"$_ffmpeg_list_sep"'%s" : "%s", gensub(/.*lib([^\/]+)\.so/, "\\1", "g", $0)); nlines++ }'
}
print_info_ffmpeglink_RPM() {
rpm -ql $_packages | grep -e ".*\/lib[^\/]\+\.so" | gawk '{ printf(nlines ? "'"$_ffmpeg_list_sep"'%s" : "%s", gensub(/.*lib([^\/]+)\.so/, "\\1", "g", $0)); nlines++ }'
}
print_info_ffmpeglink_ARCH() {
pacman -Ql $_packages | grep -e ".*\/lib[^\/]\+\.so$" | gawk '{ printf(nlines ? "'"$_ffmpeg_list_sep"'%s" : "%s", gensub(/.*lib([^\/]+)\.so/, "\\1", "g", $0)); nlines++ }'
}
print_info_ffmpeglink() {
# This func must only print a ';'-separated list of libs...
if [ -z "$DISTRO" ]; then
ERROR "Failed to detect distribution type"
exit 1
fi
# Create list of packages from which to get libs names...
_packages=""
if [ "$THEORA_USE" = true ]; then
_packages="$_packages $THEORA_DEV"
fi
if [ "$VORBIS_USE" = true ]; then
_packages="$_packages $VORBIS_DEV"
fi
if [ "$OGG_USE" = true ]; then
_packages="$_packages $OGG_DEV"
fi
if [ "$XVID_USE" = true ]; then
_packages="$_packages $XVID_DEV"
fi
if [ "$VPX_USE" = true ]; then
_packages="$_packages $VPX_DEV"
fi
if [ "$OPUS_USE" = true ]; then
_packages="$_packages $OPUS_DEV"
fi
if [ "$MP3LAME_USE" = true ]; then
_packages="$_packages $MP3LAME_DEV"
fi
if [ "$X264_USE" = true ]; then
_packages="$_packages $X264_DEV"
fi
if [ "$OPENJPEG_USE" = true ]; then
_packages="$_packages $OPENJPEG_DEV"
fi
if [ "$DISTRO" = "DEB" ]; then
print_info_ffmpeglink_DEB
elif [ "$DISTRO" = "RPM" ]; then
print_info_ffmpeglink_RPM
elif [ "$DISTRO" = "ARCH" ]; then
print_info_ffmpeglink_ARCH
# XXX TODO!
else
PRINT "<Could not determine additional link libraries needed for ffmpeg, replace this by valid list of libs...>"
fi
}
print_info() {
PRINT ""
PRINT ""
@@ -5731,7 +5801,7 @@ print_info() {
PRINT "If you're using CMake add this to your configuration flags:"
_buildargs="-U *SNDFILE* -U PYTHON* -U *BOOST* -U *Boost* -U *TBB*"
_buildargs="$_buildargs -U *OPENCOLORIO* -U *OPENEXR* -U *OPENIMAGEIO* -U *LLVM* -U *CLANG* -U *CYCLES*"
_buildargs="$_buildargs -U *OPENCOLORIO* -U *OPENEXR* -U *OPENIMAGEIO* -U *LLVM* -U *CYCLES*"
_buildargs="$_buildargs -U *OPENSUBDIV* -U *OPENVDB* -U *BLOSC* -U *COLLADA* -U *FFMPEG* -U *ALEMBIC* -U *USD*"
_buildargs="$_buildargs -U *EMBREE* -U *OPENIMAGEDENOISE* -U *OPENXR*"
@@ -5932,10 +6002,12 @@ print_info() {
if [ "$FFMPEG_SKIP" = false ]; then
_1="-D WITH_CODEC_FFMPEG=ON"
_2="-D FFMPEG_LIBRARIES='avformat;avcodec;avutil;avdevice;swscale;swresample;lzma;rt;`print_info_ffmpeglink`'"
PRINT " $_1"
_buildargs="$_buildargs $_1"
PRINT " $_2"
_buildargs="$_buildargs $_1 $_2"
if [ -d $INST/ffmpeg ]; then
_1="-D FFMPEG_ROOT_DIR=$INST/ffmpeg"
_1="-D FFMPEG=$INST/ffmpeg"
PRINT " $_1"
_buildargs="$_buildargs $_1"
fi

View File

@@ -1,374 +0,0 @@
Index: nanovdb/nanovdb/NanoVDB.h
===================================================================
--- a/nanovdb/nanovdb/NanoVDB.h (revision 62751)
+++ b/nanovdb/nanovdb/NanoVDB.h (working copy)
@@ -152,8 +152,8 @@
#endif // __CUDACC_RTC__
-#ifdef __CUDACC__
-// Only define __hostdev__ when using NVIDIA CUDA compiler
+#if defined(__CUDACC__) || defined(__HIP__)
+// Only define __hostdev__ when using NVIDIA CUDA or HIP compiler
#define __hostdev__ __host__ __device__
#else
#define __hostdev__
@@ -461,7 +461,7 @@
/// Maximum floating-point values
template<typename T>
struct Maximum;
-#ifdef __CUDA_ARCH__
+#if defined(__CUDA_ARCH__) || defined(__HIP__)
template<>
struct Maximum<int>
{
@@ -1006,10 +1006,10 @@
using Vec3i = Vec3<int>;
/// @brief Return a single precision floating-point vector of this coordinate
-Vec3f Coord::asVec3s() const { return Vec3f(float(mVec[0]), float(mVec[1]), float(mVec[2])); }
+inline __hostdev__ Vec3f Coord::asVec3s() const { return Vec3f(float(mVec[0]), float(mVec[1]), float(mVec[2])); }
/// @brief Return a double precision floating-point vector of this coordinate
-Vec3d Coord::asVec3d() const { return Vec3d(double(mVec[0]), double(mVec[1]), double(mVec[2])); }
+inline __hostdev__ Vec3d Coord::asVec3d() const { return Vec3d(double(mVec[0]), double(mVec[1]), double(mVec[2])); }
// ----------------------------> Vec4 <--------------------------------------
@@ -1820,7 +1820,7 @@
}; // Map
template<typename Mat4T>
-void Map::set(const Mat4T& mat, const Mat4T& invMat, double taper)
+__hostdev__ void Map::set(const Mat4T& mat, const Mat4T& invMat, double taper)
{
float * mf = mMatF, *vf = mVecF;
float* mif = mInvMatF;
@@ -2170,7 +2170,7 @@
}; // Class Grid
template<typename TreeT>
-int Grid<TreeT>::findBlindDataForSemantic(GridBlindDataSemantic semantic) const
+__hostdev__ int Grid<TreeT>::findBlindDataForSemantic(GridBlindDataSemantic semantic) const
{
for (uint32_t i = 0, n = blindDataCount(); i < n; ++i)
if (blindMetaData(i).mSemantic == semantic)
@@ -2328,7 +2328,7 @@
}; // Tree class
template<typename RootT>
-void Tree<RootT>::extrema(ValueType& min, ValueType& max) const
+__hostdev__ void Tree<RootT>::extrema(ValueType& min, ValueType& max) const
{
min = this->root().valueMin();
max = this->root().valueMax();
@@ -2336,7 +2336,7 @@
template<typename RootT>
template<typename NodeT>
-const NodeT* Tree<RootT>::getNode(uint32_t i) const
+__hostdev__ const NodeT* Tree<RootT>::getNode(uint32_t i) const
{
static_assert(is_same<TreeNodeT<NodeT::LEVEL>, NodeT>::value, "Tree::getNode: unvalid node type");
NANOVDB_ASSERT(i < DataType::mCount[NodeT::LEVEL]);
@@ -2345,7 +2345,7 @@
template<typename RootT>
template<int LEVEL>
-const typename TreeNode<Tree<RootT>, LEVEL>::type* Tree<RootT>::getNode(uint32_t i) const
+__hostdev__ const typename TreeNode<Tree<RootT>, LEVEL>::type* Tree<RootT>::getNode(uint32_t i) const
{
NANOVDB_ASSERT(i < DataType::mCount[LEVEL]);
return reinterpret_cast<const TreeNodeT<LEVEL>*>(reinterpret_cast<const uint8_t*>(this) + DataType::mBytes[LEVEL]) + i;
@@ -2353,7 +2353,7 @@
template<typename RootT>
template<typename NodeT>
-NodeT* Tree<RootT>::getNode(uint32_t i)
+__hostdev__ NodeT* Tree<RootT>::getNode(uint32_t i)
{
static_assert(is_same<TreeNodeT<NodeT::LEVEL>, NodeT>::value, "Tree::getNode: invalid node type");
NANOVDB_ASSERT(i < DataType::mCount[NodeT::LEVEL]);
@@ -2362,7 +2362,7 @@
template<typename RootT>
template<int LEVEL>
-typename TreeNode<Tree<RootT>, LEVEL>::type* Tree<RootT>::getNode(uint32_t i)
+__hostdev__ typename TreeNode<Tree<RootT>, LEVEL>::type* Tree<RootT>::getNode(uint32_t i)
{
NANOVDB_ASSERT(i < DataType::mCount[LEVEL]);
return reinterpret_cast<TreeNodeT<LEVEL>*>(reinterpret_cast<uint8_t*>(this) + DataType::mBytes[LEVEL]) + i;
@@ -2370,7 +2370,7 @@
template<typename RootT>
template<typename NodeT>
-uint32_t Tree<RootT>::getNodeID(const NodeT& node) const
+__hostdev__ uint32_t Tree<RootT>::getNodeID(const NodeT& node) const
{
static_assert(is_same<TreeNodeT<NodeT::LEVEL>, NodeT>::value, "Tree::getNodeID: invalid node type");
const NodeT* first = reinterpret_cast<const NodeT*>(reinterpret_cast<const uint8_t*>(this) + DataType::mBytes[NodeT::LEVEL]);
@@ -2380,7 +2380,7 @@
template<typename RootT>
template<typename NodeT>
-uint32_t Tree<RootT>::getLinearOffset(const NodeT& node) const
+__hostdev__ uint32_t Tree<RootT>::getLinearOffset(const NodeT& node) const
{
return this->getNodeID(node) + DataType::mPFSum[NodeT::LEVEL];
}
@@ -3366,7 +3366,7 @@
}; // LeafNode class
template<typename ValueT, typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
-inline void LeafNode<ValueT, CoordT, MaskT, LOG2DIM>::updateBBox()
+inline __hostdev__ void LeafNode<ValueT, CoordT, MaskT, LOG2DIM>::updateBBox()
{
static_assert(LOG2DIM == 3, "LeafNode::updateBBox: only supports LOGDIM = 3!");
if (!this->isActive()) return;
Index: nanovdb/nanovdb/util/SampleFromVoxels.h
===================================================================
--- a/nanovdb/nanovdb/util/SampleFromVoxels.h (revision 62751)
+++ b/nanovdb/nanovdb/util/SampleFromVoxels.h (working copy)
@@ -22,7 +22,7 @@
#define NANOVDB_SAMPLE_FROM_VOXELS_H_HAS_BEEN_INCLUDED
// Only define __hostdev__ when compiling as NVIDIA CUDA
-#ifdef __CUDACC__
+#if defined(__CUDACC__) || defined(__HIP__)
#define __hostdev__ __host__ __device__
#else
#include <cmath> // for floor
@@ -136,7 +136,7 @@
template<typename TreeOrAccT>
template<typename Vec3T>
-typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 0, true>::operator()(const Vec3T& xyz) const
+__hostdev__ typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 0, true>::operator()(const Vec3T& xyz) const
{
const CoordT ijk = Round<CoordT>(xyz);
if (ijk != mPos) {
@@ -147,7 +147,7 @@
}
template<typename TreeOrAccT>
-typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 0, true>::operator()(const CoordT& ijk) const
+__hostdev__ typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 0, true>::operator()(const CoordT& ijk) const
{
if (ijk != mPos) {
mPos = ijk;
@@ -158,7 +158,7 @@
template<typename TreeOrAccT>
template<typename Vec3T>
-typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 0, false>::operator()(const Vec3T& xyz) const
+__hostdev__ typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 0, false>::operator()(const Vec3T& xyz) const
{
return mAcc.getValue(Round<CoordT>(xyz));
}
@@ -195,7 +195,7 @@
}; // TrilinearSamplerBase
template<typename TreeOrAccT>
-void TrilinearSampler<TreeOrAccT>::stencil(CoordT& ijk, ValueT (&v)[2][2][2]) const
+__hostdev__ void TrilinearSampler<TreeOrAccT>::stencil(CoordT& ijk, ValueT (&v)[2][2][2]) const
{
v[0][0][0] = mAcc.getValue(ijk); // i, j, k
@@ -224,7 +224,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-typename TreeOrAccT::ValueType TrilinearSampler<TreeOrAccT>::sample(const Vec3T<RealT> &uvw, const ValueT (&v)[2][2][2])
+__hostdev__ typename TreeOrAccT::ValueType TrilinearSampler<TreeOrAccT>::sample(const Vec3T<RealT> &uvw, const ValueT (&v)[2][2][2])
{
#if 0
auto lerp = [](ValueT a, ValueT b, ValueT w){ return fma(w, b-a, a); };// = w*(b-a) + a
@@ -239,7 +239,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-Vec3T<typename TreeOrAccT::ValueType> TrilinearSampler<TreeOrAccT>::gradient(const Vec3T<RealT> &uvw, const ValueT (&v)[2][2][2])
+__hostdev__ Vec3T<typename TreeOrAccT::ValueType> TrilinearSampler<TreeOrAccT>::gradient(const Vec3T<RealT> &uvw, const ValueT (&v)[2][2][2])
{
static_assert(std::is_floating_point<ValueT>::value, "TrilinearSampler::gradient requires a floating-point type");
#if 0
@@ -270,7 +270,7 @@
}
template<typename TreeOrAccT>
-bool TrilinearSampler<TreeOrAccT>::zeroCrossing(const ValueT (&v)[2][2][2])
+__hostdev__ bool TrilinearSampler<TreeOrAccT>::zeroCrossing(const ValueT (&v)[2][2][2])
{
static_assert(std::is_floating_point<ValueT>::value, "TrilinearSampler::zeroCrossing requires a floating-point type");
const bool less = v[0][0][0] < ValueT(0);
@@ -363,7 +363,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 1, true>::operator()(Vec3T<RealT> xyz) const
+__hostdev__ typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 1, true>::operator()(Vec3T<RealT> xyz) const
{
this->cache(xyz);
return BaseT::sample(xyz, mVal);
@@ -370,7 +370,7 @@
}
template<typename TreeOrAccT>
-typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 1, true>::operator()(const CoordT &ijk) const
+__hostdev__ typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 1, true>::operator()(const CoordT &ijk) const
{
return ijk == mPos ? mVal[0][0][0] : BaseT::mAcc.getValue(ijk);
}
@@ -377,7 +377,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-Vec3T<typename TreeOrAccT::ValueType> SampleFromVoxels<TreeOrAccT, 1, true>::gradient(Vec3T<RealT> xyz) const
+__hostdev__ Vec3T<typename TreeOrAccT::ValueType> SampleFromVoxels<TreeOrAccT, 1, true>::gradient(Vec3T<RealT> xyz) const
{
this->cache(xyz);
return BaseT::gradient(xyz, mVal);
@@ -393,7 +393,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-void SampleFromVoxels<TreeOrAccT, 1, true>::cache(Vec3T<RealT>& xyz) const
+__hostdev__ void SampleFromVoxels<TreeOrAccT, 1, true>::cache(Vec3T<RealT>& xyz) const
{
CoordT ijk = Floor<CoordT>(xyz);
if (ijk != mPos) {
@@ -406,7 +406,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 1, false>::operator()(Vec3T<RealT> xyz) const
+__hostdev__ typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 1, false>::operator()(Vec3T<RealT> xyz) const
{
ValueT val[2][2][2];
CoordT ijk = Floor<CoordT>(xyz);
@@ -418,7 +418,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 1, false>::operator()(Vec3T<RealT> xyz) const
+__hostdev__ typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 1, false>::operator()(Vec3T<RealT> xyz) const
{
auto lerp = [](ValueT a, ValueT b, RealT w) { return a + ValueT(w) * (b - a); };
@@ -463,7 +463,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-inline Vec3T<typename TreeOrAccT::ValueType> SampleFromVoxels<TreeOrAccT, 1, false>::gradient(Vec3T<RealT> xyz) const
+inline __hostdev__ Vec3T<typename TreeOrAccT::ValueType> SampleFromVoxels<TreeOrAccT, 1, false>::gradient(Vec3T<RealT> xyz) const
{
ValueT val[2][2][2];
CoordT ijk = Floor<CoordT>(xyz);
@@ -473,7 +473,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-bool SampleFromVoxels<TreeOrAccT, 1, false>::zeroCrossing(Vec3T<RealT> xyz) const
+__hostdev__ bool SampleFromVoxels<TreeOrAccT, 1, false>::zeroCrossing(Vec3T<RealT> xyz) const
{
ValueT val[2][2][2];
CoordT ijk = Floor<CoordT>(xyz);
@@ -510,7 +510,7 @@
}; // TriquadraticSamplerBase
template<typename TreeOrAccT>
-void TriquadraticSampler<TreeOrAccT>::stencil(const CoordT &ijk, ValueT (&v)[3][3][3]) const
+__hostdev__ void TriquadraticSampler<TreeOrAccT>::stencil(const CoordT &ijk, ValueT (&v)[3][3][3]) const
{
CoordT p(ijk[0] - 1, 0, 0);
for (int dx = 0; dx < 3; ++dx, ++p[0]) {
@@ -526,7 +526,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-typename TreeOrAccT::ValueType TriquadraticSampler<TreeOrAccT>::sample(const Vec3T<RealT> &uvw, const ValueT (&v)[3][3][3])
+__hostdev__ typename TreeOrAccT::ValueType TriquadraticSampler<TreeOrAccT>::sample(const Vec3T<RealT> &uvw, const ValueT (&v)[3][3][3])
{
auto kernel = [](const ValueT* value, double weight)->ValueT {
return weight * (weight * (0.5f * (value[0] + value[2]) - value[1]) +
@@ -545,7 +545,7 @@
}
template<typename TreeOrAccT>
-bool TriquadraticSampler<TreeOrAccT>::zeroCrossing(const ValueT (&v)[3][3][3])
+__hostdev__ bool TriquadraticSampler<TreeOrAccT>::zeroCrossing(const ValueT (&v)[3][3][3])
{
static_assert(std::is_floating_point<ValueT>::value, "TrilinearSampler::zeroCrossing requires a floating-point type");
const bool less = v[0][0][0] < ValueT(0);
@@ -624,7 +624,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 2, true>::operator()(Vec3T<RealT> xyz) const
+__hostdev__ typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 2, true>::operator()(Vec3T<RealT> xyz) const
{
this->cache(xyz);
return BaseT::sample(xyz, mVal);
@@ -631,7 +631,7 @@
}
template<typename TreeOrAccT>
-typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 2, true>::operator()(const CoordT &ijk) const
+__hostdev__ typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 2, true>::operator()(const CoordT &ijk) const
{
return ijk == mPos ? mVal[1][1][1] : BaseT::mAcc.getValue(ijk);
}
@@ -646,7 +646,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-void SampleFromVoxels<TreeOrAccT, 2, true>::cache(Vec3T<RealT>& xyz) const
+__hostdev__ void SampleFromVoxels<TreeOrAccT, 2, true>::cache(Vec3T<RealT>& xyz) const
{
CoordT ijk = Floor<CoordT>(xyz);
if (ijk != mPos) {
@@ -657,7 +657,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 2, false>::operator()(Vec3T<RealT> xyz) const
+__hostdev__ typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 2, false>::operator()(Vec3T<RealT> xyz) const
{
ValueT val[3][3][3];
CoordT ijk = Floor<CoordT>(xyz);
@@ -667,7 +667,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-bool SampleFromVoxels<TreeOrAccT, 2, false>::zeroCrossing(Vec3T<RealT> xyz) const
+__hostdev__ bool SampleFromVoxels<TreeOrAccT, 2, false>::zeroCrossing(Vec3T<RealT> xyz) const
{
ValueT val[3][3][3];
CoordT ijk = Floor<CoordT>(xyz);
@@ -710,7 +710,7 @@
}; // TricubicSampler
template<typename TreeOrAccT>
-void TricubicSampler<TreeOrAccT>::stencil(const CoordT& ijk, ValueT (&C)[64]) const
+__hostdev__ void TricubicSampler<TreeOrAccT>::stencil(const CoordT& ijk, ValueT (&C)[64]) const
{
auto fetch = [&](int i, int j, int k) -> ValueT& { return C[((i + 1) << 4) + ((j + 1) << 2) + k + 1]; };
@@ -929,7 +929,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 3, true>::operator()(Vec3T<RealT> xyz) const
+__hostdev__ typename TreeOrAccT::ValueType SampleFromVoxels<TreeOrAccT, 3, true>::operator()(Vec3T<RealT> xyz) const
{
this->cache(xyz);
return BaseT::sample(xyz, mC);
@@ -937,7 +937,7 @@
template<typename TreeOrAccT>
template<typename RealT, template<typename...> class Vec3T>
-void SampleFromVoxels<TreeOrAccT, 3, true>::cache(Vec3T<RealT>& xyz) const
+__hostdev__ void SampleFromVoxels<TreeOrAccT, 3, true>::cache(Vec3T<RealT>& xyz) const
{
CoordT ijk = Floor<CoordT>(xyz);
if (ijk != mPos) {
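
The nanovdb.diff shown above does two things: it widens the __hostdev__ guard so the header also compiles under AMD HIP, and it adds the missing inline __hostdev__ qualifiers to out-of-class template member definitions. A minimal self-contained sketch of that pattern, using a simplified hypothetical Vec3 rather than NanoVDB's real classes:

// The qualifier expands to __host__ __device__ only when a GPU compiler
// (NVIDIA CUDA or AMD HIP) is driving the translation unit; in a plain
// host-only build it expands to nothing.
#if defined(__CUDACC__) || defined(__HIP__)
#  define __hostdev__ __host__ __device__
#else
#  define __hostdev__
#endif

template<typename T> struct Vec3 {
  T x, y, z;
  __hostdev__ T lengthSqr() const;  // declared with the qualifier...
};

// ...so the out-of-class definition must repeat it (and be inline, since this
// lives in a header). Omitting it leaves the member host-only, which is the
// gap the patch closes for NanoVDB's tree accessors and voxel samplers.
template<typename T>
inline __hostdev__ T Vec3<T>::lengthSqr() const { return x * x + y * y + z * z; }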

View File

@@ -197,38 +197,3 @@ index 67ec0d15f..6dc3e85a0 100644
#else
#error Unknown architecture.
#endif
diff --git a/pxr/base/arch/demangle.cpp b/pxr/base/arch/demangle.cpp
index 67ec0d15f..6dc3e85a0 100644
--- a/pxr/base/arch/demangle.cpp
+++ b/pxr/base/arch/demangle.cpp
@@ -36,6 +36,7 @@
#if (ARCH_COMPILER_GCC_MAJOR == 3 && ARCH_COMPILER_GCC_MINOR >= 1) || \
ARCH_COMPILER_GCC_MAJOR > 3 || defined(ARCH_COMPILER_CLANG)
#define _AT_LEAST_GCC_THREE_ONE_OR_CLANG
+#include <cxxabi.h>
#endif
PXR_NAMESPACE_OPEN_SCOPE
@@ -138,7 +139,6 @@
#endif
#if defined(_AT_LEAST_GCC_THREE_ONE_OR_CLANG)
-#include <cxxabi.h>
/*
* This routine doesn't work when you get to gcc3.4.
diff --git a/pxr/base/work/singularTask.h b/pxr/base/work/singularTask.h
index 67ec0d15f..6dc3e85a0 100644
--- a/pxr/base/work/singularTask.h
+++ b/pxr/base/work/singularTask.h
@@ -120,7 +120,7 @@
// case we go again to ensure the task can do whatever it
// was awakened to do. Once we successfully take the count
// to zero, we stop.
- size_t old = count;
+ std::size_t old = count;
do { _fn(); } while (
!count.compare_exchange_strong(old, 0));
});
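
The singularTask.h hunk above only switches size_t to std::size_t, but the wake-counter loop around it is the interesting part. A minimal sketch of the same pattern, assuming a simplified free function rather than USD's actual WorkSingularTask:

#include <atomic>
#include <cstddef>
#include <functional>

// Run the task at least once, then try to swing the wake counter back to zero.
// If another wake arrived while the task ran, compare_exchange_strong fails,
// refreshes `old` with the current count, and the task runs again.
inline void drain_wakes(std::atomic<std::size_t> &count, const std::function<void()> &fn)
{
  std::size_t old = count.load();
  do {
    fn();
  } while (!count.compare_exchange_strong(old, 0));
}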

View File

@@ -1,14 +1,21 @@
@echo off
if NOT "%1" == "" (
if "%1" == "2017" (
echo "Building for VS2017"
set VSVER=15.0
set VSVER_SHORT=15
set BuildDir=VS15
if "%1" == "2013" (
echo "Building for VS2013"
set VSVER=12.0
set VSVER_SHORT=12
set BuildDir=VS12
goto par2
)
if "%1" == "2019" (
echo "Building for VS2019"
if "%1" == "2015" (
echo "Building for VS2015"
set VSVER=14.0
set VSVER_SHORT=14
set BuildDir=VS14
goto par2
)
if "%1" == "2017" (
echo "Building for VS2017"
set VSVER=15.0
set VSVER_SHORT=15
set BuildDir=VS15
@@ -18,22 +25,40 @@ if NOT "%1" == "" (
)
:usage
Echo Usage build_deps 2017/2019 x64
Echo Usage build_deps 2013/2015/2017 x64/x86
goto exit
:par2
if NOT "%2" == "" (
if "%2" == "x86" (
echo "Building for x86"
set HARVESTROOT=Windows_vc
set ARCH=86
if "%1" == "2013" (
set CMAKE_BUILDER=Visual Studio 12 2013
)
if "%1" == "2015" (
set CMAKE_BUILDER=Visual Studio 14 2015
)
if "%1" == "2017" (
set CMAKE_BUILDER=Visual Studio 15 2017
)
goto start
)
if "%2" == "x64" (
echo "Building for x64"
set HARVESTROOT=Win64_vc
set ARCH=64
if "%1" == "2019" (
set CMAKE_BUILDER=Visual Studio 16 2019
set CMAKE_BUILD_ARCH=-A x64
if "%1" == "2013" (
set CMAKE_BUILDER=Visual Studio 12 2013 Win64
)
if "%1" == "2015" (
set CMAKE_BUILDER=Visual Studio 14 2015 Win64
)
if "%1" == "2017" (
set CMAKE_BUILDER=Visual Studio 15 2017 Win64
set CMAKE_BUILD_ARCH=
)
goto start
)
)
@@ -95,7 +120,7 @@ set path=%BUILD_DIR%\downloads\mingw\mingw64\msys\1.0\bin\;%BUILD_DIR%\downloads
mkdir %STAGING%\%BuildDir%%ARCH%R
cd %Staging%\%BuildDir%%ARCH%R
echo %DATE% %TIME% : Start > %StatusFile%
cmake -G "%CMAKE_BUILDER%" %CMAKE_BUILD_ARCH% -Thost=x64 %SOURCE_DIR% -DPACKAGE_DIR=%BUILD_DIR%/packages -DDOWNLOAD_DIR=%BUILD_DIR%/downloads -DBUILD_MODE=Release -DHARVEST_TARGET=%HARVEST_DIR%/%HARVESTROOT%%VSVER_SHORT%/
cmake -G "%CMAKE_BUILDER%" -Thost=x64 %SOURCE_DIR% -DPACKAGE_DIR=%BUILD_DIR%/packages -DDOWNLOAD_DIR=%BUILD_DIR%/downloads -DBUILD_MODE=Release -DHARVEST_TARGET=%HARVEST_DIR%/%HARVESTROOT%%VSVER_SHORT%/
echo %DATE% %TIME% : Release Configuration done >> %StatusFile%
if "%dobuild%" == "1" (
msbuild /m "ll.vcxproj" /p:Configuration=Release /fl /flp:logfile=BlenderDeps_llvm.log;Verbosity=normal
@@ -108,7 +133,7 @@ if "%NODEBUG%" == "1" goto exit
cd %BUILD_DIR%
mkdir %STAGING%\%BuildDir%%ARCH%D
cd %Staging%\%BuildDir%%ARCH%D
cmake -G "%CMAKE_BUILDER%" %CMAKE_BUILD_ARCH% -Thost=x64 %SOURCE_DIR% -DPACKAGE_DIR=%BUILD_DIR%/packages -DDOWNLOAD_DIR=%BUILD_DIR%/downloads -DCMAKE_BUILD_TYPE=Debug -DBUILD_MODE=Debug -DHARVEST_TARGET=%HARVEST_DIR%/%HARVESTROOT%%VSVER_SHORT%/ %CMAKE_DEBUG_OPTIONS%
cmake -G "%CMAKE_BUILDER%" -Thost=x64 %SOURCE_DIR% -DPACKAGE_DIR=%BUILD_DIR%/packages -DDOWNLOAD_DIR=%BUILD_DIR%/downloads -DCMAKE_BUILD_TYPE=Debug -DBUILD_MODE=Debug -DHARVEST_TARGET=%HARVEST_DIR%/%HARVESTROOT%%VSVER_SHORT%/ %CMAKE_DEBUG_OPTIONS%
echo %DATE% %TIME% : Debug Configuration done >> %StatusFile%
if "%dobuild%" == "1" (
msbuild /m "ll.vcxproj" /p:Configuration=Debug /fl /flp:logfile=BlenderDeps_llvm.log;;Verbosity=normal

View File

@@ -1,83 +0,0 @@
# - Find Brotli library (compression for freetype/woff2).
# This module defines
# BROTLI_INCLUDE_DIRS, where to find Brotli headers, Set when
# BROTLI_INCLUDE_DIR is found.
# BROTLI_LIBRARIES, libraries to link against to use Brotli.
# BROTLI_ROOT_DIR, The base directory to search for Brotli.
# This can also be an environment variable.
# BROTLI_FOUND, If false, do not try to use Brotli.
#
#=============================================================================
# Copyright 2022 Blender Foundation.
#
# Distributed under the OSI-approved BSD 3-Clause License,
# see accompanying file BSD-3-Clause-license.txt for details.
#=============================================================================
# If BROTLI_ROOT_DIR was defined in the environment, use it.
IF(NOT BROTLI_ROOT_DIR AND NOT $ENV{BROTLI_ROOT_DIR} STREQUAL "")
SET(BROTLI_ROOT_DIR $ENV{BROTLI_ROOT_DIR})
ENDIF()
SET(_BROTLI_SEARCH_DIRS
${BROTLI_ROOT_DIR}
)
FIND_PATH(BROTLI_INCLUDE_DIR
NAMES
brotli/decode.h
HINTS
${_BROTLI_SEARCH_DIRS}
PATH_SUFFIXES
include
DOC "Brotli header files"
)
FIND_LIBRARY(BROTLI_LIBRARY_COMMON
NAMES
# Some builds use a special `-static` postfix in their static libraries names.
brotlicommon-static
brotlicommon
HINTS
${_BROTLI_SEARCH_DIRS}
PATH_SUFFIXES
lib64 lib lib/static
DOC "Brotli static common library"
)
FIND_LIBRARY(BROTLI_LIBRARY_DEC
NAMES
# Some builds use a special `-static` postfix in their static libraries names.
brotlidec-static
brotlidec
HINTS
${_BROTLI_SEARCH_DIRS}
PATH_SUFFIXES
lib64 lib lib/static
DOC "Brotli static decode library"
)
IF(${BROTLI_LIBRARY_COMMON_NOTFOUND} or ${BROTLI_LIBRARY_DEC_NOTFOUND})
set(BROTLI_FOUND FALSE)
ELSE()
# handle the QUIETLY and REQUIRED arguments and set BROTLI_FOUND to TRUE if
# all listed variables are TRUE
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(Brotli DEFAULT_MSG BROTLI_LIBRARY_COMMON BROTLI_LIBRARY_DEC BROTLI_INCLUDE_DIR)
IF(BROTLI_FOUND)
get_filename_component(BROTLI_LIBRARY_DIR ${BROTLI_LIBRARY_COMMON} DIRECTORY)
SET(BROTLI_INCLUDE_DIRS ${BROTLI_INCLUDE_DIR})
SET(BROTLI_LIBRARIES ${BROTLI_LIBRARY_DEC} ${BROTLI_LIBRARY_COMMON})
ENDIF()
ENDIF()
MARK_AS_ADVANCED(
BROTLI_INCLUDE_DIR
BROTLI_LIBRARY_COMMON
BROTLI_LIBRARY_DEC
BROTLI_LIBRARY_DIR
)
UNSET(_BROTLI_SEARCH_DIRS)

View File

@@ -33,8 +33,6 @@ if(NOT FFMPEG_FIND_COMPONENTS)
avfilter
avformat
avutil
swscale
swresample
)
endif()
@@ -52,9 +50,9 @@ foreach(_component ${FFMPEG_FIND_COMPONENTS})
string(TOUPPER ${_component} _upper_COMPONENT)
find_library(FFMPEG_${_upper_COMPONENT}_LIBRARY
NAMES
${_component}
${_upper_COMPONENT}
HINTS
${_ffmpeg_SEARCH_DIRS}
${LIBDIR}/ffmpeg
PATH_SUFFIXES
lib64 lib
)

View File

@@ -21,7 +21,7 @@ ENDIF()
SET(_optix_SEARCH_DIRS
${OPTIX_ROOT_DIR}
"$ENV{PROGRAMDATA}/NVIDIA Corporation/OptiX SDK 7.3.0"
"$ENV{PROGRAMDATA}/NVIDIA Corporation/OptiX SDK 7.0.0"
)
FIND_PATH(OPTIX_INCLUDE_DIR

View File

@@ -34,7 +34,7 @@ IF(NOT PYTHON_ROOT_DIR AND NOT $ENV{PYTHON_ROOT_DIR} STREQUAL "")
SET(PYTHON_ROOT_DIR $ENV{PYTHON_ROOT_DIR})
ENDIF()
SET(PYTHON_VERSION 3.10 CACHE STRING "Python Version (major and minor only)")
SET(PYTHON_VERSION 3.9 CACHE STRING "Python Version (major and minor only)")
MARK_AS_ADVANCED(PYTHON_VERSION)

View File

@@ -168,7 +168,7 @@ def function_parm_wash_tokens(parm):
# if tokens[-1].kind == To
# remove trailing char
if tokens[-1].kind == TokenKind.PUNCTUATION:
if tokens[-1].spelling in {",", ")", ";"}:
if tokens[-1].spelling in (",", ")", ";"):
tokens.pop()
# else:
# print(tokens[-1].spelling)
@@ -179,7 +179,7 @@ def function_parm_wash_tokens(parm):
t_spelling = t.spelling
ok = True
if t_kind == TokenKind.KEYWORD:
if t_spelling in {"const", "restrict", "volatile"}:
if t_spelling in ("const", "restrict", "volatile"):
ok = False
elif t_spelling.startswith("__"):
ok = False # __restrict

View File

@@ -114,7 +114,7 @@ def is_c_header(filename: str) -> bool:
def is_c(filename: str) -> bool:
ext = splitext(filename)[1]
return (ext in {".c", ".cpp", ".cxx", ".m", ".mm", ".rc", ".cc", ".inl", ".metal"})
return (ext in {".c", ".cpp", ".cxx", ".m", ".mm", ".rc", ".cc", ".inl"})
def is_c_any(filename: str) -> bool:

View File

@@ -180,7 +180,7 @@ def create_nb_project_main():
f.write(' </logicalFolder>\n')
f.write(' </logicalFolder>\n')
# default, but this dir is in fact not in blender dir so we can ignore it
# default, but this dir is infact not in blender dir so we can ignore it
# f.write(' <sourceFolderFilter>^(nbproject)$</sourceFolderFilter>\n')
f.write(r' <sourceFolderFilter>^(nbproject|__pycache__|.*\.py|.*\.html|.*\.blend)$</sourceFolderFilter>\n')

View File

@@ -19,6 +19,9 @@ set(WITH_CODEC_SNDFILE OFF CACHE BOOL "" FORCE)
set(WITH_COMPOSITOR OFF CACHE BOOL "" FORCE)
set(WITH_COREAUDIO OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES_DEVICE_OPTIX OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES_EMBREE OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES_OSL OFF CACHE BOOL "" FORCE)
set(WITH_DRACO OFF CACHE BOOL "" FORCE)
set(WITH_FFTW3 OFF CACHE BOOL "" FORCE)
set(WITH_FREESTYLE OFF CACHE BOOL "" FORCE)

View File

@@ -61,7 +61,6 @@ set(WITH_MEM_JEMALLOC ON CACHE BOOL "" FORCE)
# platform dependent options
if(APPLE)
set(WITH_COREAUDIO ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_DEVICE_METAL ON CACHE BOOL "" FORCE)
endif()
if(NOT WIN32)
set(WITH_JACK ON CACHE BOOL "" FORCE)
@@ -82,5 +81,4 @@ if(NOT APPLE)
set(WITH_CYCLES_DEVICE_OPTIX ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_CUDA_BINARIES ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_CUBIN_COMPILER OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES_HIP_BINARIES ON CACHE BOOL "" FORCE)
endif()

View File

@@ -488,6 +488,7 @@ function(blender_add_test_executable
include_directories(${includes})
include_directories(${includes_sys})
setup_libdirs()
BLENDER_SRC_GTEST_EX(
NAME ${name}
@@ -524,6 +525,83 @@ function(setup_heavy_lib_pool)
endif()
endfunction()
function(SETUP_LIBDIRS)
# NOTE: For all new libraries, use absolute library paths.
# This should eventually be phased out.
# APPLE plaform uses full paths for linking libraries, and avoids link_directories.
if(NOT MSVC AND NOT APPLE)
link_directories(${JPEG_LIBPATH} ${PNG_LIBPATH} ${ZLIB_LIBPATH} ${FREETYPE_LIBPATH})
if(WITH_PYTHON) # AND NOT WITH_PYTHON_MODULE # WIN32 needs
link_directories(${PYTHON_LIBPATH})
endif()
if(WITH_SDL AND NOT WITH_SDL_DYNLOAD)
link_directories(${SDL_LIBPATH})
endif()
if(WITH_CODEC_FFMPEG)
link_directories(${FFMPEG_LIBPATH})
endif()
if(WITH_IMAGE_OPENEXR)
link_directories(${OPENEXR_LIBPATH})
endif()
if(WITH_IMAGE_TIFF)
link_directories(${TIFF_LIBPATH})
endif()
if(WITH_BOOST)
link_directories(${BOOST_LIBPATH})
endif()
if(WITH_OPENIMAGEIO)
link_directories(${OPENIMAGEIO_LIBPATH})
endif()
if(WITH_OPENIMAGEDENOISE)
link_directories(${OPENIMAGEDENOISE_LIBPATH})
endif()
if(WITH_OPENCOLORIO)
link_directories(${OPENCOLORIO_LIBPATH})
endif()
if(WITH_OPENVDB)
link_directories(${OPENVDB_LIBPATH})
endif()
if(WITH_OPENAL)
link_directories(${OPENAL_LIBPATH})
endif()
if(WITH_JACK AND NOT WITH_JACK_DYNLOAD)
link_directories(${JACK_LIBPATH})
endif()
if(WITH_PULSEAUDIO AND NOT WITH_PULSEAUDIO_DYNLOAD)
link_directories(${LIBPULSE_LIBPATH})
endif()
if(WITH_CODEC_SNDFILE)
link_directories(${LIBSNDFILE_LIBPATH})
endif()
if(WITH_FFTW3)
link_directories(${FFTW3_LIBPATH})
endif()
if(WITH_OPENCOLLADA)
link_directories(${OPENCOLLADA_LIBPATH})
# # Never set
# link_directories(${PCRE_LIBPATH})
# link_directories(${EXPAT_LIBPATH})
endif()
if(WITH_LLVM)
link_directories(${LLVM_LIBPATH})
endif()
if(WITH_ALEMBIC)
link_directories(${ALEMBIC_LIBPATH})
endif()
if(WITH_GMP)
link_directories(${GMP_LIBPATH})
endif()
if(WIN32 AND NOT UNIX)
link_directories(${PTHREADS_LIBPATH})
endif()
endif()
endfunction()
# Platform specific linker flags for targets.
function(setup_platform_linker_flags
target)
@@ -1197,20 +1275,43 @@ endfunction()
macro(openmp_delayload
projectname
)
if(MSVC)
if(WITH_OPENMP)
if(MSVC_CLANG)
set(OPENMP_DLL_NAME "libomp")
elseif(MSVC_VERSION EQUAL 1800)
set(OPENMP_DLL_NAME "vcomp120")
else()
set(OPENMP_DLL_NAME "vcomp140")
if(MSVC)
if(WITH_OPENMP)
if(MSVC_CLANG)
set(OPENMP_DLL_NAME "libomp")
elseif(MSVC_VERSION EQUAL 1800)
set(OPENMP_DLL_NAME "vcomp120")
else()
set(OPENMP_DLL_NAME "vcomp140")
endif()
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_RELEASE " /DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_DEBUG " /DELAYLOAD:${OPENMP_DLL_NAME}d.dll delayimp.lib")
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_RELWITHDEBINFO " /DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_MINSIZEREL " /DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
endif()
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_RELEASE " /DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_DEBUG " /DELAYLOAD:${OPENMP_DLL_NAME}d.dll delayimp.lib")
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_RELWITHDEBINFO " /DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_MINSIZEREL " /DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
endif()
endmacro()
macro(blender_precompile_headers target cpp header)
if(MSVC)
# get the name for the pch output file
get_filename_component(pchbase ${cpp} NAME_WE)
set(pchfinal "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/${pchbase}.pch")
# mark the cpp as the one outputting the pch
set_property(SOURCE ${cpp} APPEND PROPERTY OBJECT_OUTPUTS "${pchfinal}")
# get all sources for the target
get_target_property(sources ${target} SOURCES)
# make all sources depend on the pch to enforce the build order
foreach(src ${sources})
set_property(SOURCE ${src} APPEND PROPERTY OBJECT_DEPENDS "${pchfinal}")
endforeach()
target_sources(${target} PRIVATE ${cpp} ${header})
set_target_properties(${target} PROPERTIES COMPILE_FLAGS "/Yu${header} /Fp${pchfinal} /FI${header}")
set_source_files_properties(${cpp} PROPERTIES COMPILE_FLAGS "/Yc${header} /Fp${pchfinal}")
endif()
endmacro()

View File

@@ -128,20 +128,25 @@ if(WITH_CODEC_SNDFILE)
endif()
if(WITH_PYTHON)
# Use precompiled libraries by default.
set(PYTHON_VERSION 3.10)
# we use precompiled libraries for py 3.9 and up by default
set(PYTHON_VERSION 3.9)
if(NOT WITH_PYTHON_MODULE AND NOT WITH_PYTHON_FRAMEWORK)
# Normally cached but not since we include them with blender.
# normally cached but not since we include them with blender
set(PYTHON_INCLUDE_DIR "${LIBDIR}/python/include/python${PYTHON_VERSION}")
set(PYTHON_EXECUTABLE "${LIBDIR}/python/bin/python${PYTHON_VERSION}")
set(PYTHON_LIBRARY ${LIBDIR}/python/lib/libpython${PYTHON_VERSION}.a)
set(PYTHON_LIBPATH "${LIBDIR}/python/lib/python${PYTHON_VERSION}")
# set(PYTHON_LINKFLAGS "-u _PyMac_Error") # won't build with this enabled
else()
# Module must be compiled against Python framework.
# module must be compiled against Python framework
set(_py_framework "/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}")
set(PYTHON_INCLUDE_DIR "${_py_framework}/include/python${PYTHON_VERSION}")
set(PYTHON_EXECUTABLE "${_py_framework}/bin/python${PYTHON_VERSION}")
set(PYTHON_LIBPATH "${_py_framework}/lib/python${PYTHON_VERSION}")
# set(PYTHON_LIBRARY python${PYTHON_VERSION})
# set(PYTHON_LINKFLAGS "-u _PyMac_Error -framework Python") # won't build with this enabled
unset(_py_framework)
endif()
@@ -161,18 +166,13 @@ if(WITH_FFTW3)
find_package(Fftw3)
endif()
# FreeType compiled with Brotli compression for woff2.
find_package(Freetype REQUIRED)
list(APPEND FREETYPE_LIBRARIES
${LIBDIR}/brotli/lib/libbrotlicommon-static.a
${LIBDIR}/brotli/lib/libbrotlidec-static.a)
if(WITH_IMAGE_OPENEXR)
find_package(OpenEXR)
endif()
if(WITH_CODEC_FFMPEG)
set(FFMPEG_ROOT_DIR ${LIBDIR}/ffmpeg)
set(FFMPEG_FIND_COMPONENTS
avcodec avdevice avformat avutil
mp3lame ogg opus swresample swscale
@@ -257,6 +257,9 @@ if(WITH_BOOST)
if(WITH_INTERNATIONAL)
list(APPEND _boost_FIND_COMPONENTS locale)
endif()
if(WITH_CYCLES_NETWORK)
list(APPEND _boost_FIND_COMPONENTS serialization)
endif()
if(WITH_OPENVDB)
list(APPEND _boost_FIND_COMPONENTS iostreams)
endif()
@@ -336,7 +339,7 @@ if(WITH_LLVM)
endif()
if(WITH_CYCLES AND WITH_CYCLES_OSL)
if(WITH_CYCLES_OSL)
set(CYCLES_OSL ${LIBDIR}/osl)
find_library(OSL_LIB_EXEC NAMES oslexec PATHS ${CYCLES_OSL}/lib)
@@ -356,7 +359,7 @@ if(WITH_CYCLES AND WITH_CYCLES_OSL)
endif()
endif()
if(WITH_CYCLES AND WITH_CYCLES_EMBREE)
if(WITH_CYCLES_EMBREE)
find_package(Embree 3.8.0 REQUIRED)
# Increase stack size for Embree, only works for executables.
if(NOT WITH_PYTHON_MODULE)
@@ -479,11 +482,8 @@ string(APPEND PLATFORM_LINKFLAGS " -stdlib=libc++")
# Suppress ranlib "has no symbols" warnings (workaround for T48250)
set(CMAKE_C_ARCHIVE_CREATE "<CMAKE_AR> Scr <TARGET> <LINK_FLAGS> <OBJECTS>")
set(CMAKE_CXX_ARCHIVE_CREATE "<CMAKE_AR> Scr <TARGET> <LINK_FLAGS> <OBJECTS>")
# llvm-ranlib doesn't support this flag. Xcode's libtool does.
if(NOT ${CMAKE_RANLIB} MATCHES ".*llvm-ranlib$")
set(CMAKE_C_ARCHIVE_FINISH "<CMAKE_RANLIB> -no_warning_for_no_symbols -c <TARGET>")
set(CMAKE_CXX_ARCHIVE_FINISH "<CMAKE_RANLIB> -no_warning_for_no_symbols -c <TARGET>")
endif()
set(CMAKE_C_ARCHIVE_FINISH "<CMAKE_RANLIB> -no_warning_for_no_symbols -c <TARGET>")
set(CMAKE_CXX_ARCHIVE_FINISH "<CMAKE_RANLIB> -no_warning_for_no_symbols -c <TARGET>")
if(WITH_COMPILER_CCACHE)
if(NOT CMAKE_GENERATOR STREQUAL "Xcode")
@@ -510,6 +510,3 @@ list(APPEND CMAKE_BUILD_RPATH "${OpenMP_LIBRARY_DIR}")
set(CMAKE_SKIP_INSTALL_RPATH FALSE)
list(APPEND CMAKE_INSTALL_RPATH "@loader_path/../Resources/${BLENDER_VERSION}/lib")
# Same as `CFBundleIdentifier` in Info.plist.
set(CMAKE_XCODE_ATTRIBUTE_PRODUCT_BUNDLE_IDENTIFIER "org.blenderfoundation.blender")

View File

@@ -96,7 +96,7 @@ else()
# Detect SDK version to use.
if(NOT DEFINED OSX_SYSTEM)
execute_process(
COMMAND xcrun --sdk macosx --show-sdk-version
COMMAND xcrun --show-sdk-version
OUTPUT_VARIABLE OSX_SYSTEM
OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()

View File

@@ -18,7 +18,7 @@
# All rights reserved.
# ***** END GPL LICENSE BLOCK *****
# Libraries configuration for any *nix system including Linux and Unix (excluding APPLE).
# Libraries configuration for any *nix system including Linux and Unix.
# Detect precompiled library directory
if(NOT DEFINED LIBDIR)
@@ -48,9 +48,6 @@ if(NOT DEFINED LIBDIR)
unset(LIBDIR_CENTOS7_ABI)
endif()
# Support restoring this value once pre-compiled libraries have been handled.
set(WITH_STATIC_LIBS_INIT ${WITH_STATIC_LIBS})
if(EXISTS ${LIBDIR})
message(STATUS "Using pre-compiled LIBDIR: ${LIBDIR}")
@@ -103,22 +100,7 @@ find_package_wrapper(JPEG REQUIRED)
find_package_wrapper(PNG REQUIRED)
find_package_wrapper(ZLIB REQUIRED)
find_package_wrapper(Zstd REQUIRED)
if(NOT WITH_SYSTEM_FREETYPE)
# FreeType compiled with Brotli compression for woff2.
find_package_wrapper(Freetype REQUIRED)
if(EXISTS ${LIBDIR})
find_package_wrapper(Brotli REQUIRED)
# NOTE: This is done on WIN32 & APPLE but fails on some Linux systems.
# See: https://devtalk.blender.org/t/22536
# So `BROTLI_LIBRARIES` need to be added directly after `FREETYPE_LIBRARIES`.
#
# list(APPEND FREETYPE_LIBRARIES
# ${BROTLI_LIBRARIES}
# )
endif()
endif()
find_package_wrapper(Freetype REQUIRED)
if(WITH_PYTHON)
# No way to set py35, remove for now.
@@ -196,30 +178,26 @@ endif()
if(WITH_CODEC_FFMPEG)
if(EXISTS ${LIBDIR})
set(FFMPEG_ROOT_DIR ${LIBDIR}/ffmpeg)
# Override FFMPEG components to also include static library dependencies
# included with precompiled libraries, and to ensure correct link order.
set(FFMPEG_FIND_COMPONENTS
avformat avcodec avdevice avutil swresample swscale
sndfile
FLAC
mp3lame
opus
theora theoradec theoraenc
vorbis vorbisenc vorbisfile ogg
vpx
x264
xvidcore)
elseif(FFMPEG)
# Old cache variable used for root dir, convert to new standard.
set(FFMPEG_ROOT_DIR ${FFMPEG})
# For precompiled lib directory, all ffmpeg dependencies are in the same folder
file(GLOB ffmpeg_libs ${LIBDIR}/ffmpeg/lib/*.a ${LIBDIR}/sndfile/lib/*.a)
set(FFMPEG ${LIBDIR}/ffmpeg CACHE PATH "FFMPEG Directory")
set(FFMPEG_LIBRARIES ${ffmpeg_libs} ${ffmpeg_libs} CACHE STRING "FFMPEG Libraries")
else()
set(FFMPEG /usr CACHE PATH "FFMPEG Directory")
set(FFMPEG_LIBRARIES avformat avcodec avutil avdevice swscale CACHE STRING "FFMPEG Libraries")
endif()
find_package(FFmpeg)
if(NOT FFMPEG_FOUND)
set(WITH_CODEC_FFMPEG OFF)
message(STATUS "FFmpeg not found, disabling it")
mark_as_advanced(FFMPEG)
# lame, but until we have proper find module for ffmpeg
set(FFMPEG_INCLUDE_DIRS ${FFMPEG}/include)
if(EXISTS "${FFMPEG}/include/ffmpeg/")
list(APPEND FFMPEG_INCLUDE_DIRS "${FFMPEG}/include/ffmpeg")
endif()
# end lameness
mark_as_advanced(FFMPEG_LIBRARIES)
set(FFMPEG_LIBPATH ${FFMPEG}/lib)
endif()
if(WITH_FFTW3)
@@ -263,7 +241,7 @@ if(WITH_INPUT_NDOF)
endif()
endif()
if(WITH_CYCLES AND WITH_CYCLES_OSL)
if(WITH_CYCLES_OSL)
set(CYCLES_OSL ${LIBDIR}/osl CACHE PATH "Path to OpenShadingLanguage installation")
if(EXISTS ${CYCLES_OSL} AND NOT OSL_ROOT)
set(OSL_ROOT ${CYCLES_OSL})
@@ -336,7 +314,7 @@ if(WITH_BOOST)
endif()
set(Boost_USE_MULTITHREADED ON)
set(__boost_packages filesystem regex thread date_time)
if(WITH_CYCLES AND WITH_CYCLES_OSL)
if(WITH_CYCLES_OSL)
if(NOT (${OSL_LIBRARY_VERSION_MAJOR} EQUAL "1" AND ${OSL_LIBRARY_VERSION_MINOR} LESS "6"))
list(APPEND __boost_packages wave)
else()
@@ -345,6 +323,9 @@ if(WITH_BOOST)
if(WITH_INTERNATIONAL)
list(APPEND __boost_packages locale)
endif()
if(WITH_CYCLES_NETWORK)
list(APPEND __boost_packages serialization)
endif()
if(WITH_OPENVDB)
list(APPEND __boost_packages iostreams)
endif()
@@ -422,7 +403,7 @@ if(WITH_OPENCOLORIO)
endif()
endif()
if(WITH_CYCLES AND WITH_CYCLES_EMBREE)
if(WITH_CYCLES_EMBREE)
find_package(Embree 3.8.0 REQUIRED)
endif()
@@ -554,21 +535,6 @@ add_definitions(-D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE
#
# Keep last, so indirectly linked libraries don't override our own pre-compiled libs.
if(EXISTS ${LIBDIR})
# Clear the prefix path as it causes the `LIBDIR` to override system locations.
unset(CMAKE_PREFIX_PATH)
# Since the pre-compiled `LIBDIR` directories have been handled, don't prefer static libraries.
set(WITH_STATIC_LIBS ${WITH_STATIC_LIBS_INIT})
endif()
if(WITH_SYSTEM_FREETYPE)
find_package_wrapper(Freetype)
if(NOT FREETYPE_FOUND)
message(FATAL_ERROR "Failed finding system FreeType version!")
endif()
endif()
if(WITH_LZO AND WITH_SYSTEM_LZO)
find_package_wrapper(LZO)
if(NOT LZO_FOUND)
@@ -681,9 +647,6 @@ endif()
# ----------------------------------------------------------------------------
# Compilers
# Only set the linker once.
set(_IS_LINKER_DEFAULT ON)
# GNU Compiler
if(CMAKE_COMPILER_IS_GNUCC)
# ffp-contract=off:
@@ -702,89 +665,26 @@ if(CMAKE_COMPILER_IS_GNUCC)
string(PREPEND CMAKE_CXX_FLAGS_RELWITHDEBINFO "${GCC_EXTRA_FLAGS_RELEASE} ")
unset(GCC_EXTRA_FLAGS_RELEASE)
# NOTE(@campbellbarton): Eventually mold will be able to use `-fuse-ld=mold`,
# however at the moment this only works for GCC 12.1+ (unreleased at time of writing).
# So a workaround is used here "-B" which points to another path to find system commands
# such as `ld`.
if(WITH_LINKER_MOLD AND _IS_LINKER_DEFAULT)
find_program(MOLD_BIN "mold")
mark_as_advanced(MOLD_BIN)
if(NOT MOLD_BIN)
message(STATUS "The \"mold\" binary could not be found, using system linker.")
set(WITH_LINKER_MOLD OFF)
else()
# By default mold installs the binary to:
# - `{PREFIX}/bin/mold` as well as a symbolic-link in...
# - `{PREFIX}/lib/mold/ld`.
# (where `PREFIX` is typically `/usr/`).
#
# This block of code finds `{PREFIX}/lib/mold` from the `mold` binary.
# Other methods of searching for the path could also be made to work,
# we could even make our own directory and symbolic-link, however it's more
# convenient to use the one provided by mold.
#
# Use the binary path to "mold", to find the common prefix which contains "lib/mold".
# The parent directory: e.g. `/usr/bin/mold` -> `/usr/bin/`.
get_filename_component(MOLD_PREFIX "${MOLD_BIN}" DIRECTORY)
# The common prefix path: e.g. `/usr/bin/` -> `/usr/` to use as a hint.
get_filename_component(MOLD_PREFIX "${MOLD_PREFIX}" DIRECTORY)
# Find `{PREFIX}/lib/mold/ld`, store the directory component (without the `ld`).
# Then pass `-B {PREFIX}/lib/mold` to GCC so the `ld` located there overrides the default.
find_path(
MOLD_BIN_DIR "ld"
HINTS "${MOLD_PREFIX}"
# The default path is `libexec`, Arch Linux for e.g.
# replaces this with `lib` so check both.
PATH_SUFFIXES "libexec/mold" "lib/mold" "lib64/mold"
NO_DEFAULT_PATH
NO_CACHE
)
if(NOT MOLD_BIN_DIR)
message(STATUS
"The mold linker could not find the directory containing the linker command "
"(typically "
"\"${MOLD_PREFIX}/libexec/mold/ld\") or "
"\"${MOLD_PREFIX}/lib/mold/ld\") using system linker.")
set(WITH_LINKER_MOLD OFF)
endif()
unset(MOLD_PREFIX)
endif()
if(WITH_LINKER_MOLD)
# GCC will search for `ld` in this directory first.
string(APPEND CMAKE_EXE_LINKER_FLAGS " -B \"${MOLD_BIN_DIR}\"")
string(APPEND CMAKE_SHARED_LINKER_FLAGS " -B \"${MOLD_BIN_DIR}\"")
string(APPEND CMAKE_MODULE_LINKER_FLAGS " -B \"${MOLD_BIN_DIR}\"")
set(_IS_LINKER_DEFAULT OFF)
endif()
unset(MOLD_BIN)
unset(MOLD_BIN_DIR)
endif()
if(WITH_LINKER_GOLD AND _IS_LINKER_DEFAULT)
if(WITH_LINKER_GOLD)
execute_process(
COMMAND ${CMAKE_C_COMPILER} -fuse-ld=gold -Wl,--version
ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
if("${LD_VERSION}" MATCHES "GNU gold")
string(APPEND CMAKE_EXE_LINKER_FLAGS " -fuse-ld=gold")
string(APPEND CMAKE_SHARED_LINKER_FLAGS " -fuse-ld=gold")
string(APPEND CMAKE_MODULE_LINKER_FLAGS " -fuse-ld=gold")
set(_IS_LINKER_DEFAULT OFF)
string(APPEND CMAKE_C_FLAGS " -fuse-ld=gold")
string(APPEND CMAKE_CXX_FLAGS " -fuse-ld=gold")
else()
message(STATUS "GNU gold linker isn't available, using the default system linker.")
endif()
unset(LD_VERSION)
endif()
if(WITH_LINKER_LLD AND _IS_LINKER_DEFAULT)
if(WITH_LINKER_LLD)
execute_process(
COMMAND ${CMAKE_C_COMPILER} -fuse-ld=lld -Wl,--version
ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
if("${LD_VERSION}" MATCHES "LLD")
string(APPEND CMAKE_EXE_LINKER_FLAGS " -fuse-ld=lld")
string(APPEND CMAKE_SHARED_LINKER_FLAGS " -fuse-ld=lld")
string(APPEND CMAKE_MODULE_LINKER_FLAGS " -fuse-ld=lld")
set(_IS_LINKER_DEFAULT OFF)
string(APPEND CMAKE_C_FLAGS " -fuse-ld=lld")
string(APPEND CMAKE_CXX_FLAGS " -fuse-ld=lld")
else()
message(STATUS "LLD linker isn't available, using the default system linker.")
endif()
@@ -794,28 +694,6 @@ if(CMAKE_COMPILER_IS_GNUCC)
# CLang is the same as GCC for now.
elseif(CMAKE_C_COMPILER_ID MATCHES "Clang")
set(PLATFORM_CFLAGS "-pipe -fPIC -funsigned-char -fno-strict-aliasing")
if(WITH_LINKER_MOLD AND _IS_LINKER_DEFAULT)
find_program(MOLD_BIN "mold")
mark_as_advanced(MOLD_BIN)
if(NOT MOLD_BIN)
message(STATUS "The \"mold\" binary could not be found, using system linker.")
set(WITH_LINKER_MOLD OFF)
else()
if(CMAKE_C_COMPILER_VERSION VERSION_GREATER_EQUAL 12.0)
string(APPEND CMAKE_EXE_LINKER_FLAGS " --ld-path=\"${MOLD_BIN}\"")
string(APPEND CMAKE_SHARED_LINKER_FLAGS " --ld-path=\"${MOLD_BIN}\"")
string(APPEND CMAKE_MODULE_LINKER_FLAGS " --ld-path=\"${MOLD_BIN}\"")
else()
string(APPEND CMAKE_EXE_LINKER_FLAGS " -fuse-ld=\"${MOLD_BIN}\"")
string(APPEND CMAKE_SHARED_LINKER_FLAGS " -fuse-ld=\"${MOLD_BIN}\"")
string(APPEND CMAKE_MODULE_LINKER_FLAGS " -fuse-ld=\"${MOLD_BIN}\"")
endif()
set(_IS_LINKER_DEFAULT OFF)
endif()
unset(MOLD_BIN)
endif()
# Intel C++ Compiler
elseif(CMAKE_C_COMPILER_ID MATCHES "Intel")
# think these next two are broken
@@ -839,8 +717,6 @@ elseif(CMAKE_C_COMPILER_ID MATCHES "Intel")
string(APPEND PLATFORM_LINKFLAGS " -static-intel")
endif()
unset(_IS_LINKER_DEFAULT)
# Avoid conflicts with Mesa llvmpipe, Luxrender, and other plug-ins that may
# use the same libraries as Blender with a different version or build options.
set(PLATFORM_LINKFLAGS

View File

@@ -27,7 +27,7 @@ if(NOT MSVC)
endif()
if(CMAKE_C_COMPILER_ID MATCHES "Clang")
set(MSVC_CLANG ON)
set(MSVC_CLANG On)
set(VC_TOOLS_DIR $ENV{VCToolsRedistDir} CACHE STRING "Location of the msvc redistributables")
set(MSVC_REDIST_DIR ${VC_TOOLS_DIR})
if(DEFINED MSVC_REDIST_DIR)
@@ -53,11 +53,7 @@ if(CMAKE_C_COMPILER_ID MATCHES "Clang")
endif()
if(WITH_WINDOWS_STRIPPED_PDB)
message(WARNING "stripped pdb not supported with clang, disabling..")
set(WITH_WINDOWS_STRIPPED_PDB OFF)
endif()
else()
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.28.29921) # MSVC 2019 16.9.16
message(FATAL_ERROR "Compiler is unsupported, MSVC 2019 16.9.16 or newer is required for building blender.")
set(WITH_WINDOWS_STRIPPED_PDB Off)
endif()
endif()
@@ -163,7 +159,7 @@ endif()
if(WITH_COMPILER_ASAN AND MSVC AND NOT MSVC_CLANG)
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 19.28.29828)
#set a flag so we don't have to do this comparison all the time
SET(MSVC_ASAN ON)
SET(MSVC_ASAN On)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /fsanitize=address")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /fsanitize=address")
string(APPEND CMAKE_EXE_LINKER_FLAGS_DEBUG " /INCREMENTAL:NO")
@@ -183,22 +179,22 @@ endif()
if(WITH_WINDOWS_SCCACHE AND CMAKE_VS_MSBUILD_COMMAND)
message(WARNING "Disabling sccache, sccache is not supported with msbuild")
set(WITH_WINDOWS_SCCACHE OFF)
set(WITH_WINDOWS_SCCACHE Off)
endif()
# Debug Symbol format
# sccache # MSVC_ASAN # format # why
# ON # ON # Z7 # sccache will only play nice with Z7
# ON # OFF # Z7 # sccache will only play nice with Z7
# OFF # ON # Zi # Asan will not play nice with Edit and Continue
# OFF # OFF # ZI # Neither asan nor sccache is enabled, Edit and Continue is available
# On # On # Z7 # sccache will only play nice with Z7
# On # Off # Z7 # sccache will only play nice with Z7
# Off # On # Zi # Asan will not play nice with Edit and Continue
# Off # Off # ZI # Neither asan nor sccache is enabled, Edit and Continue is available
# Release Symbol format
# sccache # MSVC_ASAN # format # why
# ON # ON # Z7 # sccache will only play nice with Z7
# ON # OFF # Z7 # sccache will only play nice with Z7
# OFF # ON # Zi # Asan will not play nice with Edit and Continue
# OFF # OFF # Zi # Edit and Continue disables some optimizations
# On # On # Z7 # sccache will only play nice with Z7
# On # Off # Z7 # sccache will only play nice with Z7
# Off # On # Zi # Asan will not play nice with Edit and Continue
# Off # Off # Zi # Edit and Continue disables some optimizations
if(WITH_WINDOWS_SCCACHE)
@@ -269,6 +265,12 @@ if(NOT DEFINED LIBDIR)
elseif(MSVC_VERSION GREATER 1919)
message(STATUS "Visual Studio 2019 detected.")
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_BASE}_vc15)
elseif(MSVC_VERSION GREATER 1909)
message(STATUS "Visual Studio 2017 detected.")
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_BASE}_vc15)
elseif(MSVC_VERSION EQUAL 1900)
message(STATUS "Visual Studio 2015 detected.")
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_BASE}_vc15)
endif()
else()
message(STATUS "Using pre-compiled LIBDIR: ${LIBDIR}")
@@ -286,7 +288,7 @@ if(CMAKE_GENERATOR MATCHES "^Visual Studio.+" AND # Only supported in the VS IDE
"EnableMicrosoftCodeAnalysis=false"
"EnableClangTidyCodeAnalysis=true"
)
set(VS_CLANG_TIDY ON)
set(VS_CLANG_TIDY On)
endif()
# Mark libdir as system headers with a lower warn level, to resolve some warnings
@@ -345,11 +347,7 @@ set(FREETYPE_INCLUDE_DIRS
${LIBDIR}/freetype/include
${LIBDIR}/freetype/include/freetype2
)
set(FREETYPE_LIBRARIES
${LIBDIR}/freetype/lib/freetype2ST.lib
${LIBDIR}/brotli/lib/brotlidec-static.lib
${LIBDIR}/brotli/lib/brotlicommon-static.lib
)
set(FREETYPE_LIBRARY ${LIBDIR}/freetype/lib/freetype2ST.lib)
windows_find_package(freetype REQUIRED)
if(WITH_FFTW3)
@@ -463,7 +461,7 @@ if(WITH_JACK)
endif()
if(WITH_PYTHON)
set(PYTHON_VERSION 3.10) # CACHE STRING)
set(PYTHON_VERSION 3.9) # CACHE STRING)
string(REPLACE "." "" _PYTHON_VERSION_NO_DOTS ${PYTHON_VERSION})
set(PYTHON_LIBRARY ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/libs/python${_PYTHON_VERSION_NO_DOTS}.lib)
@@ -471,7 +469,7 @@ if(WITH_PYTHON)
set(PYTHON_INCLUDE_DIR ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/include)
set(PYTHON_NUMPY_INCLUDE_DIRS ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/lib/site-packages/numpy/core/include)
set(NUMPY_FOUND ON)
set(NUMPY_FOUND On)
unset(_PYTHON_VERSION_NO_DOTS)
# uncached vars
set(PYTHON_INCLUDE_DIRS "${PYTHON_INCLUDE_DIR}")
@@ -479,7 +477,7 @@ if(WITH_PYTHON)
endif()
if(WITH_BOOST)
if(WITH_CYCLES AND WITH_CYCLES_OSL)
if(WITH_CYCLES_OSL)
set(boost_extra_libs wave)
endif()
if(WITH_INTERNATIONAL)
@@ -522,7 +520,7 @@ if(WITH_BOOST)
debug ${BOOST_LIBPATH}/libboost_thread-${BOOST_DEBUG_POSTFIX}
debug ${BOOST_LIBPATH}/libboost_chrono-${BOOST_DEBUG_POSTFIX}
)
if(WITH_CYCLES AND WITH_CYCLES_OSL)
if(WITH_CYCLES_OSL)
set(BOOST_LIBRARIES ${BOOST_LIBRARIES}
optimized ${BOOST_LIBPATH}/libboost_wave-${BOOST_POSTFIX}
debug ${BOOST_LIBPATH}/libboost_wave-${BOOST_DEBUG_POSTFIX})
@@ -710,7 +708,7 @@ if(WITH_CODEC_SNDFILE)
set(LIBSNDFILE_LIBRARIES ${LIBSNDFILE_LIBPATH}/libsndfile-1.lib)
endif()
if(WITH_CYCLES AND WITH_CYCLES_OSL)
if(WITH_CYCLES_OSL)
set(CYCLES_OSL ${LIBDIR}/osl CACHE PATH "Path to OpenShadingLanguage installation")
set(OSL_SHADER_DIR ${CYCLES_OSL}/shaders)
# Shaders have moved around a bit between OSL versions, check multiple locations
@@ -743,7 +741,7 @@ if(WITH_CYCLES AND WITH_CYCLES_OSL)
endif()
endif()
if(WITH_CYCLES AND WITH_CYCLES_EMBREE)
if(WITH_CYCLES_EMBREE)
windows_find_package(Embree)
if(NOT EMBREE_FOUND)
set(EMBREE_INCLUDE_DIRS ${LIBDIR}/embree/include)
@@ -855,18 +853,18 @@ if(WITH_GMP)
set(GMP_INCLUDE_DIRS ${LIBDIR}/gmp/include)
set(GMP_LIBRARIES ${LIBDIR}/gmp/lib/libgmp-10.lib optimized ${LIBDIR}/gmp/lib/libgmpxx.lib debug ${LIBDIR}/gmp/lib/libgmpxx_d.lib)
set(GMP_ROOT_DIR ${LIBDIR}/gmp)
set(GMP_FOUND ON)
set(GMP_FOUND On)
endif()
if(WITH_POTRACE)
set(POTRACE_INCLUDE_DIRS ${LIBDIR}/potrace/include)
set(POTRACE_LIBRARIES ${LIBDIR}/potrace/lib/potrace.lib)
set(POTRACE_FOUND ON)
set(POTRACE_FOUND On)
endif()
if(WITH_HARU)
if(EXISTS ${LIBDIR}/haru)
set(HARU_FOUND ON)
set(HARU_FOUND On)
set(HARU_ROOT_DIR ${LIBDIR}/haru)
set(HARU_INCLUDE_DIRS ${HARU_ROOT_DIR}/include)
set(HARU_LIBRARIES ${HARU_ROOT_DIR}/lib/libhpdfs.lib)

View File

@@ -27,7 +27,7 @@ if(WITH_WINDOWS_BUNDLE_CRT)
# Install the CRT to the blender.crt Sub folder.
install(FILES ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS} DESTINATION ./blender.crt COMPONENT Libraries)
# Generating the manifest is a relatively expensive operation since
# Generating the manifest is a relativly expensive operation since
# it is collecting an sha1 hash for every file required. so only do
# this work when the libs have either changed or the manifest does
# not exist yet.

View File

@@ -3,6 +3,9 @@ echo No explicit msvc version requested, autodetecting version.
call "%~dp0\detect_msvc2019.cmd"
if %ERRORLEVEL% EQU 0 goto DetectionComplete
call "%~dp0\detect_msvc2017.cmd"
if %ERRORLEVEL% EQU 0 goto DetectionComplete
call "%~dp0\detect_msvc2022.cmd"
if %ERRORLEVEL% EQU 0 goto DetectionComplete

View File

@@ -1,3 +1,4 @@
if "%BUILD_VS_YEAR%"=="2017" set BUILD_VS_LIBDIRPOST=vc15
if "%BUILD_VS_YEAR%"=="2019" set BUILD_VS_LIBDIRPOST=vc15
if "%BUILD_VS_YEAR%"=="2022" set BUILD_VS_LIBDIRPOST=vc15

View File

@@ -19,6 +19,12 @@ if "%WITH_PYDEBUG%"=="1" (
set PYDEBUG_CMAKE_ARGS=-DWINDOWS_PYTHON_DEBUG=On
)
if "%BUILD_VS_YEAR%"=="2017" (
set BUILD_GENERATOR_POST=%WINDOWS_ARCH%
) else (
set BUILD_PLATFORM_SELECT=-A %MSBUILD_PLATFORM%
)
set BUILD_CMAKE_ARGS=%BUILD_CMAKE_ARGS% -G "Visual Studio %BUILD_VS_VER% %BUILD_VS_YEAR%%BUILD_GENERATOR_POST%" %BUILD_PLATFORM_SELECT% %TESTS_CMAKE_ARGS% %CLANG_CMAKE_ARGS% %ASAN_CMAKE_ARGS% %PYDEBUG_CMAKE_ARGS%
if NOT EXIST %BUILD_DIR%\nul (

View File

@@ -37,9 +37,15 @@ set LLVM_DIR=
:DetectionComplete
set CC=%LLVM_DIR%\bin\clang-cl
set CXX=%LLVM_DIR%\bin\clang-cl
rem build and tested against 2019 16.2
set CFLAGS=-m64 -fmsc-version=1922
set CXXFLAGS=-m64 -fmsc-version=1922
if "%BUILD_VS_YEAR%" == "2019" (
rem build and tested against 2019 16.2
set CFLAGS=-m64 -fmsc-version=1922
set CXXFLAGS=-m64 -fmsc-version=1922
) else (
rem build and tested against 2017 15.7
set CFLAGS=-m64 -fmsc-version=1914
set CXXFLAGS=-m64 -fmsc-version=1914
)
)
if "%WITH_ASAN%"=="1" (

View File

@@ -0,0 +1,3 @@
set BUILD_VS_VER=15
set BUILD_VS_YEAR=2017
call "%~dp0\detect_msvc_vswhere.cmd"

View File

@@ -3,32 +3,7 @@ for %%X in (svn.exe) do (set SVN=%%~$PATH:X)
for %%X in (cmake.exe) do (set CMAKE=%%~$PATH:X)
for %%X in (ctest.exe) do (set CTEST=%%~$PATH:X)
for %%X in (git.exe) do (set GIT=%%~$PATH:X)
REM For python, default on 39 but if that does not exist also check
REM the 310,311 and 312 folders to see if those are there, it checks
REM this far ahead to ensure good lib folder compatibility in the future.
set PYTHON=%BLENDER_DIR%\..\lib\win64_vc15\python\39\bin\python.exe
if EXIST %PYTHON% (
goto detect_python_done
)
set PYTHON=%BLENDER_DIR%\..\lib\win64_vc15\python\310\bin\python.exe
if EXIST %PYTHON% (
goto detect_python_done
)
set PYTHON=%BLENDER_DIR%\..\lib\win64_vc15\python\311\bin\python.exe
if EXIST %PYTHON% (
goto detect_python_done
)
set PYTHON=%BLENDER_DIR%\..\lib\win64_vc15\python\312\bin\python.exe
if EXIST %PYTHON% (
goto detect_python_done
)
if NOT EXIST %PYTHON% (
echo Warning: Python not found, there is likely an issue with the library folder
set PYTHON=""
)
:detect_python_done
if NOT "%verbose%" == "" (
echo svn : "%SVN%"
echo cmake : "%CMAKE%"
@@ -36,3 +11,7 @@ if NOT "%verbose%" == "" (
echo git : "%GIT%"
echo python : "%PYTHON%"
)
if "%CMAKE%" == "" (
echo Cmake not found in path, required for building, exiting...
exit /b 1
)

View File

@@ -9,11 +9,17 @@ exit /b 1
:detect_done
echo found clang-format in %CF_PATH%
if NOT EXIST %PYTHON% (
echo python not found, required for this operation
exit /b 1
if EXIST %PYTHON% (
set PYTHON=%BLENDER_DIR%\..\lib\win64_vc15\python\39\bin\python.exe
goto detect_python_done
)
echo python not found in lib folder
exit /b 1
:detect_python_done
echo found python (%PYTHON%)
set FORMAT_PATHS=%BLENDER_DIR%\source\tools\utils_maintenance\clang_format_paths.py
REM The formatting script expects clang-format to be in the current PATH.

View File

@@ -1,8 +1,18 @@
if NOT EXIST %PYTHON% (
echo python not found, required for this operation
exit /b 1
if EXIST "%PYTHON%" (
goto detect_python_done
)
set PYTHON=%BLENDER_DIR%\..\lib\win64_vc15\python\39\bin\python.exe
if EXIST %PYTHON% (
goto detect_python_done
)
echo python not found at %PYTHON%
exit /b 1
:detect_python_done
echo found python (%PYTHON%)
call "%~dp0\find_inkscape.cmd"
if EXIST "%INKSCAPE_BIN%" (

View File

@@ -1,8 +1,18 @@
if NOT EXIST %PYTHON% (
echo python not found, required for this operation
exit /b 1
if EXIST %PYTHON% (
goto detect_python_done
)
set PYTHON=%BLENDER_DIR%\..\lib\win64_vc15\python\39\bin\python.exe
if EXIST %PYTHON% (
goto detect_python_done
)
echo python not found at %PYTHON%
exit /b 1
:detect_python_done
echo found python (%PYTHON%)
call "%~dp0\find_blender.cmd"
if EXIST "%BLENDER_BIN%" (

View File

@@ -50,6 +50,14 @@ if NOT "%1" == "" (
goto ERR
) else if "%1" == "x64" (
set BUILD_ARCH=x64
) else if "%1" == "2017" (
set BUILD_VS_YEAR=2017
) else if "%1" == "2017pre" (
set BUILD_VS_YEAR=2017
set VSWHERE_ARGS=-prerelease
) else if "%1" == "2017b" (
set BUILD_VS_YEAR=2017
set VSWHERE_ARGS=-products Microsoft.VisualStudio.Product.BuildTools
) else if "%1" == "2019" (
set BUILD_VS_YEAR=2019
) else if "%1" == "2019pre" (

View File

@@ -24,12 +24,12 @@ echo - nobuildinfo ^(disable buildinfo^)
echo - debug ^(Build an unoptimized debuggable build^)
echo - packagename [newname] ^(override default cpack package name^)
echo - builddir [newdir] ^(override default build folder^)
echo - 2017 ^(build with visual studio 2017^)
echo - 2017pre ^(build with visual studio 2017 pre-release^)
echo - 2017b ^(build with visual studio 2017 Build Tools^)
echo - 2019 ^(build with visual studio 2019^)
echo - 2019pre ^(build with visual studio 2019 pre-release^)
echo - 2019b ^(build with visual studio 2019 Build Tools^)
echo - 2022 ^(build with visual studio 2022^)
echo - 2022pre ^(build with visual studio 2022 pre-release^)
echo - 2022b ^(build with visual studio 2022 Build Tools^)
echo.
echo Documentation Targets ^(Not associated with building^)

View File

@@ -1,3 +1,4 @@
if "%BUILD_VS_YEAR%"=="2017" set BUILD_VS_LIBDIRPOST=vc15
if "%BUILD_VS_YEAR%"=="2019" set BUILD_VS_LIBDIRPOST=vc15
if "%BUILD_VS_YEAR%"=="2022" set BUILD_VS_LIBDIRPOST=vc15

View File

@@ -1,7 +1,10 @@
if NOT EXIST %PYTHON% (
echo python not found, required for this operation
exit /b 1
if EXIST %PYTHON% (
goto detect_python_done
)
echo python not found in lib folder
exit /b 1
:detect_python_done
REM Use -B to avoid writing __pycache__ in lib directory and causing update conflicts.

View File

@@ -38,7 +38,7 @@ PROJECT_NAME = Blender
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER = V3.2
PROJECT_NUMBER = V3.1
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a

View File

@@ -6,87 +6,91 @@
* as part of the normal development process.
*/
/* TODO: other modules.
* - `libmv`
* - `cycles`
* - `opencolorio`
* - `opensubdiv`
* - `openvdb`
* - `quadriflow`
/** \defgroup MEM Guarded memory (de)allocation
* \ingroup intern
*/
/** \defgroup intern_atomic Atomic Operations
* \ingroup intern */
/** \defgroup clog C-Logging (CLOG)
* \ingroup intern
*/
/** \defgroup intern_clog C-Logging (CLOG)
* \ingroup intern */
/** \defgroup ctr container
* \ingroup intern
*/
/** \defgroup intern_eigen Eigen
* \ingroup intern */
/** \defgroup iksolver iksolver
* \ingroup intern
*/
/** \defgroup intern_glew-mx GLEW with Multiple Rendering Context's
* \ingroup intern */
/** \defgroup itasc itasc
* \ingroup intern
*/
/** \defgroup intern_iksolver Inverse Kinematics (Solver)
* \ingroup intern */
/** \defgroup memutil memutil
* \ingroup intern
*/
/** \defgroup intern_itasc Inverse Kinematics (ITASC)
* \ingroup intern */
/** \defgroup mikktspace mikktspace
* \ingroup intern
*/
/** \defgroup intern_libc_compat libc Compatibility For Linux
* \ingroup intern */
/** \defgroup moto moto
* \ingroup intern
*/
/** \defgroup intern_locale Locale
* \ingroup intern */
/** \defgroup eigen eigen
* \ingroup intern
*/
/** \defgroup intern_mantaflow Manta-Flow Fluid Simulation
* \ingroup intern */
/** \defgroup smoke smoke
* \ingroup intern
*/
/** \defgroup intern_mem Guarded Memory (de)allocation
* \ingroup intern */
/** \defgroup intern_memutil Memory Utilities (memutil)
* \ingroup intern */
/** \defgroup intern_mikktspace MikktSpace
* \ingroup intern */
/** \defgroup intern_rigidbody Rigid-Body C-API
* \ingroup intern */
/** \defgroup intern_sky_model Sky Model
* \ingroup intern */
/** \defgroup intern_utf_conv UTF-8/16 Conversion (utfconv)
* \ingroup intern */
/** \defgroup string string
* \ingroup intern
*/
/** \defgroup audaspace Audaspace
* \ingroup intern undoc
* \todo add to doxygen */
* \todo add to doxygen
*/
/** \defgroup audcoreaudio Audaspace CoreAudio
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audfx Audaspace FX
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audopenal Audaspace OpenAL
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audpulseaudio Audaspace PulseAudio
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audwasapi Audaspace WASAPI
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audpython Audaspace Python
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audsdl Audaspace SDL
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audsrc Audaspace SRC
* \ingroup audaspace */
*
* \ingroup audaspace
*/
/** \defgroup audffmpeg Audaspace FFMpeg
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audfftw Audaspace FFTW
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audjack Audaspace Jack
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audsndfile Audaspace sndfile
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup GHOST GHOST API
* \ingroup intern GUI

View File

@@ -5,8 +5,7 @@
/** \defgroup bmesh BMesh
* \ingroup blender
*/
/** \defgroup compositor Compositing
* \ingroup blender */
/** \defgroup compositor Compositing */
/** \defgroup python Python
* \ingroup blender
@@ -79,8 +78,7 @@
* \ingroup blender
*/
/** \defgroup data DNA, RNA and .blend access
* \ingroup blender */
/** \defgroup data DNA, RNA and .blend access*/
/** \defgroup gpu GPU
* \ingroup blender
@@ -103,12 +101,11 @@
* merged in docs.
*/
/**
* \defgroup gui GUI
* \ingroup blender */
/** \defgroup gui GUI */
/** \defgroup wm Window Manager
* \ingroup gui */
* \ingroup blender gui
*/
/* ================================ */
@@ -282,8 +279,7 @@
* \ingroup gui
*/
/** \defgroup externformats External Formats
* \ingroup blender */
/** \defgroup externformats External Formats */
/** \defgroup collada COLLADA
* \ingroup externformats
@@ -312,7 +308,4 @@
/* ================================ */
/** \defgroup undoc Undocumented
*
* \brief Modules and libraries that are still undocumented,
* or lacking proper integration into the doxygen system, are marked in this group.
*/
* \brief Modules and libraries that are still undocumented, or lacking proper integration into the doxygen system, are marked in this group. */

View File

@@ -61,7 +61,7 @@ def blender_extract_info(blender_bin: str) -> Dict[str, str]:
stdout=subprocess.PIPE,
).stdout.decode(encoding="utf-8")
blender_version_output = subprocess.run(
blender_version_ouput = subprocess.run(
[blender_bin, "--version"],
env=blender_env,
check=True,
@@ -73,7 +73,7 @@ def blender_extract_info(blender_bin: str) -> Dict[str, str]:
# check for each line's prefix to ensure these aren't included.
blender_version = ""
blender_date = ""
for l in blender_version_output.split("\n"):
for l in blender_version_ouput.split("\n"):
if l.startswith("Blender "):
# Remove 'Blender' prefix.
blender_version = l.split(" ", 1)[1].strip()
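For context, a minimal sketch of the version-line parsing shown above, run against a hypothetical `--version` output string instead of a real subprocess call:

# Hypothetical sample of `blender --version` output, for illustration only.
sample_output = "Blender 3.0.0 Alpha\n\tbuild date: 2021-10-31\n"

blender_version = ""
for l in sample_output.split("\n"):
    if l.startswith("Blender "):
        # Remove the 'Blender' prefix, keep only the version part.
        blender_version = l.split(" ", 1)[1].strip()

print(blender_version)  # -> "3.0.0 Alpha"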

View File

@@ -11,7 +11,7 @@ import queue
execution_queue = queue.Queue()
# This function can safely be called in another thread.
# This function can savely be called in another thread.
# The function will be executed when the timer runs the next time.
def run_in_main_thread(function):
execution_queue.put(function)
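The hunk above only shows the producer side of the pattern; a minimal sketch of how the queued functions are drained on the main thread, assuming the timer registration used by the full example file:

import bpy
import queue

execution_queue = queue.Queue()  # the same queue shown in the hunk above

def run_in_main_thread(function):
    # Called from any thread: only enqueues the callable.
    execution_queue.put(function)

def execute_queued_functions():
    # Runs on the main thread: drain everything queued so far.
    while not execution_queue.empty():
        function = execution_queue.get()
        function()
    # Returning a float re-schedules the timer (here: run again in one second).
    return 1.0

bpy.app.timers.register(execute_queued_functions)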

View File

@@ -8,42 +8,27 @@ def set_pose_matrices(obj, matrix_map):
"Assign pose space matrices of all bones at once, ignoring constraints."
def rec(pbone, parent_matrix):
if pbone.name in matrix_map:
matrix = matrix_map[pbone.name]
matrix = matrix_map[pbone.name]
## Instead of:
# pbone.matrix = matrix
# bpy.context.view_layer.update()
## Instead of:
# pbone.matrix = matrix
# bpy.context.view_layer.update()
# Compute and assign local matrix, using the new parent matrix
if pbone.parent:
pbone.matrix_basis = pbone.bone.convert_local_to_pose(
matrix,
pbone.bone.matrix_local,
parent_matrix=parent_matrix,
parent_matrix_local=pbone.parent.bone.matrix_local,
invert=True
)
else:
pbone.matrix_basis = pbone.bone.convert_local_to_pose(
matrix,
pbone.bone.matrix_local,
invert=True
)
# Compute and assign local matrix, using the new parent matrix
if pbone.parent:
pbone.matrix_basis = pbone.bone.convert_local_to_pose(
matrix,
pbone.bone.matrix_local,
parent_matrix=parent_matrix,
parent_matrix_local=pbone.parent.bone.matrix_local,
invert=True
)
else:
# Compute the updated pose matrix from local and new parent matrix
if pbone.parent:
matrix = pbone.bone.convert_local_to_pose(
pbone.matrix_basis,
pbone.bone.matrix_local,
parent_matrix=parent_matrix,
parent_matrix_local=pbone.parent.bone.matrix_local,
)
else:
matrix = pbone.bone.convert_local_to_pose(
pbone.matrix_basis,
pbone.bone.matrix_local,
)
pbone.matrix_basis = pbone.bone.convert_local_to_pose(
matrix,
pbone.bone.matrix_local,
invert=True
)
# Recursively process children, passing the new matrix through
for child in pbone.children:
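For orientation, a minimal usage sketch of the function being diffed above; the object and matrix source are hypothetical and not part of the diff:

import bpy

# Hypothetical usage: snapshot the current pose-space matrices of the active
# armature, then re-apply them in one pass without per-bone scene updates.
obj = bpy.context.object  # assumed to be an armature object in pose mode
matrix_map = {pbone.name: pbone.matrix.copy() for pbone in obj.pose.bones}
set_pose_matrices(obj, matrix_map)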

View File

@@ -42,13 +42,8 @@ class SimpleMouseOperator(bpy.types.Operator):
self.y = event.mouse_y
return self.execute(context)
# Only needed if you want to add into a dynamic menu
def menu_func(self, context):
self.layout.operator(SimpleMouseOperator.bl_idname, text="Simple Mouse Operator")
# Register and add to the view menu (required to also use F3 search "Simple Mouse Operator" for quick access)
bpy.utils.register_class(SimpleMouseOperator)
bpy.types.VIEW3D_MT_view.append(menu_func)
# Test call to the newly defined operator.
# Here we call the operator and invoke it, meaning that the settings are taken

View File

@@ -43,7 +43,7 @@ def menu_func(self, context):
self.layout.operator(ExportSomeData.bl_idname, text="Text Export Operator")
# Register and add to the file selector (required to also use F3 search "Text Export Operator" for quick access)
# Register and add to the file selector
bpy.utils.register_class(ExportSomeData)
bpy.types.TOPBAR_MT_file_export.append(menu_func)

View File

@@ -27,14 +27,8 @@ class DialogOperator(bpy.types.Operator):
wm = context.window_manager
return wm.invoke_props_dialog(self)
# Only needed if you want to add into a dynamic menu
def menu_func(self, context):
self.layout.operator(DialogOperator.bl_idname, text="Dialog Operator")
# Register and add to the object menu (required to also use F3 search "Dialog Operator" for quick access)
bpy.utils.register_class(DialogOperator)
bpy.types.VIEW3D_MT_object.append(menu_func)
# Test call.
bpy.ops.object.dialog_operator('INVOKE_DEFAULT')

View File

@@ -41,13 +41,8 @@ class CustomDrawOperator(bpy.types.Operator):
col.prop(self, "my_string")
# Only needed if you want to add into a dynamic menu
def menu_func(self, context):
self.layout.operator(CustomDrawOperator.bl_idname, text="Custom Draw Operator")
# Register and add to the object menu (required to also use F3 search "Custom Draw Operator" for quick access)
bpy.utils.register_class(CustomDrawOperator)
bpy.types.VIEW3D_MT_object.append(menu_func)
# test call
bpy.ops.object.custom_draw('INVOKE_DEFAULT')

View File

@@ -55,13 +55,8 @@ class ModalOperator(bpy.types.Operator):
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
# Only needed if you want to add into a dynamic menu
def menu_func(self, context):
self.layout.operator(ModalOperator.bl_idname, text="Modal Operator")
# Register and add to the object menu (required to also use F3 search "Modal Operator" for quick access)
bpy.utils.register_class(ModalOperator)
bpy.types.VIEW3D_MT_object.append(menu_func)
# test call
bpy.ops.object.modal_operator('INVOKE_DEFAULT')

View File

@@ -31,13 +31,8 @@ class SearchEnumOperator(bpy.types.Operator):
context.window_manager.invoke_search_popup(self)
return {'RUNNING_MODAL'}
# Only needed if you want to add into a dynamic menu
def menu_func(self, context):
self.layout.operator(SearchEnumOperator.bl_idname, text="Search Enum Operator")
# Register and add to the object menu (required to also use F3 search "Search Enum Operator" for quick access)
bpy.utils.register_class(SearchEnumOperator)
bpy.types.VIEW3D_MT_object.append(menu_func)
# test call
bpy.ops.object.search_enum_operator('INVOKE_DEFAULT')

View File

@@ -22,13 +22,8 @@ class HelloWorldOperator(bpy.types.Operator):
print("Hello World")
return {'FINISHED'}
# Only needed if you want to add into a dynamic menu
def menu_func(self, context):
self.layout.operator(HelloWorldOperator.bl_idname, text="Hello World Operator")
# Register and add to the view menu (required to also use F3 search "Hello World Operator" for quick access)
bpy.utils.register_class(HelloWorldOperator)
bpy.types.VIEW3D_MT_view.append(menu_func)
# test call to the newly defined operator
bpy.ops.wm.hello_world()

View File

@@ -106,6 +106,24 @@ including advanced features.
floating-point values. These values are interpreted as a plane equation.
.. function:: glColor (red, green, blue, alpha):
B{glColor3b, glColor3d, glColor3f, glColor3i, glColor3s, glColor3ub, glColor3ui, glColor3us,
glColor4b, glColor4d, glColor4f, glColor4i, glColor4s, glColor4ub, glColor4ui, glColor4us,
glColor3bv, glColor3dv, glColor3fv, glColor3iv, glColor3sv, glColor3ubv, glColor3uiv,
glColor3usv, glColor4bv, glColor4dv, glColor4fv, glColor4iv, glColor4sv, glColor4ubv,
glColor4uiv, glColor4usv}
Set a new color.
.. seealso:: `OpenGL Docs <https://khronos.org/registry/OpenGL-Refpages/gl4/html/glColor.xhtml>`__
:type red, green, blue, alpha: Depends on function prototype.
:arg red, green, blue: Specify new red, green, and blue values for the current color.
:arg alpha: Specifies a new alpha value for the current color. Included only in the
four-argument glColor4 commands. (With '4' colors only)
.. function:: glColorMask(red, green, blue, alpha):
Enable and disable writing of frame buffer color components

View File

@@ -728,7 +728,7 @@ Abusing RNA property callbacks
------------------------------
Python-defined RNA properties can have custom callbacks. Trying to perform complex operations
from there, like calling an operator, may work, but is not officially recommended nor supported.
from there, like calling an operator, may work, but is not officialy recommended nor supported.
Main reason is that those callbacks should be very fast, but additionally, it may for example
create issues with undo/redo system (most operators store a history step, and editing an RNA
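A minimal sketch of the kind of lightweight callback the passage recommends: record that something changed and defer the heavy work to an operator or timer, rather than invoking an operator from inside the callback (all names here are hypothetical):

import bpy

def _my_value_update(self, context):
    # Keep the callback cheap: only flag the change, do the heavy work later.
    self["_my_value_dirty"] = True

class MyExampleSettings(bpy.types.PropertyGroup):
    my_value: bpy.props.IntProperty(
        name="My Value",
        update=_my_value_update,
    )

bpy.utils.register_class(MyExampleSettings)
bpy.types.Scene.my_example = bpy.props.PointerProperty(type=MyExampleSettings)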
@@ -743,7 +743,7 @@ will re-allocate objects data,
any references to a meshes vertices/polygons/UVs, armatures bones,
curves points, etc. cannot be accessed after switching mode.
Only the reference to the data itself can be re-accessed, the following example will crash.
Only the reference to the data its self can be re-accessed, the following example will crash.
.. code-block:: python

View File

@@ -417,8 +417,7 @@ MODULE_GROUPING = {
BLENDER_REVISION = str(bpy.app.build_hash, 'utf_8')
# '2.83.0 Beta' or '2.83.0' or '2.83.1'
BLENDER_VERSION_STRING = bpy.app.version_string
BLENDER_VERSION_DOTS = "%d.%d" % (bpy.app.version[0], bpy.app.version[1])
BLENDER_VERSION_DOTS = bpy.app.version_string
if BLENDER_REVISION != "Unknown":
# SHA1 Git hash
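For reference, a small illustration of the two version strings contrasted in the hunk above; the values are hypothetical:

# Hypothetical values mirroring bpy.app.version and bpy.app.version_string.
version = (3, 0, 1)
version_string = "3.0.1 Beta"

dots_from_tuple = "%d.%d" % (version[0], version[1])  # "3.0"  (major.minor only)
dots_from_string = version_string                     # "3.0.1 Beta" (full string)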
@@ -1104,7 +1103,6 @@ context_type_map = {
"selectable_objects": ("Object", True),
"selected_asset_files": ("FileSelectEntry", True),
"selected_bones": ("EditBone", True),
"selected_editable_actions": ("Action", True),
"selected_editable_bones": ("EditBone", True),
"selected_editable_fcurves": ("FCurve", True),
"selected_editable_keyframes": ("Keyframe", True),
@@ -1120,13 +1118,12 @@ context_type_map = {
"selected_pose_bones": ("PoseBone", True),
"selected_pose_bones_from_active_object": ("PoseBone", True),
"selected_sequences": ("Sequence", True),
"selected_visible_actions": ("Action", True),
"selected_visible_fcurves": ("FCurve", True),
"sequences": ("Sequence", True),
"soft_body": ("SoftBodyModifier", False),
"speaker": ("Speaker", False),
"texture": ("Texture", False),
"texture_slot": ("TextureSlot", False),
"texture_slot": ("MaterialTextureSlot", False),
"texture_user": ("ID", False),
"texture_user_property": ("Property", False),
"ui_list": ("UIList", False),
@@ -1725,11 +1722,11 @@ def write_sphinx_conf_py(basepath):
fw("import sys, os\n\n")
fw("extensions = ['sphinx.ext.intersphinx']\n\n")
fw("intersphinx_mapping = {'blender_manual': ('https://docs.blender.org/manual/en/dev/', None)}\n\n")
fw("project = 'Blender %s Python API'\n" % BLENDER_VERSION_STRING)
fw("project = 'Blender %s Python API'\n" % BLENDER_VERSION_DOTS)
fw("master_doc = 'index'\n")
fw("copyright = u'Blender Foundation'\n")
fw("version = '%s'\n" % BLENDER_VERSION_DOTS)
fw("release = '%s'\n" % BLENDER_VERSION_DOTS)
fw("version = '%s'\n" % BLENDER_VERSION_HASH)
fw("release = '%s'\n" % BLENDER_VERSION_HASH)
# Quiet file not in table-of-contents warnings.
fw("exclude_patterns = [\n")
@@ -1750,7 +1747,6 @@ except ModuleNotFoundError:
fw("if html_theme == 'sphinx_rtd_theme':\n")
fw(" html_theme_options = {\n")
fw(" 'display_version': False,\n")
# fw(" 'analytics_id': '',\n")
# fw(" 'collapse_navigation': True,\n")
fw(" 'sticky_navigation': False,\n")
@@ -1764,18 +1760,12 @@ except ModuleNotFoundError:
fw("html_show_sphinx = False\n")
fw("html_baseurl = 'https://docs.blender.org/api/current/'\n")
fw("html_use_opensearch = 'https://docs.blender.org/api/current'\n")
fw("html_show_search_summary = True\n")
fw("html_split_index = True\n")
fw("html_static_path = ['static']\n")
fw("templates_path = ['templates']\n")
fw("html_context = {'commit': '%s'}\n" % BLENDER_VERSION_HASH)
fw("html_extra_path = ['static/favicon.ico', 'static/blender_logo.svg']\n")
fw("html_favicon = 'static/favicon.ico'\n")
fw("html_logo = 'static/blender_logo.svg'\n")
fw("html_last_updated_fmt = '%m/%d/%Y'\n\n")
fw("if html_theme == 'sphinx_rtd_theme':\n")
fw(" html_css_files = ['css/version_switch.css']\n")
fw(" html_js_files = ['js/version_switch.js']\n")
# needed for latex, pdf gen
fw("latex_elements = {\n")
@@ -2132,9 +2122,6 @@ def copy_theme_assets(basepath):
shutil.copytree(os.path.join(SCRIPT_DIR, "static"),
os.path.join(basepath, "static"),
copy_function=shutil.copy)
shutil.copytree(os.path.join(SCRIPT_DIR, "templates"),
os.path.join(basepath, "templates"),
copy_function=shutil.copy)
def rna2sphinx(basepath):
@@ -2267,7 +2254,7 @@ def main():
# First monkey patch to load in fake members.
setup_monkey_patch()
# Perform changes to Blender itself.
# Perform changes to Blender it's self.
setup_data = setup_blender()
# eventually, create the dirs

View File

@@ -1,127 +0,0 @@
/* Override RTD theme */
.rst-versions {
border-top: 0px;
overflow: visible;
}
.version-btn.vdeact {
cursor: default;
color: dimgray;
}
.version-btn.vdeact::after {
content: "";
}
#versionwrap {
display: flex;
padding-top: 2px;
font-size: 90%;
justify-content: center;
flex-wrap: wrap;
}
.version-btn {
display: inline-block;
background-color: #272525;
width: 140px;
text-align: center;
padding: 3px 10px;
margin: 0px 5px 4px;
vertical-align: middle;
color: #27AE60;
border: solid 1px #444444;
border-radius: 3px;
cursor: pointer;
z-index: 400;
transition: border-color 0.4s;
}
.version-btn::after {
content:"\f0d8";
display: inline;
font: normal normal normal 16px/1 FontAwesome;
color: #8d8c8c;
vertical-align: top;
padding-left: 0.5em;
}
.version-btn-open::after {
color: gray;
}
.version-btn:hover, .version-btn:focus {
border-color: #525252;
}
.version-btn-open {
color: gray;
border: solid 1px gray;
}
.version-btn.wait {
cursor: wait;
}
.version-btn.disabled {
cursor: not-allowed;
color: dimgray;
}
.version-dialog {
display: none;
position: absolute;
bottom: 28px;
width: 140px;
margin: 0 5px;
padding-bottom: 4px;
background-color: #0003;
border-radius: 3px;
box-shadow: 0 0 6px #000C;
z-index: 999;
max-height: calc(100vh - 30px);
overflow-y: auto;
cursor: default;
}
.version-title {
padding: 5px;
color: black;
text-align: center;
font-size: 102%;
background-color: #27ae60;
border-bottom: solid 1.5px #444;
}
.version-list {
margin-bottom: 4px;
text-align: center;
background-color: #000C;
border: solid 1px gray;
border-radius: 0px 0px 3px 3px;
}
.version-list a, .version-list span, .version-list li {
position: relative;
display: block;
font-size: 98%;
line-height: 1.15;
width: 100%;
margin: 0;
padding: 4px 0px;
color: #404040;
}
.version-list li {
background-color: #ede9e9;
color: #404040;
padding: 1px;
}
.version-list li:hover, .version-list li a:focus {
background-color: #b9cfda;
}
.version-list li.selected, .version-list li.selected:hover {
background-color: #8d8c8c;
}
.version-list li.selected span {
cursor: default;
outline-color: red;
}
.version-arrow {
position: absolute;
width: 8px;
height: 8px;
left: 50%;
bottom: 4px;
margin-left: -4px;
transform: rotate(225deg);
background: #ede9e9;
border: 1px solid gray;
border-width: 1px 0 0 1px;
}

View File

@@ -1,323 +0,0 @@
(function() { // switch: v1.2
"use strict";
var versionsFileUrl = "https://docs.blender.org/PROD/versions.json"
var all_versions;
var Popover = function() {
function Popover(id)
{
this.isOpen = false;
this.type = (id === "version-popover");
this.$btn = $('#' + id);
this.$dialog = this.$btn.next();
this.$list = this.$dialog.children("ul");
this.sel = null;
this.beforeInit();
}
Popover.prototype = {
beforeInit : function() {
var that = this;
this.$btn.on("click", function(e) {
that.init();
e.preventDefault();
e.stopPropagation();
});
this.$btn.on("keydown", function(e) {
if (that.btnKeyFilter(e)) {
that.init();
e.preventDefault();
e.stopPropagation();
}
});
},
init : function() {
this.$btn.off("click");
this.$btn.off("keydown");
if (all_versions === undefined) {
this.$btn.addClass("wait");
this.loadVL(this);
}
else {
this.afterLoad();
}
},
loadVL : function(that) {
$.getJSON(versionsFileUrl, function(data) {
all_versions = data;
that.afterLoad();
return true;
}).fail(function() {
console.log("Version Switch Error: versions.json could not be loaded.");
that.$btn.addClass("disabled");
return false;
});
},
afterLoad : function() {
var release = DOCUMENTATION_OPTIONS.VERSION;
const m = release.match(/\d\.\d+/g);
if (m) {
release = m[0];
}
this.warnOld(release, all_versions);
var version = this.getNamed(release);
var list = this.buildList(version);
this.$list.children(":first-child").remove();
this.$list.append(list);
var that = this;
this.$list.on("keydown", function(e) {
that.keyMove(e);
});
this.$btn.removeClass("wait");
this.btnOpenHandler();
this.$btn.on("mousedown", function(e) {
that.btnOpenHandler();
e.preventDefault()
});
this.$btn.on("keydown", function(e) {
if (that.btnKeyFilter(e)) {
that.btnOpenHandler();
}
});
},
warnOld : function(release, all_versions) {
// Note this is effectively disabled now, two issues must fixed:
// * versions.js does not contain a current entry, because that leads to
// duplicate version numbers in the menu. These need to be deduplicated.
// * It only shows the warning after opening the menu to switch version
// when versions.js is loaded. This is too late to be useful.
var current = all_versions.current
if (!current)
{
// console.log("Version Switch Error: no 'current' in version.json.");
return;
}
const m = current.match(/\d\.\d+/g);
if (m) {
current = parseFloat(m[0]);
}
if (release < current) {
var currentURL = window.location.pathname.replace(release, current);
var warning = $('<div class="admonition warning"> ' +
'<p class="first admonition-title">Note</p> ' +
'<p class="last"> ' +
'You are not using the most up to date version of the documentation. ' +
'<a href="#"></a> is the newest version.' +
'</p>' +
'</div>');
warning.find('a').attr('href', currentURL).text(current);
var body = $("div.body");
if (!body.length) {
body = $("div.document");
}
body.prepend(warning);
}
},
buildList : function(v) {
var url = new URL(window.location.href);
let pathSplit = [ "", "api", v ];
if (url.pathname.startsWith("/api/")) {
pathSplit.push(url.pathname.split('/').slice(3).join('/'));
}
else {
pathSplit.push(url.pathname.substring(1));
}
if (this.type) {
var dyn = all_versions;
var cur = v;
}
var buf = [];
var that = this;
$.each(dyn, function(ix, title) {
buf.push("<li");
if (ix === cur) {
buf.push(
' class="selected" tabindex="-1" role="presentation"><span tabindex="-1" role="menuitem" aria-current="page">' +
title + '</span></li>');
}
else {
pathSplit[2 + that.type] = ix;
var href = new URL(url);
href.pathname = pathSplit.join('/');
buf.push(' tabindex="-1" role="presentation"><a href ="' + href + '" tabindex="-1">' +
title + '</a></li>');
}
});
return buf.join('');
},
getNamed : function(v) {
$.each(all_versions, function(ix, title) {
if (ix === "master" || ix === "latest") {
var m = title.match(/\d\.\d[\w\d\.]*/)[0];
if (parseFloat(m) == v) {
v = ix;
return false;
}
}
});
return v;
},
dialogToggle : function(speed) {
var wasClose = !this.isOpen;
var that = this;
if (!this.isOpen) {
this.$btn.addClass("version-btn-open");
this.$btn.attr("aria-pressed", true);
this.$dialog.attr("aria-hidden", false);
this.$dialog.fadeIn(speed, function() {
that.$btn.parent().on("focusout", function(e) {
that.focusoutHandler();
e.stopImmediatePropagation();
})
that.$btn.parent().on("mouseleave", function(e) {
that.mouseoutHandler();
e.stopImmediatePropagation();
});
});
this.isOpen = true;
}
else {
this.$btn.removeClass("version-btn-open");
this.$btn.attr("aria-pressed", false);
this.$dialog.attr("aria-hidden", true);
this.$btn.parent().off("focusout");
this.$btn.parent().off("mouseleave");
this.$dialog.fadeOut(speed, function() {
if (that.$sel) {
that.$sel.attr("tabindex", -1);
}
that.$btn.attr("tabindex", 0);
if (document.activeElement !== null && document.activeElement !== document &&
document.activeElement !== document.body) {
that.$btn.focus();
}
});
this.isOpen = false;
}
if (wasClose) {
if (this.$sel) {
this.$sel.attr("tabindex", -1);
}
if (document.activeElement !== null && document.activeElement !== document &&
document.activeElement !== document.body) {
var $nw = this.listEnter();
$nw.attr("tabindex", 0);
$nw.focus();
this.$sel = $nw;
}
}
},
btnOpenHandler : function() {
this.dialogToggle(300);
},
focusoutHandler : function() {
var list = this.$list;
var that = this;
setTimeout(function() {
if (list.find(":focus").length === 0) {
that.dialogToggle(200);
}
}, 200);
},
mouseoutHandler : function() {
this.dialogToggle(200);
},
btnKeyFilter : function(e) {
if (e.ctrlKey || e.shiftKey) {
return false;
}
if (e.key === " " || e.key === "Enter" || (e.key === "ArrowDown" && e.altKey) ||
e.key === "ArrowDown" || e.key === "ArrowUp") {
return true;
}
return false;
},
keyMove : function(e) {
if (e.ctrlKey || e.shiftKey) {
return true;
}
var p = true;
var $nw = $(e.target);
switch (e.key) {
case "ArrowUp":
$nw = this.listPrev($nw);
break;
case "ArrowDown":
$nw = this.listNext($nw);
break;
case "Home":
$nw = this.listFirst();
break;
case "End":
$nw = this.listLast();
break;
case "Escape":
$nw = this.listExit();
break;
case "ArrowLeft":
$nw = this.listExit();
break;
case "ArrowRight":
$nw = this.listExit();
break;
default:
p = false;
}
if (p) {
$nw.attr("tabindex", 0);
$nw.focus();
if (this.$sel) {
this.$sel.attr("tabindex", -1);
}
this.$sel = $nw;
e.preventDefault();
e.stopPropagation();
}
},
listPrev : function($nw) {
if ($nw.parent().prev().length !== 0) {
return $nw.parent().prev().children(":first-child");
}
else {
return this.listLast();
}
},
listNext : function($nw) {
if ($nw.parent().next().length !== 0) {
return $nw.parent().next().children(":first-child");
}
else {
return this.listFirst();
}
},
listFirst : function() {
return this.$list.children(":first-child").children(":first-child");
},
listLast : function() {
return this.$list.children(":last-child").children(":first-child");
},
listExit : function() {
this.mouseoutHandler();
return this.$btn;
},
listEnter : function() {
return this.$list.children(":first-child").children(":first-child");
}
};
return Popover
}();
$(document).ready(function() {
var lng_popover = new Popover("version-popover");
});
})();


@@ -1,17 +0,0 @@
<div class="rst-versions" data-toggle="rst-versions" role="note" aria-label="document versions">
<ul id="versionwrap" role="presentation">
<li role="presentation">
<span id="version-popover" class="version-btn" tabindex="0" role="button" aria-label="versions selector" aria-haspopup="true" aria-controls="version-vsnlist" aria-disabled="true">
{{ release }}
</span>
<div class="version-dialog" aria-hidden="true">
<div class="version-arrow" aria-hidden="true"></div>
<div class="version-title">Versions</div>
<ul id="version-vsnlist" class="version-list" role="menu" aria-labelledby="version-popover" aria-hidden="true">
<li role="presentation">Loading...</li>
</ul>
</div>
</li>
</ul>
</div>


@@ -113,6 +113,6 @@ if(WITH_MOD_FLUID)
add_subdirectory(mantaflow)
endif()
if(WITH_COMPOSITOR)
if (WITH_COMPOSITOR)
add_subdirectory(smaa_areatex)
endif()


@@ -1092,12 +1092,12 @@ if(WITH_PYTHON)
configure_file(${PYTHON_SOURCE_DIRECTORY}/setup.py.in ${CMAKE_CURRENT_BINARY_DIR}/setup.py ESCAPE_QUOTES @ONLY)
if(APPLE)
add_custom_command(OUTPUT build COMMAND MACOSX_DEPLOYMENT_TARGET=${CMAKE_OSX_DEPLOYMENT_TARGET} ${PYTHON_EXECUTABLE} setup.py build DEPENDS ${PYTHON_SRC} ${PYTHON_HDR} setup.py)
add_custom_command(OUTPUT build COMMAND MACOSX_DEPLOYMENT_TARGET=${CMAKE_OSX_DEPLOYMENT_TARGET} ${PYTHON_EXECUTABLE} setup.py build DEPENDS ${PYTHON_SRC} ${PYTHON_HDR})
elseif(WIN32)
set(ENV{VS100COMNTOOLS} $ENV{VS120COMNTOOLS})
add_custom_command(OUTPUT build COMMAND ${PYTHON_EXECUTABLE} setup.py build DEPENDS ${PYTHON_SRC} ${PYTHON_HDR} setup.py)
add_custom_command(OUTPUT build COMMAND ${PYTHON_EXECUTABLE} setup.py build DEPENDS ${PYTHON_SRC} ${PYTHON_HDR})
else()
add_custom_command(OUTPUT build COMMAND ${PYTHON_EXECUTABLE} setup.py build DEPENDS ${PYTHON_SRC} ${PYTHON_HDR} setup.py)
add_custom_command(OUTPUT build COMMAND ${PYTHON_EXECUTABLE} setup.py build DEPENDS ${PYTHON_SRC} ${PYTHON_HDR})
endif()
add_custom_target(pythonmodule ALL DEPENDS build SOURCES ${PYTHON_SOURCE_DIRECTORY}/setup.py.in ${PYTHON_SRC} ${PYTHON_HDR})
add_dependencies(pythonmodule audaspace)


@@ -8,20 +8,20 @@ import numpy
from distutils.core import setup, Extension
if len(sys.argv) > 2 and sys.argv[1] == '--build-docs':
import subprocess
from distutils.core import Distribution
from distutils.command.build import build
import subprocess
from distutils.core import Distribution
from distutils.command.build import build
dist = Distribution()
cmd = build(dist)
cmd.finalize_options()
#print(cmd.build_platlib)
dist = Distribution()
cmd = build(dist)
cmd.finalize_options()
#print(cmd.build_platlib)
os.environ['PYTHONPATH'] = os.path.join(os.getcwd(), cmd.build_platlib)
os.environ['LD_LIBRARY_PATH'] = os.getcwd()
os.environ['PYTHONPATH'] = os.path.join(os.getcwd(), cmd.build_platlib)
os.environ['LD_LIBRARY_PATH'] = os.getcwd()
ret = subprocess.call(sys.argv[2:])
sys.exit(ret)
ret = subprocess.call(sys.argv[2:])
sys.exit(ret)
# the following line is not working due to https://bugs.python.org/issue9023
@@ -43,8 +43,7 @@ audaspace = Extension(
library_dirs = ['.', 'Release', 'Debug'],
language = 'c++',
extra_compile_args = extra_args,
define_macros = [('WITH_CONVOLUTION', None)] if '@WITH_FFTW@' == 'ON' else [],
sources = [os.path.join(source_directory, file) for file in ['PyAPI.cpp', 'PyDevice.cpp', 'PyHandle.cpp', 'PySound.cpp', 'PySequenceEntry.cpp', 'PySequence.cpp', 'PyPlaybackManager.cpp', 'PyDynamicMusic.cpp', 'PyThreadPool.cpp', 'PySource.cpp'] + (['PyImpulseResponse.cpp', 'PyHRTF.cpp'] if '@WITH_FFTW@' == 'ON' else [])]
sources = [os.path.join(source_directory, file) for file in ['PyAPI.cpp', 'PyDevice.cpp', 'PyHandle.cpp', 'PySound.cpp', 'PySequenceEntry.cpp', 'PySequence.cpp', 'PyPlaybackManager.cpp', 'PyDynamicMusic.cpp', 'PyThreadPool.cpp', 'PySource.cpp'] + (['PyImpulseResponse.cpp', 'PyHRTF.cpp'] if '@WITH_FFTW@' == 'ON' else [])]
)
setup(
@@ -57,6 +56,6 @@ setup(
license = 'Apache License 2.0',
long_description = codecs.open(os.path.join(source_directory, '../../README.md'), 'r', 'utf-8').read(),
ext_modules = [audaspace],
headers = [os.path.join(source_directory, file) for file in ['PyAPI.h', 'PyDevice.h', 'PyHandle.h', 'PySound.h', 'PySequenceEntry.h', 'PySequence.h', 'PyPlaybackManager.h', 'PyDynamicMusic.h', 'PyThreadPool.h', 'PySource.h'] + (['PyImpulseResponse.h', 'PyHRTF.h'] if '@WITH_FFTW@' == 'ON' else [])] + ['Audaspace.h']
headers = [os.path.join(source_directory, file) for file in ['PyAPI.h', 'PyDevice.h', 'PyHandle.h', 'PySound.h', 'PySequenceEntry.h', 'PySequence.h', 'PyPlaybackManager.h', 'PyDynamicMusic.h', 'PyThreadPool.h', 'PySource.h'] + (['PyImpulseResponse.h', 'PyHRTF.h'] if '@WITH_FFTW@' == 'ON' else [])] + ['Audaspace.h']
)


@@ -95,13 +95,6 @@ void WASAPIDevice::runMixingThread()
sleep_duration = std::chrono::milliseconds(buffer_size * 1000 / int(m_specs.rate) / 2);
}
if(m_default_device_changed)
{
m_default_device_changed = false;
result = AUDCLNT_E_DEVICE_INVALIDATED;
goto stop_thread;
}
if(FAILED(result = m_audio_client->GetCurrentPadding(&padding)))
goto stop_thread;
@@ -303,78 +296,13 @@ bool WASAPIDevice::setupDevice(DeviceSpecs &specs)
return true;
}
ULONG WASAPIDevice::AddRef()
{
return InterlockedIncrement(&m_reference_count);
}
ULONG WASAPIDevice::Release()
{
ULONG reference_count = InterlockedDecrement(&m_reference_count);
if(0 == reference_count)
delete this;
return reference_count;
}
HRESULT WASAPIDevice::QueryInterface(REFIID riid, void **ppvObject)
{
if(riid == __uuidof(IMMNotificationClient))
{
*ppvObject = reinterpret_cast<IMMNotificationClient*>(this);
AddRef();
}
else if(riid == IID_IUnknown)
{
*ppvObject = reinterpret_cast<IUnknown*>(this);
AddRef();
}
else
{
*ppvObject = nullptr;
return E_NOINTERFACE;
}
return S_OK;
}
HRESULT WASAPIDevice::OnDeviceStateChanged(LPCWSTR pwstrDeviceId, DWORD dwNewState)
{
return S_OK;
}
HRESULT WASAPIDevice::OnDeviceAdded(LPCWSTR pwstrDeviceId)
{
return S_OK;
}
HRESULT WASAPIDevice::OnDeviceRemoved(LPCWSTR pwstrDeviceId)
{
return S_OK;
}
HRESULT WASAPIDevice::OnDefaultDeviceChanged(EDataFlow flow, ERole role, LPCWSTR pwstrDeviceId)
{
if(flow != EDataFlow::eCapture)
m_default_device_changed = true;
return S_OK;
}
HRESULT WASAPIDevice::OnPropertyValueChanged(LPCWSTR pwstrDeviceId, const PROPERTYKEY key)
{
return S_OK;
}
WASAPIDevice::WASAPIDevice(DeviceSpecs specs, int buffersize) :
m_buffersize(buffersize),
m_imm_device_enumerator(nullptr),
m_imm_device(nullptr),
m_audio_client(nullptr),
m_wave_format_extensible({}),
m_default_device_changed(false),
m_reference_count(1)
m_wave_format_extensible({})
{
// initialize COM if it hasn't happened yet
CoInitializeEx(nullptr, COINIT_MULTITHREADED);
@@ -399,8 +327,6 @@ WASAPIDevice::WASAPIDevice(DeviceSpecs specs, int buffersize) :
create();
m_imm_device_enumerator->RegisterEndpointNotificationCallback(this);
return;
error:
@@ -414,8 +340,6 @@ WASAPIDevice::~WASAPIDevice()
{
stopMixingThread();
m_imm_device_enumerator->UnregisterEndpointNotificationCallback(this);
SafeRelease(&m_audio_client);
SafeRelease(&m_imm_device);
SafeRelease(&m_imm_device_enumerator);


@@ -40,7 +40,7 @@ AUD_NAMESPACE_BEGIN
/**
* This device plays back through WASAPI, the Windows audio API.
*/
class AUD_PLUGIN_API WASAPIDevice : IMMNotificationClient, public ThreadedDevice
class AUD_PLUGIN_API WASAPIDevice : public ThreadedDevice
{
private:
int m_buffersize;
@@ -48,8 +48,6 @@ private:
IMMDevice* m_imm_device;
IAudioClient* m_audio_client;
WAVEFORMATEXTENSIBLE m_wave_format_extensible;
bool m_default_device_changed;
LONG m_reference_count;
AUD_LOCAL HRESULT setupRenderClient(IAudioRenderClient*& render_client, UINT32& buffer_size);
@@ -60,17 +58,6 @@ private:
AUD_LOCAL bool setupDevice(DeviceSpecs& specs);
// IUnknown implementation
ULONG STDMETHODCALLTYPE AddRef();
ULONG STDMETHODCALLTYPE Release();
HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void **ppvObject);
// IMMNotificationClient implementation
HRESULT STDMETHODCALLTYPE OnDeviceStateChanged(LPCWSTR pwstrDeviceId, DWORD dwNewState);
HRESULT STDMETHODCALLTYPE OnDeviceAdded(LPCWSTR pwstrDeviceId);
HRESULT STDMETHODCALLTYPE OnDeviceRemoved(LPCWSTR pwstrDeviceId);
HRESULT STDMETHODCALLTYPE OnDefaultDeviceChanged(EDataFlow flow, ERole role, LPCWSTR pwstrDeviceId);
HRESULT STDMETHODCALLTYPE OnPropertyValueChanged(LPCWSTR pwstrDeviceId, const PROPERTYKEY key);
// delete copy constructor and operator=
WASAPIDevice(const WASAPIDevice&) = delete;
WASAPIDevice& operator=(const WASAPIDevice&) = delete;

extern/hipew/README vendored

@@ -1,12 +0,0 @@
The HIP Extension Wrangler Library (HIPEW) is a cross-platform open-source
C/C++ library to dynamically load the HIP library.
HIP (Heterogeneous-Compute Interface for Portability) is an API for C++
programming on AMD GPUs.
It is maintained as part of the Blender project, but included in extern/
for consistency with CUEW and CLEW libraries.
LICENSE
HIPEW is released under the Apache 2.0 license.


@@ -1,5 +0,0 @@
Project: Blender
URL: https://git.blender.org/blender.git
License: Apache 2.0
Upstream version: N/A
Local modifications: None


@@ -804,29 +804,31 @@ typedef enum hipDeviceP2PAttr {
} hipDeviceP2PAttr;
typedef struct HIP_MEMCPY3D {
unsigned int srcXInBytes;
unsigned int srcY;
unsigned int srcZ;
unsigned int srcLOD;
size_t srcXInBytes;
size_t srcY;
size_t srcZ;
size_t srcLOD;
hipMemoryType srcMemoryType;
const void* srcHost;
hipDeviceptr_t srcDevice;
hArray srcArray;
unsigned int srcPitch;
unsigned int srcHeight;
unsigned int dstXInBytes;
unsigned int dstY;
unsigned int dstZ;
unsigned int dstLOD;
hArray * srcArray;
void* reserved0;
size_t srcPitch;
size_t srcHeight;
size_t dstXInBytes;
size_t dstY;
size_t dstZ;
size_t dstLOD;
hipMemoryType dstMemoryType;
void* dstHost;
hipDeviceptr_t dstDevice;
hArray dstArray;
unsigned int dstPitch;
unsigned int dstHeight;
unsigned int WidthInBytes;
unsigned int Height;
unsigned int Depth;
hArray * dstArray;
void* reserved1;
size_t dstPitch;
size_t dstHeight;
size_t WidthInBytes;
size_t Height;
size_t Depth;
} HIP_MEMCPY3D;
typedef struct HIP_MEMCPY3D_PEER_st {
@@ -877,7 +879,7 @@ typedef struct HIP_RESOURCE_DESC_st {
hipResourceType resType;
union {
struct {
hArray h_Array;
hArray * h_Array;
} array;
struct {
hipMipmappedArray_t hMipmappedArray;
@@ -1072,10 +1074,9 @@ typedef enum hiprtcResult {
typedef hipError_t HIPAPI thipGetErrorName(hipError_t error, const char** pStr);
typedef hipError_t HIPAPI thipInit(unsigned int Flags);
typedef hipError_t HIPAPI thipDriverGetVersion(int* driverVersion);
typedef hipError_t HIPAPI thipGetDevice(int* device);
typedef hipError_t HIPAPI thipGetDevice(hipDevice_t* device, int ordinal);
typedef hipError_t HIPAPI thipGetDeviceCount(int* count);
typedef hipError_t HIPAPI thipGetDeviceProperties(hipDeviceProp_t* props, int deviceId);
typedef hipError_t HIPAPI thipDeviceGet(hipDevice_t* device, int ordinal);
typedef hipError_t HIPAPI thipDeviceGetName(char* name, int len, hipDevice_t dev);
typedef hipError_t HIPAPI thipDeviceGetAttribute(int* pi, hipDeviceAttribute_t attrib, hipDevice_t dev);
typedef hipError_t HIPAPI thipDeviceComputeCapability(int* major, int* minor, hipDevice_t dev);
@@ -1208,7 +1209,6 @@ extern thipDriverGetVersion *hipDriverGetVersion;
extern thipGetDevice *hipGetDevice;
extern thipGetDeviceCount *hipGetDeviceCount;
extern thipGetDeviceProperties *hipGetDeviceProperties;
extern thipDeviceGet* hipDeviceGet;
extern thipDeviceGetName *hipDeviceGetName;
extern thipDeviceGetAttribute *hipDeviceGetAttribute;
extern thipDeviceComputeCapability *hipDeviceComputeCapability;
@@ -1333,7 +1333,6 @@ enum {
HIPEW_SUCCESS = 0,
HIPEW_ERROR_OPEN_FAILED = -1,
HIPEW_ERROR_ATEXIT_FAILED = -2,
HIPEW_ERROR_OLD_DRIVER = -3,
};
enum {


@@ -71,7 +71,6 @@ thipDriverGetVersion *hipDriverGetVersion;
thipGetDevice *hipGetDevice;
thipGetDeviceCount *hipGetDeviceCount;
thipGetDeviceProperties *hipGetDeviceProperties;
thipDeviceGet* hipDeviceGet;
thipDeviceGetName *hipDeviceGetName;
thipDeviceGetAttribute *hipDeviceGetAttribute;
thipDeviceComputeCapability *hipDeviceComputeCapability;
@@ -214,36 +213,6 @@ static void hipewHipExit(void) {
}
}
#ifdef _WIN32
static int hipewHasOldDriver(const char *hip_path) {
DWORD verHandle = 0;
DWORD verSize = GetFileVersionInfoSize(hip_path, &verHandle);
int old_driver = 0;
if (verSize != 0) {
LPSTR verData = (LPSTR)malloc(verSize);
if (GetFileVersionInfo(hip_path, verHandle, verSize, verData)) {
LPBYTE lpBuffer = NULL;
UINT size = 0;
if (VerQueryValue(verData, "\\", (VOID FAR * FAR *)&lpBuffer, &size)) {
if (size) {
VS_FIXEDFILEINFO *verInfo = (VS_FIXEDFILEINFO *)lpBuffer;
/* Magic value from
* https://docs.microsoft.com/en-us/windows/win32/api/verrsrc/ns-verrsrc-vs_fixedfileinfo */
if (verInfo->dwSignature == 0xfeef04bd) {
unsigned int fileVersionLS0 = (verInfo->dwFileVersionLS >> 16) & 0xffff;
unsigned int fileversionLS1 = (verInfo->dwFileVersionLS >> 0) & 0xffff;
/* Corresponds to versions older than AMD Radeon Pro 21.Q4. */
old_driver = ((fileVersionLS0 < 3354) || (fileVersionLS0 == 3354 && fileversionLS1 < 13));
}
}
}
}
free(verData);
}
return old_driver;
}
#endif
static int hipewHipInit(void) {
/* Library paths. */
#ifdef _WIN32
@@ -257,7 +226,7 @@ static int hipewHipInit(void) {
#endif
static int initialized = 0;
static int result = 0;
int error;
int error, driver_version;
if (initialized) {
return result;
@@ -271,14 +240,6 @@ static int hipewHipInit(void) {
return result;
}
#ifdef _WIN32
/* Test for driver version. */
if(hipewHasOldDriver(hip_paths[0])) {
result = HIPEW_ERROR_OLD_DRIVER;
return result;
}
#endif
/* Load library. */
hip_lib = dynamic_library_open_find(hip_paths);
@@ -294,7 +255,6 @@ static int hipewHipInit(void) {
HIP_LIBRARY_FIND_CHECKED(hipGetDevice);
HIP_LIBRARY_FIND_CHECKED(hipGetDeviceCount);
HIP_LIBRARY_FIND_CHECKED(hipGetDeviceProperties);
HIP_LIBRARY_FIND_CHECKED(hipDeviceGet);
HIP_LIBRARY_FIND_CHECKED(hipDeviceGetName);
HIP_LIBRARY_FIND_CHECKED(hipDeviceGetAttribute);
HIP_LIBRARY_FIND_CHECKED(hipDeviceComputeCapability);
@@ -565,6 +525,8 @@ int hipewCompilerVersion(void) {
const char *path = hipewCompilerPath();
const char *marker = "Hip compilation tools, release ";
FILE *pipe;
int major, minor;
char *versionstr;
char buf[128];
char output[65536] = "\0";
char command[65536] = "\0";


@@ -1,7 +1,7 @@
Project: NanoSVG
URL: https://github.com/memononen/nanosvg
License: zlib
Upstream version: 3cdd4a9d7886
Upstream version:
Local modifications: Added some functionality to manage grease pencil layers
Added a fix to SVG import arc and float errors (https://developer.blender.org/rB11dc674c78b49fc4e0b7c134c375b6c8b8eacbcc)


@@ -25,6 +25,7 @@ add_subdirectory(ghost)
add_subdirectory(guardedalloc)
add_subdirectory(libmv)
add_subdirectory(memutil)
add_subdirectory(numaapi)
add_subdirectory(opencolorio)
add_subdirectory(opensubdiv)
add_subdirectory(mikktspace)


@@ -45,7 +45,7 @@
*/
/** \file
* \ingroup intern_atomic
* \ingroup Atomic
*
* \brief Provides wrapper around system-specific atomic primitives,
* and some extensions (faked-atomic operations over float numbers).


@@ -44,10 +44,6 @@
* The Original Code is: adapted from jemalloc.
*/
/** \file
* \ingroup intern_atomic
*/
#ifndef __ATOMIC_OPS_EXT_H__
#define __ATOMIC_OPS_EXT_H__


@@ -5,7 +5,7 @@
* All rights reserved.
* Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
* Copyright (C) 2009-2013 Facebook, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice(s),
@@ -13,7 +13,7 @@
* 2. Redistributions in binary form must reproduce the above copyright notice(s),
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
@@ -26,10 +26,6 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
* \ingroup intern_atomic
*/
#ifndef __ATOMIC_OPS_MSVC_H__
#define __ATOMIC_OPS_MSVC_H__


@@ -44,10 +44,6 @@
* The Original Code is: adapted from jemalloc.
*/
/** \file
* \ingroup intern_atomic
*/
#ifndef __ATOMIC_OPS_UNIX_H__
#define __ATOMIC_OPS_UNIX_H__


@@ -44,10 +44,6 @@
* The Original Code is: adapted from jemalloc.
*/
/** \file
* \ingroup intern_atomic
*/
#ifndef __ATOMIC_OPS_UTILS_H__
#define __ATOMIC_OPS_UTILS_H__


@@ -14,8 +14,11 @@
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __CLG_LOG_H__
#define __CLG_LOG_H__
/** \file
* \ingroup intern_clog
* \ingroup clog
*
* C Logging Library (clog)
* ========================
@@ -65,9 +68,6 @@
* - 4+: May be used for more details than 3, should be avoided but not prevented.
*/
#ifndef __CLG_LOG_H__
#define __CLG_LOG_H__
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */


@@ -15,7 +15,7 @@
*/
/** \file
* \ingroup intern_clog
* \ingroup clog
*/
#include <assert.h>
@@ -388,7 +388,7 @@ static void clg_ctx_fatal_action(CLogContext *ctx)
static void clg_ctx_backtrace(CLogContext *ctx)
{
/* NOTE: we avoid writing to 'FILE', for back-trace we make an exception,
/* Note: we avoid writing to 'FILE', for back-trace we make an exception,
* if necessary we could have a version of the callback that writes to file
* descriptor all at once. */
ctx->callbacks.backtrace_fn(ctx->output_file);


@@ -226,9 +226,6 @@ add_definitions(
-DCCL_NAMESPACE_END=}
)
if(WITH_CYCLES_DEBUG)
add_definitions(-DWITH_CYCLES_DEBUG)
endif()
if(WITH_CYCLES_STANDALONE_GUI)
add_definitions(-DWITH_CYCLES_STANDALONE_GUI)
endif()
@@ -337,7 +334,7 @@ else()
endif()
# Warnings
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_C_COMPILER_ID MATCHES "Clang")
if(CMAKE_COMPILER_IS_GNUCXX)
ADD_CHECK_CXX_COMPILER_FLAG(CMAKE_CXX_FLAGS _has_cxxflag_float_conversion "-Werror=float-conversion")
ADD_CHECK_CXX_COMPILER_FLAG(CMAKE_CXX_FLAGS _has_cxxflag_double_promotion "-Werror=double-promotion")
ADD_CHECK_CXX_COMPILER_FLAG(CMAKE_CXX_FLAGS _has_no_error_unused_macros "-Wno-error=unused-macros")


@@ -51,6 +51,8 @@ list(APPEND LIBRARIES ${CYCLES_GL_LIBRARIES})
# Common configuration.
cycles_link_directories()
add_definitions(${GL_DEFINITIONS})
include_directories(${INC})


@@ -82,7 +82,7 @@ static void session_print_status()
string status, substatus;
/* get status */
double progress = options.session->progress.get_progress();
float progress = options.session->progress.get_progress();
options.session->progress.get_status(status, substatus);
if (substatus != "")
@@ -183,7 +183,7 @@ static void display_info(Progress &progress)
progress.get_time(total_time, sample_time);
progress.get_status(status, substatus);
double progress_val = progress.get_progress();
float progress_val = progress.get_progress();
if (substatus != "")
status += ": " + substatus;


@@ -40,7 +40,6 @@ set(SRC
object_cull.cpp
output_driver.cpp
particles.cpp
pointcloud.cpp
curves.cpp
logging.cpp
python.cpp
@@ -88,7 +87,6 @@ endif()
set(ADDON_FILES
addon/__init__.py
addon/camera.py
addon/engine.py
addon/operators.py
addon/osl.py
@@ -103,11 +101,6 @@ add_definitions(${GL_DEFINITIONS})
if(WITH_CYCLES_DEVICE_HIP)
add_definitions(-DWITH_HIP)
endif()
if(WITH_CYCLES_DEVICE_METAL)
add_definitions(-DWITH_METAL)
endif()
if(WITH_MOD_FLUID)
add_definitions(-DWITH_FLUID)
endif()
@@ -145,6 +138,11 @@ endif()
blender_add_lib(bf_intern_cycles "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")
# avoid link failure with clang 3.4 debug
if(CMAKE_C_COMPILER_ID MATCHES "Clang" AND NOT ${CMAKE_C_COMPILER_VERSION} VERSION_LESS '3.4')
string(APPEND CMAKE_CXX_FLAGS_DEBUG " -gline-tables-only")
endif()
add_dependencies(bf_intern_cycles bf_rna)
delayed_install(${CMAKE_CURRENT_SOURCE_DIR} "${ADDON_FILES}" ${CYCLES_INSTALL_PATH})


@@ -1,84 +0,0 @@
#
# Copyright 2011-2021 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# <pep8 compliant>
# Fit to match default projective camera with focal_length 50 and sensor_width 36.
default_fisheye_polynomial = [-1.1735143712967577e-05,
-0.019988736953434998,
-3.3525322965709175e-06,
3.099275275886036e-06,
-2.6064646454854524e-08]
# Utilities to generate lens polynomials to match built-in camera types. They are only here
# for reference at the moment and are not used by the code (see the usage sketch at the end of this file).
def create_grid(sensor_height, sensor_width):
import numpy as np
if sensor_height is None:
sensor_height = sensor_width / (16 / 9) # Default aspect ratio 16:9
uu, vv = np.meshgrid(np.linspace(0, 1, 100), np.linspace(0, 1, 100))
uu = (uu - 0.5) * sensor_width
vv = (vv - 0.5) * sensor_height
rr = np.sqrt(uu ** 2 + vv ** 2)
return rr
def fisheye_lens_polynomial_from_projective(focal_length=50, sensor_width=36, sensor_height=None):
import numpy as np
rr = create_grid(sensor_height, sensor_width)
polynomial = np.polyfit(rr.flat, (-np.arctan(rr / focal_length)).flat, 4)
return list(reversed(polynomial))
def fisheye_lens_polynomial_from_projective_fov(fov, sensor_width=36, sensor_height=None):
import numpy as np
f = sensor_width / 2 / np.tan(fov / 2)
return fisheye_lens_polynomial_from_projective(f, sensor_width, sensor_height)
def fisheye_lens_polynomial_from_equisolid(lens=10.5, sensor_width=36, sensor_height=None):
import numpy as np
rr = create_grid(sensor_height, sensor_width)
x = rr.reshape(-1)
x = np.stack([x**i for i in [1, 2, 3, 4]])
y = (-2 * np.arcsin(rr / (2 * lens))).reshape(-1)
polynomial = np.linalg.lstsq(x.T, y.T, rcond=None)[0]
return [0] + list(polynomial)
def fisheye_lens_polynomial_from_equidistant(fov=180, sensor_width=36, sensor_height=None):
import numpy as np
return [0, -np.radians(fov) / sensor_width, 0, 0, 0]
def fisheye_lens_polynomial_from_distorted_projective_polynomial(k1, k2, k3, focal_length=50, sensor_width=36, sensor_height=None):
import numpy as np
rr = create_grid(sensor_height, sensor_width)
r2 = (rr / focal_length) ** 2
r4 = r2 * r2
r6 = r4 * r2
r_coeff = 1 + k1 * r2 + k2 * r4 + k3 * r6
polynomial = np.polyfit(rr.flat, (-np.arctan(rr / focal_length * r_coeff)).flat, 4)
return list(reversed(polynomial))
def fisheye_lens_polynomial_from_distorted_projective_divisions(k1, k2, focal_length=50, sensor_width=36, sensor_height=None):
import numpy as np
rr = create_grid(sensor_height, sensor_width)
r2 = (rr / focal_length) ** 2
r4 = r2 * r2
r_coeff = 1 + k1 * r2 + k2 * r4
polynomial = np.polyfit(rr.flat, (-np.arctan(rr / focal_length / r_coeff)).flat, 4)
return list(reversed(polynomial))
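
A minimal usage sketch for the reference helpers above, assuming the functions are importable; the "cycles_addon.camera" path below is a placeholder, not something defined in this diff:

# Hedged usage sketch; the import path is hypothetical and for illustration only.
import numpy as np
from cycles_addon import camera  # assumption: the helpers above live in an importable module

# Fit a degree-4 polynomial approximating the default 50mm projective camera on a
# 36mm-wide sensor, i.e. the same setup default_fisheye_polynomial was fitted for.
coeffs = camera.fisheye_lens_polynomial_from_projective(focal_length=50, sensor_width=36)
print(coeffs)  # expected to be close to camera.default_fisheye_polynomial

# The FOV variant converts a field of view (in radians) to a focal length first.
coeffs_fov = camera.fisheye_lens_polynomial_from_projective_fov(np.radians(90), sensor_width=36)
print(coeffs_fov)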


@@ -28,7 +28,7 @@ def _configure_argument_parser():
action='store_true')
parser.add_argument("--cycles-device",
help="Set the device to use for Cycles, overriding user preferences and the scene setting."
"Valid options are 'CPU', 'CUDA', 'OPTIX', 'HIP' or 'METAL'."
"Valid options are 'CPU', 'CUDA', 'OPTIX', or 'HIP'"
"Additionally, you can append '+CPU' to any GPU type for hybrid rendering.",
default=None)
return parser
@@ -60,8 +60,9 @@ def init():
path = os.path.dirname(__file__)
user_path = os.path.dirname(os.path.abspath(bpy.utils.user_resource('CONFIG', path='')))
temp_path = bpy.app.tempdir
_cycles.init(path, user_path, bpy.app.background)
_cycles.init(path, user_path, temp_path, bpy.app.background)
_parse_command_line()
@@ -232,7 +233,6 @@ def list_render_passes(scene, srl):
if crl.denoising_store_passes:
yield ("Denoising Normal", "XYZ", 'VECTOR')
yield ("Denoising Albedo", "RGB", 'COLOR')
yield ("Denoising Depth", "Z", 'VALUE')
# Custom AOV passes.
for aov in srl.aovs:


@@ -40,10 +40,10 @@ class AddPresetIntegrator(AddPresetBase, Operator):
"cycles.transparent_max_bounces",
"cycles.caustics_reflective",
"cycles.caustics_refractive",
"cycles.blur_glossy",
"cycles.use_fast_gi",
"cycles.ao_bounces",
"cycles.ao_bounces_render",
"cycles.blur_glossy"
"cycles.use_fast_gi"
"cycles.ao_bounces"
"cycles.ao_bounces_render"
]
preset_subdir = "cycles/integrator"


@@ -33,7 +33,6 @@ from math import pi
# enums
from . import engine
from . import camera
enum_devices = (
('CPU', "CPU", "Use CPU for rendering"),
@@ -73,8 +72,6 @@ enum_panorama_types = (
('FISHEYE_EQUISOLID', "Fisheye Equisolid",
"Similar to most fisheye modern lens, takes sensor dimensions into consideration"),
('MIRRORBALL', "Mirror Ball", "Uses the mirror ball mapping"),
('FISHEYE_LENS_POLYNOMIAL', "Fisheye Lens Polynomial",
"Defines the lens projection as polynomial to allow real world camera lenses to be mimicked."),
)
enum_curve_shape = (
@@ -90,7 +87,7 @@ enum_use_layer_samples = (
enum_sampling_pattern = (
('SOBOL', "Sobol", "Use Sobol random sampling pattern", 0),
('PROGRESSIVE_MULTI_JITTER', "Progressive Multi-Jitter", "Use Progressive Multi-Jitter random sampling pattern", 1),
('PROGRESSIVE_MUTI_JITTER', "Progressive Multi-Jitter", "Use Progressive Multi-Jitter random sampling pattern", 1),
)
enum_volume_sampling = (
@@ -114,8 +111,7 @@ enum_device_type = (
('CPU', "CPU", "CPU", 0),
('CUDA', "CUDA", "CUDA", 1),
('OPTIX', "OptiX", "OptiX", 3),
('HIP', "HIP", "HIP", 4),
('METAL', "Metal", "Metal", 5)
("HIP", "HIP", "HIP", 4)
)
enum_texture_limit = (
@@ -222,12 +218,6 @@ enum_denoising_prefilter = (
('ACCURATE', "Accurate", "Prefilter noisy guiding passes before denoising color. Improves quality when guiding passes are noisy using extra processing time", 3),
)
enum_direct_light_sampling_type = (
('MULTIPLE_IMPORTANCE_SAMPLING', "Multiple Importance Sampling", "Multiple importance sampling is used to combine direct light contributions from next-event estimation and forward path tracing", 0),
('FORWARD_PATH_TRACING', "Forward Path Tracing", "Direct light contributions are only sampled using forward path tracing", 1),
('NEXT_EVENT_ESTIMATION', "Next-Event Estimation", "Direct light contributions are only sampled using next-event estimation", 2),
)
def update_render_passes(self, context):
scene = context.scene
view_layer = context.view_layer
@@ -335,13 +325,6 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
default=1024,
)
sample_offset: IntProperty(
name="Sample Offset",
description="Number of samples to skip when starting render",
min=0, max=(1 << 24),
default=0,
)
time_limit: FloatProperty(
name="Time Limit",
description="Limit the render time (excluding synchronization time)."
@@ -356,14 +339,14 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
name="Sampling Pattern",
description="Random sampling pattern used by the integrator. When adaptive sampling is enabled, Progressive Multi-Jitter is always used instead of Sobol",
items=enum_sampling_pattern,
default='PROGRESSIVE_MULTI_JITTER',
default='PROGRESSIVE_MUTI_JITTER',
)
scrambling_distance: FloatProperty(
name="Scrambling Distance",
default=1.0,
min=0.0, max=1.0,
description="Reduce randomization between pixels to improve GPU rendering performance, at the cost of possible rendering artifacts if set too low. Only works when not using adaptive sampling",
description="Lower values give faster rendering with GPU rendering and less noise with all devices at the cost of possible artifacts if set too low. Only works when not using adaptive sampling",
)
preview_scrambling_distance: BoolProperty(
name="Scrambling Distance viewport",
@@ -371,10 +354,10 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
description="Uses the Scrambling Distance value for the viewport. Faster but may flicker",
)
auto_scrambling_distance: BoolProperty(
name="Automatic Scrambling Distance",
adaptive_scrambling_distance: BoolProperty(
name="Adaptive Scrambling Distance",
default=False,
description="Automatically reduce the randomization between pixels to improve GPU rendering performance, at the cost of possible rendering artifacts. Only works when not using adaptive sampling",
description="Uses a formula to adapt the scrambling distance strength based on the sample count",
)
use_layer_samples: EnumProperty(
@@ -432,13 +415,6 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
default=0,
)
direct_light_sampling_type: EnumProperty(
name="Direct Light Sampling",
description="The type of strategy used for sampling direct light contributions",
items=enum_direct_light_sampling_type,
default='MULTIPLE_IMPORTANCE_SAMPLING',
)
min_light_bounces: IntProperty(
name="Min Light Bounces",
description="Minimum number of light bounces. Setting this higher reduces noise in the first bounces, "
@@ -667,11 +643,6 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
description="Use special type BVH optimized for hair (uses more ram but renders faster)",
default=True,
)
debug_use_compact_bvh: BoolProperty(
name="Use Compact BVH",
description="Use compact BVH structure (uses less ram but renders slower)",
default=True,
)
debug_bvh_time_steps: IntProperty(
name="BVH Time Steps",
description="Split BVH primitives by this number of time steps to speed up render time in cost of memory",
@@ -799,15 +770,15 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
)
use_auto_tile: BoolProperty(
name="Use Tiling",
description="Render high resolution images in tiles to reduce memory usage, using the specified tile size. Tiles are cached to disk while rendering to save memory",
name="Auto Tiles",
description="Automatically render high resolution images in tiles to reduce memory usage, using the specified tile size. Tiles are cached to disk while rendering to save memory",
default=True,
)
tile_size: IntProperty(
name="Tile Size",
default=2048,
description="",
min=8, max=8192,
min=8, max=16384,
)
# Various fine-tuning debug flags
@@ -899,32 +870,6 @@ class CyclesCameraSettings(bpy.types.PropertyGroup):
default=pi,
)
fisheye_polynomial_k0: FloatProperty(
name="Fisheye Polynomial K0",
description="Coefficient K0 of the lens polinomial",
default=camera.default_fisheye_polynomial[0], precision=6, step=0.1, subtype='ANGLE',
)
fisheye_polynomial_k1: FloatProperty(
name="Fisheye Polynomial K1",
description="Coefficient K1 of the lens polinomial",
default=camera.default_fisheye_polynomial[1], precision=6, step=0.1, subtype='ANGLE',
)
fisheye_polynomial_k2: FloatProperty(
name="Fisheye Polynomial K2",
description="Coefficient K2 of the lens polinomial",
default=camera.default_fisheye_polynomial[2], precision=6, step=0.1, subtype='ANGLE',
)
fisheye_polynomial_k3: FloatProperty(
name="Fisheye Polynomial K3",
description="Coefficient K3 of the lens polinomial",
default=camera.default_fisheye_polynomial[3], precision=6, step=0.1, subtype='ANGLE',
)
fisheye_polynomial_k4: FloatProperty(
name="Fisheye Polynomial K4",
description="Coefficient K4 of the lens polinomial",
default=camera.default_fisheye_polynomial[4], precision=6, step=0.1, subtype='ANGLE',
)
@classmethod
def register(cls):
bpy.types.Camera.cycles = PointerProperty(
@@ -1347,7 +1292,8 @@ class CyclesPreferences(bpy.types.AddonPreferences):
def get_device_types(self, context):
import _cycles
has_cuda, has_optix, has_hip, has_metal = _cycles.get_device_types()
has_cuda, has_optix, has_hip = _cycles.get_device_types()
list = [('NONE', "None", "Don't use compute device", 0)]
if has_cuda:
list.append(('CUDA', "CUDA", "Use CUDA for GPU acceleration", 1))
@@ -1355,8 +1301,6 @@ class CyclesPreferences(bpy.types.AddonPreferences):
list.append(('OPTIX', "OptiX", "Use OptiX for GPU acceleration", 3))
if has_hip:
list.append(('HIP', "HIP", "Use HIP for GPU acceleration", 4))
if has_metal:
list.append(('METAL', "Metal", "Use Metal for GPU acceleration", 5))
return list
@@ -1382,7 +1326,7 @@ class CyclesPreferences(bpy.types.AddonPreferences):
def update_device_entries(self, device_list):
for device in device_list:
if not device[1] in {'CUDA', 'OPTIX', 'CPU', 'HIP', 'METAL'}:
if not device[1] in {'CUDA', 'OPTIX', 'CPU', 'HIP'}:
continue
# Try to find existing Device entry
entry = self.find_existing_device_entry(device)
@@ -1416,7 +1360,7 @@ class CyclesPreferences(bpy.types.AddonPreferences):
elif entry.type == 'CPU':
cpu_devices.append(entry)
# Extend all GPU devices with CPU.
if len(devices) and compute_device_type != 'CPU':
if compute_device_type != 'CPU':
devices.extend(cpu_devices)
return devices
@@ -1426,7 +1370,7 @@ class CyclesPreferences(bpy.types.AddonPreferences):
import _cycles
# Ensure `self.devices` is not re-allocated when the second call to
# get_devices_for_type is made, freeing items from the first list.
for device_type in ('CUDA', 'OPTIX', 'HIP', 'METAL'):
for device_type in ('CUDA', 'OPTIX', 'HIP'):
self.update_device_entries(_cycles.available_devices(device_type))
# Deprecated: use refresh_devices instead.
@@ -1434,37 +1378,18 @@ class CyclesPreferences(bpy.types.AddonPreferences):
self.refresh_devices()
return None
def get_compute_device_type(self):
if self.compute_device_type == '':
return 'NONE'
return self.compute_device_type
def get_num_gpu_devices(self):
import _cycles
compute_device_type = self.get_compute_device_type()
device_list = _cycles.available_devices(compute_device_type)
device_list = _cycles.available_devices(self.compute_device_type)
num = 0
for device in device_list:
if device[1] != compute_device_type:
if device[1] != self.compute_device_type:
continue
for dev in self.devices:
if dev.use and dev.id == device[2]:
num += 1
return num
def has_multi_device(self):
import _cycles
compute_device_type = self.get_compute_device_type()
device_list = _cycles.available_devices(compute_device_type)
for device in device_list:
if device[1] == compute_device_type:
continue
for dev in self.devices:
if dev.use and dev.id == device[2]:
return True
return False
def has_active_device(self):
return self.get_num_gpu_devices() > 0
@@ -1488,11 +1413,9 @@ class CyclesPreferences(bpy.types.AddonPreferences):
col.label(text="and NVIDIA driver version 470 or newer", icon='BLANK1')
elif device_type == 'HIP':
import sys
col.label(text="Requires discrete AMD GPU with RDNA architecture", icon='BLANK1')
col.label(text="Requires discrete AMD GPU with ??? architecture", icon='BLANK1')
if sys.platform[:3] == "win":
col.label(text="and AMD Radeon Pro 21.Q4 driver or newer", icon='BLANK1')
elif device_type == 'METAL':
col.label(text="Requires Apple Silicon and macOS 12.0 or newer", icon='BLANK1')
col.label(text="and AMD driver version ??? or newer", icon='BLANK1')
return
for device in devices:
@@ -1502,16 +1425,15 @@ class CyclesPreferences(bpy.types.AddonPreferences):
row = layout.row()
row.prop(self, "compute_device_type", expand=True)
compute_device_type = self.get_compute_device_type()
if compute_device_type == 'NONE':
if self.compute_device_type == 'NONE':
return
row = layout.row()
devices = self.get_devices_for_type(compute_device_type)
self._draw_devices(row, compute_device_type, devices)
devices = self.get_devices_for_type(self.compute_device_type)
self._draw_devices(row, self.compute_device_type, devices)
import _cycles
has_peer_memory = 0
for device in _cycles.available_devices(compute_device_type):
for device in _cycles.available_devices(self.compute_device_type):
if device[3] and self.find_existing_device_entry(device).use:
has_peer_memory += 1
if has_peer_memory > 1:


@@ -97,11 +97,6 @@ def use_cpu(context):
return (get_device_type(context) == 'NONE' or cscene.device == 'CPU')
def use_metal(context):
cscene = context.scene.cycles
return (get_device_type(context) == 'METAL' and cscene.device == 'GPU')
def use_cuda(context):
cscene = context.scene.cycles
@@ -118,11 +113,11 @@ def use_optix(context):
return (get_device_type(context) == 'OPTIX' and cscene.device == 'GPU')
def use_multi_device(context):
def use_sample_all_lights(context):
cscene = context.scene.cycles
if cscene.device != 'GPU':
return False
return context.preferences.addons[__package__].preferences.has_multi_device()
return cscene.sample_all_lights_direct or cscene.sample_all_lights_indirect
def show_device_active(context):
@@ -295,18 +290,15 @@ class CYCLES_RENDER_PT_sampling_advanced(CyclesButtonsPanel, Panel):
col.active = not (cscene.use_adaptive_sampling and cscene.use_preview_adaptive_sampling)
col.prop(cscene, "sampling_pattern", text="Pattern")
col = layout.column(align=True)
col.prop(cscene, "sample_offset")
layout.separator()
heading = layout.column(align=True, heading="Scrambling Distance")
heading.active = not (cscene.use_adaptive_sampling and cscene.use_preview_adaptive_sampling)
heading.prop(cscene, "auto_scrambling_distance", text="Automatic")
sub = heading.row()
col = layout.column(align=True)
col.active = not (cscene.use_adaptive_sampling and cscene.use_preview_adaptive_sampling)
col.prop(cscene, "scrambling_distance", text="Scrambling Distance")
col.prop(cscene, "adaptive_scrambling_distance", text="Adaptive")
sub = col.row(align=True)
sub.active = not cscene.use_preview_adaptive_sampling
sub.prop(cscene, "preview_scrambling_distance", text="Viewport")
heading.prop(cscene, "scrambling_distance", text="Multiplier")
layout.separator()
@@ -667,10 +659,6 @@ class CYCLES_RENDER_PT_performance_acceleration_structure(CyclesButtonsPanel, Pa
bl_label = "Acceleration Structure"
bl_parent_id = "CYCLES_RENDER_PT_performance"
@classmethod
def poll(cls, context):
return not use_optix(context) or use_multi_device(context)
def draw(self, context):
import _cycles
@@ -683,33 +671,21 @@ class CYCLES_RENDER_PT_performance_acceleration_structure(CyclesButtonsPanel, Pa
col = layout.column()
use_embree = _cycles.with_embree
use_embree = False
if use_cpu(context):
col.prop(cscene, "debug_use_spatial_splits")
if use_embree:
col.prop(cscene, "debug_use_compact_bvh")
else:
sub = col.column()
sub.active = not cscene.debug_use_spatial_splits
sub.prop(cscene, "debug_bvh_time_steps")
col.prop(cscene, "debug_use_hair_bvh")
use_embree = _cycles.with_embree
if not use_embree:
sub = col.column(align=True)
sub.label(text="Cycles built without Embree support")
sub.label(text="CPU raytracing performance will be poor")
else:
col.prop(cscene, "debug_use_spatial_splits")
sub = col.column()
sub.active = not cscene.debug_use_spatial_splits
sub.prop(cscene, "debug_bvh_time_steps")
col.prop(cscene, "debug_use_hair_bvh")
# CPU is used in addition to a GPU
if use_multi_device(context) and use_embree:
col.prop(cscene, "debug_use_compact_bvh")
col.prop(cscene, "debug_use_spatial_splits")
sub = col.column()
sub.active = not use_embree
sub.prop(cscene, "debug_use_hair_bvh")
sub = col.column()
sub.active = not cscene.debug_use_spatial_splits and not use_embree
sub.prop(cscene, "debug_bvh_time_steps")
class CYCLES_RENDER_PT_performance_final_render(CyclesButtonsPanel, Panel):
@@ -1036,7 +1012,7 @@ class CYCLES_OBJECT_PT_motion_blur(CyclesButtonsPanel, Panel):
def poll(cls, context):
ob = context.object
if CyclesButtonsPanel.poll(context) and ob:
if ob.type in {'MESH', 'CURVE', 'CURVE', 'SURFACE', 'FONT', 'META', 'CAMERA', 'HAIR', 'POINTCLOUD'}:
if ob.type in {'MESH', 'CURVE', 'CURVE', 'SURFACE', 'FONT', 'META', 'CAMERA'}:
return True
if ob.instance_type == 'COLLECTION' and ob.instance_collection:
return True
@@ -1075,7 +1051,7 @@ class CYCLES_OBJECT_PT_motion_blur(CyclesButtonsPanel, Panel):
def has_geometry_visibility(ob):
return ob and ((ob.type in {'MESH', 'CURVE', 'SURFACE', 'FONT', 'META', 'LIGHT', 'VOLUME', 'POINTCLOUD', 'HAIR'}) or
return ob and ((ob.type in {'MESH', 'CURVE', 'SURFACE', 'FONT', 'META', 'LIGHT'}) or
(ob.instance_type == 'COLLECTION' and ob.instance_collection))
@@ -1819,45 +1795,18 @@ class CYCLES_RENDER_PT_bake_output(CyclesButtonsPanel, Panel):
rd = scene.render
if rd.use_bake_multires:
layout.prop(rd, "bake_margin")
layout.prop(rd, "use_bake_clear", text="Clear Image")
if rd.bake_type == 'DISPLACEMENT':
layout.prop(rd, "use_bake_lores_mesh")
else:
layout.prop(cbk, "target")
if cbk.target == 'IMAGE_TEXTURES':
layout.prop(cbk, "margin")
layout.prop(cbk, "use_clear", text="Clear Image")
class CYCLES_RENDER_PT_bake_output_margin(CyclesButtonsPanel, Panel):
bl_label = "Margin"
bl_context = "render"
bl_parent_id = "CYCLES_RENDER_PT_bake_output"
COMPAT_ENGINES = {'CYCLES'}
@classmethod
def poll(cls, context):
scene = context.scene
cbk = scene.render.bake
return cbk.target == 'IMAGE_TEXTURES'
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False # No animation.
scene = context.scene
cscene = scene.cycles
cbk = scene.render.bake
rd = scene.render
if rd.use_bake_multires:
layout.prop(rd, "bake_margin_type", text="Type")
layout.prop(rd, "bake_margin", text="Size")
else:
if cbk.target == 'IMAGE_TEXTURES':
layout.prop(cbk, "margin_type", text="Type")
layout.prop(cbk, "margin", text="Size")
class CYCLES_RENDER_PT_debug(CyclesDebugButtonsPanel, Panel):
bl_label = "Debug"
@@ -1867,38 +1816,37 @@ class CYCLES_RENDER_PT_debug(CyclesDebugButtonsPanel, Panel):
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False # No animation.
scene = context.scene
cscene = scene.cycles
col = layout.column(heading="CPU")
col = layout.column()
col.label(text="CPU Flags:")
row = col.row(align=True)
row.prop(cscene, "debug_use_cpu_sse2", toggle=True)
row.prop(cscene, "debug_use_cpu_sse3", toggle=True)
row.prop(cscene, "debug_use_cpu_sse41", toggle=True)
row.prop(cscene, "debug_use_cpu_avx", toggle=True)
row.prop(cscene, "debug_use_cpu_avx2", toggle=True)
col.prop(cscene, "debug_bvh_layout", text="BVH")
col.prop(cscene, "debug_bvh_layout")
col.separator()
col = layout.column(heading="CUDA")
col = layout.column()
col.label(text="CUDA Flags:")
col.prop(cscene, "debug_use_cuda_adaptive_compile")
col = layout.column(heading="OptiX")
col.prop(cscene, "debug_use_optix_debug", text="Module Debug")
col.separator()
col.prop(cscene, "debug_bvh_type", text="Viewport BVH")
col = layout.column()
col.label(text="OptiX Flags:")
col.prop(cscene, "debug_use_optix_debug")
col.separator()
import _cycles
if _cycles.with_debug:
col.prop(cscene, "direct_light_sampling_type")
col = layout.column()
col.prop(cscene, "debug_bvh_type")
class CYCLES_RENDER_PT_simplify(CyclesButtonsPanel, Panel):
@@ -2226,7 +2174,6 @@ classes = (
CYCLES_RENDER_PT_bake_influence,
CYCLES_RENDER_PT_bake_selected_to_active,
CYCLES_RENDER_PT_bake_output,
CYCLES_RENDER_PT_bake_output_margin,
CYCLES_RENDER_PT_debug,
node_panel(CYCLES_MATERIAL_PT_settings),
node_panel(CYCLES_MATERIAL_PT_settings_surface),


@@ -86,7 +86,7 @@ def do_versions(self):
# Device might not currently be available so this can fail
try:
if system.legacy_compute_device_type == 1:
prop.compute_device_type = 'NONE' # Was OpenCL
prop.compute_device_type = 'OPENCL'
elif system.legacy_compute_device_type == 2:
prop.compute_device_type = 'CUDA'
else:
@@ -97,12 +97,6 @@ def do_versions(self):
# Init device list for UI
prop.get_devices(prop.compute_device_type)
if bpy.context.preferences.version <= (3, 0, 40):
# Disable OpenCL device
prop = bpy.context.preferences.addons[__package__].preferences
if prop.is_property_set("compute_device_type") and prop['compute_device_type'] == 4:
prop.compute_device_type = 'NONE'
# We don't modify the startup file because it is assumed to
# contain only the default values.
if not bpy.data.is_saved:
@@ -243,7 +237,7 @@ def do_versions(self):
cscene.use_preview_denoising = False
if not cscene.is_property_set("sampling_pattern") or \
cscene.get('sampling_pattern') >= 2:
cscene.sampling_pattern = 'PROGRESSIVE_MULTI_JITTER'
cscene.sampling_pattern = 'PROGRESSIVE_MUTI_JITTER'
# Removal of square samples.
cscene = scene.cycles


@@ -69,12 +69,6 @@ struct BlenderCamera {
float pole_merge_angle_from;
float pole_merge_angle_to;
float fisheye_polynomial_k0;
float fisheye_polynomial_k1;
float fisheye_polynomial_k2;
float fisheye_polynomial_k3;
float fisheye_polynomial_k4;
enum { AUTO, HORIZONTAL, VERTICAL } sensor_fit;
float sensor_width;
float sensor_height;
@@ -206,12 +200,6 @@ static void blender_camera_from_object(BlenderCamera *bcam,
bcam->longitude_min = RNA_float_get(&ccamera, "longitude_min");
bcam->longitude_max = RNA_float_get(&ccamera, "longitude_max");
bcam->fisheye_polynomial_k0 = RNA_float_get(&ccamera, "fisheye_polynomial_k0");
bcam->fisheye_polynomial_k1 = RNA_float_get(&ccamera, "fisheye_polynomial_k1");
bcam->fisheye_polynomial_k2 = RNA_float_get(&ccamera, "fisheye_polynomial_k2");
bcam->fisheye_polynomial_k3 = RNA_float_get(&ccamera, "fisheye_polynomial_k3");
bcam->fisheye_polynomial_k4 = RNA_float_get(&ccamera, "fisheye_polynomial_k4");
bcam->interocular_distance = b_camera.stereo().interocular_distance();
if (b_camera.stereo().convergence_mode() == BL::CameraStereoData::convergence_mode_PARALLEL) {
bcam->convergence_distance = FLT_MAX;
@@ -434,8 +422,7 @@ static void blender_camera_sync(Camera *cam,
cam->set_full_height(height);
/* panorama sensor */
if (bcam->type == CAMERA_PANORAMA && (bcam->panorama_type == PANORAMA_FISHEYE_EQUISOLID ||
bcam->panorama_type == PANORAMA_FISHEYE_LENS_POLYNOMIAL)) {
if (bcam->type == CAMERA_PANORAMA && bcam->panorama_type == PANORAMA_FISHEYE_EQUISOLID) {
float fit_xratio = (float)bcam->render_width * bcam->pixelaspect.x;
float fit_yratio = (float)bcam->render_height * bcam->pixelaspect.y;
bool horizontal_fit;
@@ -478,12 +465,6 @@ static void blender_camera_sync(Camera *cam,
cam->set_latitude_min(bcam->latitude_min);
cam->set_latitude_max(bcam->latitude_max);
cam->set_fisheye_polynomial_k0(bcam->fisheye_polynomial_k0);
cam->set_fisheye_polynomial_k1(bcam->fisheye_polynomial_k1);
cam->set_fisheye_polynomial_k2(bcam->fisheye_polynomial_k2);
cam->set_fisheye_polynomial_k3(bcam->fisheye_polynomial_k3);
cam->set_fisheye_polynomial_k4(bcam->fisheye_polynomial_k4);
cam->set_longitude_min(bcam->longitude_min);
cam->set_longitude_max(bcam->longitude_max);
@@ -658,7 +639,7 @@ void BlenderSync::sync_camera_motion(
/* TODO(sergey): De-duplicate calculation with camera sync. */
float fov = 2.0f * atanf((0.5f * sensor_size) / bcam.lens / aspectratio);
if (fov != cam->get_fov()) {
VLOG(3) << "Camera " << b_ob.name() << " FOV change detected.";
VLOG(1) << "Camera " << b_ob.name() << " FOV change detected.";
if (motion_time == 0.0f) {
cam->set_fov(fov);
}


@@ -14,8 +14,6 @@
* limitations under the License.
*/
#include <optional>
#include "blender/sync.h"
#include "blender/util.h"
@@ -201,7 +199,7 @@ static bool ObtainCacheParticleUV(Hair *hair,
b_mesh->uv_layers.begin(l);
float2 uv = zero_float2();
if (!b_mesh->uv_layers.empty())
if (b_mesh->uv_layers.length())
b_psys.uv_on_emitter(psmd, *b_pa, pa_no, uv_num, &uv.x);
CData->curve_uv.push_back_slow(uv);
@@ -263,7 +261,7 @@ static bool ObtainCacheParticleVcol(Hair *hair,
b_mesh->vertex_colors.begin(l);
float4 vcol = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
if (!b_mesh->vertex_colors.empty())
if (b_mesh->vertex_colors.length())
b_psys.mcol_on_emitter(psmd, *b_pa, pa_no, vcol_num, &vcol.x);
CData->curve_vcol.push_back_slow(vcol);
@@ -306,6 +304,10 @@ static void ExportCurveSegments(Scene *scene, Hair *hair, ParticleCurveData *CDa
}
}
if (num_curves > 0) {
VLOG(1) << "Exporting curve segments for mesh " << hair->name;
}
hair->reserve_curves(hair->num_curves() + num_curves, hair->get_curve_keys().size() + num_keys);
num_keys = 0;
@@ -354,7 +356,7 @@ static void ExportCurveSegments(Scene *scene, Hair *hair, ParticleCurveData *CDa
/* check allocation */
if ((hair->get_curve_keys().size() != num_keys) || (hair->num_curves() != num_curves)) {
VLOG(1) << "Hair memory allocation failed, clearing data.";
VLOG(1) << "Allocation failed, clearing data";
hair->clear(true);
}
}
@@ -410,11 +412,16 @@ static void export_hair_motion_validate_attribute(Hair *hair,
if (num_motion_keys != num_keys || !have_motion) {
/* No motion or hair "topology" changed, remove attributes again. */
if (num_motion_keys != num_keys) {
VLOG(1) << "Hair topology changed, removing motion attribute.";
VLOG(1) << "Hair topology changed, removing attribute.";
}
else {
VLOG(1) << "No motion, removing attribute.";
}
hair->attributes.remove(ATTR_STD_MOTION_VERTEX_POSITION);
}
else if (motion_step > 0) {
VLOG(1) << "Filling in new motion vertex position for motion_step " << motion_step;
/* Motion, fill up previous steps that we might have skipped because
* they had no motion, but we need them anyway now. */
for (int step = 0; step < motion_step; step++) {
@@ -430,12 +437,16 @@ static void export_hair_motion_validate_attribute(Hair *hair,
static void ExportCurveSegmentsMotion(Hair *hair, ParticleCurveData *CData, int motion_step)
{
VLOG(1) << "Exporting curve motion segments for hair " << hair->name << ", motion step "
<< motion_step;
/* find attribute */
Attribute *attr_mP = hair->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
bool new_attribute = false;
/* add new attribute if it doesn't exist already */
if (!attr_mP) {
VLOG(1) << "Creating new motion vertex position attribute";
attr_mP = hair->attributes.add(ATTR_STD_MOTION_VERTEX_POSITION);
new_attribute = true;
}
@@ -627,35 +638,14 @@ void BlenderSync::sync_particle_hair(
}
#ifdef WITH_HAIR_NODES
static std::optional<BL::FloatAttribute> find_curves_radius_attribute(BL::Hair b_hair)
static float4 hair_point_as_float4(BL::HairPoint b_point)
{
for (BL::Attribute &b_attribute : b_hair.attributes) {
if (b_attribute.name() != "radius") {
continue;
}
if (b_attribute.domain() != BL::Attribute::domain_POINT) {
continue;
}
if (b_attribute.data_type() != BL::Attribute::data_type_FLOAT) {
continue;
}
return BL::FloatAttribute{b_attribute};
}
return std::nullopt;
}
static float4 hair_point_as_float4(BL::Hair b_hair,
std::optional<BL::FloatAttribute> b_attr_radius,
const int index)
{
float4 mP = float3_to_float4(get_float3(b_hair.position_data[index].vector()));
mP.w = b_attr_radius ? b_attr_radius->data[index].value() : 0.0f;
float4 mP = float3_to_float4(get_float3(b_point.co()));
mP.w = b_point.radius();
return mP;
}
static float4 interpolate_hair_points(BL::Hair b_hair,
std::optional<BL::FloatAttribute> b_attr_radius,
const int first_point_index,
const int num_points,
const float step)
@@ -664,8 +654,8 @@ static float4 interpolate_hair_points(BL::Hair b_hair,
const int point_a = clamp((int)curve_t, 0, num_points - 1);
const int point_b = min(point_a + 1, num_points - 1);
const float t = curve_t - (float)point_a;
return lerp(hair_point_as_float4(b_hair, b_attr_radius, first_point_index + point_a),
hair_point_as_float4(b_hair, b_attr_radius, first_point_index + point_b),
return lerp(hair_point_as_float4(b_hair.points[first_point_index + point_a]),
hair_point_as_float4(b_hair.points[first_point_index + point_b]),
t);
}
@@ -692,16 +682,18 @@ static void export_hair_curves(Scene *scene, Hair *hair, BL::Hair b_hair)
const int num_keys = b_hair.points.length();
const int num_curves = b_hair.curves.length();
hair->reserve_curves(num_curves, num_keys);
if (num_curves > 0) {
VLOG(1) << "Exporting curve segments for hair " << hair->name;
}
std::optional<BL::FloatAttribute> b_attr_radius = find_curves_radius_attribute(b_hair);
hair->reserve_curves(num_curves, num_keys);
/* Export curves and points. */
vector<float> points_length;
for (int i = 0; i < num_curves; i++) {
const int first_point_index = b_hair.curve_offset_data[i].value();
const int num_points = b_hair.curve_offset_data[i + 1].value() - first_point_index;
for (BL::HairCurve &b_curve : b_hair.curves) {
const int first_point_index = b_curve.first_point_index();
const int num_points = b_curve.num_points();
float3 prev_co = zero_float3();
float length = 0.0f;
@@ -712,9 +704,10 @@ static void export_hair_curves(Scene *scene, Hair *hair, BL::Hair b_hair)
/* Position and radius. */
for (int i = 0; i < num_points; i++) {
const float3 co = get_float3(b_hair.position_data[first_point_index + i].vector());
const float radius = b_attr_radius ? b_attr_radius->data[first_point_index + i].value() :
0.0f;
BL::HairPoint b_point = b_hair.points[first_point_index + i];
const float3 co = get_float3(b_point.co());
const float radius = b_point.radius();
hair->add_curve_key(co, radius);
if (attr_intercept) {
@@ -739,7 +732,7 @@ static void export_hair_curves(Scene *scene, Hair *hair, BL::Hair b_hair)
/* Random number per curve. */
if (attr_random != NULL) {
attr_random->add(hash_uint2_to_float(i, 0));
attr_random->add(hash_uint2_to_float(b_curve.index(), 0));
}
/* Curve. */
@@ -750,28 +743,29 @@ static void export_hair_curves(Scene *scene, Hair *hair, BL::Hair b_hair)
static void export_hair_curves_motion(Hair *hair, BL::Hair b_hair, int motion_step)
{
VLOG(1) << "Exporting curve motion segments for hair " << hair->name << ", motion step "
<< motion_step;
/* Find or add attribute. */
Attribute *attr_mP = hair->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
bool new_attribute = false;
if (!attr_mP) {
VLOG(1) << "Creating new motion vertex position attribute";
attr_mP = hair->attributes.add(ATTR_STD_MOTION_VERTEX_POSITION);
new_attribute = true;
}
/* Export motion keys. */
const int num_keys = hair->get_curve_keys().size();
const int num_curves = b_hair.curves.length();
float4 *mP = attr_mP->data_float4() + motion_step * num_keys;
bool have_motion = false;
int num_motion_keys = 0;
int curve_index = 0;
std::optional<BL::FloatAttribute> b_attr_radius = find_curves_radius_attribute(b_hair);
for (int i = 0; i < num_curves; i++) {
const int first_point_index = b_hair.curve_offset_data[i].value();
const int num_points = b_hair.curve_offset_data[i + 1].value() - first_point_index;
for (BL::HairCurve &b_curve : b_hair.curves) {
const int first_point_index = b_curve.first_point_index();
const int num_points = b_curve.num_points();
Hair::Curve curve = hair->get_curve(curve_index);
curve_index++;
@@ -782,7 +776,7 @@ static void export_hair_curves_motion(Hair *hair, BL::Hair b_hair, int motion_st
int point_index = first_point_index + i;
if (point_index < num_keys) {
mP[num_motion_keys] = hair_point_as_float4(b_hair, b_attr_radius, point_index);
mP[num_motion_keys] = hair_point_as_float4(b_hair.points[point_index]);
num_motion_keys++;
if (!have_motion) {
@@ -801,8 +795,7 @@ static void export_hair_curves_motion(Hair *hair, BL::Hair b_hair, int motion_st
const float step_size = curve.num_keys > 1 ? 1.0f / (curve.num_keys - 1) : 0.0f;
for (int i = 0; i < curve.num_keys; i++) {
const float step = i * step_size;
mP[num_motion_keys] = interpolate_hair_points(
b_hair, b_attr_radius, first_point_index, num_points, step);
mP[num_motion_keys] = interpolate_hair_points(b_hair, first_point_index, num_points, step);
num_motion_keys++;
}
have_motion = true;
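The interpolation loop above generates motion keys at evenly spaced steps (step_size = 1 / (num_keys - 1)), resampling the source points when they cannot be copied one-to-one. A standalone sketch of that idea on a plain float array (the real code interpolates float4 position+radius keys):
#include <algorithm>
#include <cstdio>
#include <vector>
/* Resample `src` to `num_keys` evenly spaced samples by linear interpolation. */
static std::vector<float> resample(const std::vector<float> &src, int num_keys)
{
  std::vector<float> dst;
  dst.reserve(num_keys);
  const int num_points = static_cast<int>(src.size());
  const float step_size = num_keys > 1 ? 1.0f / (num_keys - 1) : 0.0f;
  for (int i = 0; i < num_keys; i++) {
    const float curve_t = (i * step_size) * (num_points - 1);
    const int a = std::clamp(static_cast<int>(curve_t), 0, num_points - 1);
    const int b = std::min(a + 1, num_points - 1);
    const float t = curve_t - static_cast<float>(a);
    dst.push_back((1.0f - t) * src[a] + t * src[b]);
  }
  return dst;
}
int main()
{
  /* Four source points resampled to three motion keys: endpoints preserved, middle interpolated. */
  for (const float v : resample({0.0f, 1.0f, 2.0f, 3.0f}, 3)) {
    std::printf("%f ", v); /* 0.0 1.5 3.0 */
  }
  std::printf("\n");
  return 0;
}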
@@ -847,14 +840,11 @@ void BlenderSync::sync_hair(BL::Depsgraph b_depsgraph, BObjectInfo &b_ob_info, H
new_hair.set_used_shaders(used_shaders);
if (view_layer.use_hair) {
#ifdef WITH_HAIR_NODES
if (b_ob_info.object_data.is_a(&RNA_Hair)) {
/* Hair object. */
sync_hair(&new_hair, b_ob_info, false);
}
else
#endif
{
else {
/* Particle hair. */
bool need_undeformed = new_hair.need_attribute(scene, ATTR_STD_GENERATED);
BL::Mesh b_mesh = object_to_mesh(
@@ -901,15 +891,12 @@ void BlenderSync::sync_hair_motion(BL::Depsgraph b_depsgraph,
/* Export deformed coordinates. */
if (ccl::BKE_object_is_deform_modified(b_ob_info, b_scene, preview)) {
#ifdef WITH_HAIR_NODES
if (b_ob_info.object_data.is_a(&RNA_Hair)) {
/* Hair object. */
sync_hair(hair, b_ob_info, true, motion_step);
return;
}
else
#endif
{
else {
/* Particle hair. */
BL::Mesh b_mesh = object_to_mesh(
b_data, b_ob_info, b_depsgraph, false, Mesh::SUBDIVISION_NONE);


@@ -27,7 +27,6 @@ enum ComputeDevice {
COMPUTE_DEVICE_CUDA = 1,
COMPUTE_DEVICE_OPTIX = 3,
COMPUTE_DEVICE_HIP = 4,
COMPUTE_DEVICE_METAL = 5,
COMPUTE_DEVICE_NUM
};
@@ -86,9 +85,6 @@ DeviceInfo blender_device_info(BL::Preferences &b_preferences, BL::Scene &b_scen
else if (compute_device == COMPUTE_DEVICE_HIP) {
mask |= DEVICE_MASK_HIP;
}
else if (compute_device == COMPUTE_DEVICE_METAL) {
mask |= DEVICE_MASK_METAL;
}
vector<DeviceInfo> devices = Device::available_devices(mask);
/* Match device preferences and available devices. */


@@ -272,300 +272,12 @@ uint BlenderDisplaySpaceShader::get_shader_program()
return shader_program_;
}
/* --------------------------------------------------------------------
* DrawTile.
*/
/* Higher level representation of a texture from the graphics library. */
class GLTexture {
public:
/* Global counter for all allocated OpenGL textures used by instances of this class. */
static inline std::atomic<int> num_used = 0;
GLTexture() = default;
~GLTexture()
{
assert(gl_id == 0);
}
GLTexture(const GLTexture &other) = delete;
GLTexture &operator=(GLTexture &other) = delete;
GLTexture(GLTexture &&other) noexcept
: gl_id(other.gl_id), width(other.width), height(other.height)
{
other.reset();
}
GLTexture &operator=(GLTexture &&other)
{
if (this == &other) {
return *this;
}
gl_id = other.gl_id;
width = other.width;
height = other.height;
other.reset();
return *this;
}
bool gl_resources_ensure()
{
if (gl_id) {
return true;
}
/* Create texture. */
glGenTextures(1, &gl_id);
if (!gl_id) {
LOG(ERROR) << "Error creating texture.";
return false;
}
/* Configure the texture. */
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, gl_id);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
/* Clamp to edge so that precision issues when zoomed out (which forces linear interpolation)
* do not cause unwanted repetition. */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glBindTexture(GL_TEXTURE_2D, 0);
++num_used;
return true;
}
void gl_resources_destroy()
{
if (!gl_id) {
return;
}
glDeleteTextures(1, &gl_id);
reset();
--num_used;
}
/* OpenGL resource IDs of the texture.
*
* NOTE: Allocated on the render engine's context. */
uint gl_id = 0;
/* Dimensions of the texture in pixels. */
int width = 0;
int height = 0;
protected:
void reset()
{
gl_id = 0;
width = 0;
height = 0;
}
};
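GLTexture above (and GLPixelBufferObject below) follow the same resource-wrapper pattern: move-only, creation and destruction are explicit calls because they must happen with the right GL context bound, the destructor only asserts the resource was already released, and a global atomic counts live objects for the VLOG statistics. A GL-free sketch of the pattern with a stand-in integer handle (hypothetical and simplified; the move assignment here also releases any handle it already owns):
#include <atomic>
#include <cassert>
#include <cstdio>
#include <utility>
class Handle {
 public:
  /* Global counter of live handles, for statistics. */
  static inline std::atomic<int> num_used = 0;
  Handle() = default;
  ~Handle()
  {
    /* Destruction of the underlying resource must be explicit. */
    assert(id == 0);
  }
  Handle(const Handle &) = delete;
  Handle &operator=(const Handle &) = delete;
  Handle(Handle &&other) noexcept : id(other.id)
  {
    other.id = 0;
  }
  Handle &operator=(Handle &&other) noexcept
  {
    if (this != &other) {
      destroy(); /* release any handle we already own before taking over the other's */
      id = other.id;
      other.id = 0;
    }
    return *this;
  }
  bool ensure()
  {
    if (id) {
      return true;
    }
    id = next_id_++; /* stand-in for glGenTextures/glGenBuffers */
    ++num_used;
    return true;
  }
  void destroy()
  {
    if (!id) {
      return;
    }
    id = 0; /* stand-in for glDeleteTextures/glDeleteBuffers */
    --num_used;
  }
  int id = 0;
 private:
  static inline int next_id_ = 1;
};
int main()
{
  Handle a;
  a.ensure();
  Handle b = std::move(a); /* `a` is reset, only `b` owns the handle */
  std::printf("live handles: %d\n", Handle::num_used.load()); /* 1 */
  b.destroy();
  return 0;
}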
/* Higher level representation of a Pixel Buffer Object (PBO) from the graphics library. */
class GLPixelBufferObject {
public:
/* Global counter for all allocated OpenGL PBOs used by instances of this class. */
static inline std::atomic<int> num_used = 0;
GLPixelBufferObject() = default;
~GLPixelBufferObject()
{
assert(gl_id == 0);
}
GLPixelBufferObject(const GLPixelBufferObject &other) = delete;
GLPixelBufferObject &operator=(GLPixelBufferObject &other) = delete;
GLPixelBufferObject(GLPixelBufferObject &&other) noexcept
: gl_id(other.gl_id), width(other.width), height(other.height)
{
other.reset();
}
GLPixelBufferObject &operator=(GLPixelBufferObject &&other)
{
if (this == &other) {
return *this;
}
gl_id = other.gl_id;
width = other.width;
height = other.height;
other.reset();
return *this;
}
bool gl_resources_ensure()
{
if (gl_id) {
return true;
}
glGenBuffers(1, &gl_id);
if (!gl_id) {
LOG(ERROR) << "Error creating texture pixel buffer object.";
return false;
}
++num_used;
return true;
}
void gl_resources_destroy()
{
if (!gl_id) {
return;
}
glDeleteBuffers(1, &gl_id);
reset();
--num_used;
}
/* OpenGL resource IDs of the PBO.
*
* NOTE: Allocated on the render engine's context. */
uint gl_id = 0;
/* Dimensions of the PBO. */
int width = 0;
int height = 0;
protected:
void reset()
{
gl_id = 0;
width = 0;
height = 0;
}
};
class DrawTile {
public:
DrawTile() = default;
~DrawTile() = default;
DrawTile(const DrawTile &other) = delete;
DrawTile &operator=(const DrawTile &other) = delete;
DrawTile(DrawTile &&other) noexcept = default;
DrawTile &operator=(DrawTile &&other) = default;
bool gl_resources_ensure()
{
if (!texture.gl_resources_ensure()) {
gl_resources_destroy();
return false;
}
if (!gl_vertex_buffer) {
glGenBuffers(1, &gl_vertex_buffer);
if (!gl_vertex_buffer) {
LOG(ERROR) << "Error allocating tile VBO.";
gl_resources_destroy();
return false;
}
}
return true;
}
void gl_resources_destroy()
{
texture.gl_resources_destroy();
if (gl_vertex_buffer) {
glDeleteBuffers(1, &gl_vertex_buffer);
gl_vertex_buffer = 0;
}
}
inline bool ready_to_draw() const
{
return texture.gl_id != 0;
}
/* Texture which contains pixels of the tile. */
GLTexture texture;
/* Display parameters the texture of this tile has been updated for. */
BlenderDisplayDriver::Params params;
/* OpenGL resources needed for drawing. */
uint gl_vertex_buffer = 0;
};
class DrawTileAndPBO {
public:
bool gl_resources_ensure()
{
if (!tile.gl_resources_ensure() || !buffer_object.gl_resources_ensure()) {
gl_resources_destroy();
return false;
}
return true;
}
void gl_resources_destroy()
{
tile.gl_resources_destroy();
buffer_object.gl_resources_destroy();
}
DrawTile tile;
GLPixelBufferObject buffer_object;
};
/* --------------------------------------------------------------------
* BlenderDisplayDriver.
*/
struct BlenderDisplayDriver::Tiles {
/* Resources of a tile which is being currently rendered. */
DrawTileAndPBO current_tile;
/* All tiles which rendering is finished and which content will not be changed. */
struct {
vector<DrawTile> tiles;
void gl_resources_destroy_and_clear()
{
for (DrawTile &tile : tiles) {
tile.gl_resources_destroy();
}
tiles.clear();
}
} finished_tiles;
};
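The Tiles struct above is the heart of the per-tile display path: update_begin()/update_end() fill current_tile, next_tile_begin() retires it into finished_tiles, and a requested clear drops the finished tiles the next time tile data arrives. A simplified, GL-free model of that lifecycle (hypothetical names):
#include <cstdio>
#include <string>
#include <utility>
#include <vector>
struct Tile {
  std::string name;
  bool has_data = false;
};
struct Tiles {
  Tile current;
  std::vector<Tile> finished;
};
static void update_tile(Tiles &tiles, bool &need_clear, const std::string &name)
{
  if (need_clear) {
    tiles.finished.clear(); /* handle the clear flag in a single place */
    need_clear = false;
  }
  tiles.current = {name, true};
}
static void next_tile_begin(Tiles &tiles)
{
  if (!tiles.current.has_data) {
    std::printf("moving to the next tile without data for the current tile\n");
    return;
  }
  tiles.finished.push_back(std::move(tiles.current));
  tiles.current = {};
}
int main()
{
  Tiles tiles;
  bool need_clear = false;
  update_tile(tiles, need_clear, "tile 0");
  next_tile_begin(tiles);
  update_tile(tiles, need_clear, "tile 1");
  /* draw() would render the current tile plus every finished tile. */
  std::printf("finished: %zu, current: %s\n", tiles.finished.size(), tiles.current.name.c_str());
  return 0;
}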
BlenderDisplayDriver::BlenderDisplayDriver(BL::RenderEngine &b_engine, BL::Scene &b_scene)
: b_engine_(b_engine),
display_shader_(BlenderDisplayShader::create(b_engine, b_scene)),
tiles_(make_unique<Tiles>())
: b_engine_(b_engine), display_shader_(BlenderDisplayShader::create(b_engine, b_scene))
{
/* Create context while on the main thread. */
gl_context_create();
@@ -580,21 +292,6 @@ BlenderDisplayDriver::~BlenderDisplayDriver()
* Update procedure.
*/
void BlenderDisplayDriver::next_tile_begin()
{
if (!tiles_->current_tile.tile.ready_to_draw()) {
LOG(ERROR)
<< "Unexpectedly moving to the next tile without any data provided for current tile.";
return;
}
/* Moving to the next tile without giving render data for the current tile is not an expected
* situation. */
DCHECK(!need_clear_);
tiles_->finished_tiles.tiles.emplace_back(std::move(tiles_->current_tile.tile));
}
bool BlenderDisplayDriver::update_begin(const Params &params,
int texture_width,
int texture_height)
@@ -615,96 +312,58 @@ bool BlenderDisplayDriver::update_begin(const Params &params,
glWaitSync((GLsync)gl_render_sync_, 0, GL_TIMEOUT_IGNORED);
}
DrawTile &current_tile = tiles_->current_tile.tile;
GLPixelBufferObject &current_tile_buffer_object = tiles_->current_tile.buffer_object;
/* Clear storage of all finished tiles when display clear is requested.
* Do it when new tile data is provided to handle the display clear flag in a single place.
* It also makes the logic reliable regardless of whether drawing did happen or not. */
if (need_clear_) {
tiles_->finished_tiles.gl_resources_destroy_and_clear();
need_clear_ = false;
}
if (!tiles_->current_tile.gl_resources_ensure()) {
tiles_->current_tile.gl_resources_destroy();
if (!gl_texture_resources_ensure()) {
gl_context_disable();
return false;
}
/* Update texture dimensions if needed. */
if (current_tile.texture.width != texture_width ||
current_tile.texture.height != texture_height) {
if (texture_.width != texture_width || texture_.height != texture_height) {
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, current_tile.texture.gl_id);
glBindTexture(GL_TEXTURE_2D, texture_.gl_id);
glTexImage2D(
GL_TEXTURE_2D, 0, GL_RGBA16F, texture_width, texture_height, 0, GL_RGBA, GL_HALF_FLOAT, 0);
current_tile.texture.width = texture_width;
current_tile.texture.height = texture_height;
texture_.width = texture_width;
texture_.height = texture_height;
glBindTexture(GL_TEXTURE_2D, 0);
/* Texture did change, and no pixel storage was provided. Tag for an explicit zeroing out to
* avoid undefined content. */
texture_.need_clear = true;
}
/* Update PBO dimensions if needed.
*
* NOTE: Allocate the PBO for the size which will fit the final render resolution (as in,
* NOTE: Allocate the PBO for the the size which will fit the final render resolution (as in,
* at a resolution divider 1). This way we don't need to recreate graphics interoperability
* objects which are costly and which are tied to the specific underlying buffer size.
* The downside of this approach is that when graphics interoperability is not used we are
* sending too much data to GPU when resolution divider is not 1. */
/* TODO(sergey): Investigate whether keeping the PBO exact size of the texture makes non-interop
* mode faster. */
const int buffer_width = params.size.x;
const int buffer_height = params.size.y;
if (current_tile_buffer_object.width != buffer_width ||
current_tile_buffer_object.height != buffer_height) {
const int buffer_width = params.full_size.x;
const int buffer_height = params.full_size.y;
if (texture_.buffer_width != buffer_width || texture_.buffer_height != buffer_height) {
const size_t size_in_bytes = sizeof(half4) * buffer_width * buffer_height;
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, current_tile_buffer_object.gl_id);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, texture_.gl_pbo_id);
glBufferData(GL_PIXEL_UNPACK_BUFFER, size_in_bytes, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
current_tile_buffer_object.width = buffer_width;
current_tile_buffer_object.height = buffer_height;
texture_.buffer_width = buffer_width;
texture_.buffer_height = buffer_height;
}
/* Store the updated parameters of the current tile.
* In theory it is only needed once per update of the tile, but doing it on every update is
* the easiest and is not expensive. */
tiles_->current_tile.tile.params = params;
/* New content will be provided to the texture in one way or another, so mark this in a
* centralized place. */
texture_.need_update = true;
texture_.params = params;
return true;
}
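The buffer-sizing policy in update_begin() above keeps the PBO at the full (resolution divider 1) size so that changing the divider never forces a re-allocation of the buffer or of the graphics-interop objects tied to it, while the texture itself tracks the current divided size. A small sketch of that policy with a plain byte buffer standing in for the PBO:
#include <cstdio>
#include <vector>
struct Staging {
  int buffer_width = 0;
  int buffer_height = 0;
  std::vector<unsigned char> pixels; /* stand-in for the PBO storage */
  void ensure(int full_width, int full_height, size_t bytes_per_pixel)
  {
    if (buffer_width == full_width && buffer_height == full_height) {
      return; /* nothing changed: keep the allocation (and any interop objects bound to it) */
    }
    buffer_width = full_width;
    buffer_height = full_height;
    pixels.assign(size_t(full_width) * full_height * bytes_per_pixel, 0);
  }
};
int main()
{
  Staging staging;
  staging.ensure(1920, 1080, 8); /* allocated once for the full resolution, 8 = sizeof(half4) */
  /* Rendering at resolution divider 2 uses a 960x540 texture but reuses the same buffer,
   * at the cost of the buffer being larger than strictly needed. */
  staging.ensure(1920, 1080, 8); /* no re-allocation */
  std::printf("buffer bytes: %zu\n", staging.pixels.size());
  return 0;
}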
static void update_tile_texture_pixels(const DrawTileAndPBO &tile)
{
const GLTexture &texture = tile.tile.texture;
DCHECK_NE(tile.buffer_object.gl_id, 0);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture.gl_id);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, tile.buffer_object.gl_id);
glTexSubImage2D(
GL_TEXTURE_2D, 0, 0, 0, texture.width, texture.height, GL_RGBA, GL_HALF_FLOAT, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
}
void BlenderDisplayDriver::update_end()
{
/* Unpack the PBO into the texture as soon as the new content is provided.
*
* This ensures that the unpacking happens while resources like graphics interop (whose
* lifetime is outside of the display driver's control) are still valid, and allows the tile to
* be moved from being current to finished immediately after this call.
*
* One concern with this approach is that if the update happens more often than drawing then
* doing the unpack here occupies GPU transfer for no good reason. However, the render scheduler
* takes care of ensuring updates don't happen that often. In regular applications redraw will
* happen much more often than this update. */
update_tile_texture_pixels(tiles_->current_tile);
gl_upload_sync_ = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glFlush();
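update_end() above commits the freshly written pixels from the shared staging buffer (the PBO) into the tile's own texture right away, so the tile can be retired immediately and the staging storage reused for the next tile while interop resources are still valid. A GL-free sketch of that hand-off (hypothetical TileStore):
#include <cstdio>
#include <vector>
struct TileStore {
  std::vector<float> pixels;
};
static void update_end(const std::vector<float> &staging, TileStore &tile)
{
  tile.pixels = staging; /* stand-in for the PBO -> texture unpack */
}
int main()
{
  std::vector<float> staging = {0.1f, 0.2f, 0.3f};
  TileStore tile;
  update_end(staging, tile);
  staging.assign(staging.size(), 0.0f); /* staging can now be reused for the next tile */
  std::printf("committed %zu pixels\n", tile.pixels.size());
  return 0;
}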
@@ -717,11 +376,7 @@ void BlenderDisplayDriver::update_end()
half4 *BlenderDisplayDriver::map_texture_buffer()
{
const uint pbo_gl_id = tiles_->current_tile.buffer_object.gl_id;
DCHECK_NE(pbo_gl_id, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo_gl_id);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, texture_.gl_pbo_id);
half4 *mapped_rgba_pixels = reinterpret_cast<half4 *>(
glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY));
@@ -729,6 +384,15 @@ half4 *BlenderDisplayDriver::map_texture_buffer()
LOG(ERROR) << "Error mapping BlenderDisplayDriver pixel buffer object.";
}
if (texture_.need_clear) {
const int64_t texture_width = texture_.width;
const int64_t texture_height = texture_.height;
memset(reinterpret_cast<void *>(mapped_rgba_pixels),
0,
texture_width * texture_height * sizeof(half4));
texture_.need_clear = false;
}
return mapped_rgba_pixels;
}
@@ -747,9 +411,12 @@ BlenderDisplayDriver::GraphicsInterop BlenderDisplayDriver::graphics_interop_get
{
GraphicsInterop interop_dst;
interop_dst.buffer_width = tiles_->current_tile.buffer_object.width;
interop_dst.buffer_height = tiles_->current_tile.buffer_object.height;
interop_dst.opengl_pbo_id = tiles_->current_tile.buffer_object.gl_id;
interop_dst.buffer_width = texture_.buffer_width;
interop_dst.buffer_height = texture_.buffer_height;
interop_dst.opengl_pbo_id = texture_.gl_pbo_id;
interop_dst.need_clear = texture_.need_clear;
texture_.need_clear = false;
return interop_dst;
}
@@ -770,7 +437,7 @@ void BlenderDisplayDriver::graphics_interop_deactivate()
void BlenderDisplayDriver::clear()
{
need_clear_ = true;
texture_.need_clear = true;
}
void BlenderDisplayDriver::set_zoom(float zoom_x, float zoom_y)
@@ -778,155 +445,26 @@ void BlenderDisplayDriver::set_zoom(float zoom_x, float zoom_y)
zoom_ = make_float2(zoom_x, zoom_y);
}
/* Update vertex buffer with new coordinates of vertex positions and texture coordinates.
* This buffer is used to render texture in the viewport.
*
* NOTE: The buffer needs to be bound. */
static void vertex_buffer_update(const DisplayDriver::Params &params)
{
const int x = params.full_offset.x;
const int y = params.full_offset.y;
const int width = params.size.x;
const int height = params.size.y;
/* Invalidate old contents - avoids stalling if the buffer is still waiting in queue to be
* rendered. */
glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
float *vpointer = reinterpret_cast<float *>(glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY));
if (!vpointer) {
return;
}
vpointer[0] = 0.0f;
vpointer[1] = 0.0f;
vpointer[2] = x;
vpointer[3] = y;
vpointer[4] = 1.0f;
vpointer[5] = 0.0f;
vpointer[6] = x + width;
vpointer[7] = y;
vpointer[8] = 1.0f;
vpointer[9] = 1.0f;
vpointer[10] = x + width;
vpointer[11] = y + height;
vpointer[12] = 0.0f;
vpointer[13] = 1.0f;
vpointer[14] = x;
vpointer[15] = y + height;
glUnmapBuffer(GL_ARRAY_BUFFER);
}
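The 16 floats written by vertex_buffer_update() are four interleaved (u, v, x, y) corners of a quad drawn as a triangle fan; the texcoord attribute reads offset 0 and the position attribute reads offset 2 * sizeof(float), both with a stride of 4 * sizeof(float), matching the glVertexAttribPointer calls elsewhere in this diff. A standalone sketch that builds the same layout:
#include <array>
#include <cstdio>
/* Four interleaved (u, v, x, y) corners in triangle-fan order. */
static std::array<float, 16> quad_vertices(float x, float y, float width, float height)
{
  return {
      0.0f, 0.0f, x, y,                  /* bottom-left */
      1.0f, 0.0f, x + width, y,          /* bottom-right */
      1.0f, 1.0f, x + width, y + height, /* top-right */
      0.0f, 1.0f, x, y + height,         /* top-left */
  };
}
int main()
{
  const std::array<float, 16> verts = quad_vertices(10.0f, 20.0f, 640.0f, 480.0f);
  for (int corner = 0; corner < 4; corner++) {
    std::printf("uv=(%g, %g) pos=(%g, %g)\n",
                verts[corner * 4 + 0],
                verts[corner * 4 + 1],
                verts[corner * 4 + 2],
                verts[corner * 4 + 3]);
  }
  return 0;
}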
static void draw_tile(const float2 &zoom,
const int texcoord_attribute,
const int position_attribute,
const DrawTile &draw_tile)
{
if (!draw_tile.ready_to_draw()) {
return;
}
const GLTexture &texture = draw_tile.texture;
DCHECK_NE(texture.gl_id, 0);
DCHECK_NE(draw_tile.gl_vertex_buffer, 0);
glBindBuffer(GL_ARRAY_BUFFER, draw_tile.gl_vertex_buffer);
/* Draw at the parameters for which the texture has been updated. This allows the texture to
* always be drawn during border-rendered camera view without flickering. The validity of the
* display parameters for a texture is guaranteed by the initial "clear" state, which makes
* drawing take an early exit.
*
* Such an approach can cause some extra "jelly" effect during panning, but it is no more jelly
* than the overlay of selected objects. Also, it's possible to redraw the texture at an
* intersection of the texture draw parameters and the latest updated draw parameters (although
* the complexity of doing it might not be worth it). */
vertex_buffer_update(draw_tile.params);
glBindTexture(GL_TEXTURE_2D, texture.gl_id);
/* Trick to keep sharp rendering without jagged edges on all GPUs.
*
* The idea here is to enforce driver to use linear interpolation when the image is not zoomed
* in.
* For the render result with a resolution divider in effect we always use nearest interpolation.
*
* Use explicit MIN assignment to make sure the driver does not have an undefined behavior at
* the zoom level 1. The MAG filter is always NEAREST. */
const float zoomed_width = draw_tile.params.size.x * zoom.x;
const float zoomed_height = draw_tile.params.size.y * zoom.y;
if (texture.width != draw_tile.params.size.x || texture.height != draw_tile.params.size.y) {
/* Resolution divider is different from 1, force nearest interpolation. */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
else if (zoomed_width - draw_tile.params.size.x > 0.5f ||
zoomed_height - draw_tile.params.size.y > 0.5f) {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
else {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
}
glVertexAttribPointer(
texcoord_attribute, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (const GLvoid *)0);
glVertexAttribPointer(position_attribute,
2,
GL_FLOAT,
GL_FALSE,
4 * sizeof(float),
(const GLvoid *)(sizeof(float) * 2));
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
}
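The MIN-filter selection in draw_tile() (and the equivalent block added back in draw()) picks nearest filtering when a resolution divider is in effect or when the image is zoomed in by more than half a pixel, and linear filtering otherwise. A standalone version of the decision with a worked example: a 1920x1080 render drawn at zoom 0.5 gets linear, at zoom 2.0 it gets nearest:
#include <cstdio>
enum class MinFilter { Nearest, Linear };
static MinFilter choose_min_filter(
    int tex_w, int tex_h, int draw_w, int draw_h, float zoom_x, float zoom_y)
{
  const float zoomed_width = draw_w * zoom_x;
  const float zoomed_height = draw_h * zoom_y;
  if (tex_w != draw_w || tex_h != draw_h) {
    return MinFilter::Nearest; /* resolution divider is different from 1 */
  }
  if (zoomed_width - draw_w > 0.5f || zoomed_height - draw_h > 0.5f) {
    return MinFilter::Nearest; /* zoomed in: keep pixels sharp */
  }
  return MinFilter::Linear; /* zoomed out or 1:1: linear avoids jagged edges */
}
int main()
{
  std::printf("%d\n", static_cast<int>(choose_min_filter(1920, 1080, 1920, 1080, 0.5f, 0.5f)));
  std::printf("%d\n", static_cast<int>(choose_min_filter(1920, 1080, 1920, 1080, 2.0f, 2.0f)));
  return 0;
}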
void BlenderDisplayDriver::flush()
{
/* This is called from the render thread that also calls update_begin/end, right before ending
* the render loop. We wait for any queued PBO and render commands to be done, before destroying
* the render thread and activating the context in the main thread to destroy resources.
*
* If we don't do this, the NVIDIA driver hangs for a few seconds for when ending 3D viewport
* rendering, for unknown reasons. This was found with NVIDIA driver version 470.73 and a Quadro
* RTX 6000 on Linux. */
if (!gl_context_enable()) {
return;
}
if (gl_upload_sync_) {
glWaitSync((GLsync)gl_upload_sync_, 0, GL_TIMEOUT_IGNORED);
}
if (gl_render_sync_) {
glWaitSync((GLsync)gl_render_sync_, 0, GL_TIMEOUT_IGNORED);
}
gl_context_disable();
}
void BlenderDisplayDriver::draw(const Params &params)
{
/* See do_update_begin() for why no locking is required here. */
const bool transparent = true; // TODO(sergey): Derive this from Film.
if (!gl_draw_resources_ensure()) {
return;
}
if (use_gl_context_) {
gl_context_mutex_.lock();
}
if (need_clear_) {
if (texture_.need_clear) {
/* Texture is requested to be cleared and was not yet cleared.
*
* Do early return which should be equivalent of drawing all-zero texture.
* Watch out for the lock though so that the clear happening during update is properly
* synchronized here. */
if (use_gl_context_) {
gl_context_mutex_.unlock();
}
gl_context_mutex_.unlock();
return;
}
@@ -939,37 +477,66 @@ void BlenderDisplayDriver::draw(const Params &params)
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
}
glActiveTexture(GL_TEXTURE0);
display_shader_->bind(params.full_size.x, params.full_size.y);
/* NOTE: The VAO is to be allocated on the drawing context as it is not shared across contexts.
* Simplest is to allocate it on every redraw so that it is possible to destroy it from a
* correct context. */
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture_.gl_id);
/* Trick to keep sharp rendering without jagged edges on all GPUs.
*
* The idea here is to enforce driver to use linear interpolation when the image is not zoomed
* in.
* For the render result with a resolution divider in effect we always use nearest interpolation.
*
* Use explicit MIN assignment to make sure the driver does not have an undefined behavior at
* the zoom level 1. The MAG filter is always NEAREST. */
const float zoomed_width = params.size.x * zoom_.x;
const float zoomed_height = params.size.y * zoom_.y;
if (texture_.width != params.size.x || texture_.height != params.size.y) {
/* Resolution divider is different from 1, force nearest interpolation. */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
else if (zoomed_width - params.size.x > 0.5f || zoomed_height - params.size.y > 0.5f) {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
else {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
}
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer_);
texture_update_if_needed();
vertex_buffer_update(params);
/* TODO(sergey): Does it make sense/possible to cache/reuse the VAO? */
GLuint vertex_array_object;
glGenVertexArrays(1, &vertex_array_object);
glBindVertexArray(vertex_array_object);
display_shader_->bind(params.full_size.x, params.full_size.y);
const int texcoord_attribute = display_shader_->get_tex_coord_attrib_location();
const int position_attribute = display_shader_->get_position_attrib_location();
glEnableVertexAttribArray(texcoord_attribute);
glEnableVertexAttribArray(position_attribute);
draw_tile(zoom_, texcoord_attribute, position_attribute, tiles_->current_tile.tile);
glVertexAttribPointer(
texcoord_attribute, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (const GLvoid *)0);
glVertexAttribPointer(position_attribute,
2,
GL_FLOAT,
GL_FALSE,
4 * sizeof(float),
(const GLvoid *)(sizeof(float) * 2));
for (const DrawTile &tile : tiles_->finished_tiles.tiles) {
draw_tile(zoom_, texcoord_attribute, position_attribute, tile);
}
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
display_shader_->unbind();
glBindTexture(GL_TEXTURE_2D, 0);
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glDeleteVertexArrays(1, &vertex_array_object);
display_shader_->unbind();
if (transparent) {
glDisable(GL_BLEND);
}
@@ -977,11 +544,6 @@ void BlenderDisplayDriver::draw(const Params &params)
gl_render_sync_ = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glFlush();
if (VLOG_IS_ON(5)) {
VLOG(5) << "Number of textures: " << GLTexture::num_used;
VLOG(5) << "Number of PBOs: " << GLPixelBufferObject::num_used;
}
if (use_gl_context_) {
gl_context_mutex_.unlock();
}
@@ -1056,16 +618,154 @@ void BlenderDisplayDriver::gl_context_dispose()
}
}
bool BlenderDisplayDriver::gl_draw_resources_ensure()
{
if (!texture_.gl_id) {
/* If there is no texture allocated, there is nothing to draw. Inform the draw call that it
* can not continue. Note that this is not an unrecoverable error, so once the texture is known
* we will come back here and create all the GPU resources needed for draw. */
return false;
}
if (gl_draw_resource_creation_attempted_) {
return gl_draw_resources_created_;
}
gl_draw_resource_creation_attempted_ = true;
if (!vertex_buffer_) {
glGenBuffers(1, &vertex_buffer_);
if (!vertex_buffer_) {
LOG(ERROR) << "Error creating vertex buffer.";
return false;
}
}
gl_draw_resources_created_ = true;
return true;
}
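gl_draw_resources_ensure() above uses an "attempt creation only once" guard so a failing GPU setup is not retried (and re-logged) on every redraw. A tiny GL-free sketch of that memoization:
#include <cstdio>
struct LazyResource {
  bool creation_attempted = false;
  bool created = false;
  bool ensure(bool (*create)())
  {
    if (creation_attempted) {
      return created; /* remembered result, no retry */
    }
    creation_attempted = true;
    created = create();
    return created;
  }
};
static bool failing_create()
{
  std::printf("trying to create resource\n");
  return false;
}
int main()
{
  LazyResource resource;
  resource.ensure(failing_create); /* logs once */
  resource.ensure(failing_create); /* failure is remembered, no second attempt */
  return 0;
}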
void BlenderDisplayDriver::gl_resources_destroy()
{
gl_context_enable();
tiles_->current_tile.gl_resources_destroy();
tiles_->finished_tiles.gl_resources_destroy_and_clear();
if (vertex_buffer_ != 0) {
glDeleteBuffers(1, &vertex_buffer_);
}
if (texture_.gl_pbo_id) {
glDeleteBuffers(1, &texture_.gl_pbo_id);
texture_.gl_pbo_id = 0;
}
if (texture_.gl_id) {
glDeleteTextures(1, &texture_.gl_id);
texture_.gl_id = 0;
}
gl_context_disable();
gl_context_dispose();
}
bool BlenderDisplayDriver::gl_texture_resources_ensure()
{
if (texture_.creation_attempted) {
return texture_.is_created;
}
texture_.creation_attempted = true;
DCHECK(!texture_.gl_id);
DCHECK(!texture_.gl_pbo_id);
/* Create texture. */
glGenTextures(1, &texture_.gl_id);
if (!texture_.gl_id) {
LOG(ERROR) << "Error creating texture.";
return false;
}
/* Configure the texture. */
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture_.gl_id);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, 0);
/* Create PBO for the texture. */
glGenBuffers(1, &texture_.gl_pbo_id);
if (!texture_.gl_pbo_id) {
LOG(ERROR) << "Error creating texture pixel buffer object.";
return false;
}
/* Creation finished with a success. */
texture_.is_created = true;
return true;
}
void BlenderDisplayDriver::texture_update_if_needed()
{
if (!texture_.need_update) {
return;
}
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, texture_.gl_pbo_id);
glTexSubImage2D(
GL_TEXTURE_2D, 0, 0, 0, texture_.width, texture_.height, GL_RGBA, GL_HALF_FLOAT, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
texture_.need_update = false;
}
void BlenderDisplayDriver::vertex_buffer_update(const Params & /*params*/)
{
/* Draw at the parameters for which the texture has been updated. This allows the texture to
* always be drawn during border-rendered camera view without flickering. The validity of the
* display parameters for a texture is guaranteed by the initial "clear" state, which makes
* drawing take an early exit.
*
* Such an approach can cause some extra "jelly" effect during panning, but it is no more jelly
* than the overlay of selected objects. Also, it's possible to redraw the texture at an
* intersection of the texture draw parameters and the latest updated draw parameters (although
* the complexity of doing it might not be worth it). */
const int x = texture_.params.full_offset.x;
const int y = texture_.params.full_offset.y;
const int width = texture_.params.size.x;
const int height = texture_.params.size.y;
/* Invalidate old contents - avoids stalling if the buffer is still waiting in queue to be
* rendered. */
glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
float *vpointer = reinterpret_cast<float *>(glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY));
if (!vpointer) {
return;
}
vpointer[0] = 0.0f;
vpointer[1] = 0.0f;
vpointer[2] = x;
vpointer[3] = y;
vpointer[4] = 1.0f;
vpointer[5] = 0.0f;
vpointer[6] = x + width;
vpointer[7] = y;
vpointer[8] = 1.0f;
vpointer[9] = 1.0f;
vpointer[10] = x + width;
vpointer[11] = y + height;
vpointer[12] = 0.0f;
vpointer[13] = 1.0f;
vpointer[14] = x;
vpointer[15] = y + height;
glUnmapBuffer(GL_ARRAY_BUFFER);
}
CCL_NAMESPACE_END


@@ -26,7 +26,6 @@
#include "util/thread.h"
#include "util/unique_ptr.h"
#include "util/vector.h"
CCL_NAMESPACE_BEGIN
@@ -113,8 +112,6 @@ class BlenderDisplayDriver : public DisplayDriver {
void set_zoom(float zoom_x, float zoom_y);
protected:
virtual void next_tile_begin() override;
virtual bool update_begin(const Params &params, int texture_width, int texture_height) override;
virtual void update_end() override;
@@ -125,17 +122,33 @@ class BlenderDisplayDriver : public DisplayDriver {
virtual void draw(const Params &params) override;
virtual void flush() override;
/* Helper function which allocates new GPU context. */
void gl_context_create();
bool gl_context_enable();
void gl_context_disable();
void gl_context_dispose();
/* Make sure texture is allocated and its initial configuration is performed. */
bool gl_texture_resources_ensure();
/* Ensure all runtime GPU resources needed for drawing are allocated.
* Returns true if all resources needed for drawing are available. */
bool gl_draw_resources_ensure();
/* Destroy all GPU resources which are being used by this object. */
void gl_resources_destroy();
/* Update GPU texture dimensions and content if needed (new pixel data was provided).
*
* NOTE: The texture needs to be bound. */
void texture_update_if_needed();
/* Update vertex buffer with new coordinates of vertex positions and texture coordinates.
* This buffer is used to render texture in the viewport.
*
* NOTE: The buffer needs to be bound. */
void vertex_buffer_update(const Params &params);
BL::RenderEngine b_engine_;
/* OpenGL context which is used when the render engine doesn't have its own. */
@@ -146,14 +159,50 @@ class BlenderDisplayDriver : public DisplayDriver {
/* Mutex used to guard the `gl_context_`. */
thread_mutex gl_context_mutex_;
/* Content of the display is to be filled with zeroes. */
std::atomic<bool> need_clear_ = true;
/* Texture which contains pixels of the render result. */
struct {
/* Indicates whether texture creation was attempted and succeeded.
* Used to avoid multiple attempts of texture creation on GPU issues or GPU context
* misconfiguration. */
bool creation_attempted = false;
bool is_created = false;
/* OpenGL resource IDs of the texture itself and Pixel Buffer Object (PBO) used to write
* pixels to it.
*
* NOTE: Allocated on the engine's context. */
uint gl_id = 0;
uint gl_pbo_id = 0;
/* Is true when new data was written to the PBO, meaning, the texture might need to be resized
* and new data is to be uploaded to the GPU. */
bool need_update = false;
/* Content of the texture is to be filled with zeroes. */
std::atomic<bool> need_clear = true;
/* Dimensions of the texture in pixels. */
int width = 0;
int height = 0;
/* Dimensions of the underlying PBO. */
int buffer_width = 0;
int buffer_height = 0;
/* Display parameters the texture has been updated for. */
Params params;
} texture_;
unique_ptr<BlenderDisplayShader> display_shader_;
/* Opaque storage for an internal state and data for tiles. */
struct Tiles;
unique_ptr<Tiles> tiles_;
/* Special track of whether GPU resources were attempted to be created, to avoid attempts of
* their re-creation on failure on every redraw. */
bool gl_draw_resource_creation_attempted_ = false;
bool gl_draw_resources_created_ = false;
/* Vertex buffer which holds the vertices of a triangle fan which is textured with the texture
* holding the render result. */
uint vertex_buffer_ = 0;
void *gl_render_sync_ = nullptr;
void *gl_upload_sync_ = nullptr;
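The need_clear flag in the texture state above is atomic because the clear is requested from one thread and consumed by another (map_texture_buffer(), graphics_interop_get(), or the early return in draw()). A simplified sketch of the consume-once idea; note the real draw() only checks the flag and leaves resetting it to the update path:
#include <atomic>
#include <cstdio>
/* Simplified texture state: a clear is requested by setting the atomic flag,
 * and the writer consumes it exactly once before uploading new pixels. */
struct TextureState {
  std::atomic<bool> need_clear = true;
};
static void upload_pixels(TextureState &texture)
{
  if (texture.need_clear.exchange(false)) {
    std::printf("zero-fill the buffer before writing new pixels\n");
  }
  std::printf("upload pixels\n");
}
int main()
{
  TextureState texture;
  upload_pixels(texture); /* first upload after a clear request zero-fills first */
  upload_pixels(texture); /* later uploads just write new pixels */
  return 0;
}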

Some files were not shown because too many files have changed in this diff.