Compare commits
33 Commits
temp-exper ... geometry-n

SHA1:
bf4b31468e
f5de32562e
bc4e31afb6
065dfdc529
9d7672be71
994e7178bb
1719743066
8910033f57
2a4c6c612a
b062b922f9
895f4620a0
fafed6234b
7ff8094a8b
5aabf67a9c
ab8c7fe946
a05012d500
97a93566e9
da4d697772
87218899be
ffa0a6df9d
706fa5ad76
b7f6de490d
7e485b4620
a7dba81aab
4606e83a75
1d28de57a4
3cfcfb938d
bcdc6910a0
7793e8c884
05d9bd7c4a
9255ce9247
a0ce0154e7
0cd7f7ddd1

.clang-tidy (15 changes)

@@ -15,6 +15,7 @@ Checks: >
  -readability-misleading-indentation,
  -readability-redundant-member-init,
  -readability-use-anyofallof,
  -readability-function-cognitive-complexity,
@@ -29,19 +30,7 @@ Checks: >
  -bugprone-sizeof-expression,
  -bugprone-integer-division,
  -bugprone-exception-escape,
  -bugprone-redundant-branch-condition,
  modernize-*,
  -modernize-use-auto,
  -modernize-use-trailing-return-type,
  -modernize-avoid-c-arrays,
  -modernize-use-equals-default,
  -modernize-use-nodiscard,
  -modernize-loop-convert,
  -modernize-pass-by-value,
  -modernize-use-default-member-init,
  -modernize-raw-string-literal,
  -modernize-avoid-bind,
  -modernize-use-transparent-functors,
WarningsAsErrors: '*'

@@ -1,169 +0,0 @@
# git config blame.ignoreRevsFile .git-blame-ignore-revs
#
# After running the above, commits listed in this file will be
# ignored by git blame. The blame will be shifted to the person
# who edited the line(s) before the ignored commit.
#
# To disable this ignorance for a command, run as follows
# git blame --ignore-revs-file="" <other options>
#
# Changes that belong here:
# - Massive comment, doxy-sections, or spelling corrections.
# - Clang-format, PEP8 or other automated changes which are *strictly* "no functional change".
# - Several smaller commits should be added to this list at once, because adding
#   one extra commit (to edit this file) after every small cleanup is noisy.
#
# Note:
# - The comment above the SHA should be the first line of the commit.
# - It is fine to pack together similar commits if they have the same explanatory comment.
# - Use only 40 character git SHAs; not smaller ones, not prefixed with rB.
#
# https://git-scm.com/docs/git-blame/2.23.0
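
For reference, the setup the header above describes boils down to the two shell commands it quotes; the blamed path below is only an example:

    git config blame.ignoreRevsFile .git-blame-ignore-revs
    git blame source/blender/blenkernel/intern/mesh.c                        # skips the commits listed below
    git blame --ignore-revs-file="" source/blender/blenkernel/intern/mesh.c  # bypass the list for one command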

# white space commit. (2 spaces -> tab).
0a3694cd6ebec710da7110e9f168a72d47c71ee0

# Cycles: Cleanup, spacing after preprocessor
cb4b5e12abf1fc6cf9ffc0944e0a1bc406286c63

# ClangFormat: apply to source, most of intern
e12c08e8d170b7ca40f204a5b0423c23a9fbc2c1

# Code Style: use "#pragma once" in source directory
91694b9b58ab953f3b313be9389cc1303e472fc2

# Code Style: use "#pragma once" in some newer headers
8198dbb888856b8c11757586df02aca15f132f90

# Code Style: use "#pragma once" in intern/ghost
1b1129f82a9cf316b54fbc025f8cfcc1a74b8589

# Cleanup: mostly comments, use doxy syntax & typos
e0cb02587012b4b2f4b18363dc7d0a7da2c02093

# Cleanup: use C comments for descriptive text
2abfcebb0eb7989e3d1e7d03f37ecf5c088210af

# use lowercase for cmake builtin names and macros, remove contents in else() and endif() which is no longer needed.
afacd184982e58a9c830a3d5366e25983939a7ba

# Spelling: It's Versus Its
3a7fd309fce89213b0224b3c6807adb2d1fe7ca8

# Spelling: Then Versus Than
d1eefc421544e2ea632fb35cb6bcaade4c39ce6b

# Spelling: Miscellaneous
84ef3b80de4915a24a9fd2fd214d0fa44e59b854

# Spelling: Loose Versus Lose
c0a6bc19794c69843c38451c762e91bc10136e0f

# Spelling: Apart Versus A Part
3d26cd01b9ba6381eb165e11536345ae652dfb41

# Cleanup: use 2 space indentation for CMake
3076d95ba441cd32706a27d18922a30f8fd28b8a

# Cleanup: use over-line for doxy comments
4b188bb08cf5aaae3c68ab57bbcfa037eef1ac10

# Cleanup: General comment style clean up of graph_edit.c and fcurve.c
0105f146bb40bd609ccbda3d3f6aeb8e14ad3f9e

# Cleanup: pep8 (indentation, spacing, long lines)
41d2d6da0c96d351b47acb64d3e0decdba16cb16

# Cleanup: pep8, blank lines
bab9de2a52929fe2b45ecddb1eb09da3378e303b

# Cleanup: PEP8 for python changes
1e7e94588daa66483190f45a9de5e98228f80e05

# GPencil: Cleanup pep8
a09cc3ee1a99f2cd5040bbf30c8ab8c588bb2bb1

# Cleanup: trailing space, remove tabs, pep8
c42a6b77b52560d257279de2cb624b4ef2c0d24c

# Cleanup: use C style doxygen comments
8c1726918374e1d2d2123e17bae8db5aadde3433

# Cleanup: use doxy sections for imbuf
c207f7c22e1439e0b285fba5d2c072bdae23f981

# Cleanup: Clang-Tidy, modernize-use-bool-literals
af35ada2f3fa8da4d46b3a71de724d353d716820

# Cleanup: Use nullptr everywhere in fluid code
311031ecd03dbfbf43e1df672a395f24b2e7d4d3

# Cleanup: Clang-Tidy, modernize-redundant-void-arg
a331d5c99299c4514ca33c843b1c79b872f2728d

# Cleanup: Clang-Tidy modernize-use-nullptr
16732def37c5a66f3ea28dbe247b09cc6bca6677

# Cleanup: Clang-tidy, modernize-concat-nested-namespaces
4525049aa0cf818f6483dce589ac9791eb562338

# Cleanup: Clang-tidy else-after-return
ae342ed4511cf2e144dcd27ce2c635d3d536f9ad

# Cleanup: Clang-Tidy, readability-redundant-member-init
190170d4cc92ff34abe1744a10474ac4f1074086

# Cleanup: use 'filepath' instead of 'name' for ImBuf utilities
99f56b4c16323f96c0cbf54e392fb509fcac5bda

# Cleanup: clang-format
c4d8f6a4a8ddc29ed27311ed7578b3c8c31399d2
b5d310b569e07a937798a2d38539cfd290149f1c
8c846cccd6bdfd3e90a695fabbf05f53e5466a57
40d4a4cb1a6b4c3c2a486e8f2868f547530e0811
4eac03d821fa17546f562485f7d073813a5e5943

# Cleanup: use preprocessor version check for PyTypeObject declaration
cd9acfed4f7674b84be965d469a367aef96f8af3

# Cycles: fix compilation of OSL shaders following API change
b980cd163a9d5d77eeffc2e353333e739fa9e719

# Cleanup: clang-tidy suppress warnings for PyTypeObject.tp_print
efd71aad4f22ec0073d80b8dd296015d3f395aa8

# Cleanup: fix wrong merge, remove extra unique_ptr.
6507449e54a167c63a72229e4d0119dd2af68ae5

# Cleanup: fix some clang tidy issues
525a042c5c7513c41240b118acca002f6c60cc12

# Fix T82520: error building freestyle with Python3.8
e118426e4695a97d67e65d69677f3c4e2db50a56

# Cleanup: Clang-tidy, readability-else-after-return
7be47dadea5066ae095c644e0b4f1f10d75f5ab3

# Cleanup: Add `r_` to return parameter
45dca05b1cd2a5ead59144c93d790fdfe7c35ee6

# Cleanup: Typo in `print_default_info` function name.
41a73909dec716642f044e60b40a28335c9fdb10

# Cleanup: Reduce indentation
1cc3a0e2cf73a5ff4f9e0a7f5338eda77266b300

# Build-system: Force C linkage for all DNA type headers
ad4b7741dba45a2be210942c18af6b6e4438f129

# Cleanup: Move function to proper section
c126e27cdc8b28365a9d5f9fafc4d521d1eb83df

# Cleanup: remove break after return statements
bbdfeb751e16d939482d2e4b95c4d470f53f18a5

# Cleanup: clang-tidy
af013ff76feef7e8b8ba642279c62a5dc275d59f

# Cleanup: Make panel type flag names more clear
9d28353b525ecfbcca1501be72e4276dfb2bbc2a

CMakeLists.txt (186 changes)
@@ -178,7 +178,6 @@ mark_as_advanced(BUILDINFO_OVERRIDE_TIME)
|
||||
option(WITH_IK_ITASC "Enable ITASC IK solver (only disable for development & for incompatible C++ compilers)" ON)
|
||||
option(WITH_IK_SOLVER "Enable Legacy IK solver (only disable for development)" ON)
|
||||
option(WITH_FFTW3 "Enable FFTW3 support (Used for smoke, ocean sim, and audio effects)" ON)
|
||||
option(WITH_PUGIXML "Enable PugiXML support (Used for OpenImageIO, Grease Pencil SVG export)" ON)
|
||||
option(WITH_BULLET "Enable Bullet (Physics Engine)" ON)
|
||||
option(WITH_SYSTEM_BULLET "Use the systems bullet library (currently unsupported due to missing features in upstream!)" )
|
||||
mark_as_advanced(WITH_SYSTEM_BULLET)
|
||||
@@ -204,7 +203,7 @@ option(WITH_OPENVDB "Enable features relying on OpenVDB" ON)
|
||||
option(WITH_OPENVDB_BLOSC "Enable blosc compression for OpenVDB, only enable if OpenVDB was built with blosc support" ON)
|
||||
option(WITH_OPENVDB_3_ABI_COMPATIBLE "Assume OpenVDB library has been compiled with version 3 ABI compatibility" OFF)
|
||||
mark_as_advanced(WITH_OPENVDB_3_ABI_COMPATIBLE)
|
||||
option(WITH_NANOVDB "Enable usage of NanoVDB data structure for rendering on the GPU" ON)
|
||||
option(WITH_NANOVDB "Enable usage of NanoVDB data structure for accelerated rendering on the GPU" OFF)
|
||||
|
||||
# GHOST Windowing Library Options
|
||||
option(WITH_GHOST_DEBUG "Enable debugging output for the GHOST library" OFF)
|
||||
@@ -347,21 +346,16 @@ if(UNIX AND NOT APPLE)
endif()

option(WITH_PYTHON_INSTALL "Copy system python into the blender install folder" ON)

if((WITH_AUDASPACE AND NOT WITH_SYSTEM_AUDASPACE) OR WITH_MOD_FLUID)
  option(WITH_PYTHON_NUMPY "Include NumPy in Blender (used by Audaspace and Mantaflow)" ON)
endif()

if(WIN32 OR APPLE)
  # Windows and macOS have this bundled with Python libraries.
elseif(WITH_PYTHON_INSTALL OR WITH_PYTHON_NUMPY)
elseif(WITH_PYTHON_INSTALL OR (WITH_AUDASPACE AND NOT WITH_SYSTEM_AUDASPACE))
  set(PYTHON_NUMPY_PATH "" CACHE PATH "Path to python site-packages or dist-packages containing 'numpy' module")
  mark_as_advanced(PYTHON_NUMPY_PATH)
  set(PYTHON_NUMPY_INCLUDE_DIRS "" CACHE PATH "Path to the include directory of the NumPy module")
  set(PYTHON_NUMPY_INCLUDE_DIRS ${PYTHON_NUMPY_PATH}/numpy/core/include CACHE PATH "Path to the include directory of the numpy module")
  mark_as_advanced(PYTHON_NUMPY_INCLUDE_DIRS)
endif()
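
The NumPy-related cache variables touched above (WITH_PYTHON_NUMPY, PYTHON_NUMPY_PATH, PYTHON_NUMPY_INCLUDE_DIRS) can also be seeded at configure time; a sketch with hypothetical Debian-style paths, not part of the change itself:

    cmake -S . -B ../build_linux \
      -DWITH_PYTHON_NUMPY=ON \
      -DPYTHON_NUMPY_PATH=/usr/lib/python3/dist-packages \
      -DPYTHON_NUMPY_INCLUDE_DIRS=/usr/lib/python3/dist-packages/numpy/core/include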
if(WITH_PYTHON_INSTALL)
|
||||
option(WITH_PYTHON_INSTALL_NUMPY "Copy system NumPy into the blender install folder" ON)
|
||||
option(WITH_PYTHON_INSTALL_NUMPY "Copy system numpy into the blender install folder" ON)
|
||||
|
||||
if(UNIX AND NOT APPLE)
|
||||
option(WITH_PYTHON_INSTALL_REQUESTS "Copy system requests into the blender install folder" ON)
|
||||
@@ -383,7 +377,6 @@ option(WITH_CYCLES_CUDA_BINARIES "Build Cycles CUDA binaries" OFF)
|
||||
option(WITH_CYCLES_CUBIN_COMPILER "Build cubins with nvrtc based compiler instead of nvcc" OFF)
|
||||
option(WITH_CYCLES_CUDA_BUILD_SERIAL "Build cubins one after another (useful on machines with limited RAM)" OFF)
|
||||
mark_as_advanced(WITH_CYCLES_CUDA_BUILD_SERIAL)
|
||||
set(CYCLES_TEST_DEVICES CPU CACHE STRING "Run regression tests on the specified device types (CPU CUDA OPTIX OPENCL)" )
|
||||
set(CYCLES_CUDA_BINARIES_ARCH sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 sm_70 sm_75 sm_86 compute_75 CACHE STRING "CUDA architectures to build binaries for")
|
||||
mark_as_advanced(CYCLES_CUDA_BINARIES_ARCH)
|
||||
unset(PLATFORM_DEFAULT)
|
||||
@@ -432,8 +425,8 @@ mark_as_advanced(WITH_CXX_GUARDEDALLOC)
option(WITH_ASSERT_ABORT "Call abort() when raising an assertion through BLI_assert()" ON)
mark_as_advanced(WITH_ASSERT_ABORT)

if((UNIX AND NOT APPLE) OR (CMAKE_GENERATOR MATCHES "^Visual Studio.+"))
  option(WITH_CLANG_TIDY "Use Clang Tidy to analyze the source code (only enable for development on Linux using Clang, or Windows using the Visual Studio IDE)" OFF)
if(UNIX AND NOT APPLE)
  option(WITH_CLANG_TIDY "Use Clang Tidy to analyze the source code (only enable for development on Linux using Clang)" OFF)
  mark_as_advanced(WITH_CLANG_TIDY)
endif()
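
WITH_CLANG_TIDY above is a development-only switch; a rough Linux configure that turns it on might look like this (compiler choice and build directory are assumptions, not part of the change):

    CC=clang CXX=clang++ cmake -S . -B ../build_linux_tidy -DWITH_CLANG_TIDY=ON
    cmake --build ../build_linux_tidy -j "$(nproc)"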
@@ -533,10 +526,10 @@ if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_C_COMPILER_ID MATCHES "Clang")
|
||||
# Silence the warning that object-size is not effective in -O0.
|
||||
set(_asan_defaults "${_asan_defaults}")
|
||||
else()
|
||||
string(APPEND _asan_defaults " -fsanitize=object-size")
|
||||
set(_asan_defaults "${_asan_defaults} -fsanitize=object-size")
|
||||
endif()
|
||||
else()
|
||||
string(APPEND _asan_defaults " -fsanitize=leak -fsanitize=object-size")
|
||||
set(_asan_defaults "${_asan_defaults} -fsanitize=leak -fsanitize=object-size")
|
||||
endif()
|
||||
|
||||
set(COMPILER_ASAN_CFLAGS "${_asan_defaults}" CACHE STRING "C flags for address sanitizer")
|
||||
@@ -577,11 +570,6 @@ if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_C_COMPILER_ID MATCHES "Clang")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_C_COMPILER_ID MATCHES "Clang")
|
||||
option(WITH_COMPILER_SHORT_FILE_MACRO "Make paths in macros like __FILE__ relative to top level source and build directories." ON)
|
||||
mark_as_advanced(WITH_COMPILER_SHORT_FILE_MACRO)
|
||||
endif()
|
||||
|
||||
if(WIN32)
|
||||
# Use hardcoded paths or find_package to find externals
|
||||
option(WITH_WINDOWS_FIND_MODULES "Use find_package to locate libraries" OFF)
|
||||
@@ -610,11 +598,6 @@ if(WIN32)
|
||||
|
||||
endif()
|
||||
|
||||
if(UNIX)
|
||||
# See WITH_WINDOWS_SCCACHE for Windows.
|
||||
option(WITH_COMPILER_CCACHE "Use ccache to improve rebuild times (Works with Ninja, Makefiles and Xcode)" OFF)
|
||||
endif()
|
||||
|
||||
# The following only works with the Ninja generator in CMake >= 3.0.
|
||||
if("${CMAKE_GENERATOR}" MATCHES "Ninja")
|
||||
option(WITH_NINJA_POOL_JOBS
|
||||
@@ -709,8 +692,6 @@ set_and_warn_dependency(WITH_BOOST WITH_OPENCOLORIO OFF)
|
||||
set_and_warn_dependency(WITH_BOOST WITH_QUADRIFLOW OFF)
|
||||
set_and_warn_dependency(WITH_BOOST WITH_USD OFF)
|
||||
set_and_warn_dependency(WITH_BOOST WITH_ALEMBIC OFF)
|
||||
set_and_warn_dependency(WITH_PUGIXML WITH_CYCLES_OSL OFF)
|
||||
set_and_warn_dependency(WITH_PUGIXML WITH_OPENIMAGEIO OFF)
|
||||
|
||||
if(WITH_BOOST AND NOT (WITH_CYCLES OR WITH_OPENIMAGEIO OR WITH_INTERNATIONAL OR
|
||||
WITH_OPENVDB OR WITH_OPENCOLORIO OR WITH_USD OR WITH_ALEMBIC))
|
||||
@@ -877,11 +858,11 @@ if(NOT CMAKE_BUILD_TYPE MATCHES "Release")
|
||||
# Since linker flags are not set, all compiler checks and `find_package`
|
||||
# calls that rely on `try_compile` will fail.
|
||||
# See CMP0066 also.
|
||||
string(APPEND CMAKE_C_FLAGS_DEBUG " ${COMPILER_ASAN_CFLAGS}")
|
||||
string(APPEND CMAKE_C_FLAGS_RELWITHDEBINFO " ${COMPILER_ASAN_CFLAGS}")
|
||||
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMPILER_ASAN_CFLAGS}")
|
||||
set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} ${COMPILER_ASAN_CFLAGS}")
|
||||
|
||||
string(APPEND CMAKE_CXX_FLAGS_DEBUG " ${COMPILER_ASAN_CXXFLAGS}")
|
||||
string(APPEND CMAKE_CXX_FLAGS_RELWITHDEBINFO " ${COMPILER_ASAN_CXXFLAGS}")
|
||||
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMPILER_ASAN_CXXFLAGS}")
|
||||
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} ${COMPILER_ASAN_CXXFLAGS}")
|
||||
endif()
|
||||
if(MSVC)
|
||||
set(COMPILER_ASAN_LINKER_FLAGS "/FUNCTIONPADMIN:6")
|
||||
@@ -889,11 +870,9 @@ if(NOT CMAKE_BUILD_TYPE MATCHES "Release")
|
||||
|
||||
if(APPLE AND COMPILER_ASAN_LIBRARY)
|
||||
string(REPLACE " " ";" _list_COMPILER_ASAN_CFLAGS ${COMPILER_ASAN_CFLAGS})
|
||||
set(_is_CONFIG_DEBUG "$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>")
|
||||
add_compile_options("$<${_is_CONFIG_DEBUG}:${_list_COMPILER_ASAN_CFLAGS}>")
|
||||
add_link_options("$<${_is_CONFIG_DEBUG}:-fno-omit-frame-pointer;-fsanitize=address>")
|
||||
add_compile_options("$<$<NOT:$<CONFIG:Release>>:${_list_COMPILER_ASAN_CFLAGS}>")
|
||||
add_link_options("$<$<NOT:$<CONFIG:Release>>:-fno-omit-frame-pointer;-fsanitize=address>")
|
||||
unset(_list_COMPILER_ASAN_CFLAGS)
|
||||
unset(_is_CONFIG_DEBUG)
|
||||
elseif(COMPILER_ASAN_LIBRARY)
|
||||
set(PLATFORM_LINKLIBS "${PLATFORM_LINKLIBS};${COMPILER_ASAN_LIBRARY}")
|
||||
set(PLATFORM_LINKFLAGS "${COMPILER_ASAN_LIBRARY} ${COMPILER_ASAN_LINKER_FLAGS}")
|
||||
@@ -962,11 +941,11 @@ endif()
|
||||
# Do it globally, SSE2 is required for quite some time now.
|
||||
# Doing it now allows to use SSE/SSE2 in inline headers.
|
||||
if(SUPPORT_SSE_BUILD)
|
||||
string(PREPEND PLATFORM_CFLAGS "${COMPILER_SSE_FLAG} ")
|
||||
set(PLATFORM_CFLAGS " ${COMPILER_SSE_FLAG} ${PLATFORM_CFLAGS}")
|
||||
add_definitions(-D__SSE__ -D__MMX__)
|
||||
endif()
|
||||
if(SUPPORT_SSE2_BUILD)
|
||||
string(APPEND PLATFORM_CFLAGS " ${COMPILER_SSE2_FLAG}")
|
||||
set(PLATFORM_CFLAGS " ${PLATFORM_CFLAGS} ${COMPILER_SSE2_FLAG}")
|
||||
add_definitions(-D__SSE2__)
|
||||
if(NOT SUPPORT_SSE_BUILD) # don't double up
|
||||
add_definitions(-D__MMX__)
|
||||
@@ -1178,8 +1157,8 @@ if(WITH_OPENMP)
|
||||
|
||||
if(OPENMP_FOUND)
|
||||
if(NOT WITH_OPENMP_STATIC)
|
||||
string(APPEND CMAKE_C_FLAGS " ${OpenMP_C_FLAGS}")
|
||||
string(APPEND CMAKE_CXX_FLAGS " ${OpenMP_CXX_FLAGS}")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
|
||||
else()
|
||||
# Typically avoid adding flags as defines but we can't
|
||||
# pass OpenMP flags to the linker for static builds, meaning
|
||||
@@ -1496,12 +1475,10 @@ if(CMAKE_COMPILER_IS_GNUCC)
|
||||
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_INT_IN_BOOL_CONTEXT -Wno-int-in-bool-context)
|
||||
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_FORMAT -Wno-format)
|
||||
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_SWITCH -Wno-switch)
|
||||
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)
|
||||
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_CLASS_MEMACCESS -Wno-class-memaccess)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_COMMENT -Wno-comment)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_TYPEDEFS -Wno-unused-local-typedefs)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)
|
||||
|
||||
if(CMAKE_COMPILER_IS_GNUCC AND (NOT "${CMAKE_C_COMPILER_VERSION}" VERSION_LESS "7.0"))
|
||||
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_IMPLICIT_FALLTHROUGH -Wno-implicit-fallthrough)
|
||||
@@ -1540,7 +1517,6 @@ elseif(CMAKE_C_COMPILER_ID MATCHES "Clang")
|
||||
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_PARAMETER -Wno-unused-parameter)
|
||||
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)
|
||||
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_MACROS -Wno-unused-macros)
|
||||
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_MISLEADING_INDENTATION -Wno-misleading-indentation)
|
||||
|
||||
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_MISSING_VARIABLE_DECLARATIONS -Wno-missing-variable-declarations)
|
||||
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_INCOMPAT_PTR_DISCARD_QUAL -Wno-incompatible-pointer-types-discards-qualifiers)
|
||||
@@ -1551,18 +1527,15 @@ elseif(CMAKE_C_COMPILER_ID MATCHES "Clang")
|
||||
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNDEF -Wno-undef)
|
||||
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_MISSING_NORETURN -Wno-missing-noreturn)
|
||||
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_PARAMETER -Wno-unused-parameter)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_PRIVATE_FIELD -Wno-unused-private-field)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_CXX11_NARROWING -Wno-c++11-narrowing)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_NON_VIRTUAL_DTOR -Wno-non-virtual-dtor)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_MACROS -Wno-unused-macros)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_REORDER -Wno-reorder)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_COMMENT -Wno-comment)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_TYPEDEFS -Wno-unused-local-typedefs)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNDEFINED_VAR_TEMPLATE -Wno-undefined-var-template)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_INSTANTIATION_AFTER_SPECIALIZATION -Wno-instantiation-after-specialization)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_MISLEADING_INDENTATION -Wno-misleading-indentation)
|
||||
|
||||
elseif(CMAKE_C_COMPILER_ID MATCHES "Intel")
|
||||
|
||||
@@ -1575,8 +1548,8 @@ elseif(CMAKE_C_COMPILER_ID MATCHES "Intel")
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_SIGN_COMPARE -Wno-sign-compare)
|
||||
|
||||
# disable numbered, false positives
|
||||
string(APPEND C_WARNINGS " -wd188,186,144,913,556,858,597,177,1292,167,279,592,94,2722,3199")
|
||||
string(APPEND CXX_WARNINGS " -wd188,186,144,913,556,858,597,177,1292,167,279,592,94,2722,3199")
|
||||
set(C_WARNINGS "${C_WARNINGS} -wd188,186,144,913,556,858,597,177,1292,167,279,592,94,2722,3199")
|
||||
set(CXX_WARNINGS "${CXX_WARNINGS} -wd188,186,144,913,556,858,597,177,1292,167,279,592,94,2722,3199")
|
||||
elseif(CMAKE_C_COMPILER_ID MATCHES "MSVC")
|
||||
# most msvc warnings are C & C++
|
||||
set(_WARNINGS
|
||||
@@ -1607,7 +1580,7 @@ elseif(CMAKE_C_COMPILER_ID MATCHES "MSVC")
|
||||
|
||||
if(MSVC_VERSION GREATER_EQUAL 1911)
|
||||
# see https://docs.microsoft.com/en-us/cpp/error-messages/compiler-warnings/c5038?view=vs-2017
|
||||
string(APPEND _WARNINGS " /w35038") # order of initialization in c++ constructors
|
||||
set(_WARNINGS "${_WARNINGS} /w35038") # order of initialization in c++ constructors
|
||||
endif()
|
||||
|
||||
string(REPLACE ";" " " _WARNINGS "${_WARNINGS}")
|
||||
@@ -1631,33 +1604,36 @@ if(WITH_PYTHON)
|
||||
|
||||
if(WIN32 OR APPLE)
|
||||
# Windows and macOS have this bundled with Python libraries.
|
||||
elseif((WITH_PYTHON_INSTALL AND WITH_PYTHON_INSTALL_NUMPY) OR WITH_PYTHON_NUMPY)
|
||||
elseif((WITH_PYTHON_INSTALL AND WITH_PYTHON_INSTALL_NUMPY) OR (WITH_AUDASPACE AND NOT WITH_SYSTEM_AUDASPACE))
|
||||
if(("${PYTHON_NUMPY_PATH}" STREQUAL "") OR (${PYTHON_NUMPY_PATH} MATCHES NOTFOUND))
|
||||
find_python_package(numpy "core/include")
|
||||
find_python_package(numpy)
|
||||
unset(PYTHON_NUMPY_INCLUDE_DIRS CACHE)
|
||||
set(PYTHON_NUMPY_INCLUDE_DIRS ${PYTHON_NUMPY_PATH}/numpy/core/include CACHE PATH "Path to the include directory of the numpy module")
|
||||
mark_as_advanced(PYTHON_NUMPY_INCLUDE_DIRS)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WIN32 OR APPLE)
|
||||
# pass, we have this in lib/python/site-packages
|
||||
elseif(WITH_PYTHON_INSTALL_REQUESTS)
|
||||
find_python_package(requests "")
|
||||
find_python_package(requests)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(MSVC)
|
||||
string(APPEND CMAKE_CXX_FLAGS " /std:c++17")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /std:c++17")
|
||||
# Make MSVC properly report the value of the __cplusplus preprocessor macro
|
||||
# Available MSVC 15.7 (1914) and up, without this it reports 199711L regardless
|
||||
# of the C++ standard chosen above
|
||||
if(MSVC_VERSION GREATER 1913)
|
||||
string(APPEND CMAKE_CXX_FLAGS " /Zc:__cplusplus")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zc:__cplusplus")
|
||||
endif()
|
||||
elseif(
|
||||
CMAKE_COMPILER_IS_GNUCC OR
|
||||
CMAKE_C_COMPILER_ID MATCHES "Clang" OR
|
||||
CMAKE_C_COMPILER_ID MATCHES "Intel"
|
||||
)
|
||||
string(APPEND CMAKE_CXX_FLAGS " -std=c++17")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
|
||||
else()
|
||||
message(FATAL_ERROR "Unknown compiler ${CMAKE_C_COMPILER_ID}, can't enable C++17 build")
|
||||
endif()
|
||||
@@ -1670,47 +1646,12 @@ if(
|
||||
(CMAKE_C_COMPILER_ID MATCHES "Intel")
|
||||
)
|
||||
# Use C11 + GNU extensions, works with GCC, Clang, ICC
|
||||
string(APPEND CMAKE_C_FLAGS " -std=gnu11")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu11")
|
||||
endif()
|
||||
|
||||
if(UNIX AND NOT APPLE)
|
||||
if(NOT WITH_CXX11_ABI)
|
||||
string(APPEND PLATFORM_CFLAGS " -D_GLIBCXX_USE_CXX11_ABI=0")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_COMPILER_SHORT_FILE_MACRO)
|
||||
# Use '-fmacro-prefix-map' for Clang and GCC (MSVC doesn't support this).
|
||||
ADD_CHECK_C_COMPILER_FLAG(C_PREFIX_MAP_FLAGS C_MACRO_PREFIX_MAP -fmacro-prefix-map=foo=bar)
|
||||
ADD_CHECK_CXX_COMPILER_FLAG(CXX_PREFIX_MAP_FLAGS CXX_MACRO_PREFIX_MAP -fmacro-prefix-map=foo=bar)
|
||||
if(C_MACRO_PREFIX_MAP AND CXX_MACRO_PREFIX_MAP)
|
||||
if(APPLE)
|
||||
if(XCODE AND ${XCODE_VERSION} VERSION_LESS 12.0)
|
||||
# Developers may have say LLVM Clang-10.0.1 toolchain (which supports the flag)
|
||||
# with Xcode-11 (the Clang of which doesn't support the flag).
|
||||
message(WARNING
|
||||
"-fmacro-prefix-map flag is NOT supported by Clang shipped with Xcode-${XCODE_VERSION}."
|
||||
" Some Xcode functionality in Product menu may not work. Disabling WITH_COMPILER_SHORT_FILE_MACRO."
|
||||
)
|
||||
set(WITH_COMPILER_SHORT_FILE_MACRO OFF)
|
||||
endif()
|
||||
endif()
|
||||
if(WITH_COMPILER_SHORT_FILE_MACRO)
|
||||
path_ensure_trailing_slash(_src_dir "${CMAKE_SOURCE_DIR}")
|
||||
path_ensure_trailing_slash(_bin_dir "${CMAKE_BINARY_DIR}")
|
||||
# Keep this variable so it can be stripped from build-info.
|
||||
set(PLATFORM_CFLAGS_FMACRO_PREFIX_MAP
|
||||
"-fmacro-prefix-map=\"${_src_dir}\"=\"\" -fmacro-prefix-map=\"${_bin_dir}\"=\"\"")
|
||||
string(APPEND PLATFORM_CFLAGS " ${PLATFORM_CFLAGS_FMACRO_PREFIX_MAP}")
|
||||
unset(_src_dir)
|
||||
unset(_bin_dir)
|
||||
endif()
|
||||
else()
|
||||
message(WARNING
|
||||
"-fmacro-prefix-map flag is NOT supported by C/C++ compiler."
|
||||
" Disabling WITH_COMPILER_SHORT_FILE_MACRO."
|
||||
)
|
||||
set(WITH_COMPILER_SHORT_FILE_MACRO OFF)
|
||||
set(PLATFORM_CFLAGS "${PLATFORM_CFLAGS} -D_GLIBCXX_USE_CXX11_ABI=0")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
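
WITH_COMPILER_SHORT_FILE_MACRO above relies on -fmacro-prefix-map to strip the source and build prefixes out of __FILE__; a standalone sketch of the effect, assuming a GCC or Clang recent enough to accept the flag:

    cat > /tmp/file_macro_demo.c <<'EOF'
    #include <stdio.h>
    int main(void) { printf("%s\n", __FILE__); return 0; }
    EOF
    cc -fmacro-prefix-map=/tmp/= -o /tmp/file_macro_demo /tmp/file_macro_demo.c
    /tmp/file_macro_demo    # prints "file_macro_demo.c" instead of "/tmp/file_macro_demo.c"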
@@ -1773,10 +1714,6 @@ elseif(WITH_CYCLES_STANDALONE)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Testing
|
||||
add_subdirectory(tests)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Blender Application
|
||||
if(WITH_BLENDER)
|
||||
@@ -1784,6 +1721,11 @@ if(WITH_BLENDER)
|
||||
endif()
|
||||
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Testing
|
||||
add_subdirectory(tests)
|
||||
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Define 'heavy' submodules (for Ninja builder when using pools).
|
||||
setup_heavy_lib_pool()
|
||||
@@ -1813,7 +1755,7 @@ if(FIRST_RUN)
|
||||
set(_msg " - ${_setting}")
|
||||
string(LENGTH "${_msg}" _len)
|
||||
while("32" GREATER "${_len}")
|
||||
string(APPEND _msg " ")
|
||||
set(_msg "${_msg} ")
|
||||
math(EXPR _len "${_len} + 1")
|
||||
endwhile()
|
||||
|
||||
@@ -1831,24 +1773,24 @@ if(FIRST_RUN)
|
||||
message(STATUS "C++ Compiler: \"${CMAKE_CXX_COMPILER_ID}\"")
|
||||
|
||||
info_cfg_text("Build Options:")
|
||||
info_cfg_option(WITH_ALEMBIC)
|
||||
info_cfg_option(WITH_BULLET)
|
||||
info_cfg_option(WITH_CYCLES)
|
||||
info_cfg_option(WITH_FFTW3)
|
||||
info_cfg_option(WITH_FREESTYLE)
|
||||
info_cfg_option(WITH_GMP)
|
||||
info_cfg_option(WITH_IK_ITASC)
|
||||
info_cfg_option(WITH_IK_SOLVER)
|
||||
info_cfg_option(WITH_INPUT_NDOF)
|
||||
info_cfg_option(WITH_INTERNATIONAL)
|
||||
info_cfg_option(WITH_IK_ITASC)
|
||||
info_cfg_option(WITH_OPENCOLLADA)
|
||||
info_cfg_option(WITH_FFTW3)
|
||||
info_cfg_option(WITH_INTERNATIONAL)
|
||||
info_cfg_option(WITH_INPUT_NDOF)
|
||||
info_cfg_option(WITH_CYCLES)
|
||||
info_cfg_option(WITH_FREESTYLE)
|
||||
info_cfg_option(WITH_OPENCOLORIO)
|
||||
info_cfg_option(WITH_XR_OPENXR)
|
||||
info_cfg_option(WITH_OPENIMAGEDENOISE)
|
||||
info_cfg_option(WITH_OPENVDB)
|
||||
info_cfg_option(WITH_ALEMBIC)
|
||||
info_cfg_option(WITH_QUADRIFLOW)
|
||||
info_cfg_option(WITH_TBB)
|
||||
info_cfg_option(WITH_USD)
|
||||
info_cfg_option(WITH_XR_OPENXR)
|
||||
info_cfg_option(WITH_TBB)
|
||||
info_cfg_option(WITH_GMP)
|
||||
|
||||
info_cfg_text("Compiler Options:")
|
||||
info_cfg_option(WITH_BUILDINFO)
|
||||
@@ -1856,58 +1798,58 @@ if(FIRST_RUN)
|
||||
|
||||
info_cfg_text("System Options:")
|
||||
info_cfg_option(WITH_INSTALL_PORTABLE)
|
||||
info_cfg_option(WITH_MEM_JEMALLOC)
|
||||
info_cfg_option(WITH_MEM_VALGRIND)
|
||||
info_cfg_option(WITH_SYSTEM_GLEW)
|
||||
info_cfg_option(WITH_X11_ALPHA)
|
||||
info_cfg_option(WITH_X11_XF86VMODE)
|
||||
info_cfg_option(WITH_X11_XFIXES)
|
||||
info_cfg_option(WITH_X11_XINPUT)
|
||||
info_cfg_option(WITH_MEM_JEMALLOC)
|
||||
info_cfg_option(WITH_MEM_VALGRIND)
|
||||
info_cfg_option(WITH_SYSTEM_GLEW)
|
||||
|
||||
info_cfg_text("Image Formats:")
|
||||
info_cfg_option(WITH_OPENIMAGEIO)
|
||||
info_cfg_option(WITH_IMAGE_CINEON)
|
||||
info_cfg_option(WITH_IMAGE_DDS)
|
||||
info_cfg_option(WITH_IMAGE_HDR)
|
||||
info_cfg_option(WITH_IMAGE_OPENEXR)
|
||||
info_cfg_option(WITH_IMAGE_OPENJPEG)
|
||||
info_cfg_option(WITH_IMAGE_TIFF)
|
||||
info_cfg_option(WITH_OPENIMAGEIO)
|
||||
|
||||
info_cfg_text("Audio:")
|
||||
info_cfg_option(WITH_CODEC_AVI)
|
||||
info_cfg_option(WITH_CODEC_FFMPEG)
|
||||
info_cfg_option(WITH_CODEC_SNDFILE)
|
||||
info_cfg_option(WITH_JACK)
|
||||
info_cfg_option(WITH_JACK_DYNLOAD)
|
||||
info_cfg_option(WITH_OPENAL)
|
||||
info_cfg_option(WITH_SDL)
|
||||
info_cfg_option(WITH_SDL_DYNLOAD)
|
||||
info_cfg_option(WITH_JACK)
|
||||
info_cfg_option(WITH_JACK_DYNLOAD)
|
||||
info_cfg_option(WITH_CODEC_AVI)
|
||||
info_cfg_option(WITH_CODEC_FFMPEG)
|
||||
info_cfg_option(WITH_CODEC_SNDFILE)
|
||||
|
||||
info_cfg_text("Compression:")
|
||||
info_cfg_option(WITH_LZMA)
|
||||
info_cfg_option(WITH_LZO)
|
||||
|
||||
info_cfg_text("Python:")
|
||||
if(APPLE)
|
||||
info_cfg_option(WITH_PYTHON_FRAMEWORK)
|
||||
endif()
|
||||
info_cfg_option(WITH_PYTHON_INSTALL)
|
||||
info_cfg_option(WITH_PYTHON_INSTALL_NUMPY)
|
||||
info_cfg_option(WITH_PYTHON_MODULE)
|
||||
info_cfg_option(WITH_PYTHON_SAFETY)
|
||||
if(APPLE)
|
||||
info_cfg_option(WITH_PYTHON_FRAMEWORK)
|
||||
endif()
|
||||
|
||||
info_cfg_text("Modifiers:")
|
||||
info_cfg_option(WITH_MOD_REMESH)
|
||||
info_cfg_option(WITH_MOD_FLUID)
|
||||
info_cfg_option(WITH_MOD_OCEANSIM)
|
||||
info_cfg_option(WITH_MOD_REMESH)
|
||||
|
||||
info_cfg_text("OpenGL:")
|
||||
info_cfg_option(WITH_GLEW_ES)
|
||||
info_cfg_option(WITH_GL_EGL)
|
||||
info_cfg_option(WITH_GL_PROFILE_ES20)
|
||||
if(WIN32)
|
||||
info_cfg_option(WITH_GL_ANGLE)
|
||||
endif()
|
||||
info_cfg_option(WITH_GL_EGL)
|
||||
info_cfg_option(WITH_GL_PROFILE_ES20)
|
||||
info_cfg_option(WITH_GLEW_ES)
|
||||
|
||||
info_cfg_text("")
|
||||
|
||||
|
@@ -41,7 +41,6 @@ Convenience Targets
* developer: Enable faster builds, error checking and tests, recommended for developers.
* config: Run cmake configuration tool to set build options.
* ninja: Use ninja build tool for faster builds.
* ccache: Use ccache for faster rebuilds.

Note: passing the argument 'BUILD_DIR=path' when calling make will override the default build dir.
Note: passing the argument 'BUILD_CMAKE_ARGS=args' lets you add cmake arguments.
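
In practice the convenience targets and the two notes above combine into plain make invocations from the source root; the goal combinations and values here are only examples:

    make developer ninja ccache
    make BUILD_DIR=../blender-build BUILD_CMAKE_ARGS="-DWITH_CYCLES=OFF"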
@@ -242,10 +241,6 @@ ifneq "$(findstring developer, $(MAKECMDGOALS))" ""
|
||||
CMAKE_CONFIG_ARGS:=-C"$(BLENDER_DIR)/build_files/cmake/config/blender_developer.cmake" $(CMAKE_CONFIG_ARGS)
|
||||
endif
|
||||
|
||||
ifneq "$(findstring ccache, $(MAKECMDGOALS))" ""
|
||||
CMAKE_CONFIG_ARGS:=-DWITH_COMPILER_CCACHE=YES $(CMAKE_CONFIG_ARGS)
|
||||
endif
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# build tool
|
||||
|
||||
@@ -345,7 +340,6 @@ headless: all
|
||||
bpy: all
|
||||
developer: all
|
||||
ninja: all
|
||||
ccache: all
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Build dependencies
|
||||
|
@@ -85,7 +85,6 @@ include(cmake/flexbison.cmake)
|
||||
include(cmake/osl.cmake)
|
||||
include(cmake/tbb.cmake)
|
||||
include(cmake/openvdb.cmake)
|
||||
include(cmake/nanovdb.cmake)
|
||||
include(cmake/python.cmake)
|
||||
include(cmake/python_site_packages.cmake)
|
||||
include(cmake/package_python.cmake)
|
||||
@@ -94,7 +93,11 @@ include(cmake/usd.cmake)
|
||||
include(cmake/potrace.cmake)
|
||||
# Boost needs to be included after python.cmake due to the PYTHON_BINARY variable being needed.
|
||||
include(cmake/boost.cmake)
|
||||
include(cmake/pugixml.cmake)
|
||||
if(UNIX)
|
||||
# Rely on PugiXML compiled with OpenImageIO
|
||||
else()
|
||||
include(cmake/pugixml.cmake)
|
||||
endif()
|
||||
if((NOT APPLE) OR ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64"))
|
||||
include(cmake/ispc.cmake)
|
||||
include(cmake/openimagedenoise.cmake)
|
||||
|
@@ -42,14 +42,8 @@ if(UNIX)
  endforeach()

  if(APPLE)
    # Homebrew has different default locations for ARM and Intel macOS.
    if("${CMAKE_HOST_SYSTEM_PROCESSOR}" STREQUAL "arm64")
      set(HOMEBREW_LOCATION "/opt/homebrew")
    else()
      set(HOMEBREW_LOCATION "/usr/local")
    endif()
    if(NOT EXISTS "${HOMEBREW_LOCATION}/opt/bison/bin/bison")
      string(APPEND _software_missing " bison")
    if(NOT EXISTS "/usr/local/opt/bison/bin/bison")
      set(_software_missing "${_software_missing} bison")
    endif()
  endif()
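
An equivalent shell probe for the bison check above, using the same per-architecture Homebrew prefix and keg path as the CMake code:

    if [ "$(uname -m)" = "arm64" ]; then HOMEBREW_LOCATION=/opt/homebrew; else HOMEBREW_LOCATION=/usr/local; fi
    test -x "$HOMEBREW_LOCATION/opt/bison/bin/bison" || echo "missing software: bison (try: brew install bison)"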
@@ -17,14 +17,13 @@
|
||||
# ***** END GPL LICENSE BLOCK *****
|
||||
|
||||
set(CLANG_EXTRA_ARGS
|
||||
-DLLVM_DIR="${LIBDIR}/llvm/lib/cmake/llvm/"
|
||||
-DCLANG_PATH_TO_LLVM_SOURCE=${BUILD_DIR}/ll/src/ll
|
||||
-DCLANG_PATH_TO_LLVM_BUILD=${LIBDIR}/llvm
|
||||
-DLLVM_USE_CRT_RELEASE=MD
|
||||
-DLLVM_USE_CRT_DEBUG=MDd
|
||||
-DLLVM_CONFIG=${LIBDIR}/llvm/bin/llvm-config
|
||||
)
|
||||
|
||||
set(BUILD_CLANG_TOOLS OFF)
|
||||
|
||||
if(WIN32)
|
||||
set(CLANG_GENERATOR "Ninja")
|
||||
else()
|
||||
@@ -32,32 +31,11 @@ else()
|
||||
endif()
|
||||
|
||||
if(APPLE)
|
||||
set(BUILD_CLANG_TOOLS ON)
|
||||
set(CLANG_EXTRA_ARGS ${CLANG_EXTRA_ARGS}
|
||||
-DLIBXML2_LIBRARY=${LIBDIR}/xml2/lib/libxml2.a
|
||||
)
|
||||
endif()
|
||||
|
||||
if(BUILD_CLANG_TOOLS)
|
||||
# ExternalProject_Add does not allow multiple tarballs to be
|
||||
# downloaded. Work around this by having an empty build action
|
||||
# for the extra tools, and referring the clang build to the location
|
||||
# of the clang-tools-extra source.
|
||||
ExternalProject_Add(external_clang_tools
|
||||
URL ${CLANG_TOOLS_URI}
|
||||
DOWNLOAD_DIR ${DOWNLOAD_DIR}
|
||||
URL_HASH MD5=${CLANG_TOOLS_HASH}
|
||||
INSTALL_DIR ${LIBDIR}/clang_tools
|
||||
PREFIX ${BUILD_DIR}/clang_tools
|
||||
CONFIGURE_COMMAND echo "."
|
||||
BUILD_COMMAND echo "."
|
||||
INSTALL_COMMAND echo "."
|
||||
)
|
||||
list(APPEND CLANG_EXTRA_ARGS
|
||||
-DLLVM_EXTERNAL_CLANG_TOOLS_EXTRA_SOURCE_DIR=${BUILD_DIR}/clang_tools/src/external_clang_tools/
|
||||
)
|
||||
endif()
|
||||
|
||||
ExternalProject_Add(external_clang
|
||||
URL ${CLANG_URI}
|
||||
DOWNLOAD_DIR ${DOWNLOAD_DIR}
|
||||
@@ -87,14 +65,6 @@ add_dependencies(
|
||||
ll
|
||||
)
|
||||
|
||||
if(BUILD_CLANG_TOOLS)
|
||||
# `external_clang_tools` is for downloading the source, not compiling it.
|
||||
add_dependencies(
|
||||
external_clang
|
||||
external_clang_tools
|
||||
)
|
||||
endif()
|
||||
|
||||
# We currently do not build libxml2 on Windows.
|
||||
if(NOT WIN32)
|
||||
add_dependencies(
|
||||
|
@@ -98,10 +98,6 @@ harvest(jpg/include jpeg/include "*.h")
|
||||
harvest(jpg/lib jpeg/lib "libjpeg.a")
|
||||
harvest(lame/lib ffmpeg/lib "*.a")
|
||||
harvest(clang/bin llvm/bin "clang-format")
|
||||
if(BUILD_CLANG_TOOLS)
|
||||
harvest(clang/bin llvm/bin "clang-tidy")
|
||||
harvest(clang/share/clang llvm/share "run-clang-tidy.py")
|
||||
endif()
|
||||
harvest(clang/include llvm/include "*")
|
||||
harvest(llvm/include llvm/include "*")
|
||||
harvest(llvm/bin llvm/bin "llvm-config")
|
||||
@@ -150,8 +146,10 @@ harvest(openjpeg/lib openjpeg/lib "*.a")
|
||||
harvest(opensubdiv/include opensubdiv/include "*.h")
|
||||
harvest(opensubdiv/lib opensubdiv/lib "*.a")
|
||||
harvest(openvdb/include/openvdb openvdb/include/openvdb "*.h")
|
||||
if(WITH_NANOVDB)
|
||||
harvest(openvdb/nanovdb nanovdb/include/nanovdb "*.h")
|
||||
endif()
|
||||
harvest(openvdb/lib openvdb/lib "*.a")
|
||||
harvest(nanovdb/nanovdb nanovdb/include/nanovdb "*.h")
|
||||
harvest(xr_openxr_sdk/include/openxr xr_openxr_sdk/include/openxr "*.h")
|
||||
harvest(xr_openxr_sdk/lib xr_openxr_sdk/lib "*.a")
|
||||
harvest(osl/bin osl/bin "oslc")
|
||||
@@ -160,8 +158,6 @@ harvest(osl/lib osl/lib "*.a")
|
||||
harvest(osl/shaders osl/shaders "*.h")
|
||||
harvest(png/include png/include "*.h")
|
||||
harvest(png/lib png/lib "*.a")
|
||||
harvest(pugixml/include pugixml/include "*.hpp")
|
||||
harvest(pugixml/lib pugixml/lib "*.a")
|
||||
harvest(python/bin python/bin "python${PYTHON_SHORT_VERSION}m")
|
||||
harvest(python/include python/include "*h")
|
||||
harvest(python/lib python/lib "*")
|
||||
|
@@ -25,13 +25,8 @@ if(WIN32)
|
||||
elseif(APPLE)
|
||||
# Use bison installed via Homebrew.
|
||||
# The one which comes which Xcode toolset is too old.
|
||||
if("${CMAKE_HOST_SYSTEM_PROCESSOR}" STREQUAL "arm64")
|
||||
set(HOMEBREW_LOCATION "/opt/homebrew")
|
||||
else()
|
||||
set(HOMEBREW_LOCATION "/usr/local")
|
||||
endif()
|
||||
set(ISPC_EXTRA_ARGS_APPLE
|
||||
-DBISON_EXECUTABLE=${HOMEBREW_LOCATION}/opt/bison/bin/bison
|
||||
-DBISON_EXECUTABLE=/usr/local/opt/bison/bin/bison
|
||||
)
|
||||
elseif(UNIX)
|
||||
set(ISPC_EXTRA_ARGS_UNIX
|
||||
|
@@ -1,54 +0,0 @@
|
||||
# ***** BEGIN GPL LICENSE BLOCK *****
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ***** END GPL LICENSE BLOCK *****
|
||||
|
||||
set(NANOVDB_EXTRA_ARGS
|
||||
# NanoVDB is header-only, so only need the install target
|
||||
-DNANOVDB_BUILD_UNITTESTS=OFF
|
||||
-DNANOVDB_BUILD_EXAMPLES=OFF
|
||||
-DNANOVDB_BUILD_BENCHMARK=OFF
|
||||
-DNANOVDB_BUILD_DOCS=OFF
|
||||
-DNANOVDB_BUILD_TOOLS=OFF
|
||||
-DNANOVDB_CUDA_KEEP_PTX=OFF
|
||||
# Do not need to include any of the dependencies because of this
|
||||
-DNANOVDB_USE_OPENVDB=OFF
|
||||
-DNANOVDB_USE_OPENGL=OFF
|
||||
-DNANOVDB_USE_OPENCL=OFF
|
||||
-DNANOVDB_USE_CUDA=OFF
|
||||
-DNANOVDB_USE_TBB=OFF
|
||||
-DNANOVDB_USE_BLOSC=OFF
|
||||
-DNANOVDB_USE_ZLIB=OFF
|
||||
-DNANOVDB_USE_OPTIX=OFF
|
||||
-DNANOVDB_ALLOW_FETCHCONTENT=OFF
|
||||
)
|
||||
|
||||
ExternalProject_Add(nanovdb
|
||||
URL ${NANOVDB_URI}
|
||||
DOWNLOAD_DIR ${DOWNLOAD_DIR}
|
||||
URL_HASH MD5=${NANOVDB_HASH}
|
||||
PREFIX ${BUILD_DIR}/nanovdb
|
||||
SOURCE_SUBDIR nanovdb
|
||||
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${LIBDIR}/nanovdb ${DEFAULT_CMAKE_FLAGS} ${NANOVDB_EXTRA_ARGS}
|
||||
INSTALL_DIR ${LIBDIR}/nanovdb
|
||||
)
|
||||
|
||||
if(WIN32)
|
||||
ExternalProject_Add_Step(nanovdb after_install
|
||||
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/nanovdb/nanovdb ${HARVEST_TARGET}/nanovdb/include/nanovdb
|
||||
DEPENDEES install
|
||||
)
|
||||
endif()
|
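
The hunk above deletes a standalone ExternalProject build of NanoVDB that is header-only, so only the install target matters; reproducing that configuration by hand would look roughly like this (the source checkout and install prefix are placeholders):

    cmake -S openvdb-src/nanovdb -B nanovdb-build \
      -DCMAKE_INSTALL_PREFIX=/opt/lib/nanovdb \
      -DNANOVDB_BUILD_UNITTESTS=OFF -DNANOVDB_BUILD_EXAMPLES=OFF -DNANOVDB_BUILD_BENCHMARK=OFF \
      -DNANOVDB_BUILD_DOCS=OFF -DNANOVDB_BUILD_TOOLS=OFF -DNANOVDB_CUDA_KEEP_PTX=OFF \
      -DNANOVDB_USE_OPENVDB=OFF -DNANOVDB_USE_OPENGL=OFF -DNANOVDB_USE_OPENCL=OFF \
      -DNANOVDB_USE_CUDA=OFF -DNANOVDB_USE_TBB=OFF -DNANOVDB_USE_BLOSC=OFF \
      -DNANOVDB_USE_ZLIB=OFF -DNANOVDB_USE_OPTIX=OFF -DNANOVDB_ALLOW_FETCHCONTENT=OFF
    cmake --build nanovdb-build --target install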
@@ -112,9 +112,6 @@ set(OPENIMAGEIO_EXTRA_ARGS
|
||||
-DOPENEXR_IEX_LIBRARY=${LIBDIR}/openexr/lib/${LIBPREFIX}Iex${OPENEXR_VERSION_POSTFIX}${LIBEXT}
|
||||
-DOPENEXR_ILMIMF_LIBRARY=${LIBDIR}/openexr/lib/${LIBPREFIX}IlmImf${OPENEXR_VERSION_POSTFIX}${LIBEXT}
|
||||
-DSTOP_ON_WARNING=OFF
|
||||
-DUSE_EXTERNAL_PUGIXML=ON
|
||||
-DPUGIXML_LIBRARY=${LIBDIR}/pugixml/lib/${LIBPREFIX}pugixml${LIBEXT}
|
||||
-DPUGIXML_INCLUDE_DIR=${LIBDIR}/pugixml/include/
|
||||
${WEBP_FLAGS}
|
||||
${OIIO_SIMD_FLAGS}
|
||||
)
|
||||
@@ -137,7 +134,6 @@ add_dependencies(
|
||||
external_jpeg
|
||||
external_boost
|
||||
external_tiff
|
||||
external_pugixml
|
||||
external_openjpeg${OPENJPEG_POSTFIX}
|
||||
${WEBP_DEP}
|
||||
)
|
||||
|
@@ -54,6 +54,20 @@ set(OPENVDB_EXTRA_ARGS
|
||||
-DOPENVDB_CORE_STATIC=${OPENVDB_STATIC}
|
||||
-DOPENVDB_BUILD_BINARIES=Off
|
||||
-DCMAKE_DEBUG_POSTFIX=_d
|
||||
# NanoVDB is header-only, so only need the install target
|
||||
-DNANOVDB_BUILD_UNITTESTS=OFF
|
||||
-DNANOVDB_BUILD_EXAMPLES=OFF
|
||||
-DNANOVDB_BUILD_BENCHMARK=OFF
|
||||
-DNANOVDB_BUILD_DOCS=OFF
|
||||
-DNANOVDB_BUILD_TOOLS=OFF
|
||||
-DNANOVDB_CUDA_KEEP_PTX=OFF
|
||||
-DNANOVDB_USE_OPENGL=OFF
|
||||
-DNANOVDB_USE_OPENGL=OFF
|
||||
-DNANOVDB_USE_CUDA=OFF
|
||||
-DNANOVDB_USE_TBB=OFF
|
||||
-DNANOVDB_USE_OPTIX=OFF
|
||||
-DNANOVDB_USE_OPENVDB=OFF
|
||||
-DNANOVDB_ALLOW_FETCHCONTENT=OFF
|
||||
)
|
||||
|
||||
if(WIN32)
|
||||
@@ -74,12 +88,18 @@ else()
|
||||
)
|
||||
endif()
|
||||
|
||||
if(WITH_NANOVDB)
|
||||
set(OPENVDB_PATCH_FILE openvdb_nanovdb.diff)
|
||||
else()
|
||||
set(OPENVDB_PATCH_FILE openvdb.diff)
|
||||
endif()
|
||||
|
||||
ExternalProject_Add(openvdb
|
||||
URL ${OPENVDB_URI}
|
||||
DOWNLOAD_DIR ${DOWNLOAD_DIR}
|
||||
URL_HASH MD5=${OPENVDB_HASH}
|
||||
PREFIX ${BUILD_DIR}/openvdb
|
||||
PATCH_COMMAND ${PATCH_CMD} -p 1 -d ${BUILD_DIR}/openvdb/src/openvdb < ${PATCH_DIR}/openvdb.diff
|
||||
PATCH_COMMAND ${PATCH_CMD} -p 1 -d ${BUILD_DIR}/openvdb/src/openvdb < ${PATCH_DIR}/${OPENVDB_PATCH_FILE}
|
||||
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${LIBDIR}/openvdb ${DEFAULT_CMAKE_FLAGS} ${OPENVDB_EXTRA_ARGS}
|
||||
INSTALL_DIR ${LIBDIR}/openvdb
|
||||
)
|
||||
@@ -101,6 +121,12 @@ if(WIN32)
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${LIBDIR}/openvdb/bin/openvdb.dll ${HARVEST_TARGET}/openvdb/bin/openvdb.dll
|
||||
DEPENDEES install
|
||||
)
|
||||
if(WITH_NANOVDB)
|
||||
ExternalProject_Add_Step(openvdb nanovdb_install
|
||||
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/openvdb/nanovdb ${HARVEST_TARGET}/nanovdb/include/nanovdb
|
||||
DEPENDEES after_install
|
||||
)
|
||||
endif()
|
||||
endif()
|
||||
if(BUILD_MODE STREQUAL Debug)
|
||||
ExternalProject_Add_Step(openvdb after_install
|
||||
|
@@ -21,6 +21,7 @@ if(WIN32)
|
||||
endif()
|
||||
option(WITH_WEBP "Enable building of oiio with webp support" OFF)
|
||||
option(WITH_BOOST_PYTHON "Enable building of boost with python support" OFF)
|
||||
option(WITH_NANOVDB "Enable building of OpenVDB with NanoVDB included" OFF)
|
||||
set(MAKE_THREADS 1 CACHE STRING "Number of threads to run make with")
|
||||
|
||||
if(NOT BUILD_MODE)
|
||||
@@ -56,7 +57,7 @@ if(WIN32)
|
||||
if(MSVC_VERSION GREATER 1909)
|
||||
set(COMMON_MSVC_FLAGS "/Wv:18") #some deps with warnings as error aren't quite ready for dealing with the new 2017 warnings.
|
||||
endif()
|
||||
string(APPEND COMMON_MSVC_FLAGS " /bigobj")
|
||||
set(COMMON_MSVC_FLAGS "${COMMON_MSVC_FLAGS} /bigobj")
|
||||
if(WITH_OPTIMIZED_DEBUG)
|
||||
set(BLENDER_CMAKE_C_FLAGS_DEBUG "/MDd ${COMMON_MSVC_FLAGS} /O2 /Ob2 /DNDEBUG /DPSAPI_VERSION=1 /DOIIO_STATIC_BUILD /DTINYFORMAT_ALLOW_WCHAR_STRINGS")
|
||||
else()
|
||||
|
@@ -78,10 +78,14 @@ set(OSL_EXTRA_ARGS
|
||||
-DINSTALL_DOCS=OFF
|
||||
${OSL_SIMD_FLAGS}
|
||||
-DPARTIO_LIBRARIES=
|
||||
-DPUGIXML_HOME=${LIBDIR}/pugixml
|
||||
)
|
||||
|
||||
if(APPLE)
|
||||
if(WIN32)
|
||||
set(OSL_EXTRA_ARGS
|
||||
${OSL_EXTRA_ARGS}
|
||||
-DPUGIXML_HOME=${LIBDIR}/pugixml
|
||||
)
|
||||
elseif(APPLE)
|
||||
# Make symbol hiding consistent with OIIO which defaults to OFF,
|
||||
# avoids linker warnings on macOS
|
||||
set(OSL_EXTRA_ARGS
|
||||
@@ -110,9 +114,17 @@ add_dependencies(
|
||||
external_zlib
|
||||
external_flexbison
|
||||
external_openimageio
|
||||
external_pugixml
|
||||
)
|
||||
|
||||
if(UNIX)
|
||||
# Rely on PugiXML compiled with OpenImageIO
|
||||
else()
|
||||
add_dependencies(
|
||||
external_osl
|
||||
external_pugixml
|
||||
)
|
||||
endif()
|
||||
|
||||
if(WIN32)
|
||||
if(BUILD_MODE STREQUAL Release)
|
||||
ExternalProject_Add_Step(external_osl after_install
|
||||
|
@@ -30,13 +30,13 @@ ExternalProject_Add(external_pugixml
|
||||
if(WIN32)
|
||||
if(BUILD_MODE STREQUAL Release)
|
||||
ExternalProject_Add_Step(external_pugixml after_install
|
||||
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/pugixml ${HARVEST_TARGET}/pugixml
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${LIBDIR}/pugixml/lib/pugixml.lib ${HARVEST_TARGET}/osl/lib/pugixml.lib
|
||||
DEPENDEES install
|
||||
)
|
||||
endif()
|
||||
if(BUILD_MODE STREQUAL Debug)
|
||||
ExternalProject_Add_Step(external_pugixml after_install
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${LIBDIR}/pugixml/lib/pugixml.lib ${HARVEST_TARGET}/pugixml/lib/pugixml_d.lib
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${LIBDIR}/pugixml/lib/pugixml.lib ${HARVEST_TARGET}/osl/lib/pugixml_d.lib
|
||||
DEPENDEES install
|
||||
)
|
||||
endif()
|
||||
|
@@ -120,9 +120,6 @@ set(LLVM_HASH 31eb9ce73dd2a0f8dcab8319fb03f8fc)
|
||||
set(CLANG_URI https://github.com/llvm/llvm-project/releases/download/llvmorg-${LLVM_VERSION}/clang-${LLVM_VERSION}.src.tar.xz)
|
||||
set(CLANG_HASH 13468e4a44940efef1b75e8641752f90)
|
||||
|
||||
set(CLANG_TOOLS_URI https://github.com/llvm/llvm-project/releases/download/llvmorg-${LLVM_VERSION}/clang-tools-extra-${LLVM_VERSION}.src.tar.xz)
|
||||
set(CLANG_TOOLS_HASH c76293870b564c6a7968622b475b7646)
|
||||
|
||||
set(OPENMP_URI https://github.com/llvm/llvm-project/releases/download/llvmorg-${LLVM_VERSION}/openmp-${LLVM_VERSION}.src.tar.xz)
|
||||
set(OPENMP_HASH 6eade16057edbdecb3c4eef9daa2bfcf)
|
||||
|
||||
@@ -148,13 +145,15 @@ set(TBB_VERSION 2019_U9)
|
||||
set(TBB_URI https://github.com/oneapi-src/oneTBB/archive/${TBB_VERSION}.tar.gz)
|
||||
set(TBB_HASH 26263622e9187212ec240dcf01b66207)
|
||||
|
||||
set(OPENVDB_VERSION 7.0.0)
|
||||
set(OPENVDB_URI https://github.com/AcademySoftwareFoundation/openvdb/archive/v${OPENVDB_VERSION}.tar.gz)
|
||||
set(OPENVDB_HASH fd6c4f168282f7e0e494d290cd531fa8)
|
||||
|
||||
set(NANOVDB_GIT_UID e62f7a0bf1e27397223c61ddeaaf57edf111b77f)
|
||||
set(NANOVDB_URI https://github.com/AcademySoftwareFoundation/openvdb/archive/${NANOVDB_GIT_UID}.tar.gz)
|
||||
set(NANOVDB_HASH 90919510bc6ccd630fedc56f748cb199)
|
||||
if(WITH_NANOVDB)
|
||||
set(OPENVDB_GIT_UID e62f7a0bf1e27397223c61ddeaaf57edf111b77f)
|
||||
set(OPENVDB_URI https://github.com/AcademySoftwareFoundation/openvdb/archive/${OPENVDB_GIT_UID}.tar.gz)
|
||||
set(OPENVDB_HASH 90919510bc6ccd630fedc56f748cb199)
|
||||
else()
|
||||
set(OPENVDB_VERSION 7.0.0)
|
||||
set(OPENVDB_URI https://github.com/AcademySoftwareFoundation/openvdb/archive/v${OPENVDB_VERSION}.tar.gz)
|
||||
set(OPENVDB_HASH fd6c4f168282f7e0e494d290cd531fa8)
|
||||
endif()
|
||||
|
||||
set(IDNA_VERSION 2.9)
|
||||
set(CHARDET_VERSION 3.0.4)
|
||||
|
@@ -51,7 +51,7 @@ ARGS=$( \
|
||||
getopt \
|
||||
-o s:i:t:h \
|
||||
--long source:,install:,tmp:,info:,threads:,help,show-deps,no-sudo,no-build,no-confirm,\
|
||||
with-all,with-opencollada,with-jack,with-embree,with-oidn,with-nanovdb,\
|
||||
with-all,with-opencollada,with-jack,with-embree,with-oidn,\
|
||||
ver-ocio:,ver-oiio:,ver-llvm:,ver-osl:,ver-osd:,ver-openvdb:,ver-xr-openxr:,\
|
||||
force-all,force-python,force-numpy,force-boost,force-tbb,\
|
||||
force-ocio,force-openexr,force-oiio,force-llvm,force-osl,force-osd,force-openvdb,\
|
||||
@@ -151,9 +151,6 @@ ARGUMENTS_INFO="\"COMMAND LINE ARGUMENTS:
    --with-oidn
        Build and install the OpenImageDenoise libraries.

    --with-nanovdb
        Build and install the NanoVDB branch of OpenVDB (instead of official release of OpenVDB).

    --with-jack
        Install the jack libraries.
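
A usage sketch of the flag documented above; the script path assumes the usual location of install_deps.sh inside the Blender source tree:

    ./build_files/build_environment/install_deps.sh --with-all --with-nanovdb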
@@ -438,7 +435,7 @@ _with_built_openexr=false
|
||||
|
||||
OIIO_VERSION="2.1.15.0"
|
||||
OIIO_VERSION_SHORT="2.1"
|
||||
OIIO_VERSION_MIN="2.1.12"
|
||||
OIIO_VERSION_MIN="1.8"
|
||||
OIIO_VERSION_MAX="3.0"
|
||||
OIIO_FORCE_BUILD=false
|
||||
OIIO_FORCE_REBUILD=false
|
||||
@@ -679,10 +676,6 @@ while true; do
|
||||
--with-oidn)
|
||||
WITH_OIDN=true; shift; continue
|
||||
;;
|
||||
--with-nanovdb)
|
||||
WITH_NANOVDB=true;
|
||||
shift; continue
|
||||
;;
|
||||
--with-jack)
|
||||
WITH_JACK=true; shift; continue;
|
||||
;;
|
||||
@@ -964,11 +957,6 @@ if [ "$WITH_ALL" = true -a "$OIDN_SKIP" = false ]; then
|
||||
fi
|
||||
if [ "$WITH_ALL" = true ]; then
|
||||
WITH_JACK=true
|
||||
WITH_NANOVDB=true
|
||||
fi
|
||||
|
||||
if [ "$WITH_NANOVDB" = true ]; then
|
||||
OPENVDB_FORCE_BUILD=true
|
||||
fi
|
||||
|
||||
|
||||
@@ -1041,15 +1029,11 @@ OSD_SOURCE=( "https://github.com/PixarAnimationStudios/OpenSubdiv/archive/v${OSD
|
||||
|
||||
OPENVDB_USE_REPO=false
|
||||
OPENVDB_BLOSC_SOURCE=( "https://github.com/Blosc/c-blosc/archive/v${OPENVDB_BLOSC_VERSION}.tar.gz" )
|
||||
OPENVDB_SOURCE=( "https://github.com/AcademySoftwareFoundation/openvdb/archive/v${OPENVDB_VERSION}.tar.gz" )
|
||||
#~ OPENVDB_SOURCE_REPO=( "https://github.com/AcademySoftwareFoundation/openvdb.git" )
|
||||
OPENVDB_SOURCE=( "https://github.com/dreamworksanimation/openvdb/archive/v${OPENVDB_VERSION}.tar.gz" )
|
||||
#~ OPENVDB_SOURCE_REPO=( "https:///dreamworksanimation/openvdb.git" )
|
||||
#~ OPENVDB_SOURCE_REPO_UID="404659fffa659da075d1c9416e4fc939139a84ee"
|
||||
#~ OPENVDB_SOURCE_REPO_BRANCH="dev"
|
||||
|
||||
NANOVDB_USE_REPO=false
|
||||
NANOVDB_SOURCE_REPO_UID="e62f7a0bf1e27397223c61ddeaaf57edf111b77f"
|
||||
NANOVDB_SOURCE=( "https://github.com/AcademySoftwareFoundation/openvdb/archive/${NANOVDB_SOURCE_REPO_UID}.tar.gz" )
|
||||
|
||||
ALEMBIC_USE_REPO=false
|
||||
ALEMBIC_SOURCE=( "https://github.com/alembic/alembic/archive/${ALEMBIC_VERSION}.tar.gz" )
|
||||
# ALEMBIC_SOURCE_REPO=( "https://github.com/alembic/alembic.git" )
|
||||
@@ -2086,7 +2070,7 @@ compile_OIIO() {
|
||||
cmake_d="$cmake_d -D USE_OPENCV=OFF"
|
||||
cmake_d="$cmake_d -D BUILD_TESTING=OFF"
|
||||
cmake_d="$cmake_d -D OIIO_BUILD_TESTS=OFF"
|
||||
cmake_d="$cmake_d -D OIIO_BUILD_TOOLS=ON"
|
||||
cmake_d="$cmake_d -D OIIO_BUILD_TOOLS=OFF"
|
||||
cmake_d="$cmake_d -D TXT2MAN="
|
||||
#cmake_d="$cmake_d -D CMAKE_EXPORT_COMPILE_COMMANDS=ON"
|
||||
#cmake_d="$cmake_d -D CMAKE_VERBOSE_MAKEFILE=ON"
|
||||
@@ -2610,115 +2594,11 @@ compile_BLOSC() {
|
||||
# ----------------------------------------------------------------------------
|
||||
# Build OpenVDB
|
||||
|
||||
_init_nanovdb() {
|
||||
_src=$SRC/openvdb-$OPENVDB_VERSION/nanovdb
|
||||
_inst=$INST/nanovdb-$OPENVDB_VERSION_SHORT
|
||||
_inst_shortcut=$INST/nanovdb
|
||||
}
|
||||
|
||||
_update_deps_nanovdb() {
|
||||
:
|
||||
}
|
||||
|
||||
clean_nanovdb() {
|
||||
_init_nanovdb
|
||||
if [ -d $_inst ]; then
|
||||
_update_deps_nanovdb
|
||||
fi
|
||||
_git=true # Mere trick to prevent clean from removing $_src...
|
||||
_clean
|
||||
}
|
||||
|
||||
install_NanoVDB() {
|
||||
# To be changed each time we make edits that would modify the compiled results!
|
||||
nanovdb_magic=1
|
||||
_init_nanovdb
|
||||
|
||||
# Clean install if needed!
|
||||
magic_compile_check nanovdb-$OPENVDB_VERSION $nanovdb_magic
|
||||
if [ $? -eq 1 ]; then
|
||||
clean_nanovdb
|
||||
fi
|
||||
|
||||
if [ ! -d $_inst ]; then
|
||||
INFO "Installing NanoVDB v$OPENVDB_VERSION"
|
||||
_is_building=true
|
||||
|
||||
# Rebuild dependencies as well!
|
||||
_update_deps_nanovdb
|
||||
|
||||
prepare_inst
|
||||
|
||||
if [ ! -d $_src ]; then
|
||||
ERROR "NanoVDB not found in openvdb-$OPENVDB_VERSION ($_src), exiting"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Always refresh the whole build!
|
||||
if [ -d build ]; then
|
||||
rm -rf build
|
||||
fi
|
||||
mkdir build
|
||||
cd build
|
||||
|
||||
cmake_d="-D CMAKE_BUILD_TYPE=Release"
|
||||
cmake_d="$cmake_d -D CMAKE_INSTALL_PREFIX=$_inst"
|
||||
|
||||
# NanoVDB is header-only, so only need the install target
|
||||
cmake_d="$cmake_d -D NANOVDB_BUILD_UNITTESTS=OFF"
|
||||
cmake_d="$cmake_d -D NANOVDB_BUILD_EXAMPLES=OFF"
|
||||
cmake_d="$cmake_d -D NANOVDB_BUILD_BENCHMARK=OFF"
|
||||
cmake_d="$cmake_d -D NANOVDB_BUILD_DOCS=OFF"
|
||||
cmake_d="$cmake_d -D NANOVDB_BUILD_TOOLS=OFF"
|
||||
cmake_d="$cmake_d -D NANOVDB_CUDA_KEEP_PTX=OFF"
|
||||
|
||||
# Do not need to include any of the dependencies because of this
|
||||
cmake_d="$cmake_d -D NANOVDB_USE_OPENVDB=OFF"
|
||||
cmake_d="$cmake_d -D NANOVDB_USE_OPENGL=OFF"
|
||||
cmake_d="$cmake_d -D NANOVDB_USE_OPENCL=OFF"
|
||||
cmake_d="$cmake_d -D NANOVDB_USE_CUDA=OFF"
|
||||
cmake_d="$cmake_d -D NANOVDB_USE_TBB=OFF"
|
||||
cmake_d="$cmake_d -D NANOVDB_USE_BLOSC=OFF"
|
||||
cmake_d="$cmake_d -D NANOVDB_USE_ZLIB=OFF"
|
||||
cmake_d="$cmake_d -D NANOVDB_USE_OPTIX=OFF"
|
||||
cmake_d="$cmake_d -D NANOVDB_ALLOW_FETCHCONTENT=OFF"
|
||||
|
||||
cmake $cmake_d $_src
|
||||
|
||||
make -j$THREADS install
|
||||
make clean
|
||||
|
||||
#~ mkdir -p $_inst
|
||||
#~ cp -r $_src/include $_inst/include
|
||||
|
||||
if [ -d $_inst ]; then
|
||||
_create_inst_shortcut
|
||||
else
|
||||
ERROR "NanoVDB-v$OPENVDB_VERSION failed to install, exiting"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
magic_compile_set nanovdb-$OPENVDB_VERSION $nanovdb_magic
|
||||
|
||||
cd $CWD
|
||||
INFO "Done compiling NanoVDB-v$OPENVDB_VERSION!"
|
||||
_is_building=false
|
||||
else
|
||||
INFO "Own NanoVDB-v$OPENVDB_VERSION is up to date, nothing to do!"
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
_init_openvdb() {
|
||||
_src=$SRC/openvdb-$OPENVDB_VERSION
|
||||
_git=false
|
||||
_inst=$INST/openvdb-$OPENVDB_VERSION_SHORT
|
||||
_inst_shortcut=$INST/openvdb
|
||||
|
||||
_openvdb_source=$OPENVDB_SOURCE
|
||||
if [ "$WITH_NANOVDB" = true ]; then
|
||||
_openvdb_source=$NANOVDB_SOURCE
|
||||
fi
|
||||
}
|
||||
|
||||
_update_deps_openvdb() {
|
||||
@@ -2743,7 +2623,7 @@ compile_OPENVDB() {
PRINT ""

# To be changed each time we make edits that would modify the compiled result!
openvdb_magic=2
openvdb_magic=1
_init_openvdb

# Clean install if needed!
@@ -2753,7 +2633,7 @@ compile_OPENVDB() {
fi

if [ ! -d $_inst ]; then
INFO "Building OpenVDB-$OPENVDB_VERSION (with NanoVDB: $WITH_NANOVDB)"
INFO "Building OpenVDB-$OPENVDB_VERSION"
_is_building=true

# Rebuild dependencies as well!
@@ -2761,17 +2641,12 @@ compile_OPENVDB() {

prepare_inst

if [ ! -d $_src ]; then
if [ ! -d $_src -o true ]; then
mkdir -p $SRC
download _openvdb_source[@] "$_src.tar.gz"
download OPENVDB_SOURCE[@] "$_src.tar.gz"

INFO "Unpacking OpenVDB-$OPENVDB_VERSION"
if [ "$WITH_NANOVDB" = true ]; then
tar -C $SRC --transform "s,(.*/?)openvdb-$NANOVDB_SOURCE_REPO_UID[^/]*(.*),\1openvdb-$OPENVDB_VERSION\2,x" \
-xf $_src.tar.gz
else
tar -C $SRC -xf $_src.tar.gz
fi
tar -C $SRC -xf $_src.tar.gz
fi

cd $_src
@@ -2785,40 +2660,33 @@ compile_OPENVDB() {
#~ git reset --hard
#~ fi

# Always refresh the whole build!
if [ -d build ]; then
rm -rf build
fi
mkdir build
cd build
# Source builds here
cd openvdb

cmake_d="-D CMAKE_BUILD_TYPE=Release"
cmake_d="$cmake_d -D CMAKE_INSTALL_PREFIX=$_inst"
cmake_d="$cmake_d -D USE_STATIC_DEPENDENCIES=OFF"
cmake_d="$cmake_d -D OPENVDB_BUILD_BINARIES=OFF"
make_d="DESTDIR=$_inst"
make_d="$make_d HDSO=/usr"

if [ -d $INST/boost ]; then
cmake_d="$cmake_d -D BOOST_ROOT=$INST/boost"
cmake_d="$cmake_d -D Boost_USE_MULTITHREADED=ON"
cmake_d="$cmake_d -D Boost_NO_SYSTEM_PATHS=ON"
cmake_d="$cmake_d -D Boost_NO_BOOST_CMAKE=ON"
make_d="$make_d BOOST_INCL_DIR=$INST/boost/include BOOST_LIB_DIR=$INST/boost/lib"
fi
if [ -d $INST/tbb ]; then
cmake_d="$cmake_d -D TBB_ROOT=$INST/tbb"
make_d="$make_d TBB_ROOT=$INST/tbb TBB_USE_STATIC_LIBS=OFF"
fi

if [ "$_with_built_openexr" = true ]; then
cmake_d="$cmake_d -D IlmBase_ROOT=$INST/openexr"
cmake_d="$cmake_d -D OpenEXR_ROOT=$INST/openexr"
make_d="$make_d ILMBASE_INCL_DIR=$INST/openexr/include ILMBASE_LIB_DIR=$INST/openexr/lib"
make_d="$make_d EXR_INCL_DIR=$INST/openexr/include EXR_LIB_DIR=$INST/openexr/lib"
INFO "ILMBASE_HOME=$INST/openexr"
fi

if [ -d $INST/blosc ]; then
cmake_d="$cmake_d -D Blosc_ROOT=$INST/blosc"
make_d="$make_d BLOSC_INCL_DIR=$INST/blosc/include BLOSC_LIB_DIR=$INST/blosc/lib"
fi

cmake $cmake_d ..

make -j$THREADS install
# Build without log4cplus, glfw, python module & docs
make_d="$make_d LOG4CPLUS_INCL_DIR= GLFW_INCL_DIR= PYTHON_VERSION= DOXYGEN="

make -j$THREADS lib $make_d install
make clean

if [ -d $_inst ]; then
@@ -2839,10 +2707,6 @@ compile_OPENVDB() {
fi

run_ldconfig "openvdb"

if [ "$WITH_NANOVDB" = true ]; then
install_NanoVDB
fi
}

# ----------------------------------------------------------------------------
@@ -4072,7 +3936,7 @@ install_DEB() {
else
check_package_version_ge_lt_DEB libopenimageio-dev $OIIO_VERSION_MIN $OIIO_VERSION_MAX
if [ $? -eq 0 -a "$_with_built_openexr" = false ]; then
install_packages_DEB libopenimageio-dev openimageio-tools
install_packages_DEB libopenimageio-dev
clean_OIIO
else
compile_OIIO
@@ -4714,13 +4578,13 @@ install_RPM() {
INFO "Forced OpenImageIO building, as requested..."
compile_OIIO
else
check_package_version_ge_lt_RPM OpenImageIO-devel $OIIO_VERSION_MIN $OIIO_VERSION_MAX
if [ $? -eq 0 -a $_with_built_openexr == false ]; then
install_packages_RPM OpenImageIO-devel OpenImageIO-utils
clean_OIIO
else
#check_package_version_ge_lt_RPM OpenImageIO-devel $OIIO_VERSION_MIN $OIIO_VERSION_MAX
#if [ $? -eq 0 -a $_with_built_openexr == false ]; then
# install_packages_RPM OpenImageIO-devel
# clean_OIIO
#else
compile_OIIO
fi
#fi
fi


@@ -5827,13 +5691,6 @@ print_info() {
PRINT " $_1"
_buildargs="$_buildargs $_1"
fi
if [ -d $INST/nanovdb ]; then
_1="-D WITH_NANOVDB=ON"
_2="-D NANOVDB_ROOT_DIR=$INST/nanovdb"
PRINT " $_1"
PRINT " $_2"
_buildargs="$_buildargs $_1 $_2"
fi
fi

if [ "$WITH_OPENCOLLADA" = true ]; then

@@ -34,24 +34,3 @@ diff -Naur orig/src/include/OpenImageIO/platform.h external_openimageio/src/incl
# include <windows.h>
#endif

diff -Naur orig/src/libutil/ustring.cpp external_openimageio/src/libutil/ustring.cpp
--- orig/src/libutil/ustring.cpp 2020-05-11 05:43:52.000000000 +0200
+++ external_openimageio/src/libutil/ustring.cpp 2020-11-26 12:06:08.000000000 +0100
@@ -337,6 +337,8 @@
// the std::string to make it point to our chars! In such a case, the
// destructor will be careful not to allow a deallocation.

+ // Disable internal std::string for Apple silicon based Macs
+#if !(defined(__APPLE__) && defined(__arm64__))
#if defined(__GNUC__) && !defined(_LIBCPP_VERSION) \
&& defined(_GLIBCXX_USE_CXX11_ABI) && _GLIBCXX_USE_CXX11_ABI
// NEW gcc ABI
@@ -382,7 +384,7 @@
return;
}
#endif
-
+#endif
// Remaining cases - just assign the internal string. This may result
// in double allocation for the chars. If you care about that, do
// something special for your platform, much like we did for gcc and

135 build_files/build_environment/patches/openvdb_nanovdb.diff (new file)
@@ -0,0 +1,135 @@
|
||||
diff -Naur orig/cmake/FindIlmBase.cmake openvdb/cmake/FindIlmBase.cmake
|
||||
--- orig/cmake/FindIlmBase.cmake 2019-12-06 12:11:33 -0700
|
||||
+++ openvdb/cmake/FindIlmBase.cmake 2020-08-12 12:48:44 -0600
|
||||
@@ -217,6 +217,8 @@
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".lib")
|
||||
endif()
|
||||
list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES "${_IlmBase_Version_Suffix}.lib")
|
||||
+ list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES "_s.lib")
|
||||
+ list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES "_s_d.lib")
|
||||
else()
|
||||
if(ILMBASE_USE_STATIC_LIBS)
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
|
||||
diff -Naur orig/cmake/FindOpenEXR.cmake openvdb/cmake/FindOpenEXR.cmake
|
||||
--- orig/cmake/FindOpenEXR.cmake 2019-12-06 12:11:33 -0700
|
||||
+++ openvdb/cmake/FindOpenEXR.cmake 2020-08-12 12:48:44 -0600
|
||||
@@ -210,6 +210,8 @@
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".lib")
|
||||
endif()
|
||||
list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES "${_OpenEXR_Version_Suffix}.lib")
|
||||
+ list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES "_s.lib")
|
||||
+ list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES "_s_d.lib")
|
||||
else()
|
||||
if(OPENEXR_USE_STATIC_LIBS)
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
|
||||
diff -Naur orig/openvdb/openvdb/CMakeLists.txt openvdb/openvdb/openvdb/CMakeLists.txt
|
||||
--- orig/openvdb/openvdb/CMakeLists.txt 2019-12-06 12:11:33 -0700
|
||||
+++ openvdb/openvdb/openvdb/CMakeLists.txt 2020-08-12 14:12:26 -0600
|
||||
@@ -105,7 +105,9 @@
|
||||
# http://boost.2283326.n4.nabble.com/CMake-config-scripts-broken-in-1-70-td4708957.html
|
||||
# https://github.com/boostorg/boost_install/commit/160c7cb2b2c720e74463865ef0454d4c4cd9ae7c
|
||||
set(BUILD_SHARED_LIBS ON)
|
||||
- set(Boost_USE_STATIC_LIBS OFF)
|
||||
+ if(NOT WIN32) # blender links boost statically on windows
|
||||
+ set(Boost_USE_STATIC_LIBS OFF)
|
||||
+ endif()
|
||||
endif()
|
||||
|
||||
find_package(Boost ${MINIMUM_BOOST_VERSION} REQUIRED COMPONENTS iostreams system)
|
||||
@@ -193,6 +195,7 @@
|
||||
if(OPENVDB_DISABLE_BOOST_IMPLICIT_LINKING)
|
||||
add_definitions(-DBOOST_ALL_NO_LIB)
|
||||
endif()
|
||||
+ add_definitions(-D__TBB_NO_IMPLICIT_LINKAGE -DOPENVDB_OPENEXR_STATICLIB)
|
||||
endif()
|
||||
|
||||
# @todo Should be target definitions
|
||||
@@ -383,7 +386,12 @@
|
||||
# imported targets.
|
||||
|
||||
if(OPENVDB_CORE_SHARED)
|
||||
- add_library(openvdb_shared SHARED ${OPENVDB_LIBRARY_SOURCE_FILES})
|
||||
+ if(WIN32)
|
||||
+ configure_file(version.rc.in ${CMAKE_CURRENT_BINARY_DIR}/version.rc @ONLY)
|
||||
+ add_library(openvdb_shared SHARED ${OPENVDB_LIBRARY_SOURCE_FILES} ${CMAKE_CURRENT_BINARY_DIR}/version.rc)
|
||||
+ else()
|
||||
+ add_library(openvdb_shared SHARED ${OPENVDB_LIBRARY_SOURCE_FILES})
|
||||
+ endif()
|
||||
endif()
|
||||
|
||||
if(OPENVDB_CORE_STATIC)
|
||||
diff -Naur orig/openvdb/openvdb/version.rc.in openvdb/openvdb/openvdb/version.rc.in
|
||||
--- orig/openvdb/openvdb/version.rc.in 1969-12-31 17:00:00 -0700
|
||||
+++ openvdb/openvdb/openvdb/version.rc.in 2020-08-12 14:15:01 -0600
|
||||
@@ -0,0 +1,48 @@
|
||||
+#include <winver.h>
|
||||
+
|
||||
+#define VER_FILEVERSION @OpenVDB_MAJOR_VERSION@,@OpenVDB_MINOR_VERSION@,@OpenVDB_PATCH_VERSION@,0
|
||||
+#define VER_FILEVERSION_STR "@OpenVDB_MAJOR_VERSION@.@OpenVDB_MINOR_VERSION@.@OpenVDB_PATCH_VERSION@.0\0"
|
||||
+
|
||||
+#define VER_PRODUCTVERSION @OpenVDB_MAJOR_VERSION@,@OpenVDB_MINOR_VERSION@,@OpenVDB_PATCH_VERSION@,0
|
||||
+#define VER_PRODUCTVERSION_STR "@OpenVDB_MAJOR_VERSION@.@OpenVDB_MINOR_VERSION@\0"
|
||||
+
|
||||
+#ifndef DEBUG
|
||||
+#define VER_DEBUG 0
|
||||
+#else
|
||||
+#define VER_DEBUG VS_FF_DEBUG
|
||||
+#endif
|
||||
+
|
||||
+VS_VERSION_INFO VERSIONINFO
|
||||
+FILEVERSION VER_FILEVERSION
|
||||
+PRODUCTVERSION VER_PRODUCTVERSION
|
||||
+FILEFLAGSMASK VS_FFI_FILEFLAGSMASK
|
||||
+FILEFLAGS (VER_DEBUG)
|
||||
+FILEOS VOS__WINDOWS32
|
||||
+FILETYPE VFT_DLL
|
||||
+FILESUBTYPE VFT2_UNKNOWN
|
||||
+BEGIN
|
||||
+ BLOCK "StringFileInfo"
|
||||
+ BEGIN
|
||||
+ BLOCK "040904E4"
|
||||
+ BEGIN
|
||||
+ VALUE "FileDescription", "OpenVDB"
|
||||
+ VALUE "FileVersion", VER_FILEVERSION_STR
|
||||
+ VALUE "InternalName", "OpenVDB"
|
||||
+ VALUE "ProductName", "OpenVDB"
|
||||
+ VALUE "ProductVersion", VER_PRODUCTVERSION_STR
|
||||
+ END
|
||||
+ END
|
||||
+
|
||||
+ BLOCK "VarFileInfo"
|
||||
+ BEGIN
|
||||
+ /* The following line should only be modified for localized versions. */
|
||||
+ /* It consists of any number of WORD,WORD pairs, with each pair */
|
||||
+ /* describing a language,codepage combination supported by the file. */
|
||||
+ /* */
|
||||
+ /* For example, a file might have values "0x409,1252" indicating that it */
|
||||
+ /* supports English language (0x409) in the Windows ANSI codepage (1252). */
|
||||
+
|
||||
+ VALUE "Translation", 0x409, 1252
|
||||
+
|
||||
+ END
|
||||
+END
|
||||
diff -Naur openvdb-original/CMakeLists.txt openvdb/CMakeLists.txt
|
||||
--- openvdb-original/CMakeLists.txt 2020-08-27 03:34:02.000000000 +0200
|
||||
+++ openvdb/CMakeLists.txt 2020-09-02 10:56:21.665735244 +0200
|
||||
@@ -68,6 +68,7 @@
|
||||
option(OPENVDB_INSTALL_HOUDINI_PYTHONRC [=[Install a Houdini startup script that sets
|
||||
the visibilty of OpenVDB nodes and their native equivalents.]=] OFF)
|
||||
option(OPENVDB_BUILD_MAYA_PLUGIN "Build the Maya plugin" OFF)
|
||||
+option(OPENVDB_BUILD_NANOVDB "Build nanovdb" ON)
|
||||
option(OPENVDB_ENABLE_RPATH "Build with RPATH information" ON)
|
||||
option(OPENVDB_CXX_STRICT "Enable or disable pre-defined compiler warnings" OFF)
|
||||
option(OPENVDB_CODE_COVERAGE "Enable code coverage. This also overrides CMAKE_BUILD_TYPE to Debug" OFF)
|
||||
@@ -740,6 +741,10 @@
|
||||
add_subdirectory(openvdb_maya)
|
||||
endif()
|
||||
|
||||
+if(OPENVDB_BUILD_NANOVDB)
|
||||
+ add_subdirectory(nanovdb)
|
||||
+endif()
|
||||
+
|
||||
##########################################################################
|
||||
|
||||
add_custom_target(uninstall
|
||||
|
@@ -18,72 +18,12 @@

# <pep8 compliant>

import dataclasses
import json
import os

from pathlib import Path
from typing import Optional

import codesign.util as util


class ArchiveStateError(Exception):
message: str

def __init__(self, message):
self.message = message
super().__init__(self.message)


@dataclasses.dataclass
class ArchiveState:
"""
Additional information (state) of the archive

Includes information like expected file size of the archive file in the case
the archive file is expected to be successfully created.

If the archive can not be created, this state will contain error message
indicating details of error.
"""

# Size in bytes of the corresponding archive.
file_size: Optional[int] = None

# Non-empty value indicates that error has happenned.
error_message: str = ''

def has_error(self) -> bool:
"""
Check whether the archive is at error state
"""

return self.error_message

def serialize_to_string(self) -> str:
payload = dataclasses.asdict(self)
return json.dumps(payload, sort_keys=True, indent=4)

def serialize_to_file(self, filepath: Path) -> None:
string = self.serialize_to_string()
filepath.write_text(string)

@classmethod
def deserialize_from_string(cls, string: str) -> 'ArchiveState':
try:
object_as_dict = json.loads(string)
except json.decoder.JSONDecodeError:
raise ArchiveStateError('Error parsing JSON')

return cls(**object_as_dict)

@classmethod
def deserialize_from_file(cls, filepath: Path):
string = filepath.read_text()
return cls.deserialize_from_string(string)

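A minimal usage sketch of the ArchiveState dataclass shown above; the indicator path is hypothetical and only illustrates the JSON round trip between the build and signing machines.

from pathlib import Path

# Hypothetical indicator file; ArchiveState is the dataclass defined above.
indicator = Path('/tmp/archive_for_signing.tar.gz.ready')

# Producer side: record the expected archive size (or an error message).
state = ArchiveState(file_size=1024, error_message='')
state.serialize_to_file(indicator)

# Consumer side: read the state back and branch on the error flag.
read_back = ArchiveState.deserialize_from_file(indicator)
if read_back.has_error():
    print(f'Codesign failed: {read_back.error_message}')
else:
    print(f'Expecting an archive of {read_back.file_size} bytes')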
class ArchiveWithIndicator:
"""
The idea of this class is to wrap around logic which takes care of keeping
@@ -139,19 +79,6 @@ class ArchiveWithIndicator:
if not self.ready_indicator_filepath.exists():
return False

try:
archive_state = ArchiveState.deserialize_from_file(
self.ready_indicator_filepath)
except ArchiveStateError as error:
print(f'Error deserializing archive state: {error.message}')
return False

if archive_state.has_error():
# If the error did happen during codesign procedure there will be no
# corresponding archive file.
# The caller code will deal with the error check further.
return True

# Sometimes on macOS indicator file appears prior to the actual archive
# despite the order of creation and os.sync() used in tag_ready().
# So consider archive not ready if there is an indicator without an
@@ -161,11 +88,23 @@ class ArchiveWithIndicator:
f'({self.archive_filepath}) to appear.')
return False

# Read archive size from indicator/
#
# Assume that file is either empty or is fully written. This is being checked
# by performing ValueError check since empty string will throw this exception
# when attempted to be converted to int.
expected_archive_size_str = self.ready_indicator_filepath.read_text()
try:
expected_archive_size = int(expected_archive_size_str)
except ValueError:
print(f'Invalid archive size "{expected_archive_size_str}"')
return False

# Wait for until archive is fully stored.
actual_archive_size = self.archive_filepath.stat().st_size
if actual_archive_size != archive_state.file_size:
if actual_archive_size != expected_archive_size:
print('Partial/invalid archive size (expected '
f'{archive_state.file_size} got {actual_archive_size})')
f'{expected_archive_size} got {actual_archive_size})')
return False

return True
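The size comparison above exists to avoid treating a partially written archive as ready; a stripped-down sketch of that indicator-file handshake, with hypothetical paths and a simple polling consumer:

import time
from pathlib import Path

archive = Path('/tmp/archive_for_signing.tar.gz')            # hypothetical path
indicator = Path('/tmp/archive_for_signing.tar.gz.ready')    # hypothetical path

def wait_until_ready(timeout=60.0, poll=1.0) -> bool:
    """Poll until the indicator exists and the archive matches the advertised size."""
    start = time.monotonic()
    while time.monotonic() - start < timeout:
        if indicator.exists() and archive.exists():
            try:
                expected = int(indicator.read_text())
            except ValueError:
                expected = -1  # indicator not fully written yet
            if expected == archive.stat().st_size:
                return True
        time.sleep(poll)
    return False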
@@ -190,7 +129,7 @@ class ArchiveWithIndicator:
print(f'Exception checking archive: {e}')
return False

def tag_ready(self, error_message='') -> None:
def tag_ready(self) -> None:
"""
Tag the archive as ready by creating the corresponding indication file.

@@ -199,34 +138,13 @@ class ArchiveWithIndicator:
If it is violated, an assert will fail.
"""
assert not self.is_ready()

# Try the best to make sure everything is synced to the file system,
# to avoid any possibility of stamp appearing on a network share prior to
# an actual file.
if util.get_current_platform() != util.Platform.WINDOWS:
os.sync()

archive_size = -1
if self.archive_filepath.exists():
archive_size = self.archive_filepath.stat().st_size

archive_info = ArchiveState(
file_size=archive_size, error_message=error_message)

self.ready_indicator_filepath.write_text(
archive_info.serialize_to_string())

def get_state(self) -> ArchiveState:
"""
Get state object for this archive

The state is read from the corresponding state file.
"""

try:
return ArchiveState.deserialize_from_file(self.ready_indicator_filepath)
except ArchiveStateError as error:
return ArchiveState(error_message=f'Error in information format: {error}')
archive_size = self.archive_filepath.stat().st_size
self.ready_indicator_filepath.write_text(str(archive_size))

def clean(self) -> None:
"""
|
||||
|
@@ -58,7 +58,6 @@ import codesign.util as util
|
||||
|
||||
from codesign.absolute_and_relative_filename import AbsoluteAndRelativeFileName
|
||||
from codesign.archive_with_indicator import ArchiveWithIndicator
|
||||
from codesign.exception import CodeSignException
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -146,13 +145,13 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
|
||||
def cleanup_environment_for_builder(self) -> None:
|
||||
# TODO(sergey): Revisit need of cleaning up the existing files.
|
||||
# In practice it wasn't so helpful, and with multiple clients
|
||||
# talking to the same server it becomes even more tricky.
|
||||
# talking to the same server it becomes even mor etricky.
|
||||
pass
|
||||
|
||||
def cleanup_environment_for_signing_server(self) -> None:
|
||||
# TODO(sergey): Revisit need of cleaning up the existing files.
|
||||
# In practice it wasn't so helpful, and with multiple clients
|
||||
# talking to the same server it becomes even more tricky.
|
||||
# talking to the same server it becomes even mor etricky.
|
||||
pass
|
||||
|
||||
def generate_request_id(self) -> str:
|
||||
@@ -221,15 +220,9 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
|
||||
"""
|
||||
Wait until archive with signed files is available.
|
||||
|
||||
Will only return if the archive with signed files is available. If there
|
||||
was an error during code sign procedure the SystemExit exception is
|
||||
raised, with the message set to the error reported by the codesign
|
||||
server.
|
||||
|
||||
Will only wait for the configured time. If that time exceeds and there
|
||||
is still no responce from the signing server the application will exit
|
||||
with a non-zero exit code.
|
||||
|
||||
"""
|
||||
|
||||
signed_archive_info = self.signed_archive_info_for_request_id(
|
||||
@@ -243,17 +236,9 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
|
||||
time.sleep(1)
|
||||
time_slept_in_seconds = time.monotonic() - time_start
|
||||
if time_slept_in_seconds > timeout_in_seconds:
|
||||
signed_archive_info.clean()
|
||||
unsigned_archive_info.clean()
|
||||
raise SystemExit("Signing server didn't finish signing in "
|
||||
f'{timeout_in_seconds} seconds, dying :(')
|
||||
|
||||
archive_state = signed_archive_info.get_state()
|
||||
if archive_state.has_error():
|
||||
signed_archive_info.clean()
|
||||
unsigned_archive_info.clean()
|
||||
raise SystemExit(
|
||||
f'Error happenned during codesign procedure: {archive_state.error_message}')
|
||||
f"{timeout_in_seconds} seconds, dying :(")
|
||||
|
||||
def copy_signed_files_to_directory(
|
||||
self, signed_dir: Path, destination_dir: Path) -> None:
|
||||
@@ -411,13 +396,7 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
|
||||
temp_dir)
|
||||
|
||||
logger_server.info('Signing all requested files...')
|
||||
try:
|
||||
self.sign_all_files(files)
|
||||
except CodeSignException as error:
|
||||
signed_archive_info.tag_ready(error_message=error.message)
|
||||
unsigned_archive_info.clean()
|
||||
logger_server.info('Signing is complete with errors.')
|
||||
return
|
||||
self.sign_all_files(files)
|
||||
|
||||
logger_server.info('Packing signed files...')
|
||||
pack_files(files=files,
|
||||
|
@@ -1,26 +0,0 @@
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
class CodeSignException(Exception):
|
||||
message: str
|
||||
|
||||
def __init__(self, message):
|
||||
self.message = message
|
||||
super().__init__(self.message)
|
@@ -33,7 +33,6 @@ from buildbot_utils import Builder
|
||||
|
||||
from codesign.absolute_and_relative_filename import AbsoluteAndRelativeFileName
|
||||
from codesign.base_code_signer import BaseCodeSigner
|
||||
from codesign.exception import CodeSignException
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger_server = logger.getChild('server')
|
||||
@@ -46,10 +45,6 @@ EXTENSIONS_TO_BE_SIGNED = {'.dylib', '.so', '.dmg'}
|
||||
NAME_PREFIXES_TO_BE_SIGNED = {'python'}
|
||||
|
||||
|
||||
class NotarizationException(CodeSignException):
|
||||
pass
|
||||
|
||||
|
||||
def is_file_from_bundle(file: AbsoluteAndRelativeFileName) -> bool:
|
||||
"""
|
||||
Check whether file is coming from an .app bundle
|
||||
@@ -191,7 +186,7 @@ class MacOSCodeSigner(BaseCodeSigner):
|
||||
file.absolute_filepath]
|
||||
self.run_command_or_mock(command, util.Platform.MACOS)
|
||||
|
||||
def codesign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
|
||||
def codesign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> bool:
|
||||
"""
|
||||
Run codesign tool on all eligible files in the given list.
|
||||
|
||||
@@ -230,6 +225,8 @@ class MacOSCodeSigner(BaseCodeSigner):
|
||||
file_index + 1, num_signed_files,
|
||||
signed_file.relative_filepath)
|
||||
|
||||
return True
|
||||
|
||||
def codesign_bundles(
|
||||
self, files: List[AbsoluteAndRelativeFileName]) -> None:
|
||||
"""
|
||||
@@ -276,6 +273,8 @@ class MacOSCodeSigner(BaseCodeSigner):
|
||||
|
||||
files.extend(extra_files)
|
||||
|
||||
return True
|
||||
|
||||
############################################################################
|
||||
# Notarization.
|
||||
|
||||
@@ -335,40 +334,7 @@ class MacOSCodeSigner(BaseCodeSigner):
|
||||
logger_server.error('xcrun command did not report RequestUUID')
|
||||
return None
|
||||
|
||||
def notarize_review_status(self, xcrun_output: str) -> bool:
|
||||
"""
|
||||
Review status returned by xcrun's notarization info
|
||||
|
||||
Returns truth if the notarization process has finished.
|
||||
If there are errors during notarization, a NotarizationException()
|
||||
exception is thrown with status message from the notarial office.
|
||||
"""
|
||||
|
||||
# Parse status and message
|
||||
status = xcrun_field_value_from_output('Status', xcrun_output)
|
||||
status_message = xcrun_field_value_from_output(
|
||||
'Status Message', xcrun_output)
|
||||
|
||||
if status == 'success':
|
||||
logger_server.info(
|
||||
'Package successfully notarized: %s', status_message)
|
||||
return True
|
||||
|
||||
if status == 'invalid':
|
||||
logger_server.error(xcrun_output)
|
||||
logger_server.error(
|
||||
'Package notarization has failed: %s', status_message)
|
||||
raise NotarizationException(status_message)
|
||||
|
||||
if status == 'in progress':
|
||||
return False
|
||||
|
||||
logger_server.info(
|
||||
'Unknown notarization status %s (%s)', status, status_message)
|
||||
|
||||
return False
|
||||
|
||||
def notarize_wait_result(self, request_uuid: str) -> None:
|
||||
def notarize_wait_result(self, request_uuid: str) -> bool:
|
||||
"""
|
||||
Wait for until notarial office have a reply
|
||||
"""
|
||||
@@ -385,11 +351,29 @@ class MacOSCodeSigner(BaseCodeSigner):
|
||||
timeout_in_seconds = self.config.MACOS_NOTARIZE_TIMEOUT_IN_SECONDS
|
||||
|
||||
while True:
|
||||
xcrun_output = self.check_output_or_mock(
|
||||
output = self.check_output_or_mock(
|
||||
command, util.Platform.MACOS, allow_nonzero_exit_code=True)
|
||||
# Parse status and message
|
||||
status = xcrun_field_value_from_output('Status', output)
|
||||
status_message = xcrun_field_value_from_output(
|
||||
'Status Message', output)
|
||||
|
||||
if self.notarize_review_status(xcrun_output):
|
||||
break
|
||||
# Review status.
|
||||
if status:
|
||||
if status == 'success':
|
||||
logger_server.info(
|
||||
'Package successfully notarized: %s', status_message)
|
||||
return True
|
||||
elif status == 'invalid':
|
||||
logger_server.error(output)
|
||||
logger_server.error(
|
||||
'Package notarization has failed: %s', status_message)
|
||||
return False
|
||||
elif status == 'in progress':
|
||||
pass
|
||||
else:
|
||||
logger_server.info(
|
||||
'Unknown notarization status %s (%s)', status, status_message)
|
||||
|
||||
logger_server.info('Keep waiting for notarization office.')
|
||||
time.sleep(30)
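The loop above polls the notarization service every 30 seconds until a final status arrives or the configured timeout expires; a generic poll-with-timeout sketch of the same pattern, where check() is a stand-in rather than Apple's actual API:

import time

def poll_until(check, timeout_in_seconds=60 * 60, interval=30):
    """Call check() until it returns a non-None verdict or the timeout expires.

    check is a placeholder for something like "ask xcrun for the notarization
    status and map it to True (success), False (failure) or None (in progress)".
    """
    start = time.monotonic()
    while True:
        verdict = check()
        if verdict is not None:
            return verdict
        if time.monotonic() - start > timeout_in_seconds:
            raise SystemExit('Timed out waiting for the notarization service')
        time.sleep(interval)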
|
||||
@@ -410,6 +394,8 @@ class MacOSCodeSigner(BaseCodeSigner):
|
||||
command = ['xcrun', 'stapler', 'staple', '-v', file.absolute_filepath]
|
||||
self.check_output_or_mock(command, util.Platform.MACOS)
|
||||
|
||||
return True
|
||||
|
||||
def notarize_dmg(self, file: AbsoluteAndRelativeFileName) -> bool:
|
||||
"""
|
||||
Run entire pipeline to get DMG notarized.
|
||||
@@ -428,7 +414,10 @@ class MacOSCodeSigner(BaseCodeSigner):
|
||||
return False
|
||||
|
||||
# Staple.
|
||||
self.notarize_staple(file)
|
||||
if not self.notarize_staple(file):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def notarize_all_dmg(
|
||||
self, files: List[AbsoluteAndRelativeFileName]) -> bool:
|
||||
@@ -443,7 +432,10 @@ class MacOSCodeSigner(BaseCodeSigner):
|
||||
if not self.check_file_is_to_be_signed(file):
|
||||
continue
|
||||
|
||||
self.notarize_dmg(file)
|
||||
if not self.notarize_dmg(file):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
############################################################################
|
||||
# Entry point.
|
||||
@@ -451,6 +443,11 @@ class MacOSCodeSigner(BaseCodeSigner):
|
||||
def sign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
|
||||
# TODO(sergey): Handle errors somehow.
|
||||
|
||||
self.codesign_all_files(files)
|
||||
self.codesign_bundles(files)
|
||||
self.notarize_all_dmg(files)
|
||||
if not self.codesign_all_files(files):
|
||||
return
|
||||
|
||||
if not self.codesign_bundles(files):
|
||||
return
|
||||
|
||||
if not self.notarize_all_dmg(files):
|
||||
return
|
||||
|
@@ -29,7 +29,6 @@ from buildbot_utils import Builder
|
||||
|
||||
from codesign.absolute_and_relative_filename import AbsoluteAndRelativeFileName
|
||||
from codesign.base_code_signer import BaseCodeSigner
|
||||
from codesign.exception import CodeSignException
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger_server = logger.getChild('server')
|
||||
@@ -41,9 +40,6 @@ BLACKLIST_FILE_PREFIXES = (
|
||||
'api-ms-', 'concrt', 'msvcp', 'ucrtbase', 'vcomp', 'vcruntime')
|
||||
|
||||
|
||||
class SigntoolException(CodeSignException):
|
||||
pass
|
||||
|
||||
class WindowsCodeSigner(BaseCodeSigner):
|
||||
def check_file_is_to_be_signed(
|
||||
self, file: AbsoluteAndRelativeFileName) -> bool:
|
||||
@@ -54,46 +50,12 @@ class WindowsCodeSigner(BaseCodeSigner):
|
||||
|
||||
return file.relative_filepath.suffix in EXTENSIONS_TO_BE_SIGNED
|
||||
|
||||
|
||||
def get_sign_command_prefix(self) -> List[str]:
|
||||
return [
|
||||
'signtool', 'sign', '/v',
|
||||
'/f', self.config.WIN_CERTIFICATE_FILEPATH,
|
||||
'/tr', self.config.WIN_TIMESTAMP_AUTHORITY_URL]
|
||||
|
||||
|
||||
def run_codesign_tool(self, filepath: Path) -> None:
|
||||
command = self.get_sign_command_prefix() + [filepath]
|
||||
|
||||
try:
|
||||
codesign_output = self.check_output_or_mock(command, util.Platform.WINDOWS)
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise SigntoolException(f'Error running signtool {e}')
|
||||
|
||||
logger_server.info(f'signtool output:\n{codesign_output}')
|
||||
|
||||
got_number_of_success = False
|
||||
|
||||
for line in codesign_output.split('\n'):
|
||||
line_clean = line.strip()
|
||||
line_clean_lower = line_clean.lower()
|
||||
|
||||
if line_clean_lower.startswith('number of warnings') or \
|
||||
line_clean_lower.startswith('number of errors'):
|
||||
number = int(line_clean_lower.split(':')[1])
|
||||
if number != 0:
|
||||
raise SigntoolException('Non-clean success of signtool')
|
||||
|
||||
if line_clean_lower.startswith('number of files successfully signed'):
|
||||
got_number_of_success = True
|
||||
number = int(line_clean_lower.split(':')[1])
|
||||
if number != 1:
|
||||
raise SigntoolException('Signtool did not consider codesign a success')
|
||||
|
||||
if not got_number_of_success:
|
||||
raise SigntoolException('Signtool did not report number of files signed')
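run_codesign_tool() above treats a run as clean only when signtool reports zero warnings, zero errors and exactly one successfully signed file; a small sketch of that summary-line check against invented sample output:

# Minimal sketch of the summary-line check above, run on invented signtool output.
sample_output = """Successfully signed: blender.exe
Number of files successfully Signed: 1
Number of warnings: 0
Number of errors: 0
"""

counters = {}
for line in sample_output.splitlines():
    lowered = line.strip().lower()
    if lowered.startswith('number of'):
        key, _, value = lowered.partition(':')
        counters[key] = int(value)

ok = (counters.get('number of files successfully signed') == 1
      and counters.get('number of warnings', 0) == 0
      and counters.get('number of errors', 0) == 0)
print('clean signtool run' if ok else 'signtool reported problems')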
|
||||
|
||||
|
||||
def sign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
|
||||
# NOTE: Sign files one by one to avoid possible command line length
|
||||
# overflow (which could happen if we ever decide to sign every binary
|
||||
@@ -111,7 +73,12 @@ class WindowsCodeSigner(BaseCodeSigner):
|
||||
file_index + 1, num_files, file.relative_filepath)
|
||||
continue
|
||||
|
||||
command = self.get_sign_command_prefix()
|
||||
command.append(file.absolute_filepath)
|
||||
logger_server.info(
|
||||
'Running signtool command for file [%d/%d] %s...',
|
||||
file_index + 1, num_files, file.relative_filepath)
|
||||
self.run_codesign_tool(file.absolute_filepath)
|
||||
# TODO(sergey): Check the status somehow. With a missing certificate
|
||||
# the command still exists with a zero code.
|
||||
self.run_command_or_mock(command, util.Platform.WINDOWS)
|
||||
# TODO(sergey): Report number of signed and ignored files.
|
||||
|
@@ -43,10 +43,7 @@ find_program(CLANG_TIDY_EXECUTABLE
${_clang_tidy_SEARCH_DIRS}
)

if(CLANG_TIDY_EXECUTABLE AND NOT EXISTS ${CLANG_TIDY_EXECUTABLE})
message(WARNING "Cached or directly specified Clang-Tidy executable does not exist.")
set(CLANG_TIDY_FOUND FALSE)
elseif(CLANG_TIDY_EXECUTABLE)
if(CLANG_TIDY_EXECUTABLE)
# Mark clang-tidy as found.
set(CLANG_TIDY_FOUND TRUE)

@@ -49,7 +49,7 @@ FIND_LIBRARY(PUGIXML_LIBRARY
# handle the QUIETLY and REQUIRED arguments and set PUGIXML_FOUND to TRUE if
# all listed variables are TRUE
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(PugiXML DEFAULT_MSG
FIND_PACKAGE_HANDLE_STANDARD_ARGS(PUGIXML DEFAULT_MSG
PUGIXML_LIBRARY PUGIXML_INCLUDE_DIR)

IF(PUGIXML_FOUND)
|
||||
|
@@ -330,9 +330,6 @@ function(gtest_add_tests)
|
||||
set(gtest_case_name_regex ".*\\( *([A-Za-z_0-9]+) *, *([A-Za-z_0-9]+) *\\).*")
|
||||
set(gtest_test_type_regex "(TYPED_TEST|TEST_?[FP]?)")
|
||||
|
||||
# This will get a filter for each test suite.
|
||||
set(test_filters "")
|
||||
|
||||
foreach(source IN LISTS ARGS_SOURCES)
|
||||
if(NOT ARGS_SKIP_DEPENDENCY)
|
||||
set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS ${source})
|
||||
@@ -379,32 +376,175 @@ function(gtest_add_tests)
|
||||
list(APPEND testList ${ctest_test_name})
|
||||
endif()
|
||||
else()
|
||||
# BLENDER: collect tests named "suite.testcase" as list of "suite.*" filters.
|
||||
string(REGEX REPLACE "\\..*$" "" gtest_suite_name ${gtest_test_name})
|
||||
list(APPEND test_filters "${gtest_suite_name}.*")
|
||||
set(ctest_test_name ${ARGS_TEST_PREFIX}${gtest_test_name}${ARGS_TEST_SUFFIX})
|
||||
add_test(NAME ${ctest_test_name}
|
||||
${workDir}
|
||||
COMMAND ${ARGS_TARGET}
|
||||
--gtest_filter=${gtest_test_name}
|
||||
${ARGS_EXTRA_ARGS}
|
||||
)
|
||||
list(APPEND testList ${ctest_test_name})
|
||||
endif()
|
||||
endforeach()
|
||||
endforeach()
|
||||
|
||||
# Join all found GTest suite names into one big filter.
|
||||
list(REMOVE_DUPLICATES test_filters)
|
||||
list(JOIN test_filters ":" gtest_filter)
|
||||
add_test(NAME ${ARGS_TEST_PREFIX}
|
||||
${workDir}
|
||||
COMMAND ${ARGS_TARGET}
|
||||
--gtest_filter=${gtest_filter}
|
||||
${ARGS_EXTRA_ARGS}
|
||||
)
|
||||
list(APPEND testList ${ARGS_TEST_PREFIX})
|
||||
|
||||
if(ARGS_TEST_LIST)
|
||||
set(${ARGS_TEST_LIST} ${testList} PARENT_SCOPE)
|
||||
endif()
|
||||
|
||||
endfunction()
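The modified gtest_add_tests() above collapses per-test registration into one ctest entry per GTest suite by joining "suite.*" patterns into a single --gtest_filter; the filter construction, expressed as a small Python sketch with invented suite names:

# Collect one "Suite.*" pattern per discovered suite, de-duplicate, then join
# with ':' exactly like list(REMOVE_DUPLICATES)/list(JOIN ... ":") do in CMake.
discovered = ['BLI_math.clamp', 'BLI_math.lerp', 'BKE_fcurve.evaluate']  # invented names
filters = []
for test_name in discovered:
    suite = test_name.split('.', 1)[0]
    pattern = f'{suite}.*'
    if pattern not in filters:
        filters.append(pattern)
gtest_filter = ':'.join(filters)
print(gtest_filter)  # -> "BLI_math.*:BKE_fcurve.*"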
|
||||
|
||||
# BLENDER: remove the discovery function gtest_discover_tests(). It's not used,
|
||||
# as it generates too many test invocations.
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
function(gtest_discover_tests TARGET)
|
||||
cmake_parse_arguments(
|
||||
""
|
||||
"NO_PRETTY_TYPES;NO_PRETTY_VALUES"
|
||||
"TEST_PREFIX;TEST_SUFFIX;WORKING_DIRECTORY;TEST_LIST;DISCOVERY_TIMEOUT;XML_OUTPUT_DIR;DISCOVERY_MODE"
|
||||
"EXTRA_ARGS;PROPERTIES"
|
||||
${ARGN}
|
||||
)
|
||||
|
||||
if(NOT _WORKING_DIRECTORY)
|
||||
set(_WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}")
|
||||
endif()
|
||||
if(NOT _TEST_LIST)
|
||||
set(_TEST_LIST ${TARGET}_TESTS)
|
||||
endif()
|
||||
if(NOT _DISCOVERY_TIMEOUT)
|
||||
set(_DISCOVERY_TIMEOUT 5)
|
||||
endif()
|
||||
if(NOT _DISCOVERY_MODE)
|
||||
if(NOT CMAKE_GTEST_DISCOVER_TESTS_DISCOVERY_MODE)
|
||||
set(CMAKE_GTEST_DISCOVER_TESTS_DISCOVERY_MODE "POST_BUILD")
|
||||
endif()
|
||||
set(_DISCOVERY_MODE ${CMAKE_GTEST_DISCOVER_TESTS_DISCOVERY_MODE})
|
||||
endif()
|
||||
|
||||
get_property(
|
||||
has_counter
|
||||
TARGET ${TARGET}
|
||||
PROPERTY CTEST_DISCOVERED_TEST_COUNTER
|
||||
SET
|
||||
)
|
||||
if(has_counter)
|
||||
get_property(
|
||||
counter
|
||||
TARGET ${TARGET}
|
||||
PROPERTY CTEST_DISCOVERED_TEST_COUNTER
|
||||
)
|
||||
math(EXPR counter "${counter} + 1")
|
||||
else()
|
||||
set(counter 1)
|
||||
endif()
|
||||
set_property(
|
||||
TARGET ${TARGET}
|
||||
PROPERTY CTEST_DISCOVERED_TEST_COUNTER
|
||||
${counter}
|
||||
)
|
||||
|
||||
# Define rule to generate test list for aforementioned test executable
|
||||
# Blender: use _ instead of [] to avoid problems with zsh regex.
|
||||
set(ctest_file_base "${CMAKE_CURRENT_BINARY_DIR}/${TARGET}_${counter}_")
|
||||
set(ctest_include_file "${ctest_file_base}_include.cmake")
|
||||
set(ctest_tests_file "${ctest_file_base}_tests.cmake")
|
||||
get_property(crosscompiling_emulator
|
||||
TARGET ${TARGET}
|
||||
PROPERTY CROSSCOMPILING_EMULATOR
|
||||
)
|
||||
|
||||
if(_DISCOVERY_MODE STREQUAL "POST_BUILD")
|
||||
add_custom_command(
|
||||
TARGET ${TARGET} POST_BUILD
|
||||
BYPRODUCTS "${ctest_tests_file}"
|
||||
COMMAND "${CMAKE_COMMAND}"
|
||||
-D "TEST_TARGET=${TARGET}"
|
||||
-D "TEST_EXECUTABLE=$<TARGET_FILE:${TARGET}>"
|
||||
-D "TEST_EXECUTOR=${crosscompiling_emulator}"
|
||||
-D "TEST_WORKING_DIR=${_WORKING_DIRECTORY}"
|
||||
-D "TEST_EXTRA_ARGS=${_EXTRA_ARGS}"
|
||||
-D "TEST_PROPERTIES=${_PROPERTIES}"
|
||||
-D "TEST_PREFIX=${_TEST_PREFIX}"
|
||||
-D "TEST_SUFFIX=${_TEST_SUFFIX}"
|
||||
-D "NO_PRETTY_TYPES=${_NO_PRETTY_TYPES}"
|
||||
-D "NO_PRETTY_VALUES=${_NO_PRETTY_VALUES}"
|
||||
-D "TEST_LIST=${_TEST_LIST}"
|
||||
-D "CTEST_FILE=${ctest_tests_file}"
|
||||
-D "TEST_DISCOVERY_TIMEOUT=${_DISCOVERY_TIMEOUT}"
|
||||
-D "TEST_XML_OUTPUT_DIR=${_XML_OUTPUT_DIR}"
|
||||
-P "${_GOOGLETEST_DISCOVER_TESTS_SCRIPT}"
|
||||
VERBATIM
|
||||
)
|
||||
|
||||
file(WRITE "${ctest_include_file}"
|
||||
"if(EXISTS \"${ctest_tests_file}\")\n"
|
||||
" include(\"${ctest_tests_file}\")\n"
|
||||
"else()\n"
|
||||
" add_test(${TARGET}_NOT_BUILT ${TARGET}_NOT_BUILT)\n"
|
||||
"endif()\n"
|
||||
)
|
||||
elseif(_DISCOVERY_MODE STREQUAL "PRE_TEST")
|
||||
|
||||
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL
|
||||
PROPERTY GENERATOR_IS_MULTI_CONFIG
|
||||
)
|
||||
|
||||
if(GENERATOR_IS_MULTI_CONFIG)
|
||||
set(ctest_tests_file "${ctest_file_base}_tests-$<CONFIG>.cmake")
|
||||
endif()
|
||||
|
||||
string(CONCAT ctest_include_content
|
||||
"if(EXISTS \"$<TARGET_FILE:${TARGET}>\")" "\n"
|
||||
" if(\"$<TARGET_FILE:${TARGET}>\" IS_NEWER_THAN \"${ctest_tests_file}\")" "\n"
|
||||
" include(\"${_GOOGLETEST_DISCOVER_TESTS_SCRIPT}\")" "\n"
|
||||
" gtest_discover_tests_impl(" "\n"
|
||||
" TEST_EXECUTABLE" " [==[" "$<TARGET_FILE:${TARGET}>" "]==]" "\n"
|
||||
" TEST_EXECUTOR" " [==[" "${crosscompiling_emulator}" "]==]" "\n"
|
||||
" TEST_WORKING_DIR" " [==[" "${_WORKING_DIRECTORY}" "]==]" "\n"
|
||||
" TEST_EXTRA_ARGS" " [==[" "${_EXTRA_ARGS}" "]==]" "\n"
|
||||
" TEST_PROPERTIES" " [==[" "${_PROPERTIES}" "]==]" "\n"
|
||||
" TEST_PREFIX" " [==[" "${_TEST_PREFIX}" "]==]" "\n"
|
||||
" TEST_SUFFIX" " [==[" "${_TEST_SUFFIX}" "]==]" "\n"
|
||||
" NO_PRETTY_TYPES" " [==[" "${_NO_PRETTY_TYPES}" "]==]" "\n"
|
||||
" NO_PRETTY_VALUES" " [==[" "${_NO_PRETTY_VALUES}" "]==]" "\n"
|
||||
" TEST_LIST" " [==[" "${_TEST_LIST}" "]==]" "\n"
|
||||
" CTEST_FILE" " [==[" "${ctest_tests_file}" "]==]" "\n"
|
||||
" TEST_DISCOVERY_TIMEOUT" " [==[" "${_DISCOVERY_TIMEOUT}" "]==]" "\n"
|
||||
" TEST_XML_OUTPUT_DIR" " [==[" "${_XML_OUTPUT_DIR}" "]==]" "\n"
|
||||
" )" "\n"
|
||||
" endif()" "\n"
|
||||
" include(\"${ctest_tests_file}\")" "\n"
|
||||
"else()" "\n"
|
||||
" add_test(${TARGET}_NOT_BUILT ${TARGET}_NOT_BUILT)" "\n"
|
||||
"endif()" "\n"
|
||||
)
|
||||
|
||||
if(GENERATOR_IS_MULTI_CONFIG)
|
||||
foreach(_config ${CMAKE_CONFIGURATION_TYPES})
|
||||
file(GENERATE OUTPUT "${ctest_file_base}_include-${_config}.cmake" CONTENT "${ctest_include_content}" CONDITION $<CONFIG:${_config}>)
|
||||
endforeach()
|
||||
file(WRITE "${ctest_include_file}" "include(\"${ctest_file_base}_include-\${CTEST_CONFIGURATION_TYPE}.cmake\")")
|
||||
else()
|
||||
file(GENERATE OUTPUT "${ctest_file_base}_include.cmake" CONTENT "${ctest_include_content}")
|
||||
file(WRITE "${ctest_include_file}" "include(\"${ctest_file_base}_include.cmake\")")
|
||||
endif()
|
||||
|
||||
else()
|
||||
message(FATAL_ERROR "Unknown DISCOVERY_MODE: ${_DISCOVERY_MODE}")
|
||||
endif()
|
||||
|
||||
# Add discovered tests to directory TEST_INCLUDE_FILES
|
||||
set_property(DIRECTORY
|
||||
APPEND PROPERTY TEST_INCLUDE_FILES "${ctest_include_file}"
|
||||
)
|
||||
|
||||
endfunction()
|
||||
|
||||
###############################################################################
|
||||
|
||||
set(_GOOGLETEST_DISCOVER_TESTS_SCRIPT
|
||||
${CMAKE_CURRENT_LIST_DIR}/GTestAddTests.cmake
|
||||
)
|
||||
|
||||
# Restore project's policies
|
||||
cmake_policy(POP)
|
||||
|
191 build_files/cmake/Modules/GTestAddTests.cmake (new file)
@@ -0,0 +1,191 @@
|
||||
# Distributed under the OSI-approved BSD 3-Clause License,
|
||||
# see accompanying file BSD-3-Clause-license.txt for details.
|
||||
|
||||
# Blender: disable ASAN leak detection when trying to discover tests.
|
||||
set(ENV{ASAN_OPTIONS} "detect_leaks=0")
|
||||
|
||||
cmake_minimum_required(VERSION ${CMAKE_VERSION})
|
||||
|
||||
# Overwrite possibly existing ${_CTEST_FILE} with empty file
|
||||
set(flush_tests_MODE WRITE)
|
||||
|
||||
# Flushes script to ${_CTEST_FILE}
|
||||
macro(flush_script)
|
||||
file(${flush_tests_MODE} "${_CTEST_FILE}" "${script}")
|
||||
set(flush_tests_MODE APPEND)
|
||||
|
||||
set(script "")
|
||||
endmacro()
|
||||
|
||||
# Flushes tests_buffer to tests
|
||||
macro(flush_tests_buffer)
|
||||
list(APPEND tests "${tests_buffer}")
|
||||
set(tests_buffer "")
|
||||
endmacro()
|
||||
|
||||
macro(add_command NAME)
|
||||
set(_args "")
|
||||
foreach(_arg ${ARGN})
|
||||
if(_arg MATCHES "[^-./:a-zA-Z0-9_]")
|
||||
string(APPEND _args " [==[${_arg}]==]")
|
||||
else()
|
||||
string(APPEND _args " ${_arg}")
|
||||
endif()
|
||||
endforeach()
|
||||
string(APPEND script "${NAME}(${_args})\n")
|
||||
string(LENGTH "${script}" _script_len)
|
||||
if(${_script_len} GREATER "50000")
|
||||
flush_script()
|
||||
endif()
|
||||
# Unsets macro local variables to prevent leakage outside of this macro.
|
||||
unset(_args)
|
||||
unset(_script_len)
|
||||
endmacro()
|
||||
|
||||
function(gtest_discover_tests_impl)
|
||||
|
||||
cmake_parse_arguments(
|
||||
""
|
||||
""
|
||||
"NO_PRETTY_TYPES;NO_PRETTY_VALUES;TEST_EXECUTABLE;TEST_EXECUTOR;TEST_WORKING_DIR;TEST_PREFIX;TEST_SUFFIX;TEST_LIST;CTEST_FILE;TEST_DISCOVERY_TIMEOUT;TEST_XML_OUTPUT_DIR"
|
||||
"TEST_EXTRA_ARGS;TEST_PROPERTIES"
|
||||
${ARGN}
|
||||
)
|
||||
|
||||
set(prefix "${_TEST_PREFIX}")
|
||||
set(suffix "${_TEST_SUFFIX}")
|
||||
set(extra_args ${_TEST_EXTRA_ARGS})
|
||||
set(properties ${_TEST_PROPERTIES})
|
||||
set(script)
|
||||
set(suite)
|
||||
set(tests)
|
||||
set(tests_buffer)
|
||||
|
||||
# Run test executable to get list of available tests
|
||||
if(NOT EXISTS "${_TEST_EXECUTABLE}")
|
||||
message(FATAL_ERROR
|
||||
"Specified test executable does not exist.\n"
|
||||
" Path: '${_TEST_EXECUTABLE}'"
|
||||
)
|
||||
endif()
|
||||
execute_process(
|
||||
COMMAND ${_TEST_EXECUTOR} "${_TEST_EXECUTABLE}" --gtest_list_tests
|
||||
WORKING_DIRECTORY "${_TEST_WORKING_DIR}"
|
||||
TIMEOUT ${_TEST_DISCOVERY_TIMEOUT}
|
||||
OUTPUT_VARIABLE output
|
||||
RESULT_VARIABLE result
|
||||
)
|
||||
if(NOT ${result} EQUAL 0)
|
||||
string(REPLACE "\n" "\n " output "${output}")
|
||||
message(FATAL_ERROR
|
||||
"Error running test executable.\n"
|
||||
" Path: '${_TEST_EXECUTABLE}'\n"
|
||||
" Result: ${result}\n"
|
||||
" Output:\n"
|
||||
" ${output}\n"
|
||||
)
|
||||
endif()
|
||||
|
||||
# Preserve semicolon in test-parameters
|
||||
string(REPLACE [[;]] [[\;]] output "${output}")
|
||||
string(REPLACE "\n" ";" output "${output}")
|
||||
|
||||
# Parse output
|
||||
foreach(line ${output})
|
||||
# Skip header
|
||||
if(NOT line MATCHES "gtest_main\\.cc")
|
||||
# Do we have a module name or a test name?
|
||||
if(NOT line MATCHES "^ ")
|
||||
# Module; remove trailing '.' to get just the name...
|
||||
string(REGEX REPLACE "\\.( *#.*)?" "" suite "${line}")
|
||||
if(line MATCHES "#" AND NOT _NO_PRETTY_TYPES)
|
||||
string(REGEX REPLACE "/[0-9]\\.+ +#.*= +" "/" pretty_suite "${line}")
|
||||
else()
|
||||
set(pretty_suite "${suite}")
|
||||
endif()
|
||||
string(REGEX REPLACE "^DISABLED_" "" pretty_suite "${pretty_suite}")
|
||||
else()
|
||||
# Test name; strip spaces and comments to get just the name...
|
||||
string(REGEX REPLACE " +" "" test "${line}")
|
||||
if(test MATCHES "#" AND NOT _NO_PRETTY_VALUES)
|
||||
string(REGEX REPLACE "/[0-9]+#GetParam..=" "/" pretty_test "${test}")
|
||||
else()
|
||||
string(REGEX REPLACE "#.*" "" pretty_test "${test}")
|
||||
endif()
|
||||
string(REGEX REPLACE "^DISABLED_" "" pretty_test "${pretty_test}")
|
||||
string(REGEX REPLACE "#.*" "" test "${test}")
|
||||
if(NOT "${_TEST_XML_OUTPUT_DIR}" STREQUAL "")
|
||||
set(TEST_XML_OUTPUT_PARAM "--gtest_output=xml:${_TEST_XML_OUTPUT_DIR}/${prefix}${suite}.${test}${suffix}.xml")
|
||||
else()
|
||||
unset(TEST_XML_OUTPUT_PARAM)
|
||||
endif()
|
||||
|
||||
# sanitize test name for further processing downstream
|
||||
set(testname "${prefix}${pretty_suite}.${pretty_test}${suffix}")
|
||||
# escape \
|
||||
string(REPLACE [[\]] [[\\]] testname "${testname}")
|
||||
# escape ;
|
||||
string(REPLACE [[;]] [[\;]] testname "${testname}")
|
||||
# escape $
|
||||
string(REPLACE [[$]] [[\$]] testname "${testname}")
|
||||
|
||||
# ...and add to script
|
||||
add_command(add_test
|
||||
"${testname}"
|
||||
${_TEST_EXECUTOR}
|
||||
"${_TEST_EXECUTABLE}"
|
||||
"--gtest_filter=${suite}.${test}"
|
||||
"--gtest_also_run_disabled_tests"
|
||||
${TEST_XML_OUTPUT_PARAM}
|
||||
${extra_args}
|
||||
)
|
||||
if(suite MATCHES "^DISABLED" OR test MATCHES "^DISABLED")
|
||||
add_command(set_tests_properties
|
||||
"${testname}"
|
||||
PROPERTIES DISABLED TRUE
|
||||
)
|
||||
endif()
|
||||
add_command(set_tests_properties
|
||||
"${testname}"
|
||||
PROPERTIES
|
||||
WORKING_DIRECTORY "${_TEST_WORKING_DIR}"
|
||||
SKIP_REGULAR_EXPRESSION "\\\\[ SKIPPED \\\\]"
|
||||
${properties}
|
||||
)
|
||||
list(APPEND tests_buffer "${testname}")
|
||||
list(LENGTH tests_buffer tests_buffer_length)
|
||||
if(${tests_buffer_length} GREATER "250")
|
||||
flush_tests_buffer()
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
|
||||
# Create a list of all discovered tests, which users may use to e.g. set
|
||||
# properties on the tests
|
||||
flush_tests_buffer()
|
||||
add_command(set ${_TEST_LIST} ${tests})
|
||||
|
||||
# Write CTest script
|
||||
flush_script()
|
||||
|
||||
endfunction()
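gtest_discover_tests_impl() above parses the output of --gtest_list_tests, where suite lines end with a dot and test names are indented beneath them; the same parsing as a short Python sketch over invented sample output:

# Sample --gtest_list_tests output (invented); suites end with '.', tests are indented.
listing = """BLI_math.
  clamp
  lerp/0  # GetParam() = 1
BKE_fcurve.
  evaluate
"""

tests = []
suite = None
for line in listing.splitlines():
    if not line.strip() or 'gtest_main.cc' in line:
        continue
    if not line.startswith(' '):
        suite = line.split('.')[0]                 # "BLI_math." -> "BLI_math"
    else:
        name = line.strip().split('#')[0].strip()  # drop trailing comments
        tests.append(f'{suite}.{name}')
print(tests)  # -> ['BLI_math.clamp', 'BLI_math.lerp/0', 'BKE_fcurve.evaluate']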
|
||||
|
||||
if(CMAKE_SCRIPT_MODE_FILE)
|
||||
gtest_discover_tests_impl(
|
||||
NO_PRETTY_TYPES ${NO_PRETTY_TYPES}
|
||||
NO_PRETTY_VALUES ${NO_PRETTY_VALUES}
|
||||
TEST_EXECUTABLE ${TEST_EXECUTABLE}
|
||||
TEST_EXECUTOR ${TEST_EXECUTOR}
|
||||
TEST_WORKING_DIR ${TEST_WORKING_DIR}
|
||||
TEST_PREFIX ${TEST_PREFIX}
|
||||
TEST_SUFFIX ${TEST_SUFFIX}
|
||||
TEST_LIST ${TEST_LIST}
|
||||
CTEST_FILE ${CTEST_FILE}
|
||||
TEST_DISCOVERY_TIMEOUT ${TEST_DISCOVERY_TIMEOUT}
|
||||
TEST_XML_OUTPUT_DIR ${TEST_XML_OUTPUT_DIR}
|
||||
TEST_EXTRA_ARGS ${TEST_EXTRA_ARGS}
|
||||
TEST_PROPERTIES ${TEST_PROPERTIES}
|
||||
)
|
||||
endif()
|
@@ -8,17 +8,6 @@
|
||||
#
|
||||
#=============================================================================
|
||||
|
||||
function(GET_BLENDER_TEST_INSTALL_DIR VARIABLE_NAME)
|
||||
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
||||
if(GENERATOR_IS_MULTI_CONFIG)
|
||||
string(REPLACE "\${BUILD_TYPE}" "$<CONFIG>" TEST_INSTALL_DIR ${CMAKE_INSTALL_PREFIX})
|
||||
else()
|
||||
string(REPLACE "\${BUILD_TYPE}" "" TEST_INSTALL_DIR ${CMAKE_INSTALL_PREFIX})
|
||||
endif()
|
||||
set(${VARIABLE_NAME} "${TEST_INSTALL_DIR}" PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
|
||||
macro(BLENDER_SRC_GTEST_EX)
|
||||
if(WITH_GTESTS)
|
||||
set(options SKIP_ADD_TEST)
|
||||
@@ -86,7 +75,13 @@ macro(BLENDER_SRC_GTEST_EX)
|
||||
target_link_libraries(${TARGET_NAME} ${GMP_LIBRARIES})
|
||||
endif()
|
||||
|
||||
GET_BLENDER_TEST_INSTALL_DIR(TEST_INSTALL_DIR)
|
||||
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
||||
if(GENERATOR_IS_MULTI_CONFIG)
|
||||
string(REPLACE "\${BUILD_TYPE}" "$<CONFIG>" TEST_INSTALL_DIR ${CMAKE_INSTALL_PREFIX})
|
||||
else()
|
||||
string(REPLACE "\${BUILD_TYPE}" "" TEST_INSTALL_DIR ${CMAKE_INSTALL_PREFIX})
|
||||
endif()
|
||||
|
||||
set_target_properties(${TARGET_NAME} PROPERTIES
|
||||
RUNTIME_OUTPUT_DIRECTORY "${TESTS_OUTPUT_DIR}"
|
||||
RUNTIME_OUTPUT_DIRECTORY_RELEASE "${TESTS_OUTPUT_DIR}"
|
||||
@@ -102,7 +97,6 @@ macro(BLENDER_SRC_GTEST_EX)
|
||||
set_tests_properties(${TARGET_NAME} PROPERTIES ENVIRONMENT LSAN_OPTIONS=exitcode=0)
|
||||
endif()
|
||||
if(WIN32)
|
||||
set_target_properties(${TARGET_NAME} PROPERTIES VS_GLOBAL_VcpkgEnabled "false")
|
||||
unset(MANIFEST)
|
||||
endif()
|
||||
unset(TEST_INC)
|
||||
|
@@ -128,7 +128,7 @@ if(EXISTS ${SOURCE_DIR}/.git)
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
|
||||
if(NOT _git_changed_files STREQUAL "")
|
||||
string(APPEND MY_WC_BRANCH " (modified)")
|
||||
set(MY_WC_BRANCH "${MY_WC_BRANCH} (modified)")
|
||||
else()
|
||||
# Unpushed commits are also considered local modifications
|
||||
execute_process(COMMAND git log @{u}..
|
||||
@@ -137,7 +137,7 @@ if(EXISTS ${SOURCE_DIR}/.git)
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||
ERROR_QUIET)
|
||||
if(NOT _git_unpushed_log STREQUAL "")
|
||||
string(APPEND MY_WC_BRANCH " (modified)")
|
||||
set(MY_WC_BRANCH "${MY_WC_BRANCH} (modified)")
|
||||
endif()
|
||||
unset(_git_unpushed_log)
|
||||
endif()
|
||||
@@ -161,7 +161,6 @@ file(WRITE buildinfo.h.txt
|
||||
"#define BUILD_BRANCH \"${MY_WC_BRANCH}\"\n"
|
||||
"#define BUILD_DATE \"${BUILD_DATE}\"\n"
|
||||
"#define BUILD_TIME \"${BUILD_TIME}\"\n"
|
||||
"#include \"buildinfo_static.h\"\n"
|
||||
)
|
||||
|
||||
# cleanup
|
||||
|
@@ -1,8 +0,0 @@
|
||||
/* CMake expanded values that won't change between CMake execution (unlike date/time).
|
||||
* This is included by `buildinfo.h` generated by `buildinfo.cmake`. */
|
||||
#define BUILD_PLATFORM "@BUILD_PLATFORM@"
|
||||
#define BUILD_TYPE "@BUILD_TYPE@"
|
||||
#define BUILD_CFLAGS "@BUILD_CFLAGS@"
|
||||
#define BUILD_CXXFLAGS "@BUILD_CXXFLAGS@"
|
||||
#define BUILD_LINKFLAGS "@BUILD_LINKFLAGS@"
|
||||
#define BUILD_SYSTEM "@BUILD_SYSTEM@"
|
@@ -13,7 +13,7 @@ Invocation:
|
||||
export CLANG_BIND_DIR="/dsk/src/llvm/tools/clang/bindings/python"
|
||||
export CLANG_LIB_DIR="/opt/llvm/lib"
|
||||
|
||||
python clang_array_check.py somefile.c -DSOME_DEFINE -I/some/include
|
||||
python2 clang_array_check.py somefile.c -DSOME_DEFINE -I/some/include
|
||||
|
||||
... defines and includes are optional
|
||||
|
||||
@@ -76,32 +76,6 @@ defs_precalc = {
|
||||
"glNormal3bv": {0: 3},
|
||||
"glNormal3iv": {0: 3},
|
||||
"glNormal3sv": {0: 3},
|
||||
|
||||
# GPU immediate mode.
|
||||
"immVertex2iv": {1: 2},
|
||||
|
||||
"immVertex2fv": {1: 2},
|
||||
"immVertex3fv": {1: 3},
|
||||
|
||||
"immAttr2fv": {1: 2},
|
||||
"immAttr3fv": {1: 3},
|
||||
"immAttr4fv": {1: 4},
|
||||
|
||||
"immAttr3ubv": {1: 3},
|
||||
"immAttr4ubv": {1: 4},
|
||||
|
||||
"immUniform2fv": {1: 2},
|
||||
"immUniform3fv": {1: 3},
|
||||
"immUniform4fv": {1: 4},
|
||||
|
||||
"immUniformColor3fv": {0: 3},
|
||||
"immUniformColor4fv": {0: 4},
|
||||
|
||||
"immUniformColor3ubv": {1: 3},
|
||||
"immUniformColor4ubv": {1: 4},
|
||||
|
||||
"immUniformColor3fvAlpha": {0: 3},
|
||||
"immUniformColor4fvAlpha": {0: 4},
|
||||
}
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
@@ -126,8 +100,7 @@ else:
|
||||
if CLANG_LIB_DIR is None:
|
||||
print("$CLANG_LIB_DIR clang lib dir not set")
|
||||
|
||||
if CLANG_BIND_DIR:
|
||||
sys.path.append(CLANG_BIND_DIR)
|
||||
sys.path.append(CLANG_BIND_DIR)
|
||||
|
||||
import clang
|
||||
import clang.cindex
|
||||
@@ -135,8 +108,7 @@ from clang.cindex import (CursorKind,
|
||||
TypeKind,
|
||||
TokenKind)
|
||||
|
||||
if CLANG_LIB_DIR:
|
||||
clang.cindex.Config.set_library_path(CLANG_LIB_DIR)
|
||||
clang.cindex.Config.set_library_path(CLANG_LIB_DIR)
|
||||
|
||||
index = clang.cindex.Index.create()
|
||||
|
||||
|
@@ -32,7 +32,7 @@ CHECKER_IGNORE_PREFIX = [
|
||||
"intern/moto",
|
||||
]
|
||||
|
||||
CHECKER_BIN = "python3"
|
||||
CHECKER_BIN = "python2"
|
||||
|
||||
CHECKER_ARGS = [
|
||||
os.path.join(os.path.dirname(__file__), "clang_array_check.py"),
|
||||
|
@@ -44,7 +44,6 @@ set(WITH_OPENMP ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENSUBDIV ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB_BLOSC ON CACHE BOOL "" FORCE)
|
||||
set(WITH_NANOVDB ON CACHE BOOL "" FORCE)
|
||||
set(WITH_POTRACE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_PYTHON_INSTALL ON CACHE BOOL "" FORCE)
|
||||
set(WITH_QUADRIFLOW ON CACHE BOOL "" FORCE)
|
||||
|
@@ -51,7 +51,6 @@ set(WITH_OPENIMAGEIO OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENMP OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENSUBDIV OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_NANOVDB OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_QUADRIFLOW OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_SDL OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_TBB OFF CACHE BOOL "" FORCE)
|
||||
|
@@ -45,7 +45,6 @@ set(WITH_OPENMP ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENSUBDIV ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB_BLOSC ON CACHE BOOL "" FORCE)
|
||||
set(WITH_NANOVDB ON CACHE BOOL "" FORCE)
|
||||
set(WITH_POTRACE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_PYTHON_INSTALL ON CACHE BOOL "" FORCE)
|
||||
set(WITH_QUADRIFLOW ON CACHE BOOL "" FORCE)
|
||||
|
@@ -28,7 +28,6 @@ set(WITH_OPENCOLLADA OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_INTERNATIONAL OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_BULLET OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_NANOVDB OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_ALEMBIC OFF CACHE BOOL "" FORCE)
|
||||
|
||||
# Depends on Python install, do this to quiet warning.
|
||||
|
@@ -60,19 +60,6 @@ function(list_assert_duplicates
|
||||
unset(_len_after)
|
||||
endfunction()
|
||||
|
||||
# Adds a native path separator to the end of the path:
|
||||
#
|
||||
# - 'example' -> 'example/'
|
||||
# - '/example///' -> '/example/'
|
||||
#
|
||||
macro(path_ensure_trailing_slash
|
||||
path_new path_input
|
||||
)
|
||||
file(TO_NATIVE_PATH "/" _path_sep)
|
||||
string(REGEX REPLACE "[${_path_sep}]+$" "" ${path_new} ${path_input})
|
||||
set(${path_new} "${${path_new}}${_path_sep}")
|
||||
unset(_path_sep)
|
||||
endmacro()
|
||||
|
||||
# foo_bar.spam --> foo_barMySuffix.spam
|
||||
macro(file_suffix
|
||||
@@ -196,7 +183,7 @@ function(blender_user_header_search_paths
|
||||
foreach(_INC ${includes})
|
||||
get_filename_component(_ABS_INC ${_INC} ABSOLUTE)
|
||||
# _ALL_INCS is a space-separated string of file paths in quotes.
|
||||
string(APPEND _ALL_INCS " \"${_ABS_INC}\"")
|
||||
set(_ALL_INCS "${_ALL_INCS} \"${_ABS_INC}\"")
|
||||
endforeach()
|
||||
set_target_properties(${name} PROPERTIES XCODE_ATTRIBUTE_USER_HEADER_SEARCH_PATHS "${_ALL_INCS}")
|
||||
endif()
|
||||
@@ -263,11 +250,11 @@ macro(add_cc_flags_custom_test
|
||||
string(TOUPPER ${name} _name_upper)
|
||||
if(DEFINED CMAKE_C_FLAGS_${_name_upper})
|
||||
message(STATUS "Using custom CFLAGS: CMAKE_C_FLAGS_${_name_upper} in \"${CMAKE_CURRENT_SOURCE_DIR}\"")
|
||||
string(APPEND CMAKE_C_FLAGS " ${CMAKE_C_FLAGS_${_name_upper}}" ${ARGV1})
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${_name_upper}}" ${ARGV1})
|
||||
endif()
|
||||
if(DEFINED CMAKE_CXX_FLAGS_${_name_upper})
|
||||
message(STATUS "Using custom CXXFLAGS: CMAKE_CXX_FLAGS_${_name_upper} in \"${CMAKE_CURRENT_SOURCE_DIR}\"")
|
||||
string(APPEND CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS_${_name_upper}}" ${ARGV1})
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${_name_upper}}" ${ARGV1})
|
||||
endif()
|
||||
unset(_name_upper)
|
||||
|
||||
@@ -388,43 +375,6 @@ function(blender_add_lib
|
||||
set_property(GLOBAL APPEND PROPERTY BLENDER_LINK_LIBS ${name})
|
||||
endfunction()
|
||||
|
||||
function(blender_add_test_suite)
|
||||
if (ARGC LESS 1)
|
||||
message(FATAL_ERROR "No arguments supplied to blender_add_test_suite()")
|
||||
endif()
|
||||
|
||||
# Parse the arguments
|
||||
set(oneValueArgs TARGET SUITE_NAME)
|
||||
set(multiValueArgs SOURCES)
|
||||
cmake_parse_arguments(ARGS "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
|
||||
|
||||
# Figure out the release dir, as some tests need files from there.
|
||||
GET_BLENDER_TEST_INSTALL_DIR(TEST_INSTALL_DIR)
|
||||
if(APPLE)
|
||||
set(_test_release_dir ${TEST_INSTALL_DIR}/Blender.app/Contents/Resources/${BLENDER_VERSION})
|
||||
else()
|
||||
if(WIN32 OR WITH_INSTALL_PORTABLE)
|
||||
set(_test_release_dir ${TEST_INSTALL_DIR}/${BLENDER_VERSION})
|
||||
else()
|
||||
set(_test_release_dir ${TEST_INSTALL_DIR}/share/blender/${BLENDER_VERSION})
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Define a test case with our custom gtest_add_tests() command.
|
||||
include(GTest)
|
||||
gtest_add_tests(
|
||||
TARGET ${ARGS_TARGET}
|
||||
SOURCES "${ARGS_SOURCES}"
|
||||
TEST_PREFIX ${ARGS_SUITE_NAME}
|
||||
WORKING_DIRECTORY "${TEST_INSTALL_DIR}"
|
||||
EXTRA_ARGS
|
||||
--test-assets-dir "${CMAKE_SOURCE_DIR}/../lib/tests"
|
||||
--test-release-dir "${_test_release_dir}"
|
||||
)
|
||||
|
||||
unset(_test_release_dir)
|
||||
endfunction()
|
||||
|
||||
# Add tests for a Blender library, to be called in tandem with blender_add_lib().
|
||||
# The tests will be part of the blender_test executable (see tests/gtests/runner).
|
||||
function(blender_add_test_lib
|
||||
@@ -458,12 +408,6 @@ function(blender_add_test_lib
|
||||
blender_add_lib__impl(${name} "${sources}" "${includes}" "${includes_sys}" "${library_deps}")
|
||||
|
||||
set_property(GLOBAL APPEND PROPERTY BLENDER_TEST_LIBS ${name})
|
||||
|
||||
blender_add_test_suite(
|
||||
TARGET blender_test
|
||||
SUITE_NAME ${name}
|
||||
SOURCES "${sources}"
|
||||
)
|
||||
endfunction()
|
||||
|
||||
|
||||
@@ -497,10 +441,14 @@ function(blender_add_test_executable
|
||||
SKIP_ADD_TEST
|
||||
)
|
||||
|
||||
blender_add_test_suite(
|
||||
TARGET ${name}_test
|
||||
SUITE_NAME ${name}
|
||||
SOURCES "${sources}"
|
||||
include(GTest)
|
||||
set(_GOOGLETEST_DISCOVER_TESTS_SCRIPT
|
||||
${CMAKE_SOURCE_DIR}/build_files/cmake/Modules/GTestAddTests.cmake
|
||||
)
|
||||
|
||||
gtest_discover_tests(${name}_test
|
||||
DISCOVERY_MODE PRE_TEST
|
||||
WORKING_DIRECTORY "${TEST_INSTALL_DIR}"
|
||||
)
|
||||
endfunction()
|
||||
|
||||
@@ -727,14 +675,14 @@ endmacro()
|
||||
macro(add_c_flag
|
||||
flag)
|
||||
|
||||
string(APPEND CMAKE_C_FLAGS " ${flag}")
|
||||
string(APPEND CMAKE_CXX_FLAGS " ${flag}")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}")
|
||||
endmacro()
|
||||
|
||||
macro(add_cxx_flag
|
||||
flag)
|
||||
|
||||
string(APPEND CMAKE_CXX_FLAGS " ${flag}")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}")
|
||||
endmacro()
|
||||
|
||||
macro(remove_strict_flags)
|
||||
@@ -1178,7 +1126,6 @@ endfunction()
|
||||
|
||||
function(find_python_package
|
||||
package
|
||||
relative_include_dir
|
||||
)
|
||||
|
||||
string(TOUPPER ${package} _upper_package)
|
||||
@@ -1210,10 +1157,7 @@ function(find_python_package
|
||||
dist-packages
|
||||
vendor-packages
|
||||
NO_DEFAULT_PATH
|
||||
DOC
|
||||
"Path to python site-packages or dist-packages containing '${package}' module"
|
||||
)
|
||||
mark_as_advanced(PYTHON_${_upper_package}_PATH)
|
||||
|
||||
if(NOT EXISTS "${PYTHON_${_upper_package}_PATH}")
|
||||
message(WARNING
|
||||
@@ -1231,50 +1175,6 @@ function(find_python_package
|
||||
set(WITH_PYTHON_INSTALL_${_upper_package} OFF PARENT_SCOPE)
|
||||
else()
|
||||
message(STATUS "${package} found at '${PYTHON_${_upper_package}_PATH}'")
|
||||
|
||||
if(NOT "${relative_include_dir}" STREQUAL "")
|
||||
set(_relative_include_dir "${package}/${relative_include_dir}")
|
||||
unset(PYTHON_${_upper_package}_INCLUDE_DIRS CACHE)
|
||||
find_path(PYTHON_${_upper_package}_INCLUDE_DIRS
|
||||
NAMES
|
||||
"${_relative_include_dir}"
|
||||
HINTS
|
||||
"${PYTHON_LIBPATH}/"
|
||||
"${PYTHON_LIBPATH}/python${PYTHON_VERSION}/"
|
||||
"${PYTHON_LIBPATH}/python${_PY_VER_MAJOR}/"
|
||||
PATH_SUFFIXES
|
||||
"site-packages/"
|
||||
"dist-packages/"
|
||||
"vendor-packages/"
|
||||
NO_DEFAULT_PATH
|
||||
DOC
|
||||
"Path to python site-packages or dist-packages containing '${package}' module header files"
|
||||
)
|
||||
mark_as_advanced(PYTHON_${_upper_package}_INCLUDE_DIRS)
|
||||
|
||||
if(NOT EXISTS "${PYTHON_${_upper_package}_INCLUDE_DIRS}")
|
||||
message(WARNING
|
||||
"Python package '${package}' include dir path could not be found in:\n"
|
||||
"'${PYTHON_LIBPATH}/python${PYTHON_VERSION}/site-packages/${_relative_include_dir}', "
|
||||
"'${PYTHON_LIBPATH}/python${_PY_VER_MAJOR}/site-packages/${_relative_include_dir}', "
|
||||
"'${PYTHON_LIBPATH}/python${PYTHON_VERSION}/dist-packages/${_relative_include_dir}', "
|
||||
"'${PYTHON_LIBPATH}/python${_PY_VER_MAJOR}/dist-packages/${_relative_include_dir}', "
|
||||
"'${PYTHON_LIBPATH}/python${PYTHON_VERSION}/vendor-packages/${_relative_include_dir}', "
|
||||
"'${PYTHON_LIBPATH}/python${_PY_VER_MAJOR}/vendor-packages/${_relative_include_dir}', "
|
||||
"\n"
|
||||
"The 'WITH_PYTHON_${_upper_package}' option will be disabled.\n"
|
||||
"The build will be usable, only add-ons that depend on this package won't be functional."
|
||||
)
|
||||
set(WITH_PYTHON_${_upper_package} OFF PARENT_SCOPE)
|
||||
else()
|
||||
set(_temp "${PYTHON_${_upper_package}_INCLUDE_DIRS}/${package}/${relative_include_dir}")
|
||||
unset(PYTHON_${_upper_package}_INCLUDE_DIRS CACHE)
|
||||
set(PYTHON_${_upper_package}_INCLUDE_DIRS "${_temp}"
|
||||
CACHE PATH "Path to the include directory of the ${package} module")
|
||||
|
||||
message(STATUS "${package} include files found at '${PYTHON_${_upper_package}_INCLUDE_DIRS}'")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
endfunction()
|
||||
|
@@ -60,19 +60,11 @@ if(WITH_OPENAL)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_JACK)
|
||||
find_library(JACK_FRAMEWORK
|
||||
NAMES jackmp
|
||||
)
|
||||
if(NOT JACK_FRAMEWORK)
|
||||
set(WITH_JACK OFF)
|
||||
else()
|
||||
set(JACK_INCLUDE_DIRS ${JACK_FRAMEWORK}/headers)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(NOT DEFINED LIBDIR)
|
||||
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/darwin)
|
||||
# Prefer lib directory paths
|
||||
file(GLOB LIB_SUBDIRS ${LIBDIR}/*)
|
||||
set(CMAKE_PREFIX_PATH ${LIB_SUBDIRS})
|
||||
else()
|
||||
message(STATUS "Using pre-compiled LIBDIR: ${LIBDIR}")
|
||||
endif()
|
||||
@@ -80,10 +72,6 @@ if(NOT EXISTS "${LIBDIR}/")
|
||||
message(FATAL_ERROR "Mac OSX requires pre-compiled libs at: '${LIBDIR}'")
|
||||
endif()
|
||||
|
||||
# Prefer lib directory paths
|
||||
file(GLOB LIB_SUBDIRS ${LIBDIR}/*)
|
||||
set(CMAKE_PREFIX_PATH ${LIB_SUBDIRS})
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Find precompiled libraries, and avoid system or user-installed ones.
|
||||
|
||||
@@ -106,6 +94,17 @@ if(WITH_OPENSUBDIV)
|
||||
find_package(OpenSubdiv)
|
||||
endif()
|
||||
|
||||
if(WITH_JACK)
|
||||
find_library(JACK_FRAMEWORK
|
||||
NAMES jackmp
|
||||
)
|
||||
if(NOT JACK_FRAMEWORK)
|
||||
set(WITH_JACK OFF)
|
||||
else()
|
||||
set(JACK_INCLUDE_DIRS ${JACK_FRAMEWORK}/headers)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_CODEC_SNDFILE)
|
||||
find_package(SndFile)
|
||||
find_library(_sndfile_FLAC_LIBRARY NAMES flac HINTS ${LIBDIR}/sndfile/lib)
|
||||
@@ -195,7 +194,7 @@ if(SYSTEMSTUBS_LIBRARY)
|
||||
list(APPEND PLATFORM_LINKLIBS SystemStubs)
|
||||
endif()
|
||||
|
||||
string(APPEND PLATFORM_CFLAGS " -pipe -funsigned-char -fno-strict-aliasing")
|
||||
set(PLATFORM_CFLAGS "${PLATFORM_CFLAGS} -pipe -funsigned-char")
|
||||
set(PLATFORM_LINKFLAGS
|
||||
"-fexceptions -framework CoreServices -framework Foundation -framework IOKit -framework AppKit -framework Cocoa -framework Carbon -framework AudioUnit -framework AudioToolbox -framework CoreAudio -framework Metal -framework QuartzCore"
|
||||
)
|
||||
@@ -203,12 +202,12 @@ set(PLATFORM_LINKFLAGS
|
||||
list(APPEND PLATFORM_LINKLIBS c++)
|
||||
|
||||
if(WITH_JACK)
|
||||
string(APPEND PLATFORM_LINKFLAGS " -F/Library/Frameworks -weak_framework jackmp")
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -F/Library/Frameworks -weak_framework jackmp")
|
||||
endif()
|
||||
|
||||
if(WITH_PYTHON_MODULE OR WITH_PYTHON_FRAMEWORK)
|
||||
# force cmake to link right framework
|
||||
string(APPEND PLATFORM_LINKFLAGS " /Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/Python")
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} /Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/Python")
|
||||
endif()
|
||||
|
||||
if(WITH_OPENCOLLADA)
|
||||
@@ -223,7 +222,7 @@ if(WITH_SDL)
|
||||
find_package(SDL2)
|
||||
set(SDL_INCLUDE_DIR ${SDL2_INCLUDE_DIRS})
|
||||
set(SDL_LIBRARY ${SDL2_LIBRARIES})
|
||||
string(APPEND PLATFORM_LINKFLAGS " -framework ForceFeedback")
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -framework ForceFeedback")
|
||||
endif()
|
||||
|
||||
set(PNG_ROOT ${LIBDIR}/png)
|
||||
@@ -267,15 +266,7 @@ if(WITH_BOOST)
|
||||
endif()
|
||||
|
||||
if(WITH_INTERNATIONAL OR WITH_CODEC_FFMPEG)
|
||||
string(APPEND PLATFORM_LINKFLAGS " -liconv") # boost_locale and ffmpeg needs it !
|
||||
endif()
|
||||
|
||||
if(WITH_PUGIXML)
|
||||
find_package(PugiXML)
|
||||
if(NOT PUGIXML_FOUND)
|
||||
message(WARNING "PugiXML not found, disabling WITH_PUGIXML")
|
||||
set(WITH_PUGIXML OFF)
|
||||
endif()
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -liconv") # boost_locale and ffmpeg needs it !
|
||||
endif()
|
||||
|
||||
if(WITH_OPENIMAGEIO)
|
||||
@@ -346,7 +337,7 @@ if(WITH_CYCLES_EMBREE)
|
||||
find_package(Embree 3.8.0 REQUIRED)
|
||||
# Increase stack size for Embree, only works for executables.
|
||||
if(NOT WITH_PYTHON_MODULE)
|
||||
string(APPEND PLATFORM_LINKFLAGS " -Wl,-stack_size,0x100000")
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -Xlinker -stack_size -Xlinker 0x100000")
|
||||
endif()
|
||||
|
||||
# Embree static library linking can mix up SSE and AVX symbols, causing
|
||||
@@ -390,7 +381,7 @@ if(WITH_OPENMP)
|
||||
set(OPENMP_FOUND ON)
|
||||
set(OpenMP_C_FLAGS "-Xclang -fopenmp -I'${LIBDIR}/openmp/include'")
|
||||
set(OpenMP_CXX_FLAGS "-Xclang -fopenmp -I'${LIBDIR}/openmp/include'")
|
||||
string(APPEND CMAKE_EXE_LINKER_FLAGS " -L'${LIBDIR}/openmp/lib' -lomp")
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -L'${LIBDIR}/openmp/lib' -lomp")
|
||||
|
||||
# Copy libomp.dylib to allow executables like datatoc and tests to work.
|
||||
# `@executable_path/../Resources/lib/` is a default dylib search path.
|
||||
@@ -437,50 +428,36 @@ endif()
|
||||
|
||||
set(EXETYPE MACOSX_BUNDLE)
|
||||
|
||||
set(CMAKE_C_FLAGS_DEBUG "-g")
|
||||
set(CMAKE_CXX_FLAGS_DEBUG "-g")
|
||||
set(CMAKE_C_FLAGS_DEBUG "-fno-strict-aliasing -g")
|
||||
set(CMAKE_CXX_FLAGS_DEBUG "-fno-strict-aliasing -g")
|
||||
if(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64" OR CMAKE_OSX_ARCHITECTURES MATCHES "i386")
|
||||
set(CMAKE_CXX_FLAGS_RELEASE "-O2 -mdynamic-no-pic -msse -msse2 -msse3 -mssse3")
|
||||
set(CMAKE_C_FLAGS_RELEASE "-O2 -mdynamic-no-pic -msse -msse2 -msse3 -mssse3")
|
||||
if(NOT CMAKE_C_COMPILER_ID MATCHES "Clang")
|
||||
string(APPEND CMAKE_C_FLAGS_RELEASE " -ftree-vectorize -fvariable-expansion-in-unroller")
|
||||
string(APPEND CMAKE_CXX_FLAGS_RELEASE " -ftree-vectorize -fvariable-expansion-in-unroller")
|
||||
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -ftree-vectorize -fvariable-expansion-in-unroller")
|
||||
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -ftree-vectorize -fvariable-expansion-in-unroller")
|
||||
endif()
|
||||
else()
|
||||
set(CMAKE_C_FLAGS_RELEASE "-O2 -mdynamic-no-pic")
|
||||
set(CMAKE_CXX_FLAGS_RELEASE "-O2 -mdynamic-no-pic")
|
||||
set(CMAKE_C_FLAGS_RELEASE "-O2 -mdynamic-no-pic -fno-strict-aliasing")
|
||||
set(CMAKE_CXX_FLAGS_RELEASE "-O2 -mdynamic-no-pic -fno-strict-aliasing")
|
||||
endif()
|
||||
|
||||
if(${XCODE_VERSION} VERSION_EQUAL 5 OR ${XCODE_VERSION} VERSION_GREATER 5)
|
||||
# Xcode 5 is always using CLANG, which has too low template depth of 128 for libmv
|
||||
string(APPEND CMAKE_CXX_FLAGS " -ftemplate-depth=1024")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ftemplate-depth=1024")
|
||||
endif()
|
||||
|
||||
# Avoid conflicts with Luxrender, and other plug-ins that may use the same
|
||||
# libraries as Blender with a different version or build options.
|
||||
string(APPEND PLATFORM_LINKFLAGS
|
||||
" -Wl,-unexported_symbols_list,'${CMAKE_SOURCE_DIR}/source/creator/osx_locals.map'"
|
||||
set(PLATFORM_LINKFLAGS
|
||||
"${PLATFORM_LINKFLAGS} -Xlinker -unexported_symbols_list -Xlinker '${CMAKE_SOURCE_DIR}/source/creator/osx_locals.map'"
|
||||
)
|
||||
|
||||
string(APPEND CMAKE_CXX_FLAGS " -stdlib=libc++")
|
||||
string(APPEND PLATFORM_LINKFLAGS " -stdlib=libc++")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -stdlib=libc++")
|
||||
|
||||
# Suppress ranlib "has no symbols" warnings (workaround for T48250)
|
||||
set(CMAKE_C_ARCHIVE_CREATE "<CMAKE_AR> Scr <TARGET> <LINK_FLAGS> <OBJECTS>")
|
||||
set(CMAKE_CXX_ARCHIVE_CREATE "<CMAKE_AR> Scr <TARGET> <LINK_FLAGS> <OBJECTS>")
|
||||
set(CMAKE_C_ARCHIVE_FINISH "<CMAKE_RANLIB> -no_warning_for_no_symbols -c <TARGET>")
|
||||
set(CMAKE_CXX_ARCHIVE_FINISH "<CMAKE_RANLIB> -no_warning_for_no_symbols -c <TARGET>")
|
||||
|
||||
if(WITH_COMPILER_CCACHE)
|
||||
if(NOT CMAKE_GENERATOR STREQUAL "Xcode")
|
||||
find_program(CCACHE_PROGRAM ccache)
|
||||
if(CCACHE_PROGRAM)
|
||||
# Makefiles and ninja
|
||||
set(CMAKE_C_COMPILER_LAUNCHER "${CCACHE_PROGRAM}" CACHE STRING "" FORCE)
|
||||
set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PROGRAM}" CACHE STRING "" FORCE)
|
||||
else()
|
||||
message(WARNING "Ccache NOT found, disabling WITH_COMPILER_CCACHE")
|
||||
set(WITH_COMPILER_CCACHE OFF)
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
@@ -150,36 +150,7 @@ endif()
|
||||
|
||||
if(NOT ${CMAKE_GENERATOR} MATCHES "Xcode")
|
||||
# Force CMAKE_OSX_DEPLOYMENT_TARGET for makefiles, otherwise it will not work (CMake bug?)
|
||||
string(APPEND CMAKE_C_FLAGS " -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}")
|
||||
string(APPEND CMAKE_CXX_FLAGS " -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}")
|
||||
add_definitions("-DMACOSX_DEPLOYMENT_TARGET=${CMAKE_OSX_DEPLOYMENT_TARGET}")
|
||||
endif()
|
||||
|
||||
if(WITH_COMPILER_CCACHE)
|
||||
if(CMAKE_GENERATOR STREQUAL "Xcode")
|
||||
find_program(CCACHE_PROGRAM ccache)
|
||||
if(CCACHE_PROGRAM)
|
||||
get_filename_component(ccompiler "${CMAKE_C_COMPILER}" NAME)
|
||||
get_filename_component(cxxcompiler "${CMAKE_CXX_COMPILER}" NAME)
|
||||
# Ccache can figure out which compiler to use if it's invoked from
|
||||
# a symlink with the name of the compiler.
|
||||
# https://ccache.dev/manual/4.1.html#_run_modes
|
||||
set(_fake_compiler_dir "${CMAKE_BINARY_DIR}/ccache")
|
||||
execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${_fake_compiler_dir})
|
||||
set(_fake_C_COMPILER "${_fake_compiler_dir}/${ccompiler}")
|
||||
set(_fake_CXX_COMPILER "${_fake_compiler_dir}/${cxxcompiler}")
|
||||
execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink "${CCACHE_PROGRAM}" ${_fake_C_COMPILER})
|
||||
execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink "${CCACHE_PROGRAM}" ${_fake_CXX_COMPILER})
|
||||
set(CMAKE_XCODE_ATTRIBUTE_CC ${_fake_C_COMPILER} CACHE STRING "" FORCE)
|
||||
set(CMAKE_XCODE_ATTRIBUTE_CXX ${_fake_CXX_COMPILER} CACHE STRING "" FORCE)
|
||||
set(CMAKE_XCODE_ATTRIBUTE_LD ${_fake_C_COMPILER} CACHE STRING "" FORCE)
|
||||
set(CMAKE_XCODE_ATTRIBUTE_LDPLUSPLUS ${_fake_CXX_COMPILER} CACHE STRING "" FORCE)
|
||||
unset(_fake_compiler_dir)
|
||||
unset(_fake_C_COMPILER)
|
||||
unset(_fake_CXX_COMPILER)
|
||||
else()
|
||||
message(WARNING "Ccache NOT found, disabling WITH_COMPILER_CCACHE")
|
||||
set(WITH_COMPILER_CCACHE OFF)
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
@@ -73,7 +73,7 @@ if(EXISTS ${LIBDIR})
|
||||
endif()
|
||||
|
||||
if(WITH_STATIC_LIBS)
|
||||
string(APPEND CMAKE_EXE_LINKER_FLAGS " -static-libstdc++")
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libstdc++")
|
||||
endif()
|
||||
|
||||
# Wrapper to prefer static libraries
|
||||
@@ -350,12 +350,15 @@ if(WITH_BOOST)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_PUGIXML)
|
||||
find_package_wrapper(PugiXML)
|
||||
endif()
|
||||
|
||||
if(WITH_OPENIMAGEIO)
|
||||
find_package_wrapper(OpenImageIO)
|
||||
if(NOT OPENIMAGEIO_PUGIXML_FOUND AND WITH_CYCLES_STANDALONE)
|
||||
find_package_wrapper(PugiXML)
|
||||
else()
|
||||
set(PUGIXML_INCLUDE_DIR "${OPENIMAGEIO_INCLUDE_DIR/OpenImageIO}")
|
||||
set(PUGIXML_LIBRARIES "")
|
||||
endif()
|
||||
|
||||
set(OPENIMAGEIO_LIBRARIES
|
||||
${OPENIMAGEIO_LIBRARIES}
|
||||
${PNG_LIBRARIES}
|
||||
@@ -617,7 +620,7 @@ if(CMAKE_COMPILER_IS_GNUCC)
|
||||
set(CMAKE_C_FLAGS_RELEASE "${GCC_EXTRA_FLAGS_RELEASE} ${CMAKE_C_FLAGS_RELEASE}")
|
||||
set(CMAKE_C_FLAGS_RELWITHDEBINFO "${GCC_EXTRA_FLAGS_RELEASE} ${CMAKE_C_FLAGS_RELWITHDEBINFO}")
|
||||
set(CMAKE_CXX_FLAGS_RELEASE "${GCC_EXTRA_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_RELEASE}")
|
||||
string(PREPEND CMAKE_CXX_FLAGS_RELWITHDEBINFO "${GCC_EXTRA_FLAGS_RELEASE} ")
|
||||
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${GCC_EXTRA_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_RELWITHDEBINFO}")
|
||||
unset(GCC_EXTRA_FLAGS_RELEASE)
|
||||
|
||||
if(WITH_LINKER_GOLD)
|
||||
@@ -625,8 +628,8 @@ if(CMAKE_COMPILER_IS_GNUCC)
|
||||
COMMAND ${CMAKE_C_COMPILER} -fuse-ld=gold -Wl,--version
|
||||
ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
|
||||
if("${LD_VERSION}" MATCHES "GNU gold")
|
||||
string(APPEND CMAKE_C_FLAGS " -fuse-ld=gold")
|
||||
string(APPEND CMAKE_CXX_FLAGS " -fuse-ld=gold")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fuse-ld=gold")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fuse-ld=gold")
|
||||
else()
|
||||
message(STATUS "GNU gold linker isn't available, using the default system linker.")
|
||||
endif()
|
||||
@@ -638,8 +641,8 @@ if(CMAKE_COMPILER_IS_GNUCC)
|
||||
COMMAND ${CMAKE_C_COMPILER} -fuse-ld=lld -Wl,--version
|
||||
ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
|
||||
if("${LD_VERSION}" MATCHES "LLD")
|
||||
string(APPEND CMAKE_C_FLAGS " -fuse-ld=lld")
|
||||
string(APPEND CMAKE_CXX_FLAGS " -fuse-ld=lld")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fuse-ld=lld")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fuse-ld=lld")
|
||||
else()
|
||||
message(STATUS "LLD linker isn't available, using the default system linker.")
|
||||
endif()
|
||||
@@ -664,12 +667,12 @@ elseif(CMAKE_C_COMPILER_ID MATCHES "Intel")
|
||||
endif()
|
||||
mark_as_advanced(XILD)
|
||||
|
||||
string(APPEND CMAKE_C_FLAGS " -fp-model precise -prec_div -parallel")
|
||||
string(APPEND CMAKE_CXX_FLAGS " -fp-model precise -prec_div -parallel")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fp-model precise -prec_div -parallel")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fp-model precise -prec_div -parallel")
|
||||
|
||||
# string(APPEND PLATFORM_CFLAGS " -diag-enable sc3")
|
||||
# set(PLATFORM_CFLAGS "${PLATFORM_CFLAGS} -diag-enable sc3")
|
||||
set(PLATFORM_CFLAGS "-pipe -fPIC -funsigned-char -fno-strict-aliasing")
|
||||
string(APPEND PLATFORM_LINKFLAGS " -static-intel")
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -static-intel")
|
||||
endif()
|
||||
|
||||
# Avoid conflicts with Mesa llvmpipe, Luxrender, and other plug-ins that may
|
||||
@@ -682,17 +685,5 @@ set(PLATFORM_LINKFLAGS
|
||||
# browsers can't properly detect blender as an executable then. Still enabled
|
||||
# for non-portable installs as typically used by Linux distributions.
|
||||
if(WITH_INSTALL_PORTABLE)
|
||||
string(APPEND CMAKE_EXE_LINKER_FLAGS " -no-pie")
|
||||
endif()
|
||||
|
||||
if(WITH_COMPILER_CCACHE)
|
||||
find_program(CCACHE_PROGRAM ccache)
|
||||
if(CCACHE_PROGRAM)
|
||||
# Makefiles and ninja
|
||||
set(CMAKE_C_COMPILER_LAUNCHER "${CCACHE_PROGRAM}" CACHE STRING "" FORCE)
|
||||
set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PROGRAM}" CACHE STRING "" FORCE)
|
||||
else()
|
||||
message(WARNING "Ccache NOT found, disabling WITH_COMPILER_CCACHE")
|
||||
set(WITH_COMPILER_CCACHE OFF)
|
||||
endif()
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie")
|
||||
endif()
|
||||
|
@@ -49,7 +49,7 @@ if(CMAKE_C_COMPILER_ID MATCHES "Clang")
|
||||
if(NOT EXISTS "${CLANG_OPENMP_DLL}")
|
||||
message(FATAL_ERROR "Clang OpenMP library (${CLANG_OPENMP_DLL}) not found.")
|
||||
endif()
|
||||
string(APPEND CMAKE_EXE_LINKER_FLAGS " \"${CLANG_OPENMP_LIB}\"")
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} \"${CLANG_OPENMP_LIB}\"")
|
||||
endif()
|
||||
if(WITH_WINDOWS_STRIPPED_PDB)
|
||||
message(WARNING "stripped pdb not supported with clang, disabling..")
|
||||
@@ -112,9 +112,9 @@ unset(_min_ver)
|
||||
|
||||
# needed for some MSVC installations
|
||||
# 4099 : PDB 'filename' was not found with 'object/library'
|
||||
string(APPEND CMAKE_EXE_LINKER_FLAGS " /SAFESEH:NO /ignore:4099")
|
||||
string(APPEND CMAKE_SHARED_LINKER_FLAGS " /SAFESEH:NO /ignore:4099")
|
||||
string(APPEND CMAKE_MODULE_LINKER_FLAGS " /SAFESEH:NO /ignore:4099")
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /SAFESEH:NO /ignore:4099")
|
||||
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /SAFESEH:NO /ignore:4099")
|
||||
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /SAFESEH:NO /ignore:4099")
|
||||
|
||||
list(APPEND PLATFORM_LINKLIBS
|
||||
ws2_32 vfw32 winmm kernel32 user32 gdi32 comdlg32 Comctl32 version
|
||||
@@ -154,18 +154,18 @@ if(WITH_WINDOWS_PDB)
|
||||
endif()
|
||||
|
||||
if(MSVC_CLANG) # Clang's version of cl doesn't support all flags
|
||||
string(APPEND CMAKE_CXX_FLAGS " ${CXX_WARN_FLAGS} /nologo /J /Gd /EHsc -Wno-unused-command-line-argument -Wno-microsoft-enum-forward-reference ")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CXX_WARN_FLAGS} /nologo /J /Gd /EHsc -Wno-unused-command-line-argument -Wno-microsoft-enum-forward-reference ")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /nologo /J /Gd -Wno-unused-command-line-argument -Wno-microsoft-enum-forward-reference")
|
||||
else()
|
||||
string(APPEND CMAKE_CXX_FLAGS " /nologo /J /Gd /MP /EHsc /bigobj")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /nologo /J /Gd /MP /EHsc /bigobj")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /nologo /J /Gd /MP /bigobj")
|
||||
endif()
|
||||
|
||||
# C++ standards conformance (/permissive-) is available on MSVC 15.5 (1912) and up
|
||||
if(MSVC_VERSION GREATER 1911 AND NOT MSVC_CLANG)
|
||||
string(APPEND CMAKE_CXX_FLAGS " /permissive-")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /permissive-")
|
||||
# Two-phase name lookup does not play nicely with OpenMP yet, so disable for now
|
||||
string(APPEND CMAKE_CXX_FLAGS " /Zc:twoPhase-")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zc:twoPhase-")
|
||||
endif()
|
||||
|
||||
if(WITH_WINDOWS_SCCACHE AND CMAKE_VS_MSBUILD_COMMAND)
|
||||
@@ -183,33 +183,33 @@ else()
|
||||
set(SYMBOL_FORMAT /ZI)
|
||||
endif()
|
||||
|
||||
string(APPEND CMAKE_CXX_FLAGS_DEBUG " /MDd ${SYMBOL_FORMAT}")
|
||||
string(APPEND CMAKE_C_FLAGS_DEBUG " /MDd ${SYMBOL_FORMAT}")
|
||||
string(APPEND CMAKE_CXX_FLAGS_RELEASE " /MD ${PDB_INFO_OVERRIDE_FLAGS}")
|
||||
string(APPEND CMAKE_C_FLAGS_RELEASE " /MD ${PDB_INFO_OVERRIDE_FLAGS}")
|
||||
string(APPEND CMAKE_CXX_FLAGS_MINSIZEREL " /MD ${PDB_INFO_OVERRIDE_FLAGS}")
|
||||
string(APPEND CMAKE_C_FLAGS_MINSIZEREL " /MD ${PDB_INFO_OVERRIDE_FLAGS}")
|
||||
string(APPEND CMAKE_CXX_FLAGS_RELWITHDEBINFO " /MD ${SYMBOL_FORMAT}")
|
||||
string(APPEND CMAKE_C_FLAGS_RELWITHDEBINFO " /MD ${SYMBOL_FORMAT}")
|
||||
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MDd ${SYMBOL_FORMAT}")
|
||||
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /MDd ${SYMBOL_FORMAT}")
|
||||
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MD ${PDB_INFO_OVERRIDE_FLAGS}")
|
||||
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /MD ${PDB_INFO_OVERRIDE_FLAGS}")
|
||||
set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /MD ${PDB_INFO_OVERRIDE_FLAGS}")
|
||||
set(CMAKE_C_FLAGS_MINSIZEREL "${CMAKE_C_FLAGS_MINSIZEREL} /MD ${PDB_INFO_OVERRIDE_FLAGS}")
|
||||
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /MD ${SYMBOL_FORMAT}")
|
||||
set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} /MD ${SYMBOL_FORMAT}")
|
||||
unset(SYMBOL_FORMAT)
|
||||
# JMC is available on MSVC 15.8 (1915) and up
|
||||
if(MSVC_VERSION GREATER 1914 AND NOT MSVC_CLANG)
|
||||
string(APPEND CMAKE_CXX_FLAGS_DEBUG " /JMC")
|
||||
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /JMC")
|
||||
endif()
|
||||
|
||||
string(APPEND PLATFORM_LINKFLAGS " /SUBSYSTEM:CONSOLE /STACK:2097152")
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} /SUBSYSTEM:CONSOLE /STACK:2097152")
|
||||
set(PLATFORM_LINKFLAGS_RELEASE "/NODEFAULTLIB:libcmt.lib /NODEFAULTLIB:libcmtd.lib /NODEFAULTLIB:msvcrtd.lib")
|
||||
string(APPEND PLATFORM_LINKFLAGS_DEBUG " /IGNORE:4099 /NODEFAULTLIB:libcmt.lib /NODEFAULTLIB:msvcrt.lib /NODEFAULTLIB:libcmtd.lib")
|
||||
set(PLATFORM_LINKFLAGS_DEBUG "${PLATFORM_LINKFLAGS_DEBUG} /IGNORE:4099 /NODEFAULTLIB:libcmt.lib /NODEFAULTLIB:msvcrt.lib /NODEFAULTLIB:libcmtd.lib")
|
||||
|
||||
# Ignore linker warnings that are meaningless for us.
|
||||
string(APPEND PLATFORM_LINKFLAGS " /ignore:4049 /ignore:4217 /ignore:4221")
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} /ignore:4049 /ignore:4217 /ignore:4221")
|
||||
set(PLATFORM_LINKFLAGS_RELEASE "${PLATFORM_LINKFLAGS} ${PDB_INFO_OVERRIDE_LINKER_FLAGS}")
|
||||
string(APPEND CMAKE_STATIC_LINKER_FLAGS " /ignore:4221")
|
||||
set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} /ignore:4221")
|
||||
|
||||
if(CMAKE_CL_64)
|
||||
string(PREPEND PLATFORM_LINKFLAGS "/MACHINE:X64 ")
|
||||
set(PLATFORM_LINKFLAGS "/MACHINE:X64 ${PLATFORM_LINKFLAGS}")
|
||||
else()
|
||||
string(PREPEND PLATFORM_LINKFLAGS "/MACHINE:IX86 /LARGEADDRESSAWARE ")
|
||||
set(PLATFORM_LINKFLAGS "/MACHINE:IX86 /LARGEADDRESSAWARE ${PLATFORM_LINKFLAGS}")
|
||||
endif()
|
||||
|
||||
if(NOT DEFINED LIBDIR)
|
||||
@@ -239,24 +239,9 @@ if(NOT EXISTS "${LIBDIR}/")
|
||||
message(FATAL_ERROR "\n\nWindows requires pre-compiled libs at: '${LIBDIR}'. Please run `make update` in the blender source folder to obtain them.")
|
||||
endif()
|
||||
|
||||
if(CMAKE_GENERATOR MATCHES "^Visual Studio.+" AND # Only supported in the VS IDE
|
||||
MSVC_VERSION GREATER_EQUAL 1924 AND # Supported for 16.4+
|
||||
WITH_CLANG_TIDY # And Clang Tidy needs to be on
|
||||
)
|
||||
set(CMAKE_VS_GLOBALS
|
||||
"RunCodeAnalysis=false"
|
||||
"EnableMicrosoftCodeAnalysis=false"
|
||||
"EnableClangTidyCodeAnalysis=true"
|
||||
)
|
||||
set(VS_CLANG_TIDY On)
|
||||
endif()
|
||||
|
||||
# Mark libdir as system headers with a lower warn level, to resolve some warnings
|
||||
# that we have very little control over
|
||||
if(MSVC_VERSION GREATER_EQUAL 1914 AND # Available with 15.7+
|
||||
NOT MSVC_CLANG AND # But not for clang
|
||||
NOT WITH_WINDOWS_SCCACHE AND # And not when sccache is enabled
|
||||
NOT VS_CLANG_TIDY) # Clang-tidy does not like these options
|
||||
if(MSVC_VERSION GREATER_EQUAL 1914 AND NOT MSVC_CLANG AND NOT WITH_WINDOWS_SCCACHE)
|
||||
add_compile_options(/experimental:external /external:templates- /external:I "${LIBDIR}" /external:W0)
|
||||
endif()
|
||||
|
||||
@@ -268,11 +253,6 @@ foreach(child ${children})
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
if(WITH_PUGIXML)
|
||||
set(PUGIXML_LIBRARIES optimized ${LIBDIR}/pugixml/lib/pugixml.lib debug ${LIBDIR}/pugixml/lib/pugixml_d.lib)
|
||||
set(PUGIXML_INCLUDE_DIR ${LIBDIR}/pugixml/include)
|
||||
endif()
|
||||
|
||||
set(ZLIB_INCLUDE_DIRS ${LIBDIR}/zlib/include)
|
||||
set(ZLIB_LIBRARIES ${LIBDIR}/zlib/lib/libz_st.lib)
|
||||
set(ZLIB_INCLUDE_DIR ${LIBDIR}/zlib/include)
|
||||
@@ -671,10 +651,11 @@ if(WITH_CYCLES_OSL)
|
||||
optimized ${OSL_LIB_COMP}
|
||||
optimized ${OSL_LIB_EXEC}
|
||||
optimized ${OSL_LIB_QUERY}
|
||||
optimized ${CYCLES_OSL}/lib/pugixml.lib
|
||||
debug ${OSL_LIB_EXEC_DEBUG}
|
||||
debug ${OSL_LIB_COMP_DEBUG}
|
||||
debug ${OSL_LIB_QUERY_DEBUG}
|
||||
${PUGIXML_LIBRARIES}
|
||||
debug ${CYCLES_OSL}/lib/pugixml_d.lib
|
||||
)
|
||||
find_path(OSL_INCLUDE_DIR OSL/oslclosure.h PATHS ${CYCLES_OSL}/include)
|
||||
find_program(OSL_COMPILER NAMES oslc PATHS ${CYCLES_OSL}/bin)
|
||||
@@ -739,7 +720,7 @@ if(WINDOWS_PYTHON_DEBUG)
|
||||
string(REPLACE "/" "\\" _group_path "${_source_path}")
|
||||
source_group("${_group_path}" FILES "${_source}")
|
||||
endforeach()
|
||||
|
||||
|
||||
# If the user scripts env var is set, include scripts from there otherwise
|
||||
# include user scripts in the profile folder.
|
||||
if(DEFINED ENV{BLENDER_USER_SCRIPTS})
|
||||
@@ -750,7 +731,7 @@ if(WINDOWS_PYTHON_DEBUG)
|
||||
# Include the user scripts from the profile folder in the blender_python_user_scripts project.
|
||||
set(USER_SCRIPTS_ROOT "$ENV{appdata}/blender foundation/blender/${BLENDER_VERSION}/scripts")
|
||||
endif()
|
||||
|
||||
|
||||
file(TO_CMAKE_PATH ${USER_SCRIPTS_ROOT} USER_SCRIPTS_ROOT)
|
||||
FILE(GLOB_RECURSE inFiles "${USER_SCRIPTS_ROOT}/*.*" )
|
||||
ADD_CUSTOM_TARGET(blender_python_user_scripts SOURCES ${inFiles})
|
||||
|
@@ -31,7 +31,7 @@ if(WITH_WINDOWS_BUNDLE_CRT)
|
||||
foreach(lib ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS})
|
||||
get_filename_component(filename ${lib} NAME)
|
||||
file(SHA1 "${lib}" sha1_file)
|
||||
string(APPEND CRTLIBS " <file name=\"${filename}\" hash=\"${sha1_file}\" hashalg=\"SHA1\" />\n")
|
||||
set(CRTLIBS "${CRTLIBS} <file name=\"${filename}\" hash=\"${sha1_file}\" hashalg=\"SHA1\" />\n")
|
||||
endforeach()
|
||||
configure_file(${CMAKE_SOURCE_DIR}/release/windows/manifest/blender.crt.manifest.in ${CMAKE_CURRENT_BINARY_DIR}/blender.crt.manifest @ONLY)
|
||||
file(TOUCH ${manifest_trigger_file})
|
||||
|
@@ -92,5 +92,5 @@ echo if "%%VSCMD_VER%%" == "" ^( >> %BUILD_DIR%\rebuild.cmd
|
||||
echo call "%VCVARS%" %BUILD_ARCH% >> %BUILD_DIR%\rebuild.cmd
|
||||
echo ^) >> %BUILD_DIR%\rebuild.cmd
|
||||
echo echo %%TIME%% ^> buildtime.txt >> %BUILD_DIR%\rebuild.cmd
|
||||
echo ninja install %%* >> %BUILD_DIR%\rebuild.cmd
|
||||
echo ninja install >> %BUILD_DIR%\rebuild.cmd
|
||||
echo echo %%TIME%% ^>^> buildtime.txt >> %BUILD_DIR%\rebuild.cmd
|
@@ -453,7 +453,7 @@ TYPEDEF_HIDES_STRUCT = NO
|
||||
# the optimal cache size from a speed point of view.
|
||||
# Minimum value: 0, maximum value: 9, default value: 0.
|
||||
|
||||
LOOKUP_CACHE_SIZE = 3
|
||||
LOOKUP_CACHE_SIZE = 0
|
||||
|
||||
#---------------------------------------------------------------------------
|
||||
# Build related configuration options
|
||||
@@ -1321,7 +1321,7 @@ DOCSET_PUBLISHER_NAME = Publisher
|
||||
# The default value is: NO.
|
||||
# This tag requires that the tag GENERATE_HTML is set to YES.
|
||||
|
||||
GENERATE_HTMLHELP = NO
|
||||
GENERATE_HTMLHELP = YES
|
||||
|
||||
# The CHM_FILE tag can be used to specify the file name of the resulting .chm
|
||||
# file. You can add a path in front of the file if the result should not be
|
||||
|
@@ -121,10 +121,6 @@
|
||||
* \ingroup editors
|
||||
*/
|
||||
|
||||
/** \defgroup edasset asset
|
||||
* \ingroup editors
|
||||
*/
|
||||
|
||||
/** \defgroup edcurve curve
|
||||
* \ingroup editors
|
||||
*/
|
||||
|
@@ -1,30 +0,0 @@
|
||||
"""
|
||||
.. note::
|
||||
|
||||
Properties defined at run-time store the values of the properties as custom-properties.
|
||||
|
||||
This method checks if the underlying data exists, causing the property to be considered *set*.
|
||||
|
||||
A common pattern for operators is to calculate a value for the properties
|
||||
that have not had their values explicitly set by the caller
|
||||
(where the caller could be a key-binding, menu item or Python script, for example).
|
||||
|
||||
In the case of executing operators multiple times, values are re-used from the previous execution.
|
||||
|
||||
For example: subdividing a mesh with a smooth value of 1.0 will keep using
|
||||
that value on subsequent calls to subdivision, unless the operator is called with
|
||||
that property set to a different value.
|
||||
|
||||
This behavior can be disabled using the ``SKIP_SAVE`` option when the property is declared (see: :mod:`bpy.props`).
|
||||
|
||||
The ``ghost`` argument allows detecting how a value from a previous execution is handled.
|
||||
|
||||
- When true: The property is considered unset even if the value from a previous call is used.
|
||||
- When false: The existence of any values causes ``is_property_set`` to return true.
|
||||
|
||||
While this argument should typically be omitted, there are times when
|
||||
it's important to know if a value is anything besides the default.
|
||||
|
||||
For example, the previous value may have been scaled by the scene's unit scale.
|
||||
In this case scaling the value multiple times would cause problems, so the ``ghost`` argument should be false.
|
||||
"""
|
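As an extra illustration (not part of the original docstring), the ``ghost`` argument described above might be used inside an operator's ``invoke()`` like the hedged sketch below; the ``offset`` property and ``compute_default_offset()`` helper are hypothetical names.

.. code-block:: python

   # Hedged sketch: "offset" and compute_default_offset() are hypothetical.
   def invoke(self, context, event):
       # Only compute a default when neither the caller nor a previous
       # execution has provided a value (ghost values count as set here).
       if not self.properties.is_property_set("offset", ghost=False):
           self.offset = compute_default_offset(context)
       return self.execute(context)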
@@ -163,13 +163,13 @@ Now in the button's context menu select *Copy Data Path*, then paste the result
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
bpy.context.active_object.modifiers["Subdivision"].levels
|
||||
bpy.context.active_object.modifiers["Subsurf"].levels
|
||||
|
||||
Press :kbd:`Return` and you'll get the current value of 1. Now try changing the value to 2:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
bpy.context.active_object.modifiers["Subdivision"].levels = 2
|
||||
bpy.context.active_object.modifiers["Subsurf"].levels = 2
|
||||
|
||||
You can see the value update in the Subdivision Surface modifier's UI as well as the cube.
|
||||
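As a small additional illustration (not part of the manual text), the same property can also be changed without hard-coding the modifier name, for example by iterating over the active object's modifiers:

.. code-block:: python

   import bpy

   # Set the viewport level on every Subdivision Surface modifier
   # of the active object (illustrative snippet).
   for mod in bpy.context.active_object.modifiers:
       if mod.type == 'SUBSURF':
           mod.levels = 2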
|
||||
@@ -185,31 +185,43 @@ For example, if you want to access the texture of a brush via Python to adjust i
|
||||
#. Start in the default scene and enable Sculpt Mode from the 3D Viewport header.
|
||||
#. From the Sidebar expand the Brush Settings panel's *Texture* subpanel and add a new texture.
|
||||
*Notice the texture data-block menu itself doesn't have very useful links (you can check the tooltips).*
|
||||
#. The contrast setting isn't exposed in the Sidebar, so view the texture in the
|
||||
:ref:`Properties Editor <blender_manual:bpy.types.Texture.contrast>`
|
||||
#. The contrast setting isn't exposed in the Sidebar, so view the texture in the properties editor:
|
||||
|
||||
- In the properties editor select the Texture tab.
|
||||
- Select brush texture.
|
||||
- Expand the *Colors* panel to locate the *Contrast* number field.
|
||||
#. Open the context menu of the contrast field and select *Online Python Reference*.
|
||||
This takes you to ``bpy.types.Texture.contrast``. Now you can see that ``contrast`` is a property of texture.
|
||||
#. To find out how to access the texture from the brush, check the references at the bottom of the page.
|
||||
Sometimes there are many references, and it may take some guesswork to find the right one,
|
||||
but in this case it's ``tool_settings.sculpt.brush.texture``.
|
||||
but in this case it's ``Brush.texture``.
|
||||
|
||||
#. Now you know that the texture can be accessed from ``bpy.data.brushes["BrushName"].texture``
|
||||
but normally you *won't* want to access the brush by name; instead you want to access the active brush.
|
||||
So the next step is to check, via the references, where brushes are accessed from.
|
||||
In this case it is simply ``bpy.context.brush``.
|
||||
|
||||
Now you can use the Python console to form the nested properties needed to access the brush texture's contrast:
|
||||
:menuselection:`Context --> Tool Settings --> Sculpt --> Brush --> Texture --> Contrast`.
|
||||
*Context -> Brush -> Texture -> Contrast*.
|
||||
|
||||
Since the attribute for each is given along the way you can compose the data path in the Python console:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
bpy.context.tool_settings.sculpt.brush.texture.contrast
|
||||
bpy.context.brush.texture.contrast
|
||||
|
||||
There can be multiple ways to access the same data; which one you choose often depends on the task.
|
||||
An alternate path to access the same setting is:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
bpy.context.sculpt.brush.texture.contrast
|
||||
|
||||
Or access the brush directly:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
bpy.data.textures["Texture"].contrast
|
||||
bpy.data.brushes["BrushName"].texture.contrast
|
||||
|
||||
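For completeness, here is a hedged console snippet (not in the original text) that changes the contrast through the context-based path described above, assuming Sculpt Mode is active and the brush has a texture assigned:

.. code-block:: python

   import bpy

   # Read the current value, then bump it slightly.
   tex = bpy.context.tool_settings.sculpt.brush.texture
   print(tex.contrast)
   tex.contrast = tex.contrast + 0.25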
|
||||
If you are writing a user tool normally you want to use the :mod:`bpy.context` since the user normally expects
|
||||
|
@@ -35,13 +35,12 @@ but not to fully cover each topic.
|
||||
|
||||
A quick list of helpful things to know before starting:
|
||||
|
||||
- Enable :ref:`Developer Extra <blender_manual:prefs-interface-dev-extras>`
|
||||
and :ref:`Python Tooltips <blender_manual:prefs-interface-tooltips-python>`.
|
||||
- The :ref:`Python Console <blender_manual:bpy.types.SpaceConsole>`
|
||||
is great for testing one-liners; it has autocompletion so you can inspect the API quickly.
|
||||
- Button tooltips show Python attributes and operator names (when enabled, see above).
|
||||
- The context menu of buttons directly links to this API documentation (when enabled, see above).
|
||||
- Many Python examples can be found in the text editor's template menu.
|
||||
- Blender uses Python 3.x; some online documentation still assumes version 2.x.
|
||||
- The interactive console is great for testing one-liners.
|
||||
It also has autocompletion so you can inspect the API quickly.
|
||||
- Button tooltips show Python attributes and operator names.
|
||||
- The context menu of buttons directly links to this API documentation.
|
||||
- More operator examples can be found in the text editor's template menu.
|
||||
- To examine further scripts distributed with Blender, see:
|
||||
|
||||
- ``scripts/startup/bl_ui`` for the user interface.
|
||||
@@ -238,7 +237,7 @@ Examples:
|
||||
{'FINISHED'}
|
||||
>>> bpy.ops.mesh.hide(unselected=False)
|
||||
{'FINISHED'}
|
||||
>>> bpy.ops.object.transform_apply()
|
||||
>>> bpy.ops.object.scale_apply()
|
||||
{'FINISHED'}
|
||||
|
||||
.. tip::
|
||||
|
@@ -24,9 +24,10 @@ The three main use cases for the terminal are:
|
||||
- If the script runs for too long or you accidentally enter an infinite loop,
|
||||
:kbd:`Ctrl-C` in the terminal (:kbd:`Ctrl-Break` on Windows) will quit the script early.
|
||||
|
||||
.. seealso::
|
||||
.. note::
|
||||
|
||||
:ref:`blender_manual:command_line-launch-index`.
|
||||
For Linux and macOS users this means starting the terminal first, then running Blender from within it.
|
||||
On Windows the terminal can be enabled from the Help menu.
|
||||
|
||||
|
||||
Interface Tricks
|
||||
|
@@ -551,7 +551,7 @@ def example_extract_docstring(filepath):
|
||||
file.close()
|
||||
return "", 0
|
||||
|
||||
for line in file:
|
||||
for line in file.readlines():
|
||||
line_no += 1
|
||||
if line.startswith('"""'):
|
||||
break
|
||||
@@ -559,13 +559,6 @@ def example_extract_docstring(filepath):
|
||||
text.append(line.rstrip())
|
||||
|
||||
line_no += 1
|
||||
|
||||
# Skip over blank lines so the Python code doesn't have blank lines at the top.
|
||||
for line in file:
|
||||
if line.strip():
|
||||
break
|
||||
line_no += 1
|
||||
|
||||
file.close()
|
||||
return "\n".join(text), line_no
|
||||
|
||||
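# Hedged, stand-alone sketch of the same idea (not part of the actual
# sphinx_doc_gen.py diff above): read a module's leading triple-quoted
# docstring and return it together with the line number where code resumes.
def extract_docstring_sketch(filepath):
    text = []
    line_no = 0
    with open(filepath, encoding="utf-8") as file:
        line = file.readline()
        line_no += 1
        if not line.startswith('"""'):
            return "", 0
        for line in file:
            line_no += 1
            if line.startswith('"""'):
                break
            text.append(line.rstrip())
    return "\n".join(text), line_no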
@@ -2195,20 +2188,9 @@ def setup_blender():
|
||||
|
||||
# Remove handlers since the functions get included
|
||||
# in the doc-string and don't have meaningful names.
|
||||
lists_to_restore = []
|
||||
for var in bpy.app.handlers:
|
||||
if isinstance(var, list):
|
||||
lists_to_restore.append((var[:], var))
|
||||
var.clear()
|
||||
|
||||
return {
|
||||
"lists_to_restore": lists_to_restore,
|
||||
}
|
||||
|
||||
|
||||
def teardown_blender(setup_data):
|
||||
for var_src, var_dst in setup_data["lists_to_restore"]:
|
||||
var_dst[:] = var_src
|
||||
for ls in bpy.app.handlers:
|
||||
if isinstance(ls, list):
|
||||
ls.clear()
|
||||
|
||||
|
||||
def main():
|
||||
@@ -2217,7 +2199,7 @@ def main():
|
||||
setup_monkey_patch()
|
||||
|
||||
# Perform changes to Blender itself.
|
||||
setup_data = setup_blender()
|
||||
setup_blender()
|
||||
|
||||
# eventually, create the dirs
|
||||
for dir_path in [ARGS.output_dir, SPHINX_IN]:
|
||||
@@ -2323,8 +2305,6 @@ def main():
|
||||
shutil.copy(os.path.join(SPHINX_OUT_PDF, "contents.pdf"),
|
||||
os.path.join(REFERENCE_PATH, BLENDER_PDF_FILENAME))
|
||||
|
||||
teardown_blender(setup_data)
|
||||
|
||||
sys.exit()
|
||||
|
||||
|
||||
|
@@ -1,5 +1,7 @@
|
||||
/* T76453: Prevent Long enum lists */
|
||||
.field-list li {
|
||||
/* Prevent Long enum lists */
|
||||
.field-body {
|
||||
display: block;
|
||||
width: 100%;
|
||||
max-height: 245px;
|
||||
overflow-y: auto !important;
|
||||
}
|
||||
|
4  extern/README  vendored
@@ -1,4 +0,0 @@
|
||||
When updating a library remember to:
|
||||
|
||||
* Update the README.blender with the corresponding version.
|
||||
* Update the THIRD-PARTY-LICENSE.txt document
|
5  extern/audaspace/README.blender  vendored
@@ -1,5 +0,0 @@
|
||||
Project: Audaspace
|
||||
URL: https://audaspace.github.io/
|
||||
License: Apache 2.0
|
||||
Upstream version: 1.3 (Last Release)
|
||||
Local modifications: None
|
2  extern/audaspace/blender_config.cmake  vendored
@@ -24,6 +24,6 @@ set(JACK_FOUND ${WITH_JACK})
|
||||
set(LIBSNDFILE_FOUND ${WITH_CODEC_SNDFILE})
|
||||
set(OPENAL_FOUND ${WITH_OPENAL})
|
||||
set(PYTHONLIBS_FOUND TRUE)
|
||||
set(NUMPY_FOUND ${WITH_PYTHON_NUMPY})
|
||||
set(NUMPY_FOUND TRUE)
|
||||
set(NUMPY_INCLUDE_DIRS ${PYTHON_NUMPY_INCLUDE_DIRS})
|
||||
set(SDL_FOUND ${WITH_SDL})
|
||||
|
@@ -72,9 +72,6 @@ protected:
|
||||
/// The channel mapper reader in between.
|
||||
std::shared_ptr<ChannelMapperReader> m_mapper;
|
||||
|
||||
/// Whether the source is being read for the first time.
|
||||
bool m_first_reading;
|
||||
|
||||
/// Whether to keep the source if end of it is reached.
|
||||
bool m_keep;
|
||||
|
||||
|
19  extern/audaspace/src/devices/SoftwareDevice.cpp  vendored
@@ -78,7 +78,7 @@ bool SoftwareDevice::SoftwareHandle::pause(bool keep)
|
||||
}
|
||||
|
||||
SoftwareDevice::SoftwareHandle::SoftwareHandle(SoftwareDevice* device, std::shared_ptr<IReader> reader, std::shared_ptr<PitchReader> pitch, std::shared_ptr<ResampleReader> resampler, std::shared_ptr<ChannelMapperReader> mapper, bool keep) :
|
||||
m_reader(reader), m_pitch(pitch), m_resampler(resampler), m_mapper(mapper), m_first_reading(true), m_keep(keep), m_user_pitch(1.0f), m_user_volume(1.0f), m_user_pan(0.0f), m_volume(0.0f), m_old_volume(0.0f), m_loopcount(0),
|
||||
m_reader(reader), m_pitch(pitch), m_resampler(resampler), m_mapper(mapper), m_keep(keep), m_user_pitch(1.0f), m_user_volume(1.0f), m_user_pan(0.0f), m_volume(0.0f), m_old_volume(0.0f), m_loopcount(0),
|
||||
m_relative(true), m_volume_max(1.0f), m_volume_min(0), m_distance_max(std::numeric_limits<float>::max()),
|
||||
m_distance_reference(1.0f), m_attenuation(1.0f), m_cone_angle_outer(M_PI), m_cone_angle_inner(M_PI), m_cone_volume_outer(0),
|
||||
m_flags(RENDER_CONE), m_stop(nullptr), m_stop_data(nullptr), m_status(STATUS_PLAYING), m_device(device)
|
||||
@@ -106,14 +106,6 @@ void SoftwareDevice::SoftwareHandle::update()
|
||||
if(m_pitch->getSpecs().channels != CHANNELS_MONO)
|
||||
{
|
||||
m_volume = m_user_volume;
|
||||
|
||||
// we don't know a previous volume if this source has never been read before
|
||||
if(m_first_reading)
|
||||
{
|
||||
m_old_volume = m_volume;
|
||||
m_first_reading = false;
|
||||
}
|
||||
|
||||
m_pitch->setPitch(m_user_pitch);
|
||||
return;
|
||||
}
|
||||
@@ -222,13 +214,6 @@ void SoftwareDevice::SoftwareHandle::update()
|
||||
m_volume *= m_user_volume;
|
||||
}
|
||||
|
||||
// we don't know a previous volume if this source has never been read before
|
||||
if(m_first_reading)
|
||||
{
|
||||
m_old_volume = m_volume;
|
||||
m_first_reading = false;
|
||||
}
|
||||
|
||||
// 3D Cue
|
||||
|
||||
Quaternion orientation;
|
||||
@@ -769,8 +754,6 @@ void SoftwareDevice::mix(data_t* buffer, int length)
|
||||
{
|
||||
m_mixer->mix(buf, pos, len, sound->m_volume, sound->m_old_volume);
|
||||
|
||||
sound->m_old_volume = sound->m_volume;
|
||||
|
||||
pos += len;
|
||||
|
||||
if(sound->m_loopcount > 0)
|
||||
|
@@ -22,7 +22,6 @@
|
||||
#include <mutex>
|
||||
|
||||
#define KEEP_TIME 10
|
||||
#define POSITION_EPSILON (1.0 / static_cast<double>(RATE_48000))
|
||||
|
||||
AUD_NAMESPACE_BEGIN
|
||||
|
||||
@@ -65,7 +64,7 @@ bool SequenceHandle::updatePosition(double position)
|
||||
if(m_handle.get())
|
||||
{
|
||||
// we currently have a handle, let's check where we are
|
||||
if(position - POSITION_EPSILON >= m_entry->m_end)
|
||||
if(position >= m_entry->m_end)
|
||||
{
|
||||
if(position >= m_entry->m_end + KEEP_TIME)
|
||||
// far end, stopping
|
||||
@@ -77,7 +76,7 @@ bool SequenceHandle::updatePosition(double position)
|
||||
return true;
|
||||
}
|
||||
}
|
||||
else if(position + POSITION_EPSILON >= m_entry->m_begin)
|
||||
else if(position >= m_entry->m_begin)
|
||||
{
|
||||
// inside, resuming
|
||||
m_handle->resume();
|
||||
@@ -99,7 +98,7 @@ bool SequenceHandle::updatePosition(double position)
|
||||
else
|
||||
{
|
||||
// we don't have a handle, let's start if we should be playing
|
||||
if(position + POSITION_EPSILON >= m_entry->m_begin && position - POSITION_EPSILON <= m_entry->m_end)
|
||||
if(position >= m_entry->m_begin && position <= m_entry->m_end)
|
||||
{
|
||||
start();
|
||||
return m_valid;
|
||||
|
2  extern/bullet2/CMakeLists.txt  vendored
@@ -423,7 +423,7 @@ set(LIB
|
||||
|
||||
if(CMAKE_COMPILER_IS_GNUCXX)
|
||||
# needed for gcc 4.6+
|
||||
string(APPEND CMAKE_CXX_FLAGS " -fpermissive")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fpermissive")
|
||||
endif()
|
||||
|
||||
if(MSVC)
|
||||
|
5  extern/bullet2/README.blender  vendored
@@ -1,5 +0,0 @@
|
||||
Project: Bullet Continuous Collision Detection and Physics Library
|
||||
URL: http://bulletphysics.org
|
||||
License: zlib
|
||||
Upstream version: 3.07
|
||||
Local modifications: Fixed inertia
|
1137  extern/ceres/ChangeLog  vendored
File diff suppressed because it is too large
3  extern/ceres/README.blender  vendored
@@ -1,5 +1,4 @@
|
||||
Project: Ceres Solver
|
||||
URL: http://ceres-solver.org/
|
||||
License: BSD 3-Clause
|
||||
Upstream version 2.0.0
|
||||
Upstream version 1.11 (aef9c9563b08d5f39eee1576af133a84749d1b48)
|
||||
Local modifications: None
|
||||
|
4  extern/ceres/bundle.sh  vendored
@@ -8,8 +8,8 @@ else
|
||||
fi
|
||||
|
||||
repo="https://ceres-solver.googlesource.com/ceres-solver"
|
||||
#branch="master"
|
||||
tag="2.0.0"
|
||||
branch="master"
|
||||
tag=""
|
||||
tmp=`mktemp -d`
|
||||
checkout="$tmp/ceres"
|
||||
|
||||
|
@@ -153,44 +153,28 @@ template <typename CostFunctor,
|
||||
int... Ns> // Number of parameters in each parameter block.
|
||||
class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
|
||||
public:
|
||||
// Takes ownership of functor by default. Uses the template-provided
|
||||
// value for the number of residuals ("kNumResiduals").
|
||||
explicit AutoDiffCostFunction(CostFunctor* functor,
|
||||
Ownership ownership = TAKE_OWNERSHIP)
|
||||
: functor_(functor), ownership_(ownership) {
|
||||
// Takes ownership of functor. Uses the template-provided value for the
|
||||
// number of residuals ("kNumResiduals").
|
||||
explicit AutoDiffCostFunction(CostFunctor* functor) : functor_(functor) {
|
||||
static_assert(kNumResiduals != DYNAMIC,
|
||||
"Can't run the fixed-size constructor if the number of "
|
||||
"residuals is set to ceres::DYNAMIC.");
|
||||
}
|
||||
|
||||
// Takes ownership of functor by default. Ignores the template-provided
|
||||
// Takes ownership of functor. Ignores the template-provided
|
||||
// kNumResiduals in favor of the "num_residuals" argument provided.
|
||||
//
|
||||
// This allows for having autodiff cost functions which return varying
|
||||
// numbers of residuals at runtime.
|
||||
AutoDiffCostFunction(CostFunctor* functor,
|
||||
int num_residuals,
|
||||
Ownership ownership = TAKE_OWNERSHIP)
|
||||
: functor_(functor), ownership_(ownership) {
|
||||
AutoDiffCostFunction(CostFunctor* functor, int num_residuals)
|
||||
: functor_(functor) {
|
||||
static_assert(kNumResiduals == DYNAMIC,
|
||||
"Can't run the dynamic-size constructor if the number of "
|
||||
"residuals is not ceres::DYNAMIC.");
|
||||
SizedCostFunction<kNumResiduals, Ns...>::set_num_residuals(num_residuals);
|
||||
}
|
||||
|
||||
explicit AutoDiffCostFunction(AutoDiffCostFunction&& other)
|
||||
: functor_(std::move(other.functor_)), ownership_(other.ownership_) {}
|
||||
|
||||
virtual ~AutoDiffCostFunction() {
|
||||
// Manually release pointer if configured to not take ownership rather than
|
||||
// deleting only if ownership is taken.
|
||||
// This is to stay maximally compatible to old user code which may have
|
||||
// forgotten to implement a virtual destructor, from when the
|
||||
// AutoDiffCostFunction always took ownership.
|
||||
if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
|
||||
functor_.release();
|
||||
}
|
||||
}
|
||||
virtual ~AutoDiffCostFunction() {}
|
||||
|
||||
// Implementation details follow; clients of the autodiff cost function should
|
||||
// not have to examine below here.
|
||||
@@ -217,7 +201,6 @@ class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
|
||||
|
||||
private:
|
||||
std::unique_ptr<CostFunctor> functor_;
|
||||
Ownership ownership_;
|
||||
};
|
||||
|
||||
} // namespace ceres
|
||||
|
2  extern/ceres/include/ceres/c_api.h  vendored
@@ -38,10 +38,8 @@
|
||||
#ifndef CERES_PUBLIC_C_API_H_
|
||||
#define CERES_PUBLIC_C_API_H_
|
||||
|
||||
// clang-format off
|
||||
#include "ceres/internal/port.h"
|
||||
#include "ceres/internal/disable_warnings.h"
|
||||
// clang-format on
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
@@ -144,7 +144,8 @@ class CostFunctionToFunctor {
|
||||
|
||||
// Extract parameter block pointers from params.
|
||||
using Indices =
|
||||
std::make_integer_sequence<int, ParameterDims::kNumParameterBlocks>;
|
||||
std::make_integer_sequence<int,
|
||||
ParameterDims::kNumParameterBlocks>;
|
||||
std::array<const T*, ParameterDims::kNumParameterBlocks> parameter_blocks =
|
||||
GetParameterPointers<T>(params, Indices());
|
||||
|
||||
|
10  extern/ceres/include/ceres/covariance.h  vendored
@@ -51,7 +51,7 @@ class CovarianceImpl;
|
||||
// =======
|
||||
// It is very easy to use this class incorrectly without understanding
|
||||
// the underlying mathematics. Please read and understand the
|
||||
// documentation completely before attempting to use it.
|
||||
// documentation completely before attempting to use this class.
|
||||
//
|
||||
//
|
||||
// This class allows the user to evaluate the covariance for a
|
||||
@@ -73,7 +73,7 @@ class CovarianceImpl;
|
||||
// the maximum likelihood estimate of x given observations y is the
|
||||
// solution to the non-linear least squares problem:
|
||||
//
|
||||
// x* = arg min_x |f(x) - y|^2
|
||||
// x* = arg min_x |f(x)|^2
|
||||
//
|
||||
// And the covariance of x* is given by
|
||||
//
|
||||
@@ -220,11 +220,11 @@ class CERES_EXPORT Covariance {
|
||||
// 1. DENSE_SVD uses Eigen's JacobiSVD to perform the
|
||||
// computations. It computes the singular value decomposition
|
||||
//
|
||||
// U * D * V' = J
|
||||
// U * S * V' = J
|
||||
//
|
||||
// and then uses it to compute the pseudo inverse of J'J as
|
||||
//
|
||||
// pseudoinverse[J'J] = V * pseudoinverse[D^2] * V'
|
||||
// pseudoinverse[J'J]^ = V * pseudoinverse[S] * V'
|
||||
//
|
||||
// It is an accurate but slow method and should only be used
|
||||
// for small to moderate sized problems. It can handle
|
||||
@@ -235,7 +235,7 @@ class CERES_EXPORT Covariance {
|
||||
//
|
||||
// Q * R = J
|
||||
//
|
||||
// [J'J]^-1 = [R'*R]^-1
|
||||
// [J'J]^-1 = [R*R']^-1
|
||||
//
|
||||
// SPARSE_QR is not capable of computing the covariance if the
|
||||
// Jacobian is rank deficient. Depending on the value of
|
||||
|
@@ -40,7 +40,6 @@
|
||||
#include "ceres/dynamic_cost_function.h"
|
||||
#include "ceres/internal/fixed_array.h"
|
||||
#include "ceres/jet.h"
|
||||
#include "ceres/types.h"
|
||||
#include "glog/logging.h"
|
||||
|
||||
namespace ceres {
|
||||
@@ -79,24 +78,10 @@ namespace ceres {
|
||||
template <typename CostFunctor, int Stride = 4>
|
||||
class DynamicAutoDiffCostFunction : public DynamicCostFunction {
|
||||
public:
|
||||
// Takes ownership by default.
|
||||
DynamicAutoDiffCostFunction(CostFunctor* functor,
|
||||
Ownership ownership = TAKE_OWNERSHIP)
|
||||
: functor_(functor), ownership_(ownership) {}
|
||||
explicit DynamicAutoDiffCostFunction(CostFunctor* functor)
|
||||
: functor_(functor) {}
|
||||
|
||||
explicit DynamicAutoDiffCostFunction(DynamicAutoDiffCostFunction&& other)
|
||||
: functor_(std::move(other.functor_)), ownership_(other.ownership_) {}
|
||||
|
||||
virtual ~DynamicAutoDiffCostFunction() {
|
||||
// Manually release pointer if configured to not take ownership
|
||||
// rather than deleting only if ownership is taken. This is to
|
||||
// stay maximally compatible to old user code which may have
|
||||
// forgotten to implement a virtual destructor, from when the
|
||||
// AutoDiffCostFunction always took ownership.
|
||||
if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
|
||||
functor_.release();
|
||||
}
|
||||
}
|
||||
virtual ~DynamicAutoDiffCostFunction() {}
|
||||
|
||||
bool Evaluate(double const* const* parameters,
|
||||
double* residuals,
|
||||
@@ -166,9 +151,6 @@ class DynamicAutoDiffCostFunction : public DynamicCostFunction {
|
||||
}
|
||||
}
|
||||
|
||||
if (num_active_parameters == 0) {
|
||||
return (*functor_)(parameters, residuals);
|
||||
}
|
||||
// When `num_active_parameters % Stride != 0` then it can be the case
|
||||
// that `active_parameter_count < Stride` while parameter_cursor is less
|
||||
// than the total number of parameters and with no remaining non-constant
|
||||
@@ -266,7 +248,6 @@ class DynamicAutoDiffCostFunction : public DynamicCostFunction {
|
||||
|
||||
private:
|
||||
std::unique_ptr<CostFunctor> functor_;
|
||||
Ownership ownership_;
|
||||
};
|
||||
|
||||
} // namespace ceres
|
||||
|
@@ -44,7 +44,6 @@
|
||||
#include "ceres/internal/numeric_diff.h"
|
||||
#include "ceres/internal/parameter_dims.h"
|
||||
#include "ceres/numeric_diff_options.h"
|
||||
#include "ceres/types.h"
|
||||
#include "glog/logging.h"
|
||||
|
||||
namespace ceres {
|
||||
@@ -85,10 +84,6 @@ class DynamicNumericDiffCostFunction : public DynamicCostFunction {
|
||||
const NumericDiffOptions& options = NumericDiffOptions())
|
||||
: functor_(functor), ownership_(ownership), options_(options) {}
|
||||
|
||||
explicit DynamicNumericDiffCostFunction(
|
||||
DynamicNumericDiffCostFunction&& other)
|
||||
: functor_(std::move(other.functor_)), ownership_(other.ownership_) {}
|
||||
|
||||
virtual ~DynamicNumericDiffCostFunction() {
|
||||
if (ownership_ != TAKE_OWNERSHIP) {
|
||||
functor_.release();
|
||||
|
@@ -62,8 +62,7 @@ class CERES_EXPORT GradientProblemSolver {
|
||||
// Minimizer options ----------------------------------------
|
||||
LineSearchDirectionType line_search_direction_type = LBFGS;
|
||||
LineSearchType line_search_type = WOLFE;
|
||||
NonlinearConjugateGradientType nonlinear_conjugate_gradient_type =
|
||||
FLETCHER_REEVES;
|
||||
NonlinearConjugateGradientType nonlinear_conjugate_gradient_type = FLETCHER_REEVES;
|
||||
|
||||
// The LBFGS hessian approximation is a low rank approximation to
|
||||
// the inverse of the Hessian matrix. The rank of the
|
||||
|
@@ -198,7 +198,7 @@ struct Make1stOrderPerturbation {
|
||||
template <int N, int Offset, typename T, typename JetT>
|
||||
struct Make1stOrderPerturbation<N, N, Offset, T, JetT> {
|
||||
public:
|
||||
static void Apply(const T* src, JetT* dst) {}
|
||||
static void Apply(const T* /*src*/, JetT* /*dst*/) {}
|
||||
};
|
||||
|
||||
// Calls Make1stOrderPerturbation for every parameter block.
|
||||
@@ -229,9 +229,7 @@ struct Make1stOrderPerturbations<std::integer_sequence<int, N, Ns...>,
|
||||
|
||||
// End of 'recursion'. Nothing more to do.
|
||||
template <int ParameterIdx, int Total>
|
||||
struct Make1stOrderPerturbations<std::integer_sequence<int>,
|
||||
ParameterIdx,
|
||||
Total> {
|
||||
struct Make1stOrderPerturbations<std::integer_sequence<int>, ParameterIdx, Total> {
|
||||
template <typename T, typename JetT>
|
||||
static void Apply(T const* const* /* NOT USED */, JetT* /* NOT USED */) {}
|
||||
};
|
||||
|
@@ -34,11 +34,11 @@
|
||||
#define CERES_WARNINGS_DISABLED
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning(push)
|
||||
#pragma warning( push )
|
||||
// Disable the warning C4251 which is triggered by stl classes in
|
||||
// Ceres' public interface. To quote MSDN: "C4251 can be ignored "
|
||||
// "if you are deriving from a type in the Standard C++ Library"
|
||||
#pragma warning(disable : 4251)
|
||||
#pragma warning( disable : 4251 )
|
||||
#endif
|
||||
|
||||
#endif // CERES_WARNINGS_DISABLED
|
||||
|
23  extern/ceres/include/ceres/internal/eigen.h  vendored
@@ -36,26 +36,31 @@
namespace ceres {

typedef Eigen::Matrix<double, Eigen::Dynamic, 1> Vector;
typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
    Matrix;
typedef Eigen::Matrix<double,
                      Eigen::Dynamic,
                      Eigen::Dynamic,
                      Eigen::RowMajor> Matrix;
typedef Eigen::Map<Vector> VectorRef;
typedef Eigen::Map<Matrix> MatrixRef;
typedef Eigen::Map<const Vector> ConstVectorRef;
typedef Eigen::Map<const Matrix> ConstMatrixRef;

// Column major matrices for DenseSparseMatrix/DenseQRSolver
typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor>
    ColMajorMatrix;
typedef Eigen::Matrix<double,
                      Eigen::Dynamic,
                      Eigen::Dynamic,
                      Eigen::ColMajor> ColMajorMatrix;

typedef Eigen::Map<ColMajorMatrix, 0, Eigen::Stride<Eigen::Dynamic, 1>>
    ColMajorMatrixRef;
typedef Eigen::Map<ColMajorMatrix, 0,
                   Eigen::Stride<Eigen::Dynamic, 1>> ColMajorMatrixRef;

typedef Eigen::Map<const ColMajorMatrix, 0, Eigen::Stride<Eigen::Dynamic, 1>>
    ConstColMajorMatrixRef;
typedef Eigen::Map<const ColMajorMatrix,
                   0,
                   Eigen::Stride<Eigen::Dynamic, 1>> ConstColMajorMatrixRef;

// C++ does not support templated typdefs, thus the need for this
// struct so that we can support statically sized Matrix and Maps.
template <int num_rows = Eigen::Dynamic, int num_cols = Eigen::Dynamic>
template <int num_rows = Eigen::Dynamic, int num_cols = Eigen::Dynamic>
struct EigenTypes {
  typedef Eigen::Matrix<double,
                        num_rows,
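As the comment in the hunk above notes, EigenTypes exists because a typedef cannot itself be templated, so statically sized matrix types are resolved through the struct. A minimal usage sketch, relying only on the Matrix alias visible in the excerpt (the function name is hypothetical, not part of the patch):

#include "ceres/internal/eigen.h"

// Illustration only: resolve a fixed 2x3 double matrix type through
// EigenTypes and use it like any other Eigen fixed-size matrix.
ceres::EigenTypes<2, 3>::Matrix MakeSelector() {
  ceres::EigenTypes<2, 3>::Matrix m;
  m.setZero();
  m(0, 0) = 1.0;  // pick the first coordinate
  m(1, 2) = 1.0;  // and the last one
  return m;
}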
@@ -30,7 +30,6 @@
#ifndef CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_
#define CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_

#include <Eigen/Core>  // For Eigen::aligned_allocator
#include <algorithm>
#include <array>
#include <cstddef>
@@ -38,6 +37,8 @@
#include <tuple>
#include <type_traits>

#include <Eigen/Core>  // For Eigen::aligned_allocator

#include "ceres/internal/memory.h"
#include "glog/logging.h"
@@ -62,8 +62,7 @@ struct SumImpl;
// Strip of and sum the first number.
template <typename T, T N, T... Ns>
struct SumImpl<std::integer_sequence<T, N, Ns...>> {
  static constexpr T Value =
      N + SumImpl<std::integer_sequence<T, Ns...>>::Value;
  static constexpr T Value = N + SumImpl<std::integer_sequence<T, Ns...>>::Value;
};

// Strip of and sum the first two numbers.
@@ -130,14 +129,10 @@ template <typename T, T Sum, typename SeqIn, typename SeqOut>
struct ExclusiveScanImpl;

template <typename T, T Sum, T N, T... Ns, T... Rs>
struct ExclusiveScanImpl<T,
                         Sum,
                         std::integer_sequence<T, N, Ns...>,
struct ExclusiveScanImpl<T, Sum, std::integer_sequence<T, N, Ns...>,
                         std::integer_sequence<T, Rs...>> {
  using Type =
      typename ExclusiveScanImpl<T,
                                 Sum + N,
                                 std::integer_sequence<T, Ns...>,
      typename ExclusiveScanImpl<T, Sum + N, std::integer_sequence<T, Ns...>,
                                 std::integer_sequence<T, Rs..., Sum>>::Type;
};
extern/ceres/include/ceres/internal/numeric_diff.h (vendored, 118 changes)
@@ -47,17 +47,15 @@
#include "ceres/types.h"
#include "glog/logging.h"


namespace ceres {
namespace internal {

// This is split from the main class because C++ doesn't allow partial template
// specializations for member functions. The alternative is to repeat the main
// class for differing numbers of parameters, which is also unfortunate.
template <typename CostFunctor,
          NumericDiffMethodType kMethod,
          int kNumResiduals,
          typename ParameterDims,
          int kParameterBlock,
template <typename CostFunctor, NumericDiffMethodType kMethod,
          int kNumResiduals, typename ParameterDims, int kParameterBlock,
          int kParameterBlockSize>
struct NumericDiff {
  // Mutates parameters but must restore them before return.
@@ -68,23 +66,23 @@ struct NumericDiff {
|
||||
int num_residuals,
|
||||
int parameter_block_index,
|
||||
int parameter_block_size,
|
||||
double** parameters,
|
||||
double* jacobian) {
|
||||
using Eigen::ColMajor;
|
||||
double **parameters,
|
||||
double *jacobian) {
|
||||
using Eigen::Map;
|
||||
using Eigen::Matrix;
|
||||
using Eigen::RowMajor;
|
||||
using Eigen::ColMajor;
|
||||
|
||||
DCHECK(jacobian);
|
||||
|
||||
const int num_residuals_internal =
|
||||
(kNumResiduals != ceres::DYNAMIC ? kNumResiduals : num_residuals);
|
||||
const int parameter_block_index_internal =
|
||||
(kParameterBlock != ceres::DYNAMIC ? kParameterBlock
|
||||
: parameter_block_index);
|
||||
(kParameterBlock != ceres::DYNAMIC ? kParameterBlock :
|
||||
parameter_block_index);
|
||||
const int parameter_block_size_internal =
|
||||
(kParameterBlockSize != ceres::DYNAMIC ? kParameterBlockSize
|
||||
: parameter_block_size);
|
||||
(kParameterBlockSize != ceres::DYNAMIC ? kParameterBlockSize :
|
||||
parameter_block_size);
|
||||
|
||||
typedef Matrix<double, kNumResiduals, 1> ResidualVector;
|
||||
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
|
||||
@@ -99,17 +97,17 @@ struct NumericDiff {
|
||||
(kParameterBlockSize == 1) ? ColMajor : RowMajor>
|
||||
JacobianMatrix;
|
||||
|
||||
Map<JacobianMatrix> parameter_jacobian(
|
||||
jacobian, num_residuals_internal, parameter_block_size_internal);
|
||||
Map<JacobianMatrix> parameter_jacobian(jacobian,
|
||||
num_residuals_internal,
|
||||
parameter_block_size_internal);
|
||||
|
||||
Map<ParameterVector> x_plus_delta(
|
||||
parameters[parameter_block_index_internal],
|
||||
parameter_block_size_internal);
|
||||
ParameterVector x(x_plus_delta);
|
||||
ParameterVector step_size =
|
||||
x.array().abs() * ((kMethod == RIDDERS)
|
||||
? options.ridders_relative_initial_step_size
|
||||
: options.relative_step_size);
|
||||
ParameterVector step_size = x.array().abs() *
|
||||
((kMethod == RIDDERS) ? options.ridders_relative_initial_step_size :
|
||||
options.relative_step_size);
|
||||
|
||||
    // It is not a good idea to make the step size arbitrarily
    // small. This will lead to problems with round off and numerical
@@ -120,8 +118,8 @@ struct NumericDiff {
    // For Ridders' method, the initial step size is required to be large,
    // thus ridders_relative_initial_step_size is used.
    if (kMethod == RIDDERS) {
      min_step_size =
          std::max(min_step_size, options.ridders_relative_initial_step_size);
      min_step_size = std::max(min_step_size,
                               options.ridders_relative_initial_step_size);
    }

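The step sizing discussed in this hunk scales each perturbation by the parameter's magnitude while keeping a floor so the step never collapses. A self-contained sketch of the central-difference estimate this feeds into (hypothetical helper, not the Ceres implementation):

#include <algorithm>
#include <cmath>
#include <functional>

// Central-difference derivative of f at x, with a step proportional to |x|
// and a lower bound to avoid catastrophic round-off. Illustration only.
double CentralDifference(const std::function<double(double)>& f, double x,
                         double relative_step_size = 1e-6) {
  const double min_step = 1e-12;
  const double h = std::max(std::abs(x) * relative_step_size, min_step);
  return (f(x + h) - f(x - h)) / (2.0 * h);
}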
// For each parameter in the parameter block, use finite differences to
|
||||
@@ -135,9 +133,7 @@ struct NumericDiff {
|
||||
const double delta = std::max(min_step_size, step_size(j));
|
||||
|
||||
if (kMethod == RIDDERS) {
|
||||
if (!EvaluateRiddersJacobianColumn(functor,
|
||||
j,
|
||||
delta,
|
||||
if (!EvaluateRiddersJacobianColumn(functor, j, delta,
|
||||
options,
|
||||
num_residuals_internal,
|
||||
parameter_block_size_internal,
|
||||
@@ -150,9 +146,7 @@ struct NumericDiff {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
if (!EvaluateJacobianColumn(functor,
|
||||
j,
|
||||
delta,
|
||||
if (!EvaluateJacobianColumn(functor, j, delta,
|
||||
num_residuals_internal,
|
||||
parameter_block_size_internal,
|
||||
x.data(),
|
||||
@@ -188,7 +182,8 @@ struct NumericDiff {
|
||||
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
|
||||
|
||||
Map<const ParameterVector> x(x_ptr, parameter_block_size);
|
||||
Map<ParameterVector> x_plus_delta(x_plus_delta_ptr, parameter_block_size);
|
||||
Map<ParameterVector> x_plus_delta(x_plus_delta_ptr,
|
||||
parameter_block_size);
|
||||
|
||||
Map<ResidualVector> residuals(residuals_ptr, num_residuals);
|
||||
Map<ResidualVector> temp_residuals(temp_residuals_ptr, num_residuals);
|
||||
@@ -196,8 +191,9 @@ struct NumericDiff {
|
||||
// Mutate 1 element at a time and then restore.
|
||||
x_plus_delta(parameter_index) = x(parameter_index) + delta;
|
||||
|
||||
if (!VariadicEvaluate<ParameterDims>(
|
||||
*functor, parameters, residuals.data())) {
|
||||
if (!VariadicEvaluate<ParameterDims>(*functor,
|
||||
parameters,
|
||||
residuals.data())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -210,8 +206,9 @@ struct NumericDiff {
|
||||
// Compute the function on the other side of x(parameter_index).
|
||||
x_plus_delta(parameter_index) = x(parameter_index) - delta;
|
||||
|
||||
if (!VariadicEvaluate<ParameterDims>(
|
||||
*functor, parameters, temp_residuals.data())) {
|
||||
if (!VariadicEvaluate<ParameterDims>(*functor,
|
||||
parameters,
|
||||
temp_residuals.data())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -220,7 +217,8 @@ struct NumericDiff {
|
||||
} else {
|
||||
// Forward difference only; reuse existing residuals evaluation.
|
||||
residuals -=
|
||||
Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
|
||||
Map<const ResidualVector>(residuals_at_eval_point,
|
||||
num_residuals);
|
||||
}
|
||||
|
||||
// Restore x_plus_delta.
|
||||
@@ -256,17 +254,17 @@ struct NumericDiff {
|
||||
double* x_plus_delta_ptr,
|
||||
double* temp_residuals_ptr,
|
||||
double* residuals_ptr) {
|
||||
using Eigen::aligned_allocator;
|
||||
using Eigen::Map;
|
||||
using Eigen::Matrix;
|
||||
using Eigen::aligned_allocator;
|
||||
|
||||
typedef Matrix<double, kNumResiduals, 1> ResidualVector;
|
||||
typedef Matrix<double, kNumResiduals, Eigen::Dynamic>
|
||||
ResidualCandidateMatrix;
|
||||
typedef Matrix<double, kNumResiduals, Eigen::Dynamic> ResidualCandidateMatrix;
|
||||
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
|
||||
|
||||
Map<const ParameterVector> x(x_ptr, parameter_block_size);
|
||||
Map<ParameterVector> x_plus_delta(x_plus_delta_ptr, parameter_block_size);
|
||||
Map<ParameterVector> x_plus_delta(x_plus_delta_ptr,
|
||||
parameter_block_size);
|
||||
|
||||
Map<ResidualVector> residuals(residuals_ptr, num_residuals);
|
||||
Map<ResidualVector> temp_residuals(temp_residuals_ptr, num_residuals);
|
||||
@@ -277,16 +275,18 @@ struct NumericDiff {
|
||||
// As the derivative is estimated, the step size decreases.
|
||||
// By default, the step sizes are chosen so that the middle column
|
||||
// of the Romberg tableau uses the input delta.
|
||||
double current_step_size =
|
||||
delta * pow(options.ridders_step_shrink_factor,
|
||||
options.max_num_ridders_extrapolations / 2);
|
||||
double current_step_size = delta *
|
||||
pow(options.ridders_step_shrink_factor,
|
||||
options.max_num_ridders_extrapolations / 2);
|
||||
|
||||
      // Double-buffering temporary differential candidate vectors
      // from previous step size.
      ResidualCandidateMatrix stepsize_candidates_a(
          num_residuals, options.max_num_ridders_extrapolations);
          num_residuals,
          options.max_num_ridders_extrapolations);
      ResidualCandidateMatrix stepsize_candidates_b(
          num_residuals, options.max_num_ridders_extrapolations);
          num_residuals,
          options.max_num_ridders_extrapolations);
      ResidualCandidateMatrix* current_candidates = &stepsize_candidates_a;
      ResidualCandidateMatrix* previous_candidates = &stepsize_candidates_b;

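The hunks around this point implement Ridders' method: derivative estimates at shrinking step sizes are combined with Richardson extrapolation until the error stops decreasing. A compact stand-alone sketch of that idea for a scalar derivative (simplified, not the Ceres code; names are illustrative):

#include <algorithm>
#include <cmath>
#include <functional>
#include <vector>

// Ridders-style estimate: build a small Richardson tableau over shrinking
// step sizes and return the extrapolated value with the smallest error.
double RiddersDerivative(const std::function<double(double)>& f, double x,
                         double initial_step = 1e-2, int max_rows = 5,
                         double shrink = 2.0) {
  std::vector<std::vector<double>> tableau(max_rows,
                                           std::vector<double>(max_rows, 0.0));
  double h = initial_step;
  tableau[0][0] = (f(x + h) - f(x - h)) / (2.0 * h);
  double best = tableau[0][0];
  double best_error = HUGE_VAL;
  for (int i = 1; i < max_rows; ++i) {
    h /= shrink;
    tableau[i][0] = (f(x + h) - f(x - h)) / (2.0 * h);
    double factor = shrink * shrink;
    for (int k = 1; k <= i; ++k) {
      // Richardson step: cancel the leading error term of the previous column.
      tableau[i][k] =
          (factor * tableau[i][k - 1] - tableau[i - 1][k - 1]) / (factor - 1.0);
      factor *= shrink * shrink;
      const double error =
          std::max(std::abs(tableau[i][k] - tableau[i][k - 1]),
                   std::abs(tableau[i][k] - tableau[i - 1][k - 1]));
      if (error <= best_error) {
        best_error = error;
        best = tableau[i][k];
      }
    }
  }
  return best;
}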
@@ -304,9 +304,7 @@ struct NumericDiff {
|
||||
// 3. Extrapolation becomes numerically unstable.
|
||||
for (int i = 0; i < options.max_num_ridders_extrapolations; ++i) {
|
||||
// Compute the numerical derivative at this step size.
|
||||
if (!EvaluateJacobianColumn(functor,
|
||||
parameter_index,
|
||||
current_step_size,
|
||||
if (!EvaluateJacobianColumn(functor, parameter_index, current_step_size,
|
||||
num_residuals,
|
||||
parameter_block_size,
|
||||
x.data(),
|
||||
@@ -329,24 +327,23 @@ struct NumericDiff {
|
||||
|
||||
// Extrapolation factor for Richardson acceleration method (see below).
|
||||
double richardson_factor = options.ridders_step_shrink_factor *
|
||||
options.ridders_step_shrink_factor;
|
||||
options.ridders_step_shrink_factor;
|
||||
for (int k = 1; k <= i; ++k) {
|
||||
// Extrapolate the various orders of finite differences using
|
||||
// the Richardson acceleration method.
|
||||
current_candidates->col(k) =
|
||||
(richardson_factor * current_candidates->col(k - 1) -
|
||||
previous_candidates->col(k - 1)) /
|
||||
(richardson_factor - 1.0);
|
||||
previous_candidates->col(k - 1)) / (richardson_factor - 1.0);
|
||||
|
||||
richardson_factor *= options.ridders_step_shrink_factor *
|
||||
options.ridders_step_shrink_factor;
|
||||
options.ridders_step_shrink_factor;
|
||||
|
||||
// Compute the difference between the previous value and the current.
|
||||
double candidate_error = std::max(
|
||||
(current_candidates->col(k) - current_candidates->col(k - 1))
|
||||
.norm(),
|
||||
(current_candidates->col(k) - previous_candidates->col(k - 1))
|
||||
.norm());
|
||||
(current_candidates->col(k) -
|
||||
current_candidates->col(k - 1)).norm(),
|
||||
(current_candidates->col(k) -
|
||||
previous_candidates->col(k - 1)).norm());
|
||||
|
||||
// If the error has decreased, update results.
|
||||
if (candidate_error <= norm_error) {
|
||||
@@ -368,9 +365,8 @@ struct NumericDiff {
|
||||
// Check to see if the current gradient estimate is numerically unstable.
|
||||
// If so, bail out and return the last stable result.
|
||||
if (i > 0) {
|
||||
double tableau_error =
|
||||
(current_candidates->col(i) - previous_candidates->col(i - 1))
|
||||
.norm();
|
||||
double tableau_error = (current_candidates->col(i) -
|
||||
previous_candidates->col(i - 1)).norm();
|
||||
|
||||
// Compare current error to the chosen candidate's error.
|
||||
if (tableau_error >= 2 * norm_error) {
|
||||
@@ -486,18 +482,14 @@ struct EvaluateJacobianForParameterBlocks<ParameterDims,
|
||||
|
||||
// End of 'recursion'. Nothing more to do.
|
||||
template <typename ParameterDims, int ParameterIdx>
|
||||
struct EvaluateJacobianForParameterBlocks<ParameterDims,
|
||||
std::integer_sequence<int>,
|
||||
struct EvaluateJacobianForParameterBlocks<ParameterDims, std::integer_sequence<int>,
|
||||
ParameterIdx> {
|
||||
template <NumericDiffMethodType method,
|
||||
int kNumResiduals,
|
||||
template <NumericDiffMethodType method, int kNumResiduals,
|
||||
typename CostFunctor>
|
||||
static bool Apply(const CostFunctor* /* NOT USED*/,
|
||||
const double* /* NOT USED*/,
|
||||
const NumericDiffOptions& /* NOT USED*/,
|
||||
int /* NOT USED*/,
|
||||
double** /* NOT USED*/,
|
||||
double** /* NOT USED*/) {
|
||||
const NumericDiffOptions& /* NOT USED*/, int /* NOT USED*/,
|
||||
double** /* NOT USED*/, double** /* NOT USED*/) {
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
extern/ceres/include/ceres/internal/port.h (vendored, 92 changes)
@@ -35,17 +35,17 @@
|
||||
#include "ceres/internal/config.h"
|
||||
|
||||
#if defined(CERES_USE_OPENMP)
|
||||
#if defined(CERES_USE_CXX_THREADS) || defined(CERES_NO_THREADS)
|
||||
#error CERES_USE_OPENMP is mutually exclusive to CERES_USE_CXX_THREADS and CERES_NO_THREADS
|
||||
#endif
|
||||
# if defined(CERES_USE_CXX_THREADS) || defined(CERES_NO_THREADS)
|
||||
# error CERES_USE_OPENMP is mutually exclusive to CERES_USE_CXX_THREADS and CERES_NO_THREADS
|
||||
# endif
|
||||
#elif defined(CERES_USE_CXX_THREADS)
|
||||
#if defined(CERES_USE_OPENMP) || defined(CERES_NO_THREADS)
|
||||
#error CERES_USE_CXX_THREADS is mutually exclusive to CERES_USE_OPENMP, CERES_USE_CXX_THREADS and CERES_NO_THREADS
|
||||
#endif
|
||||
# if defined(CERES_USE_OPENMP) || defined(CERES_NO_THREADS)
|
||||
# error CERES_USE_CXX_THREADS is mutually exclusive to CERES_USE_OPENMP, CERES_USE_CXX_THREADS and CERES_NO_THREADS
|
||||
# endif
|
||||
#elif defined(CERES_NO_THREADS)
|
||||
#if defined(CERES_USE_OPENMP) || defined(CERES_USE_CXX_THREADS)
|
||||
#error CERES_NO_THREADS is mutually exclusive to CERES_USE_OPENMP and CERES_USE_CXX_THREADS
|
||||
#endif
|
||||
# if defined(CERES_USE_OPENMP) || defined(CERES_USE_CXX_THREADS)
|
||||
# error CERES_NO_THREADS is mutually exclusive to CERES_USE_OPENMP and CERES_USE_CXX_THREADS
|
||||
# endif
|
||||
#else
|
||||
# error One of CERES_USE_OPENMP, CERES_USE_CXX_THREADS or CERES_NO_THREADS must be defined.
|
||||
#endif
|
||||
@@ -54,57 +54,37 @@
|
||||
// compiled without any sparse back-end. Verify that it has not subsequently
|
||||
// been inconsistently redefined.
|
||||
#if defined(CERES_NO_SPARSE)
|
||||
#if !defined(CERES_NO_SUITESPARSE)
|
||||
#error CERES_NO_SPARSE requires CERES_NO_SUITESPARSE.
|
||||
#endif
|
||||
#if !defined(CERES_NO_CXSPARSE)
|
||||
#error CERES_NO_SPARSE requires CERES_NO_CXSPARSE
|
||||
#endif
|
||||
#if !defined(CERES_NO_ACCELERATE_SPARSE)
|
||||
#error CERES_NO_SPARSE requires CERES_NO_ACCELERATE_SPARSE
|
||||
#endif
|
||||
#if defined(CERES_USE_EIGEN_SPARSE)
|
||||
#error CERES_NO_SPARSE requires !CERES_USE_EIGEN_SPARSE
|
||||
#endif
|
||||
# if !defined(CERES_NO_SUITESPARSE)
|
||||
# error CERES_NO_SPARSE requires CERES_NO_SUITESPARSE.
|
||||
# endif
|
||||
# if !defined(CERES_NO_CXSPARSE)
|
||||
# error CERES_NO_SPARSE requires CERES_NO_CXSPARSE
|
||||
# endif
|
||||
# if !defined(CERES_NO_ACCELERATE_SPARSE)
|
||||
# error CERES_NO_SPARSE requires CERES_NO_ACCELERATE_SPARSE
|
||||
# endif
|
||||
# if defined(CERES_USE_EIGEN_SPARSE)
|
||||
# error CERES_NO_SPARSE requires !CERES_USE_EIGEN_SPARSE
|
||||
# endif
|
||||
#endif
|
||||
|
||||
// A macro to signal which functions and classes are exported when
|
||||
// building a shared library.
|
||||
#if defined(_MSC_VER)
|
||||
#define CERES_API_SHARED_IMPORT __declspec(dllimport)
|
||||
#define CERES_API_SHARED_EXPORT __declspec(dllexport)
|
||||
#elif defined(__GNUC__)
|
||||
#define CERES_API_SHARED_IMPORT __attribute__((visibility("default")))
|
||||
#define CERES_API_SHARED_EXPORT __attribute__((visibility("default")))
|
||||
// building a DLL with MSVC.
|
||||
//
|
||||
// Note that the ordering here is important, CERES_BUILDING_SHARED_LIBRARY
|
||||
// is only defined locally when Ceres is compiled, it is never exported to
|
||||
// users. However, in order that we do not have to configure config.h
|
||||
// separately for building vs installing, if we are using MSVC and building
|
||||
// a shared library, then both CERES_BUILDING_SHARED_LIBRARY and
|
||||
// CERES_USING_SHARED_LIBRARY will be defined when Ceres is compiled.
|
||||
// Hence it is important that the check for CERES_BUILDING_SHARED_LIBRARY
|
||||
// happens first.
|
||||
#if defined(_MSC_VER) && defined(CERES_BUILDING_SHARED_LIBRARY)
|
||||
# define CERES_EXPORT __declspec(dllexport)
|
||||
#elif defined(_MSC_VER) && defined(CERES_USING_SHARED_LIBRARY)
|
||||
# define CERES_EXPORT __declspec(dllimport)
|
||||
#else
|
||||
#define CERES_API_SHARED_IMPORT
|
||||
#define CERES_API_SHARED_EXPORT
|
||||
#endif
|
||||
|
||||
// CERES_BUILDING_SHARED_LIBRARY is only defined locally when Ceres itself is
|
||||
// compiled as a shared library, it is never exported to users. In order that
|
||||
// we do not have to configure config.h separately when building Ceres as either
|
||||
// a static or dynamic library, we define both CERES_USING_SHARED_LIBRARY and
|
||||
// CERES_BUILDING_SHARED_LIBRARY when building as a shared library.
|
||||
#if defined(CERES_USING_SHARED_LIBRARY)
|
||||
#if defined(CERES_BUILDING_SHARED_LIBRARY)
|
||||
// Compiling Ceres itself as a shared library.
|
||||
#define CERES_EXPORT CERES_API_SHARED_EXPORT
|
||||
#else
|
||||
// Using Ceres as a shared library.
|
||||
#define CERES_EXPORT CERES_API_SHARED_IMPORT
|
||||
#endif
|
||||
#else
|
||||
// Ceres was compiled as a static library, export everything.
|
||||
#define CERES_EXPORT
|
||||
#endif
|
||||
|
||||
// Unit tests reach in and test internal functionality so we need a way to make
|
||||
// those symbols visible
|
||||
#ifdef CERES_EXPORT_INTERNAL_SYMBOLS
|
||||
#define CERES_EXPORT_INTERNAL CERES_EXPORT
|
||||
#else
|
||||
#define CERES_EXPORT_INTERNAL
|
||||
# define CERES_EXPORT
|
||||
#endif
|
||||
|
||||
#endif // CERES_PUBLIC_INTERNAL_PORT_H_
|
||||
|
@@ -32,7 +32,7 @@
#undef CERES_WARNINGS_DISABLED

#ifdef _MSC_VER
#pragma warning(pop)
#pragma warning( pop )
#endif

#endif  // CERES_WARNINGS_DISABLED
@@ -46,10 +46,8 @@ namespace internal {
|
||||
|
||||
// For fixed size cost functors
|
||||
template <typename Functor, typename T, int... Indices>
|
||||
inline bool VariadicEvaluateImpl(const Functor& functor,
|
||||
T const* const* input,
|
||||
T* output,
|
||||
std::false_type /*is_dynamic*/,
|
||||
inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
|
||||
T* output, std::false_type /*is_dynamic*/,
|
||||
std::integer_sequence<int, Indices...>) {
|
||||
static_assert(sizeof...(Indices),
|
||||
"Invalid number of parameter blocks. At least one parameter "
|
||||
@@ -59,31 +57,26 @@ inline bool VariadicEvaluateImpl(const Functor& functor,
|
||||
|
||||
// For dynamic sized cost functors
|
||||
template <typename Functor, typename T>
|
||||
inline bool VariadicEvaluateImpl(const Functor& functor,
|
||||
T const* const* input,
|
||||
T* output,
|
||||
std::true_type /*is_dynamic*/,
|
||||
inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
|
||||
T* output, std::true_type /*is_dynamic*/,
|
||||
std::integer_sequence<int>) {
|
||||
return functor(input, output);
|
||||
}
|
||||
|
||||
// For ceres cost functors (not ceres::CostFunction)
|
||||
template <typename ParameterDims, typename Functor, typename T>
|
||||
inline bool VariadicEvaluateImpl(const Functor& functor,
|
||||
T const* const* input,
|
||||
T* output,
|
||||
const void* /* NOT USED */) {
|
||||
inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
|
||||
T* output, const void* /* NOT USED */) {
|
||||
using ParameterBlockIndices =
|
||||
std::make_integer_sequence<int, ParameterDims::kNumParameterBlocks>;
|
||||
using IsDynamic = std::integral_constant<bool, ParameterDims::kIsDynamic>;
|
||||
return VariadicEvaluateImpl(
|
||||
functor, input, output, IsDynamic(), ParameterBlockIndices());
|
||||
return VariadicEvaluateImpl(functor, input, output, IsDynamic(),
|
||||
ParameterBlockIndices());
|
||||
}
|
||||
|
||||
// For ceres::CostFunction
|
||||
template <typename ParameterDims, typename Functor, typename T>
|
||||
inline bool VariadicEvaluateImpl(const Functor& functor,
|
||||
T const* const* input,
|
||||
inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
|
||||
T* output,
|
||||
const CostFunction* /* NOT USED */) {
|
||||
return functor.Evaluate(input, output, nullptr);
|
||||
@@ -102,8 +95,7 @@ inline bool VariadicEvaluateImpl(const Functor& functor,
// blocks. The signature of the functor must have the following signature
// 'bool()(const T* i_1, const T* i_2, ... const T* i_n, T* output)'.
template <typename ParameterDims, typename Functor, typename T>
inline bool VariadicEvaluate(const Functor& functor,
                             T const* const* input,
inline bool VariadicEvaluate(const Functor& functor, T const* const* input,
                             T* output) {
  return VariadicEvaluateImpl<ParameterDims>(functor, input, output, &functor);
}
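The comment above spells out the call-operator shape VariadicEvaluate expects of a cost functor. A minimal functor obeying it (an illustrative example, not taken from the patch):

// A two-parameter-block functor with the required call operator shape:
// bool operator()(const T* block_1, const T* block_2, T* output).
struct DistanceResidual {
  template <typename T>
  bool operator()(const T* point_a, const T* point_b, T* residual) const {
    residual[0] = point_a[0] - point_b[0];
    residual[1] = point_a[1] - point_b[1];
    return true;  // returning false signals an evaluation failure
  }
};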
@@ -73,7 +73,7 @@ struct CERES_EXPORT IterationSummary {
  bool step_is_successful = false;

  // Value of the objective function.
  double cost = 0.0;
  double cost = 0.90;

  // Change in the value of the objective function in this
  // iteration. This can be positive or negative.
extern/ceres/include/ceres/jet.h (vendored, 17 changes)
@@ -388,8 +388,6 @@ using std::cbrt;
|
||||
using std::ceil;
|
||||
using std::cos;
|
||||
using std::cosh;
|
||||
using std::erf;
|
||||
using std::erfc;
|
||||
using std::exp;
|
||||
using std::exp2;
|
||||
using std::floor;
|
||||
@@ -575,21 +573,6 @@ inline Jet<T, N> fmin(const Jet<T, N>& x, const Jet<T, N>& y) {
|
||||
return y < x ? y : x;
|
||||
}
|
||||
|
||||
// erf is defined as an integral that cannot be expressed analyticaly
|
||||
// however, the derivative is trivial to compute
|
||||
// erf(x + h) = erf(x) + h * 2*exp(-x^2)/sqrt(pi)
|
||||
template <typename T, int N>
|
||||
inline Jet<T, N> erf(const Jet<T, N>& x) {
|
||||
return Jet<T, N>(erf(x.a), x.v * M_2_SQRTPI * exp(-x.a * x.a));
|
||||
}
|
||||
|
||||
// erfc(x) = 1-erf(x)
|
||||
// erfc(x + h) = erfc(x) + h * (-2*exp(-x^2)/sqrt(pi))
|
||||
template <typename T, int N>
|
||||
inline Jet<T, N> erfc(const Jet<T, N>& x) {
|
||||
return Jet<T, N>(erfc(x.a), -x.v * M_2_SQRTPI * exp(-x.a * x.a));
|
||||
}
|
||||
|
||||
// Bessel functions of the first kind with integer order equal to 0, 1, n.
|
||||
//
|
||||
// Microsoft has deprecated the j[0,1,n]() POSIX Bessel functions in favour of
|
||||
|
@@ -90,8 +90,8 @@ namespace ceres {
//
// An example that occurs commonly in Structure from Motion problems
// is when camera rotations are parameterized using Quaternion. There,
// it is useful to only make updates orthogonal to that 4-vector
// defining the quaternion. One way to do this is to let delta be a 3
// it is useful only make updates orthogonal to that 4-vector defining
// the quaternion. One way to do this is to let delta be a 3
// dimensional vector and define Plus to be
//
//   Plus(x, delta) = [cos(|delta|), sin(|delta|) delta / |delta|] * x
@@ -99,7 +99,7 @@ namespace ceres {
// The multiplication between the two 4-vectors on the RHS is the
// standard quaternion product.
//
// Given f and a point x, optimizing f can now be restated as
// Given g and a point x, optimizing f can now be restated as
//
//   min  f(Plus(x, delta))
//  delta
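The Plus operation described in these comments maps a 3-vector increment onto the quaternion manifold. A compact stand-alone sketch of that update (illustrative only, not the Ceres QuaternionParameterization source; quaternions stored as [w, x, y, z]):

#include <cmath>

// Plus(x, delta): form the unit quaternion
// [cos(|delta|), sin(|delta|) * delta / |delta|] and compose it with x.
void QuaternionPlus(const double x[4], const double delta[3],
                    double x_plus_delta[4]) {
  const double norm_delta = std::sqrt(
      delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2]);
  if (norm_delta == 0.0) {
    for (int i = 0; i < 4; ++i) x_plus_delta[i] = x[i];
    return;
  }
  const double sin_by_norm = std::sin(norm_delta) / norm_delta;
  const double q[4] = {std::cos(norm_delta), sin_by_norm * delta[0],
                       sin_by_norm * delta[1], sin_by_norm * delta[2]};
  // Standard quaternion product q * x.
  x_plus_delta[0] = q[0] * x[0] - q[1] * x[1] - q[2] * x[2] - q[3] * x[3];
  x_plus_delta[1] = q[0] * x[1] + q[1] * x[0] + q[2] * x[3] - q[3] * x[2];
  x_plus_delta[2] = q[0] * x[2] - q[1] * x[3] + q[2] * x[0] + q[3] * x[1];
  x_plus_delta[3] = q[0] * x[3] + q[1] * x[2] - q[2] * x[1] + q[3] * x[0];
}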
@@ -306,7 +306,6 @@ class CERES_EXPORT ProductParameterization : public LocalParameterization {
|
||||
public:
|
||||
ProductParameterization(const ProductParameterization&) = delete;
|
||||
ProductParameterization& operator=(const ProductParameterization&) = delete;
|
||||
virtual ~ProductParameterization() {}
|
||||
//
|
||||
// NOTE: The constructor takes ownership of the input local
|
||||
// parameterizations.
|
||||
@@ -342,8 +341,7 @@ class CERES_EXPORT ProductParameterization : public LocalParameterization {
|
||||
bool Plus(const double* x,
|
||||
const double* delta,
|
||||
double* x_plus_delta) const override;
|
||||
bool ComputeJacobian(const double* x,
|
||||
double* jacobian) const override;
|
||||
bool ComputeJacobian(const double* x, double* jacobian) const override;
|
||||
int GlobalSize() const override { return global_size_; }
|
||||
int LocalSize() const override { return local_size_; }
|
||||
|
||||
@@ -356,8 +354,8 @@ class CERES_EXPORT ProductParameterization : public LocalParameterization {
|
||||
|
||||
} // namespace ceres
|
||||
|
||||
// clang-format off
|
||||
#include "ceres/internal/reenable_warnings.h"
|
||||
#include "ceres/internal/line_parameterization.h"
|
||||
|
||||
#endif // CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_
|
||||
|
||||
|
@@ -192,10 +192,7 @@ class NumericDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
|
||||
}
|
||||
}
|
||||
|
||||
explicit NumericDiffCostFunction(NumericDiffCostFunction&& other)
|
||||
: functor_(std::move(other.functor_)), ownership_(other.ownership_) {}
|
||||
|
||||
virtual ~NumericDiffCostFunction() {
|
||||
~NumericDiffCostFunction() {
|
||||
if (ownership_ != TAKE_OWNERSHIP) {
|
||||
functor_.release();
|
||||
}
|
||||
|
extern/ceres/include/ceres/problem.h (vendored, 48 changes)
@@ -453,15 +453,13 @@ class CERES_EXPORT Problem {
  //   problem.AddResidualBlock(new MyCostFunction, nullptr, &x);
  //
  //   double cost = 0.0;
  //   problem.Evaluate(Problem::EvaluateOptions(), &cost,
  //                    nullptr, nullptr, nullptr);
  //   problem.Evaluate(Problem::EvaluateOptions(), &cost, nullptr, nullptr, nullptr);
  //
  // The cost is evaluated at x = 1. If you wish to evaluate the
  // problem at x = 2, then
  //
  //   x = 2;
  //   problem.Evaluate(Problem::EvaluateOptions(), &cost,
  //                    nullptr, nullptr, nullptr);
  //   problem.Evaluate(Problem::EvaluateOptions(), &cost, nullptr, nullptr, nullptr);
  //
  // is the way to do so.
  //
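Assembled into a compilable fragment, the usage described in that comment block reads roughly as follows (MyCostFunction stands in for any user cost function, as in the comment):

#include "ceres/ceres.h"

void EvaluateAtTwoPoints(ceres::CostFunction* my_cost_function) {
  double x = 1.0;
  ceres::Problem problem;
  problem.AddResidualBlock(my_cost_function, nullptr, &x);

  // Total objective value at x = 1.
  double cost = 0.0;
  problem.Evaluate(ceres::Problem::EvaluateOptions(), &cost,
                   nullptr, nullptr, nullptr);

  // Move the parameter and evaluate again, now at x = 2.
  x = 2.0;
  problem.Evaluate(ceres::Problem::EvaluateOptions(), &cost,
                   nullptr, nullptr, nullptr);
}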
@@ -477,7 +475,7 @@ class CERES_EXPORT Problem {
|
||||
// at the end of an iteration during a solve.
|
||||
//
|
||||
// Note 4: If an EvaluationCallback is associated with the problem,
|
||||
// then its PrepareForEvaluation method will be called every time
|
||||
// then its PrepareForEvaluation method will be called everytime
|
||||
// this method is called with new_point = true.
|
||||
bool Evaluate(const EvaluateOptions& options,
|
||||
double* cost,
|
||||
@@ -511,41 +509,23 @@ class CERES_EXPORT Problem {
|
||||
// apply_loss_function as the name implies allows the user to switch
|
||||
// the application of the loss function on and off.
|
||||
//
|
||||
// If an EvaluationCallback is associated with the problem, then its
|
||||
// PrepareForEvaluation method will be called every time this method
|
||||
// is called with new_point = true. This conservatively assumes that
|
||||
// the user may have changed the parameter values since the previous
|
||||
// call to evaluate / solve. For improved efficiency, and only if
|
||||
// you know that the parameter values have not changed between
|
||||
// calls, see EvaluateResidualBlockAssumingParametersUnchanged().
|
||||
// WARNING: If an EvaluationCallback is associated with the problem
|
||||
// then it is the user's responsibility to call it before calling
|
||||
// this method.
|
||||
//
|
||||
// This is because, if the user calls this method multiple times, we
|
||||
// cannot tell if the underlying parameter blocks have changed
|
||||
// between calls or not. So if EvaluateResidualBlock was responsible
|
||||
// for calling the EvaluationCallback, it will have to do it
|
||||
// everytime it is called. Which makes the common case where the
|
||||
// parameter blocks do not change, inefficient. So we leave it to
|
||||
// the user to call the EvaluationCallback as needed.
|
||||
bool EvaluateResidualBlock(ResidualBlockId residual_block_id,
|
||||
bool apply_loss_function,
|
||||
double* cost,
|
||||
double* residuals,
|
||||
double** jacobians) const;
|
||||
|
||||
// Same as EvaluateResidualBlock except that if an
|
||||
// EvaluationCallback is associated with the problem, then its
|
||||
// PrepareForEvaluation method will be called every time this method
|
||||
// is called with new_point = false.
|
||||
//
|
||||
// This means, if an EvaluationCallback is associated with the
|
||||
// problem then it is the user's responsibility to call
|
||||
// PrepareForEvaluation before calling this method if necessary,
|
||||
// i.e. iff the parameter values have been changed since the last
|
||||
// call to evaluate / solve.'
|
||||
//
|
||||
// This is because, as the name implies, we assume that the
|
||||
// parameter blocks did not change since the last time
|
||||
// PrepareForEvaluation was called (via Solve, Evaluate or
|
||||
// EvaluateResidualBlock).
|
||||
bool EvaluateResidualBlockAssumingParametersUnchanged(
|
||||
ResidualBlockId residual_block_id,
|
||||
bool apply_loss_function,
|
||||
double* cost,
|
||||
double* residuals,
|
||||
double** jacobians) const;
|
||||
|
||||
private:
|
||||
friend class Solver;
|
||||
friend class Covariance;
|
||||
|
extern/ceres/include/ceres/rotation.h (vendored, 4 changes)
@@ -320,8 +320,8 @@ inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis) {
}

template <typename T>
void RotationMatrixToQuaternion(const T* R, T* quaternion) {
  RotationMatrixToQuaternion(ColumnMajorAdapter3x3(R), quaternion);
void RotationMatrixToQuaternion(const T* R, T* angle_axis) {
  RotationMatrixToQuaternion(ColumnMajorAdapter3x3(R), angle_axis);
}

// This algorithm comes from "Quaternion Calculus and Fast Animation",
extern/ceres/include/ceres/solver.h (vendored, 5 changes)
@@ -360,8 +360,7 @@ class CERES_EXPORT Solver {
    //
    // If Solver::Options::preconditioner_type == SUBSET, then
    // residual_blocks_for_subset_preconditioner must be non-empty.
    std::unordered_set<ResidualBlockId>
        residual_blocks_for_subset_preconditioner;
    std::unordered_set<ResidualBlockId> residual_blocks_for_subset_preconditioner;

    // Ceres supports using multiple dense linear algebra libraries
    // for dense matrix factorizations. Currently EIGEN and LAPACK are
@@ -839,7 +838,7 @@ class CERES_EXPORT Solver {
    int num_linear_solves = -1;

    // Time (in seconds) spent evaluating the residual vector.
    double residual_evaluation_time_in_seconds = -1.0;
    double residual_evaluation_time_in_seconds = 1.0;

    // Number of residual only evaluations.
    int num_residual_evaluations = -1;
extern/ceres/include/ceres/types.h (vendored, 53 changes)
@@ -50,7 +50,7 @@ namespace ceres {
|
||||
// delete on it upon completion.
|
||||
enum Ownership {
|
||||
DO_NOT_TAKE_OWNERSHIP,
|
||||
TAKE_OWNERSHIP,
|
||||
TAKE_OWNERSHIP
|
||||
};
|
||||
|
||||
// TODO(keir): Considerably expand the explanations of each solver type.
|
||||
@@ -185,19 +185,19 @@ enum SparseLinearAlgebraLibraryType {
|
||||
|
||||
enum DenseLinearAlgebraLibraryType {
|
||||
EIGEN,
|
||||
LAPACK,
|
||||
LAPACK
|
||||
};
|
||||
|
||||
// Logging options
|
||||
// The options get progressively noisier.
|
||||
enum LoggingType {
|
||||
SILENT,
|
||||
PER_MINIMIZER_ITERATION,
|
||||
PER_MINIMIZER_ITERATION
|
||||
};
|
||||
|
||||
enum MinimizerType {
|
||||
LINE_SEARCH,
|
||||
TRUST_REGION,
|
||||
TRUST_REGION
|
||||
};
|
||||
|
||||
enum LineSearchDirectionType {
|
||||
@@ -412,7 +412,7 @@ enum DumpFormatType {
|
||||
// specified for the number of residuals. If specified, then the
|
||||
// number of residuas for that cost function can vary at runtime.
|
||||
enum DimensionType {
|
||||
DYNAMIC = -1,
|
||||
DYNAMIC = -1
|
||||
};
|
||||
|
||||
// The differentiation method used to compute numerical derivatives in
|
||||
@@ -433,7 +433,7 @@ enum NumericDiffMethodType {
|
||||
enum LineSearchInterpolationType {
|
||||
BISECTION,
|
||||
QUADRATIC,
|
||||
CUBIC,
|
||||
CUBIC
|
||||
};
|
||||
|
||||
enum CovarianceAlgorithmType {
|
||||
@@ -448,7 +448,8 @@ enum CovarianceAlgorithmType {
|
||||
// did not write to that memory location.
|
||||
const double kImpossibleValue = 1e302;
|
||||
|
||||
CERES_EXPORT const char* LinearSolverTypeToString(LinearSolverType type);
|
||||
CERES_EXPORT const char* LinearSolverTypeToString(
|
||||
LinearSolverType type);
|
||||
CERES_EXPORT bool StringToLinearSolverType(std::string value,
|
||||
LinearSolverType* type);
|
||||
|
||||
@@ -458,23 +459,25 @@ CERES_EXPORT bool StringToPreconditionerType(std::string value,
|
||||
|
||||
CERES_EXPORT const char* VisibilityClusteringTypeToString(
|
||||
VisibilityClusteringType type);
|
||||
CERES_EXPORT bool StringToVisibilityClusteringType(
|
||||
std::string value, VisibilityClusteringType* type);
|
||||
CERES_EXPORT bool StringToVisibilityClusteringType(std::string value,
|
||||
VisibilityClusteringType* type);
|
||||
|
||||
CERES_EXPORT const char* SparseLinearAlgebraLibraryTypeToString(
|
||||
SparseLinearAlgebraLibraryType type);
|
||||
CERES_EXPORT bool StringToSparseLinearAlgebraLibraryType(
|
||||
std::string value, SparseLinearAlgebraLibraryType* type);
|
||||
std::string value,
|
||||
SparseLinearAlgebraLibraryType* type);
|
||||
|
||||
CERES_EXPORT const char* DenseLinearAlgebraLibraryTypeToString(
|
||||
DenseLinearAlgebraLibraryType type);
|
||||
CERES_EXPORT bool StringToDenseLinearAlgebraLibraryType(
|
||||
std::string value, DenseLinearAlgebraLibraryType* type);
|
||||
std::string value,
|
||||
DenseLinearAlgebraLibraryType* type);
|
||||
|
||||
CERES_EXPORT const char* TrustRegionStrategyTypeToString(
|
||||
TrustRegionStrategyType type);
|
||||
CERES_EXPORT bool StringToTrustRegionStrategyType(
|
||||
std::string value, TrustRegionStrategyType* type);
|
||||
CERES_EXPORT bool StringToTrustRegionStrategyType(std::string value,
|
||||
TrustRegionStrategyType* type);
|
||||
|
||||
CERES_EXPORT const char* DoglegTypeToString(DoglegType type);
|
||||
CERES_EXPORT bool StringToDoglegType(std::string value, DoglegType* type);
|
||||
@@ -484,39 +487,41 @@ CERES_EXPORT bool StringToMinimizerType(std::string value, MinimizerType* type);
|
||||
|
||||
CERES_EXPORT const char* LineSearchDirectionTypeToString(
|
||||
LineSearchDirectionType type);
|
||||
CERES_EXPORT bool StringToLineSearchDirectionType(
|
||||
std::string value, LineSearchDirectionType* type);
|
||||
CERES_EXPORT bool StringToLineSearchDirectionType(std::string value,
|
||||
LineSearchDirectionType* type);
|
||||
|
||||
CERES_EXPORT const char* LineSearchTypeToString(LineSearchType type);
|
||||
CERES_EXPORT bool StringToLineSearchType(std::string value,
|
||||
LineSearchType* type);
|
||||
CERES_EXPORT bool StringToLineSearchType(std::string value, LineSearchType* type);
|
||||
|
||||
CERES_EXPORT const char* NonlinearConjugateGradientTypeToString(
|
||||
NonlinearConjugateGradientType type);
|
||||
CERES_EXPORT bool StringToNonlinearConjugateGradientType(
|
||||
std::string value, NonlinearConjugateGradientType* type);
|
||||
std::string value,
|
||||
NonlinearConjugateGradientType* type);
|
||||
|
||||
CERES_EXPORT const char* LineSearchInterpolationTypeToString(
|
||||
LineSearchInterpolationType type);
|
||||
CERES_EXPORT bool StringToLineSearchInterpolationType(
|
||||
std::string value, LineSearchInterpolationType* type);
|
||||
std::string value,
|
||||
LineSearchInterpolationType* type);
|
||||
|
||||
CERES_EXPORT const char* CovarianceAlgorithmTypeToString(
|
||||
CovarianceAlgorithmType type);
|
||||
CERES_EXPORT bool StringToCovarianceAlgorithmType(
|
||||
std::string value, CovarianceAlgorithmType* type);
|
||||
std::string value,
|
||||
CovarianceAlgorithmType* type);
|
||||
|
||||
CERES_EXPORT const char* NumericDiffMethodTypeToString(
|
||||
NumericDiffMethodType type);
|
||||
CERES_EXPORT bool StringToNumericDiffMethodType(std::string value,
|
||||
NumericDiffMethodType* type);
|
||||
CERES_EXPORT bool StringToNumericDiffMethodType(
|
||||
std::string value,
|
||||
NumericDiffMethodType* type);
|
||||
|
||||
CERES_EXPORT const char* LoggingTypeToString(LoggingType type);
|
||||
CERES_EXPORT bool StringtoLoggingType(std::string value, LoggingType* type);
|
||||
|
||||
CERES_EXPORT const char* DumpFormatTypeToString(DumpFormatType type);
|
||||
CERES_EXPORT bool StringtoDumpFormatType(std::string value,
|
||||
DumpFormatType* type);
|
||||
CERES_EXPORT bool StringtoDumpFormatType(std::string value, DumpFormatType* type);
|
||||
CERES_EXPORT bool StringtoDumpFormatType(std::string value, LoggingType* type);
|
||||
|
||||
CERES_EXPORT const char* TerminationTypeToString(TerminationType type);
|
||||
|
extern/ceres/include/ceres/version.h (vendored, 7 changes)
@@ -41,9 +41,8 @@
#define CERES_TO_STRING(x) CERES_TO_STRING_HELPER(x)

// The Ceres version as a string; for example "1.9.0".
#define CERES_VERSION_STRING                                          \
  CERES_TO_STRING(CERES_VERSION_MAJOR)                                \
  "." CERES_TO_STRING(CERES_VERSION_MINOR) "." CERES_TO_STRING(       \
      CERES_VERSION_REVISION)
#define CERES_VERSION_STRING CERES_TO_STRING(CERES_VERSION_MAJOR) "." \
                             CERES_TO_STRING(CERES_VERSION_MINOR) "." \
                             CERES_TO_STRING(CERES_VERSION_REVISION)

#endif  // CERES_PUBLIC_VERSION_H_
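Both variants of CERES_VERSION_STRING rely on the usual two-level stringification trick: the helper macro forces the version numbers to expand before they are stringized. A minimal stand-alone illustration of the pattern (hypothetical macro names, not the Ceres ones):

#include <cstdio>

#define MY_MAJOR 2
#define MY_MINOR 1

// Two-level expansion: STR_HELPER alone would produce "MY_MAJOR"; going
// through STR expands the argument first, producing "2".
#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)
#define MY_VERSION_STRING STR(MY_MAJOR) "." STR(MY_MINOR)

int main() {
  std::printf("%s\n", MY_VERSION_STRING);  // prints "2.1"
  return 0;
}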
extern/ceres/internal/ceres/accelerate_sparse.cc (vendored, 87 changes)
@@ -33,19 +33,18 @@
|
||||
|
||||
#ifndef CERES_NO_ACCELERATE_SPARSE
|
||||
|
||||
#include "ceres/accelerate_sparse.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/accelerate_sparse.h"
|
||||
#include "ceres/compressed_col_sparse_matrix_utils.h"
|
||||
#include "ceres/compressed_row_sparse_matrix.h"
|
||||
#include "ceres/triplet_sparse_matrix.h"
|
||||
#include "glog/logging.h"
|
||||
|
||||
#define CASESTR(x) \
|
||||
case x: \
|
||||
return #x
|
||||
#define CASESTR(x) case x: return #x
|
||||
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
@@ -69,7 +68,7 @@ const char* SparseStatusToString(SparseStatus_t status) {
|
||||
// aligned to kAccelerateRequiredAlignment and returns a pointer to the
|
||||
// aligned start.
|
||||
void* ResizeForAccelerateAlignment(const size_t required_size,
|
||||
std::vector<uint8_t>* workspace) {
|
||||
std::vector<uint8_t> *workspace) {
|
||||
// As per the Accelerate documentation, all workspace memory passed to the
|
||||
// sparse solver functions must be 16-byte aligned.
|
||||
constexpr int kAccelerateRequiredAlignment = 16;
|
||||
@@ -81,28 +80,29 @@ void* ResizeForAccelerateAlignment(const size_t required_size,
|
||||
size_t size_from_aligned_start = workspace->size();
|
||||
void* aligned_solve_workspace_start =
|
||||
reinterpret_cast<void*>(workspace->data());
|
||||
aligned_solve_workspace_start = std::align(kAccelerateRequiredAlignment,
|
||||
required_size,
|
||||
aligned_solve_workspace_start,
|
||||
size_from_aligned_start);
|
||||
aligned_solve_workspace_start =
|
||||
std::align(kAccelerateRequiredAlignment,
|
||||
required_size,
|
||||
aligned_solve_workspace_start,
|
||||
size_from_aligned_start);
|
||||
CHECK(aligned_solve_workspace_start != nullptr)
|
||||
<< "required_size: " << required_size
|
||||
<< ", workspace size: " << workspace->size();
|
||||
return aligned_solve_workspace_start;
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
template<typename Scalar>
|
||||
void AccelerateSparse<Scalar>::Solve(NumericFactorization* numeric_factor,
|
||||
DenseVector* rhs_and_solution) {
|
||||
// From SparseSolve() documentation in Solve.h
|
||||
const int required_size = numeric_factor->solveWorkspaceRequiredStatic +
|
||||
numeric_factor->solveWorkspaceRequiredPerRHS;
|
||||
SparseSolve(*numeric_factor,
|
||||
*rhs_and_solution,
|
||||
const int required_size =
|
||||
numeric_factor->solveWorkspaceRequiredStatic +
|
||||
numeric_factor->solveWorkspaceRequiredPerRHS;
|
||||
SparseSolve(*numeric_factor, *rhs_and_solution,
|
||||
ResizeForAccelerateAlignment(required_size, &solve_workspace_));
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
template<typename Scalar>
|
||||
typename AccelerateSparse<Scalar>::ASSparseMatrix
|
||||
AccelerateSparse<Scalar>::CreateSparseMatrixTransposeView(
|
||||
CompressedRowSparseMatrix* A) {
|
||||
@@ -112,7 +112,7 @@ AccelerateSparse<Scalar>::CreateSparseMatrixTransposeView(
|
||||
//
|
||||
// Accelerate's columnStarts is a long*, not an int*. These types might be
|
||||
// different (e.g. ARM on iOS) so always make a copy.
|
||||
column_starts_.resize(A->num_rows() + 1); // +1 for final column length.
|
||||
column_starts_.resize(A->num_rows() +1); // +1 for final column length.
|
||||
std::copy_n(A->rows(), column_starts_.size(), &column_starts_[0]);
|
||||
|
||||
ASSparseMatrix At;
|
||||
@@ -136,31 +136,29 @@ AccelerateSparse<Scalar>::CreateSparseMatrixTransposeView(
|
||||
return At;
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
template<typename Scalar>
|
||||
typename AccelerateSparse<Scalar>::SymbolicFactorization
|
||||
AccelerateSparse<Scalar>::AnalyzeCholesky(ASSparseMatrix* A) {
|
||||
return SparseFactor(SparseFactorizationCholesky, A->structure);
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
template<typename Scalar>
|
||||
typename AccelerateSparse<Scalar>::NumericFactorization
|
||||
AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
|
||||
SymbolicFactorization* symbolic_factor) {
|
||||
return SparseFactor(*symbolic_factor, *A);
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
template<typename Scalar>
|
||||
void AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
|
||||
NumericFactorization* numeric_factor) {
|
||||
// From SparseRefactor() documentation in Solve.h
|
||||
const int required_size =
|
||||
std::is_same<Scalar, double>::value
|
||||
? numeric_factor->symbolicFactorization.workspaceSize_Double
|
||||
: numeric_factor->symbolicFactorization.workspaceSize_Float;
|
||||
return SparseRefactor(
|
||||
*A,
|
||||
numeric_factor,
|
||||
ResizeForAccelerateAlignment(required_size, &factorization_workspace_));
|
||||
const int required_size = std::is_same<Scalar, double>::value
|
||||
? numeric_factor->symbolicFactorization.workspaceSize_Double
|
||||
: numeric_factor->symbolicFactorization.workspaceSize_Float;
|
||||
return SparseRefactor(*A, numeric_factor,
|
||||
ResizeForAccelerateAlignment(required_size,
|
||||
&factorization_workspace_));
|
||||
}
|
||||
|
||||
// Instantiate only for the specific template types required/supported s/t the
|
||||
@@ -168,33 +166,34 @@ void AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
|
||||
template class AccelerateSparse<double>;
|
||||
template class AccelerateSparse<float>;
|
||||
|
||||
template <typename Scalar>
|
||||
std::unique_ptr<SparseCholesky> AppleAccelerateCholesky<Scalar>::Create(
|
||||
OrderingType ordering_type) {
|
||||
template<typename Scalar>
|
||||
std::unique_ptr<SparseCholesky>
|
||||
AppleAccelerateCholesky<Scalar>::Create(OrderingType ordering_type) {
|
||||
return std::unique_ptr<SparseCholesky>(
|
||||
new AppleAccelerateCholesky<Scalar>(ordering_type));
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
template<typename Scalar>
|
||||
AppleAccelerateCholesky<Scalar>::AppleAccelerateCholesky(
|
||||
const OrderingType ordering_type)
|
||||
: ordering_type_(ordering_type) {}
|
||||
|
||||
template <typename Scalar>
|
||||
template<typename Scalar>
|
||||
AppleAccelerateCholesky<Scalar>::~AppleAccelerateCholesky() {
|
||||
FreeSymbolicFactorization();
|
||||
FreeNumericFactorization();
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
template<typename Scalar>
|
||||
CompressedRowSparseMatrix::StorageType
|
||||
AppleAccelerateCholesky<Scalar>::StorageType() const {
|
||||
return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
LinearSolverTerminationType AppleAccelerateCholesky<Scalar>::Factorize(
|
||||
CompressedRowSparseMatrix* lhs, std::string* message) {
|
||||
template<typename Scalar>
|
||||
LinearSolverTerminationType
|
||||
AppleAccelerateCholesky<Scalar>::Factorize(CompressedRowSparseMatrix* lhs,
|
||||
std::string* message) {
|
||||
CHECK_EQ(lhs->storage_type(), StorageType());
|
||||
if (lhs == NULL) {
|
||||
*message = "Failure: Input lhs is NULL.";
|
||||
@@ -235,9 +234,11 @@ LinearSolverTerminationType AppleAccelerateCholesky<Scalar>::Factorize(
|
||||
return LINEAR_SOLVER_SUCCESS;
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
LinearSolverTerminationType AppleAccelerateCholesky<Scalar>::Solve(
|
||||
const double* rhs, double* solution, std::string* message) {
|
||||
template<typename Scalar>
|
||||
LinearSolverTerminationType
|
||||
AppleAccelerateCholesky<Scalar>::Solve(const double* rhs,
|
||||
double* solution,
|
||||
std::string* message) {
|
||||
CHECK_EQ(numeric_factor_->status, SparseStatusOK)
|
||||
<< "Solve called without a call to Factorize first ("
|
||||
<< SparseStatusToString(numeric_factor_->status) << ").";
|
||||
@@ -261,7 +262,7 @@ LinearSolverTerminationType AppleAccelerateCholesky<Scalar>::Solve(
|
||||
return LINEAR_SOLVER_SUCCESS;
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
template<typename Scalar>
|
||||
void AppleAccelerateCholesky<Scalar>::FreeSymbolicFactorization() {
|
||||
if (symbolic_factor_) {
|
||||
SparseCleanup(*symbolic_factor_);
|
||||
@@ -269,7 +270,7 @@ void AppleAccelerateCholesky<Scalar>::FreeSymbolicFactorization() {
|
||||
}
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
template<typename Scalar>
|
||||
void AppleAccelerateCholesky<Scalar>::FreeNumericFactorization() {
|
||||
if (numeric_factor_) {
|
||||
SparseCleanup(*numeric_factor_);
|
||||
@@ -282,7 +283,7 @@ void AppleAccelerateCholesky<Scalar>::FreeNumericFactorization() {
|
||||
template class AppleAccelerateCholesky<double>;
|
||||
template class AppleAccelerateCholesky<float>;
|
||||
|
||||
} // namespace internal
|
||||
} // namespace ceres
|
||||
}
|
||||
}
|
||||
|
||||
#endif // CERES_NO_ACCELERATE_SPARSE
|
||||
|
extern/ceres/internal/ceres/accelerate_sparse.h (vendored, 31 changes)
@@ -40,9 +40,9 @@
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "Accelerate.h"
|
||||
#include "ceres/linear_solver.h"
|
||||
#include "ceres/sparse_cholesky.h"
|
||||
#include "Accelerate.h"
|
||||
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
@@ -50,10 +50,11 @@ namespace internal {
|
||||
class CompressedRowSparseMatrix;
|
||||
class TripletSparseMatrix;
|
||||
|
||||
template <typename Scalar>
|
||||
struct SparseTypesTrait {};
|
||||
template<typename Scalar>
|
||||
struct SparseTypesTrait {
|
||||
};
|
||||
|
||||
template <>
|
||||
template<>
|
||||
struct SparseTypesTrait<double> {
|
||||
typedef DenseVector_Double DenseVector;
|
||||
typedef SparseMatrix_Double SparseMatrix;
|
||||
@@ -61,7 +62,7 @@ struct SparseTypesTrait<double> {
|
||||
typedef SparseOpaqueFactorization_Double NumericFactorization;
|
||||
};
|
||||
|
||||
template <>
|
||||
template<>
|
||||
struct SparseTypesTrait<float> {
|
||||
typedef DenseVector_Float DenseVector;
|
||||
typedef SparseMatrix_Float SparseMatrix;
|
||||
@@ -69,16 +70,14 @@ struct SparseTypesTrait<float> {
|
||||
typedef SparseOpaqueFactorization_Float NumericFactorization;
|
||||
};
|
||||
|
||||
template <typename Scalar>
|
||||
template<typename Scalar>
|
||||
class AccelerateSparse {
|
||||
public:
|
||||
using DenseVector = typename SparseTypesTrait<Scalar>::DenseVector;
|
||||
// Use ASSparseMatrix to avoid collision with ceres::internal::SparseMatrix.
|
||||
using ASSparseMatrix = typename SparseTypesTrait<Scalar>::SparseMatrix;
|
||||
using SymbolicFactorization =
|
||||
typename SparseTypesTrait<Scalar>::SymbolicFactorization;
|
||||
using NumericFactorization =
|
||||
typename SparseTypesTrait<Scalar>::NumericFactorization;
|
||||
using SymbolicFactorization = typename SparseTypesTrait<Scalar>::SymbolicFactorization;
|
||||
using NumericFactorization = typename SparseTypesTrait<Scalar>::NumericFactorization;
|
||||
|
||||
// Solves a linear system given its symbolic (reference counted within
|
||||
// NumericFactorization) and numeric factorization.
|
||||
@@ -110,7 +109,7 @@ class AccelerateSparse {
|
||||
|
||||
// An implementation of SparseCholesky interface using Apple's Accelerate
|
||||
// framework.
|
||||
template <typename Scalar>
|
||||
template<typename Scalar>
|
||||
class AppleAccelerateCholesky : public SparseCholesky {
|
||||
public:
|
||||
// Factory
|
||||
@@ -123,7 +122,7 @@ class AppleAccelerateCholesky : public SparseCholesky {
|
||||
std::string* message) final;
|
||||
LinearSolverTerminationType Solve(const double* rhs,
|
||||
double* solution,
|
||||
std::string* message) final;
|
||||
std::string* message) final ;
|
||||
|
||||
private:
|
||||
AppleAccelerateCholesky(const OrderingType ordering_type);
|
||||
@@ -133,15 +132,15 @@ class AppleAccelerateCholesky : public SparseCholesky {
|
||||
const OrderingType ordering_type_;
|
||||
AccelerateSparse<Scalar> as_;
|
||||
std::unique_ptr<typename AccelerateSparse<Scalar>::SymbolicFactorization>
|
||||
symbolic_factor_;
|
||||
symbolic_factor_;
|
||||
std::unique_ptr<typename AccelerateSparse<Scalar>::NumericFactorization>
|
||||
numeric_factor_;
|
||||
numeric_factor_;
|
||||
// Copy of rhs/solution if Scalar != double (necessitating a copy).
|
||||
Eigen::Matrix<Scalar, Eigen::Dynamic, 1> scalar_rhs_and_solution_;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace ceres
|
||||
}
|
||||
}
|
||||
|
||||
#endif // CERES_NO_ACCELERATE_SPARSE
|
||||
|
||||
|
extern/ceres/internal/ceres/array_utils.cc (vendored, 14 changes)
@@ -35,7 +35,6 @@
|
||||
#include <cstddef>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/stringprintf.h"
|
||||
#include "ceres/types.h"
|
||||
namespace ceres {
|
||||
@@ -46,7 +45,7 @@ using std::string;
|
||||
bool IsArrayValid(const int size, const double* x) {
|
||||
if (x != NULL) {
|
||||
for (int i = 0; i < size; ++i) {
|
||||
if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
|
||||
if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -60,7 +59,7 @@ int FindInvalidValue(const int size, const double* x) {
|
||||
}
|
||||
|
||||
for (int i = 0; i < size; ++i) {
|
||||
if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
|
||||
if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
@@ -93,13 +92,14 @@ void AppendArrayToString(const int size, const double* x, string* result) {
|
||||
void MapValuesToContiguousRange(const int size, int* array) {
|
||||
std::vector<int> unique_values(array, array + size);
|
||||
std::sort(unique_values.begin(), unique_values.end());
|
||||
unique_values.erase(std::unique(unique_values.begin(), unique_values.end()),
|
||||
unique_values.erase(std::unique(unique_values.begin(),
|
||||
unique_values.end()),
|
||||
unique_values.end());
|
||||
|
||||
for (int i = 0; i < size; ++i) {
|
||||
array[i] =
|
||||
std::lower_bound(unique_values.begin(), unique_values.end(), array[i]) -
|
||||
unique_values.begin();
|
||||
array[i] = std::lower_bound(unique_values.begin(),
|
||||
unique_values.end(),
|
||||
array[i]) - unique_values.begin();
|
||||
}
|
||||
}
|
||||
|
||||
|
extern/ceres/internal/ceres/array_utils.h (vendored, 13 changes)
@@ -44,7 +44,6 @@
|
||||
#define CERES_INTERNAL_ARRAY_UTILS_H_
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "ceres/internal/port.h"
|
||||
|
||||
namespace ceres {
|
||||
@@ -52,22 +51,20 @@ namespace internal {
|
||||
|
||||
// Fill the array x with an impossible value that the user code is
|
||||
// never expected to compute.
|
||||
CERES_EXPORT_INTERNAL void InvalidateArray(int size, double* x);
|
||||
void InvalidateArray(int size, double* x);
|
||||
|
||||
// Check if all the entries of the array x are valid, i.e. all the
|
||||
// values in the array should be finite and none of them should be
|
||||
// equal to the "impossible" value used by InvalidateArray.
|
||||
CERES_EXPORT_INTERNAL bool IsArrayValid(int size, const double* x);
|
||||
bool IsArrayValid(int size, const double* x);
|
||||
|
||||
// If the array contains an invalid value, return the index for it,
|
||||
// otherwise return size.
|
||||
CERES_EXPORT_INTERNAL int FindInvalidValue(const int size, const double* x);
|
||||
int FindInvalidValue(const int size, const double* x);
|
||||
|
||||
// Utility routine to print an array of doubles to a string. If the
|
||||
// array pointer is NULL, it is treated as an array of zeros.
|
||||
CERES_EXPORT_INTERNAL void AppendArrayToString(const int size,
|
||||
const double* x,
|
||||
std::string* result);
|
||||
void AppendArrayToString(const int size, const double* x, std::string* result);
|
||||
|
||||
// This routine takes an array of integer values, sorts and uniques
// them and then maps each value in the array to its position in the
@@ -82,7 +79,7 @@ CERES_EXPORT_INTERNAL void AppendArrayToString(const int size,
// gets mapped to
//
//   [1 0 2 3 0 1 3]
CERES_EXPORT_INTERNAL void MapValuesToContiguousRange(int size, int* array);
void MapValuesToContiguousRange(int size, int* array);

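Tying this declaration to the sort/unique/lower_bound implementation shown in array_utils.cc earlier in the diff, the routine relabels arbitrary integer ids with dense indices. A small usage sketch (input values and the include path are assumptions for illustration):

#include "ceres/array_utils.h"

void RelabelBlockIds() {
  // After the call each value is replaced by its rank among the distinct
  // values, e.g. {7, 3, 7, 10} becomes {1, 0, 1, 2}.
  int ids[] = {7, 3, 7, 10};
  ceres::internal::MapValuesToContiguousRange(4, ids);
}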
} // namespace internal
|
||||
} // namespace ceres
|
||||
|
extern/ceres/internal/ceres/blas.cc (vendored, 1 change)
@@ -29,7 +29,6 @@
|
||||
// Author: sameeragarwal@google.com (Sameer Agarwal)
|
||||
|
||||
#include "ceres/blas.h"
|
||||
|
||||
#include "ceres/internal/port.h"
|
||||
#include "glog/logging.h"
|
||||
|
||||
|
@@ -31,7 +31,6 @@
|
||||
#include "ceres/block_evaluate_preparer.h"
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/block_sparse_matrix.h"
|
||||
#include "ceres/casts.h"
|
||||
#include "ceres/parameter_block.h"
|
||||
@@ -54,8 +53,10 @@ void BlockEvaluatePreparer::Prepare(const ResidualBlock* residual_block,
|
||||
double** jacobians) {
|
||||
// If the overall jacobian is not available, use the scratch space.
|
||||
if (jacobian == NULL) {
|
||||
scratch_evaluate_preparer_.Prepare(
|
||||
residual_block, residual_block_index, jacobian, jacobians);
|
||||
scratch_evaluate_preparer_.Prepare(residual_block,
|
||||
residual_block_index,
|
||||
jacobian,
|
||||
jacobians);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@@ -30,9 +30,9 @@
|
||||
|
||||
#include "ceres/block_jacobi_preconditioner.h"
|
||||
|
||||
#include "ceres/block_random_access_diagonal_matrix.h"
|
||||
#include "ceres/block_sparse_matrix.h"
|
||||
#include "ceres/block_structure.h"
|
||||
#include "ceres/block_random_access_diagonal_matrix.h"
|
||||
#include "ceres/casts.h"
|
||||
#include "ceres/internal/eigen.h"
|
||||
|
||||
@@ -65,11 +65,13 @@ bool BlockJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
       const int col_block_size = bs->cols[block_id].size;

       int r, c, row_stride, col_stride;
-      CellInfo* cell_info =
-          m_->GetCell(block_id, block_id, &r, &c, &row_stride, &col_stride);
+      CellInfo* cell_info = m_->GetCell(block_id, block_id,
+                                        &r, &c,
+                                        &row_stride, &col_stride);
       MatrixRef m(cell_info->values, row_stride, col_stride);
-      ConstMatrixRef b(
-          values + cells[j].position, row_block_size, col_block_size);
+      ConstMatrixRef b(values + cells[j].position,
+                       row_block_size,
+                       col_block_size);
       m.block(r, c, col_block_size, col_block_size) += b.transpose() * b;
     }
   }
@@ -80,7 +82,9 @@ bool BlockJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
     for (int i = 0; i < bs->cols.size(); ++i) {
       const int block_size = bs->cols[i].size;
       int r, c, row_stride, col_stride;
-      CellInfo* cell_info = m_->GetCell(i, i, &r, &c, &row_stride, &col_stride);
+      CellInfo* cell_info = m_->GetCell(i, i,
+                                        &r, &c,
+                                        &row_stride, &col_stride);
       MatrixRef m(cell_info->values, row_stride, col_stride);
       m.block(r, c, block_size, block_size).diagonal() +=
           ConstVectorRef(D + position, block_size).array().square().matrix();
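Formatting aside, the two hunks above contain the whole of the preconditioner update: each diagonal cell accumulates b.transpose() * b over the Jacobian blocks b that touch its parameter block, and the squared entries of D are then added to the diagonal (the D^TD term mentioned in the header comment below). Here is a dense, self-contained Eigen sketch of the same computation; the function name is invented, and the vendored code works cell-by-cell on a BlockSparseMatrix rather than on a dense Jacobian.

#include <vector>

#include <Eigen/Dense>

// Build the inverted diagonal blocks of a block-Jacobi preconditioner for
// J^T J + D^T D. `col_blocks` holds the column-block sizes of the dense
// Jacobian J, and D is the regularization vector from the hunks above.
static std::vector<Eigen::MatrixXd> BlockJacobiSketch(
    const Eigen::MatrixXd& J,
    const Eigen::VectorXd& D,
    const std::vector<int>& col_blocks) {
  std::vector<Eigen::MatrixXd> inverse_blocks;
  int position = 0;
  for (const int block_size : col_blocks) {
    // Accumulate b^T b for this parameter block; with a dense J this is just
    // the corresponding column strip multiplied by its own transpose.
    const Eigen::MatrixXd b = J.middleCols(position, block_size);
    Eigen::MatrixXd m = b.transpose() * b;
    // Add the D^T D diagonal term, i.e. the squared entries of D.
    m.diagonal() += D.segment(position, block_size).array().square().matrix();
    // Invert each diagonal block; applying the preconditioner is then a
    // block-wise multiply of a vector segment with these inverses.
    inverse_blocks.push_back(m.inverse());
    position += block_size;
  }
  return inverse_blocks;
}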
@@ -32,9 +32,7 @@
 #define CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_

 #include <memory>
-
 #include "ceres/block_random_access_diagonal_matrix.h"
-#include "ceres/internal/port.h"
 #include "ceres/preconditioner.h"

 namespace ceres {
@@ -53,8 +51,7 @@ struct CompressedRowBlockStructure;
 // update the matrix by running Update(A, D). The values of the matrix A are
 // inspected to construct the preconditioner. The vector D is applied as the
 // D^TD diagonal term.
-class CERES_EXPORT_INTERNAL BlockJacobiPreconditioner
-    : public BlockSparseMatrixPreconditioner {
+class BlockJacobiPreconditioner : public BlockSparseMatrixPreconditioner {
  public:
   // A must remain valid while the BlockJacobiPreconditioner is.
   explicit BlockJacobiPreconditioner(const BlockSparseMatrix& A);
@@ -32,11 +32,11 @@

 #include "ceres/block_evaluate_preparer.h"
 #include "ceres/block_sparse_matrix.h"
-#include "ceres/internal/eigen.h"
-#include "ceres/internal/port.h"
 #include "ceres/parameter_block.h"
 #include "ceres/program.h"
 #include "ceres/residual_block.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"

 namespace ceres {
 namespace internal {
@@ -39,7 +39,6 @@
 #define CERES_INTERNAL_BLOCK_JACOBIAN_WRITER_H_

 #include <vector>
-
 #include "ceres/evaluator.h"
 #include "ceres/internal/port.h"

@@ -53,7 +52,8 @@ class SparseMatrix;
 // TODO(sameeragarwal): This class needs documemtation.
 class BlockJacobianWriter {
  public:
-  BlockJacobianWriter(const Evaluator::Options& options, Program* program);
+  BlockJacobianWriter(const Evaluator::Options& options,
+                      Program* program);

   // JacobianWriter interface.

@@ -31,7 +31,6 @@
 #include "ceres/block_random_access_dense_matrix.h"

 #include <vector>
-
 #include "ceres/internal/eigen.h"
 #include "glog/logging.h"

@@ -60,7 +59,8 @@ BlockRandomAccessDenseMatrix::BlockRandomAccessDenseMatrix(

 // Assume that the user does not hold any locks on any cell blocks
 // when they are calling SetZero.
-BlockRandomAccessDenseMatrix::~BlockRandomAccessDenseMatrix() {}
+BlockRandomAccessDenseMatrix::~BlockRandomAccessDenseMatrix() {
+}

 CellInfo* BlockRandomAccessDenseMatrix::GetCell(const int row_block_id,
                                                 const int col_block_id,
@@ -31,10 +31,11 @@
 #ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_
 #define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_

+#include "ceres/block_random_access_matrix.h"
+
 #include <memory>
 #include <vector>

-#include "ceres/block_random_access_matrix.h"
 #include "ceres/internal/port.h"

 namespace ceres {
@@ -50,8 +51,7 @@ namespace internal {
 // pair.
 //
 // ReturnCell is a nop.
-class CERES_EXPORT_INTERNAL BlockRandomAccessDenseMatrix
-    : public BlockRandomAccessMatrix {
+class BlockRandomAccessDenseMatrix : public BlockRandomAccessMatrix {
  public:
   // blocks is a vector of block sizes. The resulting matrix has
   // blocks.size() * blocks.size() cells.
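The class comment above, together with the GetCell() signature in the earlier hunk, describes a dense matrix addressed cell-by-cell: blocks.size() * blocks.size() cells share one num_rows x num_rows buffer, and a cell is identified by the offsets of its row and column blocks plus a common stride. A small self-contained sketch of that access pattern follows; the block sizes are made up, and no claim is made that this matches the vendored layout or storage order.

#include <cstddef>
#include <vector>

#include <Eigen/Dense>

// Simplified illustration of a dense block "random access" layout: all cells
// live in one dense num_rows x num_rows buffer, and a cell is addressed by the
// offsets of its row and column blocks, with the stride equal to num_rows.
int main() {
  const std::vector<int> blocks = {2, 3, 1};  // Hypothetical block sizes.

  // Offset of each block and the total matrix size.
  std::vector<int> offsets(blocks.size(), 0);
  int num_rows = 0;
  for (std::size_t i = 0; i < blocks.size(); ++i) {
    offsets[i] = num_rows;
    num_rows += blocks[i];
  }

  // One dense backing store shared by every cell.
  std::vector<double> values(num_rows * num_rows, 0.0);
  Eigen::Map<Eigen::MatrixXd> m(values.data(), num_rows, num_rows);

  // The equivalent of asking for cell (1, 2): the cell's origin plus the
  // common stride is all a caller needs to view and write the sub-block.
  const int row_block_id = 1;
  const int col_block_id = 2;
  m.block(offsets[row_block_id], offsets[col_block_id],
          blocks[row_block_id], blocks[col_block_id]).setConstant(1.0);

  return 0;
}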
Some files were not shown because too many files have changed in this diff.