Compare commits


2 Commits

Author SHA1 Message Date
2cbc328bdc Build bot release pipeline
* downloads builds from builder.blender.org
* generates checksums (filenames still need to be cleaned up; see the sketch after this entry)
* creates a source archive
* uploads everything to download.blender.org

Next steps
* place the builds in the correct folder
* fix file permissions on download.blender.org
* snap package
* steam packages
* release notes

TODO:
* add more generic functions in master.cfg
* use a separate folder per build
2020-07-03 16:19:50 +02:00
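A minimal sketch of the checksum step described above, written as a standalone Python helper rather than the actual buildbot master.cfg logic (the function name, the manifest filename, and the choice of SHA-256 in sha256sum format are illustrative assumptions, not taken from this repository):

import hashlib
from pathlib import Path

def write_checksums(build_dir, manifest_name="blender.sha256"):
    # Hypothetical helper mirroring the "generates checksums" step;
    # the real pipeline steps live in buildbot's master.cfg.
    build_path = Path(build_dir)
    lines = []
    for artifact in sorted(build_path.iterdir()):
        if not artifact.is_file() or artifact.name == manifest_name:
            continue
        # read_bytes() keeps the sketch short; a real step would hash
        # large release archives in chunks instead.
        digest = hashlib.sha256(artifact.read_bytes()).hexdigest()
        # Two spaces between hash and name, matching `sha256sum` output.
        lines.append(f"{digest}  {artifact.name}")
    manifest = build_path / manifest_name
    manifest.write_text("\n".join(lines) + "\n")
    return manifest

A manifest in this format can be verified on the download side with `sha256sum -c blender.sha256`.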
314783f2e7 BuildbotLTS: Project Documentation
Added README.md containing an overview of the project.
2020-07-02 16:56:26 +02:00
2949 changed files with 43323 additions and 62446 deletions

View File

@@ -1,44 +0,0 @@
Checks: >
-*,
readability-*,
-readability-uppercase-literal-suffix,
-readability-magic-numbers,
-readability-isolate-declaration,
-readability-convert-member-functions-to-static,
-readability-implicit-bool-conversion,
-readability-avoid-const-params-in-decls,
-readability-simplify-boolean-expr,
-readability-make-member-function-const,
-readability-misleading-indentation,
-readability-else-after-return,
-readability-inconsistent-declaration-parameter-name,
-readability-redundant-preprocessor,
-readability-function-size,
-readability-function-size,
-readability-redundant-member-init,
-readability-const-return-type,
-readability-static-accessed-through-instance,
-readability-redundant-declaration,
-readability-qualified-auto,
-readability-use-anyofallof,
bugprone-*,
-bugprone-narrowing-conversions,
-bugprone-unhandled-self-assignment,
-bugprone-branch-clone,
-bugprone-macro-parentheses,
-bugprone-reserved-identifier,
-bugprone-sizeof-expression,
-bugprone-integer-division,
-bugprone-incorrect-roundings,
-bugprone-suspicious-string-compare,
-bugprone-not-null-terminated-result,
-bugprone-suspicious-missing-comma,
-bugprone-parent-virtual-call,
-bugprone-infinite-loop,
-bugprone-copy-constructor-init,
WarningsAsErrors: '*'

View File

@@ -41,7 +41,7 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})
endif()
endif()
cmake_minimum_required(VERSION 3.10)
cmake_minimum_required(VERSION 3.5)
# Prefer LEGACY OpenGL to be compatible with all the existing releases and
# platforms which don't have GLVND yet. Only do it if preference was not set
@@ -214,8 +214,6 @@ if(WITH_GHOST_X11)
option(WITH_GHOST_XDND "Enable drag'n'drop support on X11 using XDND protocol" ON)
endif()
option(WITH_GMP "Use the gmp library for more accurate booleans" OFF)
# Misc...
option(WITH_HEADLESS "Build without graphical support (renderfarm, server mode only)" OFF)
mark_as_advanced(WITH_HEADLESS)
@@ -369,7 +367,7 @@ option(WITH_CYCLES_CUDA_BINARIES "Build Cycles CUDA binaries" OFF)
option(WITH_CYCLES_CUBIN_COMPILER "Build cubins with nvrtc based compiler instead of nvcc" OFF)
option(WITH_CYCLES_CUDA_BUILD_SERIAL "Build cubins one after another (useful on machines with limited RAM)" OFF)
mark_as_advanced(WITH_CYCLES_CUDA_BUILD_SERIAL)
set(CYCLES_CUDA_BINARIES_ARCH sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 sm_70 sm_75 compute_75 CACHE STRING "CUDA architectures to build binaries for")
set(CYCLES_CUDA_BINARIES_ARCH sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 sm_70 sm_75 CACHE STRING "CUDA architectures to build binaries for")
mark_as_advanced(CYCLES_CUDA_BINARIES_ARCH)
unset(PLATFORM_DEFAULT)
option(WITH_CYCLES_LOGGING "Build Cycles with logging support" ON)
@@ -417,11 +415,6 @@ mark_as_advanced(WITH_CXX_GUARDEDALLOC)
option(WITH_ASSERT_ABORT "Call abort() when raising an assertion through BLI_assert()" ON)
mark_as_advanced(WITH_ASSERT_ABORT)
if(UNIX AND NOT APPLE)
option(WITH_CLANG_TIDY "Use Clang Tidy to analyze the source code (only enable for development on Linux using Clang)" OFF)
mark_as_advanced(WITH_CLANG_TIDY)
endif()
option(WITH_BOOST "Enable features depending on boost" ON)
option(WITH_TBB "Enable features depending on TBB (OpenVDB, OpenImageDenoise, sculpt multithreading)" ON)
@@ -902,7 +895,7 @@ if(MSVC)
# endianness detection and auto-setting is counterproductive
# so we just set endianness according to CMAKE_OSX_ARCHITECTURES
elseif(CMAKE_OSX_ARCHITECTURES MATCHES i386 OR CMAKE_OSX_ARCHITECTURES MATCHES x86_64 OR CMAKE_OSX_ARCHITECTURES MATCHES arm64)
elseif(CMAKE_OSX_ARCHITECTURES MATCHES i386 OR CMAKE_OSX_ARCHITECTURES MATCHES x86_64)
add_definitions(-D__LITTLE_ENDIAN__)
elseif(CMAKE_OSX_ARCHITECTURES MATCHES ppc OR CMAKE_OSX_ARCHITECTURES MATCHES ppc64)
add_definitions(-D__BIG_ENDIAN__)
@@ -1630,6 +1623,10 @@ endif()
#-----------------------------------------------------------------------------
# Libraries
if(WITH_GTESTS)
include(GTestTesting)
endif()
if(WITH_BLENDER)
add_subdirectory(intern)
add_subdirectory(extern)

View File

@@ -30,7 +30,7 @@
# build_deps 2015 x64 / build_deps 2015 x86
#
# MAC OS X USAGE:
# Install with homebrew: brew install cmake autoconf automake libtool yasm nasm bison
# Install with homebrew: brew install cmake autoconf automake libtool yasm nasm
# Run "make deps" from main Blender directory
#
# LINUX USAGE:
@@ -76,7 +76,6 @@ include(cmake/llvm.cmake)
include(cmake/clang.cmake)
if(APPLE)
include(cmake/openmp.cmake)
include(cmake/nasm.cmake)
endif()
include(cmake/openimageio.cmake)
include(cmake/tiff.cmake)
@@ -94,11 +93,9 @@ if(UNIX)
else()
include(cmake/pugixml.cmake)
endif()
if((NOT APPLE) OR ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64"))
include(cmake/ispc.cmake)
include(cmake/openimagedenoise.cmake)
include(cmake/embree.cmake)
endif()
include(cmake/ispc.cmake)
include(cmake/openimagedenoise.cmake)
include(cmake/embree.cmake)
if(NOT APPLE)
include(cmake/xr_openxr.cmake)
endif()
@@ -117,7 +114,6 @@ if(WIN32)
endif()
if(NOT WIN32 OR ENABLE_MINGW64)
include(cmake/gmp.cmake)
include(cmake/openjpeg.cmake)
if(NOT WIN32 OR BUILD_MODE STREQUAL Release)
if(WIN32)

View File

@@ -44,7 +44,7 @@ if(WIN32)
elseif(APPLE)
set(BOOST_CONFIGURE_COMMAND ./bootstrap.sh)
set(BOOST_BUILD_COMMAND ./b2)
set(BOOST_BUILD_OPTIONS toolset=clang-darwin cxxflags=${PLATFORM_CXXFLAGS} linkflags=${PLATFORM_LDFLAGS} visibility=global --disable-icu boost.locale.icu=off)
set(BOOST_BUILD_OPTIONS toolset=darwin cxxflags=${PLATFORM_CXXFLAGS} linkflags=${PLATFORM_LDFLAGS} visibility=global --disable-icu boost.locale.icu=off)
set(BOOST_HARVEST_CMD echo .)
set(BOOST_PATCH_COMMAND echo .)
else()

View File

@@ -30,7 +30,6 @@ if(UNIX)
nasm
yasm
tclsh
bison
)
foreach(_software ${_required_software})
@@ -41,12 +40,6 @@ if(UNIX)
unset(_software_find CACHE)
endforeach()
if(APPLE)
if(NOT EXISTS "/usr/local/opt/bison/bin/bison")
set(_software_missing "${_software_missing} bison")
endif()
endif()
if(_software_missing)
message(
"\n"
@@ -57,7 +50,7 @@ if(UNIX)
" apt install autoconf automake libtool yasm nasm tcl\n"
"\n"
"On macOS (with homebrew):\n"
" brew install cmake autoconf automake libtool yasm nasm bison\n"
" brew install cmake autoconf automake libtool yasm nasm\n"
"\n"
"Other platforms:\n"
" Install equivalent packages.\n")

View File

@@ -30,11 +30,6 @@ else()
set(CLANG_GENERATOR "Unix Makefiles")
endif()
if(APPLE)
set(CLANG_EXTRA_ARGS ${CLANG_EXTRA_ARGS}
-DLIBXML2_LIBRARY=${LIBDIR}/xml2/lib/libxml2.a
)
endif()
ExternalProject_Add(external_clang
URL ${CLANG_URI}

View File

@@ -50,8 +50,7 @@ if(APPLE)
set(FFMPEG_EXTRA_FLAGS
${FFMPEG_EXTRA_FLAGS}
--target-os=darwin
--x86asmexe=${LIBDIR}/nasm/bin/nasm
)
)
endif()
ExternalProject_Add(external_ffmpeg
@@ -144,12 +143,6 @@ if(WIN32)
external_zlib_mingw
)
endif()
if(APPLE)
add_dependencies(
external_ffmpeg
external_nasm
)
endif()
if(BUILD_MODE STREQUAL Release AND WIN32)
ExternalProject_Add_Step(external_ffmpeg after_install

View File

@@ -24,8 +24,7 @@ set(FREETYPE_EXTRA_ARGS
-DFT_WITH_HARFBUZZ=OFF
-DFT_WITH_BZIP2=OFF
-DCMAKE_DISABLE_FIND_PACKAGE_HarfBuzz=TRUE
-DCMAKE_DISABLE_FIND_PACKAGE_BZip2=TRUE
-DCMAKE_DISABLE_FIND_PACKAGE_BrotliDec=TRUE)
-DCMAKE_DISABLE_FIND_PACKAGE_BZip2=TRUE)
ExternalProject_Add(external_freetype
URL ${FREETYPE_URI}

View File

@@ -1,88 +0,0 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
set(GMP_EXTRA_ARGS -enable-cxx)
if(WIN32)
# Shared for windows because static libs will drag in a libgcc dependency.
set(GMP_OPTIONS --disable-static --enable-shared --host=x86_64-w64-mingw32 --build=x86_64-w64-mingw32)
else()
set(GMP_OPTIONS --enable-static --disable-shared )
endif()
ExternalProject_Add(external_gmp
URL ${GMP_URI}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH MD5=${GMP_HASH}
PREFIX ${BUILD_DIR}/gmp
CONFIGURE_COMMAND ${CONFIGURE_ENV_NO_PERL} && cd ${BUILD_DIR}/gmp/src/external_gmp/ && ${CONFIGURE_COMMAND} --prefix=${LIBDIR}/gmp ${GMP_OPTIONS} ${GMP_EXTRA_ARGS}
BUILD_COMMAND ${CONFIGURE_ENV_NO_PERL} && cd ${BUILD_DIR}/gmp/src/external_gmp/ && make -j${MAKE_THREADS}
INSTALL_COMMAND ${CONFIGURE_ENV_NO_PERL} && cd ${BUILD_DIR}/gmp/src/external_gmp/ && make install
INSTALL_DIR ${LIBDIR}/gmp
)
if(MSVC)
set_target_properties(external_gmp PROPERTIES FOLDER Mingw)
endif()
if(BUILD_MODE STREQUAL Release AND WIN32)
ExternalProject_Add_Step(external_gmp after_install
COMMAND ${CMAKE_COMMAND} -E copy ${BUILD_DIR}/gmp/src/external_gmp/.libs/libgmp-3.dll.def ${BUILD_DIR}/gmp/src/external_gmp/.libs/libgmp-10.def
COMMAND lib /def:${BUILD_DIR}/gmp/src/external_gmp/.libs/libgmp-10.def /machine:x64 /out:${BUILD_DIR}/gmp/src/external_gmp/.libs/libgmp-10.lib
COMMAND ${CMAKE_COMMAND} -E copy ${LIBDIR}/gmp/bin/libgmp-10.dll ${HARVEST_TARGET}/gmp/lib/libgmp-10.dll
COMMAND ${CMAKE_COMMAND} -E copy ${BUILD_DIR}/gmp/src/external_gmp/.libs/libgmp-10.lib ${HARVEST_TARGET}/gmp/lib/libgmp-10.lib
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/gmp/include ${HARVEST_TARGET}/gmp/include
DEPENDEES install
)
endif()
if(BUILD_MODE STREQUAL Debug AND WIN32)
ExternalProject_Add_Step(external_gmp after_install
COMMAND ${CMAKE_COMMAND} -E copy ${BUILD_DIR}/gmp/src/external_gmp/.libs/libgmp-3.dll.def ${BUILD_DIR}/gmp/src/external_gmp/.libs/libgmp-10.def
COMMAND lib /def:${BUILD_DIR}/gmp/src/external_gmp/.libs/libgmp-10.def /machine:x64 /out:${BUILD_DIR}/gmp/src/external_gmp/.libs/libgmp-10.lib
DEPENDEES install
)
endif()
if(WIN32)
# gmpxx is somewhat special: it builds on top of the C-style gmp library but exposes C++ bindings.
# Given the C++ ABI between MSVC and mingw is not compatible, we need to build the bindings
# with MSVC, while GMP itself can only be built with mingw.
ExternalProject_Add(external_gmpxx
URL ${GMP_URI}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH MD5=${GMP_HASH}
PREFIX ${BUILD_DIR}/gmpxx
PATCH_COMMAND COMMAND ${CMAKE_COMMAND} -E copy ${PATCH_DIR}/cmakelists_gmpxx.txt ${BUILD_DIR}/gmpxx/src/external_gmpxx/CMakeLists.txt &&
${CMAKE_COMMAND} -E copy ${PATCH_DIR}/config_gmpxx.h ${BUILD_DIR}/gmpxx/src/external_gmpxx/config.h
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${LIBDIR}/gmpxx ${DEFAULT_CMAKE_FLAGS} -DGMP_LIBRARY=${BUILD_DIR}/gmp/src/external_gmp/.libs/libgmp-10.lib -DGMP_INCLUDE_DIR=${BUILD_DIR}/gmp/src/external_gmp -DCMAKE_DEBUG_POSTFIX=_d
INSTALL_DIR ${LIBDIR}/gmpxx
)
set_target_properties(external_gmpxx PROPERTIES FOLDER Mingw)
add_dependencies(
external_gmpxx
external_gmp
)
ExternalProject_Add_Step(external_gmpxx after_install
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/gmpxx/ ${HARVEST_TARGET}/gmp
DEPENDEES install
)
endif()

View File

@@ -42,7 +42,7 @@ if(BUILD_MODE STREQUAL Release)
${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/glew/include/ ${HARVEST_TARGET}/opengl/include/ &&
# tiff
${CMAKE_COMMAND} -E copy ${LIBDIR}/tiff/lib/tiff.lib ${HARVEST_TARGET}/tiff/lib/libtiff.lib &&
${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/tiff/include/ ${HARVEST_TARGET}/tiff/include/
${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/tiff/include/ ${HARVEST_TARGET}/tiff/include/ &&
DEPENDS
)
endif()
@@ -89,8 +89,6 @@ harvest(freetype/include freetype/include "*.h")
harvest(freetype/lib/libfreetype2ST.a freetype/lib/libfreetype.a)
harvest(glew/include glew/include "*.h")
harvest(glew/lib glew/lib "*.a")
harvest(gmp/include gmp/include "*.h")
harvest(gmp/lib gmp/lib "*.a")
harvest(jemalloc/include jemalloc/include "*.h")
harvest(jemalloc/lib jemalloc/lib "*.a")
harvest(jpg/include jpeg/include "*.h")
@@ -134,12 +132,8 @@ harvest(openimageio/bin openimageio/bin "maketx")
harvest(openimageio/bin openimageio/bin "oiiotool")
harvest(openimageio/include openimageio/include "*")
harvest(openimageio/lib openimageio/lib "*.a")
if((NOT APPLE) OR ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64"))
harvest(openimagedenoise/include openimagedenoise/include "*")
harvest(openimagedenoise/lib openimagedenoise/lib "*.a")
harvest(embree/include embree/include "*.h")
harvest(embree/lib embree/lib "*.a")
endif()
harvest(openimagedenoise/include openimagedenoise/include "*")
harvest(openimagedenoise/lib openimagedenoise/lib "*.a")
harvest(openjpeg/include/openjpeg-2.3 openjpeg/include "*.h")
harvest(openjpeg/lib openjpeg/lib "*.a")
harvest(opensubdiv/include opensubdiv/include "*.h")
@@ -174,6 +168,8 @@ harvest(vpx/lib ffmpeg/lib "*.a")
harvest(webp/lib ffmpeg/lib "*.a")
harvest(x264/lib ffmpeg/lib "*.a")
harvest(xvidcore/lib ffmpeg/lib "*.a")
harvest(embree/include embree/include "*.h")
harvest(embree/lib embree/lib "*.a")
harvest(usd/include usd/include "*.h")
harvest(usd/lib/usd usd/lib/usd "*")
harvest(usd/plugin usd/plugin "*")

View File

@@ -22,17 +22,6 @@ if(WIN32)
-DBISON_EXECUTABLE=${LIBDIR}/flexbison/win_bison.exe
-DM4_EXECUTABLE=${DOWNLOAD_DIR}/mingw/mingw64/msys/1.0/bin/m4.exe
)
elseif(APPLE)
# Use bison installed via Homebrew.
# The one which comes with the Xcode toolset is too old.
set(ISPC_EXTRA_ARGS_APPLE
-DBISON_EXECUTABLE=/usr/local/opt/bison/bin/bison
)
elseif(UNIX)
set(ISPC_EXTRA_ARGS_UNIX
-DCMAKE_C_COMPILER=${LIBDIR}/clang/bin/clang
-DCMAKE_CXX_COMPILER=${LIBDIR}/clang/bin/clang++
)
endif()
set(ISPC_EXTRA_ARGS
@@ -47,8 +36,6 @@ set(ISPC_EXTRA_ARGS
-DCLANG_LIBRARY_DIR=${LIBDIR}/clang/lib
-DCLANG_INCLUDE_DIRS=${LIBDIR}/clang/include
${ISPC_EXTRA_ARGS_WIN}
${ISPC_EXTRA_ARGS_APPLE}
${ISPC_EXTRA_ARGS_UNIX}
)
ExternalProject_Add(external_ispc
@@ -73,3 +60,4 @@ if(WIN32)
external_flexbison
)
endif()

View File

@@ -16,17 +16,11 @@
#
# ***** END GPL LICENSE BLOCK *****
if(APPLE AND "${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
set(LLVM_TARGETS AArch64)
else()
set(LLVM_TARGETS X86)
endif()
set(LLVM_EXTRA_ARGS
-DLLVM_USE_CRT_RELEASE=MD
-DLLVM_USE_CRT_DEBUG=MDd
-DLLVM_INCLUDE_TESTS=OFF
-DLLVM_TARGETS_TO_BUILD=${LLVM_TARGETS}
-DLLVM_TARGETS_TO_BUILD=X86
-DLLVM_INCLUDE_EXAMPLES=OFF
-DLLVM_ENABLE_TERMINFO=OFF
-DLLVM_BUILD_LLVM_C_DYLIB=OFF

View File

@@ -1,29 +0,0 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
ExternalProject_Add(external_nasm
URL ${NASM_URI}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH SHA256=${NASM_HASH}
PREFIX ${BUILD_DIR}/nasm
PATCH_COMMAND ${PATCH_CMD} --verbose -p 1 -N -d ${BUILD_DIR}/nasm/src/external_nasm < ${PATCH_DIR}/nasm.diff
CONFIGURE_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/nasm/src/external_nasm/ && ${CONFIGURE_COMMAND} --prefix=${LIBDIR}/nasm
BUILD_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/nasm/src/external_nasm/ && make -j${MAKE_THREADS}
INSTALL_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/nasm/src/external_nasm/ && make install
INSTALL_DIR ${LIBDIR}/nasm
)

View File

@@ -38,7 +38,6 @@ ExternalProject_Add(external_numpy
PREFIX ${BUILD_DIR}/numpy
PATCH_COMMAND ${NUMPY_PATCH}
CONFIGURE_COMMAND ""
PATCH_COMMAND COMMAND ${PATCH_CMD} -p 1 -d ${BUILD_DIR}/numpy/src/external_numpy < ${PATCH_DIR}/numpy.diff
LOG_BUILD 1
BUILD_COMMAND ${PYTHON_BINARY} ${BUILD_DIR}/numpy/src/external_numpy/setup.py build ${NUMPY_BUILD_OPTION} install --old-and-unmanageable
INSTALL_COMMAND ""

View File

@@ -21,7 +21,6 @@ ExternalProject_Add(external_ogg
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH SHA256=${OGG_HASH}
PREFIX ${BUILD_DIR}/ogg
PATCH_COMMAND ${PATCH_CMD} --verbose -p 1 -N -d ${BUILD_DIR}/ogg/src/external_ogg < ${PATCH_DIR}/ogg.diff
CONFIGURE_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/ogg/src/external_ogg/ && ${CONFIGURE_COMMAND} --prefix=${LIBDIR}/ogg --disable-shared --enable-static
BUILD_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/ogg/src/external_ogg/ && make -j${MAKE_THREADS}
INSTALL_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/ogg/src/external_ogg/ && make install

View File

@@ -30,13 +30,6 @@ set(OPENCOLORIO_EXTRA_ARGS
-DOCIO_STATIC_JNIGLUE=OFF
)
if(APPLE AND NOT("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64"))
set(OPENCOLORIO_EXTRA_ARGS
${OPENCOLORIO_EXTRA_ARGS}
-DOCIO_USE_SSE=OFF
)
endif()
if(WIN32)
set(OCIO_PATCH opencolorio_win.diff)
set(OPENCOLORIO_EXTRA_ARGS

View File

@@ -22,7 +22,6 @@ ExternalProject_Add(external_openmp
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH MD5=${OPENMP_HASH}
PREFIX ${BUILD_DIR}/openmp
PATCH_COMMAND ${PATCH_CMD} -p 1 -d ${BUILD_DIR}/openmp/src/external_openmp < ${PATCH_DIR}/openmp.diff
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${LIBDIR}/openmp ${DEFAULT_CMAKE_FLAGS}
INSTALL_COMMAND cd ${BUILD_DIR}/openmp/src/external_openmp-build && install_name_tool -id @executable_path/../Resources/lib/libomp.dylib runtime/src/libomp.dylib && make install
INSTALL_DIR ${LIBDIR}/openmp

View File

@@ -113,32 +113,16 @@ else()
COMMAND xcode-select --print-path
OUTPUT_VARIABLE XCODE_DEV_PATH OUTPUT_STRIP_TRAILING_WHITESPACE
)
execute_process(
COMMAND xcodebuild -version -sdk macosx SDKVersion
OUTPUT_VARIABLE MACOSX_SDK_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
if(NOT CMAKE_OSX_ARCHITECTURES)
execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCHITECTURE OUTPUT_STRIP_TRAILING_WHITESPACE)
message(STATUS "Detected native architecture ${ARCHITECTURE}.")
set(CMAKE_OSX_ARCHITECTURES "${ARCHITECTURE}")
endif()
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64")
set(OSX_DEPLOYMENT_TARGET 10.13)
else()
set(OSX_DEPLOYMENT_TARGET 11.00)
endif()
set(OSX_ARCHITECTURES x86_64)
set(OSX_DEPLOYMENT_TARGET 10.11)
set(OSX_SYSROOT ${XCODE_DEV_PATH}/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk)
set(PLATFORM_CFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET} -arch ${CMAKE_OSX_ARCHITECTURES}")
set(PLATFORM_CXXFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET} -std=c++11 -stdlib=libc++ -arch ${CMAKE_OSX_ARCHITECTURES}")
set(PLATFORM_LDFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET} -arch ${CMAKE_OSX_ARCHITECTURES}")
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64")
set(PLATFORM_BUILD_TARGET --build=x86_64-apple-darwin17.0.0) # OS X 10.13
else()
set(PLATFORM_BUILD_TARGET --build=aarch64-apple-darwin20.0.0) # macOS 11.00
endif()
set(PLATFORM_CFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET}")
set(PLATFORM_CXXFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET} -std=c++11 -stdlib=libc++")
set(PLATFORM_LDFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET}")
set(PLATFORM_BUILD_TARGET --build=x86_64-apple-darwin15.0.0) # OS X 10.11
set(PLATFORM_CMAKE_FLAGS
-DCMAKE_OSX_ARCHITECTURES:STRING=${CMAKE_OSX_ARCHITECTURES}
-DCMAKE_OSX_ARCHITECTURES:STRING=${OSX_ARCHITECTURES}
-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=${OSX_DEPLOYMENT_TARGET}
-DCMAKE_OSX_SYSROOT:PATH=${OSX_SYSROOT}
)
@@ -171,7 +155,6 @@ else()
set(CONFIGURE_ENV
export MACOSX_DEPLOYMENT_TARGET=${OSX_DEPLOYMENT_TARGET} &&
export MACOSX_SDK_VERSION=${OSX_DEPLOYMENT_TARGET} &&
export CFLAGS=${PLATFORM_CFLAGS} &&
export CXXFLAGS=${PLATFORM_CXXFLAGS} &&
export LDFLAGS=${PLATFORM_LDFLAGS}

View File

@@ -22,10 +22,6 @@ set(PNG_EXTRA_ARGS
-DPNG_STATIC=ON
)
if(APPLE AND ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64"))
set(PNG_EXTRA_ARGS ${PNG_EXTRA_ARGS} -DPNG_HARDWARE_OPTIMIZATIONS=ON -DPNG_ARM_NEON=on -DCMAKE_SYSTEM_PROCESSOR="aarch64")
endif()
ExternalProject_Add(external_png
URL ${PNG_URI}
DOWNLOAD_DIR ${DOWNLOAD_DIR}

View File

@@ -48,12 +48,7 @@ if(WIN32)
else()
if(APPLE)
# Disable functions that can be in 10.13 sdk but aren't available on 10.9 target.
#
# Disable libintl (gettext library) as it might come from Homebrew, which makes
# the test program compile, but Python itself does not. This is because for Python
# we use isysroot, which seems to forbid using libintl.h.
# The gettext functionality seems to come from CoreFoundation, so should be all fine.
# disable functions that can be in 10.13 sdk but aren't available on 10.9 target
set(PYTHON_FUNC_CONFIGS
export ac_cv_func_futimens=no &&
export ac_cv_func_utimensat=no &&
@@ -65,21 +60,13 @@ else()
export ac_cv_func_getentropy=no &&
export ac_cv_func_mkostemp=no &&
export ac_cv_func_mkostemps=no &&
export ac_cv_func_timingsafe_bcmp=no &&
export ac_cv_header_libintl_h=no &&
export ac_cv_lib_intl_textdomain=no
)
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
set(PYTHON_FUNC_CONFIGS ${PYTHON_FUNC_CONFIGS} && export PYTHON_DECIMAL_WITH_MACHINE=ansi64)
endif()
export ac_cv_func_timingsafe_bcmp=no)
set(PYTHON_CONFIGURE_ENV ${CONFIGURE_ENV} && ${PYTHON_FUNC_CONFIGS})
set(PYTHON_BINARY ${BUILD_DIR}/python/src/external_python/python.exe)
set(PYTHON_PATCH ${PATCH_CMD} --verbose -p1 -d ${BUILD_DIR}/python/src/external_python < ${PATCH_DIR}/python_macos.diff)
else()
set(PYTHON_CONFIGURE_ENV ${CONFIGURE_ENV})
set(PYTHON_BINARY ${BUILD_DIR}/python/src/external_python/python)
set(PYTHON_PATCH ${PATCH_CMD} --verbose -p1 -d ${BUILD_DIR}/python/src/external_python < ${PATCH_DIR}/python_linux.diff)
endif()
endif()
set(PYTHON_CONFIGURE_EXTRA_ARGS "--with-openssl=${LIBDIR}/ssl")
set(PYTHON_CFLAGS "-I${LIBDIR}/sqlite/include -I${LIBDIR}/bzip2/include -I${LIBDIR}/lzma/include -I${LIBDIR}/zlib/include")
@@ -89,6 +76,7 @@ else()
export CPPFLAGS=${PYTHON_CFLAGS} &&
export LDFLAGS=${PYTHON_LDFLAGS} &&
export PKG_CONFIG_PATH=${LIBDIR}/ffi/lib/pkgconfig)
set(PYTHON_PATCH ${PATCH_CMD} --verbose -p1 -d ${BUILD_DIR}/python/src/external_python < ${PATCH_DIR}/python_linux.diff)
ExternalProject_Add(external_python
URL ${PYTHON_URI}

View File

@@ -51,7 +51,7 @@ ExternalProject_Add(external_sqlite
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH SHA1=${SQLITE_HASH}
PREFIX ${BUILD_DIR}/sqlite
PATCH_COMMAND ${PATCH_CMD} -p 1 -d ${BUILD_DIR}/sqlite/src/external_sqlite < ${PATCH_DIR}/sqlite.diff
PATCH_COMMAND ${SQLITE_PATCH_CMD}
CONFIGURE_COMMAND ${SQLITE_CONFIGURE_ENV} && cd ${BUILD_DIR}/sqlite/src/external_sqlite/ && ${CONFIGURE_COMMAND} --prefix=${LIBDIR}/sqlite ${SQLITE_CONFIGURATION_ARGS}
BUILD_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/sqlite/src/external_sqlite/ && make -j${MAKE_THREADS}
INSTALL_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/sqlite/src/external_sqlite/ && make install

View File

@@ -20,7 +20,7 @@ set(SSL_CONFIGURE_COMMAND ./Configure)
set(SSL_PATCH_CMD echo .)
if(APPLE)
set(SSL_OS_COMPILER "blender-darwin-${CMAKE_OSX_ARCHITECTURES}")
set(SSL_OS_COMPILER "blender-darwin-x86_64")
else()
if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
set(SSL_EXTRA_ARGS enable-ec_nistp_64_gcc_128)

View File

@@ -12,9 +12,4 @@ my %targets = (
inherit_from => [ "darwin64-x86_64-cc" ],
cflags => add("-fPIC"),
},
"blender-darwin-arm64" => {
inherit_from => [ "darwin-common" ],
cxxflags => add("-fPIC -arch arm64"),
cflags => add("-fPIC -arch arm64"),
},
);

View File

@@ -27,7 +27,6 @@ ExternalProject_Add(external_theora
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH SHA256=${THEORA_HASH}
PREFIX ${BUILD_DIR}/theora
PATCH_COMMAND ${PATCH_CMD} -p 0 -d ${BUILD_DIR}/theora/src/external_theora < ${PATCH_DIR}/theora.diff
CONFIGURE_COMMAND ${THEORA_CONFIGURE_ENV} && cd ${BUILD_DIR}/theora/src/external_theora/ && ${CONFIGURE_COMMAND} --prefix=${LIBDIR}/theora
--disable-shared
--enable-static

View File

@@ -16,12 +16,6 @@
#
# ***** END GPL LICENSE BLOCK *****
if(WITH_WEBP)
set(WITH_TIFF_WEBP ON)
else()
set(WITH_TIFF_WEBP OFF)
endif()
set(TIFF_EXTRA_ARGS
-DZLIB_LIBRARY=${LIBDIR}/zlib/lib/${ZLIB_LIBRARY}
-DZLIB_INCLUDE_DIR=${LIBDIR}/zlib/include
@@ -29,8 +23,6 @@ set(TIFF_EXTRA_ARGS
-DBUILD_SHARED_LIBS=OFF
-Dlzma=OFF
-Djbig=OFF
-Dzstd=OFF
-Dwebp=${WITH_TIFF_WEBP}
)
ExternalProject_Add(external_tiff

View File

@@ -305,17 +305,9 @@ set(MESA_VERSION 18.3.1)
set(MESA_URI ftp://ftp.freedesktop.org/pub/mesa//mesa-${MESA_VERSION}.tar.xz)
set(MESA_HASH d60828056d77bfdbae0970f9b15fb1be)
set(NASM_VERSION 2.15.02)
set(NASM_URI https://www.nasm.us/pub/nasm/releasebuilds/${NASM_VERSION}/nasm-${NASM_VERSION}.tar.xz)
set(NASM_HASH f4fd1329b1713e1ccd34b2fc121c4bcd278c9f91cc4cb205ae8fcd2e4728dd14)
set(XR_OPENXR_SDK_VERSION 1.0.8)
set(XR_OPENXR_SDK_URI https://github.com/KhronosGroup/OpenXR-SDK/archive/release-${XR_OPENXR_SDK_VERSION}.tar.gz)
set(XR_OPENXR_SDK_HASH c6de63d2e0f9029aa58dfa97cad8ce07)
set(ISPC_VERSION v1.13.0)
set(ISPC_URI https://github.com/ispc/ispc/archive/${ISPC_VERSION}.tar.gz)
set(ISPC_HASH 4bf5e8d0020c4b9980faa702c1a6f25f)
set(GMP_VERSION 6.2.0)
set(GMP_URI https://gmplib.org/download/gmp/gmp-${GMP_VERSION}.tar.xz)
set(GMP_HASH a325e3f09e6d91e62101e59f9bda3ec1)

View File

@@ -24,11 +24,7 @@ if(WIN32)
endif()
else()
if(APPLE)
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
set(VPX_EXTRA_FLAGS --target=generic-gnu)
else()
set(VPX_EXTRA_FLAGS --target=x86_64-darwin17-gcc)
endif()
set(VPX_EXTRA_FLAGS --target=x86_64-darwin13-gcc)
else()
set(VPX_EXTRA_FLAGS --target=generic-gnu)
endif()

View File

@@ -21,26 +21,12 @@ if(WIN32)
endif()
if(APPLE)
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
set(X264_EXTRA_ARGS ${X264_EXTRA_ARGS} "--disable-asm")
set(X264_CONFIGURE_ENV echo .)
else()
set(X264_CONFIGURE_ENV
export AS=${LIBDIR}/nasm/bin/nasm
)
endif()
else()
set(X264_CONFIGURE_ENV echo .)
endif()
ExternalProject_Add(external_x264
URL ${X264_URI}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH SHA256=${X264_HASH}
PREFIX ${BUILD_DIR}/x264
CONFIGURE_COMMAND ${CONFIGURE_ENV} && ${X264_CONFIGURE_ENV} && cd ${BUILD_DIR}/x264/src/external_x264/ &&
${CONFIGURE_COMMAND} --prefix=${LIBDIR}/x264
CONFIGURE_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/x264/src/external_x264/ && ${CONFIGURE_COMMAND} --prefix=${LIBDIR}/x264
--enable-static
--enable-pic
--disable-lavf
@@ -53,10 +39,3 @@ ExternalProject_Add(external_x264
if(MSVC)
set_target_properties(external_x264 PROPERTIES FOLDER Mingw)
endif()
if(APPLE)
add_dependencies(
external_x264
external_nasm
)
endif()

View File

@@ -1030,7 +1030,7 @@ Those libraries should be available as packages in all recent distributions (opt
* libjpeg, libpng, libtiff, [openjpeg2], [libopenal].
* libx11, libxcursor, libxi, libxrandr, libxinerama (and other libx... as needed).
* libsqlite3, libbz2, libssl, libfftw3, libxml2, libtinyxml, yasm, libyaml-cpp.
* libsdl2, libglew, [libgmp], [libglewmx].\""
* libsdl2, libglew, [libglewmx].\""
DEPS_SPECIFIC_INFO="\"BUILDABLE DEPENDENCIES:
@@ -3524,8 +3524,7 @@ install_DEB() {
libxcursor-dev libxi-dev wget libsqlite3-dev libxrandr-dev libxinerama-dev \
libbz2-dev libncurses5-dev libssl-dev liblzma-dev libreadline-dev \
libopenal-dev libglew-dev yasm $THEORA_DEV $VORBIS_DEV $OGG_DEV \
libsdl2-dev libfftw3-dev patch bzip2 libxml2-dev libtinyxml-dev libjemalloc-dev \
libgmp-dev"
libsdl2-dev libfftw3-dev patch bzip2 libxml2-dev libtinyxml-dev libjemalloc-dev"
# libglewmx-dev (broken in deb testing currently...)
VORBIS_USE=true
@@ -4176,7 +4175,7 @@ install_RPM() {
libX11-devel libXi-devel libXcursor-devel libXrandr-devel libXinerama-devel \
wget ncurses-devel readline-devel $OPENJPEG_DEV openal-soft-devel \
glew-devel yasm $THEORA_DEV $VORBIS_DEV $OGG_DEV patch \
libxml2-devel yaml-cpp-devel tinyxml-devel jemalloc-devel gmp-devel"
libxml2-devel yaml-cpp-devel tinyxml-devel jemalloc-devel"
OPENJPEG_USE=true
VORBIS_USE=true
@@ -4736,7 +4735,7 @@ install_ARCH() {
_packages="$BASE_DEVEL git cmake \
libxi libxcursor libxrandr libxinerama glew libpng libtiff wget openal \
$OPENJPEG_DEV $VORBIS_DEV $OGG_DEV $THEORA_DEV yasm sdl2 fftw \
libxml2 yaml-cpp tinyxml python-requests jemalloc gmp"
libxml2 yaml-cpp tinyxml python-requests jemalloc"
OPENJPEG_USE=true
VORBIS_USE=true

View File

@@ -91,41 +91,3 @@ diff -Naur external_blosc.orig/blosc/blosc.c external_blosc/blosc/blosc.c
/* Some useful units */
diff --git a/blosc/shuffle.c b/blosc/shuffle.c
index 84b5095..23053b4 100644
--- a/blosc/shuffle.c
+++ b/blosc/shuffle.c
@@ -490,12 +490,12 @@ void unshuffle(size_t bytesoftype, size_t blocksize,
#else /* no __SSE2__ available */
void shuffle(size_t bytesoftype, size_t blocksize,
- uint8_t* _src, uint8_t* _dest) {
+ const uint8_t* _src, uint8_t* _dest) {
_shuffle(bytesoftype, blocksize, _src, _dest);
}
void unshuffle(size_t bytesoftype, size_t blocksize,
- uint8_t* _src, uint8_t* _dest) {
+ const uint8_t* _src, uint8_t* _dest) {
_unshuffle(bytesoftype, blocksize, _src, _dest);
}
--- a/cmake/FindSSE.cmake
+++ b/cmake/FindSSE.cmake
@@ -49,6 +49,17 @@
set(AVX_FOUND false CACHE BOOL "AVX available on host")
ENDIF (AVX_TRUE)
ELSEIF(CMAKE_SYSTEM_NAME MATCHES "Darwin")
+ execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCHITECTURE OUTPUT_STRIP_TRAILING_WHITESPACE)
+ message(STATUS "Detected architecture ${ARCHITECTURE}")
+ IF("${ARCHITECTURE}" STREQUAL "arm64")
+ set(SSE2_FOUND false CACHE BOOL "SSE2 available on host")
+ set(SSE3_FOUND false CACHE BOOL "SSE3 available on host")
+ set(SSSE3_FOUND false CACHE BOOL "SSSE3 available on host")
+ set(SSE4_1_FOUND false CACHE BOOL "SSE4.1 available on host")
+ set(AVX_FOUND false CACHE BOOL "AVX available on host")
+ return()
+ ENDIF()
+
EXEC_PROGRAM("/usr/sbin/sysctl -n machdep.cpu.features" OUTPUT_VARIABLE
CPUINFO)

View File

@@ -1,22 +0,0 @@
cmake_minimum_required(VERSION 3.1)
project(libgmpxx)
include_directories(. cxx ${GMP_INCLUDE_DIR})
add_definitions(-D__GMP_WITHIN_GMPXX)
add_library(libgmpxx SHARED
cxx/dummy.cc
cxx/isfuns.cc
cxx/ismpf.cc
cxx/ismpq.cc
cxx/ismpz.cc
cxx/ismpznw.cc
cxx/limits.cc
cxx/osdoprnti.cc
cxx/osfuns.cc
cxx/osmpf.cc
cxx/osmpq.cc
cxx/osmpz.cc
)
target_link_libraries(libgmpxx ${GMP_LIBRARY})
install(TARGETS libgmpxx DESTINATION lib)

View File

@@ -1,668 +0,0 @@
/* config.h. Generated from config.in by configure. */
/* config.in. Generated from configure.ac by autoheader. */
/*
Copyright 1996-2020 Free Software Foundation, Inc.
This file is part of the GNU MP Library.
The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any
later version.
or both in parallel, as here.
The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library. If not,
see https://www.gnu.org/licenses/.
*/
/* Define if building universal (internal helper macro) */
/* #undef AC_APPLE_UNIVERSAL_BUILD */
/* The gmp-mparam.h file (a string) the tune program should suggest updating.
*/
#define GMP_MPARAM_H_SUGGEST "./mpn/x86_64/coreisbr/gmp-mparam.h"
/* Define to 1 if you have the `alarm' function. */
#define HAVE_ALARM 1
/* Define to 1 if alloca() works (via gmp-impl.h). */
#define HAVE_ALLOCA 1
/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix).
*/
/* #undef HAVE_ALLOCA_H */
/* Define to 1 if the compiler accepts gcc style __attribute__ ((const)) */
//#define HAVE_ATTRIBUTE_CONST 1
/* Define to 1 if the compiler accepts gcc style __attribute__ ((malloc)) */
//#define HAVE_ATTRIBUTE_MALLOC 1
/* Define to 1 if the compiler accepts gcc style __attribute__ ((mode (XX)))
*/
//#define HAVE_ATTRIBUTE_MODE 1
/* Define to 1 if the compiler accepts gcc style __attribute__ ((noreturn)) */
//#define HAVE_ATTRIBUTE_NORETURN 1
/* Define to 1 if you have the `attr_get' function. */
/* #undef HAVE_ATTR_GET */
/* Define to 1 if tests/libtests has calling conventions checking for the CPU
*/
/* #undef HAVE_CALLING_CONVENTIONS */
/* Define to 1 if you have the `clock' function. */
#define HAVE_CLOCK 1
/* Define to 1 if you have the `clock_gettime' function */
/* #undef HAVE_CLOCK_GETTIME */
/* Define to 1 if you have the `cputime' function. */
/* #undef HAVE_CPUTIME */
/* Define to 1 if you have the declaration of `fgetc', and to 0 if you don't.
*/
#define HAVE_DECL_FGETC 1
/* Define to 1 if you have the declaration of `fscanf', and to 0 if you don't.
*/
#define HAVE_DECL_FSCANF 1
/* Define to 1 if you have the declaration of `optarg', and to 0 if you don't.
*/
#define HAVE_DECL_OPTARG 1
/* Define to 1 if you have the declaration of `sys_errlist', and to 0 if you
don't. */
#define HAVE_DECL_SYS_ERRLIST 0
/* Define to 1 if you have the declaration of `sys_nerr', and to 0 if you
don't. */
#define HAVE_DECL_SYS_NERR 0
/* Define to 1 if you have the declaration of `ungetc', and to 0 if you don't.
*/
#define HAVE_DECL_UNGETC 1
/* Define to 1 if you have the declaration of `vfprintf', and to 0 if you
don't. */
#define HAVE_DECL_VFPRINTF 1
/* Define to 1 if you have the <dlfcn.h> header file. */
/* #undef HAVE_DLFCN_H */
/* Define one of the following to 1 for the format of a `double'.
If your format is not among these choices, or you don't know what it is,
then leave all undefined.
IEEE_LITTLE_SWAPPED means little endian, but with the two 4-byte halves
swapped, as used by ARM CPUs in little endian mode. */
/* #undef HAVE_DOUBLE_IEEE_BIG_ENDIAN */
#define HAVE_DOUBLE_IEEE_LITTLE_ENDIAN 1
/* #undef HAVE_DOUBLE_IEEE_LITTLE_SWAPPED */
/* #undef HAVE_DOUBLE_VAX_D */
/* #undef HAVE_DOUBLE_VAX_G */
/* #undef HAVE_DOUBLE_CRAY_CFP */
/* Define to 1 if you have the <fcntl.h> header file. */
#define HAVE_FCNTL_H 1
/* Define to 1 if you have the <float.h> header file. */
#define HAVE_FLOAT_H 1
/* Define to 1 if you have the `getpagesize' function. */
#define HAVE_GETPAGESIZE 1
/* Define to 1 if you have the `getrusage' function. */
/* #undef HAVE_GETRUSAGE */
/* Define to 1 if you have the `getsysinfo' function. */
/* #undef HAVE_GETSYSINFO */
/* Define to 1 if you have the `gettimeofday' function. */
#define HAVE_GETTIMEOFDAY 1
/* Define to 1 if the compiler accepts gcc style __attribute__ ((visibility))
and __attribute__ ((alias)) */
#define HAVE_HIDDEN_ALIAS 1
/* Define one of these to 1 for the host CPU family.
If your CPU is not in any of these families, leave all undefined.
For an AMD64 chip, define "x86" in ABI=32, but not in ABI=64. */
/* #undef HAVE_HOST_CPU_FAMILY_alpha */
/* #undef HAVE_HOST_CPU_FAMILY_m68k */
/* #undef HAVE_HOST_CPU_FAMILY_power */
/* #undef HAVE_HOST_CPU_FAMILY_powerpc */
/* #undef HAVE_HOST_CPU_FAMILY_x86 */
#define HAVE_HOST_CPU_FAMILY_x86_64 1
/* Define one of the following to 1 for the host CPU, as per the output of
./config.guess. If your CPU is not listed here, leave all undefined. */
/* #undef HAVE_HOST_CPU_alphaev67 */
/* #undef HAVE_HOST_CPU_alphaev68 */
/* #undef HAVE_HOST_CPU_alphaev7 */
/* #undef HAVE_HOST_CPU_m68020 */
/* #undef HAVE_HOST_CPU_m68030 */
/* #undef HAVE_HOST_CPU_m68040 */
/* #undef HAVE_HOST_CPU_m68060 */
/* #undef HAVE_HOST_CPU_m68360 */
/* #undef HAVE_HOST_CPU_powerpc604 */
/* #undef HAVE_HOST_CPU_powerpc604e */
/* #undef HAVE_HOST_CPU_powerpc750 */
/* #undef HAVE_HOST_CPU_powerpc7400 */
/* #undef HAVE_HOST_CPU_supersparc */
/* #undef HAVE_HOST_CPU_i386 */
/* #undef HAVE_HOST_CPU_i586 */
/* #undef HAVE_HOST_CPU_i686 */
/* #undef HAVE_HOST_CPU_pentium */
/* #undef HAVE_HOST_CPU_pentiummmx */
/* #undef HAVE_HOST_CPU_pentiumpro */
/* #undef HAVE_HOST_CPU_pentium2 */
/* #undef HAVE_HOST_CPU_pentium3 */
/* #undef HAVE_HOST_CPU_pentium4 */
/* #undef HAVE_HOST_CPU_core2 */
/* #undef HAVE_HOST_CPU_nehalem */
/* #undef HAVE_HOST_CPU_westmere */
/* #undef HAVE_HOST_CPU_sandybridge */
#define HAVE_HOST_CPU_ivybridge 1
/* #undef HAVE_HOST_CPU_haswell */
/* #undef HAVE_HOST_CPU_broadwell */
/* #undef HAVE_HOST_CPU_skylake */
/* #undef HAVE_HOST_CPU_silvermont */
/* #undef HAVE_HOST_CPU_goldmont */
/* #undef HAVE_HOST_CPU_k8 */
/* #undef HAVE_HOST_CPU_k10 */
/* #undef HAVE_HOST_CPU_bulldozer */
/* #undef HAVE_HOST_CPU_piledriver */
/* #undef HAVE_HOST_CPU_steamroller */
/* #undef HAVE_HOST_CPU_excavator */
/* #undef HAVE_HOST_CPU_zen */
/* #undef HAVE_HOST_CPU_bobcat */
/* #undef HAVE_HOST_CPU_jaguar */
/* #undef HAVE_HOST_CPU_s390_z900 */
/* #undef HAVE_HOST_CPU_s390_z990 */
/* #undef HAVE_HOST_CPU_s390_z9 */
/* #undef HAVE_HOST_CPU_s390_z10 */
/* #undef HAVE_HOST_CPU_s390_z196 */
/* Define to 1 iff we have a s390 with 64-bit registers. */
/* #undef HAVE_HOST_CPU_s390_zarch */
/* Define to 1 if the system has the type `intmax_t'. */
#define HAVE_INTMAX_T 1
/* Define to 1 if the system has the type `intptr_t'. */
#define HAVE_INTPTR_T 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* Define to 1 if you have the <invent.h> header file. */
/* #undef HAVE_INVENT_H */
/* Define to 1 if you have the <langinfo.h> header file. */
/* #undef HAVE_LANGINFO_H */
/* Define one of these to 1 for the endianness of `mp_limb_t'.
If the endianness is not a simple big or little, or you don't know what
it is, then leave both undefined. */
/* #undef HAVE_LIMB_BIG_ENDIAN */
#define HAVE_LIMB_LITTLE_ENDIAN 1
/* Define to 1 if you have the `localeconv' function. */
#define HAVE_LOCALECONV 1
/* Define to 1 if you have the <locale.h> header file. */
#define HAVE_LOCALE_H 1
/* Define to 1 if the system has the type `long double'. */
#define HAVE_LONG_DOUBLE 1
/* Define to 1 if the system has the type `long long'. */
#define HAVE_LONG_LONG 1
/* Define to 1 if you have the <machine/hal_sysinfo.h> header file. */
/* #undef HAVE_MACHINE_HAL_SYSINFO_H */
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define to 1 if you have the `memset' function. */
#define HAVE_MEMSET 1
/* Define to 1 if you have the `mmap' function. */
/* #undef HAVE_MMAP */
/* Define to 1 if you have the `mprotect' function. */
#define HAVE_MPROTECT 1
/* Define to 1 each of the following for which a native (ie. CPU specific)
implementation of the corresponding routine exists. */
#define HAVE_NATIVE_mpn_add_n 1
/* #undef HAVE_NATIVE_mpn_add_n_sub_n */
#define HAVE_NATIVE_mpn_add_nc 1
/* #undef HAVE_NATIVE_mpn_addaddmul_1msb0 */
#define HAVE_NATIVE_mpn_addlsh1_n 1
#define HAVE_NATIVE_mpn_addlsh2_n 1
#define HAVE_NATIVE_mpn_addlsh_n 1
#define HAVE_NATIVE_mpn_addlsh1_nc 1
#define HAVE_NATIVE_mpn_addlsh2_nc 1
#define HAVE_NATIVE_mpn_addlsh_nc 1
/* #undef HAVE_NATIVE_mpn_addlsh1_n_ip1 */
/* #undef HAVE_NATIVE_mpn_addlsh2_n_ip1 */
/* #undef HAVE_NATIVE_mpn_addlsh_n_ip1 */
/* #undef HAVE_NATIVE_mpn_addlsh1_nc_ip1 */
/* #undef HAVE_NATIVE_mpn_addlsh2_nc_ip1 */
/* #undef HAVE_NATIVE_mpn_addlsh_nc_ip1 */
/* #undef HAVE_NATIVE_mpn_addlsh1_n_ip2 */
/* #undef HAVE_NATIVE_mpn_addlsh2_n_ip2 */
/* #undef HAVE_NATIVE_mpn_addlsh_n_ip2 */
/* #undef HAVE_NATIVE_mpn_addlsh1_nc_ip2 */
/* #undef HAVE_NATIVE_mpn_addlsh2_nc_ip2 */
/* #undef HAVE_NATIVE_mpn_addlsh_nc_ip2 */
/* #undef HAVE_NATIVE_mpn_addmul_1c */
#define HAVE_NATIVE_mpn_addmul_2 1
/* #undef HAVE_NATIVE_mpn_addmul_3 */
/* #undef HAVE_NATIVE_mpn_addmul_4 */
/* #undef HAVE_NATIVE_mpn_addmul_5 */
/* #undef HAVE_NATIVE_mpn_addmul_6 */
/* #undef HAVE_NATIVE_mpn_addmul_7 */
/* #undef HAVE_NATIVE_mpn_addmul_8 */
/* #undef HAVE_NATIVE_mpn_addmul_2s */
#define HAVE_NATIVE_mpn_and_n 1
#define HAVE_NATIVE_mpn_andn_n 1
#define HAVE_NATIVE_mpn_bdiv_dbm1c 1
#define HAVE_NATIVE_mpn_bdiv_q_1 1
#define HAVE_NATIVE_mpn_pi1_bdiv_q_1 1
#define HAVE_NATIVE_mpn_cnd_add_n 1
#define HAVE_NATIVE_mpn_cnd_sub_n 1
#define HAVE_NATIVE_mpn_com 1
#define HAVE_NATIVE_mpn_copyd 1
#define HAVE_NATIVE_mpn_copyi 1
/* #undef HAVE_NATIVE_mpn_div_qr_1n_pi1 */
/* #undef HAVE_NATIVE_mpn_div_qr_2 */
#define HAVE_NATIVE_mpn_divexact_1 1
/* #undef HAVE_NATIVE_mpn_divexact_by3c */
#define HAVE_NATIVE_mpn_divrem_1 1
/* #undef HAVE_NATIVE_mpn_divrem_1c */
#define HAVE_NATIVE_mpn_divrem_2 1
/* #undef HAVE_NATIVE_mpn_gcd_1 */
#define HAVE_NATIVE_mpn_gcd_11 1
/* #undef HAVE_NATIVE_mpn_gcd_22 */
#define HAVE_NATIVE_mpn_hamdist 1
#define HAVE_NATIVE_mpn_invert_limb 1
#define HAVE_NATIVE_mpn_ior_n 1
#define HAVE_NATIVE_mpn_iorn_n 1
#define HAVE_NATIVE_mpn_lshift 1
#define HAVE_NATIVE_mpn_lshiftc 1
/* #undef HAVE_NATIVE_mpn_lshsub_n */
/* #undef HAVE_NATIVE_mpn_mod_1 */
#define HAVE_NATIVE_mpn_mod_1_1p 1
/* #undef HAVE_NATIVE_mpn_mod_1c */
#define HAVE_NATIVE_mpn_mod_1s_2p 1
#define HAVE_NATIVE_mpn_mod_1s_4p 1
#define HAVE_NATIVE_mpn_mod_34lsub1 1
#define HAVE_NATIVE_mpn_modexact_1_odd 1
#define HAVE_NATIVE_mpn_modexact_1c_odd 1
#define HAVE_NATIVE_mpn_mul_1 1
#define HAVE_NATIVE_mpn_mul_1c 1
#define HAVE_NATIVE_mpn_mul_2 1
/* #undef HAVE_NATIVE_mpn_mul_3 */
/* #undef HAVE_NATIVE_mpn_mul_4 */
/* #undef HAVE_NATIVE_mpn_mul_5 */
/* #undef HAVE_NATIVE_mpn_mul_6 */
#define HAVE_NATIVE_mpn_mul_basecase 1
#define HAVE_NATIVE_mpn_mullo_basecase 1
#define HAVE_NATIVE_mpn_nand_n 1
#define HAVE_NATIVE_mpn_nior_n 1
#define HAVE_NATIVE_mpn_popcount 1
#define HAVE_NATIVE_mpn_preinv_divrem_1 1
/* #undef HAVE_NATIVE_mpn_preinv_mod_1 */
#define HAVE_NATIVE_mpn_redc_1 1
/* #undef HAVE_NATIVE_mpn_redc_2 */
#define HAVE_NATIVE_mpn_rsblsh1_n 1
#define HAVE_NATIVE_mpn_rsblsh2_n 1
#define HAVE_NATIVE_mpn_rsblsh_n 1
#define HAVE_NATIVE_mpn_rsblsh1_nc 1
/* #undef HAVE_NATIVE_mpn_rsblsh2_nc */
/* #undef HAVE_NATIVE_mpn_rsblsh_nc */
#define HAVE_NATIVE_mpn_rsh1add_n 1
#define HAVE_NATIVE_mpn_rsh1add_nc 1
#define HAVE_NATIVE_mpn_rsh1sub_n 1
#define HAVE_NATIVE_mpn_rsh1sub_nc 1
#define HAVE_NATIVE_mpn_rshift 1
/* #undef HAVE_NATIVE_mpn_sbpi1_bdiv_r */
#define HAVE_NATIVE_mpn_sqr_basecase 1
/* #undef HAVE_NATIVE_mpn_sqr_diagonal */
#define HAVE_NATIVE_mpn_sqr_diag_addlsh1 1
#define HAVE_NATIVE_mpn_sub_n 1
#define HAVE_NATIVE_mpn_sub_nc 1
#define HAVE_NATIVE_mpn_sublsh1_n 1
#define HAVE_NATIVE_mpn_sublsh2_n 1
/* #undef HAVE_NATIVE_mpn_sublsh_n */
/* #undef HAVE_NATIVE_mpn_sublsh1_nc */
/* #undef HAVE_NATIVE_mpn_sublsh2_nc */
/* #undef HAVE_NATIVE_mpn_sublsh_nc */
/* #undef HAVE_NATIVE_mpn_sublsh1_n_ip1 */
/* #undef HAVE_NATIVE_mpn_sublsh2_n_ip1 */
/* #undef HAVE_NATIVE_mpn_sublsh_n_ip1 */
/* #undef HAVE_NATIVE_mpn_sublsh1_nc_ip1 */
/* #undef HAVE_NATIVE_mpn_sublsh2_nc_ip1 */
/* #undef HAVE_NATIVE_mpn_sublsh_nc_ip1 */
/* #undef HAVE_NATIVE_mpn_submul_1c */
/* #undef HAVE_NATIVE_mpn_tabselect */
/* #undef HAVE_NATIVE_mpn_udiv_qrnnd */
/* #undef HAVE_NATIVE_mpn_udiv_qrnnd_r */
/* #undef HAVE_NATIVE_mpn_umul_ppmm */
/* #undef HAVE_NATIVE_mpn_umul_ppmm_r */
#define HAVE_NATIVE_mpn_xor_n 1
#define HAVE_NATIVE_mpn_xnor_n 1
/* Define to 1 if you have the `nl_langinfo' function. */
/* #undef HAVE_NL_LANGINFO */
/* Define to 1 if you have the <nl_types.h> header file. */
/* #undef HAVE_NL_TYPES_H */
/* Define to 1 if you have the `obstack_vprintf' function. */
/* #undef HAVE_OBSTACK_VPRINTF */
/* Define to 1 if you have the `popen' function. */
#define HAVE_POPEN 1
/* Define to 1 if you have the `processor_info' function. */
/* #undef HAVE_PROCESSOR_INFO */
/* Define to 1 if <sys/pstat.h> `struct pst_processor' exists and contains
`psp_iticksperclktick'. */
/* #undef HAVE_PSP_ITICKSPERCLKTICK */
/* Define to 1 if you have the `pstat_getprocessor' function. */
/* #undef HAVE_PSTAT_GETPROCESSOR */
/* Define to 1 if the system has the type `ptrdiff_t'. */
#define HAVE_PTRDIFF_T 1
/* Define to 1 if the system has the type `quad_t'. */
/* #undef HAVE_QUAD_T */
/* Define to 1 if you have the `raise' function. */
#define HAVE_RAISE 1
/* Define to 1 if you have the `read_real_time' function. */
/* #undef HAVE_READ_REAL_TIME */
/* Define to 1 if you have the `sigaction' function. */
/* #undef HAVE_SIGACTION */
/* Define to 1 if you have the `sigaltstack' function. */
/* #undef HAVE_SIGALTSTACK */
/* Define to 1 if you have the `sigstack' function. */
/* #undef HAVE_SIGSTACK */
/* Tune directory speed_cyclecounter, undef=none, 1=32bits, 2=64bits) */
#define HAVE_SPEED_CYCLECOUNTER 2
/* Define to 1 if you have the <sstream> header file. */
#define HAVE_SSTREAM 1
/* Define to 1 if the system has the type `stack_t'. */
/* #undef HAVE_STACK_T */
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if the system has the type `std::locale'. */
#define HAVE_STD__LOCALE 1
/* Define to 1 if you have the `strchr' function. */
#define HAVE_STRCHR 1
/* Define to 1 if you have the `strerror' function. */
#define HAVE_STRERROR 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strnlen' function. */
#define HAVE_STRNLEN 1
/* Define to 1 if you have the `strtol' function. */
#define HAVE_STRTOL 1
/* Define to 1 if you have the `strtoul' function. */
#define HAVE_STRTOUL 1
/* Define to 1 if you have the `sysconf' function. */
/* #undef HAVE_SYSCONF */
/* Define to 1 if you have the `sysctl' function. */
/* #undef HAVE_SYSCTL */
/* Define to 1 if you have the `sysctlbyname' function. */
/* #undef HAVE_SYSCTLBYNAME */
/* Define to 1 if you have the `syssgi' function. */
/* #undef HAVE_SYSSGI */
/* Define to 1 if you have the <sys/attributes.h> header file. */
/* #undef HAVE_SYS_ATTRIBUTES_H */
/* Define to 1 if you have the <sys/iograph.h> header file. */
/* #undef HAVE_SYS_IOGRAPH_H */
/* Define to 1 if you have the <sys/mman.h> header file. */
/* #undef HAVE_SYS_MMAN_H */
/* Define to 1 if you have the <sys/param.h> header file. */
#define HAVE_SYS_PARAM_H 1
/* Define to 1 if you have the <sys/processor.h> header file. */
/* #undef HAVE_SYS_PROCESSOR_H */
/* Define to 1 if you have the <sys/pstat.h> header file. */
/* #undef HAVE_SYS_PSTAT_H */
/* Define to 1 if you have the <sys/resource.h> header file. */
/* #undef HAVE_SYS_RESOURCE_H */
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/sysctl.h> header file. */
/* #undef HAVE_SYS_SYSCTL_H */
/* Define to 1 if you have the <sys/sysinfo.h> header file. */
/* #undef HAVE_SYS_SYSINFO_H */
/* Define to 1 if you have the <sys/syssgi.h> header file. */
/* #undef HAVE_SYS_SYSSGI_H */
/* Define to 1 if you have the <sys/systemcfg.h> header file. */
/* #undef HAVE_SYS_SYSTEMCFG_H */
/* Define to 1 if you have the <sys/times.h> header file. */
/* #undef HAVE_SYS_TIMES_H */
/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define to 1 if you have the `times' function. */
/* #undef HAVE_TIMES */
/* Define to 1 if the system has the type `uint_least32_t'. */
#define HAVE_UINT_LEAST32_T 1
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Define to 1 if you have the `vsnprintf' function and it works properly. */
/* #undef HAVE_VSNPRINTF */
/* Define to 1 for Windows/64 */
#define HOST_DOS64 1
/* Assembler local label prefix */
#define LSYM_PREFIX "L"
/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"
/* Define to 1 to disable the use of inline assembly */
/* #undef NO_ASM */
/* Name of package */
#define PACKAGE "gmp"
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT "gmp-bugs@gmplib.org, see https://gmplib.org/manual/Reporting-Bugs.html"
/* Define to the full name of this package. */
#define PACKAGE_NAME "GNU MP"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "GNU MP 6.2.0"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "gmp"
/* Define to the home page for this package. */
#define PACKAGE_URL "http://www.gnu.org/software/gmp/"
/* Define to the version of this package. */
#define PACKAGE_VERSION "6.2.0"
/* Define as the return type of signal handlers (`int' or `void'). */
#define RETSIGTYPE void
/* The size of `mp_limb_t', as computed by sizeof. */
#define SIZEOF_MP_LIMB_T 8
/* The size of `unsigned', as computed by sizeof. */
#define SIZEOF_UNSIGNED 4
/* The size of `unsigned long', as computed by sizeof. */
#define SIZEOF_UNSIGNED_LONG 4
/* The size of `unsigned short', as computed by sizeof. */
#define SIZEOF_UNSIGNED_SHORT 2
/* The size of `void *', as computed by sizeof. */
#define SIZEOF_VOID_P 8
/* Define to 1 if sscanf requires writable inputs */
/* #undef SSCANF_WRITABLE_INPUT */
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
#define TIME_WITH_SYS_TIME 1
/* Maximum size the tune program can test for SQR_TOOM2_THRESHOLD */
/* #undef TUNE_SQR_TOOM2_MAX */
/* Version number of package */
#define VERSION "6.2.0"
/* Define to 1 to enable ASSERT checking, per --enable-assert */
/* #undef WANT_ASSERT */
/* Define to 1 to enable GMP_CPU_TYPE faking cpuid, per --enable-fake-cpuid */
/* #undef WANT_FAKE_CPUID */
/* Define to 1 when building a fat binary. */
/* #undef WANT_FAT_BINARY */
/* Define to 1 to enable FFTs for multiplication, per --enable-fft */
#define WANT_FFT 1
/* Define to 1 to enable old mpn_mul_fft_full for multiplication, per
--enable-old-fft-full */
/* #undef WANT_OLD_FFT_FULL */
/* Define to 1 if --enable-profiling=gprof */
/* #undef WANT_PROFILING_GPROF */
/* Define to 1 if --enable-profiling=instrument */
/* #undef WANT_PROFILING_INSTRUMENT */
/* Define to 1 if --enable-profiling=prof */
/* #undef WANT_PROFILING_PROF */
/* Define one of these to 1 for the desired temporary memory allocation
method, per --enable-alloca. */
#define WANT_TMP_ALLOCA 1
/* #undef WANT_TMP_REENTRANT */
/* #undef WANT_TMP_NOTREENTRANT */
/* #undef WANT_TMP_DEBUG */
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
significant byte first (like Motorola and SPARC, unlike Intel). */
#if defined AC_APPLE_UNIVERSAL_BUILD
# if defined __BIG_ENDIAN__
# define WORDS_BIGENDIAN 1
# endif
#else
# ifndef WORDS_BIGENDIAN
/* # undef WORDS_BIGENDIAN */
# endif
#endif
/* Define to 1 if the assembler understands the mulx instruction */
/* #undef X86_ASM_MULX */
/* Define to 1 if `lex' declares `yytext' as a `char *' by default, not a
`char[]'. */
/* #undef YYTEXT_POINTER */
/* Define to `__inline__' or `__inline' if that's what the C compiler
calls it, or to nothing if 'inline' is not supported under any name. */
#ifndef __cplusplus
/* #undef inline */
#endif
/* Define to the equivalent of the C99 'restrict' keyword, or to
nothing if this is not supported. Do not define if restrict is
supported directly. */
#define restrict __restrict
/* Work around a bug in Sun C++: it does not support _Restrict or
__restrict__, even though the corresponding Sun C compiler ends up with
"#define restrict _Restrict" or "#define restrict __restrict__" in the
previous line. Perhaps some future version of Sun C++ will work with
restrict; if so, hopefully it defines __RESTRICT like Sun C does. */
#if defined __SUNPRO_CC && !defined __RESTRICT
# define _Restrict
# define __restrict__
#endif
/* Define to empty if the keyword `volatile' does not work. Warning: valid
code using `volatile' can become incorrect without. Disable with care. */
/* #undef volatile */

View File

@@ -9,62 +9,3 @@
enabled libopenmpt && require_pkg_config libopenmpt "libopenmpt >= 0.2.6557" libopenmpt/libopenmpt.h openmpt_module_create -lstdc++ && append libopenmpt_extralibs "-lstdc++"
enabled libopus && {
enabled libopus_decoder && {
--- a/libavcodec/cfhddata.c
+++ b/libavcodec/cfhddata.c
@@ -276,10 +276,10 @@
av_cold int ff_cfhd_init_vlcs(CFHDContext *s)
{
int i, j, ret = 0;
- uint32_t new_cfhd_vlc_bits[NB_VLC_TABLE_18 * 2];
- uint8_t new_cfhd_vlc_len[NB_VLC_TABLE_18 * 2];
- uint16_t new_cfhd_vlc_run[NB_VLC_TABLE_18 * 2];
- int16_t new_cfhd_vlc_level[NB_VLC_TABLE_18 * 2];
+ uint32_t *new_cfhd_vlc_bits = av_calloc(sizeof(uint32_t), NB_VLC_TABLE_18 * 2);
+ uint8_t *new_cfhd_vlc_len = av_calloc(sizeof(uint8_t), NB_VLC_TABLE_18 * 2);
+ uint16_t *new_cfhd_vlc_run = av_calloc(sizeof(uint16_t), NB_VLC_TABLE_18 * 2);
+ int16_t *new_cfhd_vlc_level = av_calloc(sizeof(int16_t), NB_VLC_TABLE_18 * 2);
/** Similar to dv.c, generate signed VLC tables **/
@@ -305,8 +305,13 @@
ret = init_vlc(&s->vlc_9, VLC_BITS, j, new_cfhd_vlc_len,
1, 1, new_cfhd_vlc_bits, 4, 4, 0);
- if (ret < 0)
+ if (ret < 0) {
+ av_free(new_cfhd_vlc_bits);
+ av_free(new_cfhd_vlc_len);
+ av_free(new_cfhd_vlc_run);
+ av_free(new_cfhd_vlc_level);
return ret;
+ }
for (i = 0; i < s->vlc_9.table_size; i++) {
int code = s->vlc_9.table[i][0];
int len = s->vlc_9.table[i][1];
@@ -346,8 +351,14 @@
ret = init_vlc(&s->vlc_18, VLC_BITS, j, new_cfhd_vlc_len,
1, 1, new_cfhd_vlc_bits, 4, 4, 0);
- if (ret < 0)
+ if (ret < 0) {
+ av_free(new_cfhd_vlc_bits);
+ av_free(new_cfhd_vlc_len);
+ av_free(new_cfhd_vlc_run);
+ av_free(new_cfhd_vlc_level);
return ret;
+ }
+
av_assert0(s->vlc_18.table_size == 4572);
for (i = 0; i < s->vlc_18.table_size; i++) {
@@ -367,5 +378,10 @@
s->table_18_rl_vlc[i].run = run;
}
+ av_free(new_cfhd_vlc_bits);
+ av_free(new_cfhd_vlc_len);
+ av_free(new_cfhd_vlc_run);
+ av_free(new_cfhd_vlc_level);
+
return ret;
}

View File

@@ -34,52 +34,3 @@ diff -Naur orig/cmake/GenerateBuiltins.cmake.txt external_ispc/cmake/GenerateBui
elseif ("${bit}" STREQUAL "64" AND ${arch} STREQUAL "x86")
set(target_arch "x86_64")
elseif ("${bit}" STREQUAL "32" AND ${arch} STREQUAL "arm")
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 46a8db8..f53beef 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -36,8 +36,12 @@
cmake_minimum_required(VERSION 3.13)
if (UNIX)
- set(CMAKE_C_COMPILER "clang")
- set(CMAKE_CXX_COMPILER "clang++")
+ if (NOT CMAKE_C_COMPILER)
+ set(CMAKE_C_COMPILER "clang")
+ endif()
+ if (NOT CMAKE_CXX_COMPILER)
+ set(CMAKE_CXX_COMPILER "clang++")
+ endif()
endif()
set(PROJECT_NAME ispc)
@@ -412,6 +416,29 @@ else()
endif()
endif()
+# Link against libstdc++.a which must be provided to the linker after
+# LLVM and CLang libraries.
+# This is needed because some of LLVM/CLang dependencies are using
+# std::make_shared, which is defined in one of those:
+# - libclang-cpp.so
+# - libstdc++.a
+# Using the former one is tricky because then generated binary depends
+# on a library which is outside of the LD_LIBRARY_PATH.
+#
+# Hence, using C++ implementation from G++ which seems to work just fine.
+# In fact, from investigation seems that libclang-cpp.so itself is pulling
+# std::_Sp_make_shared_tag from G++'s libstdc++.a.
+if(UNIX AND NOT APPLE)
+ execute_process(
+ COMMAND g++ --print-file-name libstdc++.a
+ OUTPUT_VARIABLE GCC_LIBSTDCXX_A
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+ )
+ if(GCC_LIBSTDCXX_A AND EXISTS ${GCC_LIBSTDCXX_A})
+ target_link_libraries(${PROJECT_NAME} ${GCC_LIBSTDCXX_A})
+ endif()
+endif()
+
# Build target for utility checking host ISA
if (ISPC_INCLUDE_UTILS)
add_executable(check_isa "")

View File

@@ -1,129 +0,0 @@
diff --git a/output/macho.h b/output/macho.h
index 538c531e..fd5e8849 100644
--- a/output/macho.h
+++ b/output/macho.h
@@ -60,6 +60,8 @@
#define LC_SEGMENT 0x1
#define LC_SEGMENT_64 0x19
#define LC_SYMTAB 0x2
+#define LC_VERSION_MIN_MACOSX 0x24
+#define LC_BUILD_VERSION 0x32
/* Symbol type bits */
#define N_STAB 0xe0
diff --git a/output/outmacho.c b/output/outmacho.c
index 08147883..de6ec902 100644
--- a/output/outmacho.c
+++ b/output/outmacho.c
@@ -38,6 +38,8 @@
#include "compiler.h"
+#include <stdlib.h>
+
#include "nctype.h"
#include "nasm.h"
@@ -64,6 +66,8 @@
#define MACHO_SYMCMD_SIZE 24
#define MACHO_NLIST_SIZE 12
#define MACHO_RELINFO_SIZE 8
+#define MACHO_BUILD_VERSION_SIZE 24
+#define MACHO_VERSION_MIN_MACOSX_SIZE 16
#define MACHO_HEADER64_SIZE 32
#define MACHO_SEGCMD64_SIZE 72
@@ -1224,6 +1228,46 @@ static void macho_layout_symbols (uint32_t *numsyms,
}
}
+static bool get_full_version_from_env (const char *variable_name,
+ int *r_major,
+ int *r_minor,
+ int *r_patch) {
+ *r_major = 0;
+ *r_minor = 0;
+ *r_patch = 0;
+
+ const char *value = getenv(variable_name);
+ if (value == NULL || value[0] == '\0') {
+ return false;
+ }
+
+ const char *current_value = value;
+ const char *end_value = value + strlen(value);
+
+ char *endptr;
+
+ *r_major = strtol(current_value, &endptr, 10);
+ if (endptr >= end_value) {
+ return true;
+ }
+ current_value = endptr + 1;
+
+ *r_minor = strtol(current_value, &endptr, 10);
+ if (endptr >= end_value) {
+ return true;
+ }
+ current_value = endptr + 1;
+
+ *r_patch = strtol(current_value, &endptr, 10);
+
+ return true;
+}
+
+static bool need_version_min_macosx_command (void) {
+ return getenv("MACOSX_DEPLOYMENT_TARGET") &&
+ getenv("MACOSX_SDK_VERSION");
+}
+
/* Calculate some values we'll need for writing later. */
static void macho_calculate_sizes (void)
@@ -1270,6 +1314,12 @@ static void macho_calculate_sizes (void)
head_sizeofcmds += fmt.segcmd_size + seg_nsects * fmt.sectcmd_size;
}
+ /* LC_VERSION_MIN_MACOSX */
+ if (need_version_min_macosx_command()) {
+ ++head_ncmds;
+ head_sizeofcmds += MACHO_VERSION_MIN_MACOSX_SIZE;
+ }
+
if (nsyms > 0) {
++head_ncmds;
head_sizeofcmds += MACHO_SYMCMD_SIZE;
@@ -1653,6 +1703,33 @@ static void macho_write (void)
else
nasm_warn(WARN_OTHER, "no sections?");
+#define ENCODE_BUILD_VERSION(major, minor, patch) \
+ (((major) << 16) | ((minor) << 8) | (patch))
+
+ if (0) {
+ fwriteint32_t(LC_BUILD_VERSION, ofile); /* cmd == LC_BUILD_VERSION */
+ fwriteint32_t(MACHO_BUILD_VERSION_SIZE, ofile); /* size of load command */
+ fwriteint32_t(1, ofile); /* platform */
+ fwriteint32_t(ENCODE_BUILD_VERSION(10, 13, 0), ofile); /* minos, X.Y.Z is encoded in nibbles xxxx.yy.zz */
+ fwriteint32_t(ENCODE_BUILD_VERSION(10, 15, 4), ofile); /* sdk, X.Y.Z is encoded in nibbles xxxx.yy.zz */
+ fwriteint32_t(0, ofile); /* number of tool entries following this */
+ }
+
+ if (need_version_min_macosx_command()) {
+ int sdk_major, sdk_minor, sdk_patch;
+ get_full_version_from_env("MACOSX_SDK_VERSION", &sdk_major, &sdk_minor, &sdk_patch);
+
+ int version_major, version_minor, version_patch;
+ get_full_version_from_env("MACOSX_DEPLOYMENT_TARGET", &version_major, &version_minor, &version_patch);
+
+ fwriteint32_t(LC_VERSION_MIN_MACOSX, ofile); /* cmd == LC_VERSION_MIN_MACOSX */
+ fwriteint32_t(MACHO_VERSION_MIN_MACOSX_SIZE, ofile); /* size of load command */
+ fwriteint32_t(ENCODE_BUILD_VERSION(version_major, version_minor, version_patch), ofile); /* minos, X.Y.Z is encoded in nibbles xxxx.yy.zz */
+ fwriteint32_t(ENCODE_BUILD_VERSION(sdk_major, sdk_minor, sdk_patch), ofile); /* sdk, X.Y.Z is encoded in nibbles xxxx.yy.zz */
+ }
+
+#undef ENCODE_BUILD_VERSION
+
if (nsyms > 0) {
/* write out symbol command */
fwriteint32_t(LC_SYMTAB, ofile); /* cmd == LC_SYMTAB */

View File

@@ -1,27 +0,0 @@
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index ba2b1f4..b10f7df 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -2164,8 +2164,8 @@ class accelerate_info(system_info):
'accelerate' in libraries):
if intel:
args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
+# else:
+# args.extend(['-faltivec'])
args.extend([
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
@@ -2174,8 +2174,8 @@ class accelerate_info(system_info):
'veclib' in libraries):
if intel:
args.extend(['-msse3'])
- else:
- args.extend(['-faltivec'])
+# else:
+# args.extend(['-faltivec'])
args.extend([
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
link_args.extend(['-Wl,-framework', '-Wl,vecLib'])

View File

@@ -1,12 +0,0 @@
diff --git a/include/ogg/os_types.h b/include/ogg/os_types.h
index eb8a322..6f73b72 100644
--- a/include/ogg/os_types.h
+++ b/include/ogg/os_types.h
@@ -71,6 +71,7 @@
#elif (defined(__APPLE__) && defined(__MACH__)) /* MacOS X Framework build */
# include <sys/types.h>
+# include <stdint.h>
typedef int16_t ogg_int16_t;
typedef uint16_t ogg_uint16_t;
typedef int32_t ogg_int32_t;

View File

@@ -86,47 +86,3 @@ index 1f9a3ee..d151e9a 100644
return isnan( value );
#else
return std::isnan(value);
diff --git a/DAEValidator/library/src/Dae.cpp b/DAEValidator/library/src/Dae.cpp
index 9256ee1..241ad67 100644
--- a/DAEValidator/library/src/Dae.cpp
+++ b/DAEValidator/library/src/Dae.cpp
@@ -304,7 +304,7 @@ namespace opencollada
if (auto root_node = root())
{
const auto & nodes = root_node.selectNodes("//*[@id]");
- for (const auto & node : nodes)
+ for (const auto node : nodes)
{
string id = node.attribute("id").value();
mIdCache.insert(id);
@@ -312,4 +312,4 @@ namespace opencollada
}
}
}
-}
\ No newline at end of file
+}
diff --git a/DAEValidator/library/src/DaeValidator.cpp b/DAEValidator/library/src/DaeValidator.cpp
index 715d903..24423ce 100644
--- a/DAEValidator/library/src/DaeValidator.cpp
+++ b/DAEValidator/library/src/DaeValidator.cpp
@@ -162,7 +162,7 @@ namespace opencollada
// Find xsi:schemaLocation attributes in dae and try to validate against specified xsd documents
const auto & elements = dae.root().selectNodes("//*[@xsi:schemaLocation]");
- for (const auto & element : elements)
+ for (const auto element : elements)
{
if (auto schemaLocation = element.attribute("schemaLocation"))
{
@@ -274,7 +274,7 @@ namespace opencollada
int result = 0;
map<string, size_t> ids;
const auto & nodes = dae.root().selectNodes("//*[@id]");
- for (const auto & node : nodes)
+ for (const auto node : nodes)
{
string id = node.attribute("id").value();
size_t line = node.line();

View File

@@ -1,23 +0,0 @@
diff --git a/runtime/src/z_Linux_asm.S b/runtime/src/z_Linux_asm.S
index 0d8885e..42aa5ad 100644
--- a/runtime/src/z_Linux_asm.S
+++ b/runtime/src/z_Linux_asm.S
@@ -1540,10 +1540,12 @@ __kmp_unnamed_critical_addr:
.comm .gomp_critical_user_,32,8
.data
.align 8
- .global __kmp_unnamed_critical_addr
-__kmp_unnamed_critical_addr:
+ .global ___kmp_unnamed_critical_addr
+___kmp_unnamed_critical_addr:
.8byte .gomp_critical_user_
- .size __kmp_unnamed_critical_addr,8
+# if !(KMP_OS_DARWIN)
+ .size ___kmp_unnamed_critical_addr,8
+# endif
#endif /* KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 */
#if KMP_OS_LINUX

View File

@@ -1,289 +0,0 @@
diff -ru a/Doc/library/ctypes.rst b/Doc/library/ctypes.rst
--- a/Doc/library/ctypes.rst 2020-03-10 07:11:12.000000000 +0100
+++ b/Doc/library/ctypes.rst 2020-07-14 08:10:10.000000000 +0200
@@ -1551,6 +1551,13 @@
value usable as argument (integer, string, ctypes instance). This allows
defining adapters that can adapt custom objects as function parameters.
+ .. attribute:: variadic
+
+ Assign a boolean to specify that the function takes a variable number of
+ arguments. This does not matter on most platforms, but for Apple arm64
+ platforms variadic functions have a different calling convention than
+ normal functions.
+
.. attribute:: errcheck
Assign a Python function or another callable to this attribute. The
diff -ru a/Modules/_ctypes/_ctypes.c b/Modules/_ctypes/_ctypes.c
--- a/Modules/_ctypes/_ctypes.c 2020-03-10 07:11:12.000000000 +0100
+++ b/Modules/_ctypes/_ctypes.c 2020-07-14 08:14:41.000000000 +0200
@@ -3175,6 +3175,35 @@
}
static int
+PyCFuncPtr_set_variadic(PyCFuncPtrObject *self, PyObject *ob, void *Py_UNUSED(ignored))
+{
+ StgDictObject *dict = PyObject_stgdict((PyObject *)self);
+ assert(dict);
+ int r = PyObject_IsTrue(ob);
+ if (r == 1) {
+ dict->flags |= FUNCFLAG_VARIADIC;
+ return 0;
+ } else if (r == 0) {
+ dict->flags &= ~FUNCFLAG_VARIADIC;
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+static PyObject *
+PyCFuncPtr_get_variadic(PyCFuncPtrObject *self, void *Py_UNUSED(ignored))
+{
+ StgDictObject *dict = PyObject_stgdict((PyObject *)self);
+ assert(dict); /* Cannot be NULL for PyCFuncPtrObject instances */
+ if (dict->flags & FUNCFLAG_VARIADIC)
+ Py_RETURN_TRUE;
+ else
+ Py_RETURN_FALSE;
+}
+
+
+static int
PyCFuncPtr_set_argtypes(PyCFuncPtrObject *self, PyObject *ob, void *Py_UNUSED(ignored))
{
PyObject *converters;
@@ -5632,6 +5661,7 @@
PyModule_AddObject(m, "FUNCFLAG_USE_ERRNO", PyLong_FromLong(FUNCFLAG_USE_ERRNO));
PyModule_AddObject(m, "FUNCFLAG_USE_LASTERROR", PyLong_FromLong(FUNCFLAG_USE_LASTERROR));
PyModule_AddObject(m, "FUNCFLAG_PYTHONAPI", PyLong_FromLong(FUNCFLAG_PYTHONAPI));
+ PyModule_AddObject(m, "FUNCFLAG_VARIADIC", PyLong_FromLong(FUNCFLAG_VARIADIC));
PyModule_AddStringConstant(m, "__version__", "1.1.0");
PyModule_AddObject(m, "_memmove_addr", PyLong_FromVoidPtr(memmove));
diff -ru a/Modules/_ctypes/callproc.c b/Modules/_ctypes/callproc.c
--- a/Modules/_ctypes/callproc.c 2020-03-10 07:11:12.000000000 +0100
+++ b/Modules/_ctypes/callproc.c 2020-07-14 08:18:33.000000000 +0200
@@ -767,7 +767,8 @@
ffi_type **atypes,
ffi_type *restype,
void *resmem,
- int argcount)
+ int argcount,
+ int argtypecount)
{
PyThreadState *_save = NULL; /* For Py_BLOCK_THREADS and Py_UNBLOCK_THREADS */
PyObject *error_object = NULL;
@@ -793,15 +794,38 @@
if ((flags & FUNCFLAG_CDECL) == 0)
cc = FFI_STDCALL;
#endif
- if (FFI_OK != ffi_prep_cif(&cif,
- cc,
- argcount,
- restype,
- atypes)) {
- PyErr_SetString(PyExc_RuntimeError,
- "ffi_prep_cif failed");
- return -1;
- }
+#if HAVE_FFI_PREP_CIF_VAR
+ /* Everyone SHOULD set f.variadic=True on variadic function pointers, but
+ * lots of existing code will not. If there's at least one arg and more
+ * args are passed than are defined in the prototype, then it must be a
+ * variadic function. */
+ if ((flags & FUNCFLAG_VARIADIC) ||
+ (argtypecount != 0 && argcount > argtypecount))
+ {
+ if (FFI_OK != ffi_prep_cif_var(&cif,
+ cc,
+ argtypecount,
+ argcount,
+ restype,
+ atypes)) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "ffi_prep_cif_var failed");
+ return -1;
+ }
+ } else {
+#endif
+ if (FFI_OK != ffi_prep_cif(&cif,
+ cc,
+ argcount,
+ restype,
+ atypes)) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "ffi_prep_cif failed");
+ return -1;
+ }
+#if HAVE_FFI_PREP_CIF_VAR
+ }
+#endif
if (flags & (FUNCFLAG_USE_ERRNO | FUNCFLAG_USE_LASTERROR)) {
error_object = _ctypes_get_errobj(&space);
@@ -1185,9 +1209,8 @@
if (-1 == _call_function_pointer(flags, pProc, avalues, atypes,
rtype, resbuf,
- Py_SAFE_DOWNCAST(argcount,
- Py_ssize_t,
- int)))
+ Py_SAFE_DOWNCAST(argcount, Py_ssize_t, int),
+ Py_SAFE_DOWNCAST(argtype_count, Py_ssize_t, int)))
goto cleanup;
#ifdef WORDS_BIGENDIAN
diff -ru a/Modules/_ctypes/ctypes.h b/Modules/_ctypes/ctypes.h
--- a/Modules/_ctypes/ctypes.h 2020-03-10 07:11:12.000000000 +0100
+++ b/Modules/_ctypes/ctypes.h 2020-07-14 08:30:53.000000000 +0200
@@ -285,6 +285,7 @@
#define FUNCFLAG_PYTHONAPI 0x4
#define FUNCFLAG_USE_ERRNO 0x8
#define FUNCFLAG_USE_LASTERROR 0x10
+#define FUNCFLAG_VARIADIC 0x20
#define TYPEFLAG_ISPOINTER 0x100
#define TYPEFLAG_HASPOINTER 0x200
diff -ru a/configure b/configure
--- a/configure 2020-03-10 07:11:12.000000000 +0100
+++ b/configure 2020-07-14 08:03:27.000000000 +0200
@@ -3374,7 +3374,7 @@
# has no effect, don't bother defining them
Darwin/[6789].*)
define_xopen_source=no;;
- Darwin/1[0-9].*)
+ Darwin/[12][0-9].*)
define_xopen_source=no;;
# On AIX 4 and 5.1, mbstate_t is defined only when _XOPEN_SOURCE == 500 but
# used in wcsnrtombs() and mbsnrtowcs() even if _XOPEN_SOURCE is not defined
@@ -9251,6 +9251,9 @@
ppc)
MACOSX_DEFAULT_ARCH="ppc64"
;;
+ arm64)
+ MACOSX_DEFAULT_ARCH="arm64"
+ ;;
*)
as_fn_error $? "Unexpected output of 'arch' on OSX" "$LINENO" 5
;;
diff -ru a/configure.ac b/configure.ac
--- a/configure.ac 2020-03-10 07:11:12.000000000 +0100
+++ b/configure.ac 2020-07-14 08:03:27.000000000 +0200
@@ -2456,6 +2456,9 @@
ppc)
MACOSX_DEFAULT_ARCH="ppc64"
;;
+ arm64)
+ MACOSX_DEFAULT_ARCH="arm64"
+ ;;
*)
AC_MSG_ERROR([Unexpected output of 'arch' on OSX])
;;
diff -ru a/setup.py b/setup.py
--- a/setup.py 2020-03-10 07:11:12.000000000 +0100
+++ b/setup.py 2020-07-14 08:28:12.000000000 +0200
@@ -141,6 +141,13 @@
os.unlink(tmpfile)
return MACOS_SDK_ROOT
+
+def is_macosx_at_least(vers):
+ if host_platform == 'darwin':
+ dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
+ if dep_target:
+ return tuple(map(int, dep_target.split('.'))) >= vers
+ return False
def is_macosx_sdk_path(path):
"""
@@ -150,6 +157,13 @@
or path.startswith('/System/')
or path.startswith('/Library/') )
+def grep_headers_for(function, headers):
+ for header in headers:
+ with open(header, 'r') as f:
+ if function in f.read():
+ return True
+ return False
+
def find_file(filename, std_dirs, paths):
"""Searches for the directory where a given file is located,
and returns a possibly-empty list of additional directories, or None
@@ -1972,7 +1986,11 @@
return True
def detect_ctypes(self, inc_dirs, lib_dirs):
- self.use_system_libffi = False
+ if not sysconfig.get_config_var("LIBFFI_INCLUDEDIR") and is_macosx_at_least((10,15)):
+ self.use_system_libffi = True
+ else:
+ self.use_system_libffi = '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS")
+
include_dirs = []
extra_compile_args = []
extra_link_args = []
@@ -2016,32 +2034,48 @@
ext_test = Extension('_ctypes_test',
sources=['_ctypes/_ctypes_test.c'],
libraries=['m'])
+ ffi_inc = sysconfig.get_config_var("LIBFFI_INCLUDEDIR")
+ ffi_lib = None
+
self.extensions.extend([ext, ext_test])
if host_platform == 'darwin':
- if '--with-system-ffi' not in sysconfig.get_config_var("CONFIG_ARGS"):
+ if not self.use_system_libffi:
return
- # OS X 10.5 comes with libffi.dylib; the include files are
- # in /usr/include/ffi
- inc_dirs.append('/usr/include/ffi')
-
- ffi_inc = [sysconfig.get_config_var("LIBFFI_INCLUDEDIR")]
- if not ffi_inc or ffi_inc[0] == '':
- ffi_inc = find_file('ffi.h', [], inc_dirs)
- if ffi_inc is not None:
- ffi_h = ffi_inc[0] + '/ffi.h'
+ ffi_in_sdk = os.path.join(macosx_sdk_root(), "usr/include/ffi")
+ if os.path.exists(ffi_in_sdk):
+ ffi_inc = ffi_in_sdk
+ ffi_lib = 'ffi'
+ else:
+ # OS X 10.5 comes with libffi.dylib; the include files are
+ # in /usr/include/ffi
+ ffi_inc_dirs.append('/usr/include/ffi')
+
+ if not ffi_inc:
+ found = find_file('ffi.h', [], ffi_inc_dirs)
+ if found:
+ ffi_inc = found[0]
+ if ffi_inc:
+ ffi_h = ffi_inc + '/ffi.h'
if not os.path.exists(ffi_h):
ffi_inc = None
print('Header file {} does not exist'.format(ffi_h))
- ffi_lib = None
- if ffi_inc is not None:
+ if ffi_lib is None and ffi_inc:
for lib_name in ('ffi', 'ffi_pic'):
if (self.compiler.find_library_file(lib_dirs, lib_name)):
ffi_lib = lib_name
break
if ffi_inc and ffi_lib:
- ext.include_dirs.extend(ffi_inc)
+ ffi_headers = glob(os.path.join(ffi_inc, '*.h'))
+ if grep_headers_for('ffi_closure_alloc', ffi_headers):
+ try:
+ sources.remove('_ctypes/malloc_closure.c')
+ except ValueError:
+ pass
+ if grep_headers_for('ffi_prep_cif_var', ffi_headers):
+ ext.extra_compile_args.append("-DHAVE_FFI_PREP_CIF_VAR=1")
+ ext.include_dirs.append(ffi_inc)
ext.libraries.append(ffi_lib)
self.use_system_libffi = True

View File

@@ -1,14 +0,0 @@
Only in external_sqlite_orig: config.log
diff -ru external_sqlite_orig/config.sub external_sqlite/config.sub
--- external_sqlite_orig/config.sub 2020-07-10 14:06:42.000000000 +0200
+++ external_sqlite/config.sub 2020-07-10 14:10:24.000000000 +0200
@@ -314,6 +314,7 @@
# Recognize the basic CPU types with company name.
580-* \
| a29k-* \
+ | aarch64-* \
| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
| alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
Only in external_sqlite: mksourceid
Only in external_sqlite: sqlite3session.h

View File

@@ -1,18 +0,0 @@
--- config.sub
+++ config.sub
@@ -226,6 +226,7 @@
# Some are omitted here because they have special meanings below.
1750a | 580 \
| a29k \
+ | aarch64 \
| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
| arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \
@@ -286,6 +287,7 @@
# Recognize the basic CPU types with company name.
580-* \
| a29k-* \
+ | aarch64-* \
| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
| alphapca5[67]-* | alpha64pca5[67]-* | arc-* \

View File

@@ -97,36 +97,3 @@ diff -Naur external_usd_base/cmake/macros/Public.cmake external_usd/cmake/macros
endforeach()
foreach(lib ${PXR_OBJECT_LIBS})
set(objects "${objects};\$<TARGET_OBJECTS:${lib}>")
diff --git a/pxr/base/arch/align.h b/pxr/base/arch/align.h
index f3cabf4..ebc8a69 100644
--- a/pxr/base/arch/align.h
+++ b/pxr/base/arch/align.h
@@ -77,7 +77,11 @@ ArchAlignMemory(void *base)
/// The size of a CPU cache line on the current processor architecture in bytes.
///
/// \hideinitializer
+#if defined(ARCH_OS_DARWIN) && defined(ARCH_CPU_ARM)
+#define ARCH_CACHE_LINE_SIZE 128
+#else
#define ARCH_CACHE_LINE_SIZE 64
+#endif
///@}
diff --git a/pxr/base/arch/math.h b/pxr/base/arch/math.h
index 3e66c37..64a052c 100644
--- a/pxr/base/arch/math.h
+++ b/pxr/base/arch/math.h
@@ -42,7 +42,7 @@ PXR_NAMESPACE_OPEN_SCOPE
/// \addtogroup group_arch_Math
///@{
-#if defined (ARCH_CPU_INTEL) || defined(doxygen)
+#if defined (ARCH_CPU_INTEL) || defined(ARCH_CPU_ARM) || defined(doxygen)
/// This is the smallest value e such that 1+e^2 == 1, using floats.
/// True for all IEEE754 chipsets.

View File

@@ -0,0 +1,34 @@
Buildbot-lts
============
This folder contains configuration files and build scripts for making a release.
It originated when we started doing LTS releases, which happen once every two
weeks. The idea is to automate the manual steps where possible and to provide a
pipeline process for the manual steps that still need to happen.
Using the same software as builder.blender.org already points us in the right
direction should we ever want to make this part of builder.blender.org. For now
it runs on a VM in the Blender Institute, since it contains users' private keys
and the process needs to be controlled security-wise.
The source and configurations are of course publicly available for anyone to
check, develop and use.
Setting up build-bot
--------------------
These instructions are based on https://github.com/cjolowicz/docker-buildbot.
Build custom buildbot master and worker images containing the packages we need for building (git, wget).
cd docker
docker build -t buildbot --no-cache .
cd worker
docker build -t buildbot-worker --no-cache .
docker network create net-buildbot
docker rm lts-buildbot && docker run --init --name lts-buildbot --network net-buildbot --publish=127.0.0.1:8010:8010 -t -i buildbot
docker rm lts-worker && docker run --init --name lts-worker --network net-buildbot -e BUILDMASTER=lts-buildbot -e WORKERNAME=lts-worker -e WORKERPASS=secret -t -i buildbot-worker
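Once both containers are up, the master's REST API can serve as a quick smoke test to confirm the worker is connected. A minimal sketch, assuming the port mapping above (this script is illustrative and not part of the pipeline):

import json
import urllib.request

# List the workers the master knows about and whether each is connected.
with urllib.request.urlopen("http://127.0.0.1:8010/api/v2/workers") as response:
    data = json.load(response)
for worker in data["workers"]:
    state = "connected" if worker["connected_to"] else "offline"
    print(worker["name"], state)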

View File

@@ -0,0 +1,36 @@
FROM ubuntu:18.04
RUN apt update && apt upgrade -y
RUN apt install -y \
gosu \
wget \
openssh-client \
build-essential \
libffi-dev \
libssl-dev \
python3-dev \
python3-pip
ENV BUILDBOT_VERSION 2.5.0
RUN pip3 --no-cache-dir install --upgrade pip && pip --no-cache-dir install \
buildbot[bundle,tls]==$BUILDBOT_VERSION \
buildbot-docker-swarm-worker
COPY buildbot.tac /var/lib/buildbot/
COPY docker-entrypoint.sh /usr/local/bin/
COPY master.cfg /etc/buildbot/
COPY bin/create_checksum.sh /var/lib/buildbot/bin/
COPY bin/upload_file.sh /var/lib/buildbot/bin/
COPY .ssh/id_rsa /var/lib/buildbot/.ssh/
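# NOTE: this bakes a private SSH key into the image, so the image must never
# be pushed to a public registry.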
RUN adduser --home /var/lib/buildbot --disabled-password --gecos '' buildbot
WORKDIR /var/lib/buildbot
RUN ln -s /etc/buildbot/master.cfg
VOLUME /var/lib/buildbot
EXPOSE 8010
EXPOSE 9989
ENTRYPOINT ["docker-entrypoint.sh"]
CMD ["twistd", "--pidfile=", "--nodaemon", "--python=buildbot.tac"]

View File

@@ -0,0 +1,4 @@
#!/bin/sh
set -e
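# $1: file path or glob of the files to checksum ($1 is deliberately left
#     unquoted below so that globs passed in by master.cfg expand)
# $2: output prefix; the checksums are written to $2.md5 and $2.sha256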
md5sum $1 > $2.md5
sha256sum $1 > $2.sha256

View File

@@ -0,0 +1,7 @@
#!/bin/sh
set -e
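# $1: local file to upload
# $2: destination directory on download.blender.org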
if [ ! -f /var/lib/buildbot/.ssh/known_hosts ]
then
ssh-keyscan download.blender.org > /var/lib/buildbot/.ssh/known_hosts
fi
scp $1 jeroen@download.blender.org:$2

View File

@@ -0,0 +1,20 @@
import os
import sys
from twisted.application import service
from twisted.python.log import FileLogObserver
from twisted.python.log import ILogObserver
from buildbot.master import BuildMaster
basedir = os.environ.get("BUILDBOT_BASEDIR",
os.path.abspath(os.path.dirname(__file__)))
configfile = 'master.cfg'
# note: this line is matched against to check that this is a buildmaster
# directory; do not edit it.
application = service.Application('buildmaster')
application.setComponent(ILogObserver, FileLogObserver(sys.stdout).emit)
m = BuildMaster(basedir, configfile, umask=None)
m.setServiceParent(application)

View File

@@ -0,0 +1,36 @@
version: '2'
services:
buildbot:
image: buildbot/buildbot-master:master
env_file:
- db.env
environment:
- BUILDBOT_CONFIG_DIR=config
- BUILDBOT_CONFIG_URL=https://github.com/buildbot/buildbot-docker-example-config/archive/master.tar.gz
- BUILDBOT_WORKER_PORT=9989
- BUILDBOT_WEB_URL=http://localhost:8010/
- BUILDBOT_WEB_PORT=tcp:port=8010
links:
- db
depends_on:
- db
ports:
- "8010:8010"
db:
env_file:
- db.env
image: "postgres:9.4"
expose:
- 5432
worker:
image: "buildbot/buildbot-worker:master"
environment:
BUILDMASTER: buildbot
BUILDMASTER_PORT: 9989
WORKERNAME: lts-worker
WORKERPASS: pass
WORKER_ENVIRONMENT_BLACKLIST: DOCKER_BUILDBOT* BUILDBOT_ENV_* BUILDBOT_1* WORKER_ENVIRONMENT_BLACKLIST
links:
- buildbot

View File

@@ -0,0 +1,18 @@
#!/bin/sh
set -e
buildbot upgrade-master .
chown buildbot /var/lib/buildbot/
chown buildbot /var/lib/buildbot/.ssh/
chown -R buildbot /var/lib/buildbot/*
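# If the Docker socket is mounted into the container, run with the socket's
# group so buildbot can talk to the Docker daemon; otherwise fall back to the
# buildbot group.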
if [ -S /var/run/docker.sock ]
then
group=$(stat -c '%g' /var/run/docker.sock)
else
group=buildbot
fi
gosu buildbot:$group "$@"

View File

@@ -0,0 +1,240 @@
# -*- python -*-
# ex: set filetype=python:
import os
from buildbot.plugins import *
# This is a sample buildmaster config file. It must be installed as
# 'master.cfg' in your buildmaster's base directory.
# This is the dictionary that the buildmaster pays attention to. We also use
# a shorter alias to save typing.
c = BuildmasterConfig = {}
####### WORKERS
# The 'workers' list defines the set of recognized workers. Each element is
# a Worker object, specifying a unique worker name and password. The same
# worker name and password must be configured on the worker.
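# The password here must match the WORKERPASS environment variable passed to
# the worker container (see the docker run command in the README).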
c['workers'] = [worker.Worker("lts-worker", 'secret')]
if 'BUILDBOT_MQ_URL' in os.environ:
c['mq'] = {
'type' : 'wamp',
'router_url': os.environ['BUILDBOT_MQ_URL'],
'realm': os.environ.get('BUILDBOT_MQ_REALM', 'buildbot'),
'debug' : 'BUILDBOT_MQ_DEBUG' in os.environ,
'debug_websockets' : 'BUILDBOT_MQ_DEBUG' in os.environ,
'debug_lowlevel' : 'BUILDBOT_MQ_DEBUG' in os.environ,
}
# 'protocols' contains information about protocols which master will use for
# communicating with workers. You must define at least 'port' option that workers
# could connect to your master with this protocol.
# 'port' must match the value configured into the workers (with their
# --master option)
c['protocols'] = {'pb': {'port': os.environ.get("BUILDBOT_WORKER_PORT", 9989)}}
####### CHANGESOURCES
# the 'change_source' setting tells the buildmaster how it should find out
# about source code changes. We don't build when sources change.
c['change_source'] = []
####### SCHEDULERS
# Configure the Schedulers, which decide how to react to incoming changes. In this
# case, builds are only started manually through the ForceScheduler.
c['schedulers'] = [
schedulers.ForceScheduler(
name="build",
builderNames=["release-blender-283"],
properties=[
util.StringParameter(
name="blender_version",
label="Version of blender.",
default="2.83"),
util.StringParameter(
name="blender_version_full",
label="Version of blender (Full).",
default="2.83.1")
]
),
schedulers.Triggerable(
name="deploy-source-archive",
builderNames=["deploy-source-archive"]
),
schedulers.Triggerable(
name="deploy-buildbot-packages",
builderNames=["deploy-buildbot-packages"]
),
]
####### BUILDERS
# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
# what steps, and which workers can execute them. Note that any particular build will
# only take place on one worker.
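# deploy-source-archive: checks out the release branch on a worker, creates the
# source archive and copies it to the master, which then publishes it to
# download.blender.org.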
deploy_source_archive_factory = util.BuildFactory()
deploy_source_archive_factory.addStep(steps.Git(name="Checkout Blender Source", repourl='https://git.blender.org/blender.git', branch=util.Interpolate('blender-v%(prop:blender_version)s-release'), mode='full'))
deploy_source_archive_factory.addStep(steps.ShellSequence(name="Make source archive", commands=[
util.ShellArg(command=["make", "source_archive"], logfile='make_source_archive'),
# The next two shell commands are only needed during development
util.ShellArg(command=["mv", "blender-2.83.2-beta.tar.xz", util.Interpolate('blender-v%(prop:blender_version_full)s.tar.xz')]),
util.ShellArg(command=["mv", "blender-2.83.2-beta.tar.xz.md5sum", util.Interpolate('blender-v%(prop:blender_version_full)s.tar.xz.md5sum')]),
]))
deploy_source_archive_factory.addStep(steps.FileUpload(
name="Upload source archive",
workersrc=util.Interpolate('blender-v%(prop:blender_version_full)s.tar.xz'),
masterdest=util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-v%(prop:blender_version_full)s.tar.xz')))
deploy_source_archive_factory.addStep(steps.FileUpload(
name="Upload source archive checksum",
workersrc=util.Interpolate('blender-v%(prop:blender_version_full)s.tar.xz.md5sum'),
masterdest=util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-v%(prop:blender_version_full)s.tar.xz.md5sum')))
deploy_source_archive_factory.addStep(steps.MasterShellCommand(
name="Upload source archive",
command=[
'sh', '-C', '/var/lib/buildbot/bin/upload_file.sh',
util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-v%(prop:blender_version_full)s.tar.xz'),
util.Interpolate('/data/www/vhosts/download.blender.org/ftp/jeroen/')]))
deploy_source_archive_factory.addStep(steps.MasterShellCommand(
name="Upload windows64.zip build",
command=[
'sh', '-C', '/var/lib/buildbot/bin/upload_file.sh',
util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-v%(prop:blender_version_full)s.tar.xz.md5sum'),
util.Interpolate('/data/www/vhosts/download.blender.org/ftp/jeroen/')]))
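# deploy-buildbot-packages: runs entirely on the master; downloads the
# per-platform packages from builder.blender.org, generates checksums and
# uploads everything to download.blender.org.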
deploy_buildbot_packages_factory = util.BuildFactory()
deploy_buildbot_packages_factory.addStep(steps.MasterShellCommand(
name="Download linux64 build",
command=[
'wget', util.Interpolate('https://builder.blender.org/download/blender-%(prop:blender_version_full)s-linux64.tar.xz'),
"-O", util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-%(prop:blender_version_full)s-linux64.tar.xz')]))
deploy_buildbot_packages_factory.addStep(steps.MasterShellCommand(
name="Download windows64.msi build",
command=[
'wget', util.Interpolate('https://builder.blender.org/download/blender-%(prop:blender_version_full)s-windows64.msi'),
"-O", util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-%(prop:blender_version_full)s-windows64.msi')]))
deploy_buildbot_packages_factory.addStep(steps.MasterShellCommand(
name="Download windows64.zip build",
command=[
'wget', util.Interpolate('https://builder.blender.org/download/blender-%(prop:blender_version_full)s-windows64.zip'),
"-O", util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-%(prop:blender_version_full)s-windows64.zip')]))
deploy_buildbot_packages_factory.addStep(steps.MasterShellCommand(
name="Download macOS build",
command=[
'wget', util.Interpolate('https://builder.blender.org/download/blender-%(prop:blender_version_full)s-macOS.dmg'),
"-O", util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-%(prop:blender_version_full)s-macOS.dmg')]))
deploy_buildbot_packages_factory.addStep(steps.MasterShellCommand(
name="Create checksum (md5/sha256)",
command=[
'sh',
'-C',
'/var/lib/buildbot/bin/create_checksum.sh',
util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-%(prop:blender_version_full)s-*'),
util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-%(prop:blender_version_full)s'),
]))
deploy_buildbot_packages_factory.addStep(steps.MasterShellCommand(
name="Upload linux64 build",
command=[
'sh', '-C', '/var/lib/buildbot/bin/upload_file.sh',
util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-%(prop:blender_version_full)s-linux64.tar.xz'),
util.Interpolate('/data/www/vhosts/download.blender.org/ftp/jeroen/')]))
deploy_buildbot_packages_factory.addStep(steps.MasterShellCommand(
name="Upload windows64.msi build",
command=[
'sh', '-C', '/var/lib/buildbot/bin/upload_file.sh',
util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-%(prop:blender_version_full)s-windows64.msi'),
util.Interpolate('/data/www/vhosts/download.blender.org/ftp/jeroen/')]))
deploy_buildbot_packages_factory.addStep(steps.MasterShellCommand(
name="Upload windows64.zip build",
command=[
'sh', '-C', '/var/lib/buildbot/bin/upload_file.sh',
util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-%(prop:blender_version_full)s-windows64.zip'),
util.Interpolate('/data/www/vhosts/download.blender.org/ftp/jeroen/')]))
deploy_buildbot_packages_factory.addStep(steps.MasterShellCommand(
name="Upload macOS build",
command=[
'sh', '-C', '/var/lib/buildbot/bin/upload_file.sh',
util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-%(prop:blender_version_full)s-macOS.dmg'),
util.Interpolate('/data/www/vhosts/download.blender.org/ftp/jeroen/')]))
deploy_buildbot_packages_factory.addStep(steps.MasterShellCommand(
name="Upload md5 checksum",
command=[
'sh', '-C', '/var/lib/buildbot/bin/upload_file.sh',
util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-%(prop:blender_version_full)s.md5'),
util.Interpolate('/data/www/vhosts/download.blender.org/ftp/jeroen/')]))
deploy_buildbot_packages_factory.addStep(steps.MasterShellCommand(
name="Upload sha256 checksum",
command=[
'sh', '-C', '/var/lib/buildbot/bin/upload_file.sh',
util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s/blender-%(prop:blender_version_full)s.sha256'),
util.Interpolate('/data/www/vhosts/download.blender.org/ftp/jeroen/')]))
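# Top-level release factory: prepares the release folder on the master, then
# triggers both deploy builders and waits for them to finish.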
factory = util.BuildFactory()
factory.addStep(steps.MasterShellCommand(name="Init release folder", command=["mkdir", "-p", util.Interpolate('/var/lib/buildbot/builds//blender-v%(prop:blender_version_full)s')]))
factory.addStep(steps.Trigger(
name='Deploy',
schedulerNames=[
'deploy-source-archive',
'deploy-buildbot-packages',
],
waitForFinish=True,
set_properties={
'blender_version_full': util.Property('blender_version_full'),
'blender_version': util.Property('blender_version'),
}
))
# Register the builders with the master.
c['builders'] = [
util.BuilderConfig(name="release-blender-283",
workernames=["lts-worker"],
factory=factory),
util.BuilderConfig(name="deploy-source-archive",
workernames=["lts-worker"],
factory=deploy_source_archive_factory),
util.BuilderConfig(name="deploy-buildbot-packages",
workernames=["lts-worker"],
factory=deploy_buildbot_packages_factory),
]
####### STATUS TARGETS
# 'status' is a list of Status Targets. The results of each build will be
# pushed to these targets. buildbot/status/*.py has a variety to choose from,
# like IRC bots.
# c['status'] = []
####### PROJECT IDENTITY
# the 'title' string will appear at the top of this buildbot installation's
# home pages (linked to the 'titleURL').
c['title'] = "Blender Release LTS"
c['titleURL'] = "https://www.blender.org/download/lts/"
# the 'buildbotURL' string should point to the location where the buildbot's
# internal web server is visible. This typically uses the port number set in
# the 'www' entry below, but with an externally-visible host name which the
# buildbot cannot figure out without some help.
c['buildbotURL'] = os.environ.get("BUILDBOT_WEB_URL", "http://localhost:8010/")
# minimalistic config to activate new web UI
c['www'] = dict(port=os.environ.get("BUILDBOT_WEB_PORT", 8010),
plugins=dict(waterfall_view={}, console_view={}))
####### DB URL
c['db'] = {
# This specifies what database buildbot uses to store its state. You can leave
# this at its default for all but the largest installations.
'db_url' : os.environ.get("BUILDBOT_DB_URL", "sqlite://").format(**os.environ),
}
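The deploy-buildbot-packages factory repeats the same download and upload steps once per platform package. One way master.cfg could be made more generic is sketched below; the add_package_steps helper, the BUILDS_DIR constant and the PACKAGES list are assumptions for illustration only, not existing code (the checksum step is omitted for brevity):

# Sketch: generate the per-package download and upload steps in a loop.
BUILDS_DIR = '/var/lib/buildbot/builds/blender-v%(prop:blender_version_full)s'
PACKAGES = ['linux64.tar.xz', 'windows64.msi', 'windows64.zip', 'macOS.dmg']

def add_package_steps(factory):
    for suffix in PACKAGES:
        local_path = util.Interpolate(
            BUILDS_DIR + '/blender-%(prop:blender_version_full)s-' + suffix)
        # Fetch the package from builder.blender.org onto the master.
        factory.addStep(steps.MasterShellCommand(
            name="Download %s build" % suffix,
            command=[
                'wget',
                util.Interpolate('https://builder.blender.org/download/blender-%(prop:blender_version_full)s-' + suffix),
                '-O', local_path]))
        # Push the package to download.blender.org.
        factory.addStep(steps.MasterShellCommand(
            name="Upload %s build" % suffix,
            command=[
                'sh', '-C', '/var/lib/buildbot/bin/upload_file.sh',
                local_path,
                '/data/www/vhosts/download.blender.org/ftp/jeroen/']))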

View File

@@ -0,0 +1,27 @@
FROM ubuntu:18.04
RUN set -ex; \
apt-get update; \
apt-get install -y --no-install-recommends \
build-essential \
git \
wget \
python3-dev \
python3-pip \
python3-setuptools \
python3-wheel \
; \
rm -rf /var/lib/apt/lists/*
ENV BUILDBOT_VERSION 2.5.0
RUN pip3 install --upgrade pip && \
pip --no-cache-dir install twisted[tls] && \
pip --no-cache-dir install buildbot_worker==$BUILDBOT_VERSION
RUN useradd --create-home --home-dir=/var/lib/buildbot buildbot
WORKDIR /var/lib/buildbot
USER buildbot
COPY buildbot.tac .
CMD ["twistd", "--pidfile=", "--nodaemon", "--python=buildbot.tac"]

View File

@@ -0,0 +1,39 @@
import fnmatch
import os
import sys
from twisted.application import service
from twisted.python.log import FileLogObserver
from twisted.python.log import ILogObserver
from buildbot_worker.bot import Worker
# setup worker
basedir = os.path.abspath(os.path.dirname(__file__))
application = service.Application('buildbot-worker')
application.setComponent(ILogObserver, FileLogObserver(sys.stdout).emit)
# and worker on the same process!
buildmaster_host = os.environ.get("BUILDMASTER", 'localhost')
port = int(os.environ.get("BUILDMASTER_PORT", 9989))
workername = os.environ.get("WORKERNAME", 'docker')
passwd = os.environ.get("WORKERPASS")
# delete the password from the environ so that it is not leaked in the log
blacklist = os.environ.get("WORKER_ENVIRONMENT_BLACKLIST", "WORKERPASS").split()
for name in list(os.environ.keys()):
for toremove in blacklist:
if fnmatch.fnmatch(name, toremove):
del os.environ[name]
keepalive = 600
umask = None
maxdelay = 300
allow_shutdown = None
maxretries = 10
s = Worker(buildmaster_host, port, workername, passwd, basedir,
keepalive, umask=umask, maxdelay=maxdelay,
allow_shutdown=allow_shutdown, maxRetries=maxretries)
s.setServiceParent(application)

View File

@@ -33,11 +33,10 @@ def is_tool(name):
return which(name) is not None
class Builder:
- def __init__(self, name, branch, codesign):
+ def __init__(self, name, branch):
self.name = name
self.branch = branch
self.is_release_branch = re.match("^blender-v(.*)-release$", branch) is not None
- self.codesign = codesign
# Buildbot runs from build/ directory
self.blender_dir = os.path.abspath(os.path.join('..', 'blender.git'))
@@ -68,9 +67,8 @@ def create_builder_from_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('builder_name')
parser.add_argument('branch', default='master', nargs='?')
parser.add_argument("--codesign", action="store_true")
args = parser.parse_args()
- return Builder(args.builder_name, args.branch, args.codesign)
+ return Builder(args.builder_name, args.branch)
class VersionInfo:

View File

@@ -82,10 +82,6 @@ def create_argument_parser():
type=Path,
help="Optional path to applescript to set up folder looks of DMG."
"If not provided default Blender's one is used.")
- parser.add_argument(
- '--codesign',
- action="store_true",
- help="Code sign and notarize DMG contents.")
return parser
@@ -399,8 +395,7 @@ def create_final_dmg(app_bundles: List[Path],
dmg_filepath: Path,
background_image_filepath: Path,
volume_name: str,
- applescript: Path,
- codesign: bool) -> None:
+ applescript: Path) -> None:
"""
Create DMG with all app bundles
@@ -426,8 +421,7 @@ def create_final_dmg(app_bundles: List[Path],
#
# This allows recursing into the content of bundles without worrying about
# possible interference of the Application symlink.
- if codesign:
-     codesign_app_bundles_in_dmg(mount_directory)
+ codesign_app_bundles_in_dmg(mount_directory)
copy_background_if_needed(background_image_filepath, mount_directory)
create_applications_link(mount_directory)
@@ -440,8 +434,7 @@ def create_final_dmg(app_bundles: List[Path],
compress_dmg(writable_dmg_filepath, dmg_filepath)
writable_dmg_filepath.unlink()
- if codesign:
-     codesign_and_notarize_dmg(dmg_filepath)
+ codesign_and_notarize_dmg(dmg_filepath)
def ensure_dmg_extension(filepath: Path) -> Path:
@@ -528,7 +521,6 @@ def main():
source_dir = args.source_dir.absolute()
background_image_filepath = get_background_image(args.background_image)
applescript = get_applescript(args.applescript)
- codesign = args.codesign
app_bundles = collect_and_log_app_bundles(source_dir)
if not app_bundles:
@@ -543,8 +535,7 @@ def main():
dmg_filepath,
background_image_filepath,
volume_name,
- applescript,
- codesign)
+ applescript)
if __name__ == "__main__":

View File

@@ -24,7 +24,7 @@ import shutil
import buildbot_utils
def get_cmake_options(builder):
- codesign_script = os.path.join(
+ post_install_script = os.path.join(
builder.blender_dir, 'build_files', 'buildbot', 'worker_codesign.cmake')
config_file = "build_files/cmake/config/blender_release.cmake"
@@ -36,8 +36,7 @@ def get_cmake_options(builder):
options.append('-DCMAKE_OSX_DEPLOYMENT_TARGET=10.9')
elif builder.platform == 'win':
options.extend(['-G', 'Visual Studio 16 2019', '-A', 'x64'])
- if builder.codesign:
-     options.extend(['-DPOSTINSTALL_SCRIPT:PATH=' + codesign_script])
+ options.extend(['-DPOSTINSTALL_SCRIPT:PATH=' + post_install_script])
elif builder.platform == 'linux':
config_file = "build_files/buildbot/config/blender_linux.cmake"

View File

@@ -117,8 +117,6 @@ def pack_mac(builder):
if info.is_development_build:
background_image = os.path.join(release_dir, 'buildbot', 'background.tif')
command += ['--background-image', background_image]
- if builder.codesign:
-     command += ['--codesign']
command += [builder.install_dir]
buildbot_utils.call(command)
@@ -152,8 +150,7 @@ def pack_win(builder):
package_filename = package_name + '.msi'
package_filepath = os.path.join(builder.build_dir, package_filename)
- if builder.codesign:
-     sign_file_or_directory(package_filepath)
+ sign_file_or_directory(package_filepath)
package_files += [(package_filepath, package_filename)]

View File

@@ -1,104 +0,0 @@
# - Find clang-tidy executable
#
# Find the native clang-tidy executable
#
# This module defines
# CLANG_TIDY_EXECUTABLE, the full path to clang-tidy executable
#
# CLANG_TIDY_VERSION, the full version of the clang-tidy in the
# major.minor.patch format
#
# CLANG_TIDY_VERSION_MAJOR,
# CLANG_TIDY_VERSION_MINOR,
# CLANG_TIDY_VERSION_PATCH, individual components of the clang-tidy version.
#
# CLANG_TIDY_FOUND, If false, do not try to use clang-tidy.
#=============================================================================
# Copyright 2020 Blender Foundation.
#
# Distributed under the OSI-approved BSD License (the "License");
# see accompanying file Copyright.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the License for more information.
#=============================================================================
# If CLANG_TIDY_ROOT_DIR was defined in the environment, use it.
if(NOT CLANG_TIDY_ROOT_DIR AND NOT $ENV{CLANG_TIDY_ROOT_DIR} STREQUAL "")
set(CLANG_TIDY_ROOT_DIR $ENV{CLANG_TIDY_ROOT_DIR})
endif()
set(_clang_tidy_SEARCH_DIRS
${CLANG_TIDY_ROOT_DIR}
/usr/local/bin
)
# TODO(sergey): Find more reliable way of finding the latest clang-tidy.
find_program(CLANG_TIDY_EXECUTABLE
NAMES
clang-tidy-10
clang-tidy-9
clang-tidy-8
clang-tidy-7
clang-tidy
HINTS
${_clang_tidy_SEARCH_DIRS}
)
if(CLANG_TIDY_EXECUTABLE)
# Mark clang-tidy as found.
set(CLANG_TIDY_FOUND TRUE)
# Setup fallback values.
set(CLANG_TIDY_VERSION_MAJOR 0)
set(CLANG_TIDY_VERSION_MINOR 0)
set(CLANG_TIDY_VERSION_PATCH 0)
# Get version from the output.
#
# NOTE: Don't use name of the executable file since that only includes a
# major version. Also, even the major version might be missing in the
# executable name.
execute_process(COMMAND ${CLANG_TIDY_EXECUTABLE} -version
OUTPUT_VARIABLE CLANG_TIDY_VERSION_RAW
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
# Parse parts.
if(CLANG_TIDY_VERSION_RAW MATCHES "LLVM version .*")
# Strip the LLVM prefix and get list of individual version components.
string(REGEX REPLACE
".*LLVM version ([.0-9]+).*" "\\1"
CLANG_SEMANTIC_VERSION "${CLANG_TIDY_VERSION_RAW}")
string(REPLACE "." ";" CLANG_VERSION_PARTS "${CLANG_SEMANTIC_VERSION}")
list(LENGTH CLANG_VERSION_PARTS NUM_CLANG_TIDY_VERSION_PARTS)
# Extract components into corresponding variables.
if(NUM_CLANG_TIDY_VERSION_PARTS GREATER 0)
list(GET CLANG_VERSION_PARTS 0 CLANG_TIDY_VERSION_MAJOR)
endif()
if(NUM_CLANG_TIDY_VERSION_PARTS GREATER 1)
list(GET CLANG_VERSION_PARTS 1 CLANG_TIDY_VERSION_MINOR)
endif()
if(NUM_CLANG_TIDY_VERSION_PARTS GREATER 2)
list(GET CLANG_VERSION_PARTS 2 CLANG_TIDY_VERSION_PATCH)
endif()
# Unset temp variables.
unset(NUM_CLANG_TIDY_VERSION_PARTS)
unset(CLANG_SEMANTIC_VERSION)
unset(CLANG_VERSION_PARTS)
endif()
# Construct full semantic version.
set(CLANG_TIDY_VERSION "${CLANG_TIDY_VERSION_MAJOR}.\
${CLANG_TIDY_VERSION_MINOR}.\
${CLANG_TIDY_VERSION_PATCH}")
unset(CLANG_TIDY_VERSION_RAW)
message(STATUS "Found clang-tidy ${CLANG_TIDY_EXECUTABLE} (${CLANG_TIDY_VERSION})")
else()
set(CLANG_TIDY_FOUND FALSE)
endif()

View File

@@ -82,7 +82,7 @@ FIND_LIBRARY(EMBREE_LIBRARY
# handle the QUIETLY and REQUIRED arguments and set EMBREE_FOUND to TRUE if
# all listed variables are TRUE
INCLUDE(FindPackageHandleStandardArgs)
- FIND_PACKAGE_HANDLE_STANDARD_ARGS(Embree DEFAULT_MSG
+ FIND_PACKAGE_HANDLE_STANDARD_ARGS(EMBREE DEFAULT_MSG
_embree_LIBRARIES EMBREE_INCLUDE_DIR)
IF(EMBREE_FOUND)

View File

@@ -1,96 +0,0 @@
# - Find GMP library
# Find the native GMP includes and library
# This module defines
# GMP_INCLUDE_DIRS, where to find gmp.h, Set when
# GMP_INCLUDE_DIR is found.
# GMP_LIBRARIES, libraries to link against to use GMP.
# GMP_ROOT_DIR, The base directory to search for GMP.
# This can also be an environment variable.
# GMP_FOUND, If false, do not try to use GMP.
#
# also defined, but not for general use are
# GMP_LIBRARY, where to find the GMP library.
#=============================================================================
# Copyright 2011 Blender Foundation.
#
# Distributed under the OSI-approved BSD License (the "License");
# see accompanying file Copyright.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the License for more information.
#=============================================================================
# If GMP_ROOT_DIR was defined in the environment, use it.
IF(NOT GMP_ROOT_DIR AND NOT $ENV{GMP_ROOT_DIR} STREQUAL "")
SET(GMP_ROOT_DIR $ENV{GMP_ROOT_DIR})
ENDIF()
SET(_gmp_SEARCH_DIRS
${GMP_ROOT_DIR}
/opt/lib/gmp
)
FIND_PATH(GMP_INCLUDE_DIR
NAMES
gmp.h
HINTS
${_gmp_SEARCH_DIRS}
PATH_SUFFIXES
include/gmp
)
FIND_PATH(GMPXX_INCLUDE_DIR
NAMES
gmpxx.h
HINTS
${_gmp_SEARCH_DIRS}
PATH_SUFFIXES
include/gmp
)
FIND_LIBRARY(GMP_LIBRARY
NAMES
gmp
HINTS
${_gmp_SEARCH_DIRS}
PATH_SUFFIXES
lib64 lib
)
FIND_LIBRARY(GMPXX_LIBRARY
NAMES
gmpxx
HINTS
${_gmp_SEARCH_DIRS}
PATH_SUFFIXES
lib64 lib
)
if(GMP_INCLUDE_DIR)
SET(_version_regex "^#define[ \t]+__GNU_MP_VERSION[ \t]+\"([^\"]+)\".*")
file(STRINGS "${GMP_INCLUDE_DIR}/gmp.h"
GMP_VERSION REGEX "${_version_regex}")
string(REGEX REPLACE "${_version_regex}" "\\1"
GMP_VERSION "${GMP_VERSION}")
unset(_version_regex)
endif()
# handle the QUIETLY and REQUIRED arguments and set GMP_FOUND to TRUE if
# all listed variables are TRUE
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(GMP DEFAULT_MSG
GMP_LIBRARY GMPXX_LIBRARY GMP_INCLUDE_DIR GMPXX_INCLUDE_DIR)
IF(GMP_FOUND)
SET(GMP_LIBRARIES ${GMP_LIBRARY} ${GMPXX_LIBRARY})
SET(GMP_INCLUDE_DIRS ${GMP_INCLUDE_DIR} ${GMPXX_INCLUDE_DIR})
ENDIF(GMP_FOUND)
MARK_AS_ADVANCED(
GMP_INCLUDE_DIR
GMP_LIBRARY
GMPXX_INCLUDE_DIR
GMPXX_LIBRARY
)

View File

@@ -1,550 +0,0 @@
# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
# file Copyright.txt or https://cmake.org/licensing for details.
#[=======================================================================[.rst:
GoogleTest
----------
.. versionadded:: 3.9
This module defines functions to help use the Google Test infrastructure. Two
mechanisms for adding tests are provided. :command:`gtest_add_tests` has been
around for some time, originally via ``find_package(GTest)``.
:command:`gtest_discover_tests` was introduced in CMake 3.10.
The (older) :command:`gtest_add_tests` scans source files to identify tests.
This is usually effective, with some caveats, including in cross-compiling
environments, and makes setting additional properties on tests more convenient.
However, its handling of parameterized tests is less comprehensive, and it
requires re-running CMake to detect changes to the list of tests.
The (newer) :command:`gtest_discover_tests` discovers tests by asking the
compiled test executable to enumerate its tests. This is more robust and
provides better handling of parameterized tests, and does not require CMake
to be re-run when tests change. However, it may not work in a cross-compiling
environment, and setting test properties is less convenient.
More details can be found in the documentation of the respective functions.
Both commands are intended to replace use of :command:`add_test` to register
tests, and will create a separate CTest test for each Google Test test case.
Note that this is in some cases less efficient, as common set-up and tear-down
logic cannot be shared by multiple test cases executing in the same instance.
However, it provides more fine-grained pass/fail information to CTest, which is
usually considered as more beneficial. By default, the CTest test name is the
same as the Google Test name (i.e. ``suite.testcase``); see also
``TEST_PREFIX`` and ``TEST_SUFFIX``.
.. command:: gtest_add_tests
Automatically add tests with CTest by scanning source code for Google Test
macros::
gtest_add_tests(TARGET target
[SOURCES src1...]
[EXTRA_ARGS arg1...]
[WORKING_DIRECTORY dir]
[TEST_PREFIX prefix]
[TEST_SUFFIX suffix]
[SKIP_DEPENDENCY]
[TEST_LIST outVar]
)
``gtest_add_tests`` attempts to identify tests by scanning source files.
Although this is generally effective, it uses only a basic regular expression
match, which can be defeated by atypical test declarations, and is unable to
fully "split" parameterized tests. Additionally, it requires that CMake be
re-run to discover any newly added, removed or renamed tests (by default,
this means that CMake is re-run when any test source file is changed, but see
``SKIP_DEPENDENCY``). However, it has the advantage of declaring tests at
CMake time, which somewhat simplifies setting additional properties on tests,
and always works in a cross-compiling environment.
The options are:
``TARGET target``
Specifies the Google Test executable, which must be a known CMake
executable target. CMake will substitute the location of the built
executable when running the test.
``SOURCES src1...``
When provided, only the listed files will be scanned for test cases. If
this option is not given, the :prop_tgt:`SOURCES` property of the
specified ``target`` will be used to obtain the list of sources.
``EXTRA_ARGS arg1...``
Any extra arguments to pass on the command line to each test case.
``WORKING_DIRECTORY dir``
Specifies the directory in which to run the discovered test cases. If this
option is not provided, the current binary directory is used.
``TEST_PREFIX prefix``
Specifies a ``prefix`` to be prepended to the name of each discovered test
case. This can be useful when the same source files are being used in
multiple calls to ``gtest_add_test()`` but with different ``EXTRA_ARGS``.
``TEST_SUFFIX suffix``
Similar to ``TEST_PREFIX`` except the ``suffix`` is appended to the name of
every discovered test case. Both ``TEST_PREFIX`` and ``TEST_SUFFIX`` may
be specified.
``SKIP_DEPENDENCY``
Normally, the function creates a dependency which will cause CMake to be
re-run if any of the sources being scanned are changed. This is to ensure
that the list of discovered tests is updated. If this behavior is not
desired (as may be the case while actually writing the test cases), this
option can be used to prevent the dependency from being added.
``TEST_LIST outVar``
The variable named by ``outVar`` will be populated in the calling scope
with the list of discovered test cases. This allows the caller to do
things like manipulate test properties of the discovered tests.
.. code-block:: cmake
include(GoogleTest)
add_executable(FooTest FooUnitTest.cxx)
gtest_add_tests(TARGET FooTest
TEST_SUFFIX .noArgs
TEST_LIST noArgsTests
)
gtest_add_tests(TARGET FooTest
EXTRA_ARGS --someArg someValue
TEST_SUFFIX .withArgs
TEST_LIST withArgsTests
)
set_tests_properties(${noArgsTests} PROPERTIES TIMEOUT 10)
set_tests_properties(${withArgsTests} PROPERTIES TIMEOUT 20)
For backward compatibility, the following form is also supported::
gtest_add_tests(exe args files...)
``exe``
The path to the test executable or the name of a CMake target.
``args``
A ;-list of extra arguments to be passed to executable. The entire
list must be passed as a single argument. Enclose it in quotes,
or pass ``""`` for no arguments.
``files...``
A list of source files to search for tests and test fixtures.
Alternatively, use ``AUTO`` to specify that ``exe`` is the name
of a CMake executable target whose sources should be scanned.
.. code-block:: cmake
include(GoogleTest)
set(FooTestArgs --foo 1 --bar 2)
add_executable(FooTest FooUnitTest.cxx)
gtest_add_tests(FooTest "${FooTestArgs}" AUTO)
.. command:: gtest_discover_tests
Automatically add tests with CTest by querying the compiled test executable
for available tests::
gtest_discover_tests(target
[EXTRA_ARGS arg1...]
[WORKING_DIRECTORY dir]
[TEST_PREFIX prefix]
[TEST_SUFFIX suffix]
[NO_PRETTY_TYPES] [NO_PRETTY_VALUES]
[PROPERTIES name1 value1...]
[TEST_LIST var]
[DISCOVERY_TIMEOUT seconds]
[XML_OUTPUT_DIR dir]
[DISCOVERY_MODE <POST_BUILD|PRE_TEST>]
)
``gtest_discover_tests()`` sets up a post-build command on the test executable
that generates the list of tests by parsing the output from running the test
with the ``--gtest_list_tests`` argument. Compared to the source parsing
approach of :command:`gtest_add_tests`, this ensures that the full list of
tests, including instantiations of parameterized tests, is obtained. Since
test discovery occurs at build time, it is not necessary to re-run CMake when
the list of tests changes.
However, it requires that :prop_tgt:`CROSSCOMPILING_EMULATOR` is properly set
in order to function in a cross-compiling environment.
Additionally, setting properties on tests is somewhat less convenient, since
the tests are not available at CMake time. Additional test properties may be
assigned to the set of tests as a whole using the ``PROPERTIES`` option. If
more fine-grained test control is needed, custom content may be provided
through an external CTest script using the :prop_dir:`TEST_INCLUDE_FILES`
directory property. The set of discovered tests is made accessible to such a
script via the ``<target>_TESTS`` variable.
The options are:
``target``
Specifies the Google Test executable, which must be a known CMake
executable target. CMake will substitute the location of the built
executable when running the test.
``EXTRA_ARGS arg1...``
Any extra arguments to pass on the command line to each test case.
``WORKING_DIRECTORY dir``
Specifies the directory in which to run the discovered test cases. If this
option is not provided, the current binary directory is used.
``TEST_PREFIX prefix``
Specifies a ``prefix`` to be prepended to the name of each discovered test
case. This can be useful when the same test executable is being used in
multiple calls to ``gtest_discover_tests()`` but with different
``EXTRA_ARGS``.
``TEST_SUFFIX suffix``
Similar to ``TEST_PREFIX`` except the ``suffix`` is appended to the name of
every discovered test case. Both ``TEST_PREFIX`` and ``TEST_SUFFIX`` may
be specified.
``NO_PRETTY_TYPES``
By default, the type index of type-parameterized tests is replaced by the
actual type name in the CTest test name. If this behavior is undesirable
(e.g. because the type names are unwieldy), this option will suppress this
behavior.
``NO_PRETTY_VALUES``
By default, the value index of value-parameterized tests is replaced by the
actual value in the CTest test name. If this behavior is undesirable
(e.g. because the value strings are unwieldy), this option will suppress
this behavior.
``PROPERTIES name1 value1...``
Specifies additional properties to be set on all tests discovered by this
invocation of ``gtest_discover_tests()``.
``TEST_LIST var``
Make the list of tests available in the variable ``var``, rather than the
default ``<target>_TESTS``. This can be useful when the same test
executable is being used in multiple calls to ``gtest_discover_tests()``.
Note that this variable is only available in CTest.
``DISCOVERY_TIMEOUT num``
Specifies how long (in seconds) CMake will wait for the test to enumerate
available tests. If the test takes longer than this, discovery (and your
build) will fail. Most test executables will enumerate their tests very
quickly, but under some exceptional circumstances, a test may require a
longer timeout. The default is 5. See also the ``TIMEOUT`` option of
:command:`execute_process`.
.. note::
In CMake versions 3.10.1 and 3.10.2, this option was called ``TIMEOUT``.
This clashed with the ``TIMEOUT`` test property, which is one of the
common properties that would be set with the ``PROPERTIES`` keyword,
usually leading to legal but unintended behavior. The keyword was
changed to ``DISCOVERY_TIMEOUT`` in CMake 3.10.3 to address this
problem. The ambiguous behavior of the ``TIMEOUT`` keyword in 3.10.1
and 3.10.2 has not been preserved.
``XML_OUTPUT_DIR dir``
If specified, the parameter is passed along with ``--gtest_output=xml:``
to test executable. The actual file name is the same as the test target,
including prefix and suffix. This should be used instead of
``EXTRA_ARGS --gtest_output=xml`` to avoid race conditions writing the
XML result output when using parallel test execution.
``DISCOVERY_MODE``
Provides greater control over when ``gtest_discover_tests()`` performs test
discovery. By default, ``POST_BUILD`` sets up a post-build command
to perform test discovery at build time. In certain scenarios, like
cross-compiling, this ``POST_BUILD`` behavior is not desirable.
By contrast, ``PRE_TEST`` delays test discovery until just prior to test
execution. This way test discovery occurs in the target environment
where the test has a better chance at finding appropriate runtime
dependencies.
``DISCOVERY_MODE`` defaults to the value of the
``CMAKE_GTEST_DISCOVER_TESTS_DISCOVERY_MODE`` variable if it is not
passed when calling ``gtest_discover_tests()``. This provides a mechanism
for globally selecting a preferred test discovery behavior without having
to modify each call site.
#]=======================================================================]
# Save project's policies
cmake_policy(PUSH)
cmake_policy(SET CMP0057 NEW) # if IN_LIST
#------------------------------------------------------------------------------
function(gtest_add_tests)
if (ARGC LESS 1)
message(FATAL_ERROR "No arguments supplied to gtest_add_tests()")
endif()
set(options
SKIP_DEPENDENCY
)
set(oneValueArgs
TARGET
WORKING_DIRECTORY
TEST_PREFIX
TEST_SUFFIX
TEST_LIST
)
set(multiValueArgs
SOURCES
EXTRA_ARGS
)
set(allKeywords ${options} ${oneValueArgs} ${multiValueArgs})
unset(sources)
if("${ARGV0}" IN_LIST allKeywords)
cmake_parse_arguments(ARGS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
set(autoAddSources YES)
else()
# Non-keyword syntax, convert to keyword form
if (ARGC LESS 3)
message(FATAL_ERROR "gtest_add_tests() without keyword options requires at least 3 arguments")
endif()
set(ARGS_TARGET "${ARGV0}")
set(ARGS_EXTRA_ARGS "${ARGV1}")
if(NOT "${ARGV2}" STREQUAL "AUTO")
set(ARGS_SOURCES "${ARGV}")
list(REMOVE_AT ARGS_SOURCES 0 1)
endif()
endif()
# The non-keyword syntax allows the first argument to be an arbitrary
# executable rather than a target if source files are also provided. In all
# other cases, both forms require a target.
if(NOT TARGET "${ARGS_TARGET}" AND NOT ARGS_SOURCES)
message(FATAL_ERROR "${ARGS_TARGET} does not define an existing CMake target")
endif()
if(NOT ARGS_WORKING_DIRECTORY)
unset(workDir)
else()
set(workDir WORKING_DIRECTORY "${ARGS_WORKING_DIRECTORY}")
endif()
if(NOT ARGS_SOURCES)
get_property(ARGS_SOURCES TARGET ${ARGS_TARGET} PROPERTY SOURCES)
endif()
unset(testList)
set(gtest_case_name_regex ".*\\( *([A-Za-z_0-9]+) *, *([A-Za-z_0-9]+) *\\).*")
set(gtest_test_type_regex "(TYPED_TEST|TEST_?[FP]?)")
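# The two regexes above match TEST()/TEST_F()/TEST_P()/TYPED_TEST() macro
# invocations and capture the suite and test names from the argument list.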
foreach(source IN LISTS ARGS_SOURCES)
if(NOT ARGS_SKIP_DEPENDENCY)
set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS ${source})
endif()
file(READ "${source}" contents)
string(REGEX MATCHALL "${gtest_test_type_regex} *\\(([A-Za-z_0-9 ,]+)\\)" found_tests "${contents}")
foreach(hit ${found_tests})
string(REGEX MATCH "${gtest_test_type_regex}" test_type ${hit})
# Parameterized tests have a different signature for the filter
if("x${test_type}" STREQUAL "xTEST_P")
string(REGEX REPLACE ${gtest_case_name_regex} "*/\\1.\\2/*" gtest_test_name ${hit})
elseif("x${test_type}" STREQUAL "xTEST_F" OR "x${test_type}" STREQUAL "xTEST")
string(REGEX REPLACE ${gtest_case_name_regex} "\\1.\\2" gtest_test_name ${hit})
elseif("x${test_type}" STREQUAL "xTYPED_TEST")
string(REGEX REPLACE ${gtest_case_name_regex} "\\1/*.\\2" gtest_test_name ${hit})
else()
message(WARNING "Could not parse GTest ${hit} for adding to CTest.")
continue()
endif()
# Make sure tests disabled in GTest get disabled in CTest
if(gtest_test_name MATCHES "(^|\\.)DISABLED_")
# Add the disabled test if CMake is new enough
# Note that this check exists for backwards compatibility, so that this
# module can be copied locally into projects and used with older CMake
# versions.
if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.8.20170401)
string(REGEX REPLACE
"(^|\\.)DISABLED_" "\\1"
orig_test_name "${gtest_test_name}"
)
set(ctest_test_name
${ARGS_TEST_PREFIX}${orig_test_name}${ARGS_TEST_SUFFIX}
)
add_test(NAME ${ctest_test_name}
${workDir}
COMMAND ${ARGS_TARGET}
--gtest_also_run_disabled_tests
--gtest_filter=${gtest_test_name}
${ARGS_EXTRA_ARGS}
)
set_tests_properties(${ctest_test_name} PROPERTIES DISABLED TRUE)
list(APPEND testList ${ctest_test_name})
endif()
else()
set(ctest_test_name ${ARGS_TEST_PREFIX}${gtest_test_name}${ARGS_TEST_SUFFIX})
add_test(NAME ${ctest_test_name}
${workDir}
COMMAND ${ARGS_TARGET}
--gtest_filter=${gtest_test_name}
${ARGS_EXTRA_ARGS}
)
list(APPEND testList ${ctest_test_name})
endif()
endforeach()
endforeach()
if(ARGS_TEST_LIST)
set(${ARGS_TEST_LIST} ${testList} PARENT_SCOPE)
endif()
endfunction()
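# Example (a sketch; the target and variable names are hypothetical):
#
#   gtest_add_tests(
#     TARGET app_tests
#     TEST_PREFIX unit.
#     TEST_LIST added_tests
#   )
#
# This scans the sources of app_tests for TEST() macros, registers one CTest
# test per match, and stores the generated test names in added_tests.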
#------------------------------------------------------------------------------
function(gtest_discover_tests TARGET)
cmake_parse_arguments(
""
"NO_PRETTY_TYPES;NO_PRETTY_VALUES"
"TEST_PREFIX;TEST_SUFFIX;WORKING_DIRECTORY;TEST_LIST;DISCOVERY_TIMEOUT;XML_OUTPUT_DIR;DISCOVERY_MODE"
"EXTRA_ARGS;PROPERTIES"
${ARGN}
)
if(NOT _WORKING_DIRECTORY)
set(_WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}")
endif()
if(NOT _TEST_LIST)
set(_TEST_LIST ${TARGET}_TESTS)
endif()
if(NOT _DISCOVERY_TIMEOUT)
set(_DISCOVERY_TIMEOUT 5)
endif()
if(NOT _DISCOVERY_MODE)
if(NOT CMAKE_GTEST_DISCOVER_TESTS_DISCOVERY_MODE)
set(CMAKE_GTEST_DISCOVER_TESTS_DISCOVERY_MODE "POST_BUILD")
endif()
set(_DISCOVERY_MODE ${CMAKE_GTEST_DISCOVER_TESTS_DISCOVERY_MODE})
endif()
get_property(
has_counter
TARGET ${TARGET}
PROPERTY CTEST_DISCOVERED_TEST_COUNTER
SET
)
if(has_counter)
get_property(
counter
TARGET ${TARGET}
PROPERTY CTEST_DISCOVERED_TEST_COUNTER
)
math(EXPR counter "${counter} + 1")
else()
set(counter 1)
endif()
set_property(
TARGET ${TARGET}
PROPERTY CTEST_DISCOVERED_TEST_COUNTER
${counter}
)
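# The per-target counter above makes the generated file names unique when
# gtest_discover_tests() is called more than once for the same target.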
# Define rule to generate test list for aforementioned test executable
# Blender: use _ instead of [] to avoid problems with zsh regex.
set(ctest_file_base "${CMAKE_CURRENT_BINARY_DIR}/${TARGET}_${counter}_")
set(ctest_include_file "${ctest_file_base}_include.cmake")
set(ctest_tests_file "${ctest_file_base}_tests.cmake")
get_property(crosscompiling_emulator
TARGET ${TARGET}
PROPERTY CROSSCOMPILING_EMULATOR
)
if(_DISCOVERY_MODE STREQUAL "POST_BUILD")
add_custom_command(
TARGET ${TARGET} POST_BUILD
BYPRODUCTS "${ctest_tests_file}"
COMMAND "${CMAKE_COMMAND}"
-D "TEST_TARGET=${TARGET}"
-D "TEST_EXECUTABLE=$<TARGET_FILE:${TARGET}>"
-D "TEST_EXECUTOR=${crosscompiling_emulator}"
-D "TEST_WORKING_DIR=${_WORKING_DIRECTORY}"
-D "TEST_EXTRA_ARGS=${_EXTRA_ARGS}"
-D "TEST_PROPERTIES=${_PROPERTIES}"
-D "TEST_PREFIX=${_TEST_PREFIX}"
-D "TEST_SUFFIX=${_TEST_SUFFIX}"
-D "NO_PRETTY_TYPES=${_NO_PRETTY_TYPES}"
-D "NO_PRETTY_VALUES=${_NO_PRETTY_VALUES}"
-D "TEST_LIST=${_TEST_LIST}"
-D "CTEST_FILE=${ctest_tests_file}"
-D "TEST_DISCOVERY_TIMEOUT=${_DISCOVERY_TIMEOUT}"
-D "TEST_XML_OUTPUT_DIR=${_XML_OUTPUT_DIR}"
-P "${_GOOGLETEST_DISCOVER_TESTS_SCRIPT}"
VERBATIM
)
file(WRITE "${ctest_include_file}"
"if(EXISTS \"${ctest_tests_file}\")\n"
" include(\"${ctest_tests_file}\")\n"
"else()\n"
" add_test(${TARGET}_NOT_BUILT ${TARGET}_NOT_BUILT)\n"
"endif()\n"
)
elseif(_DISCOVERY_MODE STREQUAL "PRE_TEST")
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL
PROPERTY GENERATOR_IS_MULTI_CONFIG
)
if(GENERATOR_IS_MULTI_CONFIG)
set(ctest_tests_file "${ctest_file_base}_tests-$<CONFIG>.cmake")
endif()
string(CONCAT ctest_include_content
"if(EXISTS \"$<TARGET_FILE:${TARGET}>\")" "\n"
" if(\"$<TARGET_FILE:${TARGET}>\" IS_NEWER_THAN \"${ctest_tests_file}\")" "\n"
" include(\"${_GOOGLETEST_DISCOVER_TESTS_SCRIPT}\")" "\n"
" gtest_discover_tests_impl(" "\n"
" TEST_EXECUTABLE" " [==[" "$<TARGET_FILE:${TARGET}>" "]==]" "\n"
" TEST_EXECUTOR" " [==[" "${crosscompiling_emulator}" "]==]" "\n"
" TEST_WORKING_DIR" " [==[" "${_WORKING_DIRECTORY}" "]==]" "\n"
" TEST_EXTRA_ARGS" " [==[" "${_EXTRA_ARGS}" "]==]" "\n"
" TEST_PROPERTIES" " [==[" "${_PROPERTIES}" "]==]" "\n"
" TEST_PREFIX" " [==[" "${_TEST_PREFIX}" "]==]" "\n"
" TEST_SUFFIX" " [==[" "${_TEST_SUFFIX}" "]==]" "\n"
" NO_PRETTY_TYPES" " [==[" "${_NO_PRETTY_TYPES}" "]==]" "\n"
" NO_PRETTY_VALUES" " [==[" "${_NO_PRETTY_VALUES}" "]==]" "\n"
" TEST_LIST" " [==[" "${_TEST_LIST}" "]==]" "\n"
" CTEST_FILE" " [==[" "${ctest_tests_file}" "]==]" "\n"
" TEST_DISCOVERY_TIMEOUT" " [==[" "${_DISCOVERY_TIMEOUT}" "]==]" "\n"
" TEST_XML_OUTPUT_DIR" " [==[" "${_XML_OUTPUT_DIR}" "]==]" "\n"
" )" "\n"
" endif()" "\n"
" include(\"${ctest_tests_file}\")" "\n"
"else()" "\n"
" add_test(${TARGET}_NOT_BUILT ${TARGET}_NOT_BUILT)" "\n"
"endif()" "\n"
)
if(GENERATOR_IS_MULTI_CONFIG)
foreach(_config ${CMAKE_CONFIGURATION_TYPES})
file(GENERATE OUTPUT "${ctest_file_base}_include-${_config}.cmake" CONTENT "${ctest_include_content}" CONDITION $<CONFIG:${_config}>)
endforeach()
file(WRITE "${ctest_include_file}" "include(\"${ctest_file_base}_include-\${CTEST_CONFIGURATION_TYPE}.cmake\")")
else()
file(GENERATE OUTPUT "${ctest_file_base}_include.cmake" CONTENT "${ctest_include_content}")
file(WRITE "${ctest_include_file}" "include(\"${ctest_file_base}_include.cmake\")")
endif()
else()
message(FATAL_ERROR "Unknown DISCOVERY_MODE: ${_DISCOVERY_MODE}")
endif()
# Add discovered tests to directory TEST_INCLUDE_FILES
set_property(DIRECTORY
APPEND PROPERTY TEST_INCLUDE_FILES "${ctest_include_file}"
)
endfunction()
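# Example (a sketch; the target name is hypothetical): select PRE_TEST
# discovery globally, e.g. when cross-compiling, without editing call sites:
#
#   set(CMAKE_GTEST_DISCOVER_TESTS_DISCOVERY_MODE PRE_TEST)
#   gtest_discover_tests(app_tests)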
###############################################################################
set(_GOOGLETEST_DISCOVER_TESTS_SCRIPT
${CMAKE_CURRENT_LIST_DIR}/GTestAddTests.cmake
)
# Restore project's policies
cmake_policy(POP)

View File

@@ -1,191 +0,0 @@
# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
# file Copyright.txt or https://cmake.org/licensing for details.
# Blender: disable ASAN leak detection when trying to discover tests.
set(ENV{ASAN_OPTIONS} "detect_leaks=0")
cmake_minimum_required(VERSION ${CMAKE_VERSION})
# Overwrite possibly existing ${_CTEST_FILE} with empty file
set(flush_tests_MODE WRITE)
# Flushes script to ${_CTEST_FILE}
macro(flush_script)
file(${flush_tests_MODE} "${_CTEST_FILE}" "${script}")
set(flush_tests_MODE APPEND)
set(script "")
endmacro()
# Flushes tests_buffer to tests
macro(flush_tests_buffer)
list(APPEND tests "${tests_buffer}")
set(tests_buffer "")
endmacro()
macro(add_command NAME)
set(_args "")
foreach(_arg ${ARGN})
if(_arg MATCHES "[^-./:a-zA-Z0-9_]")
string(APPEND _args " [==[${_arg}]==]")
else()
string(APPEND _args " ${_arg}")
endif()
endforeach()
string(APPEND script "${NAME}(${_args})\n")
string(LENGTH "${script}" _script_len)
if(${_script_len} GREATER "50000")
flush_script()
endif()
# Unsets macro local variables to prevent leakage outside of this macro.
unset(_args)
unset(_script_len)
endmacro()
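# Bracket arguments ([==[...]==]) keep arguments containing spaces, semicolons
# or other special characters intact in the generated script; the script is
# flushed in chunks so a single file() write stays below ~50000 characters.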
function(gtest_discover_tests_impl)
cmake_parse_arguments(
""
""
"NO_PRETTY_TYPES;NO_PRETTY_VALUES;TEST_EXECUTABLE;TEST_EXECUTOR;TEST_WORKING_DIR;TEST_PREFIX;TEST_SUFFIX;TEST_LIST;CTEST_FILE;TEST_DISCOVERY_TIMEOUT;TEST_XML_OUTPUT_DIR"
"TEST_EXTRA_ARGS;TEST_PROPERTIES"
${ARGN}
)
set(prefix "${_TEST_PREFIX}")
set(suffix "${_TEST_SUFFIX}")
set(extra_args ${_TEST_EXTRA_ARGS})
set(properties ${_TEST_PROPERTIES})
set(script)
set(suite)
set(tests)
set(tests_buffer)
# Run test executable to get list of available tests
if(NOT EXISTS "${_TEST_EXECUTABLE}")
message(FATAL_ERROR
"Specified test executable does not exist.\n"
" Path: '${_TEST_EXECUTABLE}'"
)
endif()
execute_process(
COMMAND ${_TEST_EXECUTOR} "${_TEST_EXECUTABLE}" --gtest_list_tests
WORKING_DIRECTORY "${_TEST_WORKING_DIR}"
TIMEOUT ${_TEST_DISCOVERY_TIMEOUT}
OUTPUT_VARIABLE output
RESULT_VARIABLE result
)
if(NOT ${result} EQUAL 0)
string(REPLACE "\n" "\n " output "${output}")
message(FATAL_ERROR
"Error running test executable.\n"
" Path: '${_TEST_EXECUTABLE}'\n"
" Result: ${result}\n"
" Output:\n"
" ${output}\n"
)
endif()
# Preserve semicolon in test-parameters
string(REPLACE [[;]] [[\;]] output "${output}")
string(REPLACE "\n" ";" output "${output}")
# Parse output
foreach(line ${output})
# Skip header
if(NOT line MATCHES "gtest_main\\.cc")
# Do we have a module name or a test name?
if(NOT line MATCHES "^ ")
# Module; remove trailing '.' to get just the name...
string(REGEX REPLACE "\\.( *#.*)?" "" suite "${line}")
if(line MATCHES "#" AND NOT _NO_PRETTY_TYPES)
string(REGEX REPLACE "/[0-9]\\.+ +#.*= +" "/" pretty_suite "${line}")
else()
set(pretty_suite "${suite}")
endif()
string(REGEX REPLACE "^DISABLED_" "" pretty_suite "${pretty_suite}")
else()
# Test name; strip spaces and comments to get just the name...
string(REGEX REPLACE " +" "" test "${line}")
if(test MATCHES "#" AND NOT _NO_PRETTY_VALUES)
string(REGEX REPLACE "/[0-9]+#GetParam..=" "/" pretty_test "${test}")
else()
string(REGEX REPLACE "#.*" "" pretty_test "${test}")
endif()
string(REGEX REPLACE "^DISABLED_" "" pretty_test "${pretty_test}")
string(REGEX REPLACE "#.*" "" test "${test}")
if(NOT "${_TEST_XML_OUTPUT_DIR}" STREQUAL "")
set(TEST_XML_OUTPUT_PARAM "--gtest_output=xml:${_TEST_XML_OUTPUT_DIR}/${prefix}${suite}.${test}${suffix}.xml")
else()
unset(TEST_XML_OUTPUT_PARAM)
endif()
# sanitize test name for further processing downstream
set(testname "${prefix}${pretty_suite}.${pretty_test}${suffix}")
# escape \
string(REPLACE [[\]] [[\\]] testname "${testname}")
# escape ;
string(REPLACE [[;]] [[\;]] testname "${testname}")
# escape $
string(REPLACE [[$]] [[\$]] testname "${testname}")
# ...and add to script
add_command(add_test
"${testname}"
${_TEST_EXECUTOR}
"${_TEST_EXECUTABLE}"
"--gtest_filter=${suite}.${test}"
"--gtest_also_run_disabled_tests"
${TEST_XML_OUTPUT_PARAM}
${extra_args}
)
if(suite MATCHES "^DISABLED" OR test MATCHES "^DISABLED")
add_command(set_tests_properties
"${testname}"
PROPERTIES DISABLED TRUE
)
endif()
add_command(set_tests_properties
"${testname}"
PROPERTIES
WORKING_DIRECTORY "${_TEST_WORKING_DIR}"
SKIP_REGULAR_EXPRESSION "\\\\[ SKIPPED \\\\]"
${properties}
)
list(APPEND tests_buffer "${testname}")
list(LENGTH tests_buffer tests_buffer_length)
if(${tests_buffer_length} GREATER "250")
flush_tests_buffer()
endif()
endif()
endif()
endforeach()
# Create a list of all discovered tests, which users may use to e.g. set
# properties on the tests
flush_tests_buffer()
add_command(set ${_TEST_LIST} ${tests})
# Write CTest script
flush_script()
endfunction()
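# When this file is executed in script mode (cmake -D ... -P), the -D
# definitions passed on the command line become variables here and drive a
# single discovery pass via the call below.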
if(CMAKE_SCRIPT_MODE_FILE)
gtest_discover_tests_impl(
NO_PRETTY_TYPES ${NO_PRETTY_TYPES}
NO_PRETTY_VALUES ${NO_PRETTY_VALUES}
TEST_EXECUTABLE ${TEST_EXECUTABLE}
TEST_EXECUTOR ${TEST_EXECUTOR}
TEST_WORKING_DIR ${TEST_WORKING_DIR}
TEST_PREFIX ${TEST_PREFIX}
TEST_SUFFIX ${TEST_SUFFIX}
TEST_LIST ${TEST_LIST}
CTEST_FILE ${CTEST_FILE}
TEST_DISCOVERY_TIMEOUT ${TEST_DISCOVERY_TIMEOUT}
TEST_XML_OUTPUT_DIR ${TEST_XML_OUTPUT_DIR}
TEST_EXTRA_ARGS ${TEST_EXTRA_ARGS}
TEST_PROPERTIES ${TEST_PROPERTIES}
)
endif()

View File

@@ -37,11 +37,6 @@ macro(BLENDER_SRC_GTEST_EX)
if(WIN32)
set(MANIFEST "${CMAKE_BINARY_DIR}/tests.exe.manifest")
endif()
add_definitions(-DBLENDER_GFLAGS_NAMESPACE=${GFLAGS_NAMESPACE})
add_definitions(${GFLAGS_DEFINES})
add_definitions(${GLOG_DEFINES})
add_executable(${TARGET_NAME} ${ARG_SRC} ${MANIFEST})
target_include_directories(${TARGET_NAME} PUBLIC "${TEST_INC}")
target_include_directories(${TARGET_NAME} SYSTEM PUBLIC "${TEST_INC_SYS}")

View File

@@ -15,7 +15,6 @@ set(WITH_CYCLES_EMBREE ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_OSL ON CACHE BOOL "" FORCE)
set(WITH_DRACO ON CACHE BOOL "" FORCE)
set(WITH_FFTW3 ON CACHE BOOL "" FORCE)
set(WITH_GMP OFF CACHE BOOL "" FORCE)
set(WITH_LIBMV ON CACHE BOOL "" FORCE)
set(WITH_LIBMV_SCHUR_SPECIALIZATIONS ON CACHE BOOL "" FORCE)
set(WITH_COMPOSITOR ON CACHE BOOL "" FORCE)

View File

@@ -20,7 +20,6 @@ set(WITH_CYCLES_OSL OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES_DEVICE_OPTIX OFF CACHE BOOL "" FORCE)
set(WITH_DRACO OFF CACHE BOOL "" FORCE)
set(WITH_FFTW3 OFF CACHE BOOL "" FORCE)
set(WITH_GMP OFF CACHE BOOL "" FORCE)
set(WITH_LIBMV OFF CACHE BOOL "" FORCE)
set(WITH_LLVM OFF CACHE BOOL "" FORCE)
set(WITH_COMPOSITOR OFF CACHE BOOL "" FORCE)

View File

@@ -16,7 +16,6 @@ set(WITH_CYCLES_EMBREE ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_OSL ON CACHE BOOL "" FORCE)
set(WITH_DRACO ON CACHE BOOL "" FORCE)
set(WITH_FFTW3 ON CACHE BOOL "" FORCE)
set(WITH_GMP OFF CACHE BOOL "" FORCE)
set(WITH_LIBMV ON CACHE BOOL "" FORCE)
set(WITH_LIBMV_SCHUR_SPECIALIZATIONS ON CACHE BOOL "" FORCE)
set(WITH_COMPOSITOR ON CACHE BOOL "" FORCE)
@@ -54,7 +53,7 @@ set(WITH_USD ON CACHE BOOL "" FORCE)
set(WITH_MEM_JEMALLOC ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_CUDA_BINARIES ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_CUBIN_COMPILER OFF CACHE BOOL "" FORCE)
set(CYCLES_CUDA_BINARIES_ARCH sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61;sm_70;sm_75;compute_75 CACHE STRING "" FORCE)
set(CYCLES_CUDA_BINARIES_ARCH sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61;sm_70;sm_75 CACHE STRING "" FORCE)
set(WITH_CYCLES_DEVICE_OPTIX ON CACHE BOOL "" FORCE)
# platform dependent options

View File

@@ -169,26 +169,6 @@ function(blender_include_dirs_sys
include_directories(SYSTEM ${_ALL_INCS})
endfunction()
# Set include paths for header files included with "*.h" syntax.
# This enables auto-complete suggestions for user header files on Xcode.
# Build process is not affected since the include paths are the same
# as in HEADER_SEARCH_PATHS.
function(blender_user_header_search_paths
name
includes
)
if(XCODE)
set(_ALL_INCS "")
foreach(_INC ${includes})
get_filename_component(_ABS_INC ${_INC} ABSOLUTE)
# _ALL_INCS is a space-separated string of file paths in quotes.
set(_ALL_INCS "${_ALL_INCS} \"${_ABS_INC}\"")
endforeach()
set_target_properties(${name} PROPERTIES XCODE_ATTRIBUTE_USER_HEADER_SEARCH_PATHS "${_ALL_INCS}")
endif()
endfunction()
function(blender_source_group
name
sources
@@ -337,7 +317,6 @@ function(blender_add_lib__impl
# works fine without having the includes
# listed is helpful for IDE's (QtCreator/MSVC)
blender_source_group("${name}" "${sources}")
blender_user_header_search_paths("${name}" "${includes}")
list_assert_duplicates("${sources}")
list_assert_duplicates("${includes}")
@@ -375,42 +354,6 @@ function(blender_add_lib
set_property(GLOBAL APPEND PROPERTY BLENDER_LINK_LIBS ${name})
endfunction()
# blender_add_test_lib() is used to define a test library. It is intended to be
# called in tandem with blender_add_lib(). The test library will be linked into
# the bf_gtest_runner_test executable (see tests/gtests/CMakeLists.txt).
function(blender_add_test_lib
name
sources
includes
includes_sys
library_deps
)
add_cc_flags_custom_test(${name} PARENT_SCOPE)
# Otherwise external projects will produce warnings that we cannot fix.
remove_strict_flags()
# This duplicates logic that's also in GTestTesting.cmake, macro BLENDER_SRC_GTEST_EX.
# TODO(Sybren): deduplicate after the general approach in D7649 has been approved.
LIST(APPEND includes
${CMAKE_SOURCE_DIR}/tests/gtests
)
LIST(APPEND includes_sys
${GLOG_INCLUDE_DIRS}
${GFLAGS_INCLUDE_DIRS}
${CMAKE_SOURCE_DIR}/extern/gtest/include
${CMAKE_SOURCE_DIR}/extern/gmock/include
)
add_definitions(-DBLENDER_GFLAGS_NAMESPACE=${GFLAGS_NAMESPACE})
add_definitions(${GFLAGS_DEFINES})
add_definitions(${GLOG_DEFINES})
blender_add_lib__impl(${name} "${sources}" "${includes}" "${includes_sys}" "${library_deps}")
set_property(GLOBAL APPEND PROPERTY BLENDER_TEST_LIBS ${name})
endfunction()
# Ninja only: assign 'heavy pool' to some targets that are especially RAM-consuming to build.
function(setup_heavy_lib_pool)
if(WITH_NINJA_POOL_JOBS AND NINJA_MAX_NUM_PARALLEL_COMPILE_HEAVY_JOBS)
@@ -418,7 +361,7 @@ function(setup_heavy_lib_pool)
list(APPEND _HEAVY_LIBS "cycles_device" "cycles_kernel")
endif()
if(WITH_LIBMV)
list(APPEND _HEAVY_LIBS "extern_ceres" "bf_intern_libmv")
list(APPEND _HEAVY_LIBS "bf_intern_libmv")
endif()
if(WITH_OPENVDB)
list(APPEND _HEAVY_LIBS "bf_intern_openvdb")

View File

@@ -20,11 +20,7 @@
# Libraries configuration for Apple.
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
set(MACOSX_DEPLOYMENT_TARGET 11.00)
else()
set(MACOSX_DEPLOYMENT_TARGET 10.13)
endif()
set(MACOSX_DEPLOYMENT_TARGET "10.11")
macro(find_package_wrapper)
# do nothing, just satisfy the macro
@@ -382,12 +378,6 @@ if(WITH_CYCLES_OSL)
endif()
endif()
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
set(WITH_CYCLES_EMBREE OFF)
set(WITH_OPENIMAGEDENOISE OFF)
set(WITH_CPU_SSE OFF)
endif()
if(WITH_CYCLES_EMBREE)
find_package(Embree 3.8.0 REQUIRED)
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -Xlinker -stack_size -Xlinker 0x100000")
@@ -437,14 +427,6 @@ if(WITH_XR_OPENXR)
endif()
endif()
if(WITH_GMP)
find_package(GMP)
if(NOT GMP_FOUND)
message(WARNING "GMP not found, disabling WITH_GMP")
set(WITH_GMP OFF)
endif()
endif()
set(EXETYPE MACOSX_BUNDLE)
set(CMAKE_C_FLAGS_DEBUG "-fno-strict-aliasing -g")
@@ -457,8 +439,8 @@ if(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64" OR CMAKE_OSX_ARCHITECTURES MATCHES "
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -ftree-vectorize -fvariable-expansion-in-unroller")
endif()
else()
set(CMAKE_C_FLAGS_RELEASE "-O2 -mdynamic-no-pic -fno-strict-aliasing")
set(CMAKE_CXX_FLAGS_RELEASE "-O2 -mdynamic-no-pic -fno-strict-aliasing")
set(CMAKE_C_FLAGS_RELEASE "-mdynamic-no-pic -fno-strict-aliasing")
set(CMAKE_CXX_FLAGS_RELEASE "-mdynamic-no-pic -fno-strict-aliasing")
endif()
if(${XCODE_VERSION} VERSION_EQUAL 5 OR ${XCODE_VERSION} VERSION_GREATER 5)

View File

@@ -21,10 +21,8 @@
# Xcode and system configuration for Apple.
if(NOT CMAKE_OSX_ARCHITECTURES)
execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCHITECTURE OUTPUT_STRIP_TRAILING_WHITESPACE)
message(STATUS "Detected native architecture ${ARCHITECTURE}.")
set(CMAKE_OSX_ARCHITECTURES ${ARCHITECTURE} CACHE STRING
"Choose the architecture you want to build Blender for: arm64 or x86_64"
set(CMAKE_OSX_ARCHITECTURES x86_64 CACHE STRING
"Choose the architecture you want to build Blender for: i386, x86_64 or ppc"
FORCE)
endif()
@@ -67,9 +65,13 @@ endif()
message(STATUS "Detected OS X ${OSX_SYSTEM} and Xcode ${XCODE_VERSION} at ${XCODE_BUNDLE}")
# Require a relatively recent Xcode version.
if(${XCODE_VERSION} VERSION_LESS 10.0)
message(FATAL_ERROR "Only Xcode version 10.0 and newer is supported")
# Older Xcode versions had a different approach to the directory hierarchy.
# Require a newer Xcode, which also has a better chance of being able to
# compile with the required deployment target.
#
# NOTE: Xcode version 8.2 is the latest one which runs on macOS 10.11.
if(${XCODE_VERSION} VERSION_LESS 8.2)
message(FATAL_ERROR "Only Xcode version 8.2 and newer is supported")
endif()
# note: xcode-select path could be ambiguous,
@@ -131,21 +133,14 @@ if(${CMAKE_GENERATOR} MATCHES "Xcode")
endif()
unset(OSX_SDKROOT)
# 10.13 is our min. target, if you use higher sdk, weak linking happens
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
set(OSX_MIN_DEPLOYMENT_TARGET 11.00)
else()
set(OSX_MIN_DEPLOYMENT_TARGET 10.13)
endif()
# 10.11 is our min. target, if you use higher sdk, weak linking happens
if(CMAKE_OSX_DEPLOYMENT_TARGET)
if(${CMAKE_OSX_DEPLOYMENT_TARGET} VERSION_LESS ${OSX_MIN_DEPLOYMENT_TARGET})
message(STATUS "Setting deployment target to ${OSX_MIN_DEPLOYMENT_TARGET}, lower versions are not supported")
set(CMAKE_OSX_DEPLOYMENT_TARGET "${OSX_MIN_DEPLOYMENT_TARGET}" CACHE STRING "" FORCE)
if(${CMAKE_OSX_DEPLOYMENT_TARGET} VERSION_LESS 10.11)
message(STATUS "Setting deployment target to 10.11, lower versions are not supported")
set(CMAKE_OSX_DEPLOYMENT_TARGET "10.11" CACHE STRING "" FORCE)
endif()
else()
set(CMAKE_OSX_DEPLOYMENT_TARGET "${OSX_MIN_DEPLOYMENT_TARGET}" CACHE STRING "" FORCE)
set(CMAKE_OSX_DEPLOYMENT_TARGET "10.11" CACHE STRING "" FORCE)
endif()
if(NOT ${CMAKE_GENERATOR} MATCHES "Xcode")

View File

@@ -36,11 +36,6 @@ if(NOT DEFINED LIBDIR)
elseif(EXISTS ${LIBDIR_CENTOS7_ABI})
set(LIBDIR ${LIBDIR_CENTOS7_ABI})
set(WITH_CXX11_ABI OFF)
if(CMAKE_COMPILER_IS_GNUCC AND
CMAKE_C_COMPILER_VERSION VERSION_LESS 9.3)
message(FATAL_ERROR "GCC version must be at least 9.3 for precompiled libraries, found ${CMAKE_C_COMPILER_VERSION}")
endif()
endif()
# Avoid namespace pollution.
@@ -435,14 +430,6 @@ if(WITH_XR_OPENXR)
endif()
endif()
if(WITH_GMP)
find_package_wrapper(GMP)
if(NOT GMP_FOUND)
message(WARNING "GMP not found, disabling WITH_GMP")
set(WITH_GMP OFF)
endif()
endif()
if(EXISTS ${LIBDIR})
without_system_libs_end()
endif()

View File

@@ -750,10 +750,3 @@ if(WITH_XR_OPENXR)
set(WITH_XR_OPENXR OFF)
endif()
endif()
if(WITH_GMP)
set(GMP_INCLUDE_DIRS ${LIBDIR}/gmp/include)
set(GMP_LIBRARIES ${LIBDIR}/gmp/lib/libgmp-10.lib optimized ${LIBDIR}/gmp/lib/libgmpxx.lib debug ${LIBDIR}/gmp/lib/libgmpxx_d.lib)
set(GMP_ROOT_DIR ${LIBDIR}/gmp)
set(GMP_FOUND On)
endif()

View File

@@ -40,8 +40,7 @@ if make_utils.command_missing(git_command):
# Test if we are building a specific release version.
branch = make_utils.git_branch(git_command)
tag = make_utils.git_tag(git_command)
release_version = make_utils.git_branch_release_version(branch, tag)
release_version = make_utils.git_branch_release_version(branch)
lib_tests_dirpath = os.path.join('..', 'lib', "tests")
if not os.path.exists(lib_tests_dirpath):

View File

@@ -197,8 +197,7 @@ if __name__ == "__main__":
# Test if we are building a specific release version.
branch = make_utils.git_branch(args.git_command)
tag = make_utils.git_tag(args.git_command)
release_version = make_utils.git_branch_release_version(branch, tag)
release_version = make_utils.git_branch_release_version(branch)
if not args.no_libraries:
svn_update(args, release_version)

View File

@@ -36,7 +36,7 @@ def check_output(cmd, exit_on_error=True):
return output.strip()
def git_branch(git_command):
# Get current branch name.
# Test if we are building a specific release version.
try:
branch = subprocess.check_output([git_command, "rev-parse", "--abbrev-ref", "HEAD"])
except subprocess.CalledProcessError as e:
@@ -45,23 +45,10 @@ def git_branch(git_command):
return branch.strip().decode('utf8')
def git_tag(git_command):
# Get current tag name.
try:
tag = subprocess.check_output([git_command, "describe", "--exact-match"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
return None
return tag.strip().decode('utf8')
def git_branch_release_version(branch, tag):
def git_branch_release_version(branch):
release_version = re.search("^blender-v(.*)-release$", branch)
if release_version:
release_version = release_version.group(1)
elif tag:
release_version = re.search("^v([0-9]*\.[0-9]*).*", tag)
if release_version:
release_version = release_version.group(1)
return release_version
def svn_libraries_base_url(release_version):

View File

@@ -38,7 +38,7 @@ PROJECT_NAME = Blender
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER = "V2.91"
PROJECT_NUMBER = "V2.90"
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a

View File

@@ -20,7 +20,6 @@ from gpu_extras.presets import draw_circle_2d
offscreen = gpu.types.GPUOffScreen(512, 512)
with offscreen.bind():
bgl.glClearColor(0.0, 0.0, 0.0, 0.0)
bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)
with gpu.matrix.push_pop():
# reset matrices -> use normalized device coordinates [-1, 1]

View File

@@ -25,7 +25,6 @@ RING_AMOUNT = 10
offscreen = gpu.types.GPUOffScreen(WIDTH, HEIGHT)
with offscreen.bind():
bgl.glClearColor(0.0, 0.0, 0.0, 0.0)
bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)
with gpu.matrix.push_pop():
# reset matrices -> use normalized device coordinates [-1, 1]

View File

@@ -248,7 +248,7 @@ using the ``bl_idname`` rather than the classes original name.
.. note::
There are some exceptions to this for class names which aren't guaranteed to be unique.
In this case use: :func:`bpy.types.Struct.bl_rna_get_subclass_py`.
In this case use: :func:`bpy.types.Struct.bl_rna_get_subclass`.
When loading a class, Blender performs sanity checks making sure all required properties and functions are found,

View File

@@ -225,7 +225,6 @@ else:
"aud",
"bgl",
"blf",
"bl_math",
"imbuf",
"bmesh",
"bmesh.ops",
@@ -696,11 +695,13 @@ def py_descr2sphinx(ident, fw, descr, module_name, type_name, identifier):
doc = undocumented_message(module_name, type_name, identifier)
if type(descr) == GetSetDescriptorType:
fw(ident + ".. attribute:: %s\n\n" % identifier)
fw(ident + ".. attribute:: %s\n" % identifier)
fw(ident + " :noindex:\n\n")
write_indented_lines(ident + " ", fw, doc, False)
fw("\n")
elif type(descr) == MemberDescriptorType: # same as above but use 'data'
fw(ident + ".. data:: %s\n\n" % identifier)
fw(ident + ".. data:: %s\n" % identifier)
fw(ident + " :noindex:\n\n")
write_indented_lines(ident + " ", fw, doc, False)
fw("\n")
elif type(descr) in {MethodDescriptorType, ClassMethodDescriptorType}:
@@ -740,11 +741,14 @@ def pyprop2sphinx(ident, fw, identifier, py_prop):
'''
# readonly properties use "data" directive, variables use "attribute" directive
if py_prop.fset is None:
fw(ident + ".. data:: %s\n\n" % identifier)
fw(ident + ".. data:: %s\n" % identifier)
fw(ident + " :noindex:\n\n")
else:
fw(ident + ".. attribute:: %s\n\n" % identifier)
fw(ident + ".. attribute:: %s\n" % identifier)
fw(ident + " :noindex:\n\n")
write_indented_lines(ident + " ", fw, py_prop.__doc__)
if py_prop.fset is None:
fw("\n")
fw(ident + " (readonly)\n\n")
else:
fw("\n")
@@ -910,7 +914,8 @@ def pymodule2sphinx(basepath, module_name, module, title):
elif issubclass(value_type, (bool, int, float, str, tuple)):
# constant, not much fun we can do here except to list it.
# TODO, figure out some way to document these!
fw(".. data:: %s\n\n" % attribute)
fw(".. data:: %s\n" % attribute)
fw(" :noindex:\n\n")
write_indented_lines(" ", fw, "constant value %s" % repr(value), False)
fw("\n")
else:
@@ -1120,7 +1125,8 @@ def pycontext2sphinx(basepath):
type_descr = prop.get_type_description(
class_fmt=":class:`bpy.types.%s`", collection_id=_BPY_PROP_COLLECTION_ID)
fw(".. data:: %s\n\n" % prop.identifier)
fw(".. data:: %s\n" % prop.identifier)
fw(" :noindex:\n\n")
if prop.description:
fw(" %s\n\n" % prop.description)
@@ -1165,7 +1171,8 @@ def pycontext2sphinx(basepath):
i = 0
while char_array[i] is not None:
member = ctypes.string_at(char_array[i]).decode(encoding="ascii")
fw(".. data:: %s\n\n" % member)
fw(".. data:: %s\n" % member)
fw(" :noindex:\n\n")
member_type, is_seq = context_type_map[member]
fw(" :type: %s :class:`bpy.types.%s`\n\n" % ("sequence of " if is_seq else "", member_type))
unique.add(member)
@@ -1371,9 +1378,11 @@ def pyrna2sphinx(basepath):
type_descr = prop.get_type_description(class_fmt=":class:`%s`", collection_id=_BPY_PROP_COLLECTION_ID)
# readonly properties use "data" directive, variables properties use "attribute" directive
if 'readonly' in type_descr:
fw(" .. data:: %s\n\n" % prop.identifier)
fw(" .. data:: %s\n" % prop.identifier)
fw(" :noindex:\n\n")
else:
fw(" .. attribute:: %s\n\n" % prop.identifier)
fw(" .. attribute:: %s\n" % prop.identifier)
fw(" :noindex:\n\n")
if prop.description:
fw(" %s\n\n" % prop.description)
@@ -1789,18 +1798,8 @@ def write_rst_contents(basepath):
standalone_modules = (
# submodules are added in parent page
"aud",
"bgl",
"bl_math",
"blf",
"bmesh",
"bpy_extras",
"freestyle",
"gpu",
"gpu_extras",
"idprop.types",
"imbuf",
"mathutils",
"mathutils", "freestyle", "bgl", "blf", "imbuf", "gpu", "gpu_extras",
"aud", "bpy_extras", "idprop.types", "bmesh",
)
for mod in standalone_modules:
@@ -1952,7 +1951,6 @@ def write_rst_importable_modules(basepath):
"mathutils.kdtree": "KDTree Utilities",
"mathutils.interpolate": "Interpolation Utilities",
"mathutils.noise": "Noise Utilities",
"bl_math": "Additional Math Functions",
"freestyle": "Freestyle Module",
"freestyle.types": "Freestyle Types",
"freestyle.predicates": "Freestyle Predicates",

View File

@@ -1395,9 +1395,9 @@ PyDoc_STRVAR(M_aud_Sound_threshold_doc,
" all between to 0.\n\n"
" :arg threshold: Threshold value over which an amplitude counts\n"
" non-zero.\n\n"
" :type threshold: float\n"
" :return: The created :class:`Sound` object.\n"
" :rtype: :class:`Sound`");
":type threshold: float\n"
":return: The created :class:`Sound` object.\n"
":rtype: :class:`Sound`");
static PyObject *
Sound_threshold(Sound* self, PyObject* args)

View File

@@ -119,8 +119,8 @@ void JOSResampleReader::updateBuffer(int size, double factor, int samplesize)
P = int_to_fp(m_L) - P;\
\
end = std::floor((m_len - 1) / double(m_L) + m_P) - 1;\
if(m_cache_valid - int(m_n) - 2 < end)\
end = m_cache_valid - int(m_n) - 2;\
if(m_cache_valid - m_n - 2 < end)\
end = m_cache_valid - m_n - 2;\
\
data = buf + (m_n + 2 + end) * m_channels - 1;\
l = fp_to_int(P);\
@@ -166,8 +166,8 @@ void JOSResampleReader::updateBuffer(int size, double factor, int samplesize)
P = 0 - P;\
\
end = (int_to_fp(m_len) - P) / P_increment - 1;\
if(m_cache_valid - int(m_n) - 2 < end)\
end = m_cache_valid - int(m_n) - 2;\
if(m_cache_valid - m_n - 2 < end)\
end = m_cache_valid - m_n - 2;\
\
P += P_increment * end;\
data = buf + (m_n + 2 + end) * m_channels - 1;\

View File

@@ -1,41 +0,0 @@
diff --git a/extern/bullet2/src/BulletCollision/CollisionShapes/btPolyhedralConvexShape.cpp b/extern/bullet2/src/BulletCollision/CollisionShapes/btPolyhedralConvexShape.cpp
index 9095c592d87..b831e20c2f9 100644
--- a/extern/bullet2/src/BulletCollision/CollisionShapes/btPolyhedralConvexShape.cpp
+++ b/extern/bullet2/src/BulletCollision/CollisionShapes/btPolyhedralConvexShape.cpp
@@ -406,17 +406,17 @@ void btPolyhedralConvexShape::calculateLocalInertia(btScalar mass,btVector3& ine
#ifndef __SPU__
//not yet, return box inertia
- btScalar margin = getMargin();
+ //btScalar margin = getMargin();
btTransform ident;
ident.setIdentity();
btVector3 aabbMin,aabbMax;
- getAabb(ident,aabbMin,aabbMax);
+ getAabb(ident,aabbMin,aabbMax); // This already contains the margin
btVector3 halfExtents = (aabbMax-aabbMin)*btScalar(0.5);
- btScalar lx=btScalar(2.)*(halfExtents.x()+margin);
- btScalar ly=btScalar(2.)*(halfExtents.y()+margin);
- btScalar lz=btScalar(2.)*(halfExtents.z()+margin);
+ btScalar lx=btScalar(2.)*(halfExtents.x());
+ btScalar ly=btScalar(2.)*(halfExtents.y());
+ btScalar lz=btScalar(2.)*(halfExtents.z());
const btScalar x2 = lx*lx;
const btScalar y2 = ly*ly;
const btScalar z2 = lz*lz;
@@ -476,10 +476,10 @@ void btPolyhedralConvexAabbCachingShape::recalcLocalAabb()
for ( int i = 0; i < 3; ++i )
{
- m_localAabbMax[i] = _supporting[i][i] + m_collisionMargin;
- m_localAabbMin[i] = _supporting[i + 3][i] - m_collisionMargin;
+ m_localAabbMax[i] = _supporting[i][i];
+ m_localAabbMin[i] = _supporting[i + 3][i];
}
-
+
#else
for (int i=0;i<3;i++)

View File

@@ -180,7 +180,7 @@ void btCompoundShape::getAabb(const btTransform& trans,btVector3& aabbMin,btVect
localHalfExtents += btVector3(getMargin(),getMargin(),getMargin());
btMatrix3x3 abs_b = trans.getBasis().absolute();
btMatrix3x3 abs_b = trans.getBasis().absolute();
btVector3 center = trans(localCenter);

View File

@@ -406,17 +406,17 @@ void btPolyhedralConvexShape::calculateLocalInertia(btScalar mass,btVector3& ine
#ifndef __SPU__
//not yet, return box inertia
//btScalar margin = getMargin();
btScalar margin = getMargin();
btTransform ident;
ident.setIdentity();
btVector3 aabbMin,aabbMax;
getAabb(ident,aabbMin,aabbMax); // This already contains the margin
getAabb(ident,aabbMin,aabbMax);
btVector3 halfExtents = (aabbMax-aabbMin)*btScalar(0.5);
btScalar lx=btScalar(2.)*(halfExtents.x());
btScalar ly=btScalar(2.)*(halfExtents.y());
btScalar lz=btScalar(2.)*(halfExtents.z());
btScalar lx=btScalar(2.)*(halfExtents.x()+margin);
btScalar ly=btScalar(2.)*(halfExtents.y()+margin);
btScalar lz=btScalar(2.)*(halfExtents.z()+margin);
const btScalar x2 = lx*lx;
const btScalar y2 = ly*ly;
const btScalar z2 = lz*lz;
@@ -476,10 +476,10 @@ void btPolyhedralConvexAabbCachingShape::recalcLocalAabb()
for ( int i = 0; i < 3; ++i )
{
m_localAabbMax[i] = _supporting[i][i];
m_localAabbMin[i] = _supporting[i + 3][i];
m_localAabbMax[i] = _supporting[i][i] + m_collisionMargin;
m_localAabbMin[i] = _supporting[i + 3][i] - m_collisionMargin;
}
#else
for (int i=0;i<3;i++)

View File

@@ -879,7 +879,7 @@ int cuewCompilerVersion(void) {
}
/* get --version output */
strcat(command, "\"");
strncat(command, "\"", 1);
strncat(command, path, sizeof(command) - 1);
strncat(command, "\" --version", sizeof(command) - strlen(path) - 1);
pipe = popen(command, "r");

View File

@@ -31,32 +31,19 @@ if(MSVC_CLANG AND WITH_OPENMP AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "9.0.1
remove_cc_flag("-fopenmp")
endif()
set(MANTAVERSION "0.13")
set(MANTAVERSION "0.12")
add_definitions(-DWITH_FLUID=1)
# Compile Mantaflow dependencies too (e.g. cnpy for numpy file IO).
# Make sure that dependencies exist before enabling this option by updating the source files in extern/
set(WITH_MANTA_DEPENDENCIES 0)
# Enable Mantaflow numpy support
set(WITH_MANTA_NUMPY 0)
if(NOT WITH_MANTA_DEPENDENCIES)
add_definitions(-DNO_CNPY=1)
endif()
set(MANTA_DEP
dependencies
)
set(MANTA_HLP
helper
)
set(MANTA_PP
preprocessed
)
if(WITH_MANTA_DEPENDENCIES)
set(MANTA_DEP
dependencies
)
endif()
if(WITH_TBB)
add_definitions(-DTBB=1)
@@ -75,10 +62,6 @@ if(WIN32)
add_definitions(-D_USE_MATH_DEFINES)
endif()
if(WITH_MANTA_NUMPY AND WITH_PYTHON_INSTALL_NUMPY)
add_definitions(-DNUMPY=1)
endif()
set(INC
${MANTA_PP}
${MANTA_PP}/fileio
@@ -86,25 +69,14 @@ set(INC
${MANTA_PP}/plugin
${MANTA_HLP}/pwrapper
${MANTA_HLP}/util
${MANTA_DEP}/cnpy
)
if(WITH_MANTA_DEPENDENCIES)
list(APPEND INC
${MANTA_DEP}/cnpy
)
endif()
set(INC_SYS
${PYTHON_INCLUDE_DIRS}
${ZLIB_INCLUDE_DIRS}
)
if(WITH_MANTA_NUMPY AND WITH_PYTHON_INSTALL_NUMPY)
list(APPEND INC_SYS
${PYTHON_NUMPY_INCLUDE_DIRS}
)
endif()
if(WITH_TBB)
list(APPEND INC_SYS
${TBB_INCLUDE_DIRS}
@@ -124,6 +96,9 @@ if(WITH_OPENVDB)
endif()
set(SRC
${MANTA_DEP}/cnpy/cnpy.cpp
${MANTA_DEP}/cnpy/cnpy.h
${MANTA_PP}/commonkernels.h
${MANTA_PP}/commonkernels.h.reg.cpp
${MANTA_PP}/conjugategrad.cpp
@@ -186,10 +161,14 @@ set(SRC
${MANTA_PP}/plugin/initplugins.cpp
${MANTA_PP}/plugin/kepsilon.cpp
${MANTA_PP}/plugin/meshplugins.cpp
# TODO (sebbas): add numpy to libraries
# ${MANTA_PP}/plugin/numpyconvert.cpp
${MANTA_PP}/plugin/pressure.cpp
${MANTA_PP}/plugin/ptsplugins.cpp
${MANTA_PP}/plugin/secondaryparticles.cpp
${MANTA_PP}/plugin/surfaceturbulence.cpp
# TODO (sebbas): add numpy to libraries
# ${MANTA_PP}/plugin/tfplugins.cpp
${MANTA_PP}/plugin/vortexplugins.cpp
${MANTA_PP}/plugin/waveletturbulence.cpp
${MANTA_PP}/plugin/waves.cpp
@@ -214,6 +193,9 @@ set(SRC
${MANTA_PP}/vortexsheet.h.reg.cpp
${MANTA_HLP}/pwrapper/manta.h
# TODO (sebbas): add numpy to libraries
# ${MANTA_HLP}/pwrapper/numpyWrap.cpp
# ${MANTA_HLP}/pwrapper/numpyWrap.h
${MANTA_HLP}/pwrapper/pclass.cpp
${MANTA_HLP}/pwrapper/pclass.h
${MANTA_HLP}/pwrapper/pconvert.cpp
@@ -239,22 +221,6 @@ set(SRC
${MANTA_HLP}/util/vectorbase.h
)
if(WITH_MANTA_DEPENDENCIES)
list(APPEND SRC
${MANTA_DEP}/cnpy/cnpy.cpp
${MANTA_DEP}/cnpy/cnpy.h
)
endif()
if(WITH_MANTA_NUMPY AND WITH_PYTHON_INSTALL_NUMPY)
list(APPEND SRC
${MANTA_PP}/plugin/numpyconvert.cpp
${MANTA_PP}/plugin/tfplugins.cpp
${MANTA_HLP}/pwrapper/numpyWrap.cpp
${MANTA_HLP}/pwrapper/numpyWrap.h
)
endif()
set(LIB
${PYTHON_LINKFLAGS}
${PYTHON_LIBRARIES}

View File

@@ -13,12 +13,6 @@ BLENDER_INSTALLATION=/Users/sebbas/Developer/Blender/fluid-mantaflow
# Try to check out Mantaflow repository before building?
CLEAN_REPOSITORY=0
# Skip copying dependency files?
WITH_DEPENDENCIES=0
# Build with numpy support?
USE_NUMPY=0
# Choose which multithreading platform to use for Mantaflow preprocessing
USE_OMP=0
USE_TBB=1
@@ -56,21 +50,17 @@ fi
MANTA_BUILD_PATH=$MANTA_INSTALLATION/build_blender/
mkdir -p $MANTA_BUILD_PATH
cd $MANTA_BUILD_PATH
cmake ../mantaflowgit -DGUI=0 -DOPENMP=$USE_OMP -DTBB=$USE_TBB -DBLENDER=1 -DPREPDEBUG=1 -DNUMPY=$USE_NUMPY && make -j8
cmake ../mantaflowgit -DGUI=OFF -DOPENMP=$USE_OMP -DTBB=$USE_TBB -DBLENDER=ON -DPREPDEBUG=ON && make -j8
# ==================== 3) COPY MANTAFLOW FILES TO BLENDER ROOT ===========================
if [[ "$WITH_DEPENDENCIES" -eq "1" ]]; then
mkdir -p $BLENDER_INSTALLATION/blender/tmp/dependencies/ && cp -Rf $MANTA_INSTALLATION/mantaflowgit/dependencies/cnpy "$_"
fi
mkdir -p $BLENDER_INSTALLATION/blender/tmp/dependencies/ && cp -Rf $MANTA_INSTALLATION/mantaflowgit/dependencies/cnpy "$_"
mkdir -p $BLENDER_INSTALLATION/blender/tmp/helper/ && cp -Rf $MANTA_INSTALLATION/mantaflowgit/source/util "$_"
mkdir -p $BLENDER_INSTALLATION/blender/tmp/helper/ && cp -Rf $MANTA_INSTALLATION/mantaflowgit/source/pwrapper "$_"
mkdir -p $BLENDER_INSTALLATION/blender/tmp/preprocessed/ && cp -Rf $MANTA_INSTALLATION/build_blender/pp/source/. "$_"
# Remove some files that are not needed in Blender
if [[ "$WITH_DEPENDENCIES" -eq "1" ]]; then
rm $BLENDER_INSTALLATION/blender/tmp/dependencies/cnpy/example1.cpp
fi
rm $BLENDER_INSTALLATION/blender/tmp/dependencies/cnpy/example1.cpp
rm $BLENDER_INSTALLATION/blender/tmp/helper/pwrapper/pymain.cpp
rm $BLENDER_INSTALLATION/blender/tmp/preprocessed/*.reg
rm $BLENDER_INSTALLATION/blender/tmp/preprocessed/python/*.reg
@@ -92,13 +82,8 @@ BLENDER_TMP_DEP=$BLENDER_TMP/dependencies
BLENDER_TMP_HLP=$BLENDER_TMP/helper
BLENDER_TMP_PP=$BLENDER_TMP/preprocessed
# Before moving new files, delete all existing file in the Blender repository
rm -Rf $BLENDER_MANTA_EXTERN/dependencies $BLENDER_MANTA_EXTERN/helper $BLENDER_MANTA_EXTERN/preprocessed
# Move files from tmp dir to extern/
if [[ "$WITH_DEPENDENCIES" -eq "1" ]]; then
cp -Rf $BLENDER_TMP_DEP $BLENDER_MANTA_EXTERN
fi
cp -Rf $BLENDER_TMP_DEP $BLENDER_MANTA_EXTERN
cp -Rf $BLENDER_TMP_HLP $BLENDER_MANTA_EXTERN
cp -Rf $BLENDER_TMP_PP $BLENDER_MANTA_EXTERN

View File

@@ -0,0 +1,21 @@
The MIT License
Copyright (c) Carl Rogers, 2011
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -0,0 +1,385 @@
// Copyright (C) 2011 Carl Rogers
// Released under MIT License
// license available in LICENSE file, or at http://www.opensource.org/licenses/mit-license.php
#include "cnpy.h"
#include <complex>
#include <cstdlib>
#include <algorithm>
#include <cstring>
#include <iomanip>
#include <stdint.h>
#include <stdexcept>
#include <regex>
char cnpy::BigEndianTest()
{
int x = 1;
return (((char *)&x)[0]) ? '<' : '>';
}
char cnpy::map_type(const std::type_info &t)
{
if (t == typeid(float))
return 'f';
if (t == typeid(double))
return 'f';
if (t == typeid(long double))
return 'f';
if (t == typeid(int))
return 'i';
if (t == typeid(char))
return 'i';
if (t == typeid(short))
return 'i';
if (t == typeid(long))
return 'i';
if (t == typeid(long long))
return 'i';
if (t == typeid(unsigned char))
return 'u';
if (t == typeid(unsigned short))
return 'u';
if (t == typeid(unsigned long))
return 'u';
if (t == typeid(unsigned long long))
return 'u';
if (t == typeid(unsigned int))
return 'u';
if (t == typeid(bool))
return 'b';
if (t == typeid(std::complex<float>))
return 'c';
if (t == typeid(std::complex<double>))
return 'c';
if (t == typeid(std::complex<long double>))
return 'c';
else
return '?';
}
template<> std::vector<char> &cnpy::operator+=(std::vector<char> &lhs, const std::string rhs)
{
lhs.insert(lhs.end(), rhs.begin(), rhs.end());
return lhs;
}
template<> std::vector<char> &cnpy::operator+=(std::vector<char> &lhs, const char *rhs)
{
// write in little endian
size_t len = strlen(rhs);
lhs.reserve(len);
for (size_t byte = 0; byte < len; byte++) {
lhs.push_back(rhs[byte]);
}
return lhs;
}
void cnpy::parse_npy_header(unsigned char *buffer,
size_t &word_size,
std::vector<size_t> &shape,
bool &fortran_order)
{
// std::string magic_string(buffer,6);
uint8_t major_version = *reinterpret_cast<uint8_t *>(buffer + 6);
uint8_t minor_version = *reinterpret_cast<uint8_t *>(buffer + 7);
uint16_t header_len = *reinterpret_cast<uint16_t *>(buffer + 8);
std::string header(reinterpret_cast<char *>(buffer + 9), header_len);
size_t loc1, loc2;
// fortran order
loc1 = header.find("fortran_order") + 16;
fortran_order = (header.substr(loc1, 4) == "True" ? true : false);
// shape
loc1 = header.find("(");
loc2 = header.find(")");
std::regex num_regex("[0-9][0-9]*");
std::smatch sm;
shape.clear();
std::string str_shape = header.substr(loc1 + 1, loc2 - loc1 - 1);
while (std::regex_search(str_shape, sm, num_regex)) {
shape.push_back(std::stoi(sm[0].str()));
str_shape = sm.suffix().str();
}
// endian, word size, data type
// the byte order code '|' stands for "not applicable";
// not sure when this applies except for byte arrays
loc1 = header.find("descr") + 9;
bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false);
assert(littleEndian);
// char type = header[loc1+1];
// assert(type == map_type(T));
std::string str_ws = header.substr(loc1 + 2);
loc2 = str_ws.find("'");
word_size = atoi(str_ws.substr(0, loc2).c_str());
}
void cnpy::parse_npy_header(FILE *fp,
size_t &word_size,
std::vector<size_t> &shape,
bool &fortran_order)
{
char buffer[256];
size_t res = fread(buffer, sizeof(char), 11, fp);
if (res != 11)
throw std::runtime_error("parse_npy_header: failed fread");
std::string header = fgets(buffer, 256, fp);
assert(header[header.size() - 1] == '\n');
size_t loc1, loc2;
// fortran order
loc1 = header.find("fortran_order");
if (loc1 == std::string::npos)
throw std::runtime_error("parse_npy_header: failed to find header keyword: 'fortran_order'");
loc1 += 16;
fortran_order = (header.substr(loc1, 4) == "True" ? true : false);
// shape
loc1 = header.find("(");
loc2 = header.find(")");
if (loc1 == std::string::npos || loc2 == std::string::npos)
throw std::runtime_error("parse_npy_header: failed to find header keyword: '(' or ')'");
std::regex num_regex("[0-9][0-9]*");
std::smatch sm;
shape.clear();
std::string str_shape = header.substr(loc1 + 1, loc2 - loc1 - 1);
while (std::regex_search(str_shape, sm, num_regex)) {
shape.push_back(std::stoi(sm[0].str()));
str_shape = sm.suffix().str();
}
// endian, word size, data type
// the byte order code '|' stands for "not applicable";
// not sure when this applies except for byte arrays
loc1 = header.find("descr");
if (loc1 == std::string::npos)
throw std::runtime_error("parse_npy_header: failed to find header keyword: 'descr'");
loc1 += 9;
bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false);
assert(littleEndian);
// char type = header[loc1+1];
// assert(type == map_type(T));
std::string str_ws = header.substr(loc1 + 2);
loc2 = str_ws.find("'");
word_size = atoi(str_ws.substr(0, loc2).c_str());
}
void cnpy::parse_zip_footer(FILE *fp,
uint16_t &nrecs,
size_t &global_header_size,
size_t &global_header_offset)
{
std::vector<char> footer(22);
fseek(fp, -22, SEEK_END);
size_t res = fread(&footer[0], sizeof(char), 22, fp);
if (res != 22)
throw std::runtime_error("parse_zip_footer: failed fread");
uint16_t disk_no, disk_start, nrecs_on_disk, comment_len;
disk_no = *(uint16_t *)&footer[4];
disk_start = *(uint16_t *)&footer[6];
nrecs_on_disk = *(uint16_t *)&footer[8];
nrecs = *(uint16_t *)&footer[10];
global_header_size = *(uint32_t *)&footer[12];
global_header_offset = *(uint32_t *)&footer[16];
comment_len = *(uint16_t *)&footer[20];
assert(disk_no == 0);
assert(disk_start == 0);
assert(nrecs_on_disk == nrecs);
assert(comment_len == 0);
}
cnpy::NpyArray load_the_npy_file(FILE *fp)
{
std::vector<size_t> shape;
size_t word_size;
bool fortran_order;
cnpy::parse_npy_header(fp, word_size, shape, fortran_order);
cnpy::NpyArray arr(shape, word_size, fortran_order);
size_t nread = fread(arr.data<char>(), 1, arr.num_bytes(), fp);
if (nread != arr.num_bytes())
throw std::runtime_error("load_the_npy_file: failed fread");
return arr;
}
cnpy::NpyArray load_the_npz_array(FILE *fp, uint32_t compr_bytes, uint32_t uncompr_bytes)
{
std::vector<unsigned char> buffer_compr(compr_bytes);
std::vector<unsigned char> buffer_uncompr(uncompr_bytes);
size_t nread = fread(&buffer_compr[0], 1, compr_bytes, fp);
if (nread != compr_bytes)
throw std::runtime_error("load_the_npy_file: failed fread");
int err;
z_stream d_stream;
d_stream.zalloc = Z_NULL;
d_stream.zfree = Z_NULL;
d_stream.opaque = Z_NULL;
d_stream.avail_in = 0;
d_stream.next_in = Z_NULL;
err = inflateInit2(&d_stream, -MAX_WBITS);
d_stream.avail_in = compr_bytes;
d_stream.next_in = &buffer_compr[0];
d_stream.avail_out = uncompr_bytes;
d_stream.next_out = &buffer_uncompr[0];
err = inflate(&d_stream, Z_FINISH);
err = inflateEnd(&d_stream);
std::vector<size_t> shape;
size_t word_size;
bool fortran_order;
cnpy::parse_npy_header(&buffer_uncompr[0], word_size, shape, fortran_order);
cnpy::NpyArray array(shape, word_size, fortran_order);
size_t offset = uncompr_bytes - array.num_bytes();
memcpy(array.data<unsigned char>(), &buffer_uncompr[0] + offset, array.num_bytes());
return array;
}
cnpy::npz_t cnpy::npz_load(std::string fname)
{
FILE *fp = fopen(fname.c_str(), "rb");
if (!fp) {
throw std::runtime_error("npz_load: Error! Unable to open file " + fname + "!");
}
cnpy::npz_t arrays;
while (1) {
std::vector<char> local_header(30);
size_t headerres = fread(&local_header[0], sizeof(char), 30, fp);
if (headerres != 30)
throw std::runtime_error("npz_load: failed fread");
// if we've reached the global header, stop reading
if (local_header[2] != 0x03 || local_header[3] != 0x04)
break;
// read in the variable name
uint16_t name_len = *(uint16_t *)&local_header[26];
std::string varname(name_len, ' ');
size_t vname_res = fread(&varname[0], sizeof(char), name_len, fp);
if (vname_res != name_len)
throw std::runtime_error("npz_load: failed fread");
// erase the trailing .npy
varname.erase(varname.end() - 4, varname.end());
// read in the extra field
uint16_t extra_field_len = *(uint16_t *)&local_header[28];
if (extra_field_len > 0) {
std::vector<char> buff(extra_field_len);
size_t efield_res = fread(&buff[0], sizeof(char), extra_field_len, fp);
if (efield_res != extra_field_len)
throw std::runtime_error("npz_load: failed fread");
}
uint16_t compr_method = *reinterpret_cast<uint16_t *>(&local_header[0] + 8);
uint32_t compr_bytes = *reinterpret_cast<uint32_t *>(&local_header[0] + 18);
uint32_t uncompr_bytes = *reinterpret_cast<uint32_t *>(&local_header[0] + 22);
if (compr_method == 0) {
arrays[varname] = load_the_npy_file(fp);
}
else {
arrays[varname] = load_the_npz_array(fp, compr_bytes, uncompr_bytes);
}
}
fclose(fp);
return arrays;
}
cnpy::NpyArray cnpy::npz_load(std::string fname, std::string varname)
{
FILE *fp = fopen(fname.c_str(), "rb");
if (!fp)
throw std::runtime_error("npz_load: Unable to open file " + fname);
while (1) {
std::vector<char> local_header(30);
size_t header_res = fread(&local_header[0], sizeof(char), 30, fp);
if (header_res != 30)
throw std::runtime_error("npz_load: failed fread");
// if we've reached the global header, stop reading
if (local_header[2] != 0x03 || local_header[3] != 0x04)
break;
// read in the variable name
uint16_t name_len = *(uint16_t *)&local_header[26];
std::string vname(name_len, ' ');
size_t vname_res = fread(&vname[0], sizeof(char), name_len, fp);
if (vname_res != name_len)
throw std::runtime_error("npz_load: failed fread");
vname.erase(vname.end() - 4, vname.end()); // erase the trailing .npy
// read in the extra field
uint16_t extra_field_len = *(uint16_t *)&local_header[28];
fseek(fp, extra_field_len, SEEK_CUR); // skip past the extra field
uint16_t compr_method = *reinterpret_cast<uint16_t *>(&local_header[0] + 8);
uint32_t compr_bytes = *reinterpret_cast<uint32_t *>(&local_header[0] + 18);
uint32_t uncompr_bytes = *reinterpret_cast<uint32_t *>(&local_header[0] + 22);
if (vname == varname) {
NpyArray array = (compr_method == 0) ? load_the_npy_file(fp) :
load_the_npz_array(fp, compr_bytes, uncompr_bytes);
fclose(fp);
return array;
}
else {
// skip past the data
// uint32_t size = *(uint32_t*) &local_header[22];
uint32_t size = *(uint32_t *)&local_header[18]; // using index 18 instead of 22 enables
// support for compressed data
fseek(fp, size, SEEK_CUR);
}
}
fclose(fp);
// if we get here, we haven't found the variable in the file
throw std::runtime_error("npz_load: Variable name " + varname + " not found in " + fname);
}
cnpy::NpyArray cnpy::npy_load(std::string fname)
{
FILE *fp = fopen(fname.c_str(), "rb");
if (!fp)
throw std::runtime_error("npy_load: Unable to open file " + fname);
NpyArray arr = load_the_npy_file(fp);
fclose(fp);
return arr;
}

View File

@@ -0,0 +1,310 @@
// Copyright (C) 2011 Carl Rogers
// Released under MIT License
// license available in LICENSE file, or at http://www.opensource.org/licenses/mit-license.php
#ifndef LIBCNPY_H_
#define LIBCNPY_H_
#include <string>
#include <stdexcept>
#include <sstream>
#include <vector>
#include <cstdio>
#include <typeinfo>
#include <iostream>
#include <cassert>
#include <zlib.h>
#include <map>
#include <memory>
#include <stdint.h>
#include <numeric>
namespace cnpy {
struct NpyArray {
NpyArray(const std::vector<size_t> &_shape, size_t _word_size, bool _fortran_order)
: shape(_shape), word_size(_word_size), fortran_order(_fortran_order)
{
num_vals = 1;
for (size_t i = 0; i < shape.size(); i++)
num_vals *= shape[i];
data_holder = std::shared_ptr<std::vector<char>>(new std::vector<char>(num_vals * word_size));
}
NpyArray() : shape(0), word_size(0), fortran_order(0), num_vals(0)
{
}
template<typename T> T *data()
{
return reinterpret_cast<T *>(&(*data_holder)[0]);
}
template<typename T> const T *data() const
{
return reinterpret_cast<T *>(&(*data_holder)[0]);
}
template<typename T> std::vector<T> as_vec() const
{
const T *p = data<T>();
return std::vector<T>(p, p + num_vals);
}
size_t num_bytes() const
{
return data_holder->size();
}
std::shared_ptr<std::vector<char>> data_holder;
std::vector<size_t> shape;
size_t word_size;
bool fortran_order;
size_t num_vals;
};
using npz_t = std::map<std::string, NpyArray>;
char BigEndianTest();
char map_type(const std::type_info &t);
template<typename T> std::vector<char> create_npy_header(const std::vector<size_t> &shape);
void parse_npy_header(FILE *fp,
size_t &word_size,
std::vector<size_t> &shape,
bool &fortran_order);
void parse_npy_header(unsigned char *buffer,
size_t &word_size,
std::vector<size_t> &shape,
bool &fortran_order);
void parse_zip_footer(FILE *fp,
uint16_t &nrecs,
size_t &global_header_size,
size_t &global_header_offset);
npz_t npz_load(std::string fname);
NpyArray npz_load(std::string fname, std::string varname);
NpyArray npy_load(std::string fname);
template<typename T> std::vector<char> &operator+=(std::vector<char> &lhs, const T rhs)
{
// write in little endian
for (size_t byte = 0; byte < sizeof(T); byte++) {
char val = *((char *)&rhs + byte);
lhs.push_back(val);
}
return lhs;
}
template<> std::vector<char> &operator+=(std::vector<char> &lhs, const std::string rhs);
template<> std::vector<char> &operator+=(std::vector<char> &lhs, const char *rhs);
template<typename T>
void npy_save(std::string fname,
const T *data,
const std::vector<size_t> shape,
std::string mode = "w")
{
FILE *fp = NULL;
std::vector<size_t> true_data_shape; // if appending, the shape of existing + new data
if (mode == "a")
fp = fopen(fname.c_str(), "r+b");
if (fp) {
// file exists. we need to append to it. read the header, modify the array size
size_t word_size;
bool fortran_order;
parse_npy_header(fp, word_size, true_data_shape, fortran_order);
assert(!fortran_order);
if (word_size != sizeof(T)) {
std::cout << "libnpy error: " << fname << " has word size " << word_size
<< " but npy_save appending data sized " << sizeof(T) << "\n";
assert(word_size == sizeof(T));
}
if (true_data_shape.size() != shape.size()) {
std::cout << "libnpy error: npy_save attempting to append misdimensioned data to " << fname
<< "\n";
assert(true_data_shape.size() == shape.size());
}
for (size_t i = 1; i < shape.size(); i++) {
if (shape[i] != true_data_shape[i]) {
std::cout << "libnpy error: npy_save attempting to append misshaped data to " << fname
<< "\n";
assert(shape[i] == true_data_shape[i]);
}
}
true_data_shape[0] += shape[0];
}
else {
fp = fopen(fname.c_str(), "wb");
true_data_shape = shape;
}
std::vector<char> header = create_npy_header<T>(true_data_shape);
size_t nels = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<size_t>());
fseek(fp, 0, SEEK_SET);
fwrite(&header[0], sizeof(char), header.size(), fp);
fseek(fp, 0, SEEK_END);
fwrite(data, sizeof(T), nels, fp);
fclose(fp);
}
template<typename T>
void npz_save(std::string zipname,
std::string fname,
const T *data,
const std::vector<size_t> &shape,
std::string mode = "w")
{
// first, append a .npy to the fname
fname += ".npy";
// now, on with the show
FILE *fp = NULL;
uint16_t nrecs = 0;
size_t global_header_offset = 0;
std::vector<char> global_header;
if (mode == "a")
fp = fopen(zipname.c_str(), "r+b");
if (fp) {
// zip file exists. we need to add a new npy file to it.
// first read the footer. this gives us the offset and size of the global header
// then read and store the global header.
// below, we will write the new data at the start of the global header, then append the
// global header and footer below it
size_t global_header_size;
parse_zip_footer(fp, nrecs, global_header_size, global_header_offset);
fseek(fp, global_header_offset, SEEK_SET);
global_header.resize(global_header_size);
size_t res = fread(&global_header[0], sizeof(char), global_header_size, fp);
if (res != global_header_size) {
throw std::runtime_error("npz_save: header read error while adding to existing zip");
}
fseek(fp, global_header_offset, SEEK_SET);
}
else {
fp = fopen(zipname.c_str(), "wb");
}
std::vector<char> npy_header = create_npy_header<T>(shape);
size_t nels = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<size_t>());
size_t nbytes = nels * sizeof(T) + npy_header.size();
// get the CRC of the data to be added
uint32_t crc = crc32(0L, (uint8_t *)&npy_header[0], npy_header.size());
crc = crc32(crc, (uint8_t *)data, nels * sizeof(T));
// build the local header
std::vector<char> local_header;
local_header += "PK"; // first part of sig
local_header += (uint16_t)0x0403; // second part of sig
local_header += (uint16_t)20; // min version to extract
local_header += (uint16_t)0; // general purpose bit flag
local_header += (uint16_t)0; // compression method
local_header += (uint16_t)0; // file last mod time
local_header += (uint16_t)0; // file last mod date
local_header += (uint32_t)crc; // crc
local_header += (uint32_t)nbytes; // compressed size
local_header += (uint32_t)nbytes; // uncompressed size
local_header += (uint16_t)fname.size(); // fname length
local_header += (uint16_t)0; // extra field length
local_header += fname;
// build global header
global_header += "PK"; // first part of sig
global_header += (uint16_t)0x0201; // second part of sig
global_header += (uint16_t)20; // version made by
global_header.insert(global_header.end(), local_header.begin() + 4, local_header.begin() + 30);
global_header += (uint16_t)0; // file comment length
global_header += (uint16_t)0; // disk number where file starts
global_header += (uint16_t)0; // internal file attributes
global_header += (uint32_t)0; // external file attributes
global_header += (uint32_t)
global_header_offset; // relative offset of local file header, since it begins where the
// global header used to begin
global_header += fname;
// build footer
std::vector<char> footer;
footer += "PK"; // first part of sig
footer += (uint16_t)0x0605; // second part of sig
footer += (uint16_t)0; // number of this disk
footer += (uint16_t)0; // disk where footer starts
footer += (uint16_t)(nrecs + 1); // number of records on this disk
footer += (uint16_t)(nrecs + 1); // total number of records
footer += (uint32_t)global_header.size(); // nbytes of global headers
footer += (uint32_t)(global_header_offset + nbytes +
local_header.size()); // offset of start of global headers, since global
// header now starts after newly written array
footer += (uint16_t)0; // zip file comment length
// write everything
fwrite(&local_header[0], sizeof(char), local_header.size(), fp);
fwrite(&npy_header[0], sizeof(char), npy_header.size(), fp);
fwrite(data, sizeof(T), nels, fp);
fwrite(&global_header[0], sizeof(char), global_header.size(), fp);
fwrite(&footer[0], sizeof(char), footer.size(), fp);
fclose(fp);
}
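// Round-trip sketch (illustrative; npz_load and NpyArray::data<T>() are the
// loader-side API of this header, used by the grid IO code later in this diff):
//
//   const size_t X = 32, Y = 32, Z = 32;
//   std::vector<float> d(X * Y * Z), h(X * Y * Z);
//   cnpy::npz_save("frames.npz", "density", &d[0], {X, Y, Z}, "w");
//   cnpy::npz_save("frames.npz", "heat", &h[0], {X, Y, Z}, "a");  // appends a second record
//   cnpy::npz_t fNpz = cnpy::npz_load("frames.npz");
//   const float *density = fNpz["density"].data<float>();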
template<typename T>
void npy_save(std::string fname, const std::vector<T> &data, std::string mode = "w")
{
std::vector<size_t> shape;
shape.push_back(data.size());
npy_save(fname, &data[0], shape, mode);
}
template<typename T>
void npz_save(std::string zipname,
std::string fname,
const std::vector<T> &data,
std::string mode = "w")
{
std::vector<size_t> shape;
shape.push_back(data.size());
npz_save(zipname, fname, &data[0], shape, mode);
}
template<typename T> std::vector<char> create_npy_header(const std::vector<size_t> &shape)
{
std::vector<char> dict;
dict += "{'descr': '";
dict += BigEndianTest();
dict += map_type(typeid(T));
dict += std::to_string(sizeof(T));
dict += "', 'fortran_order': False, 'shape': (";
dict += std::to_string(shape[0]);
for (size_t i = 1; i < shape.size(); i++) {
dict += ", ";
dict += std::to_string(shape[i]);
}
if (shape.size() == 1)
dict += ",";
dict += "), }";
// pad with spaces so that preamble+dict is a multiple of 16 bytes. preamble is 10 bytes. dict
// needs to end with \n
int remainder = 16 - (10 + dict.size()) % 16;
dict.insert(dict.end(), remainder, ' ');
dict.back() = '\n';
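// worked example (illustrative): a dict of, say, 54 chars gives
// remainder = 16 - (10 + 54) % 16 = 16, so a full 16-space block is appended
// and its last byte becomes '\n'; 10 + 70 = 80 remains a multiple of 16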
std::vector<char> header;
header += (char)0x93;
header += "NUMPY";
header += (char)0x01; // major version of numpy format
header += (char)0x00; // minor version of numpy format
header += (uint16_t)dict.size();
header.insert(header.end(), dict.begin(), dict.end());
return header;
}
} // namespace cnpy
#endif
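
For orientation, a small self-contained sketch of the header this last function produces. The 64^3 float grid is an arbitrary example, the '<f4' descr assumes a little-endian host, and the include path for cnpy.h is assumed:

#include <cstdio>
#include <vector>

#include "cnpy.h"

int main()
{
  // build a header for a hypothetical 64^3 float grid and print its dict,
  // e.g. {'descr': '<f4', 'fortran_order': False, 'shape': (64, 64, 64), }
  std::vector<size_t> shape = {64, 64, 64};
  std::vector<char> hdr = cnpy::create_npy_header<float>(shape);
  fwrite(&hdr[10], 1, hdr.size() - 10, stdout);  // skip the 10-byte preamble
  return 0;
}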

View File

@@ -248,14 +248,12 @@ template<class S> class Vector3D {
protected:
};
//! helper to check whether value is non-zero
template<class S> inline bool notZero(S v)
//! helper to check whether float/double value is non-zero
inline bool notZero(Real f)
{
return (std::abs(v) > VECTOR_EPSILON);
}
template<class S> inline bool notZero(Vector3D<S> v)
{
return (std::abs(norm(v)) > VECTOR_EPSILON);
if (std::abs(f) > VECTOR_EPSILON)
return true;
return false;
}
//************************************************************************
@@ -439,36 +437,6 @@ inline Real normSquare(const int v)
return square(v);
}
//! Compute sum of all components, allow use of int, Real too
template<class S> inline S sum(const S v)
{
return v;
}
template<class S> inline S sum(const Vector3D<S> &v)
{
return v.x + v.y + v.z;
}
//! Get absolute representation of vector, allow use of int, Real too
inline Real abs(const Real v)
{
return std::fabs(v);
}
inline int abs(const int v)
{
return std::abs(v);
}
template<class S> inline Vector3D<S> abs(const Vector3D<S> &v)
{
Vector3D<S> cp(v.x, v.y, v.z);
for (int i = 0; i < 3; ++i) {
if (cp[i] < 0)
cp[i] *= (-1.0);
}
return cp;
}
//! Returns a normalized vector
template<class S> inline Vector3D<S> getNormalized(const Vector3D<S> &v)
{

View File

@@ -27,10 +27,7 @@ extern "C" {
}
#endif
#if NO_CNPY != 1
# include "cnpy.h"
#endif
#include "cnpy.h"
#include "mantaio.h"
#include "grid.h"
#include "vector4d.h"
@@ -968,16 +965,12 @@ int readGrid4dUni(
};
void readGrid4dUniCleanup(void **fileHandle)
{
#if NO_ZLIB != 1
gzFile gzf = NULL;
if (fileHandle) {
gzf = (gzFile)(*fileHandle);
gzclose(gzf);
*fileHandle = NULL;
}
#else
debMsg("file format not supported without zlib", 1);
#endif
}
template<class T> int writeGrid4dRaw(const string &name, Grid4d<T> *grid)
@@ -1028,13 +1021,15 @@ template<class T> int readGrid4dRaw(const string &name, Grid4d<T> *grid)
template<class T> int writeGridNumpy(const string &name, Grid<T> *grid)
{
#if NO_ZLIB == 1
debMsg("file format not supported without zlib", 1);
return 0;
#endif
#if FLOATINGPOINT_PRECISION != 1
errMsg("writeGridNumpy: Double precision not yet supported");
return 0;
#endif
#if NO_CNPY != 1
// find suffix to differentiate between npy <-> npz , TODO: check for actual "npy" string
std::string::size_type idx;
bool bUseNpz = false;
@@ -1080,21 +1075,19 @@ template<class T> int writeGridNumpy(const string &name, Grid<T> *grid)
cnpy::npy_save(name, &grid[0], shape, "w");
}
return 1;
#else
debMsg("file format not supported without cnpy", 1);
return 0;
#endif
}
};
template<class T> int readGridNumpy(const string &name, Grid<T> *grid)
{
#if NO_ZLIB == 1
debMsg("file format not supported without zlib", 1);
return 0;
#endif
#if FLOATINGPOINT_PRECISION != 1
errMsg("readGridNumpy: Double precision not yet supported");
return 0;
#endif
#if NO_CNPY != 1
// find suffix to differentiate between npy <-> npz
std::string::size_type idx;
bool bUseNpz = false;
@@ -1151,11 +1144,7 @@ template<class T> int readGridNumpy(const string &name, Grid<T> *grid)
gridArr.data<T>(),
sizeof(T) * grid->getSizeX() * grid->getSizeY() * grid->getSizeZ());
return 1;
#else
debMsg("file format not supported without cnpy", 1);
return 0;
#endif
}
};
int writeGridsNumpy(const string &name, std::vector<PbClass *> *grids)
{
@@ -1174,12 +1163,13 @@ void getNpzFileSize(
const string &name, int &x, int &y, int &z, int *t = NULL, std::string *info = NULL)
{
x = y = z = 0;
#if NO_ZLIB != 1
debMsg("file format not supported without zlib", 1);
return;
#endif
#if FLOATINGPOINT_PRECISION != 1
errMsg("getNpzFileSize: Double precision not yet supported");
#endif
#if NO_CNPY != 1
// find suffix to differentiate between npy <-> npz
cnpy::NpyArray gridArr;
cnpy::npz_t fNpz = cnpy::npz_load(name);
@@ -1190,9 +1180,6 @@ void getNpzFileSize(
x = gridArr.shape[2];
if (t)
(*t) = 0; // unused for now
#else
debMsg("file format not supported without cnpy", 1);
#endif
}
Vec3 getNpzFileSize(const string &name)
{

View File

@@ -315,14 +315,10 @@ int readObjFile(const std::string &name, Mesh *mesh, bool append)
return 0;
}
const Real dx = mesh->getParent()->getDx();
const Vec3 gs = toVec3(mesh->getParent()->getGridSize());
if (!append)
mesh->clear();
int nodebase = mesh->numNodes();
int cntNodes = nodebase, cntNormals = nodebase;
int cnt = nodebase;
while (ifs.good() && !ifs.eof()) {
string id;
ifs >> id;
@@ -337,23 +333,19 @@ int readObjFile(const std::string &name, Mesh *mesh, bool append)
}
else if (id == "vn") {
// normals
if (mesh->numNodes() != cntNodes) {
if (!mesh->numNodes()) {
errMsg("invalid amount of nodes");
return 0;
}
Node *n = &mesh->nodes(cntNormals);
ifs >> n->normal.x >> n->normal.y >> n->normal.z;
cntNormals++;
Node n = mesh->nodes(cnt);
ifs >> n.normal.x >> n.normal.y >> n.normal.z;
cnt++;
}
else if (id == "v") {
// vertex
Node n;
ifs >> n.pos.x >> n.pos.y >> n.pos.z;
// convert to grid space
n.pos /= dx;
n.pos += gs * 0.5;
mesh->addNode(n);
cntNodes++;
}
else if (id == "g") {
// group
@@ -416,6 +408,7 @@ int writeObjFile(const string &name, Mesh *mesh)
// write normals
for (int i = 0; i < numVerts; i++) {
Vector3D<float> n = toVec3f(mesh->nodes(i).normal);
// normalize to unit cube around 0
ofs << "vn " << n.value[0] << " " << n.value[1] << " " << n.value[2] << " "
<< "\n";
}

View File

@@ -322,7 +322,6 @@ template<class T> int readPdataUni(const std::string &name, ParticleDataImpl<T>
UniPartHeader head;
assertMsg(gzread(gzf, &head, sizeof(UniPartHeader)) == sizeof(UniPartHeader),
"can't read file, no header present");
pdata->getParticleSys()->resize(head.dim); // ensure that parent particle system has same size
pdata->resize(head.dim);
assertMsg(head.dim == pdata->size(), "pdata size doesn't match");

View File

@@ -26,18 +26,17 @@
extern "C" {
# include <zlib.h>
}
#endif
#if defined(WIN32) || defined(_WIN32)
# include <windows.h>
# include <string>
#endif
# if defined(WIN32) || defined(_WIN32)
# include <windows.h>
# include <string>
# endif
using namespace std;
namespace Manta {
#if defined(WIN32) || defined(_WIN32)
# if defined(WIN32) || defined(_WIN32)
static wstring stringToWstring(const char *str)
{
const int length_wc = MultiByteToWideChar(CP_UTF8, 0, str, strlen(str), NULL, 0);
@@ -45,11 +44,10 @@ static wstring stringToWstring(const char *str)
MultiByteToWideChar(CP_UTF8, 0, str, strlen(str), &strWide[0], length_wc);
return strWide;
}
#endif // WIN32==1
# endif // WIN32==1
void *safeGzopen(const char *filename, const char *mode)
{
#if NO_ZLIB != 1
gzFile gzfile;
# if defined(WIN32) || defined(_WIN32)
@@ -60,11 +58,8 @@ void *safeGzopen(const char *filename, const char *mode)
# endif
return gzfile;
#else
debMsg("safeGzopen not supported without zlib", 1);
return nullptr;
#endif // NO_ZLIB != 1
}
#endif // NO_ZLIB != 1
#if defined(OPENVDB)
// Convert from OpenVDB value to Manta value.
@@ -114,4 +109,4 @@ template<> void convertTo(openvdb::Vec3s *out, Vec3 &in)
}
#endif // OPENVDB==1
} // namespace Manta
} // namespace
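
A caller-side sketch for the safeGzopen wrapper above (illustrative; assumes zlib is present, i.e. NO_ZLIB != 1, that mantaio.h declares the function, and minimal error handling):

#include <zlib.h>

#include "mantaio.h"  // assumed to declare Manta::safeGzopen

static void writeExample()
{
  // safeGzopen routes UTF-8 paths through the wide-char API on Windows
  gzFile gzf = (gzFile)Manta::safeGzopen("grid.uni", "wb1");  // "wb1": write, level-1 compression
  if (!gzf)
    return;
  const int magic = 42;
  gzwrite(gzf, &magic, sizeof(magic));
  gzclose(gzf);
}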

View File

@@ -1,3 +1,3 @@
#define MANTA_GIT_VERSION "commit 841bfd09c068dfb95637c0ec14fa78305286a433"
#define MANTA_GIT_VERSION "commit d80d3c821de74315ab26b5efd153d41477b976c4"

View File

@@ -213,36 +213,34 @@ Mesh &Mesh::operator=(const Mesh &o)
return *this;
}
int Mesh::load(string name, bool append)
void Mesh::load(string name, bool append)
{
if (name.find_last_of('.') == string::npos)
errMsg("file '" + name + "' does not have an extension");
string ext = name.substr(name.find_last_of('.'));
if (ext == ".gz") // assume bobj gz
return readBobjFile(name, this, append);
readBobjFile(name, this, append);
else if (ext == ".obj")
return readObjFile(name, this, append);
readObjFile(name, this, append);
else
errMsg("file '" + name + "' filetype not supported");
// don't always rebuild...
// rebuildCorners();
// rebuildLookup();
return 0;
}
int Mesh::save(string name)
void Mesh::save(string name)
{
if (name.find_last_of('.') == string::npos)
errMsg("file '" + name + "' does not have an extension");
string ext = name.substr(name.find_last_of('.'));
if (ext == ".obj")
return writeObjFile(name, this);
writeObjFile(name, this);
else if (ext == ".gz")
return writeBobjFile(name, this);
writeBobjFile(name, this);
else
errMsg("file '" + name + "' filetype not supported");
return 0;
}
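// Caller-side sketch (illustrative): the file extension picks the format.
//   mesh->load("scene.obj");        // ".obj" dispatches to readObjFile
//   mesh->save("surface.bobj.gz");  // ".gz" is assumed to be gzipped bobj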
void Mesh::fromShape(Shape &shape, bool append)
@@ -1341,8 +1339,8 @@ template<class T> void MeshDataImpl<T>::setSource(Grid<T> *grid, bool isMAC)
{
mpGridSource = grid;
mGridSourceMAC = isMAC;
if (grid && isMAC)
assertMsg(grid->getType() & GridBase::TypeMAC, "Given grid is not a valid MAC grid");
if (isMAC)
assertMsg(dynamic_cast<MACGrid *>(grid) != NULL, "Given grid is not a valid MAC grid");
}
template<class T> void MeshDataImpl<T>::initNewValue(IndexInt idx, Vec3 pos)
@@ -1373,40 +1371,38 @@ void Mesh::updateDataFields()
for (size_t i = 0; i < mNodes.size(); ++i) {
Vec3 pos = mNodes[i].pos;
for (IndexInt md = 0; md < (IndexInt)mMdataReal.size(); ++md)
mMdataReal[md]->initNewValue(i, pos);
mMdataReal[md]->initNewValue(i, mNodes[i].pos);
for (IndexInt md = 0; md < (IndexInt)mMdataVec3.size(); ++md)
mMdataVec3[md]->initNewValue(i, pos);
mMdataVec3[md]->initNewValue(i, mNodes[i].pos);
for (IndexInt md = 0; md < (IndexInt)mMdataInt.size(); ++md)
mMdataInt[md]->initNewValue(i, pos);
mMdataInt[md]->initNewValue(i, mNodes[i].pos);
}
}
template<typename T> int MeshDataImpl<T>::load(string name)
template<typename T> void MeshDataImpl<T>::load(string name)
{
if (name.find_last_of('.') == string::npos)
errMsg("file '" + name + "' does not have an extension");
string ext = name.substr(name.find_last_of('.'));
if (ext == ".uni")
return readMdataUni<T>(name, this);
readMdataUni<T>(name, this);
else if (ext == ".raw") // raw = uni for now
return readMdataUni<T>(name, this);
readMdataUni<T>(name, this);
else
errMsg("mesh data '" + name + "' filetype not supported for loading");
return 0;
}
template<typename T> int MeshDataImpl<T>::save(string name)
template<typename T> void MeshDataImpl<T>::save(string name)
{
if (name.find_last_of('.') == string::npos)
errMsg("file '" + name + "' does not have an extension");
string ext = name.substr(name.find_last_of('.'));
if (ext == ".uni")
return writeMdataUni<T>(name, this);
writeMdataUni<T>(name, this);
else if (ext == ".raw") // raw = uni for now
return writeMdataUni<T>(name, this);
writeMdataUni<T>(name, this);
else
errMsg("mesh data '" + name + "' filetype not supported for saving");
return 0;
}
// specializations

View File

@@ -240,8 +240,35 @@ class Mesh : public PbClass {
}
}
void fromShape(Shape &shape, bool append = false);
void load(std::string name, bool append = false);
static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
try {
PbArgs _args(_linargs, _kwds);
Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
pbPreparePlugin(pbo->getParent(), "Mesh::load", !noTiming);
PyObject *_retval = 0;
{
ArgLocker _lock;
std::string name = _args.get<std::string>("name", 0, &_lock);
bool append = _args.getOpt<bool>("append", 1, false, &_lock);
pbo->_args.copy(_args);
_retval = getPyNone();
pbo->load(name, append);
pbo->_args.check();
}
pbFinalizePlugin(pbo->getParent(), "Mesh::load", !noTiming);
return _retval;
}
catch (std::exception &e) {
pbSetError("Mesh::load", e.what());
return 0;
}
}
void fromShape(Shape &shape, bool append = false);
static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
try {
PbArgs _args(_linargs, _kwds);
@@ -267,8 +294,34 @@ class Mesh : public PbClass {
}
}
void save(std::string name);
static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
try {
PbArgs _args(_linargs, _kwds);
Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
pbPreparePlugin(pbo->getParent(), "Mesh::save", !noTiming);
PyObject *_retval = 0;
{
ArgLocker _lock;
std::string name = _args.get<std::string>("name", 0, &_lock);
pbo->_args.copy(_args);
_retval = getPyNone();
pbo->save(name);
pbo->_args.check();
}
pbFinalizePlugin(pbo->getParent(), "Mesh::save", !noTiming);
return _retval;
}
catch (std::exception &e) {
pbSetError("Mesh::save", e.what());
return 0;
}
}
void advectInGrid(FlagGrid &flags, MACGrid &vel, int integrationMode);
static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
try {
PbArgs _args(_linargs, _kwds);
@@ -296,7 +349,7 @@ class Mesh : public PbClass {
}
void scale(Vec3 s);
static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
try {
PbArgs _args(_linargs, _kwds);
@@ -322,7 +375,7 @@ class Mesh : public PbClass {
}
void offset(Vec3 o);
static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
try {
PbArgs _args(_linargs, _kwds);
@@ -348,7 +401,7 @@ class Mesh : public PbClass {
}
void rotate(Vec3 thetas);
static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
static PyObject *_W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
try {
PbArgs _args(_linargs, _kwds);
@@ -374,7 +427,7 @@ class Mesh : public PbClass {
}
void computeVelocity(Mesh &oldMesh, MACGrid &vel);
static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
static PyObject *_W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
try {
PbArgs _args(_linargs, _kwds);
@@ -400,58 +453,6 @@ class Mesh : public PbClass {
}
}
//! file io
int load(std::string name, bool append = false);
static PyObject *_W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
try {
PbArgs _args(_linargs, _kwds);
Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
pbPreparePlugin(pbo->getParent(), "Mesh::load", !noTiming);
PyObject *_retval = 0;
{
ArgLocker _lock;
std::string name = _args.get<std::string>("name", 0, &_lock);
bool append = _args.getOpt<bool>("append", 1, false, &_lock);
pbo->_args.copy(_args);
_retval = toPy(pbo->load(name, append));
pbo->_args.check();
}
pbFinalizePlugin(pbo->getParent(), "Mesh::load", !noTiming);
return _retval;
}
catch (std::exception &e) {
pbSetError("Mesh::load", e.what());
return 0;
}
}
int save(std::string name);
static PyObject *_W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
try {
PbArgs _args(_linargs, _kwds);
Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
pbPreparePlugin(pbo->getParent(), "Mesh::save", !noTiming);
PyObject *_retval = 0;
{
ArgLocker _lock;
std::string name = _args.get<std::string>("name", 0, &_lock);
pbo->_args.copy(_args);
_retval = toPy(pbo->save(name));
pbo->_args.check();
}
pbFinalizePlugin(pbo->getParent(), "Mesh::save", !noTiming);
return _retval;
}
catch (std::exception &e) {
pbSetError("Mesh::save", e.what());
return 0;
}
}
void computeLevelset(LevelsetGrid &levelset, Real sigma, Real cutoff = -1.);
static PyObject *_W_10(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
@@ -1563,7 +1564,7 @@ template<class T> class MeshDataImpl : public MeshDataBase {
}
//! file io
int save(const std::string name);
void save(const std::string name);
static PyObject *_W_41(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
try {
@@ -1576,7 +1577,8 @@ template<class T> class MeshDataImpl : public MeshDataBase {
ArgLocker _lock;
const std::string name = _args.get<std::string>("name", 0, &_lock);
pbo->_args.copy(_args);
_retval = toPy(pbo->save(name));
_retval = getPyNone();
pbo->save(name);
pbo->_args.check();
}
pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::save", !noTiming);
@@ -1588,7 +1590,7 @@ template<class T> class MeshDataImpl : public MeshDataBase {
}
}
int load(const std::string name);
void load(const std::string name);
static PyObject *_W_42(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
try {
@@ -1601,7 +1603,8 @@ template<class T> class MeshDataImpl : public MeshDataBase {
ArgLocker _lock;
const std::string name = _args.get<std::string>("name", 0, &_lock);
pbo->_args.copy(_args);
_retval = toPy(pbo->load(name));
_retval = getPyNone();
pbo->load(name);
pbo->_args.check();
}
pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::load", !noTiming);

View File

@@ -10,14 +10,14 @@ static const Pb::Register _R_12("Mesh", "Mesh", "PbClass");
template<> const char *Namify<Mesh>::S = "Mesh";
static const Pb::Register _R_13("Mesh", "Mesh", Mesh::_W_0);
static const Pb::Register _R_14("Mesh", "clear", Mesh::_W_1);
static const Pb::Register _R_15("Mesh", "fromShape", Mesh::_W_2);
static const Pb::Register _R_16("Mesh", "advectInGrid", Mesh::_W_3);
static const Pb::Register _R_17("Mesh", "scale", Mesh::_W_4);
static const Pb::Register _R_18("Mesh", "offset", Mesh::_W_5);
static const Pb::Register _R_19("Mesh", "rotate", Mesh::_W_6);
static const Pb::Register _R_20("Mesh", "computeVelocity", Mesh::_W_7);
static const Pb::Register _R_21("Mesh", "load", Mesh::_W_8);
static const Pb::Register _R_22("Mesh", "save", Mesh::_W_9);
static const Pb::Register _R_15("Mesh", "load", Mesh::_W_2);
static const Pb::Register _R_16("Mesh", "fromShape", Mesh::_W_3);
static const Pb::Register _R_17("Mesh", "save", Mesh::_W_4);
static const Pb::Register _R_18("Mesh", "advectInGrid", Mesh::_W_5);
static const Pb::Register _R_19("Mesh", "scale", Mesh::_W_6);
static const Pb::Register _R_20("Mesh", "offset", Mesh::_W_7);
static const Pb::Register _R_21("Mesh", "rotate", Mesh::_W_8);
static const Pb::Register _R_22("Mesh", "computeVelocity", Mesh::_W_9);
static const Pb::Register _R_23("Mesh", "computeLevelset", Mesh::_W_10);
static const Pb::Register _R_24("Mesh", "getLevelset", Mesh::_W_11);
static const Pb::Register _R_25("Mesh", "applyMeshToGrid", Mesh::_W_12);

View File

@@ -29,7 +29,7 @@ using namespace std;
namespace Manta {
ParticleBase::ParticleBase(FluidSolver *parent)
: PbClass(parent), mMaxParticles(0), mAllowCompress(true), mFreePdata(false)
: PbClass(parent), mAllowCompress(true), mFreePdata(false)
{
}
@@ -359,8 +359,8 @@ template<class T> void ParticleDataImpl<T>::setSource(Grid<T> *grid, bool isMAC)
{
mpGridSource = grid;
mGridSourceMAC = isMAC;
if (grid && isMAC)
assertMsg(grid->getType() & GridBase::TypeMAC, "Given grid is not a valid MAC grid");
if (isMAC)
assertMsg(dynamic_cast<MACGrid *>(grid) != NULL, "Given grid is not a valid MAC grid");
}
template<class T> void ParticleDataImpl<T>::initNewValue(IndexInt idx, Vec3 pos)

Some files were not shown because too many files have changed in this diff.