Compare commits

tmp-pointc...temp-remes (303 commits)

SHA1 |
---|
4a38856327 | |||
f495b583be | |||
ec17b45034 | |||
236ca8fbe8 | |||
b53c46d760 | |||
141deeefff | |||
22b8ac80d2 | |||
3886392430 | |||
13fa4b9898 | |||
1a5945e2c3 | |||
2bd2db120e | |||
2a186af2b3 | |||
0e9999a93e | |||
ba99b12287 | |||
698af2e774 | |||
0b50ea51cc | |||
f359672c35 | |||
c049fe7979 | |||
9cbf43801b | |||
5ccb0f46dd | |||
954aa441bf | |||
149bb0c26d | |||
87c0a38a39 | |||
766edbdf1f | |||
67857b5d9f | |||
cf123da640 | |||
4619562e2f | |||
ad586e7eeb | |||
50cb693f1d | |||
ad6cfcef01 | |||
706b690ec6 | |||
6f3c37a3ff | |||
c9975d19b5 | |||
8d35dceb14 | |||
c8b24af1b2 | |||
ee98dc8d0f | |||
37ad90c35f | |||
4ae24c0b57 | |||
be1969d2a4 | |||
b64ae920e4 | |||
65aba4fc7a | |||
634585aa68 | |||
6596891121 | |||
bfeff73252 | |||
60b80ce142 | |||
79440c37ac | |||
fa21ba0179 | |||
d998258fa6 | |||
ef17e7e51b | |||
85157e74b3 | |||
ad602a3f03 | |||
c8653e516d | |||
8a5da947dc | |||
944df0c784 | |||
62e53788aa | |||
67002402bd | |||
8438ef4d8e | |||
19afb0ea67 | |||
797820fc20 | |||
ce4c2b8f0b | |||
4b31f4ace7 | |||
e5cd770e9f | |||
06e84fef88 | |||
a51048a5d3 | |||
4d6ac72264 | |||
e77644982c | |||
e0400d0a1c | |||
0a5ef40c2c | |||
750899fd31 | |||
921c95ab5e | |||
72fc70879c | |||
6f3ba033f8 | |||
0e280b96ca | |||
5a1a4f4dd2 | ||
62a0e1d54f | ||
a4e3943a08 | ||
1fb9b6b1a1 | |||
0c0a9dabc7 | ||
4950022e7d | |||
980dd43bd4 | |||
47b6c33258 | |||
8f658ec27c | ||
ef52a9f791 | |||
077a2f7bbb | |||
5c28955d3a | ||
a197b81090 | |||
b5194520e3 | |||
83cb4f5f8f | |||
7fc9666c89 | |||
59e48d8fe4 | |||
cfa788cf9a | |||
607d745a79 | |||
878d191bae | |||
46b126a2c8 | |||
69afdf6970 | |||
4aa2a5481c | |||
a39da2cbca | |||
7aabd74222 | |||
b453e9e3a4 | |||
f8cc01595d | |||
8369adabc0 | |||
6c7e62ef9b | |||
17ebbdf1c1 | |||
c2b0c64843 | |||
696c23f284 | |||
cd6250c220 | |||
d42530824e | |||
6b6e2e742f | |||
727e569ac3 | |||
3fe5c0e5df | |||
5904953d22 | |||
269c184ac9 | |||
6b6970d43f | |||
bafe22e8ce | |||
6778ef4dcc | |||
1c0fa3aff6 | |||
494a6a0bf2 | |||
629726177a | |||
0bfec04cd5 | |||
ece6d8b25a | |||
b277a75a3b | |||
8c916a3aec | |||
fdc3f56234 | |||
c46663ad66 | |||
b31d3d1202 | |||
2bc1fd7e91 | |||
b88dd3b8e7 | |||
50fe187443 | |||
731d9f0bfa | |||
ed870f87b9 | |||
d40c39fca0 | |||
52543be9a6 | ||
a9644c812f | |||
a5ded0720c | |||
bb63ce9839 | |||
62a819202e | |||
7d85495ab9 | ||
4432209849 | |||
7484e45297 | |||
ccc2a7996b | |||
ed184050b6 | |||
4f4af0cbe1 | |||
9016a29f19 | |||
230f7d79ce | |||
cbf2278266 | |||
ac5f011e9c | |||
33ad95b677 | |||
579b180053 | |||
8cbbdedaf4 | |||
686ab4c940 | ||
6fbfa522e6 | |||
e4daac84b6 | |||
eb5cd628bd | |||
b219ae4498 | |||
615af4e239 | |||
9db4e44961 | |||
78e40ad21f | ||
6247ec7827 | |||
3884d78e49 | |||
5063820c9b | |||
8c90910dcc | |||
ccd2af43b3 | |||
52a7c724b5 | |||
71d0f6f896 | |||
b8601b64c7 | |||
c08d847488 | |||
fe49e4139c | |||
9eaa48f520 | |||
fd67b521b9 | |||
63db971a00 | |||
1dd381828f | |||
c48ccb38cb | |||
4e73ba2cab | |||
f24ccedc18 | |||
016253a648 | |||
fd08d6f391 | |||
d99b343b31 | |||
d9228c557b | |||
4c28b1c74e | |||
59975b0adf | |||
8dfc31f61f | |||
264b1e1e15 | |||
5993c53a6d | |||
c14e6cdd56 | |||
1f2edba1fb | |||
d19b3019d9 | |||
a6bd7777c2 | |||
35f1b3e43b | |||
faeaf53255 | |||
e2305690eb | |||
2d9eee15c5 | |||
0ef57d3fc0 | |||
122d0c8f4b | |||
dcf6e12a60 | |||
7e66616b7e | |||
6d160f199c | |||
99b424334d | |||
5b4a862f83 | |||
2840782d84 | |||
02c09773ea | |||
acad0d75b1 | |||
3d536f69f6 | |||
aa8279648e | |||
25582aef61 | |||
69d14c0ddb | |||
8b0df381d9 | |||
d8a6eec1a3 | |||
893eb30730 | |||
0cdc75ccd2 | |||
e3f8768d8a | |||
2679236047 | |||
3ef59121a4 | |||
9582797d4b | ||
bf2bb6db26 | |||
0e3d34e48f | |||
0fcd23a388 | |||
c5f61fbf48 | |||
5910dbdbf7 | |||
0a40c671b0 | |||
83e204702d | |||
618f31312c | |||
89a7a1c156 | |||
608d9b5aa1 | |||
b13bbb22e4 | |||
02a3720000 | |||
93f21ebb13 | |||
e7c1a32a78 | |||
a138bf57c9 | |||
09a483a3aa | ||
671c6d8afd | |||
07d70a76df | |||
5099cbeec9 | |||
436d38bb54 | |||
3481f6eaf2 | |||
71ac137662 | |||
8084b7e6e2 | |||
ab19abe223 | |||
bc85081156 | |||
56b8adb9e2 | |||
1494ad20b9 | |||
76bf050853 | |||
aa547ce88b | |||
f64710a518 | |||
4249d6f58e | |||
ada173ebfd | |||
72df7c23c4 | |||
c7aa0f9d74 | |||
279cc34343 | |||
83955d6769 | |||
99fda4d31e | |||
9b6088cb9d | |||
9363c4de06 | |||
66b48ad8fb | |||
f3ea6a5b28 | |||
3d8f8085fb | |||
b882f89fe3 | |||
2ddb3dc617 | |||
56aa5b0d8c | |||
065a00ee3e | |||
d4ce777aed | |||
d897228682 | |||
f6f93b5b12 | |||
7e0bf7a0f1 | |||
35bfe1702c | |||
4a9d903e2b | ||
9715ad5aca | ||
ad4928a171 | |||
75520894c7 | |||
2811de6e5c | |||
5b099a25d7 | |||
123e29c274 | |||
54abab53bf | |||
a3d90337b8 | |||
d11a2dfe71 | |||
75a09ba2ed | |||
974e36d7d0 | |||
058514aa0a | |||
d4d810f817 | |||
37a8c6d809 | |||
47885abbe6 | |||
cd8f3c9ee7 | |||
2c1edcf3ef | |||
24c846b2b4 | |||
987d14a3b2 | |||
8e16873086 | |||
5dcf60e053 | |||
f1104c2828 | |||
eb54624a9a | |||
5057b28a2f | |||
ba100c883c | |||
a082e49671 | |||
cda6da4957 | |||
ae4098e234 | |||
36e836d0e9 | |||
19d17b217a | |||
57ec1f37e9 | |||
e06a346458 | |||
10cacbbb15 | |||
613d314251 | |||
b3c34011c0 | |||
efc6f6e1ae | |||
5a11c8ba24 | |||
87f8949f0e |
@@ -23,12 +23,14 @@ Checks: >
  -readability-static-accessed-through-instance,
  -readability-redundant-declaration,
  -readability-qualified-auto,
  -readability-use-anyofallof,

  bugprone-*,
  -bugprone-narrowing-conversions,
  -bugprone-unhandled-self-assignment,
  -bugprone-branch-clone,
  -bugprone-macro-parentheses,
  -bugprone-reserved-identifier,

  -bugprone-sizeof-expression,
  -bugprone-integer-division,
@@ -367,7 +367,7 @@ option(WITH_CYCLES_CUDA_BINARIES "Build Cycles CUDA binaries" OFF)
 option(WITH_CYCLES_CUBIN_COMPILER "Build cubins with nvrtc based compiler instead of nvcc" OFF)
 option(WITH_CYCLES_CUDA_BUILD_SERIAL "Build cubins one after another (useful on machines with limited RAM)" OFF)
 mark_as_advanced(WITH_CYCLES_CUDA_BUILD_SERIAL)
-set(CYCLES_CUDA_BINARIES_ARCH sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 sm_70 sm_75 CACHE STRING "CUDA architectures to build binaries for")
+set(CYCLES_CUDA_BINARIES_ARCH sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 sm_70 sm_75 compute_75 CACHE STRING "CUDA architectures to build binaries for")
 mark_as_advanced(CYCLES_CUDA_BINARIES_ARCH)
 unset(PLATFORM_DEFAULT)
 option(WITH_CYCLES_LOGGING "Build Cycles with logging support" ON)
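The hunk above appends `compute_75` to the cached CUDA architecture list. As a rough note on what that buys (standard nvcc semantics, not stated in the diff itself): `sm_XX` entries produce device binaries (cubins) for specific GPU generations, while a `compute_XX` entry embeds PTX that newer drivers can JIT-compile, so kernels stay usable on GPUs released after this list was written. A minimal sketch of the resulting cache variable:

```cmake
# Cached, user-overridable list: sm_* entries are prebuilt cubins,
# compute_75 adds PTX for forward compatibility with newer GPUs.
set(CYCLES_CUDA_BINARIES_ARCH
    sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 sm_70 sm_75 compute_75
    CACHE STRING "CUDA architectures to build binaries for")

# Illustrative configure-time override (values are an example only):
#   cmake -DCYCLES_CUDA_BINARIES_ARCH="sm_70;sm_75;compute_75" <source-dir>
```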
@@ -900,7 +900,7 @@ if(MSVC)
 # endianess-detection and auto-setting is counterproductive
 # so we just set endianness according CMAKE_OSX_ARCHITECTURES

-elseif(CMAKE_OSX_ARCHITECTURES MATCHES i386 OR CMAKE_OSX_ARCHITECTURES MATCHES x86_64)
+elseif(CMAKE_OSX_ARCHITECTURES MATCHES i386 OR CMAKE_OSX_ARCHITECTURES MATCHES x86_64 OR CMAKE_OSX_ARCHITECTURES MATCHES arm64)
 add_definitions(-D__LITTLE_ENDIAN__)
 elseif(CMAKE_OSX_ARCHITECTURES MATCHES ppc OR CMAKE_OSX_ARCHITECTURES MATCHES ppc64)
 add_definitions(-D__BIG_ENDIAN__)
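Since arm64, like i386 and x86_64, is little-endian, the new architecture simply joins the existing `__LITTLE_ENDIAN__` branch. A small standalone sketch of the pattern, assuming `CMAKE_OSX_ARCHITECTURES` holds a single architecture:

```cmake
# Derive the byte-order define from the target architecture rather than
# probing the (possibly cross-compiling) host.
if(CMAKE_OSX_ARCHITECTURES MATCHES "i386|x86_64|arm64")
  add_definitions(-D__LITTLE_ENDIAN__)   # all little-endian targets
elseif(CMAKE_OSX_ARCHITECTURES MATCHES "ppc|ppc64")
  add_definitions(-D__BIG_ENDIAN__)      # legacy PowerPC builds
endif()
```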
@@ -1628,10 +1628,6 @@ endif()
|
||||
#-----------------------------------------------------------------------------
|
||||
# Libraries
|
||||
|
||||
if(WITH_GTESTS)
|
||||
include(GTestTesting)
|
||||
endif()
|
||||
|
||||
if(WITH_BLENDER)
|
||||
add_subdirectory(intern)
|
||||
add_subdirectory(extern)
|
||||
|
@@ -94,9 +94,11 @@ if(UNIX)
 else()
 include(cmake/pugixml.cmake)
 endif()
-include(cmake/ispc.cmake)
-include(cmake/openimagedenoise.cmake)
-include(cmake/embree.cmake)
+if((NOT APPLE) OR ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64"))
+include(cmake/ispc.cmake)
+include(cmake/openimagedenoise.cmake)
+include(cmake/embree.cmake)
+endif()
 if(NOT APPLE)
 include(cmake/xr_openxr.cmake)
 endif()
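ISPC, OpenImageDenoise and Embree are now only pulled into the dependency build on non-Apple platforms or on Apple x86_64, presumably because these projects did not yet ship arm64 support when this change was made; the same guard reappears in the harvest hunks further down. A sketch of the two halves of the pattern, using the `harvest()` helper already used by that file:

```cmake
# deps builder: skip x86-only libraries when targeting Apple arm64 ...
if((NOT APPLE) OR ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64"))
  include(cmake/openimagedenoise.cmake)
  include(cmake/embree.cmake)
endif()

# ... and only harvest their outputs under the same condition.
if((NOT APPLE) OR ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64"))
  harvest(openimagedenoise/lib openimagedenoise/lib "*.a")
  harvest(embree/lib embree/lib "*.a")
endif()
```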
@@ -30,6 +30,11 @@ else()
|
||||
set(CLANG_GENERATOR "Unix Makefiles")
|
||||
endif()
|
||||
|
||||
if(APPLE)
|
||||
set(CLANG_EXTRA_ARGS ${CLANG_EXTRA_ARGS}
|
||||
-DLIBXML2_LIBRARY=${LIBDIR}/xml2/lib/libxml2.a
|
||||
)
|
||||
endif()
|
||||
|
||||
ExternalProject_Add(external_clang
|
||||
URL ${CLANG_URI}
|
||||
|
@@ -42,7 +42,7 @@ if(BUILD_MODE STREQUAL Release)
|
||||
${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/glew/include/ ${HARVEST_TARGET}/opengl/include/ &&
|
||||
# tiff
|
||||
${CMAKE_COMMAND} -E copy ${LIBDIR}/tiff/lib/tiff.lib ${HARVEST_TARGET}/tiff/lib/libtiff.lib &&
|
||||
${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/tiff/include/ ${HARVEST_TARGET}/tiff/include/ &&
|
||||
${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/tiff/include/ ${HARVEST_TARGET}/tiff/include/
|
||||
DEPENDS
|
||||
)
|
||||
endif()
|
||||
@@ -132,8 +132,12 @@ harvest(openimageio/bin openimageio/bin "maketx")
|
||||
harvest(openimageio/bin openimageio/bin "oiiotool")
|
||||
harvest(openimageio/include openimageio/include "*")
|
||||
harvest(openimageio/lib openimageio/lib "*.a")
|
||||
harvest(openimagedenoise/include openimagedenoise/include "*")
|
||||
harvest(openimagedenoise/lib openimagedenoise/lib "*.a")
|
||||
if((NOT APPLE) OR ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64"))
|
||||
harvest(openimagedenoise/include openimagedenoise/include "*")
|
||||
harvest(openimagedenoise/lib openimagedenoise/lib "*.a")
|
||||
harvest(embree/include embree/include "*.h")
|
||||
harvest(embree/lib embree/lib "*.a")
|
||||
endif()
|
||||
harvest(openjpeg/include/openjpeg-2.3 openjpeg/include "*.h")
|
||||
harvest(openjpeg/lib openjpeg/lib "*.a")
|
||||
harvest(opensubdiv/include opensubdiv/include "*.h")
|
||||
@@ -168,8 +172,6 @@ harvest(vpx/lib ffmpeg/lib "*.a")
|
||||
harvest(webp/lib ffmpeg/lib "*.a")
|
||||
harvest(x264/lib ffmpeg/lib "*.a")
|
||||
harvest(xvidcore/lib ffmpeg/lib "*.a")
|
||||
harvest(embree/include embree/include "*.h")
|
||||
harvest(embree/lib embree/lib "*.a")
|
||||
harvest(usd/include usd/include "*.h")
|
||||
harvest(usd/lib/usd usd/lib/usd "*")
|
||||
harvest(usd/plugin usd/plugin "*")
|
||||
|
@@ -16,11 +16,17 @@
|
||||
#
|
||||
# ***** END GPL LICENSE BLOCK *****
|
||||
|
||||
if(APPLE AND "${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
|
||||
set(LLVM_TARGETS AArch64)
|
||||
else()
|
||||
set(LLVM_TARGETS X86)
|
||||
endif()
|
||||
|
||||
set(LLVM_EXTRA_ARGS
|
||||
-DLLVM_USE_CRT_RELEASE=MD
|
||||
-DLLVM_USE_CRT_DEBUG=MDd
|
||||
-DLLVM_INCLUDE_TESTS=OFF
|
||||
-DLLVM_TARGETS_TO_BUILD=X86
|
||||
-DLLVM_TARGETS_TO_BUILD=${LLVM_TARGETS}
|
||||
-DLLVM_INCLUDE_EXAMPLES=OFF
|
||||
-DLLVM_ENABLE_TERMINFO=OFF
|
||||
-DLLVM_BUILD_LLVM_C_DYLIB=OFF
|
||||
|
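The LLVM dependency now builds only the backend matching the target: AArch64 on Apple Silicon, X86 everywhere else. A condensed sketch of the resulting configuration, showing only the arguments touched by this hunk:

```cmake
# Select the single LLVM code-generation backend needed for the target.
if(APPLE AND "${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
  set(LLVM_TARGETS AArch64)
else()
  set(LLVM_TARGETS X86)
endif()

set(LLVM_EXTRA_ARGS
  -DLLVM_TARGETS_TO_BUILD=${LLVM_TARGETS}  # previously hard-coded to X86
  -DLLVM_INCLUDE_TESTS=OFF
  -DLLVM_INCLUDE_EXAMPLES=OFF
)
```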
@@ -38,6 +38,7 @@ ExternalProject_Add(external_numpy
|
||||
PREFIX ${BUILD_DIR}/numpy
|
||||
PATCH_COMMAND ${NUMPY_PATCH}
|
||||
CONFIGURE_COMMAND ""
|
||||
PATCH_COMMAND COMMAND ${PATCH_CMD} -p 1 -d ${BUILD_DIR}/numpy/src/external_numpy < ${PATCH_DIR}/numpy.diff
|
||||
LOG_BUILD 1
|
||||
BUILD_COMMAND ${PYTHON_BINARY} ${BUILD_DIR}/numpy/src/external_numpy/setup.py build ${NUMPY_BUILD_OPTION} install --old-and-unmanageable
|
||||
INSTALL_COMMAND ""
|
||||
|
@@ -30,6 +30,13 @@ set(OPENCOLORIO_EXTRA_ARGS
|
||||
-DOCIO_STATIC_JNIGLUE=OFF
|
||||
)
|
||||
|
||||
if(APPLE AND NOT("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64"))
|
||||
set(OPENCOLORIO_EXTRA_ARGS
|
||||
${OPENCOLORIO_EXTRA_ARGS}
|
||||
-DOCIO_USE_SSE=OFF
|
||||
)
|
||||
endif()
|
||||
|
||||
if(WIN32)
|
||||
set(OCIO_PATCH opencolorio_win.diff)
|
||||
set(OPENCOLORIO_EXTRA_ARGS
|
||||
|
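OpenColorIO gets its SSE path switched off whenever the Apple target is not x86_64, using the usual append-to-an-existing-argument-list idiom. A minimal sketch:

```cmake
# Append platform-specific arguments to the list assembled above.
if(APPLE AND NOT ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64"))
  set(OPENCOLORIO_EXTRA_ARGS
    ${OPENCOLORIO_EXTRA_ARGS}
    -DOCIO_USE_SSE=OFF   # no x86 SSE intrinsics on arm64
  )
endif()
```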
@@ -22,6 +22,7 @@ ExternalProject_Add(external_openmp
|
||||
DOWNLOAD_DIR ${DOWNLOAD_DIR}
|
||||
URL_HASH MD5=${OPENMP_HASH}
|
||||
PREFIX ${BUILD_DIR}/openmp
|
||||
PATCH_COMMAND ${PATCH_CMD} -p 1 -d ${BUILD_DIR}/openmp/src/external_openmp < ${PATCH_DIR}/openmp.diff
|
||||
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${LIBDIR}/openmp ${DEFAULT_CMAKE_FLAGS}
|
||||
INSTALL_COMMAND cd ${BUILD_DIR}/openmp/src/external_openmp-build && install_name_tool -id @executable_path/../Resources/lib/libomp.dylib runtime/src/libomp.dylib && make install
|
||||
INSTALL_DIR ${LIBDIR}/openmp
|
||||
|
@@ -117,16 +117,28 @@ else()
|
||||
COMMAND xcodebuild -version -sdk macosx SDKVersion
|
||||
OUTPUT_VARIABLE MACOSX_SDK_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
|
||||
set(OSX_ARCHITECTURES x86_64)
|
||||
set(OSX_DEPLOYMENT_TARGET 10.13)
|
||||
if(NOT CMAKE_OSX_ARCHITECTURES)
|
||||
execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCHITECTURE OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
message(STATUS "Detected native architecture ${ARCHITECTURE}.")
|
||||
set(CMAKE_OSX_ARCHITECTURES "${ARCHITECTURE}")
|
||||
endif()
|
||||
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64")
|
||||
set(OSX_DEPLOYMENT_TARGET 10.13)
|
||||
else()
|
||||
set(OSX_DEPLOYMENT_TARGET 11.00)
|
||||
endif()
|
||||
set(OSX_SYSROOT ${XCODE_DEV_PATH}/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk)
|
||||
|
||||
set(PLATFORM_CFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET}")
|
||||
set(PLATFORM_CXXFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET} -std=c++11 -stdlib=libc++")
|
||||
set(PLATFORM_LDFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET}")
|
||||
set(PLATFORM_BUILD_TARGET --build=x86_64-apple-darwin17.0.0) # OS X 10.13
|
||||
set(PLATFORM_CFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET} -arch ${CMAKE_OSX_ARCHITECTURES}")
|
||||
set(PLATFORM_CXXFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET} -std=c++11 -stdlib=libc++ -arch ${CMAKE_OSX_ARCHITECTURES}")
|
||||
set(PLATFORM_LDFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET} -arch ${CMAKE_OSX_ARCHITECTURES}")
|
||||
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64")
|
||||
set(PLATFORM_BUILD_TARGET --build=x86_64-apple-darwin17.0.0) # OS X 10.13
|
||||
else()
|
||||
set(PLATFORM_BUILD_TARGET --build=aarch64-apple-darwin20.0.0) # macOS 11.00
|
||||
endif()
|
||||
set(PLATFORM_CMAKE_FLAGS
|
||||
-DCMAKE_OSX_ARCHITECTURES:STRING=${OSX_ARCHITECTURES}
|
||||
-DCMAKE_OSX_ARCHITECTURES:STRING=${CMAKE_OSX_ARCHITECTURES}
|
||||
-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=${OSX_DEPLOYMENT_TARGET}
|
||||
-DCMAKE_OSX_SYSROOT:PATH=${OSX_SYSROOT}
|
||||
)
|
||||
|
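Taken together, the dependency builder's macOS platform file now detects the native architecture when none is given, raises the deployment target for arm64 (macOS 11 is the first release that runs on Apple Silicon), adds `-arch` to the compiler and linker flags, and picks the matching autotools build triple. A consolidated sketch, assuming `CMAKE_OSX_ARCHITECTURES` holds a single architecture and `OSX_SYSROOT` is set as in the hunk above:

```cmake
# Default to the machine we are running on when no architecture is given.
if(NOT CMAKE_OSX_ARCHITECTURES)
  execute_process(COMMAND uname -m
                  OUTPUT_VARIABLE ARCHITECTURE
                  OUTPUT_STRIP_TRAILING_WHITESPACE)
  set(CMAKE_OSX_ARCHITECTURES "${ARCHITECTURE}")
endif()

# arm64 binaries require macOS 11; x86_64 keeps the old 10.13 baseline.
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64")
  set(OSX_DEPLOYMENT_TARGET 10.13)
  set(PLATFORM_BUILD_TARGET --build=x86_64-apple-darwin17.0.0)
else()
  set(OSX_DEPLOYMENT_TARGET 11.00)
  set(PLATFORM_BUILD_TARGET --build=aarch64-apple-darwin20.0.0)
endif()

# Every dependency is compiled with the sysroot, minimum OS and target arch.
set(PLATFORM_CFLAGS
    "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET} -arch ${CMAKE_OSX_ARCHITECTURES}")
```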
@@ -22,6 +22,10 @@ set(PNG_EXTRA_ARGS
|
||||
-DPNG_STATIC=ON
|
||||
)
|
||||
|
||||
if(APPLE AND ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64"))
|
||||
set(PNG_EXTRA_ARGS ${PNG_EXTRA_ARGS} -DPNG_HARDWARE_OPTIMIZATIONS=ON -DPNG_ARM_NEON=on -DCMAKE_SYSTEM_PROCESSOR="aarch64")
|
||||
endif()
|
||||
|
||||
ExternalProject_Add(external_png
|
||||
URL ${PNG_URI}
|
||||
DOWNLOAD_DIR ${DOWNLOAD_DIR}
|
||||
|
@@ -69,12 +69,17 @@ else()
|
||||
export ac_cv_header_libintl_h=no &&
|
||||
export ac_cv_lib_intl_textdomain=no
|
||||
)
|
||||
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
|
||||
set(PYTHON_FUNC_CONFIGS ${PYTHON_FUNC_CONFIGS} && export PYTHON_DECIMAL_WITH_MACHINE=ansi64)
|
||||
endif()
|
||||
set(PYTHON_CONFIGURE_ENV ${CONFIGURE_ENV} && ${PYTHON_FUNC_CONFIGS})
|
||||
set(PYTHON_BINARY ${BUILD_DIR}/python/src/external_python/python.exe)
|
||||
set(PYTHON_PATCH ${PATCH_CMD} --verbose -p1 -d ${BUILD_DIR}/python/src/external_python < ${PATCH_DIR}/python_macos.diff)
|
||||
else()
|
||||
set(PYTHON_CONFIGURE_ENV ${CONFIGURE_ENV})
|
||||
set(PYTHON_BINARY ${BUILD_DIR}/python/src/external_python/python)
|
||||
endif()
|
||||
set(PYTHON_PATCH ${PATCH_CMD} --verbose -p1 -d ${BUILD_DIR}/python/src/external_python < ${PATCH_DIR}/python_linux.diff)
|
||||
endif()
|
||||
|
||||
set(PYTHON_CONFIGURE_EXTRA_ARGS "--with-openssl=${LIBDIR}/ssl")
|
||||
set(PYTHON_CFLAGS "-I${LIBDIR}/sqlite/include -I${LIBDIR}/bzip2/include -I${LIBDIR}/lzma/include -I${LIBDIR}/zlib/include")
|
||||
@@ -84,7 +89,6 @@ else()
|
||||
export CPPFLAGS=${PYTHON_CFLAGS} &&
|
||||
export LDFLAGS=${PYTHON_LDFLAGS} &&
|
||||
export PKG_CONFIG_PATH=${LIBDIR}/ffi/lib/pkgconfig)
|
||||
set(PYTHON_PATCH ${PATCH_CMD} --verbose -p1 -d ${BUILD_DIR}/python/src/external_python < ${PATCH_DIR}/python_linux.diff)
|
||||
|
||||
ExternalProject_Add(external_python
|
||||
URL ${PYTHON_URI}
|
||||
|
@@ -51,7 +51,7 @@ ExternalProject_Add(external_sqlite
|
||||
DOWNLOAD_DIR ${DOWNLOAD_DIR}
|
||||
URL_HASH SHA1=${SQLITE_HASH}
|
||||
PREFIX ${BUILD_DIR}/sqlite
|
||||
PATCH_COMMAND ${SQLITE_PATCH_CMD}
|
||||
PATCH_COMMAND ${PATCH_CMD} -p 1 -d ${BUILD_DIR}/sqlite/src/external_sqlite < ${PATCH_DIR}/sqlite.diff
|
||||
CONFIGURE_COMMAND ${SQLITE_CONFIGURE_ENV} && cd ${BUILD_DIR}/sqlite/src/external_sqlite/ && ${CONFIGURE_COMMAND} --prefix=${LIBDIR}/sqlite ${SQLITE_CONFIGURATION_ARGS}
|
||||
BUILD_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/sqlite/src/external_sqlite/ && make -j${MAKE_THREADS}
|
||||
INSTALL_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/sqlite/src/external_sqlite/ && make install
|
||||
|
@@ -20,7 +20,7 @@ set(SSL_CONFIGURE_COMMAND ./Configure)
|
||||
set(SSL_PATCH_CMD echo .)
|
||||
|
||||
if(APPLE)
|
||||
set(SSL_OS_COMPILER "blender-darwin-x86_64")
|
||||
set(SSL_OS_COMPILER "blender-darwin-${CMAKE_OSX_ARCHITECTURES}")
|
||||
else()
|
||||
if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
|
||||
set(SSL_EXTRA_ARGS enable-ec_nistp_64_gcc_128)
|
||||
|
@@ -12,4 +12,9 @@ my %targets = (
|
||||
inherit_from => [ "darwin64-x86_64-cc" ],
|
||||
cflags => add("-fPIC"),
|
||||
},
|
||||
"blender-darwin-arm64" => {
|
||||
inherit_from => [ "darwin-common" ],
|
||||
cxxflags => add("-fPIC -arch arm64"),
|
||||
cflags => add("-fPIC -arch arm64"),
|
||||
},
|
||||
);
|
||||
|
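These two OpenSSL hunks only work as a pair: the CMake side now asks for the configure target `blender-darwin-${CMAKE_OSX_ARCHITECTURES}`, so the Perl `Configurations` file above has to define a matching `blender-darwin-arm64` entry (inheriting from `darwin-common` and adding `-arch arm64`). On the CMake side the selection reduces to:

```cmake
# Pick the OpenSSL configure target by architecture; the name must exist
# in the custom Configurations file shipped with the dependency builder.
if(APPLE)
  set(SSL_OS_COMPILER "blender-darwin-${CMAKE_OSX_ARCHITECTURES}")  # e.g. blender-darwin-arm64
endif()
```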
@@ -27,6 +27,7 @@ ExternalProject_Add(external_theora
|
||||
DOWNLOAD_DIR ${DOWNLOAD_DIR}
|
||||
URL_HASH SHA256=${THEORA_HASH}
|
||||
PREFIX ${BUILD_DIR}/theora
|
||||
PATCH_COMMAND ${PATCH_CMD} -p 0 -d ${BUILD_DIR}/theora/src/external_theora < ${PATCH_DIR}/theora.diff
|
||||
CONFIGURE_COMMAND ${THEORA_CONFIGURE_ENV} && cd ${BUILD_DIR}/theora/src/external_theora/ && ${CONFIGURE_COMMAND} --prefix=${LIBDIR}/theora
|
||||
--disable-shared
|
||||
--enable-static
|
||||
|
@@ -24,7 +24,11 @@ if(WIN32)
|
||||
endif()
|
||||
else()
|
||||
if(APPLE)
|
||||
set(VPX_EXTRA_FLAGS --target=x86_64-darwin13-gcc)
|
||||
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
|
||||
set(VPX_EXTRA_FLAGS --target=generic-gnu)
|
||||
else()
|
||||
set(VPX_EXTRA_FLAGS --target=x86_64-darwin17-gcc)
|
||||
endif()
|
||||
else()
|
||||
set(VPX_EXTRA_FLAGS --target=generic-gnu)
|
||||
endif()
|
||||
|
@@ -22,9 +22,14 @@ endif()
|
||||
|
||||
|
||||
if(APPLE)
|
||||
set(X264_CONFIGURE_ENV
|
||||
export AS=${LIBDIR}/nasm/bin/nasm
|
||||
)
|
||||
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
|
||||
set(X264_EXTRA_ARGS ${X264_EXTRA_ARGS} "--disable-asm")
|
||||
set(X264_CONFIGURE_ENV echo .)
|
||||
else()
|
||||
set(X264_CONFIGURE_ENV
|
||||
export AS=${LIBDIR}/nasm/bin/nasm
|
||||
)
|
||||
endif()
|
||||
else()
|
||||
set(X264_CONFIGURE_ENV echo .)
|
||||
endif()
|
||||
|
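libvpx and x264 both lose their x86-specific settings on arm64: vpx falls back to the generic target, and x264 drops its nasm-based assembly entirely. A sketch of the x264 half, assuming the `LIBDIR` and `X264_EXTRA_ARGS` variables used elsewhere in that file:

```cmake
if(APPLE)
  if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
    # No x86 assembler on Apple Silicon: build the plain C code paths.
    set(X264_EXTRA_ARGS ${X264_EXTRA_ARGS} "--disable-asm")
    set(X264_CONFIGURE_ENV echo .)
  else()
    set(X264_CONFIGURE_ENV export AS=${LIBDIR}/nasm/bin/nasm)
  endif()
endif()
```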
@@ -91,3 +91,41 @@ diff -Naur external_blosc.orig/blosc/blosc.c external_blosc/blosc/blosc.c
|
||||
|
||||
|
||||
/* Some useful units */
|
||||
diff --git a/blosc/shuffle.c b/blosc/shuffle.c
|
||||
index 84b5095..23053b4 100644
|
||||
--- a/blosc/shuffle.c
|
||||
+++ b/blosc/shuffle.c
|
||||
@@ -490,12 +490,12 @@ void unshuffle(size_t bytesoftype, size_t blocksize,
|
||||
#else /* no __SSE2__ available */
|
||||
|
||||
void shuffle(size_t bytesoftype, size_t blocksize,
|
||||
- uint8_t* _src, uint8_t* _dest) {
|
||||
+ const uint8_t* _src, uint8_t* _dest) {
|
||||
_shuffle(bytesoftype, blocksize, _src, _dest);
|
||||
}
|
||||
|
||||
void unshuffle(size_t bytesoftype, size_t blocksize,
|
||||
- uint8_t* _src, uint8_t* _dest) {
|
||||
+ const uint8_t* _src, uint8_t* _dest) {
|
||||
_unshuffle(bytesoftype, blocksize, _src, _dest);
|
||||
}
|
||||
--- a/cmake/FindSSE.cmake
|
||||
+++ b/cmake/FindSSE.cmake
|
||||
@@ -49,6 +49,17 @@
|
||||
set(AVX_FOUND false CACHE BOOL "AVX available on host")
|
||||
ENDIF (AVX_TRUE)
|
||||
ELSEIF(CMAKE_SYSTEM_NAME MATCHES "Darwin")
|
||||
+ execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCHITECTURE OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
+ message(STATUS "Detected architecture ${ARCHITECTURE}")
|
||||
+ IF("${ARCHITECTURE}" STREQUAL "arm64")
|
||||
+ set(SSE2_FOUND false CACHE BOOL "SSE2 available on host")
|
||||
+ set(SSE3_FOUND false CACHE BOOL "SSE3 available on host")
|
||||
+ set(SSSE3_FOUND false CACHE BOOL "SSSE3 available on host")
|
||||
+ set(SSE4_1_FOUND false CACHE BOOL "SSE4.1 available on host")
|
||||
+ set(AVX_FOUND false CACHE BOOL "AVX available on host")
|
||||
+ return()
|
||||
+ ENDIF()
|
||||
+
|
||||
EXEC_PROGRAM("/usr/sbin/sysctl -n machdep.cpu.features" OUTPUT_VARIABLE
|
||||
CPUINFO)
|
||||
|
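The blosc patch above short-circuits `FindSSE.cmake` on Apple arm64: before the module shells out to `sysctl -n machdep.cpu.features` (which only reports x86 feature flags), it records every SSE/AVX level as unavailable and returns. A standalone sketch of that guard:

```cmake
# On arm64 macOS there are no x86 SIMD features to probe; record that and
# bail out of this included module before the sysctl-based detection runs.
if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
  execute_process(COMMAND uname -m
                  OUTPUT_VARIABLE ARCHITECTURE
                  OUTPUT_STRIP_TRAILING_WHITESPACE)
  if("${ARCHITECTURE}" STREQUAL "arm64")
    set(SSE2_FOUND   false CACHE BOOL "SSE2 available on host")
    set(SSE3_FOUND   false CACHE BOOL "SSE3 available on host")
    set(SSSE3_FOUND  false CACHE BOOL "SSSE3 available on host")
    set(SSE4_1_FOUND false CACHE BOOL "SSE4.1 available on host")
    set(AVX_FOUND    false CACHE BOOL "AVX available on host")
    return()  # skip the rest of FindSSE.cmake
  endif()
endif()
```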
27
build_files/build_environment/patches/numpy.diff
Normal file
@@ -0,0 +1,27 @@
|
||||
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
|
||||
index ba2b1f4..b10f7df 100644
|
||||
--- a/numpy/distutils/system_info.py
|
||||
+++ b/numpy/distutils/system_info.py
|
||||
@@ -2164,8 +2164,8 @@ class accelerate_info(system_info):
|
||||
'accelerate' in libraries):
|
||||
if intel:
|
||||
args.extend(['-msse3'])
|
||||
- else:
|
||||
- args.extend(['-faltivec'])
|
||||
+# else:
|
||||
+# args.extend(['-faltivec'])
|
||||
args.extend([
|
||||
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
|
||||
link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
|
||||
@@ -2174,8 +2174,8 @@ class accelerate_info(system_info):
|
||||
'veclib' in libraries):
|
||||
if intel:
|
||||
args.extend(['-msse3'])
|
||||
- else:
|
||||
- args.extend(['-faltivec'])
|
||||
+# else:
|
||||
+# args.extend(['-faltivec'])
|
||||
args.extend([
|
||||
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
|
||||
link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
|
||||
|
@@ -86,3 +86,47 @@ index 1f9a3ee..d151e9a 100644
|
||||
return isnan( value );
|
||||
#else
|
||||
return std::isnan(value);
|
||||
|
||||
|
||||
diff --git a/DAEValidator/library/src/Dae.cpp b/DAEValidator/library/src/Dae.cpp
|
||||
index 9256ee1..241ad67 100644
|
||||
--- a/DAEValidator/library/src/Dae.cpp
|
||||
+++ b/DAEValidator/library/src/Dae.cpp
|
||||
@@ -304,7 +304,7 @@ namespace opencollada
|
||||
if (auto root_node = root())
|
||||
{
|
||||
const auto & nodes = root_node.selectNodes("//*[@id]");
|
||||
- for (const auto & node : nodes)
|
||||
+ for (const auto node : nodes)
|
||||
{
|
||||
string id = node.attribute("id").value();
|
||||
mIdCache.insert(id);
|
||||
@@ -312,4 +312,4 @@ namespace opencollada
|
||||
}
|
||||
}
|
||||
}
|
||||
-}
|
||||
\ No newline at end of file
|
||||
+}
|
||||
diff --git a/DAEValidator/library/src/DaeValidator.cpp b/DAEValidator/library/src/DaeValidator.cpp
|
||||
index 715d903..24423ce 100644
|
||||
--- a/DAEValidator/library/src/DaeValidator.cpp
|
||||
+++ b/DAEValidator/library/src/DaeValidator.cpp
|
||||
@@ -162,7 +162,7 @@ namespace opencollada
|
||||
|
||||
// Find xsi:schemaLocation attributes in dae and try to validate against specified xsd documents
|
||||
const auto & elements = dae.root().selectNodes("//*[@xsi:schemaLocation]");
|
||||
- for (const auto & element : elements)
|
||||
+ for (const auto element : elements)
|
||||
{
|
||||
if (auto schemaLocation = element.attribute("schemaLocation"))
|
||||
{
|
||||
@@ -274,7 +274,7 @@ namespace opencollada
|
||||
int result = 0;
|
||||
map<string, size_t> ids;
|
||||
const auto & nodes = dae.root().selectNodes("//*[@id]");
|
||||
- for (const auto & node : nodes)
|
||||
+ for (const auto node : nodes)
|
||||
{
|
||||
string id = node.attribute("id").value();
|
||||
size_t line = node.line();
|
||||
|
23
build_files/build_environment/patches/openmp.diff
Normal file
@@ -0,0 +1,23 @@
|
||||
diff --git a/runtime/src/z_Linux_asm.S b/runtime/src/z_Linux_asm.S
|
||||
index 0d8885e..42aa5ad 100644
|
||||
--- a/runtime/src/z_Linux_asm.S
|
||||
+++ b/runtime/src/z_Linux_asm.S
|
||||
@@ -1540,10 +1540,12 @@ __kmp_unnamed_critical_addr:
|
||||
.comm .gomp_critical_user_,32,8
|
||||
.data
|
||||
.align 8
|
||||
- .global __kmp_unnamed_critical_addr
|
||||
-__kmp_unnamed_critical_addr:
|
||||
+ .global ___kmp_unnamed_critical_addr
|
||||
+___kmp_unnamed_critical_addr:
|
||||
.8byte .gomp_critical_user_
|
||||
- .size __kmp_unnamed_critical_addr,8
|
||||
+# if !(KMP_OS_DARWIN)
|
||||
+ .size ___kmp_unnamed_critical_addr,8
|
||||
+# endif
|
||||
#endif /* KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 */
|
||||
|
||||
#if KMP_OS_LINUX
|
||||
|
||||
|
||||
|
289
build_files/build_environment/patches/python_macos.diff
Normal file
@@ -0,0 +1,289 @@
|
||||
diff -ru a/Doc/library/ctypes.rst b/Doc/library/ctypes.rst
|
||||
--- a/Doc/library/ctypes.rst 2020-03-10 07:11:12.000000000 +0100
|
||||
+++ b/Doc/library/ctypes.rst 2020-07-14 08:10:10.000000000 +0200
|
||||
@@ -1551,6 +1551,13 @@
|
||||
value usable as argument (integer, string, ctypes instance). This allows
|
||||
defining adapters that can adapt custom objects as function parameters.
|
||||
|
||||
+ .. attribute:: variadic
|
||||
+
|
||||
+ Assign a boolean to specify that the function takes a variable number of
|
||||
+ arguments. This does not matter on most platforms, but for Apple arm64
|
||||
+ platforms variadic functions have a different calling convention than
|
||||
+ normal functions.
|
||||
+
|
||||
.. attribute:: errcheck
|
||||
|
||||
Assign a Python function or another callable to this attribute. The
|
||||
diff -ru a/Modules/_ctypes/_ctypes.c b/Modules/_ctypes/_ctypes.c
|
||||
--- a/Modules/_ctypes/_ctypes.c 2020-03-10 07:11:12.000000000 +0100
|
||||
+++ b/Modules/_ctypes/_ctypes.c 2020-07-14 08:14:41.000000000 +0200
|
||||
@@ -3175,6 +3175,35 @@
|
||||
}
|
||||
|
||||
static int
|
||||
+PyCFuncPtr_set_variadic(PyCFuncPtrObject *self, PyObject *ob, void *Py_UNUSED(ignored))
|
||||
+{
|
||||
+ StgDictObject *dict = PyObject_stgdict((PyObject *)self);
|
||||
+ assert(dict);
|
||||
+ int r = PyObject_IsTrue(ob);
|
||||
+ if (r == 1) {
|
||||
+ dict->flags |= FUNCFLAG_VARIADIC;
|
||||
+ return 0;
|
||||
+ } else if (r == 0) {
|
||||
+ dict->flags &= ~FUNCFLAG_VARIADIC;
|
||||
+ return 0;
|
||||
+ } else {
|
||||
+ return -1;
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+static PyObject *
|
||||
+PyCFuncPtr_get_variadic(PyCFuncPtrObject *self, void *Py_UNUSED(ignored))
|
||||
+{
|
||||
+ StgDictObject *dict = PyObject_stgdict((PyObject *)self);
|
||||
+ assert(dict); /* Cannot be NULL for PyCFuncPtrObject instances */
|
||||
+ if (dict->flags & FUNCFLAG_VARIADIC)
|
||||
+ Py_RETURN_TRUE;
|
||||
+ else
|
||||
+ Py_RETURN_FALSE;
|
||||
+}
|
||||
+
|
||||
+
|
||||
+static int
|
||||
PyCFuncPtr_set_argtypes(PyCFuncPtrObject *self, PyObject *ob, void *Py_UNUSED(ignored))
|
||||
{
|
||||
PyObject *converters;
|
||||
@@ -5632,6 +5661,7 @@
|
||||
PyModule_AddObject(m, "FUNCFLAG_USE_ERRNO", PyLong_FromLong(FUNCFLAG_USE_ERRNO));
|
||||
PyModule_AddObject(m, "FUNCFLAG_USE_LASTERROR", PyLong_FromLong(FUNCFLAG_USE_LASTERROR));
|
||||
PyModule_AddObject(m, "FUNCFLAG_PYTHONAPI", PyLong_FromLong(FUNCFLAG_PYTHONAPI));
|
||||
+ PyModule_AddObject(m, "FUNCFLAG_VARIADIC", PyLong_FromLong(FUNCFLAG_VARIADIC));
|
||||
PyModule_AddStringConstant(m, "__version__", "1.1.0");
|
||||
|
||||
PyModule_AddObject(m, "_memmove_addr", PyLong_FromVoidPtr(memmove));
|
||||
diff -ru a/Modules/_ctypes/callproc.c b/Modules/_ctypes/callproc.c
|
||||
--- a/Modules/_ctypes/callproc.c 2020-03-10 07:11:12.000000000 +0100
|
||||
+++ b/Modules/_ctypes/callproc.c 2020-07-14 08:18:33.000000000 +0200
|
||||
@@ -767,7 +767,8 @@
|
||||
ffi_type **atypes,
|
||||
ffi_type *restype,
|
||||
void *resmem,
|
||||
- int argcount)
|
||||
+ int argcount,
|
||||
+ int argtypecount)
|
||||
{
|
||||
PyThreadState *_save = NULL; /* For Py_BLOCK_THREADS and Py_UNBLOCK_THREADS */
|
||||
PyObject *error_object = NULL;
|
||||
@@ -793,15 +794,38 @@
|
||||
if ((flags & FUNCFLAG_CDECL) == 0)
|
||||
cc = FFI_STDCALL;
|
||||
#endif
|
||||
- if (FFI_OK != ffi_prep_cif(&cif,
|
||||
- cc,
|
||||
- argcount,
|
||||
- restype,
|
||||
- atypes)) {
|
||||
- PyErr_SetString(PyExc_RuntimeError,
|
||||
- "ffi_prep_cif failed");
|
||||
- return -1;
|
||||
- }
|
||||
+#if HAVE_FFI_PREP_CIF_VAR
|
||||
+ /* Everyone SHOULD set f.variadic=True on variadic function pointers, but
|
||||
+ * lots of existing code will not. If there's at least one arg and more
|
||||
+ * args are passed than are defined in the prototype, then it must be a
|
||||
+ * variadic function. */
|
||||
+ if ((flags & FUNCFLAG_VARIADIC) ||
|
||||
+ (argtypecount != 0 && argcount > argtypecount))
|
||||
+ {
|
||||
+ if (FFI_OK != ffi_prep_cif_var(&cif,
|
||||
+ cc,
|
||||
+ argtypecount,
|
||||
+ argcount,
|
||||
+ restype,
|
||||
+ atypes)) {
|
||||
+ PyErr_SetString(PyExc_RuntimeError,
|
||||
+ "ffi_prep_cif_var failed");
|
||||
+ return -1;
|
||||
+ }
|
||||
+ } else {
|
||||
+#endif
|
||||
+ if (FFI_OK != ffi_prep_cif(&cif,
|
||||
+ cc,
|
||||
+ argcount,
|
||||
+ restype,
|
||||
+ atypes)) {
|
||||
+ PyErr_SetString(PyExc_RuntimeError,
|
||||
+ "ffi_prep_cif failed");
|
||||
+ return -1;
|
||||
+ }
|
||||
+#if HAVE_FFI_PREP_CIF_VAR
|
||||
+ }
|
||||
+#endif
|
||||
|
||||
if (flags & (FUNCFLAG_USE_ERRNO | FUNCFLAG_USE_LASTERROR)) {
|
||||
error_object = _ctypes_get_errobj(&space);
|
||||
@@ -1185,9 +1209,8 @@
|
||||
|
||||
if (-1 == _call_function_pointer(flags, pProc, avalues, atypes,
|
||||
rtype, resbuf,
|
||||
- Py_SAFE_DOWNCAST(argcount,
|
||||
- Py_ssize_t,
|
||||
- int)))
|
||||
+ Py_SAFE_DOWNCAST(argcount, Py_ssize_t, int),
|
||||
+ Py_SAFE_DOWNCAST(argtype_count, Py_ssize_t, int)))
|
||||
goto cleanup;
|
||||
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
diff -ru a/Modules/_ctypes/ctypes.h b/Modules/_ctypes/ctypes.h
|
||||
--- a/Modules/_ctypes/ctypes.h 2020-03-10 07:11:12.000000000 +0100
|
||||
+++ b/Modules/_ctypes/ctypes.h 2020-07-14 08:30:53.000000000 +0200
|
||||
@@ -285,6 +285,7 @@
|
||||
#define FUNCFLAG_PYTHONAPI 0x4
|
||||
#define FUNCFLAG_USE_ERRNO 0x8
|
||||
#define FUNCFLAG_USE_LASTERROR 0x10
|
||||
+#define FUNCFLAG_VARIADIC 0x20
|
||||
|
||||
#define TYPEFLAG_ISPOINTER 0x100
|
||||
#define TYPEFLAG_HASPOINTER 0x200
|
||||
diff -ru a/configure b/configure
|
||||
--- a/configure 2020-03-10 07:11:12.000000000 +0100
|
||||
+++ b/configure 2020-07-14 08:03:27.000000000 +0200
|
||||
@@ -3374,7 +3374,7 @@
|
||||
# has no effect, don't bother defining them
|
||||
Darwin/[6789].*)
|
||||
define_xopen_source=no;;
|
||||
- Darwin/1[0-9].*)
|
||||
+ Darwin/[12][0-9].*)
|
||||
define_xopen_source=no;;
|
||||
# On AIX 4 and 5.1, mbstate_t is defined only when _XOPEN_SOURCE == 500 but
|
||||
# used in wcsnrtombs() and mbsnrtowcs() even if _XOPEN_SOURCE is not defined
|
||||
@@ -9251,6 +9251,9 @@
|
||||
ppc)
|
||||
MACOSX_DEFAULT_ARCH="ppc64"
|
||||
;;
|
||||
+ arm64)
|
||||
+ MACOSX_DEFAULT_ARCH="arm64"
|
||||
+ ;;
|
||||
*)
|
||||
as_fn_error $? "Unexpected output of 'arch' on OSX" "$LINENO" 5
|
||||
;;
|
||||
diff -ru a/configure.ac b/configure.ac
|
||||
--- a/configure.ac 2020-03-10 07:11:12.000000000 +0100
|
||||
+++ b/configure.ac 2020-07-14 08:03:27.000000000 +0200
|
||||
@@ -2456,6 +2456,9 @@
|
||||
ppc)
|
||||
MACOSX_DEFAULT_ARCH="ppc64"
|
||||
;;
|
||||
+ arm64)
|
||||
+ MACOSX_DEFAULT_ARCH="arm64"
|
||||
+ ;;
|
||||
*)
|
||||
AC_MSG_ERROR([Unexpected output of 'arch' on OSX])
|
||||
;;
|
||||
diff -ru a/setup.py b/setup.py
|
||||
--- a/setup.py 2020-03-10 07:11:12.000000000 +0100
|
||||
+++ b/setup.py 2020-07-14 08:28:12.000000000 +0200
|
||||
@@ -141,6 +141,13 @@
|
||||
os.unlink(tmpfile)
|
||||
|
||||
return MACOS_SDK_ROOT
|
||||
+
|
||||
+def is_macosx_at_least(vers):
|
||||
+ if host_platform == 'darwin':
|
||||
+ dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
|
||||
+ if dep_target:
|
||||
+ return tuple(map(int, dep_target.split('.'))) >= vers
|
||||
+ return False
|
||||
|
||||
def is_macosx_sdk_path(path):
|
||||
"""
|
||||
@@ -150,6 +157,13 @@
|
||||
or path.startswith('/System/')
|
||||
or path.startswith('/Library/') )
|
||||
|
||||
+def grep_headers_for(function, headers):
|
||||
+ for header in headers:
|
||||
+ with open(header, 'r') as f:
|
||||
+ if function in f.read():
|
||||
+ return True
|
||||
+ return False
|
||||
+
|
||||
def find_file(filename, std_dirs, paths):
|
||||
"""Searches for the directory where a given file is located,
|
||||
and returns a possibly-empty list of additional directories, or None
|
||||
@@ -1972,7 +1986,11 @@
|
||||
return True
|
||||
|
||||
def detect_ctypes(self, inc_dirs, lib_dirs):
|
||||
- self.use_system_libffi = False
|
||||
+ if not sysconfig.get_config_var("LIBFFI_INCLUDEDIR") and is_macosx_at_least((10,15)):
|
||||
+ self.use_system_libffi = True
|
||||
+ else:
|
||||
+ self.use_system_libffi = '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS")
|
||||
+
|
||||
include_dirs = []
|
||||
extra_compile_args = []
|
||||
extra_link_args = []
|
||||
@@ -2016,32 +2034,48 @@
|
||||
ext_test = Extension('_ctypes_test',
|
||||
sources=['_ctypes/_ctypes_test.c'],
|
||||
libraries=['m'])
|
||||
+ ffi_inc = sysconfig.get_config_var("LIBFFI_INCLUDEDIR")
|
||||
+ ffi_lib = None
|
||||
+
|
||||
self.extensions.extend([ext, ext_test])
|
||||
|
||||
if host_platform == 'darwin':
|
||||
- if '--with-system-ffi' not in sysconfig.get_config_var("CONFIG_ARGS"):
|
||||
+ if not self.use_system_libffi:
|
||||
return
|
||||
- # OS X 10.5 comes with libffi.dylib; the include files are
|
||||
- # in /usr/include/ffi
|
||||
- inc_dirs.append('/usr/include/ffi')
|
||||
-
|
||||
- ffi_inc = [sysconfig.get_config_var("LIBFFI_INCLUDEDIR")]
|
||||
- if not ffi_inc or ffi_inc[0] == '':
|
||||
- ffi_inc = find_file('ffi.h', [], inc_dirs)
|
||||
- if ffi_inc is not None:
|
||||
- ffi_h = ffi_inc[0] + '/ffi.h'
|
||||
+ ffi_in_sdk = os.path.join(macosx_sdk_root(), "usr/include/ffi")
|
||||
+ if os.path.exists(ffi_in_sdk):
|
||||
+ ffi_inc = ffi_in_sdk
|
||||
+ ffi_lib = 'ffi'
|
||||
+ else:
|
||||
+ # OS X 10.5 comes with libffi.dylib; the include files are
|
||||
+ # in /usr/include/ffi
|
||||
+ ffi_inc_dirs.append('/usr/include/ffi')
|
||||
+
|
||||
+ if not ffi_inc:
|
||||
+ found = find_file('ffi.h', [], ffi_inc_dirs)
|
||||
+ if found:
|
||||
+ ffi_inc = found[0]
|
||||
+ if ffi_inc:
|
||||
+ ffi_h = ffi_inc + '/ffi.h'
|
||||
if not os.path.exists(ffi_h):
|
||||
ffi_inc = None
|
||||
print('Header file {} does not exist'.format(ffi_h))
|
||||
- ffi_lib = None
|
||||
- if ffi_inc is not None:
|
||||
+ if ffi_lib is None and ffi_inc:
|
||||
for lib_name in ('ffi', 'ffi_pic'):
|
||||
if (self.compiler.find_library_file(lib_dirs, lib_name)):
|
||||
ffi_lib = lib_name
|
||||
break
|
||||
|
||||
if ffi_inc and ffi_lib:
|
||||
- ext.include_dirs.extend(ffi_inc)
|
||||
+ ffi_headers = glob(os.path.join(ffi_inc, '*.h'))
|
||||
+ if grep_headers_for('ffi_closure_alloc', ffi_headers):
|
||||
+ try:
|
||||
+ sources.remove('_ctypes/malloc_closure.c')
|
||||
+ except ValueError:
|
||||
+ pass
|
||||
+ if grep_headers_for('ffi_prep_cif_var', ffi_headers):
|
||||
+ ext.extra_compile_args.append("-DHAVE_FFI_PREP_CIF_VAR=1")
|
||||
+ ext.include_dirs.append(ffi_inc)
|
||||
ext.libraries.append(ffi_lib)
|
||||
self.use_system_libffi = True
|
||||
|
14
build_files/build_environment/patches/sqlite.diff
Normal file
@@ -0,0 +1,14 @@
|
||||
Only in external_sqlite_orig: config.log
|
||||
diff -ru external_sqlite_orig/config.sub external_sqlite/config.sub
|
||||
--- external_sqlite_orig/config.sub 2020-07-10 14:06:42.000000000 +0200
|
||||
+++ external_sqlite/config.sub 2020-07-10 14:10:24.000000000 +0200
|
||||
@@ -314,6 +314,7 @@
|
||||
# Recognize the basic CPU types with company name.
|
||||
580-* \
|
||||
| a29k-* \
|
||||
+ | aarch64-* \
|
||||
| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
|
||||
| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
|
||||
| alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
|
||||
Only in external_sqlite: mksourceid
|
||||
Only in external_sqlite: sqlite3session.h
|
18
build_files/build_environment/patches/theora.diff
Normal file
@@ -0,0 +1,18 @@
|
||||
--- config.sub
|
||||
+++ config.sub
|
||||
@@ -226,6 +226,7 @@
|
||||
# Some are omitted here because they have special meanings below.
|
||||
1750a | 580 \
|
||||
| a29k \
|
||||
+ | aarch64 \
|
||||
| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
|
||||
| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
|
||||
| arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \
|
||||
@@ -286,6 +287,7 @@
|
||||
# Recognize the basic CPU types with company name.
|
||||
580-* \
|
||||
| a29k-* \
|
||||
+ | aarch64-* \
|
||||
| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
|
||||
| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
|
||||
| alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
|
@@ -97,3 +97,36 @@ diff -Naur external_usd_base/cmake/macros/Public.cmake external_usd/cmake/macros
|
||||
endforeach()
|
||||
foreach(lib ${PXR_OBJECT_LIBS})
|
||||
set(objects "${objects};\$<TARGET_OBJECTS:${lib}>")
|
||||
|
||||
diff --git a/pxr/base/arch/align.h b/pxr/base/arch/align.h
|
||||
index f3cabf4..ebc8a69 100644
|
||||
--- a/pxr/base/arch/align.h
|
||||
+++ b/pxr/base/arch/align.h
|
||||
@@ -77,7 +77,11 @@ ArchAlignMemory(void *base)
|
||||
/// The size of a CPU cache line on the current processor architecture in bytes.
|
||||
///
|
||||
/// \hideinitializer
|
||||
+#if defined(ARCH_OS_DARWIN) && defined(ARCH_CPU_ARM)
|
||||
+#define ARCH_CACHE_LINE_SIZE 128
|
||||
+#else
|
||||
#define ARCH_CACHE_LINE_SIZE 64
|
||||
+#endif
|
||||
|
||||
///@}
|
||||
|
||||
diff --git a/pxr/base/arch/math.h b/pxr/base/arch/math.h
|
||||
index 3e66c37..64a052c 100644
|
||||
--- a/pxr/base/arch/math.h
|
||||
+++ b/pxr/base/arch/math.h
|
||||
@@ -42,7 +42,7 @@ PXR_NAMESPACE_OPEN_SCOPE
|
||||
/// \addtogroup group_arch_Math
|
||||
///@{
|
||||
|
||||
-#if defined (ARCH_CPU_INTEL) || defined(doxygen)
|
||||
+#if defined (ARCH_CPU_INTEL) || defined(ARCH_CPU_ARM) || defined(doxygen)
|
||||
|
||||
/// This is the smallest value e such that 1+e^2 == 1, using floats.
|
||||
/// True for all IEEE754 chipsets.
|
||||
|
||||
|
||||
|
||||
|
@@ -37,6 +37,11 @@ macro(BLENDER_SRC_GTEST_EX)
|
||||
if(WIN32)
|
||||
set(MANIFEST "${CMAKE_BINARY_DIR}/tests.exe.manifest")
|
||||
endif()
|
||||
|
||||
add_definitions(-DBLENDER_GFLAGS_NAMESPACE=${GFLAGS_NAMESPACE})
|
||||
add_definitions(${GFLAGS_DEFINES})
|
||||
add_definitions(${GLOG_DEFINES})
|
||||
|
||||
add_executable(${TARGET_NAME} ${ARG_SRC} ${MANIFEST})
|
||||
target_include_directories(${TARGET_NAME} PUBLIC "${TEST_INC}")
|
||||
target_include_directories(${TARGET_NAME} SYSTEM PUBLIC "${TEST_INC_SYS}")
|
||||
|
@@ -53,7 +53,7 @@ set(WITH_USD ON CACHE BOOL "" FORCE)
|
||||
set(WITH_MEM_JEMALLOC ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CYCLES_CUDA_BINARIES ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CYCLES_CUBIN_COMPILER OFF CACHE BOOL "" FORCE)
|
||||
set(CYCLES_CUDA_BINARIES_ARCH sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61;sm_70;sm_75 CACHE STRING "" FORCE)
|
||||
set(CYCLES_CUDA_BINARIES_ARCH sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61;sm_70;sm_75;compute_75 CACHE STRING "" FORCE)
|
||||
set(WITH_CYCLES_DEVICE_OPTIX ON CACHE BOOL "" FORCE)
|
||||
|
||||
# platform dependent options
|
||||
|
@@ -354,6 +354,42 @@ function(blender_add_lib
|
||||
set_property(GLOBAL APPEND PROPERTY BLENDER_LINK_LIBS ${name})
|
||||
endfunction()
|
||||
|
||||
# blender_add_test_lib() is used to define a test library. It is intended to be
|
||||
# called in tandem with blender_add_lib(). The test library will be linked into
|
||||
# the bf_gtest_runner_test executable (see tests/gtests/CMakeLists.txt).
|
||||
function(blender_add_test_lib
|
||||
name
|
||||
sources
|
||||
includes
|
||||
includes_sys
|
||||
library_deps
|
||||
)
|
||||
|
||||
add_cc_flags_custom_test(${name} PARENT_SCOPE)
|
||||
|
||||
# Otherwise external projects will produce warnings that we cannot fix.
|
||||
remove_strict_flags()
|
||||
|
||||
# This duplicates logic that's also in GTestTesting.cmake, macro BLENDER_SRC_GTEST_EX.
|
||||
# TODO(Sybren): deduplicate after the general approach in D7649 has been approved.
|
||||
LIST(APPEND includes
|
||||
${CMAKE_SOURCE_DIR}/tests/gtests
|
||||
)
|
||||
LIST(APPEND includes_sys
|
||||
${GLOG_INCLUDE_DIRS}
|
||||
${GFLAGS_INCLUDE_DIRS}
|
||||
${CMAKE_SOURCE_DIR}/extern/gtest/include
|
||||
${CMAKE_SOURCE_DIR}/extern/gmock/include
|
||||
)
|
||||
add_definitions(-DBLENDER_GFLAGS_NAMESPACE=${GFLAGS_NAMESPACE})
|
||||
add_definitions(${GFLAGS_DEFINES})
|
||||
add_definitions(${GLOG_DEFINES})
|
||||
|
||||
blender_add_lib__impl(${name} "${sources}" "${includes}" "${includes_sys}" "${library_deps}")
|
||||
|
||||
set_property(GLOBAL APPEND PROPERTY BLENDER_TEST_LIBS ${name})
|
||||
endfunction()
|
||||
|
||||
# Ninja only: assign 'heavy pool' to some targets that are especially RAM-consuming to build.
|
||||
function(setup_heavy_lib_pool)
|
||||
if(WITH_NINJA_POOL_JOBS AND NINJA_MAX_NUM_PARALLEL_COMPILE_HEAVY_JOBS)
|
||||
|
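`blender_add_test_lib()` registers a static test library that is later linked into the shared gtest runner executable instead of producing one binary per test suite. A hypothetical caller, with module and variable names invented purely for illustration:

```cmake
# Regular module library plus its unit-test companion (names are examples).
blender_add_lib(bf_example "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")

blender_add_test_lib(bf_example_tests
  "${TEST_SRC}"   # test sources
  "${INC}"        # regular include directories
  "${INC_SYS}"    # system include directories
  "bf_example"    # libraries the tests link against
)
```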
@@ -20,7 +20,11 @@
|
||||
|
||||
# Libraries configuration for Apple.
|
||||
|
||||
set(MACOSX_DEPLOYMENT_TARGET "10.13")
|
||||
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
|
||||
set(MACOSX_DEPLOYMENT_TARGET 11.00)
|
||||
else()
|
||||
set(MACOSX_DEPLOYMENT_TARGET 10.13)
|
||||
endif()
|
||||
|
||||
macro(find_package_wrapper)
|
||||
# do nothing, just satisfy the macro
|
||||
@@ -378,6 +382,12 @@ if(WITH_CYCLES_OSL)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
|
||||
set(WITH_CYCLES_EMBREE OFF)
|
||||
set(WITH_OPENIMAGEDENOISE OFF)
|
||||
set(WITH_CPU_SSE OFF)
|
||||
endif()
|
||||
|
||||
if(WITH_CYCLES_EMBREE)
|
||||
find_package(Embree 3.8.0 REQUIRED)
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -Xlinker -stack_size -Xlinker 0x100000")
|
||||
@@ -439,8 +449,8 @@ if(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64" OR CMAKE_OSX_ARCHITECTURES MATCHES "
|
||||
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -ftree-vectorize -fvariable-expansion-in-unroller")
|
||||
endif()
|
||||
else()
|
||||
set(CMAKE_C_FLAGS_RELEASE "-mdynamic-no-pic -fno-strict-aliasing")
|
||||
set(CMAKE_CXX_FLAGS_RELEASE "-mdynamic-no-pic -fno-strict-aliasing")
|
||||
set(CMAKE_C_FLAGS_RELEASE "-O2 -mdynamic-no-pic -fno-strict-aliasing")
|
||||
set(CMAKE_CXX_FLAGS_RELEASE "-O2 -mdynamic-no-pic -fno-strict-aliasing")
|
||||
endif()
|
||||
|
||||
if(${XCODE_VERSION} VERSION_EQUAL 5 OR ${XCODE_VERSION} VERSION_GREATER 5)
|
||||
|
@@ -21,8 +21,10 @@
|
||||
# Xcode and system configuration for Apple.
|
||||
|
||||
if(NOT CMAKE_OSX_ARCHITECTURES)
|
||||
set(CMAKE_OSX_ARCHITECTURES x86_64 CACHE STRING
|
||||
"Choose the architecture you want to build Blender for: i386, x86_64 or ppc"
|
||||
execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCHITECTURE OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
message(STATUS "Detected native architecture ${ARCHITECTURE}.")
|
||||
set(CMAKE_OSX_ARCHITECTURES ${ARCHITECTURE} CACHE STRING
|
||||
"Choose the architecture you want to build Blender for: arm64 or x86_64"
|
||||
FORCE)
|
||||
endif()
|
||||
|
||||
@@ -129,14 +131,21 @@ if(${CMAKE_GENERATOR} MATCHES "Xcode")
|
||||
endif()
|
||||
unset(OSX_SDKROOT)
|
||||
|
||||
|
||||
# 10.13 is our min. target, if you use higher sdk, weak linking happens
|
||||
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
|
||||
set(OSX_MIN_DEPLOYMENT_TARGET 11.00)
|
||||
else()
|
||||
set(OSX_MIN_DEPLOYMENT_TARGET 10.13)
|
||||
endif()
|
||||
|
||||
if(CMAKE_OSX_DEPLOYMENT_TARGET)
|
||||
if(${CMAKE_OSX_DEPLOYMENT_TARGET} VERSION_LESS 10.13)
|
||||
message(STATUS "Setting deployment target to 10.13, lower versions are not supported")
|
||||
set(CMAKE_OSX_DEPLOYMENT_TARGET "10.13" CACHE STRING "" FORCE)
|
||||
if(${CMAKE_OSX_DEPLOYMENT_TARGET} VERSION_LESS ${OSX_MIN_DEPLOYMENT_TARGET})
|
||||
message(STATUS "Setting deployment target to ${OSX_MIN_DEPLOYMENT_TARGET}, lower versions are not supported")
|
||||
set(CMAKE_OSX_DEPLOYMENT_TARGET "${OSX_MIN_DEPLOYMENT_TARGET}" CACHE STRING "" FORCE)
|
||||
endif()
|
||||
else()
|
||||
set(CMAKE_OSX_DEPLOYMENT_TARGET "10.13" CACHE STRING "" FORCE)
|
||||
set(CMAKE_OSX_DEPLOYMENT_TARGET "${OSX_MIN_DEPLOYMENT_TARGET}" CACHE STRING "" FORCE)
|
||||
endif()
|
||||
|
||||
if(NOT ${CMAKE_GENERATOR} MATCHES "Xcode")
|
||||
|
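For the Blender build itself, the default architecture is no longer hard-coded to x86_64 but taken from `uname -m`, and the minimum deployment target is clamped per architecture. A compact sketch of the clamp, assuming `OSX_MIN_DEPLOYMENT_TARGET` is chosen as in the hunk above:

```cmake
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
  set(OSX_MIN_DEPLOYMENT_TARGET 11.00)   # first macOS release with arm64 support
else()
  set(OSX_MIN_DEPLOYMENT_TARGET 10.13)
endif()

# Never allow a user-supplied deployment target below the supported minimum.
if(CMAKE_OSX_DEPLOYMENT_TARGET VERSION_LESS ${OSX_MIN_DEPLOYMENT_TARGET})
  message(STATUS "Setting deployment target to ${OSX_MIN_DEPLOYMENT_TARGET}, lower versions are not supported")
  set(CMAKE_OSX_DEPLOYMENT_TARGET "${OSX_MIN_DEPLOYMENT_TARGET}" CACHE STRING "" FORCE)
endif()
```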
@@ -48,7 +48,7 @@ def git_branch(git_command):
|
||||
def git_tag(git_command):
|
||||
# Get current tag name.
|
||||
try:
|
||||
tag = subprocess.check_output([git_command, "describe", "--exact-match"])
|
||||
tag = subprocess.check_output([git_command, "describe", "--exact-match"], stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError as e:
|
||||
return None
|
||||
|
||||
|
@@ -38,7 +38,7 @@ PROJECT_NAME = Blender
|
||||
# could be handy for archiving the generated documentation or if some version
|
||||
# control system is used.
|
||||
|
||||
PROJECT_NUMBER = "V2.90"
|
||||
PROJECT_NUMBER = "V2.91"
|
||||
|
||||
# Using the PROJECT_BRIEF tag one can provide an optional one line description
|
||||
# for a project that appears at the top of each page and should give viewer a
|
||||
|
@@ -225,6 +225,7 @@ else:
|
||||
"aud",
|
||||
"bgl",
|
||||
"blf",
|
||||
"bl_math",
|
||||
"imbuf",
|
||||
"bmesh",
|
||||
"bmesh.ops",
|
||||
@@ -1798,8 +1799,18 @@ def write_rst_contents(basepath):
|
||||
|
||||
standalone_modules = (
|
||||
# submodules are added in parent page
|
||||
"mathutils", "freestyle", "bgl", "blf", "imbuf", "gpu", "gpu_extras",
|
||||
"aud", "bpy_extras", "idprop.types", "bmesh",
|
||||
"aud",
|
||||
"bgl",
|
||||
"bl_math",
|
||||
"blf",
|
||||
"bmesh",
|
||||
"bpy_extras",
|
||||
"freestyle",
|
||||
"gpu",
|
||||
"gpu_extras",
|
||||
"idprop.types",
|
||||
"imbuf",
|
||||
"mathutils",
|
||||
)
|
||||
|
||||
for mod in standalone_modules:
|
||||
@@ -1951,6 +1962,7 @@ def write_rst_importable_modules(basepath):
|
||||
"mathutils.kdtree": "KDTree Utilities",
|
||||
"mathutils.interpolate": "Interpolation Utilities",
|
||||
"mathutils.noise": "Noise Utilities",
|
||||
"bl_math": "Additional Math Functions",
|
||||
"freestyle": "Freestyle Module",
|
||||
"freestyle.types": "Freestyle Types",
|
||||
"freestyle.predicates": "Freestyle Predicates",
|
||||
|
64
extern/mantaflow/CMakeLists.txt
vendored
@@ -31,19 +31,32 @@ if(MSVC_CLANG AND WITH_OPENMP AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "9.0.1
|
||||
remove_cc_flag("-fopenmp")
|
||||
endif()
|
||||
|
||||
set(MANTAVERSION "0.12")
|
||||
set(MANTAVERSION "0.13")
|
||||
|
||||
add_definitions(-DWITH_FLUID=1)
|
||||
|
||||
set(MANTA_DEP
|
||||
dependencies
|
||||
)
|
||||
# Compile Mantaflow dependencies too (e.g. cnpy for numpy file IO).
|
||||
# Make sure that dependencies exist before enabling this option by updating the source files in extern/
|
||||
set(WITH_MANTA_DEPENDENCIES 0)
|
||||
|
||||
# Enable Mantaflow numpy support
|
||||
set(WITH_MANTA_NUMPY 0)
|
||||
|
||||
if(NOT WITH_MANTA_DEPENDENCIES)
|
||||
add_definitions(-DNO_CNPY=1)
|
||||
endif()
|
||||
|
||||
set(MANTA_HLP
|
||||
helper
|
||||
)
|
||||
set(MANTA_PP
|
||||
preprocessed
|
||||
)
|
||||
if(WITH_MANTA_DEPENDENCIES)
|
||||
set(MANTA_DEP
|
||||
dependencies
|
||||
)
|
||||
endif()
|
||||
|
||||
if(WITH_TBB)
|
||||
add_definitions(-DTBB=1)
|
||||
@@ -62,6 +75,10 @@ if(WIN32)
|
||||
add_definitions(-D_USE_MATH_DEFINES)
|
||||
endif()
|
||||
|
||||
if(WITH_MANTA_NUMPY AND WITH_PYTHON_INSTALL_NUMPY)
|
||||
add_definitions(-DNUMPY=1)
|
||||
endif()
|
||||
|
||||
set(INC
|
||||
${MANTA_PP}
|
||||
${MANTA_PP}/fileio
|
||||
@@ -69,14 +86,25 @@ set(INC
|
||||
${MANTA_PP}/plugin
|
||||
${MANTA_HLP}/pwrapper
|
||||
${MANTA_HLP}/util
|
||||
${MANTA_DEP}/cnpy
|
||||
)
|
||||
|
||||
if(WITH_MANTA_DEPENDENCIES)
|
||||
list(APPEND INC
|
||||
${MANTA_DEP}/cnpy
|
||||
)
|
||||
endif()
|
||||
|
||||
set(INC_SYS
|
||||
${PYTHON_INCLUDE_DIRS}
|
||||
${ZLIB_INCLUDE_DIRS}
|
||||
)
|
||||
|
||||
if(WITH_MANTA_NUMPY AND WITH_PYTHON_INSTALL_NUMPY)
|
||||
list(APPEND INC_SYS
|
||||
${PYTHON_NUMPY_INCLUDE_DIRS}
|
||||
)
|
||||
endif()
|
||||
|
||||
if(WITH_TBB)
|
||||
list(APPEND INC_SYS
|
||||
${TBB_INCLUDE_DIRS}
|
||||
@@ -96,9 +124,6 @@ if(WITH_OPENVDB)
|
||||
endif()
|
||||
|
||||
set(SRC
|
||||
${MANTA_DEP}/cnpy/cnpy.cpp
|
||||
${MANTA_DEP}/cnpy/cnpy.h
|
||||
|
||||
${MANTA_PP}/commonkernels.h
|
||||
${MANTA_PP}/commonkernels.h.reg.cpp
|
||||
${MANTA_PP}/conjugategrad.cpp
|
||||
@@ -161,14 +186,10 @@ set(SRC
|
||||
${MANTA_PP}/plugin/initplugins.cpp
|
||||
${MANTA_PP}/plugin/kepsilon.cpp
|
||||
${MANTA_PP}/plugin/meshplugins.cpp
|
||||
# TODO (sebbas): add numpy to libraries
|
||||
# ${MANTA_PP}/plugin/numpyconvert.cpp
|
||||
${MANTA_PP}/plugin/pressure.cpp
|
||||
${MANTA_PP}/plugin/ptsplugins.cpp
|
||||
${MANTA_PP}/plugin/secondaryparticles.cpp
|
||||
${MANTA_PP}/plugin/surfaceturbulence.cpp
|
||||
# TODO (sebbas): add numpy to libraries
|
||||
# ${MANTA_PP}/plugin/tfplugins.cpp
|
||||
${MANTA_PP}/plugin/vortexplugins.cpp
|
||||
${MANTA_PP}/plugin/waveletturbulence.cpp
|
||||
${MANTA_PP}/plugin/waves.cpp
|
||||
@@ -193,9 +214,6 @@ set(SRC
|
||||
${MANTA_PP}/vortexsheet.h.reg.cpp
|
||||
|
||||
${MANTA_HLP}/pwrapper/manta.h
|
||||
# TODO (sebbas): add numpy to libraries
|
||||
# ${MANTA_HLP}/pwrapper/numpyWrap.cpp
|
||||
# ${MANTA_HLP}/pwrapper/numpyWrap.h
|
||||
${MANTA_HLP}/pwrapper/pclass.cpp
|
||||
${MANTA_HLP}/pwrapper/pclass.h
|
||||
${MANTA_HLP}/pwrapper/pconvert.cpp
|
||||
@@ -221,6 +239,22 @@ set(SRC
|
||||
${MANTA_HLP}/util/vectorbase.h
|
||||
)
|
||||
|
||||
if(WITH_MANTA_DEPENDENCIES)
|
||||
list(APPEND SRC
|
||||
${MANTA_DEP}/cnpy/cnpy.cpp
|
||||
${MANTA_DEP}/cnpy/cnpy.h
|
||||
)
|
||||
endif()
|
||||
|
||||
if(WITH_MANTA_NUMPY AND WITH_PYTHON_INSTALL_NUMPY)
|
||||
list(APPEND SRC
|
||||
${MANTA_PP}/plugin/numpyconvert.cpp
|
||||
${MANTA_PP}/plugin/tfplugins.cpp
|
||||
${MANTA_HLP}/pwrapper/numpyWrap.cpp
|
||||
${MANTA_HLP}/pwrapper/numpyWrap.h
|
||||
)
|
||||
endif()
|
||||
|
||||
set(LIB
|
||||
${PYTHON_LINKFLAGS}
|
||||
${PYTHON_LIBRARIES}
|
||||
|
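Mantaflow's bundled cnpy sources, its numpy bindings and the matching include path are now added only when the new `WITH_MANTA_DEPENDENCIES` / `WITH_MANTA_NUMPY` switches are enabled, instead of being compiled unconditionally. The conditional-list pattern, sketched on its own with the variables used in that file:

```cmake
set(WITH_MANTA_DEPENDENCIES 0)  # bundled cnpy for .npy/.npz file IO
set(WITH_MANTA_NUMPY 0)         # numpy-backed plugins

if(NOT WITH_MANTA_DEPENDENCIES)
  add_definitions(-DNO_CNPY=1)
endif()

if(WITH_MANTA_DEPENDENCIES)
  list(APPEND INC ${MANTA_DEP}/cnpy)
  list(APPEND SRC ${MANTA_DEP}/cnpy/cnpy.cpp ${MANTA_DEP}/cnpy/cnpy.h)
endif()

if(WITH_MANTA_NUMPY AND WITH_PYTHON_INSTALL_NUMPY)
  add_definitions(-DNUMPY=1)
  list(APPEND SRC ${MANTA_PP}/plugin/numpyconvert.cpp ${MANTA_PP}/plugin/tfplugins.cpp)
endif()
```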
23
extern/mantaflow/UPDATE.sh
vendored
@@ -13,6 +13,12 @@ BLENDER_INSTALLATION=/Users/sebbas/Developer/Blender/fluid-mantaflow
|
||||
# Try to check out Mantaflow repository before building?
|
||||
CLEAN_REPOSITORY=0
|
||||
|
||||
# Skip copying dependency files?
|
||||
WITH_DEPENDENCIES=0
|
||||
|
||||
# Build with numpy support?
|
||||
USE_NUMPY=0
|
||||
|
||||
# Choose which multithreading platform to use for Mantaflow preprocessing
|
||||
USE_OMP=0
|
||||
USE_TBB=1
|
||||
@@ -50,17 +56,21 @@ fi
|
||||
MANTA_BUILD_PATH=$MANTA_INSTALLATION/build_blender/
|
||||
mkdir -p $MANTA_BUILD_PATH
|
||||
cd $MANTA_BUILD_PATH
|
||||
cmake ../mantaflowgit -DGUI=OFF -DOPENMP=$USE_OMP -DTBB=$USE_TBB -DBLENDER=ON -DPREPDEBUG=ON && make -j8
|
||||
cmake ../mantaflowgit -DGUI=0 -DOPENMP=$USE_OMP -DTBB=$USE_TBB -DBLENDER=1 -DPREPDEBUG=1 -DNUMPY=$USE_NUMPY && make -j8
|
||||
|
||||
# ==================== 3) COPY MANTAFLOW FILES TO BLENDER ROOT ===========================
|
||||
|
||||
mkdir -p $BLENDER_INSTALLATION/blender/tmp/dependencies/ && cp -Rf $MANTA_INSTALLATION/mantaflowgit/dependencies/cnpy "$_"
|
||||
if [[ "$WITH_DEPENDENCIES" -eq "1" ]]; then
|
||||
mkdir -p $BLENDER_INSTALLATION/blender/tmp/dependencies/ && cp -Rf $MANTA_INSTALLATION/mantaflowgit/dependencies/cnpy "$_"
|
||||
fi
|
||||
mkdir -p $BLENDER_INSTALLATION/blender/tmp/helper/ && cp -Rf $MANTA_INSTALLATION/mantaflowgit/source/util "$_"
|
||||
mkdir -p $BLENDER_INSTALLATION/blender/tmp/helper/ && cp -Rf $MANTA_INSTALLATION/mantaflowgit/source/pwrapper "$_"
|
||||
mkdir -p $BLENDER_INSTALLATION/blender/tmp/preprocessed/ && cp -Rf $MANTA_INSTALLATION/build_blender/pp/source/. "$_"
|
||||
|
||||
# Remove some files that are not need in Blender
|
||||
rm $BLENDER_INSTALLATION/blender/tmp/dependencies/cnpy/example1.cpp
|
||||
if [[ "$WITH_DEPENDENCIES" -eq "1" ]]; then
|
||||
rm $BLENDER_INSTALLATION/blender/tmp/dependencies/cnpy/example1.cpp
|
||||
fi
|
||||
rm $BLENDER_INSTALLATION/blender/tmp/helper/pwrapper/pymain.cpp
|
||||
rm $BLENDER_INSTALLATION/blender/tmp/preprocessed/*.reg
|
||||
rm $BLENDER_INSTALLATION/blender/tmp/preprocessed/python/*.reg
|
||||
@@ -82,8 +92,13 @@ BLENDER_TMP_DEP=$BLENDER_TMP/dependencies
|
||||
BLENDER_TMP_HLP=$BLENDER_TMP/helper
|
||||
BLENDER_TMP_PP=$BLENDER_TMP/preprocessed
|
||||
|
||||
# Before moving new files, delete all existing file in the Blender repository
|
||||
rm -Rf $BLENDER_MANTA_EXTERN/dependencies $BLENDER_MANTA_EXTERN/helper $BLENDER_MANTA_EXTERN/preprocessed
|
||||
|
||||
# Move files from tmp dir to extern/
|
||||
cp -Rf $BLENDER_TMP_DEP $BLENDER_MANTA_EXTERN
|
||||
if [[ "$WITH_DEPENDENCIES" -eq "1" ]]; then
|
||||
cp -Rf $BLENDER_TMP_DEP $BLENDER_MANTA_EXTERN
|
||||
fi
|
||||
cp -Rf $BLENDER_TMP_HLP $BLENDER_MANTA_EXTERN
|
||||
cp -Rf $BLENDER_TMP_PP $BLENDER_MANTA_EXTERN
|
||||
|
||||
|
21
extern/mantaflow/dependencies/cnpy/LICENSE
vendored
@@ -1,21 +0,0 @@
|
||||
The MIT License
|
||||
|
||||
Copyright (c) Carl Rogers, 2011
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
385
extern/mantaflow/dependencies/cnpy/cnpy.cpp
vendored
@@ -1,385 +0,0 @@
|
||||
// Copyright (C) 2011 Carl Rogers
|
||||
// Released under MIT License
|
||||
// license available in LICENSE file, or at http://www.opensource.org/licenses/mit-license.php
|
||||
|
||||
#include "cnpy.h"
|
||||
#include <complex>
|
||||
#include <cstdlib>
|
||||
#include <algorithm>
|
||||
#include <cstring>
|
||||
#include <iomanip>
|
||||
#include <stdint.h>
|
||||
#include <stdexcept>
|
||||
#include <regex>
|
||||
|
||||
char cnpy::BigEndianTest()
|
||||
{
|
||||
int x = 1;
|
||||
return (((char *)&x)[0]) ? '<' : '>';
|
||||
}
|
||||
|
||||
char cnpy::map_type(const std::type_info &t)
|
||||
{
|
||||
if (t == typeid(float))
|
||||
return 'f';
|
||||
if (t == typeid(double))
|
||||
return 'f';
|
||||
if (t == typeid(long double))
|
||||
return 'f';
|
||||
|
||||
if (t == typeid(int))
|
||||
return 'i';
|
||||
if (t == typeid(char))
|
||||
return 'i';
|
||||
if (t == typeid(short))
|
||||
return 'i';
|
||||
if (t == typeid(long))
|
||||
return 'i';
|
||||
if (t == typeid(long long))
|
||||
return 'i';
|
||||
|
||||
if (t == typeid(unsigned char))
|
||||
return 'u';
|
||||
if (t == typeid(unsigned short))
|
||||
return 'u';
|
||||
if (t == typeid(unsigned long))
|
||||
return 'u';
|
||||
if (t == typeid(unsigned long long))
|
||||
return 'u';
|
||||
if (t == typeid(unsigned int))
|
||||
return 'u';
|
||||
|
||||
if (t == typeid(bool))
|
||||
return 'b';
|
||||
|
||||
if (t == typeid(std::complex<float>))
|
||||
return 'c';
|
||||
if (t == typeid(std::complex<double>))
|
||||
return 'c';
|
||||
if (t == typeid(std::complex<long double>))
|
||||
return 'c';
|
||||
|
||||
else
|
||||
return '?';
|
||||
}
|
||||
|
||||
template<> std::vector<char> &cnpy::operator+=(std::vector<char> &lhs, const std::string rhs)
|
||||
{
|
||||
lhs.insert(lhs.end(), rhs.begin(), rhs.end());
|
||||
return lhs;
|
||||
}
|
||||
|
||||
template<> std::vector<char> &cnpy::operator+=(std::vector<char> &lhs, const char *rhs)
|
||||
{
|
||||
// write in little endian
|
||||
size_t len = strlen(rhs);
|
||||
lhs.reserve(len);
|
||||
for (size_t byte = 0; byte < len; byte++) {
|
||||
lhs.push_back(rhs[byte]);
|
||||
}
|
||||
return lhs;
|
||||
}
|
||||
|
||||
void cnpy::parse_npy_header(unsigned char *buffer,
|
||||
size_t &word_size,
|
||||
std::vector<size_t> &shape,
|
||||
bool &fortran_order)
|
||||
{
|
||||
// std::string magic_string(buffer,6);
|
||||
uint8_t major_version = *reinterpret_cast<uint8_t *>(buffer + 6);
|
||||
uint8_t minor_version = *reinterpret_cast<uint8_t *>(buffer + 7);
|
||||
uint16_t header_len = *reinterpret_cast<uint16_t *>(buffer + 8);
|
||||
std::string header(reinterpret_cast<char *>(buffer + 9), header_len);
|
||||
|
||||
size_t loc1, loc2;
|
||||
|
||||
// fortran order
|
||||
loc1 = header.find("fortran_order") + 16;
|
||||
fortran_order = (header.substr(loc1, 4) == "True" ? true : false);
|
||||
|
||||
// shape
|
||||
loc1 = header.find("(");
|
||||
loc2 = header.find(")");
|
||||
|
||||
std::regex num_regex("[0-9][0-9]*");
|
||||
std::smatch sm;
|
||||
shape.clear();
|
||||
|
||||
std::string str_shape = header.substr(loc1 + 1, loc2 - loc1 - 1);
|
||||
while (std::regex_search(str_shape, sm, num_regex)) {
|
||||
shape.push_back(std::stoi(sm[0].str()));
|
||||
str_shape = sm.suffix().str();
|
||||
}
|
||||
|
||||
// endian, word size, data type
|
||||
// byte order code | stands for not applicable.
|
||||
// not sure when this applies except for byte array
|
||||
loc1 = header.find("descr") + 9;
|
||||
bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false);
|
||||
assert(littleEndian);
|
||||
|
||||
// char type = header[loc1+1];
|
||||
// assert(type == map_type(T));
|
||||
|
||||
std::string str_ws = header.substr(loc1 + 2);
|
||||
loc2 = str_ws.find("'");
|
||||
word_size = atoi(str_ws.substr(0, loc2).c_str());
|
||||
}
|
||||
|
||||
void cnpy::parse_npy_header(FILE *fp,
|
||||
size_t &word_size,
|
||||
std::vector<size_t> &shape,
|
||||
bool &fortran_order)
|
||||
{
|
||||
char buffer[256];
|
||||
size_t res = fread(buffer, sizeof(char), 11, fp);
|
||||
if (res != 11)
|
||||
throw std::runtime_error("parse_npy_header: failed fread");
|
||||
std::string header = fgets(buffer, 256, fp);
|
||||
assert(header[header.size() - 1] == '\n');
|
||||
|
||||
size_t loc1, loc2;
|
||||
|
||||
// fortran order
|
||||
loc1 = header.find("fortran_order");
|
||||
if (loc1 == std::string::npos)
|
||||
throw std::runtime_error("parse_npy_header: failed to find header keyword: 'fortran_order'");
|
||||
loc1 += 16;
|
||||
fortran_order = (header.substr(loc1, 4) == "True" ? true : false);
|
||||
|
||||
// shape
|
||||
loc1 = header.find("(");
|
||||
loc2 = header.find(")");
|
||||
if (loc1 == std::string::npos || loc2 == std::string::npos)
|
||||
throw std::runtime_error("parse_npy_header: failed to find header keyword: '(' or ')'");
|
||||
|
||||
std::regex num_regex("[0-9][0-9]*");
|
||||
std::smatch sm;
|
||||
shape.clear();
|
||||
|
||||
std::string str_shape = header.substr(loc1 + 1, loc2 - loc1 - 1);
|
||||
while (std::regex_search(str_shape, sm, num_regex)) {
|
||||
shape.push_back(std::stoi(sm[0].str()));
|
||||
str_shape = sm.suffix().str();
|
||||
}
|
||||
|
||||
// endian, word size, data type
|
||||
// byte order code | stands for not applicable.
|
||||
// not sure when this applies except for byte array
|
||||
loc1 = header.find("descr");
|
||||
if (loc1 == std::string::npos)
|
||||
throw std::runtime_error("parse_npy_header: failed to find header keyword: 'descr'");
|
||||
loc1 += 9;
|
||||
bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false);
|
||||
assert(littleEndian);
|
||||
|
||||
// char type = header[loc1+1];
|
||||
// assert(type == map_type(T));
|
||||
|
||||
std::string str_ws = header.substr(loc1 + 2);
|
||||
loc2 = str_ws.find("'");
|
||||
word_size = atoi(str_ws.substr(0, loc2).c_str());
|
||||
}
|
||||
|
||||
void cnpy::parse_zip_footer(FILE *fp,
|
||||
uint16_t &nrecs,
|
||||
size_t &global_header_size,
|
||||
size_t &global_header_offset)
|
||||
{
|
||||
std::vector<char> footer(22);
|
||||
fseek(fp, -22, SEEK_END);
|
||||
size_t res = fread(&footer[0], sizeof(char), 22, fp);
|
||||
if (res != 22)
|
||||
throw std::runtime_error("parse_zip_footer: failed fread");
|
||||
|
||||
uint16_t disk_no, disk_start, nrecs_on_disk, comment_len;
|
||||
disk_no = *(uint16_t *)&footer[4];
|
||||
disk_start = *(uint16_t *)&footer[6];
|
||||
nrecs_on_disk = *(uint16_t *)&footer[8];
|
||||
nrecs = *(uint16_t *)&footer[10];
|
||||
global_header_size = *(uint32_t *)&footer[12];
|
||||
global_header_offset = *(uint32_t *)&footer[16];
|
||||
comment_len = *(uint16_t *)&footer[20];
|
||||
|
||||
assert(disk_no == 0);
|
||||
assert(disk_start == 0);
|
||||
assert(nrecs_on_disk == nrecs);
|
||||
assert(comment_len == 0);
|
||||
}
|
||||
|
||||
cnpy::NpyArray load_the_npy_file(FILE *fp)
|
||||
{
|
||||
std::vector<size_t> shape;
|
||||
size_t word_size;
|
||||
bool fortran_order;
|
||||
cnpy::parse_npy_header(fp, word_size, shape, fortran_order);
|
||||
|
||||
cnpy::NpyArray arr(shape, word_size, fortran_order);
|
||||
size_t nread = fread(arr.data<char>(), 1, arr.num_bytes(), fp);
|
||||
if (nread != arr.num_bytes())
|
||||
throw std::runtime_error("load_the_npy_file: failed fread");
|
||||
return arr;
|
||||
}
|
||||
|
||||
cnpy::NpyArray load_the_npz_array(FILE *fp, uint32_t compr_bytes, uint32_t uncompr_bytes)
|
||||
{
|
||||
|
||||
std::vector<unsigned char> buffer_compr(compr_bytes);
|
||||
std::vector<unsigned char> buffer_uncompr(uncompr_bytes);
|
||||
size_t nread = fread(&buffer_compr[0], 1, compr_bytes, fp);
|
||||
if (nread != compr_bytes)
|
||||
throw std::runtime_error("load_the_npy_file: failed fread");
|
||||
|
||||
int err;
|
||||
z_stream d_stream;
|
||||
|
||||
d_stream.zalloc = Z_NULL;
|
||||
d_stream.zfree = Z_NULL;
|
||||
d_stream.opaque = Z_NULL;
|
||||
d_stream.avail_in = 0;
|
||||
d_stream.next_in = Z_NULL;
|
||||
err = inflateInit2(&d_stream, -MAX_WBITS);
|
||||
|
||||
d_stream.avail_in = compr_bytes;
|
||||
d_stream.next_in = &buffer_compr[0];
|
||||
d_stream.avail_out = uncompr_bytes;
|
||||
d_stream.next_out = &buffer_uncompr[0];
|
||||
|
||||
err = inflate(&d_stream, Z_FINISH);
|
||||
err = inflateEnd(&d_stream);
|
||||
|
||||
std::vector<size_t> shape;
|
||||
size_t word_size;
|
||||
bool fortran_order;
|
||||
cnpy::parse_npy_header(&buffer_uncompr[0], word_size, shape, fortran_order);
|
||||
|
||||
cnpy::NpyArray array(shape, word_size, fortran_order);
|
||||
|
||||
size_t offset = uncompr_bytes - array.num_bytes();
|
||||
memcpy(array.data<unsigned char>(), &buffer_uncompr[0] + offset, array.num_bytes());
|
||||
|
||||
return array;
|
||||
}
|
||||
|
||||
cnpy::npz_t cnpy::npz_load(std::string fname)
|
||||
{
|
||||
FILE *fp = fopen(fname.c_str(), "rb");
|
||||
|
||||
if (!fp) {
|
||||
throw std::runtime_error("npz_load: Error! Unable to open file " + fname + "!");
|
||||
}
|
||||
|
||||
cnpy::npz_t arrays;
|
||||
|
||||
while (1) {
|
||||
std::vector<char> local_header(30);
|
||||
size_t headerres = fread(&local_header[0], sizeof(char), 30, fp);
|
||||
if (headerres != 30)
|
||||
throw std::runtime_error("npz_load: failed fread");
|
||||
|
||||
// if we've reached the global header, stop reading
|
||||
if (local_header[2] != 0x03 || local_header[3] != 0x04)
|
||||
break;
|
||||
|
||||
// read in the variable name
|
||||
uint16_t name_len = *(uint16_t *)&local_header[26];
|
||||
std::string varname(name_len, ' ');
|
||||
size_t vname_res = fread(&varname[0], sizeof(char), name_len, fp);
|
||||
if (vname_res != name_len)
|
||||
throw std::runtime_error("npz_load: failed fread");
|
||||
|
||||
// erase the lagging .npy
|
||||
varname.erase(varname.end() - 4, varname.end());
|
||||
|
||||
// read in the extra field
|
||||
uint16_t extra_field_len = *(uint16_t *)&local_header[28];
|
||||
if (extra_field_len > 0) {
|
||||
std::vector<char> buff(extra_field_len);
|
||||
size_t efield_res = fread(&buff[0], sizeof(char), extra_field_len, fp);
|
||||
if (efield_res != extra_field_len)
|
||||
throw std::runtime_error("npz_load: failed fread");
|
||||
}
|
||||
|
||||
uint16_t compr_method = *reinterpret_cast<uint16_t *>(&local_header[0] + 8);
|
||||
uint32_t compr_bytes = *reinterpret_cast<uint32_t *>(&local_header[0] + 18);
|
||||
uint32_t uncompr_bytes = *reinterpret_cast<uint32_t *>(&local_header[0] + 22);
|
||||
|
||||
if (compr_method == 0) {
|
||||
arrays[varname] = load_the_npy_file(fp);
|
||||
}
|
||||
else {
|
||||
arrays[varname] = load_the_npz_array(fp, compr_bytes, uncompr_bytes);
|
||||
}
|
||||
}
|
||||
|
||||
fclose(fp);
|
||||
return arrays;
|
||||
}
|
||||
|
||||
cnpy::NpyArray cnpy::npz_load(std::string fname, std::string varname)
|
||||
{
|
||||
FILE *fp = fopen(fname.c_str(), "rb");
|
||||
|
||||
if (!fp)
|
||||
throw std::runtime_error("npz_load: Unable to open file " + fname);
|
||||
|
||||
while (1) {
|
||||
std::vector<char> local_header(30);
|
||||
size_t header_res = fread(&local_header[0], sizeof(char), 30, fp);
|
||||
if (header_res != 30)
|
||||
throw std::runtime_error("npz_load: failed fread");
|
||||
|
||||
// if we've reached the global header, stop reading
|
||||
if (local_header[2] != 0x03 || local_header[3] != 0x04)
|
||||
break;
|
||||
|
||||
// read in the variable name
|
||||
uint16_t name_len = *(uint16_t *)&local_header[26];
|
||||
std::string vname(name_len, ' ');
|
||||
size_t vname_res = fread(&vname[0], sizeof(char), name_len, fp);
|
||||
if (vname_res != name_len)
|
||||
throw std::runtime_error("npz_load: failed fread");
|
||||
vname.erase(vname.end() - 4, vname.end()); // erase the lagging .npy
|
||||
|
||||
// read in the extra field
|
||||
uint16_t extra_field_len = *(uint16_t *)&local_header[28];
|
||||
fseek(fp, extra_field_len, SEEK_CUR); // skip past the extra field
|
||||
|
||||
uint16_t compr_method = *reinterpret_cast<uint16_t *>(&local_header[0] + 8);
|
||||
uint32_t compr_bytes = *reinterpret_cast<uint32_t *>(&local_header[0] + 18);
|
||||
uint32_t uncompr_bytes = *reinterpret_cast<uint32_t *>(&local_header[0] + 22);
|
||||
|
||||
if (vname == varname) {
|
||||
NpyArray array = (compr_method == 0) ? load_the_npy_file(fp) :
|
||||
load_the_npz_array(fp, compr_bytes, uncompr_bytes);
|
||||
fclose(fp);
|
||||
return array;
|
||||
}
|
||||
else {
|
||||
// skip past the data
|
||||
// uint32_t size = *(uint32_t*) &local_header[22];
|
||||
uint32_t size = *(uint32_t *)&local_header[18]; // using index 18 instead of 22 enables
|
||||
// support for compressed data
|
||||
fseek(fp, size, SEEK_CUR);
|
||||
}
|
||||
}
|
||||
|
||||
fclose(fp);
|
||||
|
||||
// if we get here, we haven't found the variable in the file
|
||||
throw std::runtime_error("npz_load: Variable name " + varname + " not found in " + fname);
|
||||
}
|
||||
|
||||
cnpy::NpyArray cnpy::npy_load(std::string fname)
|
||||
{
|
||||
|
||||
FILE *fp = fopen(fname.c_str(), "rb");
|
||||
|
||||
if (!fp)
|
||||
throw std::runtime_error("npy_load: Unable to open file " + fname);
|
||||
|
||||
NpyArray arr = load_the_npy_file(fp);
|
||||
|
||||
fclose(fp);
|
||||
return arr;
|
||||
}
|
310  extern/mantaflow/dependencies/cnpy/cnpy.h  vendored
@@ -1,310 +0,0 @@
|
||||
// Copyright (C) 2011 Carl Rogers
|
||||
// Released under MIT License
|
||||
// license available in LICENSE file, or at http://www.opensource.org/licenses/mit-license.php
|
||||
|
||||
#ifndef LIBCNPY_H_
|
||||
#define LIBCNPY_H_
|
||||
|
||||
#include <string>
|
||||
#include <stdexcept>
|
||||
#include <sstream>
|
||||
#include <vector>
|
||||
#include <cstdio>
|
||||
#include <typeinfo>
|
||||
#include <iostream>
|
||||
#include <cassert>
|
||||
#include <zlib.h>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <stdint.h>
|
||||
#include <numeric>
|
||||
|
||||
namespace cnpy {
|
||||
|
||||
struct NpyArray {
|
||||
NpyArray(const std::vector<size_t> &_shape, size_t _word_size, bool _fortran_order)
|
||||
: shape(_shape), word_size(_word_size), fortran_order(_fortran_order)
|
||||
{
|
||||
num_vals = 1;
|
||||
for (size_t i = 0; i < shape.size(); i++)
|
||||
num_vals *= shape[i];
|
||||
data_holder = std::shared_ptr<std::vector<char>>(new std::vector<char>(num_vals * word_size));
|
||||
}
|
||||
|
||||
NpyArray() : shape(0), word_size(0), fortran_order(0), num_vals(0)
|
||||
{
|
||||
}
|
||||
|
||||
template<typename T> T *data()
|
||||
{
|
||||
return reinterpret_cast<T *>(&(*data_holder)[0]);
|
||||
}
|
||||
|
||||
template<typename T> const T *data() const
|
||||
{
|
||||
return reinterpret_cast<T *>(&(*data_holder)[0]);
|
||||
}
|
||||
|
||||
template<typename T> std::vector<T> as_vec() const
|
||||
{
|
||||
const T *p = data<T>();
|
||||
return std::vector<T>(p, p + num_vals);
|
||||
}
|
||||
|
||||
size_t num_bytes() const
|
||||
{
|
||||
return data_holder->size();
|
||||
}
|
||||
|
||||
std::shared_ptr<std::vector<char>> data_holder;
|
||||
std::vector<size_t> shape;
|
||||
size_t word_size;
|
||||
bool fortran_order;
|
||||
size_t num_vals;
|
||||
};
|
||||
|
||||
using npz_t = std::map<std::string, NpyArray>;
|
||||
|
||||
char BigEndianTest();
|
||||
char map_type(const std::type_info &t);
|
||||
template<typename T> std::vector<char> create_npy_header(const std::vector<size_t> &shape);
|
||||
void parse_npy_header(FILE *fp,
|
||||
size_t &word_size,
|
||||
std::vector<size_t> &shape,
|
||||
bool &fortran_order);
|
||||
void parse_npy_header(unsigned char *buffer,
|
||||
size_t &word_size,
|
||||
std::vector<size_t> &shape,
|
||||
bool &fortran_order);
|
||||
void parse_zip_footer(FILE *fp,
|
||||
uint16_t &nrecs,
|
||||
size_t &global_header_size,
|
||||
size_t &global_header_offset);
|
||||
npz_t npz_load(std::string fname);
|
||||
NpyArray npz_load(std::string fname, std::string varname);
|
||||
NpyArray npy_load(std::string fname);
|
||||
|
||||
template<typename T> std::vector<char> &operator+=(std::vector<char> &lhs, const T rhs)
|
||||
{
|
||||
// write in little endian
|
||||
for (size_t byte = 0; byte < sizeof(T); byte++) {
|
||||
char val = *((char *)&rhs + byte);
|
||||
lhs.push_back(val);
|
||||
}
|
||||
return lhs;
|
||||
}
|
||||
|
||||
template<> std::vector<char> &operator+=(std::vector<char> &lhs, const std::string rhs);
|
||||
template<> std::vector<char> &operator+=(std::vector<char> &lhs, const char *rhs);
|
||||
|
||||
template<typename T>
|
||||
void npy_save(std::string fname,
|
||||
const T *data,
|
||||
const std::vector<size_t> shape,
|
||||
std::string mode = "w")
|
||||
{
|
||||
FILE *fp = NULL;
|
||||
std::vector<size_t> true_data_shape; // if appending, the shape of existing + new data
|
||||
|
||||
if (mode == "a")
|
||||
fp = fopen(fname.c_str(), "r+b");
|
||||
|
||||
if (fp) {
|
||||
// file exists. we need to append to it. read the header, modify the array size
|
||||
size_t word_size;
|
||||
bool fortran_order;
|
||||
parse_npy_header(fp, word_size, true_data_shape, fortran_order);
|
||||
assert(!fortran_order);
|
||||
|
||||
if (word_size != sizeof(T)) {
|
||||
std::cout << "libnpy error: " << fname << " has word size " << word_size
|
||||
<< " but npy_save appending data sized " << sizeof(T) << "\n";
|
||||
assert(word_size == sizeof(T));
|
||||
}
|
||||
if (true_data_shape.size() != shape.size()) {
|
||||
std::cout << "libnpy error: npy_save attempting to append misdimensioned data to " << fname
|
||||
<< "\n";
|
||||
assert(true_data_shape.size() != shape.size());
|
||||
}
|
||||
|
||||
for (size_t i = 1; i < shape.size(); i++) {
|
||||
if (shape[i] != true_data_shape[i]) {
|
||||
std::cout << "libnpy error: npy_save attempting to append misshaped data to " << fname
|
||||
<< "\n";
|
||||
assert(shape[i] == true_data_shape[i]);
|
||||
}
|
||||
}
|
||||
true_data_shape[0] += shape[0];
|
||||
}
|
||||
else {
|
||||
fp = fopen(fname.c_str(), "wb");
|
||||
true_data_shape = shape;
|
||||
}
|
||||
|
||||
std::vector<char> header = create_npy_header<T>(true_data_shape);
|
||||
size_t nels = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<size_t>());
|
||||
|
||||
fseek(fp, 0, SEEK_SET);
|
||||
fwrite(&header[0], sizeof(char), header.size(), fp);
|
||||
fseek(fp, 0, SEEK_END);
|
||||
fwrite(data, sizeof(T), nels, fp);
|
||||
fclose(fp);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
void npz_save(std::string zipname,
|
||||
std::string fname,
|
||||
const T *data,
|
||||
const std::vector<size_t> &shape,
|
||||
std::string mode = "w")
|
||||
{
|
||||
// first, append a .npy to the fname
|
||||
fname += ".npy";
|
||||
|
||||
// now, on with the show
|
||||
FILE *fp = NULL;
|
||||
uint16_t nrecs = 0;
|
||||
size_t global_header_offset = 0;
|
||||
std::vector<char> global_header;
|
||||
|
||||
if (mode == "a")
|
||||
fp = fopen(zipname.c_str(), "r+b");
|
||||
|
||||
if (fp) {
|
||||
// zip file exists. we need to add a new npy file to it.
|
||||
// first read the footer. this gives us the offset and size of the global header
|
||||
// then read and store the global header.
|
||||
// below, we will write the the new data at the start of the global header then append the
|
||||
// global header and footer below it
|
||||
size_t global_header_size;
|
||||
parse_zip_footer(fp, nrecs, global_header_size, global_header_offset);
|
||||
fseek(fp, global_header_offset, SEEK_SET);
|
||||
global_header.resize(global_header_size);
|
||||
size_t res = fread(&global_header[0], sizeof(char), global_header_size, fp);
|
||||
if (res != global_header_size) {
|
||||
throw std::runtime_error("npz_save: header read error while adding to existing zip");
|
||||
}
|
||||
fseek(fp, global_header_offset, SEEK_SET);
|
||||
}
|
||||
else {
|
||||
fp = fopen(zipname.c_str(), "wb");
|
||||
}
|
||||
|
||||
std::vector<char> npy_header = create_npy_header<T>(shape);
|
||||
|
||||
size_t nels = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<size_t>());
|
||||
size_t nbytes = nels * sizeof(T) + npy_header.size();
|
||||
|
||||
// get the CRC of the data to be added
|
||||
uint32_t crc = crc32(0L, (uint8_t *)&npy_header[0], npy_header.size());
|
||||
crc = crc32(crc, (uint8_t *)data, nels * sizeof(T));
|
||||
|
||||
// build the local header
|
||||
std::vector<char> local_header;
|
||||
local_header += "PK"; // first part of sig
|
||||
local_header += (uint16_t)0x0403; // second part of sig
|
||||
local_header += (uint16_t)20; // min version to extract
|
||||
local_header += (uint16_t)0; // general purpose bit flag
|
||||
local_header += (uint16_t)0; // compression method
|
||||
local_header += (uint16_t)0; // file last mod time
|
||||
local_header += (uint16_t)0; // file last mod date
|
||||
local_header += (uint32_t)crc; // crc
|
||||
local_header += (uint32_t)nbytes; // compressed size
|
||||
local_header += (uint32_t)nbytes; // uncompressed size
|
||||
local_header += (uint16_t)fname.size(); // fname length
|
||||
local_header += (uint16_t)0; // extra field length
|
||||
local_header += fname;
|
||||
|
||||
// build global header
|
||||
global_header += "PK"; // first part of sig
|
||||
global_header += (uint16_t)0x0201; // second part of sig
|
||||
global_header += (uint16_t)20; // version made by
|
||||
global_header.insert(global_header.end(), local_header.begin() + 4, local_header.begin() + 30);
|
||||
global_header += (uint16_t)0; // file comment length
|
||||
global_header += (uint16_t)0; // disk number where file starts
|
||||
global_header += (uint16_t)0; // internal file attributes
|
||||
global_header += (uint32_t)0; // external file attributes
|
||||
global_header += (uint32_t)
|
||||
global_header_offset; // relative offset of local file header, since it begins where the
|
||||
// global header used to begin
|
||||
global_header += fname;
|
||||
|
||||
// build footer
|
||||
std::vector<char> footer;
|
||||
footer += "PK"; // first part of sig
|
||||
footer += (uint16_t)0x0605; // second part of sig
|
||||
footer += (uint16_t)0; // number of this disk
|
||||
footer += (uint16_t)0; // disk where footer starts
|
||||
footer += (uint16_t)(nrecs + 1); // number of records on this disk
|
||||
footer += (uint16_t)(nrecs + 1); // total number of records
|
||||
footer += (uint32_t)global_header.size(); // nbytes of global headers
|
||||
footer += (uint32_t)(global_header_offset + nbytes +
|
||||
local_header.size()); // offset of start of global headers, since global
|
||||
// header now starts after newly written array
|
||||
footer += (uint16_t)0; // zip file comment length
|
||||
|
||||
// write everything
|
||||
fwrite(&local_header[0], sizeof(char), local_header.size(), fp);
|
||||
fwrite(&npy_header[0], sizeof(char), npy_header.size(), fp);
|
||||
fwrite(data, sizeof(T), nels, fp);
|
||||
fwrite(&global_header[0], sizeof(char), global_header.size(), fp);
|
||||
fwrite(&footer[0], sizeof(char), footer.size(), fp);
|
||||
fclose(fp);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
void npy_save(std::string fname, const std::vector<T> data, std::string mode = "w")
|
||||
{
|
||||
std::vector<size_t> shape;
|
||||
shape.push_back(data.size());
|
||||
npy_save(fname, &data[0], shape, mode);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
void npz_save(std::string zipname,
|
||||
std::string fname,
|
||||
const std::vector<T> data,
|
||||
std::string mode = "w")
|
||||
{
|
||||
std::vector<size_t> shape;
|
||||
shape.push_back(data.size());
|
||||
npz_save(zipname, fname, &data[0], shape, mode);
|
||||
}
|
||||
|
||||
template<typename T> std::vector<char> create_npy_header(const std::vector<size_t> &shape)
|
||||
{
|
||||
|
||||
std::vector<char> dict;
|
||||
dict += "{'descr': '";
|
||||
dict += BigEndianTest();
|
||||
dict += map_type(typeid(T));
|
||||
dict += std::to_string(sizeof(T));
|
||||
dict += "', 'fortran_order': False, 'shape': (";
|
||||
dict += std::to_string(shape[0]);
|
||||
for (size_t i = 1; i < shape.size(); i++) {
|
||||
dict += ", ";
|
||||
dict += std::to_string(shape[i]);
|
||||
}
|
||||
if (shape.size() == 1)
|
||||
dict += ",";
|
||||
dict += "), }";
|
||||
// pad with spaces so that preamble+dict is modulo 16 bytes. preamble is 10 bytes. dict needs to
|
||||
// end with \n
|
||||
int remainder = 16 - (10 + dict.size()) % 16;
|
||||
dict.insert(dict.end(), remainder, ' ');
|
||||
dict.back() = '\n';
|
||||
|
||||
std::vector<char> header;
|
||||
header += (char)0x93;
|
||||
header += "NUMPY";
|
||||
header += (char)0x01; // major version of numpy format
|
||||
header += (char)0x00; // minor version of numpy format
|
||||
header += (uint16_t)dict.size();
|
||||
header.insert(header.end(), dict.begin(), dict.end());
|
||||
|
||||
return header;
|
||||
}
|
||||
|
||||
} // namespace cnpy
|
||||
|
||||
#endif
|
30  extern/mantaflow/helper/util/vectorbase.h  vendored
@@ -439,6 +439,36 @@ inline Real normSquare(const int v)
   return square(v);
 }
 
+//! Compute sum of all components, allow use of int, Real too
+template<class S> inline S sum(const S v)
+{
+  return v;
+}
+template<class S> inline S sum(const Vector3D<S> &v)
+{
+  return v.x + v.y + v.z;
+}
+
+//! Get absolute representation of vector, allow use of int, Real too
+inline Real abs(const Real v)
+{
+  return std::fabs(v);
+}
+inline int abs(const int v)
+{
+  return std::abs(v);
+}
+
+template<class S> inline Vector3D<S> abs(const Vector3D<S> &v)
+{
+  Vector3D<S> cp(v.x, v.y, v.z);
+  for (int i = 0; i < 3; ++i) {
+    if (cp[i] < 0)
+      cp[i] *= (-1.0);
+  }
+  return cp;
+}
+
 //! Returns a normalized vector
 template<class S> inline Vector3D<S> getNormalized(const Vector3D<S> &v)
 {
43  extern/mantaflow/preprocessed/fileio/iogrids.cpp  vendored
@@ -27,7 +27,10 @@ extern "C" {
 }
 #endif
 
-#include "cnpy.h"
+#if NO_CNPY != 1
+#  include "cnpy.h"
+#endif
+
 #include "mantaio.h"
 #include "grid.h"
 #include "vector4d.h"
@@ -965,12 +968,16 @@ int readGrid4dUni(
 };
 void readGrid4dUniCleanup(void **fileHandle)
 {
+#if NO_ZLIB != 1
   gzFile gzf = NULL;
   if (fileHandle) {
     gzf = (gzFile)(*fileHandle);
     gzclose(gzf);
     *fileHandle = NULL;
   }
+#else
+  debMsg("file format not supported without zlib", 1);
+#endif
 }
 
 template<class T> int writeGrid4dRaw(const string &name, Grid4d<T> *grid)
@@ -1021,15 +1028,13 @@ template<class T> int readGrid4dRaw(const string &name, Grid4d<T> *grid)
|
||||
|
||||
template<class T> int writeGridNumpy(const string &name, Grid<T> *grid)
|
||||
{
|
||||
#if NO_ZLIB == 1
|
||||
debMsg("file format not supported without zlib", 1);
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
#if FLOATINGPOINT_PRECISION != 1
|
||||
errMsg("writeGridNumpy: Double precision not yet supported");
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
#if NO_CNPY != 1
|
||||
// find suffix to differentiate between npy <-> npz , TODO: check for actual "npy" string
|
||||
std::string::size_type idx;
|
||||
bool bUseNpz = false;
|
||||
@@ -1075,19 +1080,21 @@ template<class T> int writeGridNumpy(const string &name, Grid<T> *grid)
|
||||
cnpy::npy_save(name, &grid[0], shape, "w");
|
||||
}
|
||||
return 1;
|
||||
};
|
||||
#else
|
||||
debMsg("file format not supported without cnpy", 1);
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
template<class T> int readGridNumpy(const string &name, Grid<T> *grid)
|
||||
{
|
||||
#if NO_ZLIB == 1
|
||||
debMsg("file format not supported without zlib", 1);
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
#if FLOATINGPOINT_PRECISION != 1
|
||||
errMsg("readGridNumpy: Double precision not yet supported");
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
#if NO_CNPY != 1
|
||||
// find suffix to differentiate between npy <-> npz
|
||||
std::string::size_type idx;
|
||||
bool bUseNpz = false;
|
||||
@@ -1144,7 +1151,11 @@ template<class T> int readGridNumpy(const string &name, Grid<T> *grid)
       gridArr.data<T>(),
       sizeof(T) * grid->getSizeX() * grid->getSizeY() * grid->getSizeZ());
   return 1;
 };
+#else
+  debMsg("file format not supported without cnpy", 1);
+  return 0;
+#endif
 }
 
 int writeGridsNumpy(const string &name, std::vector<PbClass *> *grids)
 {
@@ -1163,13 +1174,12 @@ void getNpzFileSize(
|
||||
const string &name, int &x, int &y, int &z, int *t = NULL, std::string *info = NULL)
|
||||
{
|
||||
x = y = z = 0;
|
||||
#if NO_ZLIB != 1
|
||||
debMsg("file format not supported without zlib", 1);
|
||||
return;
|
||||
#endif
|
||||
|
||||
#if FLOATINGPOINT_PRECISION != 1
|
||||
errMsg("getNpzFileSize: Double precision not yet supported");
|
||||
#endif
|
||||
|
||||
#if NO_CNPY != 1
|
||||
// find suffix to differentiate between npy <-> npz
|
||||
cnpy::NpyArray gridArr;
|
||||
cnpy::npz_t fNpz = cnpy::npz_load(name);
|
||||
@@ -1180,6 +1190,9 @@ void getNpzFileSize(
   x = gridArr.shape[2];
   if (t)
     (*t) = 0;  // unused for now
+#else
+  debMsg("file format not supported without cnpy", 1);
+#endif
 }
 Vec3 getNpzFileSize(const string &name)
 {
21  extern/mantaflow/preprocessed/fileio/ioutil.cpp  vendored
@@ -26,17 +26,18 @@
 extern "C" {
 # include <zlib.h>
 }
 #endif
 
-# if defined(WIN32) || defined(_WIN32)
-# include <windows.h>
-# include <string>
-# endif
+#if defined(WIN32) || defined(_WIN32)
+# include <windows.h>
+# include <string>
+#endif
 
 using namespace std;
 
 namespace Manta {
 
-# if defined(WIN32) || defined(_WIN32)
+#if defined(WIN32) || defined(_WIN32)
 static wstring stringToWstring(const char *str)
 {
   const int length_wc = MultiByteToWideChar(CP_UTF8, 0, str, strlen(str), NULL, 0);
@@ -44,10 +45,11 @@ static wstring stringToWstring(const char *str)
   MultiByteToWideChar(CP_UTF8, 0, str, strlen(str), &strWide[0], length_wc);
   return strWide;
 }
-# endif // WIN32==1
+#endif // WIN32==1
 
 void *safeGzopen(const char *filename, const char *mode)
 {
+#if NO_ZLIB != 1
   gzFile gzfile;
 
 # if defined(WIN32) || defined(_WIN32)
@@ -58,8 +60,11 @@ void *safeGzopen(const char *filename, const char *mode)
 # endif
 
   return gzfile;
-}
+#else
+  debMsg("safeGzopen not supported without zlib", 1);
+  return nullptr;
+#endif // NO_ZLIB != 1
+}
 
 #if defined(OPENVDB)
 // Convert from OpenVDB value to Manta value.
@@ -109,4 +114,4 @@ template<> void convertTo(openvdb::Vec3s *out, Vec3 &in)
 }
 #endif // OPENVDB==1
 
-} // namespace
+} // namespace Manta
2  extern/mantaflow/preprocessed/gitinfo.h  vendored
@@ -1,3 +1,3 @@
 
 
-#define MANTA_GIT_VERSION "commit 7395d36e3f504edbdabe34b30edc855b422c7baa"
+#define MANTA_GIT_VERSION "commit 3370c2014ad7192041cb4fbed19ed74ae9725fb5"
38  extern/mantaflow/preprocessed/mesh.cpp  vendored
@@ -213,34 +213,36 @@ Mesh &Mesh::operator=(const Mesh &o)
   return *this;
 }
 
-void Mesh::load(string name, bool append)
+int Mesh::load(string name, bool append)
 {
   if (name.find_last_of('.') == string::npos)
     errMsg("file '" + name + "' does not have an extension");
   string ext = name.substr(name.find_last_of('.'));
   if (ext == ".gz")  // assume bobj gz
-    readBobjFile(name, this, append);
+    return readBobjFile(name, this, append);
   else if (ext == ".obj")
-    readObjFile(name, this, append);
+    return readObjFile(name, this, append);
   else
     errMsg("file '" + name + "' filetype not supported");
 
   // dont always rebuild...
   // rebuildCorners();
   // rebuildLookup();
+  return 0;
 }
 
-void Mesh::save(string name)
+int Mesh::save(string name)
 {
   if (name.find_last_of('.') == string::npos)
     errMsg("file '" + name + "' does not have an extension");
   string ext = name.substr(name.find_last_of('.'));
   if (ext == ".obj")
-    writeObjFile(name, this);
+    return writeObjFile(name, this);
   else if (ext == ".gz")
-    writeBobjFile(name, this);
+    return writeBobjFile(name, this);
   else
     errMsg("file '" + name + "' filetype not supported");
+  return 0;
 }
 
 void Mesh::fromShape(Shape &shape, bool append)
@@ -1339,8 +1341,8 @@ template<class T> void MeshDataImpl<T>::setSource(Grid<T> *grid, bool isMAC)
 {
   mpGridSource = grid;
   mGridSourceMAC = isMAC;
-  if (isMAC)
-    assertMsg(dynamic_cast<MACGrid *>(grid) != NULL, "Given grid is not a valid MAC grid");
+  if (grid && isMAC)
+    assertMsg(grid->getType() & GridBase::TypeMAC, "Given grid is not a valid MAC grid");
 }
 
 template<class T> void MeshDataImpl<T>::initNewValue(IndexInt idx, Vec3 pos)
@@ -1371,38 +1373,40 @@ void Mesh::updateDataFields()
   for (size_t i = 0; i < mNodes.size(); ++i) {
     Vec3 pos = mNodes[i].pos;
     for (IndexInt md = 0; md < (IndexInt)mMdataReal.size(); ++md)
-      mMdataReal[md]->initNewValue(i, mNodes[i].pos);
+      mMdataReal[md]->initNewValue(i, pos);
     for (IndexInt md = 0; md < (IndexInt)mMdataVec3.size(); ++md)
-      mMdataVec3[md]->initNewValue(i, mNodes[i].pos);
+      mMdataVec3[md]->initNewValue(i, pos);
     for (IndexInt md = 0; md < (IndexInt)mMdataInt.size(); ++md)
-      mMdataInt[md]->initNewValue(i, mNodes[i].pos);
+      mMdataInt[md]->initNewValue(i, pos);
   }
 }
 
-template<typename T> void MeshDataImpl<T>::load(string name)
+template<typename T> int MeshDataImpl<T>::load(string name)
 {
   if (name.find_last_of('.') == string::npos)
     errMsg("file '" + name + "' does not have an extension");
   string ext = name.substr(name.find_last_of('.'));
   if (ext == ".uni")
-    readMdataUni<T>(name, this);
+    return readMdataUni<T>(name, this);
   else if (ext == ".raw")  // raw = uni for now
-    readMdataUni<T>(name, this);
+    return readMdataUni<T>(name, this);
   else
     errMsg("mesh data '" + name + "' filetype not supported for loading");
+  return 0;
 }
 
-template<typename T> void MeshDataImpl<T>::save(string name)
+template<typename T> int MeshDataImpl<T>::save(string name)
 {
   if (name.find_last_of('.') == string::npos)
     errMsg("file '" + name + "' does not have an extension");
   string ext = name.substr(name.find_last_of('.'));
   if (ext == ".uni")
-    writeMdataUni<T>(name, this);
+    return writeMdataUni<T>(name, this);
   else if (ext == ".raw")  // raw = uni for now
-    writeMdataUni<T>(name, this);
+    return writeMdataUni<T>(name, this);
   else
     errMsg("mesh data '" + name + "' filetype not supported for saving");
+  return 0;
 }
 
 // specializations
127  extern/mantaflow/preprocessed/mesh.h  vendored
@@ -240,35 +240,8 @@ class Mesh : public PbClass {
|
||||
}
|
||||
}
|
||||
|
||||
void load(std::string name, bool append = false);
|
||||
static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
|
||||
bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
|
||||
pbPreparePlugin(pbo->getParent(), "Mesh::load", !noTiming);
|
||||
PyObject *_retval = 0;
|
||||
{
|
||||
ArgLocker _lock;
|
||||
std::string name = _args.get<std::string>("name", 0, &_lock);
|
||||
bool append = _args.getOpt<bool>("append", 1, false, &_lock);
|
||||
pbo->_args.copy(_args);
|
||||
_retval = getPyNone();
|
||||
pbo->load(name, append);
|
||||
pbo->_args.check();
|
||||
}
|
||||
pbFinalizePlugin(pbo->getParent(), "Mesh::load", !noTiming);
|
||||
return _retval;
|
||||
}
|
||||
catch (std::exception &e) {
|
||||
pbSetError("Mesh::load", e.what());
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
void fromShape(Shape &shape, bool append = false);
|
||||
static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
@@ -294,34 +267,8 @@ class Mesh : public PbClass {
|
||||
}
|
||||
}
|
||||
|
||||
void save(std::string name);
|
||||
static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
|
||||
bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
|
||||
pbPreparePlugin(pbo->getParent(), "Mesh::save", !noTiming);
|
||||
PyObject *_retval = 0;
|
||||
{
|
||||
ArgLocker _lock;
|
||||
std::string name = _args.get<std::string>("name", 0, &_lock);
|
||||
pbo->_args.copy(_args);
|
||||
_retval = getPyNone();
|
||||
pbo->save(name);
|
||||
pbo->_args.check();
|
||||
}
|
||||
pbFinalizePlugin(pbo->getParent(), "Mesh::save", !noTiming);
|
||||
return _retval;
|
||||
}
|
||||
catch (std::exception &e) {
|
||||
pbSetError("Mesh::save", e.what());
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
void advectInGrid(FlagGrid &flags, MACGrid &vel, int integrationMode);
|
||||
static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
@@ -349,7 +296,7 @@ class Mesh : public PbClass {
|
||||
}
|
||||
|
||||
void scale(Vec3 s);
|
||||
static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
@@ -375,7 +322,7 @@ class Mesh : public PbClass {
|
||||
}
|
||||
|
||||
void offset(Vec3 o);
|
||||
static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
@@ -401,7 +348,7 @@ class Mesh : public PbClass {
|
||||
}
|
||||
|
||||
void rotate(Vec3 thetas);
|
||||
static PyObject *_W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
@@ -427,7 +374,7 @@ class Mesh : public PbClass {
|
||||
}
|
||||
|
||||
void computeVelocity(Mesh &oldMesh, MACGrid &vel);
|
||||
static PyObject *_W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
@@ -453,6 +400,58 @@ class Mesh : public PbClass {
|
||||
}
|
||||
}
|
||||
|
||||
//! file io
|
||||
int load(std::string name, bool append = false);
|
||||
static PyObject *_W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
|
||||
bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
|
||||
pbPreparePlugin(pbo->getParent(), "Mesh::load", !noTiming);
|
||||
PyObject *_retval = 0;
|
||||
{
|
||||
ArgLocker _lock;
|
||||
std::string name = _args.get<std::string>("name", 0, &_lock);
|
||||
bool append = _args.getOpt<bool>("append", 1, false, &_lock);
|
||||
pbo->_args.copy(_args);
|
||||
_retval = toPy(pbo->load(name, append));
|
||||
pbo->_args.check();
|
||||
}
|
||||
pbFinalizePlugin(pbo->getParent(), "Mesh::load", !noTiming);
|
||||
return _retval;
|
||||
}
|
||||
catch (std::exception &e) {
|
||||
pbSetError("Mesh::load", e.what());
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
int save(std::string name);
|
||||
static PyObject *_W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
|
||||
bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
|
||||
pbPreparePlugin(pbo->getParent(), "Mesh::save", !noTiming);
|
||||
PyObject *_retval = 0;
|
||||
{
|
||||
ArgLocker _lock;
|
||||
std::string name = _args.get<std::string>("name", 0, &_lock);
|
||||
pbo->_args.copy(_args);
|
||||
_retval = toPy(pbo->save(name));
|
||||
pbo->_args.check();
|
||||
}
|
||||
pbFinalizePlugin(pbo->getParent(), "Mesh::save", !noTiming);
|
||||
return _retval;
|
||||
}
|
||||
catch (std::exception &e) {
|
||||
pbSetError("Mesh::save", e.what());
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
void computeLevelset(LevelsetGrid &levelset, Real sigma, Real cutoff = -1.);
|
||||
static PyObject *_W_10(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
@@ -1564,7 +1563,7 @@ template<class T> class MeshDataImpl : public MeshDataBase {
|
||||
}
|
||||
|
||||
//! file io
|
||||
void save(const std::string name);
|
||||
int save(const std::string name);
|
||||
static PyObject *_W_41(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
@@ -1577,8 +1576,7 @@ template<class T> class MeshDataImpl : public MeshDataBase {
|
||||
ArgLocker _lock;
|
||||
const std::string name = _args.get<std::string>("name", 0, &_lock);
|
||||
pbo->_args.copy(_args);
|
||||
_retval = getPyNone();
|
||||
pbo->save(name);
|
||||
_retval = toPy(pbo->save(name));
|
||||
pbo->_args.check();
|
||||
}
|
||||
pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::save", !noTiming);
|
||||
@@ -1590,7 +1588,7 @@ template<class T> class MeshDataImpl : public MeshDataBase {
|
||||
}
|
||||
}
|
||||
|
||||
void load(const std::string name);
|
||||
int load(const std::string name);
|
||||
static PyObject *_W_42(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
@@ -1603,8 +1601,7 @@ template<class T> class MeshDataImpl : public MeshDataBase {
|
||||
ArgLocker _lock;
|
||||
const std::string name = _args.get<std::string>("name", 0, &_lock);
|
||||
pbo->_args.copy(_args);
|
||||
_retval = getPyNone();
|
||||
pbo->load(name);
|
||||
_retval = toPy(pbo->load(name));
|
||||
pbo->_args.check();
|
||||
}
|
||||
pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::load", !noTiming);
|
||||
|
16  extern/mantaflow/preprocessed/mesh.h.reg.cpp  vendored
@@ -10,14 +10,14 @@ static const Pb::Register _R_12("Mesh", "Mesh", "PbClass");
 template<> const char *Namify<Mesh>::S = "Mesh";
 static const Pb::Register _R_13("Mesh", "Mesh", Mesh::_W_0);
 static const Pb::Register _R_14("Mesh", "clear", Mesh::_W_1);
-static const Pb::Register _R_15("Mesh", "load", Mesh::_W_2);
-static const Pb::Register _R_16("Mesh", "fromShape", Mesh::_W_3);
-static const Pb::Register _R_17("Mesh", "save", Mesh::_W_4);
-static const Pb::Register _R_18("Mesh", "advectInGrid", Mesh::_W_5);
-static const Pb::Register _R_19("Mesh", "scale", Mesh::_W_6);
-static const Pb::Register _R_20("Mesh", "offset", Mesh::_W_7);
-static const Pb::Register _R_21("Mesh", "rotate", Mesh::_W_8);
-static const Pb::Register _R_22("Mesh", "computeVelocity", Mesh::_W_9);
+static const Pb::Register _R_15("Mesh", "fromShape", Mesh::_W_2);
+static const Pb::Register _R_16("Mesh", "advectInGrid", Mesh::_W_3);
+static const Pb::Register _R_17("Mesh", "scale", Mesh::_W_4);
+static const Pb::Register _R_18("Mesh", "offset", Mesh::_W_5);
+static const Pb::Register _R_19("Mesh", "rotate", Mesh::_W_6);
+static const Pb::Register _R_20("Mesh", "computeVelocity", Mesh::_W_7);
+static const Pb::Register _R_21("Mesh", "load", Mesh::_W_8);
+static const Pb::Register _R_22("Mesh", "save", Mesh::_W_9);
 static const Pb::Register _R_23("Mesh", "computeLevelset", Mesh::_W_10);
 static const Pb::Register _R_24("Mesh", "getLevelset", Mesh::_W_11);
 static const Pb::Register _R_25("Mesh", "applyMeshToGrid", Mesh::_W_12);
4  extern/mantaflow/preprocessed/particle.cpp  vendored
@@ -359,8 +359,8 @@ template<class T> void ParticleDataImpl<T>::setSource(Grid<T> *grid, bool isMAC)
 {
   mpGridSource = grid;
   mGridSourceMAC = isMAC;
-  if (isMAC)
-    assertMsg(dynamic_cast<MACGrid *>(grid) != NULL, "Given grid is not a valid MAC grid");
+  if (grid && isMAC)
+    assertMsg(grid->getType() & GridBase::TypeMAC, "Given grid is not a valid MAC grid");
 }
 
 template<class T> void ParticleDataImpl<T>::initNewValue(IndexInt idx, Vec3 pos)
144  extern/mantaflow/preprocessed/plugin/initplugins.cpp  vendored
@@ -1479,48 +1479,24 @@ void PbRegister_addTestParts()
|
||||
}
|
||||
|
||||
//! calculate the difference between two pdata fields (note - slow!, not parallelized)
|
||||
|
||||
Real pdataMaxDiff(const ParticleDataBase *a, const ParticleDataBase *b)
|
||||
template<class T> Real getPdataMaxDiff(const ParticleDataImpl<T> *a, const ParticleDataImpl<T> *b)
|
||||
{
|
||||
double maxVal = 0.;
|
||||
// debMsg(" PD "<< a->getType()<<" as"<<a->getSizeSlow()<<" bs"<<b->getSizeSlow() , 1);
|
||||
assertMsg(a->getType() == b->getType(), "pdataMaxDiff problem - different pdata types!");
|
||||
assertMsg(a->getSizeSlow() == b->getSizeSlow(), "pdataMaxDiff problem - different pdata sizes!");
|
||||
|
||||
if (a->getType() & ParticleDataBase::TypeReal) {
|
||||
const ParticleDataImpl<Real> &av = *dynamic_cast<const ParticleDataImpl<Real> *>(a);
|
||||
const ParticleDataImpl<Real> &bv = *dynamic_cast<const ParticleDataImpl<Real> *>(b);
|
||||
FOR_PARTS(av)
|
||||
{
|
||||
maxVal = std::max(maxVal, (double)fabs(av[idx] - bv[idx]));
|
||||
}
|
||||
Real maxVal = 0.;
|
||||
FOR_PARTS(*a)
|
||||
{
|
||||
T diff = a->get(idx) - b->get(idx);
|
||||
Real s = (Real)sum(abs(diff));
|
||||
maxVal = std::max(maxVal, s);
|
||||
}
|
||||
else if (a->getType() & ParticleDataBase::TypeInt) {
|
||||
const ParticleDataImpl<int> &av = *dynamic_cast<const ParticleDataImpl<int> *>(a);
|
||||
const ParticleDataImpl<int> &bv = *dynamic_cast<const ParticleDataImpl<int> *>(b);
|
||||
FOR_PARTS(av)
|
||||
{
|
||||
maxVal = std::max(maxVal, (double)fabs((double)av[idx] - bv[idx]));
|
||||
}
|
||||
}
|
||||
else if (a->getType() & ParticleDataBase::TypeVec3) {
|
||||
const ParticleDataImpl<Vec3> &av = *dynamic_cast<const ParticleDataImpl<Vec3> *>(a);
|
||||
const ParticleDataImpl<Vec3> &bv = *dynamic_cast<const ParticleDataImpl<Vec3> *>(b);
|
||||
FOR_PARTS(av)
|
||||
{
|
||||
double d = 0.;
|
||||
for (int c = 0; c < 3; ++c) {
|
||||
d += fabs((double)av[idx][c] - (double)bv[idx][c]);
|
||||
}
|
||||
maxVal = std::max(maxVal, d);
|
||||
}
|
||||
}
|
||||
else {
|
||||
errMsg("pdataMaxDiff: Grid Type is not supported (only Real, Vec3, int)");
|
||||
}
|
||||
|
||||
return maxVal;
|
||||
}
|
||||
Real pdataMaxDiff(const ParticleDataImpl<Real> *a, const ParticleDataImpl<Real> *b)
|
||||
{
|
||||
return getPdataMaxDiff(a, b);
|
||||
}
|
||||
static PyObject *_W_15(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
@@ -1531,8 +1507,8 @@ static PyObject *_W_15(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
PyObject *_retval = 0;
|
||||
{
|
||||
ArgLocker _lock;
|
||||
const ParticleDataBase *a = _args.getPtr<ParticleDataBase>("a", 0, &_lock);
|
||||
const ParticleDataBase *b = _args.getPtr<ParticleDataBase>("b", 1, &_lock);
|
||||
const ParticleDataImpl<Real> *a = _args.getPtr<ParticleDataImpl<Real>>("a", 0, &_lock);
|
||||
const ParticleDataImpl<Real> *b = _args.getPtr<ParticleDataImpl<Real>>("b", 1, &_lock);
|
||||
_retval = toPy(pdataMaxDiff(a, b));
|
||||
_args.check();
|
||||
}
|
||||
@@ -1552,6 +1528,76 @@ void PbRegister_pdataMaxDiff()
|
||||
}
|
||||
}
|
||||
|
||||
Real pdataMaxDiffInt(const ParticleDataImpl<int> *a, const ParticleDataImpl<int> *b)
|
||||
{
|
||||
return getPdataMaxDiff(a, b);
|
||||
}
|
||||
static PyObject *_W_16(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
FluidSolver *parent = _args.obtainParent();
|
||||
bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
|
||||
pbPreparePlugin(parent, "pdataMaxDiffInt", !noTiming);
|
||||
PyObject *_retval = 0;
|
||||
{
|
||||
ArgLocker _lock;
|
||||
const ParticleDataImpl<int> *a = _args.getPtr<ParticleDataImpl<int>>("a", 0, &_lock);
|
||||
const ParticleDataImpl<int> *b = _args.getPtr<ParticleDataImpl<int>>("b", 1, &_lock);
|
||||
_retval = toPy(pdataMaxDiffInt(a, b));
|
||||
_args.check();
|
||||
}
|
||||
pbFinalizePlugin(parent, "pdataMaxDiffInt", !noTiming);
|
||||
return _retval;
|
||||
}
|
||||
catch (std::exception &e) {
|
||||
pbSetError("pdataMaxDiffInt", e.what());
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
static const Pb::Register _RP_pdataMaxDiffInt("", "pdataMaxDiffInt", _W_16);
|
||||
extern "C" {
|
||||
void PbRegister_pdataMaxDiffInt()
|
||||
{
|
||||
KEEP_UNUSED(_RP_pdataMaxDiffInt);
|
||||
}
|
||||
}
|
||||
|
||||
Real pdataMaxDiffVec3(const ParticleDataImpl<Vec3> *a, const ParticleDataImpl<Vec3> *b)
|
||||
{
|
||||
return getPdataMaxDiff(a, b);
|
||||
}
|
||||
static PyObject *_W_17(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
FluidSolver *parent = _args.obtainParent();
|
||||
bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
|
||||
pbPreparePlugin(parent, "pdataMaxDiffVec3", !noTiming);
|
||||
PyObject *_retval = 0;
|
||||
{
|
||||
ArgLocker _lock;
|
||||
const ParticleDataImpl<Vec3> *a = _args.getPtr<ParticleDataImpl<Vec3>>("a", 0, &_lock);
|
||||
const ParticleDataImpl<Vec3> *b = _args.getPtr<ParticleDataImpl<Vec3>>("b", 1, &_lock);
|
||||
_retval = toPy(pdataMaxDiffVec3(a, b));
|
||||
_args.check();
|
||||
}
|
||||
pbFinalizePlugin(parent, "pdataMaxDiffVec3", !noTiming);
|
||||
return _retval;
|
||||
}
|
||||
catch (std::exception &e) {
|
||||
pbSetError("pdataMaxDiffVec3", e.what());
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
static const Pb::Register _RP_pdataMaxDiffVec3("", "pdataMaxDiffVec3", _W_17);
|
||||
extern "C" {
|
||||
void PbRegister_pdataMaxDiffVec3()
|
||||
{
|
||||
KEEP_UNUSED(_RP_pdataMaxDiffVec3);
|
||||
}
|
||||
}
|
||||
|
||||
//! calculate center of mass given density grid, for re-centering
|
||||
|
||||
Vec3 calcCenterOfMass(const Grid<Real> &density)
|
||||
@@ -1567,7 +1613,7 @@ Vec3 calcCenterOfMass(const Grid<Real> &density)
|
||||
p /= w;
|
||||
return p;
|
||||
}
|
||||
static PyObject *_W_16(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
static PyObject *_W_18(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
@@ -1589,7 +1635,7 @@ static PyObject *_W_16(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
static const Pb::Register _RP_calcCenterOfMass("", "calcCenterOfMass", _W_16);
|
||||
static const Pb::Register _RP_calcCenterOfMass("", "calcCenterOfMass", _W_18);
|
||||
extern "C" {
|
||||
void PbRegister_calcCenterOfMass()
|
||||
{
|
||||
@@ -1789,7 +1835,7 @@ void updateFractions(const FlagGrid &flags,
|
||||
fractions.setConst(Vec3(0.));
|
||||
KnUpdateFractions(flags, phiObs, fractions, boundaryWidth, fracThreshold);
|
||||
}
|
||||
static PyObject *_W_17(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
static PyObject *_W_19(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
@@ -1816,7 +1862,7 @@ static PyObject *_W_17(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
static const Pb::Register _RP_updateFractions("", "updateFractions", _W_17);
|
||||
static const Pb::Register _RP_updateFractions("", "updateFractions", _W_19);
|
||||
extern "C" {
|
||||
void PbRegister_updateFractions()
|
||||
{
|
||||
@@ -1968,7 +2014,7 @@ void setObstacleFlags(FlagGrid &flags,
|
||||
{
|
||||
KnUpdateFlagsObs(flags, fractions, phiObs, phiOut, phiIn, boundaryWidth);
|
||||
}
|
||||
static PyObject *_W_18(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
static PyObject *_W_20(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
@@ -1996,7 +2042,7 @@ static PyObject *_W_18(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
static const Pb::Register _RP_setObstacleFlags("", "setObstacleFlags", _W_18);
|
||||
static const Pb::Register _RP_setObstacleFlags("", "setObstacleFlags", _W_20);
|
||||
extern "C" {
|
||||
void PbRegister_setObstacleFlags()
|
||||
{
|
||||
@@ -2113,7 +2159,7 @@ void initVortexVelocity(const Grid<Real> &phiObs,
|
||||
{
|
||||
kninitVortexVelocity(phiObs, vel, center, radius);
|
||||
}
|
||||
static PyObject *_W_19(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
static PyObject *_W_21(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
@@ -2139,7 +2185,7 @@ static PyObject *_W_19(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
static const Pb::Register _RP_initVortexVelocity("", "initVortexVelocity", _W_19);
|
||||
static const Pb::Register _RP_initVortexVelocity("", "initVortexVelocity", _W_21);
|
||||
extern "C" {
|
||||
void PbRegister_initVortexVelocity()
|
||||
{
|
||||
@@ -2465,7 +2511,7 @@ int blurMacGrid(MACGrid &oG, MACGrid &tG, float si)
|
||||
}
|
||||
return tmGK.mDim;
|
||||
}
|
||||
static PyObject *_W_20(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
static PyObject *_W_22(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
@@ -2489,7 +2535,7 @@ static PyObject *_W_20(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
static const Pb::Register _RP_blurMacGrid("", "blurMacGrid", _W_20);
|
||||
static const Pb::Register _RP_blurMacGrid("", "blurMacGrid", _W_22);
|
||||
extern "C" {
|
||||
void PbRegister_blurMacGrid()
|
||||
{
|
||||
@@ -2501,7 +2547,7 @@ int blurRealGrid(Grid<Real> &oG, Grid<Real> &tG, float si)
|
||||
{
|
||||
return blurGrid<Real>(oG, tG, si);
|
||||
}
|
||||
static PyObject *_W_21(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
static PyObject *_W_23(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
{
|
||||
try {
|
||||
PbArgs _args(_linargs, _kwds);
|
||||
@@ -2525,7 +2571,7 @@ static PyObject *_W_21(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
static const Pb::Register _RP_blurRealGrid("", "blurRealGrid", _W_21);
|
||||
static const Pb::Register _RP_blurRealGrid("", "blurRealGrid", _W_23);
|
||||
extern "C" {
|
||||
void PbRegister_blurRealGrid()
|
||||
{
|
||||
|
@@ -111,6 +111,8 @@ extern void PbRegister_checkSymmetryVec3();
 extern void PbRegister_projectPpmFull();
 extern void PbRegister_addTestParts();
 extern void PbRegister_pdataMaxDiff();
+extern void PbRegister_pdataMaxDiffInt();
+extern void PbRegister_pdataMaxDiffVec3();
 extern void PbRegister_calcCenterOfMass();
 extern void PbRegister_updateFractions();
 extern void PbRegister_setObstacleFlags();
@@ -306,6 +308,8 @@ void MantaEnsureRegistration()
   PbRegister_projectPpmFull();
   PbRegister_addTestParts();
   PbRegister_pdataMaxDiff();
+  PbRegister_pdataMaxDiffInt();
+  PbRegister_pdataMaxDiffVec3();
   PbRegister_calcCenterOfMass();
   PbRegister_updateFractions();
   PbRegister_setObstacleFlags();
@@ -383,11 +383,24 @@ string CUDADevice::compile_kernel(const DeviceRequestedFeatures &requested_featu
|
||||
}
|
||||
}
|
||||
|
||||
const string ptx = path_get(string_printf("lib/%s_compute_%d%d.ptx", name, major, minor));
|
||||
VLOG(1) << "Testing for pre-compiled kernel " << ptx << ".";
|
||||
if (path_exists(ptx)) {
|
||||
VLOG(1) << "Using precompiled kernel.";
|
||||
return ptx;
|
||||
/* The driver can JIT-compile PTX generated for older generations, so find the closest one. */
|
||||
int ptx_major = major, ptx_minor = minor;
|
||||
while (ptx_major >= 3) {
|
||||
const string ptx = path_get(
|
||||
string_printf("lib/%s_compute_%d%d.ptx", name, ptx_major, ptx_minor));
|
||||
VLOG(1) << "Testing for pre-compiled kernel " << ptx << ".";
|
||||
if (path_exists(ptx)) {
|
||||
VLOG(1) << "Using precompiled kernel.";
|
||||
return ptx;
|
||||
}
|
||||
|
||||
if (ptx_minor > 0) {
|
||||
ptx_minor--;
|
||||
}
|
||||
else {
|
||||
ptx_major--;
|
||||
ptx_minor = 9;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -539,7 +539,7 @@ if(WITH_CYCLES_DEVICE_OPTIX AND WITH_CYCLES_CUDA_BINARIES)
|
||||
${SRC_UTIL_HEADERS}
|
||||
COMMAND ${CUBIN_CC_ENV}
|
||||
"$<TARGET_FILE:cycles_cubin_cc>"
|
||||
-target 30
|
||||
-target 52
|
||||
-ptx
|
||||
-i ${CMAKE_CURRENT_SOURCE_DIR}/${input}
|
||||
${cuda_flags}
|
||||
@@ -563,7 +563,7 @@ if(WITH_CYCLES_DEVICE_OPTIX AND WITH_CYCLES_CUDA_BINARIES)
|
||||
COMMAND
|
||||
${CUDA_NVCC_EXECUTABLE}
|
||||
--ptx
|
||||
-arch=sm_30
|
||||
-arch=sm_52
|
||||
${cuda_flags}
|
||||
${input}
|
||||
WORKING_DIRECTORY
|
||||
|
@@ -77,7 +77,7 @@ ccl_device_inline float4 svm_image_texture_read(KernelGlobals *kg,
|
||||
return make_float4(f, f, f, 1.0f);
|
||||
}
|
||||
/* Byte */
|
||||
#ifdef cl_khr_fp16
|
||||
#ifdef __KERNEL_CL_KHR_FP16__
|
||||
/* half and half4 are optional in OpenCL */
|
||||
else if (texture_type == IMAGE_DATA_TYPE_HALF) {
|
||||
float f = tex_fetch(half, info, offset);
|
||||
|
@@ -204,8 +204,8 @@ color sky_radiance_nishita(vector dir, float nishita_data[10], string filename)
|
||||
mul;
|
||||
}
|
||||
}
|
||||
/* convert to RGB and adjust strength */
|
||||
return xyz_to_rgb(xyz[0], xyz[1], xyz[2]) * 120000.0;
|
||||
/* convert to RGB */
|
||||
return xyz_to_rgb(xyz[0], xyz[1], xyz[2]);
|
||||
}
|
||||
|
||||
shader node_sky_texture(
|
||||
|
@@ -205,8 +205,8 @@ ccl_device float3 sky_radiance_nishita(KernelGlobals *kg,
|
||||
}
|
||||
}
|
||||
|
||||
/* convert to rgb and adjust strength */
|
||||
return xyz_to_rgb(kg, xyz) * 120000.0f;
|
||||
/* convert to RGB */
|
||||
return xyz_to_rgb(kg, xyz);
|
||||
}
|
||||
|
||||
ccl_device void svm_node_tex_sky(
|
||||
|
@@ -798,7 +798,7 @@ NODE_DEFINE(SkyTextureNode)
|
||||
SOCKET_BOOLEAN(sun_disc, "Sun Disc", true);
|
||||
SOCKET_FLOAT(sun_size, "Sun Size", 0.009512f);
|
||||
SOCKET_FLOAT(sun_intensity, "Sun Intensity", 1.0f);
|
||||
SOCKET_FLOAT(sun_elevation, "Sun Elevation", M_PI_2_F);
|
||||
SOCKET_FLOAT(sun_elevation, "Sun Elevation", 15.0f * M_PI_F / 180.0f);
|
||||
SOCKET_FLOAT(sun_rotation, "Sun Rotation", 0.0f);
|
||||
SOCKET_FLOAT(altitude, "Altitude", 1.0f);
|
||||
SOCKET_FLOAT(air_density, "Air", 1.0f);
|
||||
|
@@ -100,6 +100,7 @@ set(SRC_HEADERS
|
||||
util_simd.h
|
||||
util_avxf.h
|
||||
util_avxb.h
|
||||
util_avxi.h
|
||||
util_semaphore.h
|
||||
util_sseb.h
|
||||
util_ssef.h
|
||||
|
@@ -429,6 +429,7 @@ if(WITH_XR_OPENXR)
|
||||
GHOST_IXrContext.h
|
||||
intern/GHOST_IXrGraphicsBinding.h
|
||||
intern/GHOST_XrContext.h
|
||||
intern/GHOST_XrException.h
|
||||
intern/GHOST_XrSession.h
|
||||
intern/GHOST_XrSwapchain.h
|
||||
intern/GHOST_Xr_intern.h
|
||||
|
@@ -180,6 +180,10 @@ GHOST_WindowWayland::GHOST_WindowWayland(GHOST_SystemWayland *system,
|
||||
wl_surface_commit(w->surface);
|
||||
wl_display_roundtrip(m_system->display());
|
||||
|
||||
#ifdef GHOST_OPENGL_ALPHA
|
||||
setOpaque();
|
||||
#endif
|
||||
|
||||
setState(state);
|
||||
|
||||
setTitle(title);
|
||||
@@ -214,6 +218,10 @@ GHOST_TSuccess GHOST_WindowWayland::deactivate()
|
||||
|
||||
GHOST_TSuccess GHOST_WindowWayland::notify_size()
|
||||
{
|
||||
#ifdef GHOST_OPENGL_ALPHA
|
||||
setOpaque();
|
||||
#endif
|
||||
|
||||
return m_system->pushEvent(
|
||||
new GHOST_Event(m_system->getMilliSeconds(), GHOST_kEventWindowSize, this));
|
||||
}
|
||||
@@ -385,6 +393,19 @@ bool GHOST_WindowWayland::isDialog() const
|
||||
return w->is_dialog;
|
||||
}
|
||||
|
||||
#ifdef GHOST_OPENGL_ALPHA
|
||||
void GHOST_WindowWayland::setOpaque() const
|
||||
{
|
||||
struct wl_region *region;
|
||||
|
||||
/* Make the window opaque. */
|
||||
region = wl_compositor_create_region(m_system->compositor());
|
||||
wl_region_add(region, 0, 0, w->width, w->height);
|
||||
wl_surface_set_opaque_region(w->surface, region);
|
||||
wl_region_destroy(region);
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* \param type The type of rendering context create.
|
||||
* \return Indication of success.
|
||||
|
@@ -109,6 +109,10 @@ class GHOST_WindowWayland : public GHOST_Window {
|
||||
|
||||
bool isDialog() const override;
|
||||
|
||||
#ifdef GHOST_OPENGL_ALPHA
|
||||
void setOpaque() const;
|
||||
#endif
|
||||
|
||||
private:
|
||||
GHOST_SystemWayland *m_system;
|
||||
struct window_t *w;
|
||||
|
@@ -28,6 +28,7 @@ set(INC_SYS
|
||||
)
|
||||
|
||||
set(SRC
|
||||
./intern/leak_detector.cc
|
||||
./intern/mallocn.c
|
||||
./intern/mallocn_guarded_impl.c
|
||||
./intern/mallocn_lockfree_impl.c
|
||||
|
@@ -211,6 +211,10 @@ extern size_t (*MEM_get_peak_memory)(void) ATTR_WARN_UNUSED_RESULT;
|
||||
extern const char *(*MEM_name_ptr)(void *vmemh);
|
||||
#endif
|
||||
|
||||
/** This should be called as early as possible in the program. When it has been called, information
|
||||
* about memory leaks will be printed on exit. */
|
||||
void MEM_initialize_memleak_detection(void);
|
||||
|
||||
/* Switch allocator to slower but fully guarded mode. */
|
||||
void MEM_use_guarded_allocator(void);
|
||||
|
||||
|
61
intern/guardedalloc/intern/leak_detector.cc
Normal file
@@ -0,0 +1,61 @@
|
||||
/*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 2
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
/** \file
|
||||
* \ingroup MEM
|
||||
*/
|
||||
|
||||
#include "MEM_guardedalloc.h"
|
||||
#include "mallocn_intern.h"
|
||||
|
||||
bool leak_detector_has_run = false;
|
||||
char free_after_leak_detection_message[] =
|
||||
"Freeing memory after the leak detector has run. This can happen when using "
|
||||
"static variables in C++ that are defined outside of functions. To fix this "
|
||||
"error, use the 'construct on first use' idiom.";
|
||||
|
||||
namespace {
|
||||
class MemLeakPrinter {
|
||||
public:
|
||||
~MemLeakPrinter()
|
||||
{
|
||||
leak_detector_has_run = true;
|
||||
const uint leaked_blocks = MEM_get_memory_blocks_in_use();
|
||||
if (leaked_blocks == 0) {
|
||||
return;
|
||||
}
|
||||
const size_t mem_in_use = MEM_get_memory_in_use();
|
||||
printf("Error: Not freed memory blocks: %u, total unfreed memory %f MB\n",
|
||||
leaked_blocks,
|
||||
(double)mem_in_use / 1024 / 1024);
|
||||
MEM_printmemlist();
|
||||
}
|
||||
};
|
||||
} // namespace
|
||||
|
||||
void MEM_initialize_memleak_detection(void)
|
||||
{
|
||||
/**
|
||||
* This variable is constructed when this function is first called. This should happen as soon as
|
||||
* possible when the program starts.
|
||||
*
|
||||
* It is destructed when the program exits. During destruction, it will print information about
|
||||
* leaked memory blocks. Static variables are destructed in reversed order of their
|
||||
* construction. Therefore, all static variables that own memory have to be constructed after
|
||||
* this function has been called.
|
||||
*/
|
||||
static MemLeakPrinter printer;
|
||||
}
|
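The new leak_detector.cc leans on C++ static destruction order: the function-local static inside MEM_initialize_memleak_detection() is constructed on the first call and destroyed after every static object constructed later, so its destructor sees only genuinely leaked blocks. A stripped-down sketch of the same construct-on-first-use idiom, with a plain counter standing in for the guardedalloc bookkeeping:

#include <cstdio>

static int blocks_in_use = 0; /* placeholder for MEM_get_memory_blocks_in_use() */

namespace {
class LeakPrinter {
 public:
  ~LeakPrinter()
  {
    /* Runs at program exit, after later-constructed statics are gone. */
    if (blocks_in_use != 0) {
      std::printf("Error: %d memory block(s) not freed\n", blocks_in_use);
    }
  }
};
}  // namespace

void initialize_leak_detection(void)
{
  /* Constructed on the first call; call this as early as possible so that
   * every allocation made afterwards is covered by the report. */
  static LeakPrinter printer;
  (void)printer;
}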
@@ -898,6 +898,10 @@ void MEM_guarded_freeN(void *vmemh)
|
||||
memt = (MemTail *)(((char *)memh) + sizeof(MemHead) + memh->len);
|
||||
if (memt->tag3 == MEMTAG3) {
|
||||
|
||||
if (leak_detector_has_run) {
|
||||
MemorY_ErroR(memh->name, free_after_leak_detection_message);
|
||||
}
|
||||
|
||||
memh->tag1 = MEMFREE;
|
||||
memh->tag2 = MEMFREE;
|
||||
memt->tag3 = MEMFREE;
|
||||
|
@@ -33,6 +33,10 @@
|
||||
#ifndef __MALLOCN_INLINE_H__
|
||||
#define __MALLOCN_INLINE_H__
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
MEM_INLINE bool MEM_size_safe_multiply(size_t a, size_t b, size_t *result)
|
||||
{
|
||||
/* A size_t with its high-half bits all set to 1. */
|
||||
@@ -52,4 +56,8 @@ MEM_INLINE bool MEM_size_safe_multiply(size_t a, size_t b, size_t *result)
|
||||
return ((high_bits & (a | b)) == 0 || (*result / b == a));
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __MALLOCN_INLINE_H__ */
|
||||
|
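The mallocn_inline.h hunk only adds extern "C" guards, but the function it wraps is worth spelling out: MEM_size_safe_multiply trusts a*b when both operands fit in the low half of size_t, and otherwise verifies the product by dividing back. A self-contained sketch of that overflow test (the zero check is added here to keep the division safe):

#include <cstddef>

static bool size_safe_multiply(size_t a, size_t b, size_t *result)
{
  /* A size_t with its high-half bits all set to 1. */
  const size_t high_bits = ~(size_t)0 << (sizeof(size_t) * 4);
  *result = a * b;
  if (b == 0) {
    return true; /* 0 * a never overflows; also avoids dividing by zero below */
  }
  /* No overflow if both operands fit in the low half, or if dividing the
   * product back recovers the original operand. */
  return ((high_bits & (a | b)) == 0) || (*result / b == a);
}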
@@ -100,11 +100,18 @@ size_t malloc_usable_size(void *ptr);
|
||||
|
||||
#include "mallocn_inline.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define ALIGNED_MALLOC_MINIMUM_ALIGNMENT sizeof(void *)
|
||||
|
||||
void *aligned_malloc(size_t size, size_t alignment);
|
||||
void aligned_free(void *ptr);
|
||||
|
||||
extern bool leak_detector_has_run;
|
||||
extern char free_after_leak_detection_message[];
|
||||
|
||||
/* Prototypes for counted allocator functions */
|
||||
size_t MEM_lockfree_allocN_len(const void *vmemh) ATTR_WARN_UNUSED_RESULT;
|
||||
void MEM_lockfree_freeN(void *vmemh);
|
||||
@@ -191,4 +198,8 @@ size_t MEM_guarded_get_peak_memory(void) ATTR_WARN_UNUSED_RESULT;
|
||||
const char *MEM_guarded_name_ptr(void *vmemh);
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __MALLOCN_INTERN_H__ */
|
||||
|
@@ -101,6 +101,10 @@ size_t MEM_lockfree_allocN_len(const void *vmemh)
|
||||
|
||||
void MEM_lockfree_freeN(void *vmemh)
|
||||
{
|
||||
if (leak_detector_has_run) {
|
||||
print_error(free_after_leak_detection_message);
|
||||
}
|
||||
|
||||
MemHead *memh = MEMHEAD_FROM_PTR(vmemh);
|
||||
size_t len = MEM_lockfree_allocN_len(vmemh);
|
||||
|
||||
|
@@ -212,6 +212,8 @@ if(WITH_LIBMV)
|
||||
|
||||
|
||||
if(WITH_GTESTS)
|
||||
include(GTestTesting)
|
||||
|
||||
blender_add_lib(libmv_test_dataset "./libmv/multiview/test_data_sets.cc" "" "" "")
|
||||
|
||||
BLENDER_SRC_GTEST("libmv_predict_tracks" "./libmv/autotrack/predict_tracks_test.cc" "libmv_test_dataset;bf_intern_libmv;extern_ceres")
|
||||
|
68
intern/mantaflow/extern/manta_fluid_API.h
vendored
@@ -111,46 +111,16 @@ float *manta_get_phioutstatic_in(struct MANTA *fluid);
|
||||
|
||||
/* Smoke functions */
|
||||
void manta_smoke_export_script(struct MANTA *smoke, struct FluidModifierData *fmd);
|
||||
void manta_smoke_export(struct MANTA *smoke,
|
||||
float *dt,
|
||||
float *dx,
|
||||
float **dens,
|
||||
float **react,
|
||||
float **flame,
|
||||
float **fuel,
|
||||
float **heat,
|
||||
float **vx,
|
||||
float **vy,
|
||||
float **vz,
|
||||
float **r,
|
||||
float **g,
|
||||
float **b,
|
||||
int **flags,
|
||||
float **shadow);
|
||||
void manta_smoke_turbulence_export(struct MANTA *smoke,
|
||||
float **dens,
|
||||
float **react,
|
||||
float **flame,
|
||||
float **fuel,
|
||||
float **r,
|
||||
float **g,
|
||||
float **b,
|
||||
float **tcu,
|
||||
float **tcv,
|
||||
float **tcw,
|
||||
float **tcu2,
|
||||
float **tcv2,
|
||||
float **tcw2);
|
||||
void manta_smoke_get_rgba(struct MANTA *smoke, float *data, int sequential);
|
||||
void manta_smoke_turbulence_get_rgba(struct MANTA *smoke, float *data, int sequential);
|
||||
void manta_noise_get_rgba(struct MANTA *smoke, float *data, int sequential);
|
||||
void manta_smoke_get_rgba_fixed_color(struct MANTA *smoke,
|
||||
float color[3],
|
||||
float *data,
|
||||
int sequential);
|
||||
void manta_smoke_turbulence_get_rgba_fixed_color(struct MANTA *smoke,
|
||||
float color[3],
|
||||
float *data,
|
||||
int sequential);
|
||||
void manta_noise_get_rgba_fixed_color(struct MANTA *smoke,
|
||||
float color[3],
|
||||
float *data,
|
||||
int sequential);
|
||||
void manta_smoke_ensure_heat(struct MANTA *smoke, struct FluidModifierData *fmd);
|
||||
void manta_smoke_ensure_fire(struct MANTA *smoke, struct FluidModifierData *fmd);
|
||||
void manta_smoke_ensure_colors(struct MANTA *smoke, struct FluidModifierData *fmd);
|
||||
@@ -177,17 +147,23 @@ float *manta_smoke_get_emission_in(struct MANTA *smoke);
|
||||
int manta_smoke_has_heat(struct MANTA *smoke);
|
||||
int manta_smoke_has_fuel(struct MANTA *smoke);
|
||||
int manta_smoke_has_colors(struct MANTA *smoke);
|
||||
float *manta_smoke_turbulence_get_density(struct MANTA *smoke);
|
||||
float *manta_smoke_turbulence_get_fuel(struct MANTA *smoke);
|
||||
float *manta_smoke_turbulence_get_react(struct MANTA *smoke);
|
||||
float *manta_smoke_turbulence_get_color_r(struct MANTA *smoke);
|
||||
float *manta_smoke_turbulence_get_color_g(struct MANTA *smoke);
|
||||
float *manta_smoke_turbulence_get_color_b(struct MANTA *smoke);
|
||||
float *manta_smoke_turbulence_get_flame(struct MANTA *smoke);
|
||||
int manta_smoke_turbulence_has_fuel(struct MANTA *smoke);
|
||||
int manta_smoke_turbulence_has_colors(struct MANTA *smoke);
|
||||
void manta_smoke_turbulence_get_res(struct MANTA *smoke, int *res);
|
||||
int manta_smoke_turbulence_get_cells(struct MANTA *smoke);
|
||||
float *manta_noise_get_density(struct MANTA *smoke);
|
||||
float *manta_noise_get_fuel(struct MANTA *smoke);
|
||||
float *manta_noise_get_react(struct MANTA *smoke);
|
||||
float *manta_noise_get_color_r(struct MANTA *smoke);
|
||||
float *manta_noise_get_color_g(struct MANTA *smoke);
|
||||
float *manta_noise_get_color_b(struct MANTA *smoke);
|
||||
float *manta_noise_get_texture_u(struct MANTA *smoke);
|
||||
float *manta_noise_get_texture_v(struct MANTA *smoke);
|
||||
float *manta_noise_get_texture_w(struct MANTA *smoke);
|
||||
float *manta_noise_get_texture_u2(struct MANTA *smoke);
|
||||
float *manta_noise_get_texture_v2(struct MANTA *smoke);
|
||||
float *manta_noise_get_texture_w2(struct MANTA *smoke);
|
||||
float *manta_noise_get_flame(struct MANTA *smoke);
|
||||
int manta_noise_has_fuel(struct MANTA *smoke);
|
||||
int manta_noise_has_colors(struct MANTA *smoke);
|
||||
void manta_noise_get_res(struct MANTA *smoke, int *res);
|
||||
int manta_noise_get_cells(struct MANTA *smoke);
|
||||
|
||||
/* Liquid functions */
|
||||
void manta_liquid_export_script(struct MANTA *smoke, struct FluidModifierData *fmd);
|
||||
|
@@ -368,89 +368,6 @@ void manta_smoke_export_script(MANTA *smoke, FluidModifierData *fmd)
|
||||
smoke->exportSmokeScript(fmd);
|
||||
}
|
||||
|
||||
void manta_smoke_export(MANTA *smoke,
|
||||
float *dt,
|
||||
float *dx,
|
||||
float **dens,
|
||||
float **react,
|
||||
float **flame,
|
||||
float **fuel,
|
||||
float **heat,
|
||||
float **vx,
|
||||
float **vy,
|
||||
float **vz,
|
||||
float **r,
|
||||
float **g,
|
||||
float **b,
|
||||
int **flags,
|
||||
float **shadow)
|
||||
{
|
||||
if (dens)
|
||||
*dens = smoke->getDensity();
|
||||
if (fuel)
|
||||
*fuel = smoke->getFuel();
|
||||
if (react)
|
||||
*react = smoke->getReact();
|
||||
if (flame)
|
||||
*flame = smoke->getFlame();
|
||||
if (heat)
|
||||
*heat = smoke->getHeat();
|
||||
*vx = smoke->getVelocityX();
|
||||
*vy = smoke->getVelocityY();
|
||||
*vz = smoke->getVelocityZ();
|
||||
if (r)
|
||||
*r = smoke->getColorR();
|
||||
if (g)
|
||||
*g = smoke->getColorG();
|
||||
if (b)
|
||||
*b = smoke->getColorB();
|
||||
*flags = smoke->getFlags();
|
||||
if (shadow)
|
||||
*shadow = smoke->getShadow();
|
||||
*dt = 1; // dummy value, not needed for smoke
|
||||
*dx = 1; // dummy value, not needed for smoke
|
||||
}
|
||||
|
||||
void manta_smoke_turbulence_export(MANTA *smoke,
|
||||
float **dens,
|
||||
float **react,
|
||||
float **flame,
|
||||
float **fuel,
|
||||
float **r,
|
||||
float **g,
|
||||
float **b,
|
||||
float **tcu,
|
||||
float **tcv,
|
||||
float **tcw,
|
||||
float **tcu2,
|
||||
float **tcv2,
|
||||
float **tcw2)
|
||||
{
|
||||
if (!smoke && !(smoke->usingNoise()))
|
||||
return;
|
||||
|
||||
*dens = smoke->getDensityHigh();
|
||||
if (fuel)
|
||||
*fuel = smoke->getFuelHigh();
|
||||
if (react)
|
||||
*react = smoke->getReactHigh();
|
||||
if (flame)
|
||||
*flame = smoke->getFlameHigh();
|
||||
if (r)
|
||||
*r = smoke->getColorRHigh();
|
||||
if (g)
|
||||
*g = smoke->getColorGHigh();
|
||||
if (b)
|
||||
*b = smoke->getColorBHigh();
|
||||
*tcu = smoke->getTextureU();
|
||||
*tcv = smoke->getTextureV();
|
||||
*tcw = smoke->getTextureW();
|
||||
|
||||
*tcu2 = smoke->getTextureU2();
|
||||
*tcv2 = smoke->getTextureV2();
|
||||
*tcw2 = smoke->getTextureW2();
|
||||
}
|
||||
|
||||
static void get_rgba(
|
||||
float *r, float *g, float *b, float *a, int total_cells, float *data, int sequential)
|
||||
{
|
||||
@@ -484,7 +401,7 @@ void manta_smoke_get_rgba(MANTA *smoke, float *data, int sequential)
|
||||
sequential);
|
||||
}
|
||||
|
||||
void manta_smoke_turbulence_get_rgba(MANTA *smoke, float *data, int sequential)
|
||||
void manta_noise_get_rgba(MANTA *smoke, float *data, int sequential)
|
||||
{
|
||||
get_rgba(smoke->getColorRHigh(),
|
||||
smoke->getColorGHigh(),
|
||||
@@ -519,10 +436,7 @@ void manta_smoke_get_rgba_fixed_color(MANTA *smoke, float color[3], float *data,
|
||||
get_rgba_fixed_color(color, smoke->getTotalCells(), data, sequential);
|
||||
}
|
||||
|
||||
void manta_smoke_turbulence_get_rgba_fixed_color(MANTA *smoke,
|
||||
float color[3],
|
||||
float *data,
|
||||
int sequential)
|
||||
void manta_noise_get_rgba_fixed_color(MANTA *smoke, float color[3], float *data, int sequential)
|
||||
{
|
||||
get_rgba_fixed_color(color, smoke->getTotalCellsHigh(), data, sequential);
|
||||
}
|
||||
@@ -647,45 +561,69 @@ int manta_smoke_has_colors(MANTA *smoke)
|
||||
return (smoke->getColorR() && smoke->getColorG() && smoke->getColorB()) ? 1 : 0;
|
||||
}
|
||||
|
||||
float *manta_smoke_turbulence_get_density(MANTA *smoke)
|
||||
float *manta_noise_get_density(MANTA *smoke)
|
||||
{
|
||||
return (smoke && smoke->usingNoise()) ? smoke->getDensityHigh() : nullptr;
|
||||
}
|
||||
float *manta_smoke_turbulence_get_fuel(MANTA *smoke)
|
||||
float *manta_noise_get_fuel(MANTA *smoke)
|
||||
{
|
||||
return (smoke && smoke->usingNoise()) ? smoke->getFuelHigh() : nullptr;
|
||||
}
|
||||
float *manta_smoke_turbulence_get_react(MANTA *smoke)
|
||||
float *manta_noise_get_react(MANTA *smoke)
|
||||
{
|
||||
return (smoke && smoke->usingNoise()) ? smoke->getReactHigh() : nullptr;
|
||||
}
|
||||
float *manta_smoke_turbulence_get_color_r(MANTA *smoke)
|
||||
float *manta_noise_get_color_r(MANTA *smoke)
|
||||
{
|
||||
return (smoke && smoke->usingNoise()) ? smoke->getColorRHigh() : nullptr;
|
||||
}
|
||||
float *manta_smoke_turbulence_get_color_g(MANTA *smoke)
|
||||
float *manta_noise_get_color_g(MANTA *smoke)
|
||||
{
|
||||
return (smoke && smoke->usingNoise()) ? smoke->getColorGHigh() : nullptr;
|
||||
}
|
||||
float *manta_smoke_turbulence_get_color_b(MANTA *smoke)
|
||||
float *manta_noise_get_color_b(MANTA *smoke)
|
||||
{
|
||||
return (smoke && smoke->usingNoise()) ? smoke->getColorBHigh() : nullptr;
|
||||
}
|
||||
float *manta_smoke_turbulence_get_flame(MANTA *smoke)
|
||||
float *manta_noise_get_flame(MANTA *smoke)
|
||||
{
|
||||
return (smoke && smoke->usingNoise()) ? smoke->getFlameHigh() : nullptr;
|
||||
}
|
||||
float *manta_noise_get_texture_u(MANTA *smoke)
|
||||
{
|
||||
return (smoke && smoke->usingNoise()) ? smoke->getTextureU() : nullptr;
|
||||
}
|
||||
float *manta_noise_get_texture_v(MANTA *smoke)
|
||||
{
|
||||
return (smoke && smoke->usingNoise()) ? smoke->getTextureV() : nullptr;
|
||||
}
|
||||
float *manta_noise_get_texture_w(MANTA *smoke)
|
||||
{
|
||||
return (smoke && smoke->usingNoise()) ? smoke->getTextureW() : nullptr;
|
||||
}
|
||||
float *manta_noise_get_texture_u2(MANTA *smoke)
|
||||
{
|
||||
return (smoke && smoke->usingNoise()) ? smoke->getTextureU2() : nullptr;
|
||||
}
|
||||
float *manta_noise_get_texture_v2(MANTA *smoke)
|
||||
{
|
||||
return (smoke && smoke->usingNoise()) ? smoke->getTextureV2() : nullptr;
|
||||
}
|
||||
float *manta_noise_get_texture_w2(MANTA *smoke)
|
||||
{
|
||||
return (smoke && smoke->usingNoise()) ? smoke->getTextureW2() : nullptr;
|
||||
}
|
||||
|
||||
int manta_smoke_turbulence_has_fuel(MANTA *smoke)
|
||||
int manta_noise_has_fuel(MANTA *smoke)
|
||||
{
|
||||
return (smoke->getFuelHigh()) ? 1 : 0;
|
||||
}
|
||||
int manta_smoke_turbulence_has_colors(MANTA *smoke)
|
||||
int manta_noise_has_colors(MANTA *smoke)
|
||||
{
|
||||
return (smoke->getColorRHigh() && smoke->getColorGHigh() && smoke->getColorBHigh()) ? 1 : 0;
|
||||
}
|
||||
|
||||
void manta_smoke_turbulence_get_res(MANTA *smoke, int *res)
|
||||
void manta_noise_get_res(MANTA *smoke, int *res)
|
||||
{
|
||||
if (smoke && smoke->usingNoise()) {
|
||||
res[0] = smoke->getResXHigh();
|
||||
@@ -693,7 +631,7 @@ void manta_smoke_turbulence_get_res(MANTA *smoke, int *res)
|
||||
res[2] = smoke->getResZHigh();
|
||||
}
|
||||
}
|
||||
int manta_smoke_turbulence_get_cells(MANTA *smoke)
|
||||
int manta_noise_get_cells(MANTA *smoke)
|
||||
{
|
||||
int total_cells_high = smoke->getResXHigh() * smoke->getResYHigh() * smoke->getResZHigh();
|
||||
return (smoke && smoke->usingNoise()) ? total_cells_high : 0;
|
||||
|
@@ -100,6 +100,9 @@ color_r_in_s$ID$ = None\n\
|
||||
color_g_in_s$ID$ = None\n\
|
||||
color_b_in_s$ID$ = None\n\
|
||||
\n\
|
||||
# Set some initial values\n\
|
||||
shadow_s$ID$.setConst(-1)\n\
|
||||
\n\
|
||||
# Keep track of important objects in dict to load them later on\n\
|
||||
smoke_data_dict_final_s$ID$ = { 'density' : density_s$ID$, 'shadow' : shadow_s$ID$ }\n\
|
||||
smoke_data_dict_resume_s$ID$ = { 'densityIn' : densityIn_s$ID$, 'emission' : emission_s$ID$ }\n";
|
||||
@@ -490,6 +493,9 @@ def step_noise_$ID$():\n\
|
||||
advectSemiLagrange(flags=flags_s$ID$, vel=vel_s$ID$, grid=uvGrid1_s$ID$, order=2)\n\
|
||||
updateUvWeight(resetTime=sn$ID$.timestep*10.0 , index=1, numUvs=uvs_s$ID$, uv=uvGrid1_s$ID$, offset=uvs_offset_s$ID$)\n\
|
||||
\n\
|
||||
if not domainClosed_s$ID$ or using_outflow_s$ID$:\n\
|
||||
resetOutflow(flags=flags_sn$ID$, real=density_sn$ID$)\n\
|
||||
\n\
|
||||
mantaMsg('Energy')\n\
|
||||
computeEnergy(flags=flags_s$ID$, vel=vel_s$ID$, energy=energy_s$ID$)\n\
|
||||
\n\
|
||||
|
@@ -27,6 +27,10 @@ set(INC_SYS
|
||||
set(SRC
|
||||
source/sky_model.cpp
|
||||
source/sky_nishita.cpp
|
||||
|
||||
include/sky_model.h
|
||||
source/sky_float3.h
|
||||
source/sky_model_data.h
|
||||
)
|
||||
|
||||
set(LIB
|
||||
|
@@ -18,20 +18,21 @@
|
||||
#include "sky_model.h"
|
||||
|
||||
/* Constants */
|
||||
static const float rayleigh_scale = 8000.0f; // Rayleigh scale height (m)
|
||||
static const float mie_scale = 1200.0f; // Mie scale height (m)
|
||||
static const float mie_coeff = 2e-5f; // Mie scattering coefficient
|
||||
static const float mie_G = 0.76f; // aerosols anisotropy
|
||||
static const float sqr_G = mie_G * mie_G; // squared aerosols anisotropy
|
||||
static const float earth_radius = 6360000.0f; // radius of Earth (m)
|
||||
static const float atmosphere_radius = 6420000.0f; // radius of atmosphere (m)
|
||||
static const int steps = 32; // segments per primary ray
|
||||
static const int steps_light = 16; // segments per sun connection ray
|
||||
static const int num_wavelengths = 21; // number of wavelengths
|
||||
static const int max_luminous_efficacy = 683; // maximum luminous efficacy
|
||||
static const float step_lambda = (num_wavelengths - 1) *
|
||||
1e-9f; // step between each sampled wavelength
|
||||
/* irradiance at top of atmosphere */
|
||||
static const float rayleigh_scale = 8e3f; // Rayleigh scale height (m)
|
||||
static const float mie_scale = 1.2e3f; // Mie scale height (m)
|
||||
static const float mie_coeff = 2e-5f; // Mie scattering coefficient (m^-1)
|
||||
static const float mie_G = 0.76f; // aerosols anisotropy
|
||||
static const float sqr_G = mie_G * mie_G; // squared aerosols anisotropy
|
||||
static const float earth_radius = 6360e3f; // radius of Earth (m)
|
||||
static const float atmosphere_radius = 6420e3f; // radius of atmosphere (m)
|
||||
static const int steps = 32; // segments of primary ray
|
||||
static const int steps_light = 16; // segments of sun connection ray
|
||||
static const int num_wavelengths = 21; // number of wavelengths
|
||||
static const int min_wavelength = 380; // lowest sampled wavelength (nm)
|
||||
static const int max_wavelength = 780; // highest sampled wavelength (nm)
|
||||
// step between each sampled wavelength (nm)
|
||||
static const float step_lambda = (max_wavelength - min_wavelength) / (num_wavelengths - 1);
|
||||
/* Sun irradiance on top of the atmosphere (W*m^-2*nm^-1) */
|
||||
static const float irradiance[] = {
|
||||
1.45756829855592995315f, 1.56596305559738380175f, 1.65148449067670455293f,
|
||||
1.71496242737209314555f, 1.75797983805020541226f, 1.78256407885924539336f,
|
||||
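A quick sanity check on the new wavelength constants: 21 samples spanning 380-780 nm give a step of (780 - 380) / 20 = 20 nm, the same spacing the old constant encoded in metres. A one-liner to confirm:

#include <cstdio>

int main()
{
  const int num_wavelengths = 21, min_wavelength = 380, max_wavelength = 780;
  const float step_lambda = float(max_wavelength - min_wavelength) / (num_wavelengths - 1);
  std::printf("step_lambda = %g nm\n", step_lambda); /* prints 20 */
  return 0;
}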
@@ -40,7 +41,7 @@ static const float irradiance[] = {
|
||||
1.61993437242451854274f, 1.57083597368892080581f, 1.51932335059305478886f,
|
||||
1.46628494965214395407f, 1.41245852740172450623f, 1.35844961970384092709f,
|
||||
1.30474913844739281998f, 1.25174963272610817455f, 1.19975998755420620867f};
|
||||
/* Rayleigh scattering coefficient */
|
||||
/* Rayleigh scattering coefficient (m^-1) */
|
||||
static const float rayleigh_coeff[] = {
|
||||
0.00005424820087636473f, 0.00004418549866505454f, 0.00003635151910165377f,
|
||||
0.00003017929012024763f, 0.00002526320226989157f, 0.00002130859310621843f,
|
||||
@@ -49,7 +50,7 @@ static const float rayleigh_coeff[] = {
|
||||
0.00000765513700977967f, 0.00000674217203751443f, 0.00000596134125832052f,
|
||||
0.00000529034598065810f, 0.00000471115687557433f, 0.00000420910481110487f,
|
||||
0.00000377218381260133f, 0.00000339051255477280f, 0.00000305591531679811f};
|
||||
/* Ozone absorption coefficient */
|
||||
/* Ozone absorption coefficient (m^-1) */
|
||||
static const float ozone_coeff[] = {
|
||||
0.00000000325126849861f, 0.00000000585395365047f, 0.00000001977191155085f,
|
||||
0.00000007309568762914f, 0.00000020084561514287f, 0.00000040383958096161f,
|
||||
@@ -94,11 +95,10 @@ static float3 spec_to_xyz(float *spectrum)
|
||||
xyz.y += cmf_xyz[i][1] * spectrum[i];
|
||||
xyz.z += cmf_xyz[i][2] * spectrum[i];
|
||||
}
|
||||
return xyz * step_lambda * max_luminous_efficacy;
|
||||
return xyz * step_lambda;
|
||||
}
|
||||
|
||||
/* Atmosphere volume models */
|
||||
|
||||
static float density_rayleigh(float height)
|
||||
{
|
||||
return expf(-height / rayleigh_scale);
|
||||
@@ -135,11 +135,13 @@ static bool surface_intersection(float3 pos, float3 dir)
|
||||
{
|
||||
if (dir.z >= 0)
|
||||
return false;
|
||||
float t = dot(dir, -pos) / len_squared(dir);
|
||||
float D = pos.x * pos.x - 2.0f * (-pos.x) * dir.x * t + dir.x * t * dir.x * t + pos.y * pos.y -
|
||||
2.0f * (-pos.y) * dir.y * t + (dir.y * t) * (dir.y * t) + pos.z * pos.z -
|
||||
2.0f * (-pos.z) * dir.z * t + dir.z * t * dir.z * t;
|
||||
return (D <= sqr(earth_radius));
|
||||
float b = -2.0f * dot(dir, -pos);
|
||||
float c = len_squared(pos) - sqr(earth_radius);
|
||||
float t = b * b - 4.0f * c;
|
||||
if (t >= 0.0f)
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
static float3 atmosphere_intersection(float3 pos, float3 dir)
|
||||
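The surface_intersection rewrite above replaces the expanded per-component expression with the standard ray-sphere discriminant: for a unit-length direction, the ray from pos hits the Earth sphere when b^2 - 4c >= 0, where b = -2 * dot(dir, -pos) and c = |pos|^2 - earth_radius^2. A small sketch of that test, with minimal float3 helpers standing in for the sky model's own:

struct float3 {
  float x, y, z;
};

static float dot(const float3 &a, const float3 &b)
{
  return a.x * b.x + a.y * b.y + a.z * b.z;
}

static float len_squared(const float3 &a)
{
  return dot(a, a);
}

/* Does a ray from `pos` along unit-length `dir` hit a sphere of `radius`
 * centred at the origin? Mirrors the discriminant test in the hunk above. */
static bool hits_ground_sphere(const float3 &pos, const float3 &dir, float radius)
{
  if (dir.z >= 0.0f) {
    return false; /* rays pointing upwards never reach the ground */
  }
  const float b = 2.0f * dot(dir, pos); /* equivalent to -2 * dot(dir, -pos) */
  const float c = len_squared(pos) - radius * radius;
  return b * b - 4.0f * c >= 0.0f; /* non-negative discriminant: an intersection exists */
}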
@@ -152,41 +154,40 @@ static float3 atmosphere_intersection(float3 pos, float3 dir)
|
||||
|
||||
static float3 ray_optical_depth(float3 ray_origin, float3 ray_dir)
|
||||
{
|
||||
/* This code computes the optical depth along a ray through the atmosphere. */
|
||||
/* this code computes the optical depth along a ray through the atmosphere */
|
||||
float3 ray_end = atmosphere_intersection(ray_origin, ray_dir);
|
||||
float ray_length = distance(ray_origin, ray_end);
|
||||
|
||||
/* To compute the optical depth, we step along the ray in segments and
|
||||
* accumulate the optical depth along each segment. */
|
||||
/* to compute the optical depth, we step along the ray in segments and
|
||||
* accumulate the optical depth along each segment */
|
||||
float segment_length = ray_length / steps_light;
|
||||
float3 segment = segment_length * ray_dir;
|
||||
|
||||
/* Instead of tracking the transmission spectrum across all wavelengths directly,
|
||||
/* instead of tracking the transmission spectrum across all wavelengths directly,
|
||||
* we use the fact that the density always has the same spectrum for each type of
|
||||
* scattering, so we split the density into a constant spectrum and a factor and
|
||||
* only track the factors. */
|
||||
* only track the factors */
|
||||
float3 optical_depth = make_float3(0.0f, 0.0f, 0.0f);
|
||||
|
||||
/* The density of each segment is evaluated at its middle. */
|
||||
/* the density of each segment is evaluated at its middle */
|
||||
float3 P = ray_origin + 0.5f * segment;
|
||||
|
||||
for (int i = 0; i < steps_light; i++) {
|
||||
/* Compute height above sea level. */
|
||||
/* height above sea level */
|
||||
float height = len(P) - earth_radius;
|
||||
|
||||
/* Accumulate optical depth of this segment (density is assumed to be constant along it). */
|
||||
/* accumulate optical depth of this segment (density is assumed to be constant along it) */
|
||||
float3 density = make_float3(
|
||||
density_rayleigh(height), density_mie(height), density_ozone(height));
|
||||
optical_depth += density;
|
||||
|
||||
/* Advance along ray. */
|
||||
/* advance along ray */
|
||||
P += segment;
|
||||
}
|
||||
|
||||
return optical_depth * segment_length;
|
||||
}
|
||||
|
||||
/* Single Scattering implementation */
|
||||
static void single_scattering(float3 ray_dir,
|
||||
float3 sun_dir,
|
||||
float3 ray_origin,
|
||||
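ray_optical_depth above integrates the atmospheric density along the ray with a simple midpoint rule: the ray is cut into equal segments, each segment's density is sampled at its centre and treated as constant, and the sum is scaled by the segment length. A compact 1-D sketch of that scheme, with a generic density callback standing in for the Rayleigh/Mie/ozone profiles:

#include <functional>

static float optical_depth_1d(float ray_length,
                              int steps,
                              const std::function<float(float)> &density_at)
{
  const float segment_length = ray_length / steps;
  float depth = 0.0f;
  float s = 0.5f * segment_length; /* midpoint of the first segment */
  for (int i = 0; i < steps; i++) {
    depth += density_at(s); /* density assumed constant over the segment */
    s += segment_length;
  }
  return depth * segment_length;
}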
@@ -195,45 +196,45 @@ static void single_scattering(float3 ray_dir,
|
||||
float ozone_density,
|
||||
float *r_spectrum)
|
||||
{
|
||||
/* This code computes single-inscattering along a ray through the atmosphere. */
|
||||
/* this code computes single-inscattering along a ray through the atmosphere */
|
||||
float3 ray_end = atmosphere_intersection(ray_origin, ray_dir);
|
||||
float ray_length = distance(ray_origin, ray_end);
|
||||
|
||||
/* To compute the inscattering, we step along the ray in segments and accumulate
|
||||
* the inscattering as well as the optical depth along each segment. */
|
||||
/* to compute the inscattering, we step along the ray in segments and accumulate
|
||||
* the inscattering as well as the optical depth along each segment */
|
||||
float segment_length = ray_length / steps;
|
||||
float3 segment = segment_length * ray_dir;
|
||||
|
||||
/* Instead of tracking the transmission spectrum across all wavelengths directly,
|
||||
/* instead of tracking the transmission spectrum across all wavelengths directly,
|
||||
* we use the fact that the density always has the same spectrum for each type of
|
||||
* scattering, so we split the density into a constant spectrum and a factor and
|
||||
* only track the factors. */
|
||||
* only track the factors */
|
||||
float3 optical_depth = make_float3(0.0f, 0.0f, 0.0f);
|
||||
|
||||
/* Zero out light accumulation. */
|
||||
/* zero out light accumulation */
|
||||
for (int wl = 0; wl < num_wavelengths; wl++) {
|
||||
r_spectrum[wl] = 0.0f;
|
||||
}
|
||||
|
||||
/* Compute phase function for scattering and the density scale factor. */
|
||||
/* phase function for scattering and the density scale factor */
|
||||
float mu = dot(ray_dir, sun_dir);
|
||||
float3 phase_function = make_float3(phase_rayleigh(mu), phase_mie(mu), 0.0f);
|
||||
float3 density_scale = make_float3(air_density, dust_density, ozone_density);
|
||||
|
||||
/* The density and in-scattering of each segment is evaluated at its middle. */
|
||||
/* the density and in-scattering of each segment is evaluated at its middle */
|
||||
float3 P = ray_origin + 0.5f * segment;
|
||||
|
||||
for (int i = 0; i < steps; i++) {
|
||||
/* Compute height above sea level. */
|
||||
/* height above sea level */
|
||||
float height = len(P) - earth_radius;
|
||||
|
||||
/* Evaluate and accumulate optical depth along the ray. */
|
||||
/* evaluate and accumulate optical depth along the ray */
|
||||
float3 density = density_scale * make_float3(density_rayleigh(height),
|
||||
density_mie(height),
|
||||
density_ozone(height));
|
||||
optical_depth += segment_length * density;
|
||||
|
||||
/* If the earth isn't in the way, evaluate inscattering from the sun. */
|
||||
/* if the Earth isn't in the way, evaluate inscattering from the sun */
|
||||
if (!surface_intersection(P, sun_dir)) {
|
||||
float3 light_optical_depth = density_scale * ray_optical_depth(P, sun_dir);
|
||||
float3 total_optical_depth = optical_depth + light_optical_depth;
|
||||
@@ -247,7 +248,7 @@ static void single_scattering(float3 ray_dir,
|
||||
|
||||
float3 scattering_density = density * make_float3(rayleigh_coeff[wl], mie_coeff, 0.0f);
|
||||
|
||||
/* The total inscattered radiance from one segment is:
|
||||
/* the total inscattered radiance from one segment is:
|
||||
* Tr(A<->B) * Tr(B<->C) * sigma_s * phase * L * segment_length
|
||||
*
|
||||
* These terms are:
|
||||
@@ -258,19 +259,18 @@ static void single_scattering(float3 ray_dir,
|
||||
* length of the segment
|
||||
*
|
||||
* The code here is just that, with a bit of additional optimization to not store full
|
||||
* spectra for the optical depth.
|
||||
* spectra for the optical depth
|
||||
*/
|
||||
r_spectrum[wl] += attenuation * reduce_add(phase_function * scattering_density) *
|
||||
irradiance[wl] * segment_length;
|
||||
}
|
||||
}
|
||||
|
||||
/* Advance along ray. */
|
||||
/* advance along ray */
|
||||
P += segment;
|
||||
}
|
||||
}
|
||||
|
||||
/* calculate texture array */
|
||||
void SKY_nishita_skymodel_precompute_texture(float *pixels,
|
||||
int stride,
|
||||
int start_y,
|
||||
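The comment block in single_scattering spells out the per-segment radiance term: Tr(A<->B) * Tr(B<->C) * sigma_s * phase * L * segment_length, where the two transmittances collapse into a single exponential of the summed optical depth. A per-wavelength sketch of just that accumulation step:

#include <cmath>

/* One segment's contribution to the in-scattered radiance, as described in
 * the hunk above. All inputs are scalars for a single wavelength. */
static float segment_inscatter(float optical_depth_to_camera, /* camera <-> segment midpoint */
                               float optical_depth_to_sun,    /* segment midpoint <-> atmosphere top */
                               float scattering_coeff,        /* sigma_s at the midpoint */
                               float phase,                   /* phase function value */
                               float sun_irradiance,
                               float segment_length)
{
  const float attenuation = std::exp(-(optical_depth_to_camera + optical_depth_to_sun));
  return attenuation * scattering_coeff * phase * sun_irradiance * segment_length;
}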
@@ -305,6 +305,7 @@ void SKY_nishita_skymodel_precompute_texture(float *pixels,
|
||||
single_scattering(dir, sun_dir, cam_pos, air_density, dust_density, ozone_density, spectrum);
|
||||
float3 xyz = spec_to_xyz(spectrum);
|
||||
|
||||
/* store pixels */
|
||||
int pos_x = x * stride;
|
||||
pixel_row[pos_x] = xyz.x;
|
||||
pixel_row[pos_x + 1] = xyz.y;
|
||||
@@ -318,7 +319,7 @@ void SKY_nishita_skymodel_precompute_texture(float *pixels,
|
||||
}
|
||||
}
|
||||
|
||||
/* Sun disc */
|
||||
/*********** Sun ***********/
|
||||
static void sun_radiation(float3 cam_dir,
|
||||
float altitude,
|
||||
float air_density,
|
||||
@@ -329,9 +330,9 @@ static void sun_radiation(float3 cam_dir,
|
||||
float3 cam_pos = make_float3(0, 0, earth_radius + altitude);
|
||||
float3 optical_depth = ray_optical_depth(cam_pos, cam_dir);
|
||||
|
||||
/* Compute final spectrum. */
|
||||
/* compute final spectrum */
|
||||
for (int i = 0; i < num_wavelengths; i++) {
|
||||
/* Combine spectra and the optical depth into transmittance. */
|
||||
/* combine spectra and the optical depth into transmittance */
|
||||
float transmittance = rayleigh_coeff[i] * optical_depth.x * air_density +
|
||||
1.11f * mie_coeff * optical_depth.y * dust_density;
|
||||
r_spectrum[i] = irradiance[i] * expf(-transmittance) / solid_angle;
|
||||
|
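sun_radiation above is plain Beer-Lambert attenuation of the top-of-atmosphere irradiance: per wavelength, the optical depths along the view ray are weighted by the Rayleigh and (slightly boosted) Mie coefficients, exponentiated, and the result is divided by the sun's solid angle to turn irradiance into radiance. A per-wavelength sketch of that step:

#include <cmath>

static float sun_radiance_at_wavelength(float irradiance,    /* top-of-atmosphere, W*m^-2*nm^-1 */
                                        float rayleigh_coeff,
                                        float mie_coeff,
                                        float rayleigh_depth, /* optical_depth.x */
                                        float mie_depth,      /* optical_depth.y */
                                        float air_density,
                                        float dust_density,
                                        float solid_angle)
{
  /* The 1.11 factor on the Mie term matches the hunk above. */
  const float transmittance = rayleigh_coeff * rayleigh_depth * air_density +
                              1.11f * mie_coeff * mie_depth * dust_density;
  return irradiance * std::exp(-transmittance) / solid_angle;
}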
Binary file not shown.
Binary file not shown.
Submodule release/datafiles/locale updated: f1ab6e28bf...4af22e0492
@@ -229,6 +229,8 @@ const UserDef U_default = {
|
||||
|
||||
.collection_instance_empty_size = 1.0f,
|
||||
|
||||
.statusbar_flag = STATUSBAR_SHOW_VERSION,
|
||||
|
||||
.runtime =
|
||||
{
|
||||
.is_dirty = 0,
|
||||
|
Submodule release/scripts/addons updated: 9128155de3...25b00a0a52
Submodule release/scripts/addons_contrib updated: 45aa940dab...f2f4a8b3bf
@@ -367,7 +367,7 @@ def enable(module_name, *, default_set=False, persistent=False, handle_error=Non
|
||||
|
||||
if mod.bl_info.get("blender", (0, 0, 0)) < (2, 80, 0):
|
||||
if _bpy.app.debug:
|
||||
print(f"Warning: Add-on '{module_name:s}' was not upgraded for 2.80, ignoring")
|
||||
print("Warning: Add-on '%s' was not upgraded for 2.80, ignoring" % module_name)
|
||||
return None
|
||||
|
||||
# 2) Try register collected modules.
|
||||
|
@@ -789,7 +789,7 @@ def register_tool(tool_cls, *, after=None, separator=False, group=False):
|
||||
|
||||
cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
|
||||
if cls is None:
|
||||
raise Exception(f"Space type {space_type!r} has no toolbar")
|
||||
raise Exception("Space type %r has no toolbar" % space_type)
|
||||
tools = cls._tools[context_mode]
|
||||
|
||||
# First sanity check
|
||||
@@ -799,9 +799,9 @@ def register_tool(tool_cls, *, after=None, separator=False, group=False):
|
||||
if item is not None
|
||||
}
|
||||
if not issubclass(tool_cls, WorkSpaceTool):
|
||||
raise Exception(f"Expected WorkSpaceTool subclass, not {type(tool_cls)!r}")
|
||||
raise Exception("Expected WorkSpaceTool subclass, not %r" % type(tool_cls))
|
||||
if tool_cls.bl_idname in tools_id:
|
||||
raise Exception(f"Tool {tool_cls.bl_idname!r} already exists!")
|
||||
raise Exception("Tool %r already exists!" % tool_cls.bl_idname)
|
||||
del tools_id, WorkSpaceTool
|
||||
|
||||
# Convert the class into a ToolDef.
|
||||
@@ -900,7 +900,7 @@ def unregister_tool(tool_cls):
|
||||
from bl_ui.space_toolsystem_common import ToolSelectPanelHelper
|
||||
cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
|
||||
if cls is None:
|
||||
raise Exception(f"Space type {space_type!r} has no toolbar")
|
||||
raise Exception("Space type %r has no toolbar" % space_type)
|
||||
tools = cls._tools[context_mode]
|
||||
|
||||
tool_def = tool_cls._bl_tool
|
||||
@@ -952,7 +952,7 @@ def unregister_tool(tool_cls):
|
||||
break
|
||||
|
||||
if not changed:
|
||||
raise Exception(f"Unable to remove {tool_cls!r}")
|
||||
raise Exception("Unable to remove %r" % tool_cls)
|
||||
del tool_cls._bl_tool
|
||||
|
||||
keymap_data = tool_def.keymap
|
||||
@@ -963,7 +963,7 @@ def unregister_tool(tool_cls):
|
||||
for kc in (keyconfigs.default, keyconfigs.addon):
|
||||
km = kc.keymaps.get(keymap_data[0])
|
||||
if km is None:
|
||||
print(f"Warning keymap {keymap_data[0]!r} not found in {kc.name!r}!")
|
||||
print("Warning keymap %r not found in %r!" % (keymap_data[0], kc.name))
|
||||
else:
|
||||
kc.keymaps.remove(km)
|
||||
|
||||
|
@@ -85,7 +85,7 @@ class ImagePreviewCollection(dict):
|
||||
|
||||
def new(self, name):
|
||||
if name in self:
|
||||
raise KeyError(f"key {name!r} already exists")
|
||||
raise KeyError("key %r already exists" % name)
|
||||
p = self[name] = _utils_previews.new(
|
||||
self._gen_key(name))
|
||||
return p
|
||||
@@ -93,7 +93,7 @@ class ImagePreviewCollection(dict):
|
||||
|
||||
def load(self, name, path, path_type, force_reload=False):
|
||||
if name in self:
|
||||
raise KeyError(f"key {name!r} already exists")
|
||||
raise KeyError("key %r already exists" % name)
|
||||
p = self[name] = _utils_previews.load(
|
||||
self._gen_key(name), path, path_type, force_reload)
|
||||
return p
|
||||
|
@@ -54,6 +54,15 @@ class Prefs(bpy.types.KeyConfigPreferences):
|
||||
default='PLAY',
|
||||
update=update_fn,
|
||||
)
|
||||
use_alt_click_leader: BoolProperty(
|
||||
name="Alt Click Tool Prompt",
|
||||
description=(
|
||||
"Tapping Alt (without pressing any other keys) shows a prompt in the status-bar\n"
|
||||
"prompting a second keystroke to activate the tool"
|
||||
),
|
||||
default=False,
|
||||
update=update_fn,
|
||||
)
|
||||
use_select_all_toggle: BoolProperty(
|
||||
name="Select All Toggles",
|
||||
description=(
|
||||
@@ -164,13 +173,16 @@ class Prefs(bpy.types.KeyConfigPreferences):
|
||||
col = layout.column()
|
||||
col.row().prop(self, "select_mouse", text="Select with Mouse Button", expand=True)
|
||||
col.row().prop(self, "spacebar_action", text="Spacebar Action", expand=True)
|
||||
|
||||
if is_select_left:
|
||||
col.row().prop(self, "gizmo_action", text="Activate Gizmo Event", expand=True)
|
||||
|
||||
# Checkboxes sub-layout.
|
||||
col = layout.column()
|
||||
sub = col.column(align=True)
|
||||
sub.prop(self, "use_select_all_toggle")
|
||||
row = sub.row()
|
||||
row.prop(self, "use_select_all_toggle")
|
||||
row.prop(self, "use_alt_click_leader")
|
||||
|
||||
# 3DView settings.
|
||||
col = layout.column()
|
||||
@@ -217,6 +229,7 @@ def load():
|
||||
kc_prefs.select_mouse == 'LEFT' and
|
||||
kc_prefs.gizmo_action == 'DRAG'
|
||||
),
|
||||
use_alt_click_leader=kc_prefs.use_alt_click_leader,
|
||||
use_pie_click_drag=kc_prefs.use_pie_click_drag,
|
||||
),
|
||||
)
|
||||
|
@@ -52,6 +52,8 @@ class Params:
|
||||
"use_v3d_shade_ex_pie",
|
||||
# Swap orbit/pan keys (for 2D workflows).
|
||||
"use_v3d_mmb_pan",
|
||||
# Alt click to access tools.
|
||||
"use_alt_click_leader",
|
||||
# Experimental option.
|
||||
"use_pie_click_drag",
|
||||
"v3d_tilde_action",
|
||||
@@ -73,6 +75,7 @@ class Params:
|
||||
use_v3d_tab_menu=False,
|
||||
use_v3d_shade_ex_pie=False,
|
||||
use_v3d_mmb_pan=False,
|
||||
use_alt_click_leader=False,
|
||||
use_pie_click_drag=False,
|
||||
v3d_tilde_action='VIEW',
|
||||
v3d_alt_mmb_drag_action='RELATIVE',
|
||||
@@ -126,6 +129,7 @@ class Params:
|
||||
self.v3d_tilde_action = v3d_tilde_action
|
||||
self.v3d_alt_mmb_drag_action = v3d_alt_mmb_drag_action
|
||||
|
||||
self.use_alt_click_leader = use_alt_click_leader
|
||||
self.use_pie_click_drag = use_pie_click_drag
|
||||
if not use_pie_click_drag:
|
||||
self.pie_value = 'PRESS'
|
||||
@@ -449,11 +453,15 @@ def km_window(params):
|
||||
op_menu("TOPBAR_MT_file_context_menu", {"type": 'F4', "value": 'PRESS'}),
|
||||
# Pass through when when no tool-system exists or the fallback isn't available.
|
||||
("wm.toolbar_fallback_pie", {"type": 'W', "value": 'PRESS', "alt": True}, None),
|
||||
# Alt as "Leader-Key".
|
||||
("wm.toolbar_prompt", {"type": 'LEFT_ALT', "value": 'CLICK'}, None),
|
||||
("wm.toolbar_prompt", {"type": 'RIGHT_ALT', "value": 'CLICK'}, None),
|
||||
])
|
||||
|
||||
if params.use_alt_click_leader:
|
||||
items.extend([
|
||||
# Alt as "Leader-Key".
|
||||
("wm.toolbar_prompt", {"type": 'LEFT_ALT', "value": 'CLICK'}, None),
|
||||
("wm.toolbar_prompt", {"type": 'RIGHT_ALT', "value": 'CLICK'}, None),
|
||||
])
|
||||
|
||||
if params.spacebar_action == 'TOOL':
|
||||
items.append(
|
||||
("wm.toolbar", {"type": 'SPACE', "value": 'PRESS'}, None),
|
||||
@@ -741,6 +749,9 @@ def km_property_editor(_params):
|
||||
# ShaderFX panels
|
||||
("object.shaderfx_remove", {"type": 'X', "value": 'PRESS'}, {"properties": [("report", True)]}),
|
||||
("object.shaderfx_remove", {"type": 'DEL', "value": 'PRESS'}, {"properties": [("report", True)]}),
|
||||
# Constraint panels
|
||||
("constraint.delete", {"type": 'X', "value": 'PRESS'}, {"properties": [("report", True)]}),
|
||||
("constraint.delete", {"type": 'DEL', "value": 'PRESS'}, {"properties": [("report", True)]}),
|
||||
])
|
||||
|
||||
return keymap
|
||||
@@ -850,7 +861,14 @@ def km_uv_editor(params):
|
||||
{"properties": [("extend", False)]}),
|
||||
("uv.select_loop", {"type": params.select_mouse, "value": params.select_mouse_value, "shift": True, "alt": True},
|
||||
{"properties": [("extend", True)]}),
|
||||
("uv.shortest_path_pick", {"type": params.select_mouse, "value": params.select_mouse_value, "ctrl": True}, None),
|
||||
("uv.select_edge_ring", {"type": params.select_mouse, "value": params.select_mouse_value, "ctrl": True, "alt": True},
|
||||
{"properties": [("extend", False)]}),
|
||||
("uv.select_edge_ring", {"type": params.select_mouse, "value": params.select_mouse_value, "ctrl": True, "shift": True, "alt": True},
|
||||
{"properties": [("extend", True)]}),
|
||||
("uv.shortest_path_pick", {"type": params.select_mouse, "value": params.select_mouse_value, "ctrl": True},
|
||||
{"properties": [("use_fill", False)]}),
|
||||
("uv.shortest_path_pick", {"type": params.select_mouse, "value": params.select_mouse_value, "ctrl": True, "shift": True},
|
||||
{"properties": [("use_fill", True)]}),
|
||||
("uv.select_split", {"type": 'Y', "value": 'PRESS'}, None),
|
||||
("uv.select_box", {"type": 'B', "value": 'PRESS'},
|
||||
{"properties": [("pinned", False)]}),
|
||||
@@ -3158,7 +3176,7 @@ def km_grease_pencil_stroke_edit_mode(params):
|
||||
("gpencil.copy", {"type": 'C', "value": 'PRESS', "ctrl": True}, None),
|
||||
("gpencil.paste", {"type": 'V', "value": 'PRESS', "ctrl": True}, None),
|
||||
# Snap
|
||||
op_menu("GPENCIL_MT_snap", {"type": 'S', "value": 'PRESS', "shift": True}),
|
||||
op_menu_pie("GPENCIL_MT_snap_pie", {"type": 'S', "value": 'PRESS', "shift": True}),
|
||||
# Show/hide
|
||||
("gpencil.reveal", {"type": 'H', "value": 'PRESS', "alt": True}, None),
|
||||
("gpencil.hide", {"type": 'H', "value": 'PRESS'},
|
||||
@@ -4438,9 +4456,9 @@ def km_mesh(params):
|
||||
{"properties": [("TRANSFORM_OT_edge_slide", [("release_confirm", False), ],)]}),
|
||||
("mesh.inset", {"type": 'I', "value": 'PRESS'}, None),
|
||||
("mesh.bevel", {"type": 'B', "value": 'PRESS', "ctrl": True},
|
||||
{"properties": [("vertex_only", False)]}),
|
||||
{"properties": [("affect", 'EDGES')]}),
|
||||
("mesh.bevel", {"type": 'B', "value": 'PRESS', "shift": True, "ctrl": True},
|
||||
{"properties": [("vertex_only", True)]}),
|
||||
{"properties": [("affect", 'VERTICES')]}),
|
||||
# Selection modes.
|
||||
*_template_items_editmode_mesh_select_mode(params),
|
||||
# Loop Select with alt. Double click in case MMB emulation is on (below).
|
||||
@@ -5192,7 +5210,7 @@ def km_bevel_modal_map(_params):
|
||||
("SEGMENTS_DOWN", {"type": 'NUMPAD_MINUS', "value": 'PRESS', "any": True}, None),
|
||||
("OFFSET_MODE_CHANGE", {"type": 'M', "value": 'PRESS', "any": True}, None),
|
||||
("CLAMP_OVERLAP_TOGGLE", {"type": 'C', "value": 'PRESS', "any": True}, None),
|
||||
("VERTEX_ONLY_TOGGLE", {"type": 'V', "value": 'PRESS', "any": True}, None),
|
||||
("AFFECT_CHANGE", {"type": 'V', "value": 'PRESS', "any": True}, None),
|
||||
("HARDEN_NORMALS_TOGGLE", {"type": 'H', "value": 'PRESS', "any": True}, None),
|
||||
("MARK_SEAM_TOGGLE", {"type": 'U', "value": 'PRESS', "any": True}, None),
|
||||
("MARK_SHARP_TOGGLE", {"type": 'K', "value": 'PRESS', "any": True}, None),
|
||||
|
@@ -2316,7 +2316,7 @@ def km_grease_pencil_stroke_edit_mode(params):
|
||||
("gpencil.copy", {"type": 'C', "value": 'PRESS', "ctrl": True}, None),
|
||||
("gpencil.paste", {"type": 'V', "value": 'PRESS', "ctrl": True}, None),
|
||||
# Snap
|
||||
op_menu("GPENCIL_MT_snap", {"type": 'X', "value": 'PRESS', "shift": True}),
|
||||
op_menu_pie("GPENCIL_MT_snap_pie", {"type": 'X', "value": 'PRESS', "shift": True}),
|
||||
# Show/hide
|
||||
("gpencil.reveal", {"type": 'H', "value": 'PRESS', "alt": True}, None),
|
||||
("gpencil.hide", {"type": 'H', "value": 'PRESS', "ctrl": True},
|
||||
|
@@ -143,10 +143,10 @@ class PlayRenderedAnim(Operator):
|
||||
]
|
||||
cmd.extend(opts)
|
||||
elif preset == 'FRAMECYCLER':
|
||||
opts = [file, f"{scene.frame_start:d}-{scene.frame_end:d}"]
|
||||
opts = [file, "%d-%d" % (scene.frame_start, scene.frame_end)]
|
||||
cmd.extend(opts)
|
||||
elif preset == 'RV':
|
||||
opts = ["-fps", str(rd.fps), "-play", f"[ {file:s} ]"]
|
||||
opts = ["-fps", str(rd.fps), "-play", "[ %s ]" % file]
|
||||
cmd.extend(opts)
|
||||
elif preset == 'MPLAYER':
|
||||
opts = []
|
||||
@@ -156,7 +156,7 @@ class PlayRenderedAnim(Operator):
|
||||
opts += [
|
||||
("mf://" + file.replace("#", "?")),
|
||||
"-mf",
|
||||
f"fps={fps_final:4f}"
|
||||
"fps=%.4f" % fps_final,
|
||||
]
|
||||
|
||||
opts += ["-loop", "0", "-really-quiet", "-fs"]
|
||||
|
@@ -119,8 +119,11 @@ class PREFERENCES_OT_copy_prev(Operator):
|
||||
# Find config folder from previous version.
|
||||
import os
|
||||
version = bpy.app.version
|
||||
version_new = ((version[0] * 100) + version[1])
|
||||
version_old = ((version[0] * 100) + version[1]) - 1
|
||||
while version_old % 10 > 0:
|
||||
# Ensure we only try to copy files from a point release.
|
||||
# The check below ensures the second numbers match.
|
||||
while (version_new % 100) // 10 == (version_old % 100) // 10:
|
||||
version_split = version_old // 100, version_old % 100
|
||||
if os.path.isdir(cls._old_version_path(version_split)):
|
||||
return version_split
|
||||
|
@@ -160,7 +160,12 @@ def extend(obj, EXTEND_MODE):
|
||||
l_b_uv = [l[uv_act].uv for l in l_b]
|
||||
|
||||
if EXTEND_MODE == 'LENGTH_AVERAGE':
|
||||
fac = edge_lengths[l_b[2].edge.index][0] / edge_lengths[l_a[1].edge.index][0]
|
||||
d1 = edge_lengths[l_a[1].edge.index][0]
|
||||
d2 = edge_lengths[l_b[2].edge.index][0]
|
||||
try:
|
||||
fac = d2 / d1
|
||||
except ZeroDivisionError:
|
||||
fac = 1.0
|
||||
elif EXTEND_MODE == 'LENGTH':
|
||||
a0, b0, c0 = l_a[3].vert.co, l_a[0].vert.co, l_b[3].vert.co
|
||||
a1, b1, c1 = l_a[2].vert.co, l_a[1].vert.co, l_b[2].vert.co
|
||||
|
@@ -1450,6 +1450,10 @@ class WM_OT_properties_edit(Operator):
|
||||
)
|
||||
|
||||
layout = self.layout
|
||||
|
||||
layout.use_property_split = True
|
||||
layout.use_property_decorate = False
|
||||
|
||||
layout.prop(self, "property")
|
||||
layout.prop(self, "value")
|
||||
|
||||
@@ -1460,18 +1464,18 @@ class WM_OT_properties_edit(Operator):
|
||||
row.enabled = proptype in {int, float, str}
|
||||
row.prop(self, "default")
|
||||
|
||||
row = layout.row(align=True)
|
||||
row.prop(self, "min")
|
||||
row.prop(self, "max")
|
||||
col = layout.column(align=True)
|
||||
col.prop(self, "min")
|
||||
col.prop(self, "max")
|
||||
|
||||
row = layout.row()
|
||||
row.prop(self, "use_soft_limits")
|
||||
row.prop(self, "is_overridable_library")
|
||||
col = layout.column()
|
||||
col.prop(self, "is_overridable_library")
|
||||
col.prop(self, "use_soft_limits")
|
||||
|
||||
row = layout.row(align=True)
|
||||
row.enabled = self.use_soft_limits
|
||||
row.prop(self, "soft_min", text="Soft Min")
|
||||
row.prop(self, "soft_max", text="Soft Max")
|
||||
col = layout.column(align=True)
|
||||
col.enabled = self.use_soft_limits
|
||||
col.prop(self, "soft_min", text="Soft Min")
|
||||
col.prop(self, "soft_max", text="Max")
|
||||
layout.prop(self, "description")
|
||||
|
||||
if is_array and proptype == float:
|
||||
|
@@ -267,7 +267,7 @@ class BONE_PT_display(BoneButtonsPanel, Panel):
|
||||
if bone:
|
||||
|
||||
col = layout.column()
|
||||
col.prop(bone, "hide", text="Hide", toggle=0)
|
||||
col.prop(bone, "hide", text="Hide", toggle=False)
|
||||
|
||||
|
||||
class BONE_PT_display_custom_shape(BoneButtonsPanel, Panel):
|
||||
|
@@ -346,9 +346,11 @@ class DATA_PT_shape_keys(MeshButtonsPanel, Panel):
|
||||
|
||||
enable_edit = ob.mode != 'EDIT'
|
||||
enable_edit_value = False
|
||||
enable_pin = False
|
||||
|
||||
if ob.show_only_shape_key is False:
|
||||
if enable_edit or (ob.type == 'MESH' and ob.use_shape_key_edit_mode):
|
||||
if enable_edit or (ob.use_shape_key_edit_mode and ob.type == 'MESH'):
|
||||
enable_pin = True
|
||||
if ob.show_only_shape_key:
|
||||
enable_edit_value = True
|
||||
|
||||
row = layout.row()
|
||||
@@ -386,7 +388,7 @@ class DATA_PT_shape_keys(MeshButtonsPanel, Panel):
|
||||
sub = row.row(align=True)
|
||||
sub.label() # XXX, for alignment only
|
||||
subsub = sub.row(align=True)
|
||||
subsub.active = enable_edit_value
|
||||
subsub.active = enable_pin
|
||||
subsub.prop(ob, "show_only_shape_key", text="")
|
||||
sub.prop(ob, "use_shape_key_edit_mode", text="")
|
||||
|
||||
|
@@ -279,6 +279,31 @@ class GPENCIL_MT_snap(Menu):
|
||||
layout.operator("view3d.snap_cursor_to_grid", text="Cursor to Grid")
|
||||
|
||||
|
||||
class GPENCIL_MT_snap_pie(Menu):
|
||||
bl_label = "Snap"
|
||||
|
||||
def draw(self, _context):
|
||||
layout = self.layout
|
||||
pie = layout.menu_pie()
|
||||
|
||||
pie.operator("view3d.snap_cursor_to_grid", text="Cursor to Grid", icon='CURSOR')
|
||||
pie.operator("gpencil.snap_to_grid", text="Selection to Grid", icon='RESTRICT_SELECT_OFF')
|
||||
pie.operator("gpencil.snap_cursor_to_selected", text="Cursor to Selected", icon='CURSOR')
|
||||
pie.operator(
|
||||
"gpencil.snap_to_cursor",
|
||||
text="Selection to Cursor",
|
||||
icon='RESTRICT_SELECT_OFF'
|
||||
).use_offset = False
|
||||
pie.operator(
|
||||
"gpencil.snap_to_cursor",
|
||||
text="Selection to Cursor (Keep Offset)",
|
||||
icon='RESTRICT_SELECT_OFF'
|
||||
).use_offset = True
|
||||
pie.separator()
|
||||
pie.operator("view3d.snap_cursor_to_center", text="Cursor to World Origin", icon='CURSOR')
|
||||
pie.separator()
|
||||
|
||||
|
||||
class GPENCIL_MT_move_to_layer(Menu):
|
||||
bl_label = "Move to Layer"
|
||||
|
||||
@@ -901,6 +926,7 @@ class GreasePencilFlipTintColors(Operator):
|
||||
|
||||
classes = (
|
||||
GPENCIL_MT_snap,
|
||||
GPENCIL_MT_snap_pie,
|
||||
GPENCIL_MT_cleanup,
|
||||
GPENCIL_MT_move_to_layer,
|
||||
GPENCIL_MT_layer_active,
|
||||
|
@@ -212,7 +212,6 @@ class OBJECT_PT_display(ObjectButtonsPanel, Panel):
|
||||
layout = self.layout
|
||||
layout.use_property_split = True
|
||||
|
||||
|
||||
obj = context.object
|
||||
obj_type = obj.type
|
||||
is_geometry = (obj_type in {'MESH', 'CURVE', 'SURFACE', 'META', 'FONT', 'VOLUME', 'HAIR', 'POINTCLOUD'})
|
||||
@@ -237,10 +236,11 @@ class OBJECT_PT_display(ObjectButtonsPanel, Panel):
|
||||
col.prop(obj, "show_in_front", text="In Front")
|
||||
# if obj_type == 'MESH' or is_empty_image:
|
||||
# col.prop(obj, "show_transparent", text="Transparency")
|
||||
sub = layout.column()
|
||||
if is_wire:
|
||||
# wire objects only use the max. display type for duplis
|
||||
col.active = is_dupli
|
||||
col.prop(obj, "display_type", text="Display As")
|
||||
sub.active = is_dupli
|
||||
sub.prop(obj, "display_type", text="Display As")
|
||||
|
||||
if is_geometry or is_dupli or is_empty_image or is_gpencil:
|
||||
# Only useful with object having faces/materials...
|
||||
|
@@ -541,10 +541,15 @@ def brush_settings(layout, context, brush, popover=False):
|
||||
# Sculpt Mode #
|
||||
if mode == 'SCULPT':
|
||||
capabilities = brush.sculpt_capabilities
|
||||
sculpt_tool = brush.sculpt_tool
|
||||
|
||||
# normal_radius_factor
|
||||
layout.prop(brush, "normal_radius_factor", slider=True)
|
||||
layout.prop(brush, "hardness", slider=True)
|
||||
|
||||
row = layout.row(align=True)
|
||||
row.prop(brush, "hardness", slider=True)
|
||||
row.prop(brush, "invert_hardness_pressure", text = "")
|
||||
row.prop(brush, "use_hardness_pressure", text = "")
|
||||
|
||||
# auto_smooth_factor and use_inverse_smooth_pressure
|
||||
if capabilities.has_auto_smooth:
|
||||
@@ -571,7 +576,7 @@ def brush_settings(layout, context, brush, popover=False):
|
||||
# crease_pinch_factor
|
||||
if capabilities.has_pinch_factor:
|
||||
text = "Pinch"
|
||||
if brush.sculpt_tool in {'BLOB', 'SNAKE_HOOK'}:
|
||||
if sculpt_tool in {'BLOB', 'SNAKE_HOOK'}:
text = "Magnify"
layout.prop(brush, "crease_pinch_factor", slider=True, text=text)

@@ -621,17 +626,19 @@ def brush_settings(layout, context, brush, popover=False):
row.prop(ups, "use_unified_color", text="", icon='BRUSHES_ALL')
layout.prop(brush, "blend", text="Blend Mode")

if brush.sculpt_tool == 'CLAY_STRIPS':
# Per sculpt tool options.

if sculpt_tool == 'CLAY_STRIPS':
row = layout.row()
row.prop(brush, "tip_roundness")

if brush.sculpt_tool == 'ELASTIC_DEFORM':
elif sculpt_tool == 'ELASTIC_DEFORM':
layout.separator()
layout.prop(brush, "elastic_deform_type")
layout.prop(brush, "elastic_deform_volume_preservation", slider=True)
layout.separator()

if brush.sculpt_tool == 'POSE':
elif sculpt_tool == 'POSE':
layout.separator()
layout.prop(brush, "pose_deform_type")
layout.prop(brush, "pose_origin_type")
@@ -643,10 +650,9 @@ def brush_settings(layout, context, brush, popover=False):
layout.prop(brush, "use_connected_only")
layout.prop(brush, "disconnected_distance_max")


layout.separator()

if brush.sculpt_tool == 'CLOTH':
elif sculpt_tool == 'CLOTH':
layout.separator()
layout.prop(brush, "cloth_sim_limit")
layout.prop(brush, "cloth_sim_falloff")
@@ -658,41 +664,63 @@ def brush_settings(layout, context, brush, popover=False):
layout.prop(brush, "cloth_damping")
layout.separator()

if brush.sculpt_tool == 'SCRAPE':
elif sculpt_tool == 'SCRAPE':
row = layout.row()
row.prop(brush, "area_radius_factor", slider=True)
row = layout.row()
row.prop(brush, "invert_to_scrape_fill", text="Invert to Fill")

if brush.sculpt_tool == 'FILL':
elif sculpt_tool == 'FILL':
row = layout.row()
row.prop(brush, "area_radius_factor", slider=True)
row = layout.row()
row.prop(brush, "invert_to_scrape_fill", text="Invert to Scrape")

if brush.sculpt_tool == 'GRAB':
elif sculpt_tool == 'GRAB':
layout.prop(brush, "use_grab_active_vertex")

if brush.sculpt_tool == 'PAINT':
col = layout.column()
col.prop(brush, "flow")
col.prop(brush, "wet_mix")
col.prop(brush, "wet_persistence")
col.prop(brush, "density")
col.prop(brush, "tip_roundness")
col.prop(brush, "tip_scale_x")
elif sculpt_tool == 'PAINT':
row = layout.row(align=True)
row.prop(brush, "flow")
row.prop(brush, "invert_flow_pressure", text = "")
row.prop(brush, "use_flow_pressure", text= "")

if brush.sculpt_tool == 'SMEAR':
row = layout.row(align=True)
row.prop(brush, "wet_mix")
row.prop(brush, "invert_wet_mix_pressure", text = "")
row.prop(brush, "use_wet_mix_pressure", text = "")

row = layout.row(align=True)
row.prop(brush, "wet_persistence")
row.prop(brush, "invert_wet_persistence_pressure", text ="")
row.prop(brush, "use_wet_persistence_pressure", text= "")

row = layout.row(align=True)
row.prop(brush, "density")
row.prop(brush, "invert_density_pressure", text = "")
row.prop(brush, "use_density_pressure", text = "")

row = layout.row()
row.prop(brush, "tip_roundness")

row = layout.row()
row.prop(brush, "tip_scale_x")

elif sculpt_tool == 'SMEAR':
col = layout.column()
col.prop(brush, "smear_deform_type")

if brush.sculpt_tool == 'MULTIPLANE_SCRAPE':
elif sculpt_tool == 'TOPOLOGY':
col = layout.column()
col.prop(brush, "slide_deform_type")

elif sculpt_tool == 'MULTIPLANE_SCRAPE':
col = layout.column()
col.prop(brush, "multiplane_scrape_angle")
col.prop(brush, "use_multiplane_scrape_dynamic")
col.prop(brush, "show_multiplane_scrape_planes_preview")

if brush.sculpt_tool == 'SMOOTH':
elif sculpt_tool == 'SMOOTH':
col = layout.column()
col.prop(brush, "smooth_deform_type")
if brush.smooth_deform_type == 'SURFACE':
@@ -700,9 +728,11 @@ def brush_settings(layout, context, brush, popover=False):
col.prop(brush, "surface_smooth_current_vertex")
col.prop(brush, "surface_smooth_iterations")

if brush.sculpt_tool == 'MASK':
elif sculpt_tool == 'MASK':
layout.row().prop(brush, "mask_tool", expand=True)

# End sculpt_tool interface.

# 3D and 2D Texture Paint Mode.
elif mode in {'PAINT_TEXTURE', 'PAINT_2D'}:
capabilities = brush.image_paint_capabilities

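Note on the PAINT rows above: each paint setting in the new layout pairs its value slider with two icon-only pressure toggles on one aligned row. A minimal sketch of that pattern, assuming the "invert_<name>_pressure" / "use_<name>_pressure" property naming seen in the diff (the helper name itself is hypothetical, not part of the patch):

    def paint_pressure_row(layout, brush, prop_name):
        # One aligned row: the value slider plus its invert/use pressure toggles (icon-only).
        row = layout.row(align=True)
        row.prop(brush, prop_name)
        row.prop(brush, "invert_%s_pressure" % prop_name, text="")
        row.prop(brush, "use_%s_pressure" % prop_name, text="")

    # e.g. paint_pressure_row(layout, brush, "flow") reproduces the "flow" row above.
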
@@ -1238,7 +1238,7 @@ class CLIP_MT_view_zoom(Menu):

layout.operator(
"clip.view_zoom_ratio",
text=iface_(f"Zoom {a:d}:{b:d}"),
text=iface_("Zoom %d:%d") % (a, b),
translate=False,
).ratio = a / b

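The CLIP_MT_view_zoom change above swaps an f-string for %-formatting because the UI translation lookup (iface_) needs a constant message: the literal "Zoom %d:%d" can be extracted and translated once, and the numbers are substituted afterwards. A minimal sketch, assuming iface_ is bpy.app.translations.pgettext_iface as elsewhere in these scripts:

    label = iface_("Zoom %d:%d") % (a, b)   # translate the fixed format string, then fill in the ratio
    props = layout.operator("clip.view_zoom_ratio", text=label, translate=False)
    props.ratio = a / b                     # translate=False: the text is already translated
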
@@ -97,16 +97,14 @@ class FILEBROWSER_PT_filter(Panel):
params = space.params
is_lib_browser = params.use_library_browsing

row = layout.row(align=True)
row.prop(params, "use_filter", text="", toggle=0)
row.label(text="Filter")
layout.prop(params, "use_filter", text="Filter", toggle=False)

col = layout.column()
col.active = params.use_filter

row = col.row()
row.label(icon='FILE_FOLDER')
row.prop(params, "use_filter_folder", text="Folders", toggle=0)
row.prop(params, "use_filter_folder", text="Folders", toggle=False)

if params.filter_glob:
col.label(text=params.filter_glob)
@@ -114,33 +112,33 @@ class FILEBROWSER_PT_filter(Panel):
row = col.row()
row.label(icon='FILE_BLEND')
row.prop(params, "use_filter_blender",
text=".blend Files", toggle=0)
text=".blend Files", toggle=False)
row = col.row()
row.label(icon='FILE_BACKUP')
row.prop(params, "use_filter_backup",
text="Backup .blend Files", toggle=0)
text="Backup .blend Files", toggle=False)
row = col.row()
row.label(icon='FILE_IMAGE')
row.prop(params, "use_filter_image", text="Image Files", toggle=0)
row.prop(params, "use_filter_image", text="Image Files", toggle=False)
row = col.row()
row.label(icon='FILE_MOVIE')
row.prop(params, "use_filter_movie", text="Movie Files", toggle=0)
row.prop(params, "use_filter_movie", text="Movie Files", toggle=False)
row = col.row()
row.label(icon='FILE_SCRIPT')
row.prop(params, "use_filter_script",
text="Script Files", toggle=0)
text="Script Files", toggle=False)
row = col.row()
row.label(icon='FILE_FONT')
row.prop(params, "use_filter_font", text="Font Files", toggle=0)
row.prop(params, "use_filter_font", text="Font Files", toggle=False)
row = col.row()
row.label(icon='FILE_SOUND')
row.prop(params, "use_filter_sound", text="Sound Files", toggle=0)
row.prop(params, "use_filter_sound", text="Sound Files", toggle=False)
row = col.row()
row.label(icon='FILE_TEXT')
row.prop(params, "use_filter_text", text="Text Files", toggle=0)
row.prop(params, "use_filter_text", text="Text Files", toggle=False)
row = col.row()
row.label(icon='FILE_VOLUME')
row.prop(params, "use_filter_volume", text="Volume Files", toggle=0)
row.prop(params, "use_filter_volume", text="Volume Files", toggle=False)

col.separator()

@@ -148,7 +146,7 @@ class FILEBROWSER_PT_filter(Panel):
row = col.row()
row.label(icon='BLANK1') # Indentation
row.prop(params, "use_filter_blendid",
text="Blender IDs", toggle=0)
text="Blender IDs", toggle=False)
if params.use_filter_blendid:
row = col.row()
row.label(icon='BLANK1') # Indentation

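The FILEBROWSER_PT_filter rows above all follow the same icon-plus-checkbox shape, with toggle=0 rewritten as the clearer toggle=False. Purely as an illustration (this loop is not part of the patch), the repeated rows could be expressed as data, assuming the property names and icons listed in the diff:

    for prop_name, icon, label in (
        ("use_filter_image", 'FILE_IMAGE', "Image Files"),
        ("use_filter_movie", 'FILE_MOVIE', "Movie Files"),
        ("use_filter_sound", 'FILE_SOUND', "Sound Files"),
    ):
        row = col.row()
        row.label(icon=icon)
        row.prop(params, prop_name, text=label, toggle=False)
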
@@ -699,7 +699,12 @@ class IMAGE_HT_header(Header):

# Proportional Editing
row = layout.row(align=True)
row.prop(tool_settings, "use_proportional_edit", icon_only=True)
row.prop(
tool_settings,
"use_proportional_edit",
icon_only=True,
icon='PROP_CON' if tool_settings.use_proportional_connected else 'PROP_ON',
)
sub = row.row(align=True)
sub.active = tool_settings.use_proportional_edit
sub.prop_with_popover(

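In the IMAGE_HT_header hunk above, the proportional-editing toggle now picks its icon from related state instead of using the property's default icon, so the header shows whether "connected only" falloff is active. A minimal sketch using the same UILayout.prop arguments as the diff:

    icon = 'PROP_CON' if tool_settings.use_proportional_connected else 'PROP_ON'
    row.prop(tool_settings, "use_proportional_edit", icon_only=True, icon=icon)
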
@@ -31,17 +31,20 @@ class STATUSBAR_HT_header(Header):

layout.separator_spacer()

# messages
layout.template_reports_banner()
layout.template_running_jobs()

# Nothing in the center.
layout.separator_spacer()

# stats
scene = context.scene
view_layer = context.view_layer
row = layout.row()
row.alignment = 'RIGHT'

layout.label(text=scene.statistics(view_layer), translate=False)
# Stats & Info
row.label(text=context.screen.statusbar_info(), translate=False)

# Messages
row.template_reports_banner()

# Progress Bar
row.template_running_jobs()


classes = (

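The STATUSBAR_HT_header hunk above moves the reports banner and running-jobs widgets into a single right-aligned row and replaces the scene.statistics() label with context.screen.statusbar_info(), assumed here to return the preformatted stats/memory/version text controlled by the new preferences further down. The tail of draw() then reads essentially:

    row = layout.row()
    row.alignment = 'RIGHT'
    row.label(text=context.screen.statusbar_info(), translate=False)  # stats & info
    row.template_reports_banner()   # messages
    row.template_running_jobs()     # progress bars
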
@@ -271,19 +271,24 @@ class ToolSelectPanelHelper:
yield item, i
i += 1

# Special internal function, gives use items that contain keymaps.
@staticmethod
def _tools_flatten_with_keymap(tools):
def _tools_flatten_with_dynamic(tools, *, context):
"""
Expands dynamic items, indices aren't aligned with other flatten functions.
The context may be None, use as signal to return all items.
"""
for item_parent in tools:
if item_parent is None:
continue
yield None
for item in item_parent if (type(item_parent) is tuple) else (item_parent,):
# skip None or generator function
if item is None or _item_is_fn(item):
continue
if item.keymap is not None:
if item is None:
yield None
elif _item_is_fn(item):
yield from ToolSelectPanelHelper._tools_flatten_with_dynamic(item(context), context=context)
else:
yield item


@classmethod
def _tool_get_active(cls, context, space_type, mode, with_icon=False):
"""
@@ -484,8 +489,12 @@ class ToolSelectPanelHelper:
else:
context_descr = context_mode.replace("_", " ").title()

for item in cls._tools_flatten_with_keymap(tools):
for item in cls._tools_flatten_with_dynamic(tools, context=None):
if item is None:
continue
keymap_data = item.keymap
if keymap_data is None:
continue
if callable(keymap_data[0]):
cls._km_action_simple(kc_default, kc_default, context_descr, item.label, keymap_data)

@@ -498,8 +507,13 @@ class ToolSelectPanelHelper:

for context_mode_test, tools in cls.tools_all():
if context_mode_test == context_mode:
for item in cls._tools_flatten_with_keymap(tools):
km_name = item.keymap[0]
for item in cls._tools_flatten(tools):
if item is None:
continue
keymap_data = item.keymap
if keymap_data is None:
continue
km_name = keymap_data[0]
# print((km.name, cls.bl_space_type, 'WINDOW', []))

if km_name in visited:

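The _tools_flatten_with_dynamic generator above replaces _tools_flatten_with_keymap: callable ("dynamic") entries are expanded by calling them with the context and recursing into whatever they return, and context=None is the signal to return every possible item (used when registering keymaps). A simplified, self-contained sketch of that pattern (tuple groups and keymap filtering omitted):

    def flatten_with_dynamic(tools, *, context):
        for item in tools:
            if item is None:
                yield None                     # separator entry
            elif callable(item):
                # Dynamic entry: call it to get concrete items, then recurse.
                yield from flatten_with_dynamic(item(context), context=context)
            else:
                yield item
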
@@ -455,6 +455,11 @@ class _defs_view3d_add:
row = layout.row()
row.prop(props, "plane_axis", text="")
row = layout.row()
row.scale_x = 0.8
row.label(text="Orientation:")
row = layout.row()
row.prop(props, "plane_orientation", text="")
row = layout.row()
row.scale_x = 0.7
row.prop(props, "plane_origin")

@@ -744,18 +749,22 @@ class _defs_edit_mesh:

region_is_header = context.region.type == 'TOOL_HEADER'

edge_bevel = props.affect == 'EDGES'

if not extra:
if region_is_header:
layout.prop(props, "offset_type", text="")
else:
layout.row().prop(props, "affect", expand=True)
layout.separator()
layout.prop(props, "offset_type")

layout.prop(props, "segments")

row = layout.row()
row.prop(props, "profile_type", text="" if region_is_header else None)
if props.profile_type == 'SUPERELLIPSE':
layout.prop(props, "profile", text="Shape", slider=True)
if region_is_header:
layout.prop(props, "affect", text="")

layout.prop(props, "profile", text="Shape", slider=True)

if region_is_header:
layout.popover("TOPBAR_PT_tool_settings_extra", text="...")
@@ -766,25 +775,35 @@ class _defs_edit_mesh:
layout.use_property_split = True
layout.use_property_decorate = False

if props.profile_type == 'CUSTOM':
layout.prop(props, "profile", text="Miter Shape", slider=True)
layout.prop(props, "material")

col = layout.column()
col.prop(props, "vertex_only")
col.prop(props, "harden_normals")
col.prop(props, "clamp_overlap")
col.prop(props, "loop_slide")
col.prop(props, "harden_normals")

col = layout.column(heading="Mark")
col.active = edge_bevel
col.prop(props, "mark_seam", text="Seam")
col.prop(props, "mark_sharp", text="Sharp")

layout.prop(props, "material")

layout.prop(props, "miter_outer", text="Outer Miter")
layout.prop(props, "miter_inner", text="Inner Miter")
col = layout.column()
col.active = edge_bevel
col.prop(props, "miter_outer", text="Miter Outer")
col.prop(props, "miter_inner", text="Inner")
if props.miter_inner == 'ARC':
layout.prop(props, "spread")
col.prop(props, "spread")

layout.separator()

col = layout.column()
col.active = edge_bevel
col.prop(props, "vmesh_method", text="Intersections")

layout.prop(props, "face_strength_mode", text="Face Strength")

layout.prop(props, "profile_type")

if props.profile_type == 'CUSTOM':
tool_settings = context.tool_settings
@@ -2496,15 +2515,17 @@ class VIEW3D_PT_tools_active(ToolSelectPanelHelper, Panel):
_defs_sculpt.cloth_filter,
lambda context: (
(_defs_sculpt.color_filter,)
if bpy.context.preferences.view.show_developer_ui and \
bpy.context.preferences.experimental.use_sculpt_vertex_colors
if context is None or (
context.preferences.view.show_developer_ui and
context.preferences.experimental.use_sculpt_vertex_colors)
else ()
),
None,
lambda context: (
(_defs_sculpt.mask_by_color,)
if bpy.context.preferences.view.show_developer_ui and \
bpy.context.preferences.experimental.use_sculpt_vertex_colors
if context is None or (
context.preferences.view.show_developer_ui and
context.preferences.experimental.use_sculpt_vertex_colors)
else ()
),
None,

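The two lambdas above are such dynamic entries for VIEW3D_PT_tools_active: each receives the context (or None, meaning "list everything") and returns a tuple of tool definitions, so the experimental sculpt tools only appear when the relevant preferences are enabled. A rough named-function equivalent of the first lambda, using the same attributes shown in the diff:

    def sculpt_color_filter_items(context):
        if context is None or (
                context.preferences.view.show_developer_ui and
                context.preferences.experimental.use_sculpt_vertex_colors
        ):
            return (_defs_sculpt.color_filter,)
        return ()
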
@@ -283,6 +283,22 @@ class USERPREF_PT_interface_temporary_windows(InterfacePanel, CenterAlignMixIn,
col.prop(view, "filebrowser_display_type", text="File Browser")


class USERPREF_PT_interface_statusbar(InterfacePanel, CenterAlignMixIn, Panel):
bl_label = "Status Bar"
bl_parent_id = "USERPREF_PT_interface_editors"
bl_options = {'DEFAULT_CLOSED'}

def draw_centered(self, context, layout):
prefs = context.preferences
view = prefs.view

col = layout.column(heading="Show")
col.prop(view, "show_statusbar_stats", text="Scene Statistics")
col.prop(view, "show_statusbar_memory", text="System Memory")
col.prop(view, "show_statusbar_vram", text="Video Memory")
col.prop(view, "show_statusbar_version", text="Blender Version")


class USERPREF_PT_interface_menus(InterfacePanel, Panel):
bl_label = "Menus"
bl_options = {'DEFAULT_CLOSED'}
@@ -994,7 +1010,7 @@ class USERPREF_PT_theme_bone_color_sets(ThemePanel, CenterAlignMixIn, Panel):
layout.use_property_split = True

for i, ui in enumerate(theme.bone_color_sets, 1):
layout.label(text=iface_(f"Color Set {i:d}"), translate=False)
layout.label(text=iface_("Color Set %d") % i, translate=False)

flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=False)

@@ -2189,6 +2205,7 @@ classes = (
USERPREF_PT_interface_display,
USERPREF_PT_interface_editors,
USERPREF_PT_interface_temporary_windows,
USERPREF_PT_interface_statusbar,
USERPREF_PT_interface_translation,
USERPREF_PT_interface_text,
USERPREF_PT_interface_menus,

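The USERPREF_PT_interface_statusbar panel above follows the usual recipe for adding a preferences sub-panel: point bl_parent_id at the parent panel, draw the checkboxes in a column with a "Show" heading, and append the class to the module-level classes tuple so the existing register loop picks it up. A minimal sketch of that registration step, assuming the standard pattern at the end of space_userpref.py:

    if __name__ == "__main__":  # only when the script is run directly
        from bpy.utils import register_class
        for cls in classes:
            register_class(cls)
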
Some files were not shown because too many files have changed in this diff.