Compare commits
268 Commits
temp-geome...geometry-n
SHA1 | Author | Date | |
---|---|---|---|
abd2ee4a8d | |||
98b1e09d06 | |||
4bdb67d5c0 | |||
62953c2bf3 | |||
0e757da3b5 | |||
200cbb6adf | |||
c61c9f61c6 | |||
9acba6f716 | |||
81fb9720fe | |||
96feddd85f | |||
043926bfd2 | |||
ae4afcfbee | |||
e0c199a527 | |||
d6354a7ee2 | |||
e7ec68cd46 | |||
e902379764 | |||
be3bb4da03 | |||
6505f9c09a | |||
d9b8ddd883 | |||
f58d9d7500 | |||
920edc358c | |||
390c4efa0c | |||
2f0f269684 | |||
267ba329ec | |||
bce42fbfe4 | |||
59414e6a6f | |||
4eef1d2edc | |||
f14c9444c8 | |||
399e5cc069 | |||
5ae666c2cc | |||
b9525e3279 | |||
1bb2c7dda3 | |||
a0f41a4fd1 | |||
7aa47cd6ff | |||
1ecc2b305d | |||
f7d9ec6a40 | |||
7c4c6809b9 | |||
69f4088ba1 | |||
eb9402996e | |||
a7680e2bcf | |||
bc3b48400d | |||
659ef4938b | |||
683041fd48 | |||
39ee0a5484 | |||
45e2c12124 | |||
c19b363256 | |||
abe38ee033 | |||
d40126d636 | |||
1bb864e40a | |||
0d97c0dfa6 | |||
76d78c9212 | |||
d1eec74c56 | |||
ac257864a6 | |||
6d0e92cf0a | |||
881e148cc5 | |||
4a647ee188 | |||
622b7fff15 | |||
6c241b2dd7 | |||
c1b655c06a | |||
b4f3c3cca0 | |||
e3f8ffc482 | |||
559999b2f5 | |||
257725f829 | |||
b356288a75 | |||
d344d25b7b | |||
8742bbd607 | |||
3576e669cb | |||
044def82b9 | |||
92d12741a5 | |||
41a74159c3 | |||
8a2b329a21 | |||
268508d2b0 | |||
ce08a2bd48 | |||
7585f55a4c | |||
49f1c8ebec | |||
9831362243 | |||
710842cd1d | |||
433fcbec1a | |||
ef1ee262ad | |||
72386c64ef | |||
e68e897077 | |||
52c6d36e4e | |||
75a92d451e | |||
d84bf961f8 | |||
02d61faab0 | |||
7f4eb529c4 | |||
18e768c78e | |||
26c7be27b7 | |||
e72b235fce | |||
8aea0e8ac2 | |||
2ce1b89d75 | |||
0649131be7 | |||
1bebd12eaf | |||
797c8f6d75 | |||
1a67617631 | |||
c2359ff4aa | |||
763ebc3156 | |||
f7d9bfafd9 | |||
088e6a5282 | |||
9798d68cc1 | |||
323335f076 | |||
70d493e5ee | |||
87366baaed | |||
8e1d7d9e8c | |||
baa79dc805 | |||
3736a7cadd | |||
66ea03e908 | |||
2781e51cda | |||
1f2ce121a3 | |||
9524019d58 | |||
61e0f22945 | |||
b7d7051dc3 | |||
e0516d4d8e | |||
097e1ca1a1 | |||
dc628e7c38 | |||
bb960c85a5 | |||
eb25446b9d | |||
0c7df205a5 | |||
0f1fe96e0b | |||
417924d840 | |||
a581367a2b | |||
00928f7826 | |||
91971093bb | |||
96beb711e3 | |||
7e930f4e8b | |||
aa780a4c17 | |||
5c3496f3cd | |||
425b15e23f | |||
5c2730f64d | |||
866a56f76e | |||
63a286e4ce | |||
15c6390960 | |||
083cde0b43 | |||
0783a9a194 | |||
b05f841c3a | |||
4db4177587 | |||
c0f3d31998 | |||
b26cbb5d53 | |||
52e3608fe9 | |||
6ba6e97407 | |||
4913b624d8 | |||
96ce1e9a95 | |||
a3efa1d798 | |||
e50553c61a | |||
b458ea6b23 | |||
056d7bb175 | |||
9769cf1ee6 | |||
4a2734a835 | |||
1ddd717803 | |||
0066e59f2d | |||
ec7fffb033 | |||
956cf9a48d | |||
c27095b5cb | |||
8ef8cb7e34 | |||
0f6bee8e54 | |||
3093f89498 | |||
f211030344 | |||
b277025d8e | |||
94572a4e30 | |||
bc2230df71 | |||
caa942b033 | |||
5dff952b67 | |||
5877e34eb4 | |||
770bcfac9b | |||
b081108819 | |||
2be7b2aaf9 | |||
912b38001f | |||
c4352f44bc | |||
0feca5f07d | |||
2984fb2b49 | |||
9e6553c0d4 | |||
107a0894cc | |||
8ecc1bea4c | |||
91ad33ef8f | |||
fa5190e742 | |||
74ed591f62 | |||
3d5efb4335 | |||
140b7cfe0d | |||
af7cc3f8bb | |||
d2c4af9865 | |||
e5c637f5fe | |||
11d12d543d | |||
2c6114b238 | |||
152bd43ae1 | |||
45012c3309 | |||
5177c35498 | |||
49c8042f29 | |||
f8c52590c0 | |||
0b897e0308 | |||
c34438dd78 | |||
e0a4dc9396 | |||
51fa44522f | |||
343e13f464 | |||
1103809fab | |||
5b89d49b0d | |||
e805bfc768 | |||
67cb4fdbdc | |||
8ed74c9daf | |||
d7cb25e028 | |||
e3395093ff | |||
f73a420e5a | |||
b8b240b329 | |||
ae2827bb52 | |||
bb97284f8a | |||
209e82da45 | |||
e63d43e6b9 | |||
802ba35654 | |||
79c9c11b35 | |||
e1d0ab2dbc | |||
687f994251 | |||
5c7767e0e0 | |||
af9ee8e2bc | |||
562f2e604a | |||
84e81b8cc3 | |||
5ae9527770 | |||
082c17a2d2 | |||
09677d737c | |||
bec7248178 | |||
0103cb2051 | |||
9f38ebefcb | |||
8f934852f0 | |||
cb16db7803 | |||
f8965f12a1 | |||
7f2a20f89b | |||
5566818603 | |||
9a06c6a040 | |||
e097116072 | |||
b844caca6d | |||
046ac5fccd | |||
ef4fbba596 | |||
aa8360d0fa | |||
76c356c12e | |||
e04491073d | |||
a5dda5996e | |||
c48e4b7a44 | |||
4ae2d6206a | |||
47f5a635da | |||
eb8574bc8b | |||
bc4e31afb6 | |||
9d7672be71 | |||
994e7178bb | |||
1719743066 | |||
8910033f57 | |||
2a4c6c612a | |||
b062b922f9 | |||
895f4620a0 | |||
fafed6234b | |||
7ff8094a8b | |||
5aabf67a9c | |||
ab8c7fe946 | |||
a05012d500 | |||
97a93566e9 | |||
da4d697772 | |||
87218899be | |||
ffa0a6df9d | |||
706fa5ad76 | |||
b7f6de490d | |||
7e485b4620 | |||
a7dba81aab | |||
4606e83a75 | |||
1d28de57a4 | |||
3cfcfb938d | |||
bcdc6910a0 | |||
7793e8c884 | |||
05d9bd7c4a | |||
9255ce9247 | |||
a0ce0154e7 | |||
0cd7f7ddd1 |
@@ -10,9 +10,8 @@
# Changes that belong here:
# - Massive comment, doxy-sections, or spelling corrections.
# - Clang-format, PEP8 or other automated changes which are *strictly* "no functional change".
# - Several commits should be added to this list at once, because adding
# one extra commit (to edit this file) after every cleanup is noisy.
# - No clang-tidy changes.
# - Several smaller commits should be added to this list at once, because adding
# one extra commit (to edit this file) after every small cleanup is noisy.
#
# Note:
# - The comment above the SHA should be the first line of the commit.
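The hunk above edits what is evidently a `git blame` ignore-list (a `.git-blame-ignore-revs`-style file) holding the cleanup commits listed below. As a hedged reminder of how such a list is consumed, a minimal sketch follows; the file name and the blamed path are assumptions, not taken from this diff:

```sh
# Point a single blame run at the ignore list (available since git 2.23).
git blame --ignore-revs-file .git-blame-ignore-revs path/to/file.c

# Or register it once per clone so every `git blame` skips the listed
# cleanup commits automatically.
git config blame.ignoreRevsFile .git-blame-ignore-revs
```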
@@ -93,12 +92,78 @@ c42a6b77b52560d257279de2cb624b4ef2c0d24c
|
||||
# Cleanup: use doxy sections for imbuf
|
||||
c207f7c22e1439e0b285fba5d2c072bdae23f981
|
||||
|
||||
# Cleanup: Clang-Tidy, modernize-use-bool-literals
|
||||
af35ada2f3fa8da4d46b3a71de724d353d716820
|
||||
|
||||
# Cleanup: Use nullptr everywhere in fluid code
|
||||
311031ecd03dbfbf43e1df672a395f24b2e7d4d3
|
||||
|
||||
# Cleanup: Clang-Tidy, modernize-redundant-void-arg
|
||||
a331d5c99299c4514ca33c843b1c79b872f2728d
|
||||
|
||||
# Cleanup: Clang-Tidy modernize-use-nullptr
|
||||
16732def37c5a66f3ea28dbe247b09cc6bca6677
|
||||
|
||||
# Cleanup: Clang-tidy, modernize-concat-nested-namespaces
|
||||
4525049aa0cf818f6483dce589ac9791eb562338
|
||||
|
||||
# Cleanup: Clang-tidy else-after-return
|
||||
ae342ed4511cf2e144dcd27ce2c635d3d536f9ad
|
||||
|
||||
# Cleanup: Clang-Tidy, readability-redundant-member-init
|
||||
190170d4cc92ff34abe1744a10474ac4f1074086
|
||||
|
||||
# Cleanup: use 'filepath' instead of 'name' for ImBuf utilities
|
||||
99f56b4c16323f96c0cbf54e392fb509fcac5bda
|
||||
|
||||
# Cleanup: clang-format
|
||||
c4d8f6a4a8ddc29ed27311ed7578b3c8c31399d2
|
||||
b5d310b569e07a937798a2d38539cfd290149f1c
|
||||
8c846cccd6bdfd3e90a695fabbf05f53e5466a57
|
||||
4eac03d821fa17546f562485f7d073813a5e5943
|
||||
1166110a9d66af9c5a47cee2be591f50fdc445e8
|
||||
|
||||
# Cleanup: clang-format.
|
||||
40d4a4cb1a6b4c3c2a486e8f2868f547530e0811
|
||||
4eac03d821fa17546f562485f7d073813a5e5943
|
||||
|
||||
# Cleanup: use preprocessor version check for PyTypeObject declaration
|
||||
cd9acfed4f7674b84be965d469a367aef96f8af3
|
||||
|
||||
# Cycles: fix compilation of OSL shaders following API change
|
||||
b980cd163a9d5d77eeffc2e353333e739fa9e719
|
||||
|
||||
# Cleanup: clang-tidy suppress warnings for PyTypeObject.tp_print
|
||||
efd71aad4f22ec0073d80b8dd296015d3f395aa8
|
||||
|
||||
# Cleanup: fix wrong merge, remove extra unique_ptr.
|
||||
6507449e54a167c63a72229e4d0119dd2af68ae5
|
||||
|
||||
# Cleanup: fix some clang tidy issues
|
||||
525a042c5c7513c41240b118acca002f6c60cc12
|
||||
|
||||
# Fix T82520: error building freestyle with Python3.8
|
||||
e118426e4695a97d67e65d69677f3c4e2db50a56
|
||||
|
||||
# Cleanup: Clang-tidy, readability-else-after-return
|
||||
7be47dadea5066ae095c644e0b4f1f10d75f5ab3
|
||||
|
||||
# Cleanup: Add `r_` to return parameter
|
||||
45dca05b1cd2a5ead59144c93d790fdfe7c35ee6
|
||||
|
||||
# Cleanup: Typo in `print_default_info` function name.
|
||||
41a73909dec716642f044e60b40a28335c9fdb10
|
||||
|
||||
# Cleanup: Reduce indentation
|
||||
1cc3a0e2cf73a5ff4f9e0a7f5338eda77266b300
|
||||
|
||||
# Build-system: Force C linkage for all DNA type headers
|
||||
ad4b7741dba45a2be210942c18af6b6e4438f129
|
||||
|
||||
# Cleanup: Move function to proper section
|
||||
c126e27cdc8b28365a9d5f9fafc4d521d1eb83df
|
||||
|
||||
# Cleanup: remove break after return statements
|
||||
bbdfeb751e16d939482d2e4b95c4d470f53f18a5
|
||||
|
||||
# Cleanup: clang-tidy
|
||||
af013ff76feef7e8b8ba642279c62a5dc275d59f
|
||||
|
||||
# Cleanup: Make panel type flag names more clear
|
||||
9d28353b525ecfbcca1501be72e4276dfb2bbc2a
|
||||
|
@@ -205,7 +205,6 @@ option(WITH_OPENVDB_BLOSC "Enable blosc compression for OpenVDB, only enable if
|
||||
option(WITH_OPENVDB_3_ABI_COMPATIBLE "Assume OpenVDB library has been compiled with version 3 ABI compatibility" OFF)
|
||||
mark_as_advanced(WITH_OPENVDB_3_ABI_COMPATIBLE)
|
||||
option(WITH_NANOVDB "Enable usage of NanoVDB data structure for rendering on the GPU" ON)
|
||||
option(WITH_HARU "Enable features relying on Libharu (Grease pencil PDF export)" ON)
|
||||
|
||||
# GHOST Windowing Library Options
|
||||
option(WITH_GHOST_DEBUG "Enable debugging output for the GHOST library" OFF)
|
||||
@@ -348,21 +347,16 @@ if(UNIX AND NOT APPLE)
|
||||
endif()
|
||||
|
||||
option(WITH_PYTHON_INSTALL "Copy system python into the blender install folder" ON)
|
||||
|
||||
if((WITH_AUDASPACE AND NOT WITH_SYSTEM_AUDASPACE) OR WITH_MOD_FLUID)
|
||||
option(WITH_PYTHON_NUMPY "Include NumPy in Blender (used by Audaspace and Mantaflow)" ON)
|
||||
endif()
|
||||
|
||||
if(WIN32 OR APPLE)
|
||||
# Windows and macOS have this bundled with Python libraries.
|
||||
elseif(WITH_PYTHON_INSTALL OR WITH_PYTHON_NUMPY)
|
||||
elseif(WITH_PYTHON_INSTALL OR (WITH_AUDASPACE AND NOT WITH_SYSTEM_AUDASPACE))
|
||||
set(PYTHON_NUMPY_PATH "" CACHE PATH "Path to python site-packages or dist-packages containing 'numpy' module")
|
||||
mark_as_advanced(PYTHON_NUMPY_PATH)
|
||||
set(PYTHON_NUMPY_INCLUDE_DIRS "" CACHE PATH "Path to the include directory of the NumPy module")
|
||||
set(PYTHON_NUMPY_INCLUDE_DIRS ${PYTHON_NUMPY_PATH}/numpy/core/include CACHE PATH "Path to the include directory of the numpy module")
|
||||
mark_as_advanced(PYTHON_NUMPY_INCLUDE_DIRS)
|
||||
endif()
|
||||
if(WITH_PYTHON_INSTALL)
|
||||
option(WITH_PYTHON_INSTALL_NUMPY "Copy system NumPy into the blender install folder" ON)
|
||||
option(WITH_PYTHON_INSTALL_NUMPY "Copy system numpy into the blender install folder" ON)
|
||||
|
||||
if(UNIX AND NOT APPLE)
|
||||
option(WITH_PYTHON_INSTALL_REQUESTS "Copy system requests into the blender install folder" ON)
|
||||
@@ -611,11 +605,6 @@ if(WIN32)
|
||||
|
||||
endif()
|
||||
|
||||
if(UNIX)
|
||||
# See WITH_WINDOWS_SCCACHE for Windows.
|
||||
option(WITH_COMPILER_CCACHE "Use ccache to improve rebuild times (Works with Ninja, Makefiles and Xcode)" OFF)
|
||||
endif()
|
||||
|
||||
# The following only works with the Ninja generator in CMake >= 3.0.
|
||||
if("${CMAKE_GENERATOR}" MATCHES "Ninja")
|
||||
option(WITH_NINJA_POOL_JOBS
|
||||
@@ -731,9 +720,6 @@ set_and_warn_dependency(WITH_OPENVDB WITH_NANOVDB OFF)
|
||||
# OpenVDB uses 'half' type from OpenEXR & fails to link without OpenEXR enabled.
|
||||
set_and_warn_dependency(WITH_IMAGE_OPENEXR WITH_OPENVDB OFF)
|
||||
|
||||
# Haru needs `TIFFFaxBlackCodes` & `TIFFFaxWhiteCodes` symbols from TIFF.
|
||||
set_and_warn_dependency(WITH_IMAGE_TIFF WITH_HARU OFF)
|
||||
|
||||
# auto enable openimageio for cycles
|
||||
if(WITH_CYCLES)
|
||||
set(WITH_OPENIMAGEIO ON)
|
||||
@@ -1635,16 +1621,19 @@ if(WITH_PYTHON)
|
||||
|
||||
if(WIN32 OR APPLE)
|
||||
# Windows and macOS have this bundled with Python libraries.
|
||||
elseif((WITH_PYTHON_INSTALL AND WITH_PYTHON_INSTALL_NUMPY) OR WITH_PYTHON_NUMPY)
|
||||
elseif((WITH_PYTHON_INSTALL AND WITH_PYTHON_INSTALL_NUMPY) OR (WITH_AUDASPACE AND NOT WITH_SYSTEM_AUDASPACE))
|
||||
if(("${PYTHON_NUMPY_PATH}" STREQUAL "") OR (${PYTHON_NUMPY_PATH} MATCHES NOTFOUND))
|
||||
find_python_package(numpy "core/include")
|
||||
find_python_package(numpy)
|
||||
unset(PYTHON_NUMPY_INCLUDE_DIRS CACHE)
|
||||
set(PYTHON_NUMPY_INCLUDE_DIRS ${PYTHON_NUMPY_PATH}/numpy/core/include CACHE PATH "Path to the include directory of the numpy module")
|
||||
mark_as_advanced(PYTHON_NUMPY_INCLUDE_DIRS)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WIN32 OR APPLE)
|
||||
# pass, we have this in lib/python/site-packages
|
||||
elseif(WITH_PYTHON_INSTALL_REQUESTS)
|
||||
find_python_package(requests "")
|
||||
find_python_package(requests)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
@@ -1767,20 +1756,8 @@ if(WITH_BLENDER)
|
||||
# internal and external library information first, for test linking
|
||||
add_subdirectory(source)
|
||||
elseif(WITH_CYCLES_STANDALONE)
|
||||
add_subdirectory(intern/glew-mx)
|
||||
add_subdirectory(intern/guardedalloc)
|
||||
add_subdirectory(intern/libc_compat)
|
||||
add_subdirectory(intern/numaapi)
|
||||
add_subdirectory(intern/sky)
|
||||
|
||||
add_subdirectory(intern/cycles)
|
||||
add_subdirectory(extern/clew)
|
||||
if(WITH_CYCLES_LOGGING)
|
||||
if(NOT WITH_SYSTEM_GFLAGS)
|
||||
add_subdirectory(extern/gflags)
|
||||
endif()
|
||||
add_subdirectory(extern/glog)
|
||||
endif()
|
||||
if(WITH_CUDA_DYNLOAD)
|
||||
add_subdirectory(extern/cuew)
|
||||
endif()
|
||||
@@ -1789,10 +1766,6 @@ elseif(WITH_CYCLES_STANDALONE)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Testing
|
||||
add_subdirectory(tests)
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Blender Application
|
||||
if(WITH_BLENDER)
|
||||
@@ -1800,6 +1773,11 @@ if(WITH_BLENDER)
|
||||
endif()
|
||||
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Testing
|
||||
add_subdirectory(tests)
|
||||
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Define 'heavy' submodules (for Ninja builder when using pools).
|
||||
setup_heavy_lib_pool()
|
||||
@@ -1853,7 +1831,6 @@ if(FIRST_RUN)
|
||||
info_cfg_option(WITH_FFTW3)
|
||||
info_cfg_option(WITH_FREESTYLE)
|
||||
info_cfg_option(WITH_GMP)
|
||||
info_cfg_option(WITH_HARU)
|
||||
info_cfg_option(WITH_IK_ITASC)
|
||||
info_cfg_option(WITH_IK_SOLVER)
|
||||
info_cfg_option(WITH_INPUT_NDOF)
|
||||
@@ -1862,8 +1839,6 @@ if(FIRST_RUN)
|
||||
info_cfg_option(WITH_OPENCOLORIO)
|
||||
info_cfg_option(WITH_OPENIMAGEDENOISE)
|
||||
info_cfg_option(WITH_OPENVDB)
|
||||
info_cfg_option(WITH_POTRACE)
|
||||
info_cfg_option(WITH_PUGIXML)
|
||||
info_cfg_option(WITH_QUADRIFLOW)
|
||||
info_cfg_option(WITH_TBB)
|
||||
info_cfg_option(WITH_USD)
|
||||
|
19 GNUmakefile
@@ -41,7 +41,6 @@ Convenience Targets
|
||||
* developer: Enable faster builds, error checking and tests, recommended for developers.
|
||||
* config: Run cmake configuration tool to set build options.
|
||||
* ninja: Use ninja build tool for faster builds.
|
||||
* ccache: Use ccache for faster rebuilds.
|
||||
|
||||
Note: passing the argument 'BUILD_DIR=path' when calling make will override the default build dir.
|
||||
Note: passing the argument 'BUILD_CMAKE_ARGS=args' lets you add cmake arguments.
|
||||
@@ -183,13 +182,8 @@ endif
|
||||
ifndef DEPS_INSTALL_DIR
|
||||
DEPS_INSTALL_DIR:=$(shell dirname "$(BLENDER_DIR)")/lib/$(OS_NCASE)
|
||||
|
||||
# Add processor type to directory name, except for darwin x86_64
|
||||
# which by convention does not have it.
|
||||
ifeq ($(OS_NCASE),darwin)
|
||||
ifneq ($(CPU),x86_64)
|
||||
DEPS_INSTALL_DIR:=$(DEPS_INSTALL_DIR)_$(CPU)
|
||||
endif
|
||||
else
|
||||
ifneq ($(OS_NCASE),darwin)
|
||||
# Add processor type to directory name
|
||||
DEPS_INSTALL_DIR:=$(DEPS_INSTALL_DIR)_$(CPU)
|
||||
endif
|
||||
endif
|
||||
@@ -203,7 +197,7 @@ endif
|
||||
# in libraries, or python 2 for running make update to get it.
|
||||
ifeq ($(OS_NCASE),darwin)
|
||||
ifeq (, $(shell command -v $(PYTHON)))
|
||||
PYTHON:=$(DEPS_INSTALL_DIR)/python/bin/python3.7m
|
||||
PYTHON:=../lib/darwin/python/bin/python3.7m
|
||||
ifeq (, $(shell command -v $(PYTHON)))
|
||||
PYTHON:=python
|
||||
endif
|
||||
@@ -247,10 +241,6 @@ ifneq "$(findstring developer, $(MAKECMDGOALS))" ""
|
||||
CMAKE_CONFIG_ARGS:=-C"$(BLENDER_DIR)/build_files/cmake/config/blender_developer.cmake" $(CMAKE_CONFIG_ARGS)
|
||||
endif
|
||||
|
||||
ifneq "$(findstring ccache, $(MAKECMDGOALS))" ""
|
||||
CMAKE_CONFIG_ARGS:=-DWITH_COMPILER_CCACHE=YES $(CMAKE_CONFIG_ARGS)
|
||||
endif
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# build tool
|
||||
|
||||
@@ -350,7 +340,6 @@ headless: all
|
||||
bpy: all
|
||||
developer: all
|
||||
ninja: all
|
||||
ccache: all
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Build dependencies
|
||||
@@ -525,7 +514,7 @@ format: .FORCE
|
||||
|
||||
# Simple version of ./doc/python_api/sphinx_doc_gen.sh with no PDF generation.
|
||||
doc_py: .FORCE
|
||||
ASAN_OPTIONS=halt_on_error=0:${ASAN_OPTIONS} \
|
||||
ASAN_OPTIONS=halt_on_error=0 \
|
||||
$(BLENDER_BIN) --background -noaudio --factory-startup \
|
||||
--python doc/python_api/sphinx_doc_gen.py
|
||||
sphinx-build -b html -j $(NPROCS) doc/python_api/sphinx-in doc/python_api/sphinx-out
|
||||
|
@@ -92,7 +92,6 @@ include(cmake/package_python.cmake)
|
||||
include(cmake/numpy.cmake)
|
||||
include(cmake/usd.cmake)
|
||||
include(cmake/potrace.cmake)
|
||||
include(cmake/haru.cmake)
|
||||
# Boost needs to be included after python.cmake due to the PYTHON_BINARY variable being needed.
|
||||
include(cmake/boost.cmake)
|
||||
include(cmake/pugixml.cmake)
|
||||
|
@@ -1,46 +0,0 @@
|
||||
# ***** BEGIN GPL LICENSE BLOCK *****
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ***** END GPL LICENSE BLOCK *****
|
||||
|
||||
set(HARU_EXTRA_ARGS
|
||||
-DLIBHPDF_SHARED=OFF
|
||||
-DLIBHPDF_STATIC=ON
|
||||
-DLIBHPDF_EXAMPLES=OFF
|
||||
-DLIBHPDF_ENABLE_EXCEPTIONS=ON
|
||||
)
|
||||
|
||||
ExternalProject_Add(external_haru
|
||||
URL ${HARU_URI}
|
||||
DOWNLOAD_DIR ${DOWNLOAD_DIR}
|
||||
URL_HASH MD5=${HARU_HASH}
|
||||
PREFIX ${BUILD_DIR}/haru
|
||||
PATCH_COMMAND ${PATCH_CMD} -p 1 -d ${BUILD_DIR}/haru/src/external_haru < ${PATCH_DIR}/haru.diff
|
||||
CMAKE_ARGS
|
||||
-DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_INSTALL_PREFIX=${LIBDIR}/haru
|
||||
${DEFAULT_CMAKE_FLAGS} ${HARU_EXTRA_ARGS}
|
||||
INSTALL_DIR ${LIBDIR}/haru
|
||||
)
|
||||
|
||||
if(WIN32)
|
||||
if(BUILD_MODE STREQUAL Release)
|
||||
ExternalProject_Add_Step(external_haru after_install
|
||||
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/haru/include ${HARVEST_TARGET}/haru/include
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${LIBDIR}/haru/lib/libhpdfs.lib ${HARVEST_TARGET}/haru/lib/libhpdfs.lib
|
||||
DEPENDEES install
|
||||
)
|
||||
endif()
|
||||
endif()
|
@@ -187,8 +187,6 @@ harvest(usd/lib/usd usd/lib/usd "*")
|
||||
harvest(usd/plugin usd/plugin "*")
|
||||
harvest(potrace/include potrace/include "*.h")
|
||||
harvest(potrace/lib potrace/lib "*.a")
|
||||
harvest(haru/include haru/include "*.h")
|
||||
harvest(haru/lib haru/lib "*.a")
|
||||
|
||||
if(UNIX AND NOT APPLE)
|
||||
harvest(libglu/lib mesa/lib "*.so*")
|
||||
|
@@ -42,7 +42,7 @@ if(UNIX)
|
||||
-DSQLITE_MAX_VARIABLE_NUMBER=250000 \
|
||||
-fPIC")
|
||||
set(SQLITE_CONFIGURE_ENV ${SQLITE_CONFIGURE_ENV} && export LDFLAGS=${SQLITE_LDFLAGS} && export CFLAGS=${SQLITE_CFLAGS})
|
||||
set(SQLITE_CONFIGURATION_ARGS ${SQLITE_CONFIGURATION_ARGS} --enable-threadsafe --enable-load-extension --enable-json1 --enable-fts4 --enable-fts5 --disable-tcl
|
||||
set(SQLITE_CONFIGURATION_ARGS ${SQLITE_CONFIGURATION_ARGS} --enable-threadsafe --enable-load-extension --enable-json1 --enable-fts4 --enable-fts5
|
||||
--enable-shared=no)
|
||||
endif()
|
||||
|
||||
|
@@ -331,7 +331,3 @@ set(GMP_HASH a325e3f09e6d91e62101e59f9bda3ec1)
|
||||
set(POTRACE_VERSION 1.16)
|
||||
set(POTRACE_URI http://potrace.sourceforge.net/download/${POTRACE_VERSION}/potrace-${POTRACE_VERSION}.tar.gz)
|
||||
set(POTRACE_HASH 5f0bd87ddd9a620b0c4e65652ef93d69)
|
||||
|
||||
set(HARU_VERSION 2_3_0)
|
||||
set(HARU_URI https://github.com/libharu/libharu/archive/RELEASE_${HARU_VERSION}.tar.gz)
|
||||
set(HARU_HASH 4f916aa49c3069b3a10850013c507460)
|
||||
|
@@ -385,25 +385,25 @@ USE_CXX11=true
|
||||
CLANG_FORMAT_VERSION_MIN="6.0"
|
||||
CLANG_FORMAT_VERSION_MAX="10.0"
|
||||
|
||||
PYTHON_VERSION="3.9.1"
|
||||
PYTHON_VERSION_SHORT="3.9"
|
||||
PYTHON_VERSION="3.7.7"
|
||||
PYTHON_VERSION_SHORT="3.7"
|
||||
PYTHON_VERSION_MIN="3.7"
|
||||
PYTHON_VERSION_MAX="3.10"
|
||||
PYTHON_VERSION_MAX="3.9"
|
||||
PYTHON_VERSION_INSTALLED=$PYTHON_VERSION_MIN
|
||||
PYTHON_FORCE_BUILD=false
|
||||
PYTHON_FORCE_REBUILD=false
|
||||
PYTHON_SKIP=false
|
||||
|
||||
NUMPY_VERSION="1.19.5"
|
||||
NUMPY_VERSION_SHORT="1.19"
|
||||
NUMPY_VERSION="1.17.5"
|
||||
NUMPY_VERSION_SHORT="1.17"
|
||||
NUMPY_VERSION_MIN="1.8"
|
||||
NUMPY_VERSION_MAX="2.0"
|
||||
NUMPY_FORCE_BUILD=false
|
||||
NUMPY_FORCE_REBUILD=false
|
||||
NUMPY_SKIP=false
|
||||
|
||||
BOOST_VERSION="1.73.0"
|
||||
BOOST_VERSION_SHORT="1.73"
|
||||
BOOST_VERSION="1.70.0"
|
||||
BOOST_VERSION_SHORT="1.70"
|
||||
BOOST_VERSION_MIN="1.49"
|
||||
BOOST_VERSION_MAX="2.0"
|
||||
BOOST_FORCE_BUILD=false
|
||||
@@ -439,7 +439,7 @@ _with_built_openexr=false
|
||||
OIIO_VERSION="2.1.15.0"
|
||||
OIIO_VERSION_SHORT="2.1"
|
||||
OIIO_VERSION_MIN="2.1.12"
|
||||
OIIO_VERSION_MAX="2.2.10"
|
||||
OIIO_VERSION_MAX="3.0"
|
||||
OIIO_FORCE_BUILD=false
|
||||
OIIO_FORCE_REBUILD=false
|
||||
OIIO_SKIP=false
|
||||
@@ -483,7 +483,7 @@ OPENVDB_FORCE_REBUILD=false
|
||||
OPENVDB_SKIP=false
|
||||
|
||||
# Alembic needs to be compiled for now
|
||||
ALEMBIC_VERSION="1.7.16"
|
||||
ALEMBIC_VERSION="1.7.12"
|
||||
ALEMBIC_VERSION_SHORT="1.7"
|
||||
ALEMBIC_VERSION_MIN="1.7"
|
||||
ALEMBIC_VERSION_MAX="2.0"
|
||||
@@ -2064,6 +2064,7 @@ compile_OIIO() {
|
||||
cmake_d="$cmake_d -D CMAKE_PREFIX_PATH=$_inst"
|
||||
cmake_d="$cmake_d -D CMAKE_INSTALL_PREFIX=$_inst"
|
||||
cmake_d="$cmake_d -D STOP_ON_WARNING=OFF"
|
||||
cmake_d="$cmake_d -D BUILDSTATIC=OFF"
|
||||
cmake_d="$cmake_d -D LINKSTATIC=OFF"
|
||||
cmake_d="$cmake_d -D USE_SIMD=sse2"
|
||||
|
||||
@@ -2085,7 +2086,7 @@ compile_OIIO() {
|
||||
cmake_d="$cmake_d -D USE_OPENCV=OFF"
|
||||
cmake_d="$cmake_d -D BUILD_TESTING=OFF"
|
||||
cmake_d="$cmake_d -D OIIO_BUILD_TESTS=OFF"
|
||||
cmake_d="$cmake_d -D OIIO_BUILD_TOOLS=ON"
|
||||
cmake_d="$cmake_d -D OIIO_BUILD_TOOLS=OFF"
|
||||
cmake_d="$cmake_d -D TXT2MAN="
|
||||
#cmake_d="$cmake_d -D CMAKE_EXPORT_COMPILE_COMMANDS=ON"
|
||||
#cmake_d="$cmake_d -D CMAKE_VERBOSE_MAKEFILE=ON"
|
||||
@@ -2098,6 +2099,9 @@ compile_OIIO() {
|
||||
# if [ -d $INST/ocio ]; then
|
||||
# cmake_d="$cmake_d -D OCIO_PATH=$INST/ocio"
|
||||
# fi
|
||||
cmake_d="$cmake_d -D USE_OCIO=OFF"
|
||||
|
||||
cmake_d="$cmake_d -D OIIO_BUILD_CPP11=ON"
|
||||
|
||||
if file /bin/cp | grep -q '32-bit'; then
|
||||
cflags="-fPIC -m32 -march=i686"
|
||||
@@ -4068,7 +4072,7 @@ install_DEB() {
|
||||
else
|
||||
check_package_version_ge_lt_DEB libopenimageio-dev $OIIO_VERSION_MIN $OIIO_VERSION_MAX
|
||||
if [ $? -eq 0 -a "$_with_built_openexr" = false ]; then
|
||||
install_packages_DEB libopenimageio-dev openimageio-tools
|
||||
install_packages_DEB libopenimageio-dev
|
||||
clean_OIIO
|
||||
else
|
||||
compile_OIIO
|
||||
@@ -4710,13 +4714,13 @@ install_RPM() {
|
||||
INFO "Forced OpenImageIO building, as requested..."
|
||||
compile_OIIO
|
||||
else
|
||||
check_package_version_ge_lt_RPM OpenImageIO-devel $OIIO_VERSION_MIN $OIIO_VERSION_MAX
|
||||
if [ $? -eq 0 -a $_with_built_openexr == false ]; then
|
||||
install_packages_RPM OpenImageIO-devel OpenImageIO-utils
|
||||
clean_OIIO
|
||||
else
|
||||
#check_package_version_ge_lt_RPM OpenImageIO-devel $OIIO_VERSION_MIN $OIIO_VERSION_MAX
|
||||
#if [ $? -eq 0 -a $_with_built_openexr == false ]; then
|
||||
# install_packages_RPM OpenImageIO-devel
|
||||
# clean_OIIO
|
||||
#else
|
||||
compile_OIIO
|
||||
fi
|
||||
#fi
|
||||
fi
|
||||
|
||||
|
||||
|
@@ -1,12 +0,0 @@
|
||||
diff --git a/src/hpdf_image_ccitt.c b/src/hpdf_image_ccitt.c
|
||||
index 8672763..9be531a 100644
|
||||
--- a/src/hpdf_image_ccitt.c
|
||||
+++ b/src/hpdf_image_ccitt.c
|
||||
@@ -21,7 +21,6 @@
|
||||
#include <memory.h>
|
||||
#include <assert.h>
|
||||
|
||||
-#define G3CODES
|
||||
#include "t4.h"
|
||||
|
||||
typedef unsigned int uint32;
|
@@ -88,6 +88,7 @@ class VersionInfo:
|
||||
self.short_version = "%d.%02d" % (version_numbers[0], version_numbers[1])
|
||||
self.version = "%d.%02d.%d" % version_numbers
|
||||
self.version_cycle = self._parse_header_file(blender_h, 'BLENDER_VERSION_CYCLE')
|
||||
self.version_cycle_number = self._parse_header_file(blender_h, 'BLENDER_VERSION_CYCLE_NUMBER')
|
||||
self.hash = self._parse_header_file(buildinfo_h, 'BUILD_HASH')[1:-1]
|
||||
|
||||
if self.version_cycle == "release":
|
||||
@@ -96,7 +97,8 @@ class VersionInfo:
|
||||
self.is_development_build = False
|
||||
elif self.version_cycle == "rc":
|
||||
# Release candidate
|
||||
self.full_version = self.version + self.version_cycle
|
||||
version_cycle = self.version_cycle + self.version_cycle_number
|
||||
self.full_version = self.version + version_cycle
|
||||
self.is_development_build = False
|
||||
else:
|
||||
# Development build
|
||||
|
@@ -42,7 +42,7 @@ def get_cmake_options(builder):
|
||||
elif builder.platform == 'linux':
|
||||
config_file = "build_files/buildbot/config/blender_linux.cmake"
|
||||
|
||||
optix_sdk_dir = os.path.join(builder.blender_dir, '..', '..', 'NVIDIA-Optix-SDK-7.1')
|
||||
optix_sdk_dir = os.path.join(builder.blender_dir, '..', '..', 'NVIDIA-Optix-SDK')
|
||||
options.append('-DOPTIX_ROOT_DIR:PATH=' + optix_sdk_dir)
|
||||
|
||||
# Workaround to build sm_30 kernels with CUDA 10, since CUDA 11 no longer supports that architecture
|
||||
|
@@ -34,8 +34,6 @@ set(_clang_tidy_SEARCH_DIRS
|
||||
# TODO(sergey): Find more reliable way of finding the latest clang-tidy.
|
||||
find_program(CLANG_TIDY_EXECUTABLE
|
||||
NAMES
|
||||
clang-tidy-12
|
||||
clang-tidy-11
|
||||
clang-tidy-10
|
||||
clang-tidy-9
|
||||
clang-tidy-8
|
||||
|
@@ -1,64 +0,0 @@
|
||||
# - Find HARU library
|
||||
# Find the native Haru includes and library
|
||||
# This module defines
|
||||
# HARU_INCLUDE_DIRS, where to find hpdf.h, set when
|
||||
# HARU_INCLUDE_DIR is found.
|
||||
# HARU_LIBRARIES, libraries to link against to use Haru.
|
||||
# HARU_ROOT_DIR, The base directory to search for Haru.
|
||||
# This can also be an environment variable.
|
||||
# HARU_FOUND, If false, do not try to use Haru.
|
||||
#
|
||||
# also defined, but not for general use are
|
||||
# HARU_LIBRARY, where to find the Haru library.
|
||||
|
||||
#=============================================================================
|
||||
# Copyright 2021 Blender Foundation.
|
||||
#
|
||||
# Distributed under the OSI-approved BSD 3-Clause License,
|
||||
# see accompanying file BSD-3-Clause-license.txt for details.
|
||||
#=============================================================================
|
||||
|
||||
# If HARU_ROOT_DIR was defined in the environment, use it.
|
||||
if(NOT HARU_ROOT_DIR AND NOT $ENV{HARU_ROOT_DIR} STREQUAL "")
|
||||
set(HARU_ROOT_DIR $ENV{HARU_ROOT_DIR})
|
||||
endif()
|
||||
|
||||
set(_haru_SEARCH_DIRS
|
||||
${HARU_ROOT_DIR}
|
||||
/opt/lib/haru
|
||||
)
|
||||
|
||||
find_path(HARU_INCLUDE_DIR
|
||||
NAMES
|
||||
hpdf.h
|
||||
HINTS
|
||||
${_haru_SEARCH_DIRS}
|
||||
PATH_SUFFIXES
|
||||
include/haru
|
||||
)
|
||||
|
||||
find_library(HARU_LIBRARY
|
||||
NAMES
|
||||
hpdfs
|
||||
HINTS
|
||||
${_haru_SEARCH_DIRS}
|
||||
PATH_SUFFIXES
|
||||
lib64 lib
|
||||
)
|
||||
|
||||
# Handle the QUIETLY and REQUIRED arguments and set HARU_FOUND to TRUE if
|
||||
# all listed variables are TRUE.
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(Haru DEFAULT_MSG HARU_LIBRARY HARU_INCLUDE_DIR)
|
||||
|
||||
if(HARU_FOUND)
|
||||
set(HARU_LIBRARIES ${HARU_LIBRARY})
|
||||
set(HARU_INCLUDE_DIRS ${HARU_INCLUDE_DIR})
|
||||
endif()
|
||||
|
||||
mark_as_advanced(
|
||||
HARU_INCLUDE_DIR
|
||||
HARU_LIBRARY
|
||||
)
|
||||
|
||||
unset(_haru_SEARCH_DIRS)
|
@@ -330,9 +330,6 @@ function(gtest_add_tests)
|
||||
set(gtest_case_name_regex ".*\\( *([A-Za-z_0-9]+) *, *([A-Za-z_0-9]+) *\\).*")
|
||||
set(gtest_test_type_regex "(TYPED_TEST|TEST_?[FP]?)")
|
||||
|
||||
# This will get a filter for each test suite.
|
||||
set(test_filters "")
|
||||
|
||||
foreach(source IN LISTS ARGS_SOURCES)
|
||||
if(NOT ARGS_SKIP_DEPENDENCY)
|
||||
set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS ${source})
|
||||
@@ -379,32 +376,175 @@ function(gtest_add_tests)
|
||||
list(APPEND testList ${ctest_test_name})
|
||||
endif()
|
||||
else()
|
||||
# BLENDER: collect tests named "suite.testcase" as list of "suite.*" filters.
|
||||
string(REGEX REPLACE "\\..*$" "" gtest_suite_name ${gtest_test_name})
|
||||
list(APPEND test_filters "${gtest_suite_name}.*")
|
||||
set(ctest_test_name ${ARGS_TEST_PREFIX}${gtest_test_name}${ARGS_TEST_SUFFIX})
|
||||
add_test(NAME ${ctest_test_name}
|
||||
${workDir}
|
||||
COMMAND ${ARGS_TARGET}
|
||||
--gtest_filter=${gtest_test_name}
|
||||
${ARGS_EXTRA_ARGS}
|
||||
)
|
||||
list(APPEND testList ${ctest_test_name})
|
||||
endif()
|
||||
endforeach()
|
||||
endforeach()
|
||||
|
||||
# Join all found GTest suite names into one big filter.
|
||||
list(REMOVE_DUPLICATES test_filters)
|
||||
list(JOIN test_filters ":" gtest_filter)
|
||||
add_test(NAME ${ARGS_TEST_PREFIX}
|
||||
${workDir}
|
||||
COMMAND ${ARGS_TARGET}
|
||||
--gtest_filter=${gtest_filter}
|
||||
${ARGS_EXTRA_ARGS}
|
||||
)
|
||||
list(APPEND testList ${ARGS_TEST_PREFIX})
|
||||
|
||||
if(ARGS_TEST_LIST)
|
||||
set(${ARGS_TEST_LIST} ${testList} PARENT_SCOPE)
|
||||
endif()
|
||||
|
||||
endfunction()
|
||||
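The change above makes `gtest_add_tests()` register one CTest entry per target rather than one per GoogleTest case: each discovered test contributes a `<suite>.*` pattern, the patterns are joined with `:` (GoogleTest's filter separator), and the binary is run once with the combined filter. A rough sketch of the resulting invocation; the suite names and binary path below are illustrative, not taken from the diff:

```sh
# Hypothetical suites BLI_string and BLI_math collected from the sources would
# yield a single registered test command along these lines:
./bin/tests/blender_test --gtest_filter=BLI_string.*:BLI_math.*
```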
|
||||
# BLENDER: remove the discovery function gtest_discover_tests(). It's not used,
|
||||
# as it generates too many test invocations.
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
function(gtest_discover_tests TARGET)
|
||||
cmake_parse_arguments(
|
||||
""
|
||||
"NO_PRETTY_TYPES;NO_PRETTY_VALUES"
|
||||
"TEST_PREFIX;TEST_SUFFIX;WORKING_DIRECTORY;TEST_LIST;DISCOVERY_TIMEOUT;XML_OUTPUT_DIR;DISCOVERY_MODE"
|
||||
"EXTRA_ARGS;PROPERTIES"
|
||||
${ARGN}
|
||||
)
|
||||
|
||||
if(NOT _WORKING_DIRECTORY)
|
||||
set(_WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}")
|
||||
endif()
|
||||
if(NOT _TEST_LIST)
|
||||
set(_TEST_LIST ${TARGET}_TESTS)
|
||||
endif()
|
||||
if(NOT _DISCOVERY_TIMEOUT)
|
||||
set(_DISCOVERY_TIMEOUT 5)
|
||||
endif()
|
||||
if(NOT _DISCOVERY_MODE)
|
||||
if(NOT CMAKE_GTEST_DISCOVER_TESTS_DISCOVERY_MODE)
|
||||
set(CMAKE_GTEST_DISCOVER_TESTS_DISCOVERY_MODE "POST_BUILD")
|
||||
endif()
|
||||
set(_DISCOVERY_MODE ${CMAKE_GTEST_DISCOVER_TESTS_DISCOVERY_MODE})
|
||||
endif()
|
||||
|
||||
get_property(
|
||||
has_counter
|
||||
TARGET ${TARGET}
|
||||
PROPERTY CTEST_DISCOVERED_TEST_COUNTER
|
||||
SET
|
||||
)
|
||||
if(has_counter)
|
||||
get_property(
|
||||
counter
|
||||
TARGET ${TARGET}
|
||||
PROPERTY CTEST_DISCOVERED_TEST_COUNTER
|
||||
)
|
||||
math(EXPR counter "${counter} + 1")
|
||||
else()
|
||||
set(counter 1)
|
||||
endif()
|
||||
set_property(
|
||||
TARGET ${TARGET}
|
||||
PROPERTY CTEST_DISCOVERED_TEST_COUNTER
|
||||
${counter}
|
||||
)
|
||||
|
||||
# Define rule to generate test list for aforementioned test executable
|
||||
# Blender: use _ instead of [] to avoid problems with zsh regex.
|
||||
set(ctest_file_base "${CMAKE_CURRENT_BINARY_DIR}/${TARGET}_${counter}_")
|
||||
set(ctest_include_file "${ctest_file_base}_include.cmake")
|
||||
set(ctest_tests_file "${ctest_file_base}_tests.cmake")
|
||||
get_property(crosscompiling_emulator
|
||||
TARGET ${TARGET}
|
||||
PROPERTY CROSSCOMPILING_EMULATOR
|
||||
)
|
||||
|
||||
if(_DISCOVERY_MODE STREQUAL "POST_BUILD")
|
||||
add_custom_command(
|
||||
TARGET ${TARGET} POST_BUILD
|
||||
BYPRODUCTS "${ctest_tests_file}"
|
||||
COMMAND "${CMAKE_COMMAND}"
|
||||
-D "TEST_TARGET=${TARGET}"
|
||||
-D "TEST_EXECUTABLE=$<TARGET_FILE:${TARGET}>"
|
||||
-D "TEST_EXECUTOR=${crosscompiling_emulator}"
|
||||
-D "TEST_WORKING_DIR=${_WORKING_DIRECTORY}"
|
||||
-D "TEST_EXTRA_ARGS=${_EXTRA_ARGS}"
|
||||
-D "TEST_PROPERTIES=${_PROPERTIES}"
|
||||
-D "TEST_PREFIX=${_TEST_PREFIX}"
|
||||
-D "TEST_SUFFIX=${_TEST_SUFFIX}"
|
||||
-D "NO_PRETTY_TYPES=${_NO_PRETTY_TYPES}"
|
||||
-D "NO_PRETTY_VALUES=${_NO_PRETTY_VALUES}"
|
||||
-D "TEST_LIST=${_TEST_LIST}"
|
||||
-D "CTEST_FILE=${ctest_tests_file}"
|
||||
-D "TEST_DISCOVERY_TIMEOUT=${_DISCOVERY_TIMEOUT}"
|
||||
-D "TEST_XML_OUTPUT_DIR=${_XML_OUTPUT_DIR}"
|
||||
-P "${_GOOGLETEST_DISCOVER_TESTS_SCRIPT}"
|
||||
VERBATIM
|
||||
)
|
||||
|
||||
file(WRITE "${ctest_include_file}"
|
||||
"if(EXISTS \"${ctest_tests_file}\")\n"
|
||||
" include(\"${ctest_tests_file}\")\n"
|
||||
"else()\n"
|
||||
" add_test(${TARGET}_NOT_BUILT ${TARGET}_NOT_BUILT)\n"
|
||||
"endif()\n"
|
||||
)
|
||||
elseif(_DISCOVERY_MODE STREQUAL "PRE_TEST")
|
||||
|
||||
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL
|
||||
PROPERTY GENERATOR_IS_MULTI_CONFIG
|
||||
)
|
||||
|
||||
if(GENERATOR_IS_MULTI_CONFIG)
|
||||
set(ctest_tests_file "${ctest_file_base}_tests-$<CONFIG>.cmake")
|
||||
endif()
|
||||
|
||||
string(CONCAT ctest_include_content
|
||||
"if(EXISTS \"$<TARGET_FILE:${TARGET}>\")" "\n"
|
||||
" if(\"$<TARGET_FILE:${TARGET}>\" IS_NEWER_THAN \"${ctest_tests_file}\")" "\n"
|
||||
" include(\"${_GOOGLETEST_DISCOVER_TESTS_SCRIPT}\")" "\n"
|
||||
" gtest_discover_tests_impl(" "\n"
|
||||
" TEST_EXECUTABLE" " [==[" "$<TARGET_FILE:${TARGET}>" "]==]" "\n"
|
||||
" TEST_EXECUTOR" " [==[" "${crosscompiling_emulator}" "]==]" "\n"
|
||||
" TEST_WORKING_DIR" " [==[" "${_WORKING_DIRECTORY}" "]==]" "\n"
|
||||
" TEST_EXTRA_ARGS" " [==[" "${_EXTRA_ARGS}" "]==]" "\n"
|
||||
" TEST_PROPERTIES" " [==[" "${_PROPERTIES}" "]==]" "\n"
|
||||
" TEST_PREFIX" " [==[" "${_TEST_PREFIX}" "]==]" "\n"
|
||||
" TEST_SUFFIX" " [==[" "${_TEST_SUFFIX}" "]==]" "\n"
|
||||
" NO_PRETTY_TYPES" " [==[" "${_NO_PRETTY_TYPES}" "]==]" "\n"
|
||||
" NO_PRETTY_VALUES" " [==[" "${_NO_PRETTY_VALUES}" "]==]" "\n"
|
||||
" TEST_LIST" " [==[" "${_TEST_LIST}" "]==]" "\n"
|
||||
" CTEST_FILE" " [==[" "${ctest_tests_file}" "]==]" "\n"
|
||||
" TEST_DISCOVERY_TIMEOUT" " [==[" "${_DISCOVERY_TIMEOUT}" "]==]" "\n"
|
||||
" TEST_XML_OUTPUT_DIR" " [==[" "${_XML_OUTPUT_DIR}" "]==]" "\n"
|
||||
" )" "\n"
|
||||
" endif()" "\n"
|
||||
" include(\"${ctest_tests_file}\")" "\n"
|
||||
"else()" "\n"
|
||||
" add_test(${TARGET}_NOT_BUILT ${TARGET}_NOT_BUILT)" "\n"
|
||||
"endif()" "\n"
|
||||
)
|
||||
|
||||
if(GENERATOR_IS_MULTI_CONFIG)
|
||||
foreach(_config ${CMAKE_CONFIGURATION_TYPES})
|
||||
file(GENERATE OUTPUT "${ctest_file_base}_include-${_config}.cmake" CONTENT "${ctest_include_content}" CONDITION $<CONFIG:${_config}>)
|
||||
endforeach()
|
||||
file(WRITE "${ctest_include_file}" "include(\"${ctest_file_base}_include-\${CTEST_CONFIGURATION_TYPE}.cmake\")")
|
||||
else()
|
||||
file(GENERATE OUTPUT "${ctest_file_base}_include.cmake" CONTENT "${ctest_include_content}")
|
||||
file(WRITE "${ctest_include_file}" "include(\"${ctest_file_base}_include.cmake\")")
|
||||
endif()
|
||||
|
||||
else()
|
||||
message(FATAL_ERROR "Unknown DISCOVERY_MODE: ${_DISCOVERY_MODE}")
|
||||
endif()
|
||||
|
||||
# Add discovered tests to directory TEST_INCLUDE_FILES
|
||||
set_property(DIRECTORY
|
||||
APPEND PROPERTY TEST_INCLUDE_FILES "${ctest_include_file}"
|
||||
)
|
||||
|
||||
endfunction()
|
||||
|
||||
###############################################################################
|
||||
|
||||
set(_GOOGLETEST_DISCOVER_TESTS_SCRIPT
|
||||
${CMAKE_CURRENT_LIST_DIR}/GTestAddTests.cmake
|
||||
)
|
||||
|
||||
# Restore project's policies
|
||||
cmake_policy(POP)
|
||||
|
194 build_files/cmake/Modules/GTestAddTests.cmake (Normal file)
@@ -0,0 +1,194 @@
|
||||
# Distributed under the OSI-approved BSD 3-Clause License,
|
||||
# see accompanying file BSD-3-Clause-license.txt for details.
|
||||
|
||||
# Changes made to this script have been marked with "BLENDER".
|
||||
|
||||
|
||||
# BLENDER: disable ASAN leak detection when trying to discover tests.
|
||||
set(ENV{ASAN_OPTIONS} "detect_leaks=0")
|
||||
|
||||
cmake_minimum_required(VERSION ${CMAKE_VERSION})
|
||||
|
||||
# Overwrite possibly existing ${_CTEST_FILE} with empty file
|
||||
set(flush_tests_MODE WRITE)
|
||||
|
||||
# Flushes script to ${_CTEST_FILE}
|
||||
macro(flush_script)
|
||||
file(${flush_tests_MODE} "${_CTEST_FILE}" "${script}")
|
||||
set(flush_tests_MODE APPEND)
|
||||
|
||||
set(script "")
|
||||
endmacro()
|
||||
|
||||
# Flushes tests_buffer to tests
|
||||
macro(flush_tests_buffer)
|
||||
list(APPEND tests "${tests_buffer}")
|
||||
set(tests_buffer "")
|
||||
endmacro()
|
||||
|
||||
macro(add_command NAME)
|
||||
set(_args "")
|
||||
foreach(_arg ${ARGN})
|
||||
if(_arg MATCHES "[^-./:a-zA-Z0-9_]")
|
||||
string(APPEND _args " [==[${_arg}]==]")
|
||||
else()
|
||||
string(APPEND _args " ${_arg}")
|
||||
endif()
|
||||
endforeach()
|
||||
string(APPEND script "${NAME}(${_args})\n")
|
||||
string(LENGTH "${script}" _script_len)
|
||||
if(${_script_len} GREATER "50000")
|
||||
flush_script()
|
||||
endif()
|
||||
# Unsets macro local variables to prevent leakage outside of this macro.
|
||||
unset(_args)
|
||||
unset(_script_len)
|
||||
endmacro()
|
||||
|
||||
function(gtest_discover_tests_impl)
|
||||
|
||||
cmake_parse_arguments(
|
||||
""
|
||||
""
|
||||
"NO_PRETTY_TYPES;NO_PRETTY_VALUES;TEST_EXECUTABLE;TEST_EXECUTOR;TEST_WORKING_DIR;TEST_PREFIX;TEST_SUFFIX;TEST_LIST;CTEST_FILE;TEST_DISCOVERY_TIMEOUT;TEST_XML_OUTPUT_DIR"
|
||||
"TEST_EXTRA_ARGS;TEST_PROPERTIES"
|
||||
${ARGN}
|
||||
)
|
||||
|
||||
set(prefix "${_TEST_PREFIX}")
|
||||
set(suffix "${_TEST_SUFFIX}")
|
||||
set(extra_args ${_TEST_EXTRA_ARGS})
|
||||
set(properties ${_TEST_PROPERTIES})
|
||||
set(script)
|
||||
set(suite)
|
||||
set(tests)
|
||||
set(tests_buffer)
|
||||
|
||||
# Run test executable to get list of available tests
|
||||
if(NOT EXISTS "${_TEST_EXECUTABLE}")
|
||||
message(FATAL_ERROR
|
||||
"Specified test executable does not exist.\n"
|
||||
" Path: '${_TEST_EXECUTABLE}'"
|
||||
)
|
||||
endif()
|
||||
execute_process(
|
||||
COMMAND ${_TEST_EXECUTOR} "${_TEST_EXECUTABLE}" --gtest_list_tests
|
||||
WORKING_DIRECTORY "${_TEST_WORKING_DIR}"
|
||||
TIMEOUT ${_TEST_DISCOVERY_TIMEOUT}
|
||||
OUTPUT_VARIABLE output
|
||||
RESULT_VARIABLE result
|
||||
)
|
||||
if(NOT ${result} EQUAL 0)
|
||||
string(REPLACE "\n" "\n " output "${output}")
|
||||
message(FATAL_ERROR
|
||||
"Error running test executable.\n"
|
||||
" Path: '${_TEST_EXECUTABLE}'\n"
|
||||
" Result: ${result}\n"
|
||||
" Output:\n"
|
||||
" ${output}\n"
|
||||
)
|
||||
endif()
|
||||
|
||||
# Preserve semicolon in test-parameters
|
||||
string(REPLACE [[;]] [[\;]] output "${output}")
|
||||
string(REPLACE "\n" ";" output "${output}")
|
||||
|
||||
# Parse output
|
||||
foreach(line ${output})
|
||||
# Skip header
|
||||
if(NOT line MATCHES "gtest_main\\.cc")
|
||||
# Do we have a module name or a test name?
|
||||
if(NOT line MATCHES "^ ")
|
||||
# Module; remove trailing '.' to get just the name...
|
||||
string(REGEX REPLACE "\\.( *#.*)?" "" suite "${line}")
|
||||
if(line MATCHES "#" AND NOT _NO_PRETTY_TYPES)
|
||||
string(REGEX REPLACE "/[0-9]\\.+ +#.*= +" "/" pretty_suite "${line}")
|
||||
else()
|
||||
set(pretty_suite "${suite}")
|
||||
endif()
|
||||
string(REGEX REPLACE "^DISABLED_" "" pretty_suite "${pretty_suite}")
|
||||
else()
|
||||
# Test name; strip spaces and comments to get just the name...
|
||||
string(REGEX REPLACE " +" "" test "${line}")
|
||||
if(test MATCHES "#" AND NOT _NO_PRETTY_VALUES)
|
||||
string(REGEX REPLACE "/[0-9]+#GetParam..=" "/" pretty_test "${test}")
|
||||
else()
|
||||
string(REGEX REPLACE "#.*" "" pretty_test "${test}")
|
||||
endif()
|
||||
string(REGEX REPLACE "^DISABLED_" "" pretty_test "${pretty_test}")
|
||||
string(REGEX REPLACE "#.*" "" test "${test}")
|
||||
if(NOT "${_TEST_XML_OUTPUT_DIR}" STREQUAL "")
|
||||
set(TEST_XML_OUTPUT_PARAM "--gtest_output=xml:${_TEST_XML_OUTPUT_DIR}/${prefix}${suite}.${test}${suffix}.xml")
|
||||
else()
|
||||
unset(TEST_XML_OUTPUT_PARAM)
|
||||
endif()
|
||||
|
||||
# sanitize test name for further processing downstream
|
||||
set(testname "${prefix}${pretty_suite}.${pretty_test}${suffix}")
|
||||
# escape \
|
||||
string(REPLACE [[\]] [[\\]] testname "${testname}")
|
||||
# escape ;
|
||||
string(REPLACE [[;]] [[\;]] testname "${testname}")
|
||||
# escape $
|
||||
string(REPLACE [[$]] [[\$]] testname "${testname}")
|
||||
|
||||
# ...and add to script
|
||||
add_command(add_test
|
||||
"${testname}"
|
||||
${_TEST_EXECUTOR}
|
||||
"${_TEST_EXECUTABLE}"
|
||||
"--gtest_filter=${suite}.${test}"
|
||||
"--gtest_also_run_disabled_tests"
|
||||
${TEST_XML_OUTPUT_PARAM}
|
||||
${extra_args}
|
||||
)
|
||||
if(suite MATCHES "^DISABLED" OR test MATCHES "^DISABLED")
|
||||
add_command(set_tests_properties
|
||||
"${testname}"
|
||||
PROPERTIES DISABLED TRUE
|
||||
)
|
||||
endif()
|
||||
add_command(set_tests_properties
|
||||
"${testname}"
|
||||
PROPERTIES
|
||||
WORKING_DIRECTORY "${_TEST_WORKING_DIR}"
|
||||
SKIP_REGULAR_EXPRESSION "\\\\[ SKIPPED \\\\]"
|
||||
${properties}
|
||||
)
|
||||
list(APPEND tests_buffer "${testname}")
|
||||
list(LENGTH tests_buffer tests_buffer_length)
|
||||
if(${tests_buffer_length} GREATER "250")
|
||||
flush_tests_buffer()
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
endforeach()
|
||||
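The loop above parses the plain-text listing GoogleTest prints for `--gtest_list_tests`: an unindented line names a suite and ends with a trailing `.`, indented lines are its tests, and parameterized entries carry `#` comments that the `NO_PRETTY_*` branches either strip or keep. A small, hedged example of that output (test and suite names invented):

```sh
# Invented names; the trailing '#' comments on parameterized entries are
# produced by GoogleTest itself and are what the NO_PRETTY_* handling strips.
$ ./blender_test --gtest_list_tests
BLI_string.
  replace_range
  trim
VectorTest/0.  # TypeParam = float
  normalize
```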
|
||||
|
||||
# Create a list of all discovered tests, which users may use to e.g. set
|
||||
# properties on the tests
|
||||
flush_tests_buffer()
|
||||
add_command(set ${_TEST_LIST} ${tests})
|
||||
|
||||
# Write CTest script
|
||||
flush_script()
|
||||
|
||||
endfunction()
|
||||
|
||||
if(CMAKE_SCRIPT_MODE_FILE)
|
||||
gtest_discover_tests_impl(
|
||||
NO_PRETTY_TYPES ${NO_PRETTY_TYPES}
|
||||
NO_PRETTY_VALUES ${NO_PRETTY_VALUES}
|
||||
TEST_EXECUTABLE ${TEST_EXECUTABLE}
|
||||
TEST_EXECUTOR ${TEST_EXECUTOR}
|
||||
TEST_WORKING_DIR ${TEST_WORKING_DIR}
|
||||
TEST_PREFIX ${TEST_PREFIX}
|
||||
TEST_SUFFIX ${TEST_SUFFIX}
|
||||
TEST_LIST ${TEST_LIST}
|
||||
CTEST_FILE ${CTEST_FILE}
|
||||
TEST_DISCOVERY_TIMEOUT ${TEST_DISCOVERY_TIMEOUT}
|
||||
TEST_XML_OUTPUT_DIR ${TEST_XML_OUTPUT_DIR}
|
||||
TEST_EXTRA_ARGS ${TEST_EXTRA_ARGS}
|
||||
TEST_PROPERTIES ${TEST_PROPERTIES}
|
||||
)
|
||||
endif()
|
@@ -8,17 +8,6 @@
|
||||
#
|
||||
#=============================================================================
|
||||
|
||||
function(GET_BLENDER_TEST_INSTALL_DIR VARIABLE_NAME)
|
||||
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
||||
if(GENERATOR_IS_MULTI_CONFIG)
|
||||
string(REPLACE "\${BUILD_TYPE}" "$<CONFIG>" TEST_INSTALL_DIR ${CMAKE_INSTALL_PREFIX})
|
||||
else()
|
||||
string(REPLACE "\${BUILD_TYPE}" "" TEST_INSTALL_DIR ${CMAKE_INSTALL_PREFIX})
|
||||
endif()
|
||||
set(${VARIABLE_NAME} "${TEST_INSTALL_DIR}" PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
|
||||
macro(BLENDER_SRC_GTEST_EX)
|
||||
if(WITH_GTESTS)
|
||||
set(options SKIP_ADD_TEST)
|
||||
@@ -86,7 +75,13 @@ macro(BLENDER_SRC_GTEST_EX)
|
||||
target_link_libraries(${TARGET_NAME} ${GMP_LIBRARIES})
|
||||
endif()
|
||||
|
||||
GET_BLENDER_TEST_INSTALL_DIR(TEST_INSTALL_DIR)
|
||||
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
||||
if(GENERATOR_IS_MULTI_CONFIG)
|
||||
string(REPLACE "\${BUILD_TYPE}" "$<CONFIG>" TEST_INSTALL_DIR ${CMAKE_INSTALL_PREFIX})
|
||||
else()
|
||||
string(REPLACE "\${BUILD_TYPE}" "" TEST_INSTALL_DIR ${CMAKE_INSTALL_PREFIX})
|
||||
endif()
|
||||
|
||||
set_target_properties(${TARGET_NAME} PROPERTIES
|
||||
RUNTIME_OUTPUT_DIRECTORY "${TESTS_OUTPUT_DIR}"
|
||||
RUNTIME_OUTPUT_DIRECTORY_RELEASE "${TESTS_OUTPUT_DIR}"
|
||||
@@ -99,9 +94,7 @@ macro(BLENDER_SRC_GTEST_EX)
|
||||
|
||||
# Don't fail tests on leaks since these often happen in external libraries
|
||||
# that we can't fix.
|
||||
set_tests_properties(${TARGET_NAME} PROPERTIES
|
||||
ENVIRONMENT LSAN_OPTIONS=exitcode=0:$ENV{LSAN_OPTIONS}
|
||||
)
|
||||
set_tests_properties(${TARGET_NAME} PROPERTIES ENVIRONMENT LSAN_OPTIONS=exitcode=0)
|
||||
endif()
|
||||
if(WIN32)
|
||||
set_target_properties(${TARGET_NAME} PROPERTIES VS_GLOBAL_VcpkgEnabled "false")
|
||||
|
@@ -13,7 +13,7 @@ Invocation:
|
||||
export CLANG_BIND_DIR="/dsk/src/llvm/tools/clang/bindings/python"
|
||||
export CLANG_LIB_DIR="/opt/llvm/lib"
|
||||
|
||||
python clang_array_check.py somefile.c -DSOME_DEFINE -I/some/include
|
||||
python2 clang_array_check.py somefile.c -DSOME_DEFINE -I/some/include
|
||||
|
||||
... defines and includes are optional
|
||||
|
||||
@@ -76,32 +76,6 @@ defs_precalc = {
|
||||
"glNormal3bv": {0: 3},
|
||||
"glNormal3iv": {0: 3},
|
||||
"glNormal3sv": {0: 3},
|
||||
|
||||
# GPU immediate mode.
|
||||
"immVertex2iv": {1: 2},
|
||||
|
||||
"immVertex2fv": {1: 2},
|
||||
"immVertex3fv": {1: 3},
|
||||
|
||||
"immAttr2fv": {1: 2},
|
||||
"immAttr3fv": {1: 3},
|
||||
"immAttr4fv": {1: 4},
|
||||
|
||||
"immAttr3ubv": {1: 3},
|
||||
"immAttr4ubv": {1: 4},
|
||||
|
||||
"immUniform2fv": {1: 2},
|
||||
"immUniform3fv": {1: 3},
|
||||
"immUniform4fv": {1: 4},
|
||||
|
||||
"immUniformColor3fv": {0: 3},
|
||||
"immUniformColor4fv": {0: 4},
|
||||
|
||||
"immUniformColor3ubv": {1: 3},
|
||||
"immUniformColor4ubv": {1: 4},
|
||||
|
||||
"immUniformColor3fvAlpha": {0: 3},
|
||||
"immUniformColor4fvAlpha": {0: 4},
|
||||
}
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
@@ -126,8 +100,7 @@ else:
|
||||
if CLANG_LIB_DIR is None:
|
||||
print("$CLANG_LIB_DIR clang lib dir not set")
|
||||
|
||||
if CLANG_BIND_DIR:
|
||||
sys.path.append(CLANG_BIND_DIR)
|
||||
sys.path.append(CLANG_BIND_DIR)
|
||||
|
||||
import clang
|
||||
import clang.cindex
|
||||
@@ -135,8 +108,7 @@ from clang.cindex import (CursorKind,
|
||||
TypeKind,
|
||||
TokenKind)
|
||||
|
||||
if CLANG_LIB_DIR:
|
||||
clang.cindex.Config.set_library_path(CLANG_LIB_DIR)
|
||||
clang.cindex.Config.set_library_path(CLANG_LIB_DIR)
|
||||
|
||||
index = clang.cindex.Index.create()
|
||||
|
||||
|
@@ -20,8 +20,6 @@
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
# Note: this code should be cleaned up / refactored.
|
||||
|
||||
import sys
|
||||
if sys.version_info.major < 3:
|
||||
print("\nPython3.x needed, found %s.\nAborting!\n" %
|
||||
@@ -39,23 +37,12 @@ from cmake_consistency_check_config import (
|
||||
|
||||
|
||||
import os
|
||||
from os.path import (
|
||||
dirname,
|
||||
join,
|
||||
normpath,
|
||||
splitext,
|
||||
)
|
||||
from os.path import join, dirname, normpath, splitext
|
||||
|
||||
global_h = set()
|
||||
global_c = set()
|
||||
global_refs = {}
|
||||
|
||||
# Flatten `IGNORE_SOURCE_MISSING` to avoid nested looping.
|
||||
IGNORE_SOURCE_MISSING = [
|
||||
(k, ignore_path) for k, ig_list in IGNORE_SOURCE_MISSING
|
||||
for ignore_path in ig_list
|
||||
]
|
||||
|
||||
# Ignore cmake file, path pairs.
|
||||
global_ignore_source_missing = {}
|
||||
for k, v in IGNORE_SOURCE_MISSING:
|
||||
@@ -191,8 +178,6 @@ def cmake_get_src(f):
|
||||
|
||||
if not l:
|
||||
pass
|
||||
elif l in local_ignore_source_missing:
|
||||
local_ignore_source_missing.remove(l)
|
||||
elif l.startswith("$"):
|
||||
if context_name == "SRC":
|
||||
# assume if it ends with context_name we know about it
|
||||
@@ -242,7 +227,10 @@ def cmake_get_src(f):
|
||||
# replace_line(f, i - 1, new_path_rel)
|
||||
|
||||
else:
|
||||
raise Exception("non existent include %s:%d -> %s" % (f, i, new_file))
|
||||
if l in local_ignore_source_missing:
|
||||
local_ignore_source_missing.remove(l)
|
||||
else:
|
||||
raise Exception("non existent include %s:%d -> %s" % (f, i, new_file))
|
||||
|
||||
# print(new_file)
|
||||
|
||||
@@ -270,16 +258,16 @@ def cmake_get_src(f):
|
||||
|
||||
|
||||
def is_ignore_source(f, ignore_used):
|
||||
for index, ignore_path in enumerate(IGNORE_SOURCE):
|
||||
if ignore_path in f:
|
||||
for index, ig in enumerate(IGNORE_SOURCE):
|
||||
if ig in f:
|
||||
ignore_used[index] = True
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def is_ignore_cmake(f, ignore_used):
|
||||
for index, ignore_path in enumerate(IGNORE_CMAKE):
|
||||
if ignore_path in f:
|
||||
for index, ig in enumerate(IGNORE_CMAKE):
|
||||
if ig in f:
|
||||
ignore_used[index] = True
|
||||
return True
|
||||
return False
|
||||
@@ -310,7 +298,7 @@ def main():
|
||||
for cf, i in refs:
|
||||
errs.append((cf, i))
|
||||
else:
|
||||
raise Exception("CMake references missing, internal error, aborting!")
|
||||
raise Exception("CMake referenecs missing, internal error, aborting!")
|
||||
is_err = True
|
||||
|
||||
errs.sort()
|
||||
@@ -321,7 +309,7 @@ def main():
|
||||
# print("sed '%dd' '%s' > '%s.tmp' ; mv '%s.tmp' '%s'" % (i, cf, cf, cf, cf))
|
||||
|
||||
if is_err:
|
||||
raise Exception("CMake references missing files, aborting!")
|
||||
raise Exception("CMake referenecs missing files, aborting!")
|
||||
del is_err
|
||||
del errs
|
||||
|
||||
@@ -332,7 +320,7 @@ def main():
|
||||
if cf not in global_c:
|
||||
print("missing_c: ", cf)
|
||||
|
||||
# Check if automake builds a corresponding .o file.
|
||||
# check if automake builds a corrasponding .o file.
|
||||
'''
|
||||
if cf in global_c:
|
||||
out1 = os.path.splitext(cf)[0] + ".o"
|
||||
@@ -368,21 +356,21 @@ def main():
|
||||
|
||||
# Check ignores aren't stale
|
||||
print("\nCheck for unused 'IGNORE_SOURCE' paths...")
|
||||
for index, ignore_path in enumerate(IGNORE_SOURCE):
|
||||
for index, ig in enumerate(IGNORE_SOURCE):
|
||||
if not ignore_used_source[index]:
|
||||
print("unused ignore: %r" % ignore_path)
|
||||
print("unused ignore: %r" % ig)
|
||||
|
||||
# Check ignores aren't stale
|
||||
print("\nCheck for unused 'IGNORE_SOURCE_MISSING' paths...")
|
||||
for k, v in sorted(global_ignore_source_missing.items()):
|
||||
for ignore_path in v:
|
||||
print("unused ignore: %r -> %r" % (ignore_path, k))
|
||||
for ig in v:
|
||||
print("unused ignore: %r -> %r" % (ig, k))
|
||||
|
||||
# Check ignores aren't stale
|
||||
print("\nCheck for unused 'IGNORE_CMAKE' paths...")
|
||||
for index, ignore_path in enumerate(IGNORE_CMAKE):
|
||||
for index, ig in enumerate(IGNORE_CMAKE):
|
||||
if not ignore_used_cmake[index]:
|
||||
print("unused ignore: %r" % ignore_path)
|
||||
print("unused ignore: %r" % ig)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
@@ -34,18 +34,8 @@ IGNORE_SOURCE = (
|
||||
|
||||
# Ignore cmake file, path pairs.
|
||||
IGNORE_SOURCE_MISSING = (
|
||||
( # Use for cycles stand-alone.
|
||||
"intern/cycles/util/CMakeLists.txt", (
|
||||
"../../third_party/numaapi/include",
|
||||
)),
|
||||
( # Use for `WITH_NANOVDB`.
|
||||
"intern/cycles/kernel/CMakeLists.txt", (
|
||||
"nanovdb/util/CSampleFromVoxels.h",
|
||||
"nanovdb/util/SampleFromVoxels.h",
|
||||
"nanovdb/NanoVDB.h",
|
||||
"nanovdb/CNanoVDB.h",
|
||||
),
|
||||
),
|
||||
# Use for cycles stand-alone.
|
||||
("intern/cycles/util/CMakeLists.txt", "../../third_party/numaapi/include"),
|
||||
)
|
||||
|
||||
IGNORE_CMAKE = (
|
||||
|
@@ -32,7 +32,7 @@ CHECKER_IGNORE_PREFIX = [
|
||||
"intern/moto",
|
||||
]
|
||||
|
||||
CHECKER_BIN = "python3"
|
||||
CHECKER_BIN = "python2"
|
||||
|
||||
CHECKER_ARGS = [
|
||||
os.path.join(os.path.dirname(__file__), "clang_array_check.py"),
|
||||
|
@@ -19,7 +19,6 @@ set(WITH_DRACO ON CACHE BOOL "" FORCE)
|
||||
set(WITH_FFTW3 ON CACHE BOOL "" FORCE)
|
||||
set(WITH_FREESTYLE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_GMP ON CACHE BOOL "" FORCE)
|
||||
set(WITH_HARU ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IK_ITASC ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IK_SOLVER ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IMAGE_CINEON ON CACHE BOOL "" FORCE)
|
||||
@@ -45,8 +44,6 @@ set(WITH_OPENMP ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENSUBDIV ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB_BLOSC ON CACHE BOOL "" FORCE)
|
||||
set(WITH_POTRACE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_PUGIXML ON CACHE BOOL "" FORCE)
|
||||
set(WITH_NANOVDB ON CACHE BOOL "" FORCE)
|
||||
set(WITH_POTRACE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_PYTHON_INSTALL ON CACHE BOOL "" FORCE)
|
||||
|
@@ -24,7 +24,6 @@ set(WITH_DRACO OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_FFTW3 OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_FREESTYLE OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_GMP OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_HARU OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_IK_ITASC OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_IK_SOLVER OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_IMAGE_CINEON OFF CACHE BOOL "" FORCE)
|
||||
@@ -52,8 +51,6 @@ set(WITH_OPENIMAGEIO OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENMP OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENSUBDIV OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_POTRACE OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_PUGIXML OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_NANOVDB OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_QUADRIFLOW OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_SDL OFF CACHE BOOL "" FORCE)
|
||||
|
@@ -20,7 +20,6 @@ set(WITH_DRACO ON CACHE BOOL "" FORCE)
|
||||
set(WITH_FFTW3 ON CACHE BOOL "" FORCE)
|
||||
set(WITH_FREESTYLE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_GMP ON CACHE BOOL "" FORCE)
|
||||
set(WITH_HARU ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IK_SOLVER ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IK_ITASC ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IMAGE_CINEON ON CACHE BOOL "" FORCE)
|
||||
@@ -46,8 +45,6 @@ set(WITH_OPENMP ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENSUBDIV ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB_BLOSC ON CACHE BOOL "" FORCE)
|
||||
set(WITH_POTRACE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_PUGIXML ON CACHE BOOL "" FORCE)
|
||||
set(WITH_NANOVDB ON CACHE BOOL "" FORCE)
|
||||
set(WITH_POTRACE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_PYTHON_INSTALL ON CACHE BOOL "" FORCE)
|
||||
|
@@ -388,43 +388,6 @@ function(blender_add_lib
|
||||
set_property(GLOBAL APPEND PROPERTY BLENDER_LINK_LIBS ${name})
|
||||
endfunction()
|
||||
|
||||
function(blender_add_test_suite)
|
||||
if (ARGC LESS 1)
|
||||
message(FATAL_ERROR "No arguments supplied to blender_add_test_suite()")
|
||||
endif()
|
||||
|
||||
# Parse the arguments
|
||||
set(oneValueArgs TARGET SUITE_NAME)
|
||||
set(multiValueArgs SOURCES)
|
||||
cmake_parse_arguments(ARGS "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
|
||||
|
||||
# Figure out the release dir, as some tests need files from there.
|
||||
GET_BLENDER_TEST_INSTALL_DIR(TEST_INSTALL_DIR)
|
||||
if(APPLE)
|
||||
set(_test_release_dir ${TEST_INSTALL_DIR}/Blender.app/Contents/Resources/${BLENDER_VERSION})
|
||||
else()
|
||||
if(WIN32 OR WITH_INSTALL_PORTABLE)
|
||||
set(_test_release_dir ${TEST_INSTALL_DIR}/${BLENDER_VERSION})
|
||||
else()
|
||||
set(_test_release_dir ${TEST_INSTALL_DIR}/share/blender/${BLENDER_VERSION})
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Define a test case with our custom gtest_add_tests() command.
|
||||
include(GTest)
|
||||
gtest_add_tests(
|
||||
TARGET ${ARGS_TARGET}
|
||||
SOURCES "${ARGS_SOURCES}"
|
||||
TEST_PREFIX ${ARGS_SUITE_NAME}
|
||||
WORKING_DIRECTORY "${TEST_INSTALL_DIR}"
|
||||
EXTRA_ARGS
|
||||
--test-assets-dir "${CMAKE_SOURCE_DIR}/../lib/tests"
|
||||
--test-release-dir "${_test_release_dir}"
|
||||
)
|
||||
|
||||
unset(_test_release_dir)
|
||||
endfunction()
|
||||
|
||||
# Add tests for a Blender library, to be called in tandem with blender_add_lib().
|
||||
# The tests will be part of the blender_test executable (see tests/gtests/runner).
|
||||
function(blender_add_test_lib
|
||||
@@ -458,12 +421,6 @@ function(blender_add_test_lib
|
||||
blender_add_lib__impl(${name} "${sources}" "${includes}" "${includes_sys}" "${library_deps}")
|
||||
|
||||
set_property(GLOBAL APPEND PROPERTY BLENDER_TEST_LIBS ${name})
|
||||
|
||||
blender_add_test_suite(
|
||||
TARGET blender_test
|
||||
SUITE_NAME ${name}
|
||||
SOURCES "${sources}"
|
||||
)
|
||||
endfunction()
|
||||
|
||||
|
||||
@@ -497,10 +454,14 @@ function(blender_add_test_executable
|
||||
SKIP_ADD_TEST
|
||||
)
|
||||
|
||||
blender_add_test_suite(
|
||||
TARGET ${name}_test
|
||||
SUITE_NAME ${name}
|
||||
SOURCES "${sources}"
|
||||
include(GTest)
|
||||
set(_GOOGLETEST_DISCOVER_TESTS_SCRIPT
|
||||
${CMAKE_SOURCE_DIR}/build_files/cmake/Modules/GTestAddTests.cmake
|
||||
)
|
||||
|
||||
gtest_discover_tests(${name}_test
|
||||
DISCOVERY_MODE PRE_TEST
|
||||
WORKING_DIRECTORY "${TEST_INSTALL_DIR}"
|
||||
)
|
||||
endfunction()
|
||||
|
||||
@@ -1178,7 +1139,6 @@ endfunction()
|
||||
|
||||
function(find_python_package
|
||||
package
|
||||
relative_include_dir
|
||||
)
|
||||
|
||||
string(TOUPPER ${package} _upper_package)
|
||||
@@ -1210,10 +1170,7 @@ function(find_python_package
|
||||
dist-packages
|
||||
vendor-packages
|
||||
NO_DEFAULT_PATH
|
||||
DOC
|
||||
"Path to python site-packages or dist-packages containing '${package}' module"
|
||||
)
|
||||
mark_as_advanced(PYTHON_${_upper_package}_PATH)
|
||||
|
||||
if(NOT EXISTS "${PYTHON_${_upper_package}_PATH}")
|
||||
message(WARNING
|
||||
@@ -1231,50 +1188,6 @@ function(find_python_package
|
||||
set(WITH_PYTHON_INSTALL_${_upper_package} OFF PARENT_SCOPE)
|
||||
else()
|
||||
message(STATUS "${package} found at '${PYTHON_${_upper_package}_PATH}'")
|
||||
|
||||
if(NOT "${relative_include_dir}" STREQUAL "")
|
||||
set(_relative_include_dir "${package}/${relative_include_dir}")
|
||||
unset(PYTHON_${_upper_package}_INCLUDE_DIRS CACHE)
|
||||
find_path(PYTHON_${_upper_package}_INCLUDE_DIRS
|
||||
NAMES
|
||||
"${_relative_include_dir}"
|
||||
HINTS
|
||||
"${PYTHON_LIBPATH}/"
|
||||
"${PYTHON_LIBPATH}/python${PYTHON_VERSION}/"
|
||||
"${PYTHON_LIBPATH}/python${_PY_VER_MAJOR}/"
|
||||
PATH_SUFFIXES
|
||||
"site-packages/"
|
||||
"dist-packages/"
|
||||
"vendor-packages/"
|
||||
NO_DEFAULT_PATH
|
||||
DOC
|
||||
"Path to python site-packages or dist-packages containing '${package}' module header files"
|
||||
)
|
||||
mark_as_advanced(PYTHON_${_upper_package}_INCLUDE_DIRS)
|
||||
|
||||
if(NOT EXISTS "${PYTHON_${_upper_package}_INCLUDE_DIRS}")
|
||||
message(WARNING
|
||||
"Python package '${package}' include dir path could not be found in:\n"
|
||||
"'${PYTHON_LIBPATH}/python${PYTHON_VERSION}/site-packages/${_relative_include_dir}', "
|
||||
"'${PYTHON_LIBPATH}/python${_PY_VER_MAJOR}/site-packages/${_relative_include_dir}', "
|
||||
"'${PYTHON_LIBPATH}/python${PYTHON_VERSION}/dist-packages/${_relative_include_dir}', "
|
||||
"'${PYTHON_LIBPATH}/python${_PY_VER_MAJOR}/dist-packages/${_relative_include_dir}', "
|
||||
"'${PYTHON_LIBPATH}/python${PYTHON_VERSION}/vendor-packages/${_relative_include_dir}', "
|
||||
"'${PYTHON_LIBPATH}/python${_PY_VER_MAJOR}/vendor-packages/${_relative_include_dir}', "
|
||||
"\n"
|
||||
"The 'WITH_PYTHON_${_upper_package}' option will be disabled.\n"
|
||||
"The build will be usable, only add-ons that depend on this package won't be functional."
|
||||
)
|
||||
set(WITH_PYTHON_${_upper_package} OFF PARENT_SCOPE)
|
||||
else()
|
||||
set(_temp "${PYTHON_${_upper_package}_INCLUDE_DIRS}/${package}/${relative_include_dir}")
|
||||
unset(PYTHON_${_upper_package}_INCLUDE_DIRS CACHE)
|
||||
set(PYTHON_${_upper_package}_INCLUDE_DIRS "${_temp}"
|
||||
CACHE PATH "Path to the include directory of the ${package} module")
|
||||
|
||||
message(STATUS "${package} include files found at '${PYTHON_${_upper_package}_INCLUDE_DIRS}'")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
endfunction()
|
||||
|
@@ -72,11 +72,7 @@ if(WITH_JACK)
|
||||
endif()
|
||||
|
||||
if(NOT DEFINED LIBDIR)
|
||||
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64")
|
||||
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/darwin)
|
||||
else()
|
||||
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/darwin_${CMAKE_OSX_ARCHITECTURES})
|
||||
endif()
|
||||
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/darwin)
|
||||
else()
|
||||
message(STATUS "Using pre-compiled LIBDIR: ${LIBDIR}")
|
||||
endif()
|
||||
@@ -432,14 +428,6 @@ if(WITH_GMP)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_HARU)
|
||||
find_package(Haru)
|
||||
if(NOT HARU_FOUND)
|
||||
message(WARNING "Haru not found, disabling WITH_HARU")
|
||||
set(WITH_HARU OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(EXISTS ${LIBDIR})
|
||||
without_system_libs_end()
|
||||
endif()
|
||||
@@ -482,17 +470,3 @@ set(CMAKE_C_ARCHIVE_CREATE "<CMAKE_AR> Scr <TARGET> <LINK_FLAGS> <OBJECTS>")
|
||||
set(CMAKE_CXX_ARCHIVE_CREATE "<CMAKE_AR> Scr <TARGET> <LINK_FLAGS> <OBJECTS>")
|
||||
set(CMAKE_C_ARCHIVE_FINISH "<CMAKE_RANLIB> -no_warning_for_no_symbols -c <TARGET>")
|
||||
set(CMAKE_CXX_ARCHIVE_FINISH "<CMAKE_RANLIB> -no_warning_for_no_symbols -c <TARGET>")
|
||||
|
||||
if(WITH_COMPILER_CCACHE)
|
||||
if(NOT CMAKE_GENERATOR STREQUAL "Xcode")
|
||||
find_program(CCACHE_PROGRAM ccache)
|
||||
if(CCACHE_PROGRAM)
|
||||
# Makefiles and ninja
|
||||
set(CMAKE_C_COMPILER_LAUNCHER "${CCACHE_PROGRAM}" CACHE STRING "" FORCE)
|
||||
set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PROGRAM}" CACHE STRING "" FORCE)
|
||||
else()
|
||||
message(WARNING "Ccache NOT found, disabling WITH_COMPILER_CCACHE")
|
||||
set(WITH_COMPILER_CCACHE OFF)
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
@@ -154,32 +154,3 @@ if(NOT ${CMAKE_GENERATOR} MATCHES "Xcode")
|
||||
string(APPEND CMAKE_CXX_FLAGS " -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}")
|
||||
add_definitions("-DMACOSX_DEPLOYMENT_TARGET=${CMAKE_OSX_DEPLOYMENT_TARGET}")
|
||||
endif()
|
||||
|
||||
if(WITH_COMPILER_CCACHE)
|
||||
if(CMAKE_GENERATOR STREQUAL "Xcode")
|
||||
find_program(CCACHE_PROGRAM ccache)
|
||||
if(CCACHE_PROGRAM)
|
||||
get_filename_component(ccompiler "${CMAKE_C_COMPILER}" NAME)
|
||||
get_filename_component(cxxcompiler "${CMAKE_CXX_COMPILER}" NAME)
|
||||
# Ccache can figure out which compiler to use if it's invoked from
|
||||
# a symlink with the name of the compiler.
|
||||
# https://ccache.dev/manual/4.1.html#_run_modes
|
||||
set(_fake_compiler_dir "${CMAKE_BINARY_DIR}/ccache")
|
||||
execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${_fake_compiler_dir})
|
||||
set(_fake_C_COMPILER "${_fake_compiler_dir}/${ccompiler}")
|
||||
set(_fake_CXX_COMPILER "${_fake_compiler_dir}/${cxxcompiler}")
|
||||
execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink "${CCACHE_PROGRAM}" ${_fake_C_COMPILER})
|
||||
execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink "${CCACHE_PROGRAM}" ${_fake_CXX_COMPILER})
|
||||
set(CMAKE_XCODE_ATTRIBUTE_CC ${_fake_C_COMPILER} CACHE STRING "" FORCE)
|
||||
set(CMAKE_XCODE_ATTRIBUTE_CXX ${_fake_CXX_COMPILER} CACHE STRING "" FORCE)
|
||||
set(CMAKE_XCODE_ATTRIBUTE_LD ${_fake_C_COMPILER} CACHE STRING "" FORCE)
|
||||
set(CMAKE_XCODE_ATTRIBUTE_LDPLUSPLUS ${_fake_CXX_COMPILER} CACHE STRING "" FORCE)
|
||||
unset(_fake_compiler_dir)
|
||||
unset(_fake_C_COMPILER)
|
||||
unset(_fake_CXX_COMPILER)
|
||||
else()
|
||||
message(WARNING "Ccache NOT found, disabling WITH_COMPILER_CCACHE")
|
||||
set(WITH_COMPILER_CCACHE OFF)
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
@@ -352,11 +352,6 @@ endif()
|
||||
|
||||
if(WITH_PUGIXML)
|
||||
find_package_wrapper(PugiXML)
|
||||
|
||||
if (NOT PUGIXML_FOUND)
|
||||
set(WITH_PUGIXML OFF)
|
||||
message(STATUS "PugiXML not found, disabling WITH_PUGIXML")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_OPENIMAGEIO)
|
||||
@@ -615,13 +610,7 @@ endif()
|
||||
|
||||
# GNU Compiler
|
||||
if(CMAKE_COMPILER_IS_GNUCC)
|
||||
# ffp-contract=off:
|
||||
# Automatically turned on when building with "-march=native". This is
|
||||
# explicitly turned off here as it makes floating-point math give slightly
# different results, which leads to automated test failures. So disable
|
||||
# this until we support it. Seems to default to off in clang and the intel
|
||||
# compiler.
|
||||
set(PLATFORM_CFLAGS "-pipe -fPIC -funsigned-char -fno-strict-aliasing -ffp-contract=off")
|
||||
set(PLATFORM_CFLAGS "-pipe -fPIC -funsigned-char -fno-strict-aliasing")
|
||||
|
||||
# `maybe-uninitialized` is unreliable in release builds, but fine in debug builds.
|
||||
set(GCC_EXTRA_FLAGS_RELEASE "-Wno-maybe-uninitialized")
|
||||
@@ -695,15 +684,3 @@ set(PLATFORM_LINKFLAGS
|
||||
if(WITH_INSTALL_PORTABLE)
|
||||
string(APPEND CMAKE_EXE_LINKER_FLAGS " -no-pie")
|
||||
endif()
|
||||
|
||||
if(WITH_COMPILER_CCACHE)
|
||||
find_program(CCACHE_PROGRAM ccache)
|
||||
if(CCACHE_PROGRAM)
|
||||
# Makefiles and ninja
|
||||
set(CMAKE_C_COMPILER_LAUNCHER "${CCACHE_PROGRAM}" CACHE STRING "" FORCE)
|
||||
set(CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PROGRAM}" CACHE STRING "" FORCE)
|
||||
else()
|
||||
message(WARNING "Ccache NOT found, disabling WITH_COMPILER_CCACHE")
|
||||
set(WITH_COMPILER_CCACHE OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
@@ -739,7 +739,7 @@ if(WINDOWS_PYTHON_DEBUG)
|
||||
string(REPLACE "/" "\\" _group_path "${_source_path}")
|
||||
source_group("${_group_path}" FILES "${_source}")
|
||||
endforeach()
|
||||
|
||||
|
||||
# If the user scripts env var is set, include scripts from there, otherwise
|
||||
# include user scripts in the profile folder.
|
||||
if(DEFINED ENV{BLENDER_USER_SCRIPTS})
|
||||
@@ -750,7 +750,7 @@ if(WINDOWS_PYTHON_DEBUG)
|
||||
# Include the user scripts from the profile folder in the blender_python_user_scripts project.
|
||||
set(USER_SCRIPTS_ROOT "$ENV{appdata}/blender foundation/blender/${BLENDER_VERSION}/scripts")
|
||||
endif()
|
||||
|
||||
|
||||
file(TO_CMAKE_PATH ${USER_SCRIPTS_ROOT} USER_SCRIPTS_ROOT)
|
||||
FILE(GLOB_RECURSE inFiles "${USER_SCRIPTS_ROOT}/*.*" )
|
||||
ADD_CUSTOM_TARGET(blender_python_user_scripts SOURCES ${inFiles})
|
||||
@@ -800,15 +800,3 @@ if(WITH_POTRACE)
|
||||
set(POTRACE_LIBRARIES ${LIBDIR}/potrace/lib/potrace.lib)
|
||||
set(POTRACE_FOUND On)
|
||||
endif()
|
||||
|
||||
if(WITH_HARU)
|
||||
if(EXISTS ${LIBDIR}/haru)
|
||||
set(HARU_FOUND On)
|
||||
set(HARU_ROOT_DIR ${LIBDIR}/haru)
|
||||
set(HARU_INCLUDE_DIRS ${HARU_ROOT_DIR}/include)
|
||||
set(HARU_LIBRARIES ${HARU_ROOT_DIR}/lib/libhpdfs.lib)
|
||||
else()
|
||||
message(WARNING "Haru was not found, disabling WITH_HARU")
|
||||
set(WITH_HARU OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
@@ -8,7 +8,6 @@
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import platform
|
||||
import shutil
|
||||
import sys
|
||||
|
||||
@@ -50,12 +49,7 @@ def svn_update(args, release_version):
|
||||
|
||||
# Checkout precompiled libraries
|
||||
if sys.platform == 'darwin':
|
||||
if platform.machine() == 'x86_64':
|
||||
lib_platform = "darwin"
|
||||
elif platform.machine() == 'arm64':
|
||||
lib_platform = "darwin_arm64"
|
||||
else:
|
||||
lib_platform = None
|
||||
lib_platform = "darwin"
|
||||
elif sys.platform == 'win32':
|
||||
# Windows checkout is usually handled by bat scripts since python3 to run
|
||||
# this script is bundled as part of the precompiled libraries. However it
|
||||
|
@@ -38,7 +38,7 @@ PROJECT_NAME = Blender
|
||||
# could be handy for archiving the generated documentation or if some version
|
||||
# control system is used.
|
||||
|
||||
PROJECT_NUMBER = "V2.93"
|
||||
PROJECT_NUMBER = "V2.92"
|
||||
|
||||
# Using the PROJECT_BRIEF tag one can provide an optional one line description
|
||||
# for a project that appears at the top of each page and should give viewer a
|
||||
|
@@ -121,10 +121,6 @@
|
||||
* \ingroup editors
|
||||
*/
|
||||
|
||||
/** \defgroup edasset asset
|
||||
* \ingroup editors
|
||||
*/
|
||||
|
||||
/** \defgroup edcurve curve
|
||||
* \ingroup editors
|
||||
*/
|
||||
|
@@ -52,11 +52,10 @@ outfilename = sys.argv[2]
|
||||
|
||||
cmd = [blender_bin, "--help"]
|
||||
print(" executing:", " ".join(cmd))
|
||||
ASAN_OPTIONS = "exitcode=0:" + os.environ.get("ASAN_OPTIONS", "")
|
||||
blender_help = subprocess.run(
|
||||
cmd, env={"ASAN_OPTIONS": ASAN_OPTIONS}, check=True, stdout=subprocess.PIPE).stdout.decode(encoding="utf-8")
|
||||
cmd, env={"ASAN_OPTIONS": "exitcode=0"}, check=True, stdout=subprocess.PIPE).stdout.decode(encoding="utf-8")
|
||||
blender_version = subprocess.run(
|
||||
[blender_bin, "--version"], env={"ASAN_OPTIONS": ASAN_OPTIONS}, check=True, stdout=subprocess.PIPE).stdout.decode(encoding="utf-8").strip()
|
||||
[blender_bin, "--version"], env={"ASAN_OPTIONS": "exitcode=0"}, check=True, stdout=subprocess.PIPE).stdout.decode(encoding="utf-8").strip()
|
||||
blender_version, blender_date = (blender_version.split("build") + [None, None])[0:2]
|
||||
blender_version = blender_version.rstrip().partition(" ")[2] # remove 'Blender' prefix.
|
||||
if blender_date is None:
|
||||
|
@@ -163,13 +163,13 @@ Now in the button's context menu select *Copy Data Path*, then paste the result
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
bpy.context.active_object.modifiers["Subdivision"].levels
|
||||
bpy.context.active_object.modifiers["Subsurf"].levels
|
||||
|
||||
Press :kbd:`Return` and you'll get the current value of 1. Now try changing the value to 2:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
bpy.context.active_object.modifiers["Subdivision"].levels = 2
|
||||
bpy.context.active_object.modifiers["Subsurf"].levels = 2
|
||||
|
||||
You can see the value update in the Subdivision Surface modifier's UI as well as the cube.
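To check the same thing from the console, a minimal sketch along these lines lists every modifier on the active object together with its subdivision levels (it assumes the default cube with the modifier added above; modifiers without a ``levels`` property print ``None``):

.. code-block:: python

   import bpy

   # Print each modifier on the active object; only Subdivision Surface
   # modifiers expose a "levels" property, so fall back to None elsewhere.
   obj = bpy.context.active_object
   for mod in obj.modifiers:
       print(mod.name, mod.type, getattr(mod, "levels", None))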
|
||||
|
||||
@@ -185,31 +185,43 @@ For example, if you want to access the texture of a brush via Python to adjust i
|
||||
#. Start in the default scene and enable Sculpt Mode from the 3D Viewport header.
|
||||
#. From the Sidebar expand the Brush Settings panel's *Texture* subpanel and add a new texture.
|
||||
*Notice the texture data-block menu itself doesn't have very useful links (you can check the tooltips).*
|
||||
#. The contrast setting isn't exposed in the Sidebar, so view the texture in the
|
||||
:ref:`Properties Editor <blender_manual:bpy.types.Texture.contrast>`
|
||||
#. The contrast setting isn't exposed in the Sidebar, so view the texture in the properties editor:
|
||||
|
||||
- In the properties editor select the Texture tab.
|
||||
- Select the brush texture.
|
||||
- Expand the *Colors* panel to locate the *Contrast* number field.
|
||||
#. Open the context menu of the contrast field and select *Online Python Reference*.
|
||||
This takes you to ``bpy.types.Texture.contrast``. Now you can see that ``contrast`` is a property of texture.
|
||||
#. To find out how to access the texture from the brush, check the references at the bottom of the page.
|
||||
Sometimes there are many references, and it may take some guesswork to find the right one,
|
||||
but in this case it's ``tool_settings.sculpt.brush.texture``.
|
||||
but in this case it's ``Brush.texture``.
|
||||
|
||||
#. Now you know that the texture can be accessed from ``bpy.data.brushes["BrushName"].texture``
but normally you *won't* want to access the brush by name; instead you want to access the active brush.
So the next step is to check the references to see where brushes are accessed from.
In this case it is simply ``bpy.context.brush``.
|
||||
|
||||
Now you can use the Python console to form the nested properties needed to access the brush texture's contrast:
|
||||
:menuselection:`Context --> Tool Settings --> Sculpt --> Brush --> Texture --> Contrast`.
|
||||
*Context -> Brush -> Texture -> Contrast*.
|
||||
|
||||
Since the attribute for each is given along the way you can compose the data path in the Python console:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
bpy.context.tool_settings.sculpt.brush.texture.contrast
|
||||
bpy.context.brush.texture.contrast
|
||||
|
||||
There can be multiple ways to access the same data; which one you choose often depends on the task.
|
||||
An alternate path to access the same setting is:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
bpy.context.sculpt.brush.texture.contrast
|
||||
|
||||
Or access the brush directly:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
bpy.data.textures["Texture"].contrast
|
||||
bpy.data.brushes["BrushName"].texture.contrast
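As a rough sketch of the ``bpy.data`` route, the snippet below raises the contrast of every brush texture by ten percent; it only assumes the standard API and skips brushes that have no texture assigned:

.. code-block:: python

   import bpy

   # Walk all brushes in the file and adjust the contrast of their textures.
   # A brush may have no texture assigned, so check for None first.
   for brush in bpy.data.brushes:
       if brush.texture is not None:
           brush.texture.contrast *= 1.1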
|
||||
|
||||
|
||||
If you are writing a user tool, normally you want to use the :mod:`bpy.context` since the user normally expects
|
||||
|
@@ -35,13 +35,12 @@ but not to fully cover each topic.
|
||||
|
||||
A quick list of helpful things to know before starting:
|
||||
|
||||
- Enable :ref:`Developer Extra <blender_manual:prefs-interface-dev-extras>`
|
||||
and :ref:`Python Tooltips <blender_manual:prefs-interface-tooltips-python>`.
|
||||
- The :ref:`Python Console <blender_manual:bpy.types.SpaceConsole>`
|
||||
is great for testing one-liners; it has autocompletion so you can inspect the API quickly.
|
||||
- Button tooltips show Python attributes and operator names (when enabled, see above).
- The context menu of buttons directly links to this API documentation (when enabled, see above).
|
||||
- Many Python examples can be found in the text editor's template menu.
|
||||
- Blender uses Python 3.x; some online documentation still assumes version 2.x.
|
||||
- The interactive console is great for testing one-liners.
|
||||
It also has autocompletion so you can inspect the API quickly.
|
||||
- Button tooltips show Python attributes and operator names.
|
||||
- The context menu of buttons directly links to this API documentation.
|
||||
- More operator examples can be found in the text editor's template menu.
|
||||
- To examine further scripts distributed with Blender, see:
|
||||
|
||||
- ``scripts/startup/bl_ui`` for the user interface.
|
||||
@@ -238,7 +237,7 @@ Examples:
|
||||
{'FINISHED'}
|
||||
>>> bpy.ops.mesh.hide(unselected=False)
|
||||
{'FINISHED'}
|
||||
>>> bpy.ops.object.transform_apply()
|
||||
>>> bpy.ops.object.scale_apply()
|
||||
{'FINISHED'}
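Calling an operator in a context where it cannot run raises an error, so a small, hedged sketch like the following checks ``poll()`` first (``object.shade_smooth`` is only used here as an illustrative operator):

.. code-block:: python

   import bpy

   # Operators return a set such as {'FINISHED'} or {'CANCELLED'}.
   # poll() reports whether the current context allows the call,
   # avoiding the exception raised in an invalid context.
   if bpy.ops.object.shade_smooth.poll():
       result = bpy.ops.object.shade_smooth()
       print(result)  # e.g. {'FINISHED'}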
|
||||
|
||||
.. tip::
|
||||
|
@@ -24,9 +24,10 @@ The three main use cases for the terminal are:
|
||||
- If the script runs for too long or you accidentally enter an infinite loop,
|
||||
:kbd:`Ctrl-C` in the terminal (:kbd:`Ctrl-Break` on Windows) will quit the script early.
|
||||
|
||||
.. seealso::
|
||||
.. note::
|
||||
|
||||
:ref:`blender_manual:command_line-launch-index`.
|
||||
For Linux and macOS users this means starting the terminal first, then running Blender from within it.
|
||||
On Windows the terminal can be enabled from the Help menu.
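A quick way to confirm the terminal is hooked up is to run a one-liner from the Python console or a script; the output appears in the terminal Blender was started from, not in the Blender UI:

.. code-block:: python

   # Printed text (and any traceback) goes to the terminal Blender was launched from.
   print("Hello from Blender")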
|
||||
|
||||
|
||||
Interface Tricks
|
||||
|
@@ -55,7 +55,7 @@ if $DO_EXE_BLENDER ; then
|
||||
# Don't delete existing docs; partial updates are now used for quick builds.
|
||||
#
|
||||
# Disable ASAN error halt since it results in nonzero exit code on any minor issue.
|
||||
ASAN_OPTIONS=halt_on_error=0:${ASAN_OPTIONS} \
|
||||
ASAN_OPTIONS=halt_on_error=0 \
|
||||
$BLENDER_BIN \
|
||||
--background \
|
||||
-noaudio \
|
||||
|
@@ -1,5 +1,7 @@
|
||||
/* T76453: Prevent Long enum lists */
|
||||
.field-list li {
|
||||
/* Prevent Long enum lists */
|
||||
.field-body {
|
||||
display: block;
|
||||
width: 100%;
|
||||
max-height: 245px;
|
||||
overflow-y: auto !important;
|
||||
}
|
||||
|
4
extern/README
vendored
@@ -1,4 +0,0 @@
|
||||
When updating a library remember to:
|
||||
|
||||
* Update the README.blender with the corresponding version.
|
||||
* Update the THIRD-PARTY-LICENSE.txt document
|
2
extern/audaspace/blender_config.cmake
vendored
@@ -24,6 +24,6 @@ set(JACK_FOUND ${WITH_JACK})
|
||||
set(LIBSNDFILE_FOUND ${WITH_CODEC_SNDFILE})
|
||||
set(OPENAL_FOUND ${WITH_OPENAL})
|
||||
set(PYTHONLIBS_FOUND TRUE)
|
||||
set(NUMPY_FOUND ${WITH_PYTHON_NUMPY})
|
||||
set(NUMPY_FOUND TRUE)
|
||||
set(NUMPY_INCLUDE_DIRS ${PYTHON_NUMPY_INCLUDE_DIRS})
|
||||
set(SDL_FOUND ${WITH_SDL})
|
||||
|
@@ -72,9 +72,6 @@ protected:
|
||||
/// The channel mapper reader in between.
|
||||
std::shared_ptr<ChannelMapperReader> m_mapper;
|
||||
|
||||
/// Whether the source is being read for the first time.
|
||||
bool m_first_reading;
|
||||
|
||||
/// Whether to keep the source if end of it is reached.
|
||||
bool m_keep;
|
||||
|
||||
|
19
extern/audaspace/src/devices/SoftwareDevice.cpp
vendored
@@ -78,7 +78,7 @@ bool SoftwareDevice::SoftwareHandle::pause(bool keep)
|
||||
}
|
||||
|
||||
SoftwareDevice::SoftwareHandle::SoftwareHandle(SoftwareDevice* device, std::shared_ptr<IReader> reader, std::shared_ptr<PitchReader> pitch, std::shared_ptr<ResampleReader> resampler, std::shared_ptr<ChannelMapperReader> mapper, bool keep) :
|
||||
m_reader(reader), m_pitch(pitch), m_resampler(resampler), m_mapper(mapper), m_first_reading(true), m_keep(keep), m_user_pitch(1.0f), m_user_volume(1.0f), m_user_pan(0.0f), m_volume(0.0f), m_old_volume(0.0f), m_loopcount(0),
|
||||
m_reader(reader), m_pitch(pitch), m_resampler(resampler), m_mapper(mapper), m_keep(keep), m_user_pitch(1.0f), m_user_volume(1.0f), m_user_pan(0.0f), m_volume(0.0f), m_old_volume(0.0f), m_loopcount(0),
|
||||
m_relative(true), m_volume_max(1.0f), m_volume_min(0), m_distance_max(std::numeric_limits<float>::max()),
|
||||
m_distance_reference(1.0f), m_attenuation(1.0f), m_cone_angle_outer(M_PI), m_cone_angle_inner(M_PI), m_cone_volume_outer(0),
|
||||
m_flags(RENDER_CONE), m_stop(nullptr), m_stop_data(nullptr), m_status(STATUS_PLAYING), m_device(device)
|
||||
@@ -106,14 +106,6 @@ void SoftwareDevice::SoftwareHandle::update()
|
||||
if(m_pitch->getSpecs().channels != CHANNELS_MONO)
|
||||
{
|
||||
m_volume = m_user_volume;
|
||||
|
||||
// we don't know a previous volume if this source has never been read before
|
||||
if(m_first_reading)
|
||||
{
|
||||
m_old_volume = m_volume;
|
||||
m_first_reading = false;
|
||||
}
|
||||
|
||||
m_pitch->setPitch(m_user_pitch);
|
||||
return;
|
||||
}
|
||||
@@ -222,13 +214,6 @@ void SoftwareDevice::SoftwareHandle::update()
|
||||
m_volume *= m_user_volume;
|
||||
}
|
||||
|
||||
// we don't know a previous volume if this source has never been read before
|
||||
if(m_first_reading)
|
||||
{
|
||||
m_old_volume = m_volume;
|
||||
m_first_reading = false;
|
||||
}
|
||||
|
||||
// 3D Cue
|
||||
|
||||
Quaternion orientation;
|
||||
@@ -769,8 +754,6 @@ void SoftwareDevice::mix(data_t* buffer, int length)
|
||||
{
|
||||
m_mixer->mix(buf, pos, len, sound->m_volume, sound->m_old_volume);
|
||||
|
||||
sound->m_old_volume = sound->m_volume;
|
||||
|
||||
pos += len;
|
||||
|
||||
if(sound->m_loopcount > 0)
|
||||
|
@@ -22,7 +22,6 @@
|
||||
#include <mutex>
|
||||
|
||||
#define KEEP_TIME 10
|
||||
#define POSITION_EPSILON (1.0 / static_cast<double>(RATE_48000))
|
||||
|
||||
AUD_NAMESPACE_BEGIN
|
||||
|
||||
@@ -65,7 +64,7 @@ bool SequenceHandle::updatePosition(double position)
|
||||
if(m_handle.get())
|
||||
{
|
||||
// we currently have a handle, let's check where we are
|
||||
if(position - POSITION_EPSILON >= m_entry->m_end)
|
||||
if(position >= m_entry->m_end)
|
||||
{
|
||||
if(position >= m_entry->m_end + KEEP_TIME)
|
||||
// far end, stopping
|
||||
@@ -77,7 +76,7 @@ bool SequenceHandle::updatePosition(double position)
|
||||
return true;
|
||||
}
|
||||
}
|
||||
else if(position + POSITION_EPSILON >= m_entry->m_begin)
|
||||
else if(position >= m_entry->m_begin)
|
||||
{
|
||||
// inside, resuming
|
||||
m_handle->resume();
|
||||
@@ -99,7 +98,7 @@ bool SequenceHandle::updatePosition(double position)
|
||||
else
|
||||
{
|
||||
// we don't have a handle, let's start if we should be playing
|
||||
if(position + POSITION_EPSILON >= m_entry->m_begin && position - POSITION_EPSILON <= m_entry->m_end)
|
||||
if(position >= m_entry->m_begin && position <= m_entry->m_end)
|
||||
{
|
||||
start();
|
||||
return m_valid;
|
||||
|
1
extern/draco/draco/CMakeLists.txt
vendored
@@ -268,3 +268,4 @@ set(INC
|
||||
)
|
||||
|
||||
blender_add_lib(draco "${SRC}" "${INC}" "" "${LIB}")
|
||||
|
||||
|
8
extern/mantaflow/CMakeLists.txt
vendored
@@ -85,7 +85,7 @@ if(WIN32)
|
||||
add_definitions(-D_USE_MATH_DEFINES)
|
||||
endif()
|
||||
|
||||
if(WITH_MANTA_NUMPY AND WITH_PYTHON_NUMPY)
|
||||
if(WITH_MANTA_NUMPY AND WITH_PYTHON_INSTALL_NUMPY)
|
||||
add_definitions(-DNUMPY=1)
|
||||
endif()
|
||||
|
||||
@@ -109,7 +109,7 @@ set(INC_SYS
|
||||
${ZLIB_INCLUDE_DIRS}
|
||||
)
|
||||
|
||||
if(WITH_MANTA_NUMPY AND WITH_PYTHON_NUMPY)
|
||||
if(WITH_MANTA_NUMPY AND WITH_PYTHON_INSTALL_NUMPY)
|
||||
list(APPEND INC_SYS
|
||||
${PYTHON_NUMPY_INCLUDE_DIRS}
|
||||
)
|
||||
@@ -200,7 +200,6 @@ set(SRC
|
||||
${MANTA_PP}/plugin/ptsplugins.cpp
|
||||
${MANTA_PP}/plugin/secondaryparticles.cpp
|
||||
${MANTA_PP}/plugin/surfaceturbulence.cpp
|
||||
${MANTA_PP}/plugin/viscosity.cpp
|
||||
${MANTA_PP}/plugin/vortexplugins.cpp
|
||||
${MANTA_PP}/plugin/waveletturbulence.cpp
|
||||
${MANTA_PP}/plugin/waves.cpp
|
||||
@@ -256,7 +255,8 @@ if(WITH_MANTA_DEPENDENCIES)
|
||||
${MANTA_DEP}/cnpy/cnpy.h
|
||||
)
|
||||
endif()
|
||||
if(WITH_MANTA_NUMPY AND WITH_PYTHON_NUMPY)
|
||||
|
||||
if(WITH_MANTA_NUMPY AND WITH_PYTHON_INSTALL_NUMPY)
|
||||
list(APPEND SRC
|
||||
${MANTA_PP}/plugin/numpyconvert.cpp
|
||||
${MANTA_PP}/plugin/tfplugins.cpp
|
||||
|
2
extern/mantaflow/helper/util/rcmatrix.h
vendored
@@ -1035,7 +1035,7 @@ template<class N, class T> struct RCFixedMatrix {
|
||||
typedef RCMatrix<int, Real> Matrix;
|
||||
typedef RCFixedMatrix<int, Real> FixedMatrix;
|
||||
|
||||
}
|
||||
} // namespace Manta
|
||||
|
||||
#undef parallel_for
|
||||
#undef parallel_end
|
||||
|
94
extern/mantaflow/preprocessed/conjugategrad.cpp
vendored
@@ -397,7 +397,7 @@ struct UpdateSearchVec : public KernelBase {
|
||||
};
|
||||
|
||||
//*****************************************************************************
|
||||
// CG class
|
||||
// CG class
|
||||
|
||||
template<class APPLYMAT>
|
||||
GridCg<APPLYMAT>::GridCg(Grid<Real> &dst,
|
||||
@@ -406,8 +406,10 @@ GridCg<APPLYMAT>::GridCg(Grid<Real> &dst,
|
||||
Grid<Real> &search,
|
||||
const FlagGrid &flags,
|
||||
Grid<Real> &tmp,
|
||||
std::vector<Grid<Real> *> matrixAVec,
|
||||
std::vector<Grid<Real> *> rhsVec)
|
||||
Grid<Real> *pA0,
|
||||
Grid<Real> *pAi,
|
||||
Grid<Real> *pAj,
|
||||
Grid<Real> *pAk)
|
||||
: GridCgInterface(),
|
||||
mInited(false),
|
||||
mIterations(0),
|
||||
@@ -417,8 +419,10 @@ GridCg<APPLYMAT>::GridCg(Grid<Real> &dst,
|
||||
mSearch(search),
|
||||
mFlags(flags),
|
||||
mTmp(tmp),
|
||||
mMatrixA(matrixAVec),
|
||||
mVecRhs(rhsVec),
|
||||
mpA0(pA0),
|
||||
mpAi(pAi),
|
||||
mpAj(pAj),
|
||||
mpAk(pAk),
|
||||
mPcMethod(PC_None),
|
||||
mpPCA0(nullptr),
|
||||
mpPCAi(nullptr),
|
||||
@@ -441,37 +445,19 @@ template<class APPLYMAT> void GridCg<APPLYMAT>::doInit()
|
||||
|
||||
if (mPcMethod == PC_ICP) {
|
||||
assertMsg(mDst.is3D(), "ICP only supports 3D grids so far");
|
||||
InitPreconditionIncompCholesky(mFlags,
|
||||
*mpPCA0,
|
||||
*mpPCAi,
|
||||
*mpPCAj,
|
||||
*mpPCAk,
|
||||
*mMatrixA[0],
|
||||
*mMatrixA[1],
|
||||
*mMatrixA[2],
|
||||
*mMatrixA[3]);
|
||||
ApplyPreconditionIncompCholesky(mTmp,
|
||||
mResidual,
|
||||
mFlags,
|
||||
*mpPCA0,
|
||||
*mpPCAi,
|
||||
*mpPCAj,
|
||||
*mpPCAk,
|
||||
*mMatrixA[0],
|
||||
*mMatrixA[1],
|
||||
*mMatrixA[2],
|
||||
*mMatrixA[3]);
|
||||
InitPreconditionIncompCholesky(
|
||||
mFlags, *mpPCA0, *mpPCAi, *mpPCAj, *mpPCAk, *mpA0, *mpAi, *mpAj, *mpAk);
|
||||
ApplyPreconditionIncompCholesky(
|
||||
mTmp, mResidual, mFlags, *mpPCA0, *mpPCAi, *mpPCAj, *mpPCAk, *mpA0, *mpAi, *mpAj, *mpAk);
|
||||
}
|
||||
else if (mPcMethod == PC_mICP) {
|
||||
assertMsg(mDst.is3D(), "mICP only supports 3D grids so far");
|
||||
InitPreconditionModifiedIncompCholesky2(
|
||||
mFlags, *mpPCA0, *mMatrixA[0], *mMatrixA[1], *mMatrixA[2], *mMatrixA[3]);
|
||||
InitPreconditionModifiedIncompCholesky2(mFlags, *mpPCA0, *mpA0, *mpAi, *mpAj, *mpAk);
|
||||
ApplyPreconditionModifiedIncompCholesky2(
|
||||
mTmp, mResidual, mFlags, *mpPCA0, *mMatrixA[0], *mMatrixA[1], *mMatrixA[2], *mMatrixA[3]);
|
||||
mTmp, mResidual, mFlags, *mpPCA0, *mpA0, *mpAi, *mpAj, *mpAk);
|
||||
}
|
||||
else if (mPcMethod == PC_MGP) {
|
||||
InitPreconditionMultigrid(
|
||||
mMG, *mMatrixA[0], *mMatrixA[1], *mMatrixA[2], *mMatrixA[3], mAccuracy);
|
||||
InitPreconditionMultigrid(mMG, *mpA0, *mpAi, *mpAj, *mpAk, mAccuracy);
|
||||
ApplyPreconditionMultigrid(mMG, mTmp, mResidual);
|
||||
}
|
||||
else {
|
||||
@@ -479,6 +465,7 @@ template<class APPLYMAT> void GridCg<APPLYMAT>::doInit()
|
||||
}
|
||||
|
||||
mSearch.copyFrom(mTmp);
|
||||
|
||||
mSigma = GridDotProduct(mTmp, mResidual);
|
||||
}
|
||||
|
||||
@@ -493,7 +480,7 @@ template<class APPLYMAT> bool GridCg<APPLYMAT>::iterate()
|
||||
// this could reinterpret the mpA pointers (not so clean right now)
|
||||
// tmp = applyMat(search)
|
||||
|
||||
APPLYMAT(mFlags, mTmp, mSearch, mMatrixA, mVecRhs);
|
||||
APPLYMAT(mFlags, mTmp, mSearch, *mpA0, *mpAi, *mpAj, *mpAk);
|
||||
|
||||
// alpha = sigma/dot(tmp, search)
|
||||
Real dp = GridDotProduct(mTmp, mSearch);
|
||||
@@ -505,20 +492,11 @@ template<class APPLYMAT> bool GridCg<APPLYMAT>::iterate()
|
||||
gridScaledAdd<Real, Real>(mResidual, mTmp, -alpha); // residual += tmp * -alpha
|
||||
|
||||
if (mPcMethod == PC_ICP)
|
||||
ApplyPreconditionIncompCholesky(mTmp,
|
||||
mResidual,
|
||||
mFlags,
|
||||
*mpPCA0,
|
||||
*mpPCAi,
|
||||
*mpPCAj,
|
||||
*mpPCAk,
|
||||
*mMatrixA[0],
|
||||
*mMatrixA[1],
|
||||
*mMatrixA[2],
|
||||
*mMatrixA[3]);
|
||||
ApplyPreconditionIncompCholesky(
|
||||
mTmp, mResidual, mFlags, *mpPCA0, *mpPCAi, *mpPCAj, *mpPCAk, *mpA0, *mpAi, *mpAj, *mpAk);
|
||||
else if (mPcMethod == PC_mICP)
|
||||
ApplyPreconditionModifiedIncompCholesky2(
|
||||
mTmp, mResidual, mFlags, *mpPCA0, *mMatrixA[0], *mMatrixA[1], *mMatrixA[2], *mMatrixA[3]);
|
||||
mTmp, mResidual, mFlags, *mpPCA0, *mpA0, *mpAi, *mpAj, *mpAk);
|
||||
else if (mPcMethod == PC_MGP)
|
||||
ApplyPreconditionMultigrid(mMG, mTmp, mResidual);
|
||||
else
|
||||
@@ -606,15 +584,13 @@ void GridCg<APPLYMAT>::setMGPreconditioner(PreconditionType method, GridMg *MG)
|
||||
assertMsg(method == PC_MGP, "GridCg<APPLYMAT>::setMGPreconditioner: Invalid method specified.");
|
||||
|
||||
mPcMethod = method;
|
||||
|
||||
mMG = MG;
|
||||
}
|
||||
|
||||
// explicit instantiation
|
||||
template class GridCg<ApplyMatrix>;
|
||||
template class GridCg<ApplyMatrix2D>;
|
||||
template class GridCg<ApplyMatrixViscosityU>;
|
||||
template class GridCg<ApplyMatrixViscosityV>;
|
||||
template class GridCg<ApplyMatrixViscosityW>;
|
||||
|
||||
//*****************************************************************************
|
||||
// diffusion for real and vec grids, e.g. for viscosity
|
||||
@@ -662,15 +638,10 @@ void cgSolveDiffusion(const FlagGrid &flags,
|
||||
if (grid.getType() & GridBase::TypeReal) {
|
||||
Grid<Real> &u = ((Grid<Real> &)grid);
|
||||
rhs.copyFrom(u);
|
||||
vector<Grid<Real> *> matA{&A0, &Ai, &Aj};
|
||||
|
||||
if (flags.is3D()) {
|
||||
matA.push_back(&Ak);
|
||||
gcg = new GridCg<ApplyMatrix>(u, rhs, residual, search, flags, tmp, matA);
|
||||
}
|
||||
else {
|
||||
gcg = new GridCg<ApplyMatrix2D>(u, rhs, residual, search, flags, tmp, matA);
|
||||
}
|
||||
if (flags.is3D())
|
||||
gcg = new GridCg<ApplyMatrix>(u, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);
|
||||
else
|
||||
gcg = new GridCg<ApplyMatrix2D>(u, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);
|
||||
|
||||
gcg->setAccuracy(cgAccuracy);
|
||||
gcg->solve(maxIter);
|
||||
@@ -682,17 +653,12 @@ void cgSolveDiffusion(const FlagGrid &flags,
|
||||
else if ((grid.getType() & GridBase::TypeVec3) || (grid.getType() & GridBase::TypeMAC)) {
|
||||
Grid<Vec3> &vec = ((Grid<Vec3> &)grid);
|
||||
Grid<Real> u(parent);
|
||||
vector<Grid<Real> *> matA{&A0, &Ai, &Aj};
|
||||
|
||||
// core solve is same as for a regular real grid
|
||||
if (flags.is3D()) {
|
||||
matA.push_back(&Ak);
|
||||
gcg = new GridCg<ApplyMatrix>(u, rhs, residual, search, flags, tmp, matA);
|
||||
}
|
||||
else {
|
||||
gcg = new GridCg<ApplyMatrix2D>(u, rhs, residual, search, flags, tmp, matA);
|
||||
}
|
||||
|
||||
if (flags.is3D())
|
||||
gcg = new GridCg<ApplyMatrix>(u, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);
|
||||
else
|
||||
gcg = new GridCg<ApplyMatrix2D>(u, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);
|
||||
gcg->setAccuracy(cgAccuracy);
|
||||
|
||||
// diffuse every component separately
|
||||
|
468
extern/mantaflow/preprocessed/conjugategrad.h
vendored
@@ -78,9 +78,13 @@ template<class APPLYMAT> class GridCg : public GridCgInterface {
|
||||
Grid<Real> &search,
|
||||
const FlagGrid &flags,
|
||||
Grid<Real> &tmp,
|
||||
std::vector<Grid<Real> *> matrixAVec,
|
||||
std::vector<Grid<Real> *> rhsVec = {});
|
||||
~GridCg(){};
|
||||
Grid<Real> *A0,
|
||||
Grid<Real> *pAi,
|
||||
Grid<Real> *pAj,
|
||||
Grid<Real> *pAk);
|
||||
~GridCg()
|
||||
{
|
||||
}
|
||||
|
||||
void doInit();
|
||||
bool iterate();
|
||||
@@ -129,10 +133,7 @@ template<class APPLYMAT> class GridCg : public GridCgInterface {
|
||||
const FlagGrid &mFlags;
|
||||
Grid<Real> &mTmp;
|
||||
|
||||
//! shape of A matrix defined here (e.g. diagonal, positive neighbor cells, etc)
|
||||
std::vector<Grid<Real> *> mMatrixA;
|
||||
//! shape of rhs vector defined here (e.g. 1 rhs for regular fluids solve, 3 rhs for viscosity)
|
||||
std::vector<Grid<Real> *> mVecRhs;
|
||||
Grid<Real> *mpA0, *mpAi, *mpAj, *mpAk;
|
||||
|
||||
PreconditionType mPcMethod;
|
||||
//! preconditioning grids
|
||||
@@ -153,9 +154,11 @@ struct ApplyMatrix : public KernelBase {
|
||||
ApplyMatrix(const FlagGrid &flags,
|
||||
Grid<Real> &dst,
|
||||
const Grid<Real> &src,
|
||||
const std::vector<Grid<Real> *> matrixA,
|
||||
const std::vector<Grid<Real> *> vecRhs)
|
||||
: KernelBase(&flags, 0), flags(flags), dst(dst), src(src), matrixA(matrixA), vecRhs(vecRhs)
|
||||
Grid<Real> &A0,
|
||||
Grid<Real> &Ai,
|
||||
Grid<Real> &Aj,
|
||||
Grid<Real> &Ak)
|
||||
: KernelBase(&flags, 0), flags(flags), dst(dst), src(src), A0(A0), Ai(Ai), Aj(Aj), Ak(Ak)
|
||||
{
|
||||
runMessage();
|
||||
run();
|
||||
@@ -164,18 +167,11 @@ struct ApplyMatrix : public KernelBase {
|
||||
const FlagGrid &flags,
|
||||
Grid<Real> &dst,
|
||||
const Grid<Real> &src,
|
||||
const std::vector<Grid<Real> *> matrixA,
|
||||
const std::vector<Grid<Real> *> vecRhs) const
|
||||
Grid<Real> &A0,
|
||||
Grid<Real> &Ai,
|
||||
Grid<Real> &Aj,
|
||||
Grid<Real> &Ak) const
|
||||
{
|
||||
unusedParameter(vecRhs); // Not needed in this matrix application
|
||||
|
||||
if (matrixA.size() != 4)
|
||||
errMsg("ConjugateGrad: Invalid A matrix in apply matrix step");
|
||||
Grid<Real> &A0 = *matrixA[0];
|
||||
Grid<Real> &Ai = *matrixA[1];
|
||||
Grid<Real> &Aj = *matrixA[2];
|
||||
Grid<Real> &Ak = *matrixA[3];
|
||||
|
||||
if (!flags.isFluid(idx)) {
|
||||
dst[idx] = src[idx];
|
||||
return;
|
||||
@@ -200,16 +196,26 @@ struct ApplyMatrix : public KernelBase {
|
||||
return src;
|
||||
}
|
||||
typedef Grid<Real> type2;
|
||||
inline const std::vector<Grid<Real> *> &getArg3()
|
||||
inline Grid<Real> &getArg3()
|
||||
{
|
||||
return matrixA;
|
||||
return A0;
|
||||
}
|
||||
typedef std::vector<Grid<Real> *> type3;
|
||||
inline const std::vector<Grid<Real> *> &getArg4()
|
||||
typedef Grid<Real> type3;
|
||||
inline Grid<Real> &getArg4()
|
||||
{
|
||||
return vecRhs;
|
||||
return Ai;
|
||||
}
|
||||
typedef std::vector<Grid<Real> *> type4;
|
||||
typedef Grid<Real> type4;
|
||||
inline Grid<Real> &getArg5()
|
||||
{
|
||||
return Aj;
|
||||
}
|
||||
typedef Grid<Real> type5;
|
||||
inline Grid<Real> &getArg6()
|
||||
{
|
||||
return Ak;
|
||||
}
|
||||
typedef Grid<Real> type6;
|
||||
void runMessage()
|
||||
{
|
||||
debMsg("Executing kernel ApplyMatrix ", 3);
|
||||
@@ -220,7 +226,7 @@ struct ApplyMatrix : public KernelBase {
|
||||
void operator()(const tbb::blocked_range<IndexInt> &__r) const
|
||||
{
|
||||
for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
|
||||
op(idx, flags, dst, src, matrixA, vecRhs);
|
||||
op(idx, flags, dst, src, A0, Ai, Aj, Ak);
|
||||
}
|
||||
void run()
|
||||
{
|
||||
@@ -229,8 +235,10 @@ struct ApplyMatrix : public KernelBase {
|
||||
const FlagGrid &flags;
|
||||
Grid<Real> &dst;
|
||||
const Grid<Real> &src;
|
||||
const std::vector<Grid<Real> *> matrixA;
|
||||
const std::vector<Grid<Real> *> vecRhs;
|
||||
Grid<Real> &A0;
|
||||
Grid<Real> &Ai;
|
||||
Grid<Real> &Aj;
|
||||
Grid<Real> &Ak;
|
||||
};
|
||||
|
||||
//! Kernel: Apply symmetric stored Matrix. 2D version
|
||||
@@ -239,9 +247,11 @@ struct ApplyMatrix2D : public KernelBase {
|
||||
ApplyMatrix2D(const FlagGrid &flags,
|
||||
Grid<Real> &dst,
|
||||
const Grid<Real> &src,
|
||||
const std::vector<Grid<Real> *> matrixA,
|
||||
const std::vector<Grid<Real> *> vecRhs)
|
||||
: KernelBase(&flags, 0), flags(flags), dst(dst), src(src), matrixA(matrixA), vecRhs(vecRhs)
|
||||
Grid<Real> &A0,
|
||||
Grid<Real> &Ai,
|
||||
Grid<Real> &Aj,
|
||||
Grid<Real> &Ak)
|
||||
: KernelBase(&flags, 0), flags(flags), dst(dst), src(src), A0(A0), Ai(Ai), Aj(Aj), Ak(Ak)
|
||||
{
|
||||
runMessage();
|
||||
run();
|
||||
@@ -250,16 +260,12 @@ struct ApplyMatrix2D : public KernelBase {
|
||||
const FlagGrid &flags,
|
||||
Grid<Real> &dst,
|
||||
const Grid<Real> &src,
|
||||
const std::vector<Grid<Real> *> matrixA,
|
||||
const std::vector<Grid<Real> *> vecRhs) const
|
||||
Grid<Real> &A0,
|
||||
Grid<Real> &Ai,
|
||||
Grid<Real> &Aj,
|
||||
Grid<Real> &Ak) const
|
||||
{
|
||||
unusedParameter(vecRhs); // Not needed in this matrix application
|
||||
|
||||
if (matrixA.size() != 3)
|
||||
errMsg("ConjugateGrad: Invalid A matrix in apply matrix step");
|
||||
Grid<Real> &A0 = *matrixA[0];
|
||||
Grid<Real> &Ai = *matrixA[1];
|
||||
Grid<Real> &Aj = *matrixA[2];
|
||||
unusedParameter(Ak); // only there for parameter compatibility with ApplyMatrix
|
||||
|
||||
if (!flags.isFluid(idx)) {
|
||||
dst[idx] = src[idx];
|
||||
@@ -284,16 +290,26 @@ struct ApplyMatrix2D : public KernelBase {
|
||||
return src;
|
||||
}
|
||||
typedef Grid<Real> type2;
|
||||
inline const std::vector<Grid<Real> *> &getArg3()
|
||||
inline Grid<Real> &getArg3()
|
||||
{
|
||||
return matrixA;
|
||||
return A0;
|
||||
}
|
||||
typedef std::vector<Grid<Real> *> type3;
|
||||
inline const std::vector<Grid<Real> *> &getArg4()
|
||||
typedef Grid<Real> type3;
|
||||
inline Grid<Real> &getArg4()
|
||||
{
|
||||
return vecRhs;
|
||||
return Ai;
|
||||
}
|
||||
typedef std::vector<Grid<Real> *> type4;
|
||||
typedef Grid<Real> type4;
|
||||
inline Grid<Real> &getArg5()
|
||||
{
|
||||
return Aj;
|
||||
}
|
||||
typedef Grid<Real> type5;
|
||||
inline Grid<Real> &getArg6()
|
||||
{
|
||||
return Ak;
|
||||
}
|
||||
typedef Grid<Real> type6;
|
||||
void runMessage()
|
||||
{
|
||||
debMsg("Executing kernel ApplyMatrix2D ", 3);
|
||||
@@ -304,7 +320,7 @@ struct ApplyMatrix2D : public KernelBase {
|
||||
void operator()(const tbb::blocked_range<IndexInt> &__r) const
|
||||
{
|
||||
for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
|
||||
op(idx, flags, dst, src, matrixA, vecRhs);
|
||||
op(idx, flags, dst, src, A0, Ai, Aj, Ak);
|
||||
}
|
||||
void run()
|
||||
{
|
||||
@@ -313,358 +329,12 @@ struct ApplyMatrix2D : public KernelBase {
|
||||
const FlagGrid &flags;
|
||||
Grid<Real> &dst;
|
||||
const Grid<Real> &src;
|
||||
const std::vector<Grid<Real> *> matrixA;
|
||||
const std::vector<Grid<Real> *> vecRhs;
|
||||
Grid<Real> &A0;
|
||||
Grid<Real> &Ai;
|
||||
Grid<Real> &Aj;
|
||||
Grid<Real> &Ak;
|
||||
};
|
||||
|
||||
struct ApplyMatrixViscosityU : public KernelBase {
|
||||
ApplyMatrixViscosityU(const FlagGrid &flags,
|
||||
Grid<Real> &dst,
|
||||
const Grid<Real> &src,
|
||||
const std::vector<Grid<Real> *> matrixA,
|
||||
const std::vector<Grid<Real> *> vecRhs)
|
||||
: KernelBase(&flags, 1), flags(flags), dst(dst), src(src), matrixA(matrixA), vecRhs(vecRhs)
|
||||
{
|
||||
runMessage();
|
||||
run();
|
||||
}
|
||||
inline void op(int i,
|
||||
int j,
|
||||
int k,
|
||||
const FlagGrid &flags,
|
||||
Grid<Real> &dst,
|
||||
const Grid<Real> &src,
|
||||
const std::vector<Grid<Real> *> matrixA,
|
||||
const std::vector<Grid<Real> *> vecRhs) const
|
||||
{
|
||||
if (matrixA.size() != 15)
|
||||
errMsg("ConjugateGrad: Invalid A matrix in apply matrix step");
|
||||
Grid<Real> &A0 = *matrixA[0];
|
||||
Grid<Real> &Aplusi = *matrixA[1];
|
||||
Grid<Real> &Aplusj = *matrixA[2];
|
||||
Grid<Real> &Aplusk = *matrixA[3];
|
||||
Grid<Real> &Aminusi = *matrixA[4];
|
||||
Grid<Real> &Aminusj = *matrixA[5];
|
||||
Grid<Real> &Aminusk = *matrixA[6];
|
||||
|
||||
if (vecRhs.size() != 2)
|
||||
errMsg("ConjugateGrad: Invalid rhs vector in apply matrix step");
|
||||
Grid<Real> &srcV = *vecRhs[0];
|
||||
Grid<Real> &srcW = *vecRhs[1];
|
||||
|
||||
dst(i, j, k) = src(i, j, k) * A0(i, j, k) + src(i + 1, j, k) * Aplusi(i, j, k) +
|
||||
src(i, j + 1, k) * Aplusj(i, j, k) + src(i, j, k + 1) * Aplusk(i, j, k) +
|
||||
src(i - 1, j, k) * Aminusi(i, j, k) + src(i, j - 1, k) * Aminusj(i, j, k) +
|
||||
src(i, j, k - 1) * Aminusk(i, j, k);
|
||||
|
||||
dst(i, j, k) += srcV(i, j + 1, k) * (*matrixA[7])(i, j, k) +
|
||||
srcV(i - 1, j + 1, k) * (*matrixA[8])(i, j, k) +
|
||||
srcV(i, j, k) * (*matrixA[9])(i, j, k) +
|
||||
srcV(i - 1, j, k) * (*matrixA[10])(i, j, k) +
|
||||
srcW(i, j, k + 1) * (*matrixA[11])(i, j, k) +
|
||||
srcW(i - 1, j, k + 1) * (*matrixA[12])(i, j, k) +
|
||||
srcW(i, j, k) * (*matrixA[13])(i, j, k) +
|
||||
srcW(i - 1, j, k) * (*matrixA[14])(i, j, k);
|
||||
}
|
||||
inline const FlagGrid &getArg0()
|
||||
{
|
||||
return flags;
|
||||
}
|
||||
typedef FlagGrid type0;
|
||||
inline Grid<Real> &getArg1()
|
||||
{
|
||||
return dst;
|
||||
}
|
||||
typedef Grid<Real> type1;
|
||||
inline const Grid<Real> &getArg2()
|
||||
{
|
||||
return src;
|
||||
}
|
||||
typedef Grid<Real> type2;
|
||||
inline const std::vector<Grid<Real> *> &getArg3()
|
||||
{
|
||||
return matrixA;
|
||||
}
|
||||
typedef std::vector<Grid<Real> *> type3;
|
||||
inline const std::vector<Grid<Real> *> &getArg4()
|
||||
{
|
||||
return vecRhs;
|
||||
}
|
||||
typedef std::vector<Grid<Real> *> type4;
|
||||
void runMessage()
|
||||
{
|
||||
debMsg("Executing kernel ApplyMatrixViscosityU ", 3);
|
||||
debMsg("Kernel range"
|
||||
<< " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
|
||||
4);
|
||||
};
|
||||
void operator()(const tbb::blocked_range<IndexInt> &__r) const
|
||||
{
|
||||
const int _maxX = maxX;
|
||||
const int _maxY = maxY;
|
||||
if (maxZ > 1) {
|
||||
for (int k = __r.begin(); k != (int)__r.end(); k++)
|
||||
for (int j = 1; j < _maxY; j++)
|
||||
for (int i = 1; i < _maxX; i++)
|
||||
op(i, j, k, flags, dst, src, matrixA, vecRhs);
|
||||
}
|
||||
else {
|
||||
const int k = 0;
|
||||
for (int j = __r.begin(); j != (int)__r.end(); j++)
|
||||
for (int i = 1; i < _maxX; i++)
|
||||
op(i, j, k, flags, dst, src, matrixA, vecRhs);
|
||||
}
|
||||
}
|
||||
void run()
|
||||
{
|
||||
if (maxZ > 1)
|
||||
tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
|
||||
else
|
||||
tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
|
||||
}
|
||||
const FlagGrid &flags;
|
||||
Grid<Real> &dst;
|
||||
const Grid<Real> &src;
|
||||
const std::vector<Grid<Real> *> matrixA;
|
||||
const std::vector<Grid<Real> *> vecRhs;
|
||||
};
|
||||
|
||||
struct ApplyMatrixViscosityV : public KernelBase {
|
||||
ApplyMatrixViscosityV(const FlagGrid &flags,
|
||||
Grid<Real> &dst,
|
||||
const Grid<Real> &src,
|
||||
const std::vector<Grid<Real> *> matrixA,
|
||||
const std::vector<Grid<Real> *> vecRhs)
|
||||
: KernelBase(&flags, 1), flags(flags), dst(dst), src(src), matrixA(matrixA), vecRhs(vecRhs)
|
||||
{
|
||||
runMessage();
|
||||
run();
|
||||
}
|
||||
inline void op(int i,
|
||||
int j,
|
||||
int k,
|
||||
const FlagGrid &flags,
|
||||
Grid<Real> &dst,
|
||||
const Grid<Real> &src,
|
||||
const std::vector<Grid<Real> *> matrixA,
|
||||
const std::vector<Grid<Real> *> vecRhs) const
|
||||
{
|
||||
if (matrixA.size() != 15)
|
||||
errMsg("ConjugateGrad: Invalid A matrix in apply matrix step");
|
||||
Grid<Real> &A0 = *matrixA[0];
|
||||
Grid<Real> &Aplusi = *matrixA[1];
|
||||
Grid<Real> &Aplusj = *matrixA[2];
|
||||
Grid<Real> &Aplusk = *matrixA[3];
|
||||
Grid<Real> &Aminusi = *matrixA[4];
|
||||
Grid<Real> &Aminusj = *matrixA[5];
|
||||
Grid<Real> &Aminusk = *matrixA[6];
|
||||
|
||||
if (vecRhs.size() != 2)
|
||||
errMsg("ConjugateGrad: Invalid rhs vector in apply matrix step");
|
||||
Grid<Real> &srcU = *vecRhs[0];
|
||||
Grid<Real> &srcW = *vecRhs[1];
|
||||
|
||||
dst(i, j, k) = src(i, j, k) * A0(i, j, k) + src(i + 1, j, k) * Aplusi(i, j, k) +
|
||||
src(i, j + 1, k) * Aplusj(i, j, k) + src(i, j, k + 1) * Aplusk(i, j, k) +
|
||||
src(i - 1, j, k) * Aminusi(i, j, k) + src(i, j - 1, k) * Aminusj(i, j, k) +
|
||||
src(i, j, k - 1) * Aminusk(i, j, k);
|
||||
|
||||
dst(i, j, k) += srcU(i + 1, j, k) * (*matrixA[7])(i, j, k) +
|
||||
srcU(i + 1, j - 1, k) * (*matrixA[8])(i, j, k) +
|
||||
srcU(i, j, k) * (*matrixA[9])(i, j, k) +
|
||||
srcU(i, j - 1, k) * (*matrixA[10])(i, j, k) +
|
||||
srcW(i, j, k + 1) * (*matrixA[11])(i, j, k) +
|
||||
srcW(i, j - 1, k + 1) * (*matrixA[12])(i, j, k) +
|
||||
srcW(i, j, k) * (*matrixA[13])(i, j, k) +
|
||||
srcW(i, j - 1, k) * (*matrixA[14])(i, j, k);
|
||||
}
|
||||
inline const FlagGrid &getArg0()
|
||||
{
|
||||
return flags;
|
||||
}
|
||||
typedef FlagGrid type0;
|
||||
inline Grid<Real> &getArg1()
|
||||
{
|
||||
return dst;
|
||||
}
|
||||
typedef Grid<Real> type1;
|
||||
inline const Grid<Real> &getArg2()
|
||||
{
|
||||
return src;
|
||||
}
|
||||
typedef Grid<Real> type2;
|
||||
inline const std::vector<Grid<Real> *> &getArg3()
|
||||
{
|
||||
return matrixA;
|
||||
}
|
||||
typedef std::vector<Grid<Real> *> type3;
|
||||
inline const std::vector<Grid<Real> *> &getArg4()
|
||||
{
|
||||
return vecRhs;
|
||||
}
|
||||
typedef std::vector<Grid<Real> *> type4;
|
||||
void runMessage()
|
||||
{
|
||||
debMsg("Executing kernel ApplyMatrixViscosityV ", 3);
|
||||
debMsg("Kernel range"
|
||||
<< " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
|
||||
4);
|
||||
};
|
||||
void operator()(const tbb::blocked_range<IndexInt> &__r) const
|
||||
{
|
||||
const int _maxX = maxX;
|
||||
const int _maxY = maxY;
|
||||
if (maxZ > 1) {
|
||||
for (int k = __r.begin(); k != (int)__r.end(); k++)
|
||||
for (int j = 1; j < _maxY; j++)
|
||||
for (int i = 1; i < _maxX; i++)
|
||||
op(i, j, k, flags, dst, src, matrixA, vecRhs);
|
||||
}
|
||||
else {
|
||||
const int k = 0;
|
||||
for (int j = __r.begin(); j != (int)__r.end(); j++)
|
||||
for (int i = 1; i < _maxX; i++)
|
||||
op(i, j, k, flags, dst, src, matrixA, vecRhs);
|
||||
}
|
||||
}
|
||||
void run()
|
||||
{
|
||||
if (maxZ > 1)
|
||||
tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
|
||||
else
|
||||
tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
|
||||
}
|
||||
const FlagGrid &flags;
|
||||
Grid<Real> &dst;
|
||||
const Grid<Real> &src;
|
||||
const std::vector<Grid<Real> *> matrixA;
|
||||
const std::vector<Grid<Real> *> vecRhs;
|
||||
};
|
||||
|
||||
struct ApplyMatrixViscosityW : public KernelBase {
|
||||
ApplyMatrixViscosityW(const FlagGrid &flags,
|
||||
Grid<Real> &dst,
|
||||
const Grid<Real> &src,
|
||||
const std::vector<Grid<Real> *> matrixA,
|
||||
const std::vector<Grid<Real> *> vecRhs)
|
||||
: KernelBase(&flags, 1), flags(flags), dst(dst), src(src), matrixA(matrixA), vecRhs(vecRhs)
|
||||
{
|
||||
runMessage();
|
||||
run();
|
||||
}
|
||||
inline void op(int i,
|
||||
int j,
|
||||
int k,
|
||||
const FlagGrid &flags,
|
||||
Grid<Real> &dst,
|
||||
const Grid<Real> &src,
|
||||
const std::vector<Grid<Real> *> matrixA,
|
||||
const std::vector<Grid<Real> *> vecRhs) const
|
||||
{
|
||||
if (matrixA.size() != 15)
|
||||
errMsg("ConjugateGrad: Invalid A matrix in apply matrix step");
|
||||
Grid<Real> &A0 = *matrixA[0];
|
||||
Grid<Real> &Aplusi = *matrixA[1];
|
||||
Grid<Real> &Aplusj = *matrixA[2];
|
||||
Grid<Real> &Aplusk = *matrixA[3];
|
||||
Grid<Real> &Aminusi = *matrixA[4];
|
||||
Grid<Real> &Aminusj = *matrixA[5];
|
||||
Grid<Real> &Aminusk = *matrixA[6];
|
||||
|
||||
if (vecRhs.size() != 2)
|
||||
errMsg("ConjugateGrad: Invalid rhs vector in apply matrix step");
|
||||
Grid<Real> &srcU = *vecRhs[0];
|
||||
Grid<Real> &srcV = *vecRhs[1];
|
||||
|
||||
dst(i, j, k) = src(i, j, k) * A0(i, j, k) + src(i + 1, j, k) * Aplusi(i, j, k) +
|
||||
src(i, j + 1, k) * Aplusj(i, j, k) + src(i, j, k + 1) * Aplusk(i, j, k) +
|
||||
src(i - 1, j, k) * Aminusi(i, j, k) + src(i, j - 1, k) * Aminusj(i, j, k) +
|
||||
src(i, j, k - 1) * Aminusk(i, j, k);
|
||||
|
||||
dst(i, j, k) += srcU(i + 1, j, k) * (*matrixA[7])(i, j, k) +
|
||||
srcU(i + 1, j, k - 1) * (*matrixA[8])(i, j, k) +
|
||||
srcU(i, j, k) * (*matrixA[9])(i, j, k) +
|
||||
srcU(i, j, k - 1) * (*matrixA[10])(i, j, k) +
|
||||
srcV(i, j + 1, k) * (*matrixA[11])(i, j, k) +
|
||||
srcV(i, j + 1, k - 1) * (*matrixA[12])(i, j, k) +
|
||||
srcV(i, j, k) * (*matrixA[13])(i, j, k) +
|
||||
srcV(i, j, k - 1) * (*matrixA[14])(i, j, k);
|
||||
}
|
||||
inline const FlagGrid &getArg0()
|
||||
{
|
||||
return flags;
|
||||
}
|
||||
typedef FlagGrid type0;
|
||||
inline Grid<Real> &getArg1()
|
||||
{
|
||||
return dst;
|
||||
}
|
||||
typedef Grid<Real> type1;
|
||||
inline const Grid<Real> &getArg2()
|
||||
{
|
||||
return src;
|
||||
}
|
||||
typedef Grid<Real> type2;
|
||||
inline const std::vector<Grid<Real> *> &getArg3()
|
||||
{
|
||||
return matrixA;
|
||||
}
|
||||
typedef std::vector<Grid<Real> *> type3;
|
||||
inline const std::vector<Grid<Real> *> &getArg4()
|
||||
{
|
||||
return vecRhs;
|
||||
}
|
||||
typedef std::vector<Grid<Real> *> type4;
|
||||
void runMessage()
|
||||
{
|
||||
debMsg("Executing kernel ApplyMatrixViscosityW ", 3);
|
||||
debMsg("Kernel range"
|
||||
<< " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
|
||||
4);
|
||||
};
|
||||
void operator()(const tbb::blocked_range<IndexInt> &__r) const
|
||||
{
|
||||
const int _maxX = maxX;
|
||||
const int _maxY = maxY;
|
||||
if (maxZ > 1) {
|
||||
for (int k = __r.begin(); k != (int)__r.end(); k++)
|
||||
for (int j = 1; j < _maxY; j++)
|
||||
for (int i = 1; i < _maxX; i++)
|
||||
op(i, j, k, flags, dst, src, matrixA, vecRhs);
|
||||
}
|
||||
else {
|
||||
const int k = 0;
|
||||
for (int j = __r.begin(); j != (int)__r.end(); j++)
|
||||
for (int i = 1; i < _maxX; i++)
|
||||
op(i, j, k, flags, dst, src, matrixA, vecRhs);
|
||||
}
|
||||
}
|
||||
void run()
|
||||
{
|
||||
if (maxZ > 1)
|
||||
tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
|
||||
else
|
||||
tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
|
||||
}
|
||||
const FlagGrid &flags;
|
||||
Grid<Real> &dst;
|
||||
const Grid<Real> &src;
|
||||
const std::vector<Grid<Real> *> matrixA;
|
||||
const std::vector<Grid<Real> *> vecRhs;
|
||||
};
|
||||
|
||||
/* NOTE: Use this template for new matrix application kernels
|
||||
|
||||
//! Template for matrix application kernels
|
||||
KERNEL()
|
||||
void ApplyMatrixTemplate (const FlagGrid& flags, Grid<Real>& dst, const Grid<Real>& src,
|
||||
const std::vector<Grid<Real> *> matrixA, const std::vector<Grid<Real> *> vecRhs)
|
||||
{
|
||||
// The kernel must define how to use the grids from the matrixA and vecRhs lists
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
//! Kernel: Construct the matrix for the poisson equation
|
||||
|
||||
struct MakeLaplaceMatrix : public KernelBase {
|
||||
|
23
extern/mantaflow/preprocessed/fileio/iogrids.cpp
vendored
@@ -628,24 +628,13 @@ template<class T> int readGridUni(const string &name, Grid<T> *grid)
|
||||
// current file format
|
||||
UniHeader head;
|
||||
assertMsg(gzread(gzf, &head, sizeof(UniHeader)) == sizeof(UniHeader),
|
||||
"readGridUni: Can't read file, no header present");
|
||||
"can't read file, no header present");
|
||||
assertMsg(head.dimX == grid->getSizeX() && head.dimY == grid->getSizeY() &&
|
||||
head.dimZ == grid->getSizeZ(),
|
||||
"grid dim doesn't match, " << Vec3(head.dimX, head.dimY, head.dimZ) << " vs "
|
||||
<< grid->getSize());
|
||||
assertMsg(unifyGridType(head.gridType) == unifyGridType(grid->getType()),
|
||||
"readGridUni: Grid type doesn't match " << head.gridType << " vs "
|
||||
<< grid->getType());
|
||||
|
||||
const Vec3i curGridSize = grid->getParent()->getGridSize();
|
||||
const Vec3i headGridSize(head.dimX, head.dimY, head.dimZ);
|
||||
# if BLENDER
|
||||
// Correct grid size is only a soft requirement in Blender
|
||||
if (headGridSize != curGridSize) {
|
||||
debMsg("readGridUni: Grid dim doesn't match, " << headGridSize << " vs " << curGridSize, 1);
|
||||
return 0;
|
||||
}
|
||||
# else
|
||||
assertMsg(headGridSize == curGridSize,
|
||||
"readGridUni: Grid dim doesn't match, " << headGridSize << " vs " << curGridSize);
|
||||
# endif
|
||||
|
||||
"grid type doesn't match " << head.gridType << " vs " << grid->getType());
|
||||
# if FLOATINGPOINT_PRECISION != 1
|
||||
// convert float to double
|
||||
Grid<T> temp(grid->getParent());
|
||||
|
@@ -230,19 +230,6 @@ int readParticlesUni(const std::string &name, BasicParticleSystem *parts)
|
||||
assertMsg(((head.bytesPerElement == PartSysSize) && (head.elementType == 0)),
|
||||
"particle type doesn't match");
|
||||
|
||||
const Vec3i curGridSize = parts->getParent()->getGridSize();
|
||||
const Vec3i headGridSize(head.dimX, head.dimY, head.dimZ);
|
||||
# if BLENDER
|
||||
// Correct grid size is only a soft requirement in Blender
|
||||
if (headGridSize != curGridSize) {
|
||||
debMsg("readPdataUni: Grid dim doesn't match, " << headGridSize << " vs " << curGridSize, 1);
|
||||
return 0;
|
||||
}
|
||||
# else
|
||||
assertMsg(headGridSize == curGridSize,
|
||||
"readPdataUni: Grid dim doesn't match, " << headGridSize << " vs " << curGridSize);
|
||||
# endif
|
||||
|
||||
// re-allocate all data
|
||||
parts->resizeAll(head.dim);
|
||||
|
||||
@@ -338,19 +325,6 @@ template<class T> int readPdataUni(const std::string &name, ParticleDataImpl<T>
|
||||
pdata->getParticleSys()->resize(head.dim); // ensure that parent particle system has same size
|
||||
pdata->resize(head.dim);
|
||||
|
||||
const Vec3i curGridSize = pdata->getParent()->getGridSize();
|
||||
const Vec3i headGridSize(head.dimX, head.dimY, head.dimZ);
|
||||
# if BLENDER
|
||||
// Correct grid size is only a soft requirement in Blender
|
||||
if (headGridSize != curGridSize) {
|
||||
debMsg("readPdataUni: Grid dim doesn't match, " << headGridSize << " vs " << curGridSize, 1);
|
||||
return 0;
|
||||
}
|
||||
# else
|
||||
assertMsg(headGridSize == curGridSize,
|
||||
"readPdataUni: Grid dim doesn't match, " << headGridSize << " vs " << curGridSize);
|
||||
# endif
|
||||
|
||||
assertMsg(head.dim == pdata->size(), "pdata size doesn't match");
|
||||
# if FLOATINGPOINT_PRECISION != 1
|
||||
ParticleDataImpl<T> temp(pdata->getParent());
|
||||
|
@@ -90,13 +90,6 @@ template<> void convertFrom(openvdb::Vec3s &in, Vec3 *out)
|
||||
(*out).z = in.z();
|
||||
}
|
||||
|
||||
template<> void convertFrom(openvdb::Vec3i &in, Vec3i *out)
|
||||
{
|
||||
(*out).x = in.x();
|
||||
(*out).y = in.y();
|
||||
(*out).z = in.z();
|
||||
}
|
||||
|
||||
// Convert to OpenVDB value from Manta value.
|
||||
template<class S, class T> void convertTo(S *out, T &in)
|
||||
{
|
||||
|
172
extern/mantaflow/preprocessed/fileio/iovdb.cpp
vendored
@@ -38,11 +38,6 @@
#define POSITION_NAME "P"
#define FLAG_NAME "U"

#define META_BASE_RES "file_base_resolution"
#define META_VOXEL_SIZE "file_voxel_size"
#define META_BBOX_MAX "file_bbox_max"
#define META_BBOX_MIN "file_bbox_min"

using namespace std;

namespace Manta {

@@ -393,8 +388,7 @@ int writeObjectsVDB(const string &filename,
int compression,
int precision,
float clip,
const Grid<Real> *clipGrid,
const bool meta)
const Grid<Real> *clipGrid)
{
openvdb::initialize();
openvdb::io::File file(filename);

@@ -429,14 +423,13 @@ int writeObjectsVDB(const string &filename,

if (GridBase *mantaGrid = dynamic_cast<GridBase *>(*iter)) {

if (clipGrid) {
assertMsg(clipGrid->getSize() == mantaGrid->getSize(),
"writeObjectsVDB: Clip grid and exported grid must have the same size");
}
if (mantaGrid->getType() & GridBase::TypeInt) {
debMsg("Writing int grid '" << mantaGrid->getName() << "' to vdb file " << filename, 1);
Grid<int> *mantaIntGrid = (Grid<int> *)mantaGrid;
if (clipGrid && mantaIntGrid->saveSparse()) {
assertMsg(clipGrid->getSize() == mantaGrid->getSize(),
"writeObjectsVDB: Clip grid and exported grid must have the same size "
<< clipGrid->getSize() << " vs " << mantaGrid->getSize());
}
vdbGrid = exportVDB<int, openvdb::Int32Grid>(mantaIntGrid, clip, vdbClipGrid);
gridsVDB.push_back(vdbGrid);
}

@@ -447,11 +440,6 @@ int writeObjectsVDB(const string &filename,
Grid<Real> *mantaRealGrid = (Grid<Real> *)mantaGrid;
// Only supply clip grid if real grid is not equal to the clip grid
openvdb::FloatGrid::Ptr tmpClipGrid = (mantaRealGrid == clipGrid) ? nullptr : vdbClipGrid;
if (clipGrid && mantaRealGrid->saveSparse()) {
assertMsg(clipGrid->getSize() == mantaGrid->getSize(),
"writeObjectsVDB: Clip grid and exported grid must have the same size "
<< clipGrid->getSize() << " vs " << mantaGrid->getSize());
}
vdbGrid = exportVDB<Real, openvdb::FloatGrid>(mantaRealGrid, clip, tmpClipGrid);
gridsVDB.push_back(vdbGrid);
}

@@ -460,11 +448,6 @@ int writeObjectsVDB(const string &filename,
gClass = (mantaGrid->getType() & GridBase::TypeMAC) ? openvdb::GRID_STAGGERED :
openvdb::GRID_UNKNOWN;
Grid<Vec3> *mantaVec3Grid = (Grid<Vec3> *)mantaGrid;
if (clipGrid && mantaVec3Grid->saveSparse()) {
assertMsg(clipGrid->getSize() == mantaGrid->getSize(),
"writeObjectsVDB: Clip grid and exported grid must have the same size "
<< clipGrid->getSize() << " vs " << mantaGrid->getSize());
}
vdbGrid = exportVDB<Vec3, openvdb::Vec3SGrid>(mantaVec3Grid, clip, vdbClipGrid);
gridsVDB.push_back(vdbGrid);
}

@@ -495,16 +478,6 @@ int writeObjectsVDB(const string &filename,
// Set additional grid attributes, e.g. name, grid class, compression level, etc.
if (vdbGrid) {
setGridOptions<openvdb::GridBase>(vdbGrid, objectName, gClass, voxelSize, precision);

// Optional metadata: Save additional simulation information per vdb object
if (meta) {
const Vec3i size = object->getParent()->getGridSize();
// The (dense) resolution of this grid
vdbGrid->insertMeta(META_BASE_RES,
openvdb::Vec3IMetadata(openvdb::Vec3i(size.x, size.y, size.z)));
// Length of one voxel side
vdbGrid->insertMeta(META_VOXEL_SIZE, openvdb::FloatMetadata(voxelSize));
}
}
}

@@ -549,44 +522,6 @@ int writeObjectsVDB(const string &filename,
return 1;
}

static void clearAll(std::vector<PbClass *> *objects, std::vector<ParticleDataBase *> pdbBuffer)
{
// Clear all data loaded into manta objects (e.g. during IO error)
for (std::vector<PbClass *>::iterator iter = objects->begin(); iter != objects->end(); ++iter) {
if (GridBase *mantaGrid = dynamic_cast<GridBase *>(*iter)) {
if (mantaGrid->getType() & GridBase::TypeInt) {
Grid<int> *mantaIntGrid = (Grid<int> *)mantaGrid;
mantaIntGrid->clear();
}
else if (mantaGrid->getType() & GridBase::TypeReal) {
Grid<Real> *mantaRealGrid = (Grid<Real> *)mantaGrid;
mantaRealGrid->clear();
}
else if (mantaGrid->getType() & GridBase::TypeVec3) {
Grid<Vec3> *mantaVec3Grid = (Grid<Vec3> *)mantaGrid;
mantaVec3Grid->clear();
}
}
else if (BasicParticleSystem *mantaPP = dynamic_cast<BasicParticleSystem *>(*iter)) {
mantaPP->clear();
}
}
for (ParticleDataBase *pdb : pdbBuffer) {
if (pdb->getType() == ParticleDataBase::TypeInt) {
ParticleDataImpl<int> *mantaPDataInt = (ParticleDataImpl<int> *)pdb;
mantaPDataInt->clear();
}
else if (pdb->getType() == ParticleDataBase::TypeReal) {
ParticleDataImpl<Real> *mantaPDataReal = (ParticleDataImpl<Real> *)pdb;
mantaPDataReal->clear();
}
else if (pdb->getType() == ParticleDataBase::TypeVec3) {
ParticleDataImpl<Vec3> *mantaPDataVec3 = (ParticleDataImpl<Vec3> *)pdb;
mantaPDataVec3->clear();
}
}
}

int readObjectsVDB(const string &filename, std::vector<PbClass *> *objects, float worldSize)
{

@@ -615,9 +550,6 @@ int readObjectsVDB(const string &filename, std::vector<PbClass *> *objects, floa
// A buffer to store a handle to pData objects. These will be read alongside a particle system.
std::vector<ParticleDataBase *> pdbBuffer;

// Count how many objects could not be read correctly
int readFailure = 0;

for (std::vector<PbClass *>::iterator iter = objects->begin(); iter != objects->end(); ++iter) {

if (gridsVDB.empty()) {

@@ -625,12 +557,11 @@ int readObjectsVDB(const string &filename, std::vector<PbClass *> *objects, floa
}
// If there is just one grid in this file, load it regardless of name match (to vdb caches per
// grid).
const bool onlyGrid = (gridsVDB.size() == 1);
bool onlyGrid = (gridsVDB.size() == 1);

PbClass *object = dynamic_cast<PbClass *>(*iter);
const Real dx = object->getParent()->getDx();
const Vec3i origRes = object->getParent()->getGridSize();
Real voxelSize = worldSize * dx;
const Real voxelSize = worldSize * dx;

// Particle data objects are treated separately - buffered and inserted when reading the
// particle system

@@ -654,81 +585,6 @@ int readObjectsVDB(const string &filename, std::vector<PbClass *> *objects, floa
if (!nameMatch && !onlyGrid) {
continue;
}

// Metadata: If present in the file, meta data will be parsed into these fields
Real metaVoxelSize(0);
Vec3i metaRes(0), metaBBoxMax(0), metaBBoxMin(0);

// Loop to load all meta data that we care about
for (openvdb::MetaMap::MetaIterator iter = vdbGrid->beginMeta(); iter != vdbGrid->endMeta();
++iter) {
const std::string &name = iter->first;
const openvdb::Metadata::Ptr value = iter->second;
if (name.compare(META_BASE_RES) == 0) {
openvdb::Vec3i tmp = static_cast<openvdb::Vec3IMetadata &>(*value).value();
convertFrom(tmp, &metaRes);
}
else if (name.compare(META_VOXEL_SIZE) == 0) {
float tmp = static_cast<openvdb::FloatMetadata &>(*value).value();
convertFrom(tmp, &metaVoxelSize);

voxelSize = metaVoxelSize; // Make sure to update voxel size variable (used in
// pointgrid's importVDB())
if (worldSize != 1.0)
debMsg(
"readObjectsVDB: Found voxel size in meta data. worldSize parameter will be "
"ignored!",
1);
}
else if (name.compare(META_BBOX_MAX) == 0) {
openvdb::Vec3i tmp = static_cast<openvdb::Vec3IMetadata &>(*value).value();
convertFrom(tmp, &metaBBoxMax);
}
else if (name.compare(META_BBOX_MIN) == 0) {
openvdb::Vec3i tmp = static_cast<openvdb::Vec3IMetadata &>(*value).value();
convertFrom(tmp, &metaBBoxMin);
}
else {
debMsg("readObjectsVDB: Skipping unknown meta information '" << name << "'", 1);
}
}

// Compare metadata with allocated grid setup. This prevents invalid index access.
if (notZero(metaRes) && metaRes != origRes) {
debMsg("readObjectsVDB Warning: Grid '" << vdbGrid->getName()
<< "' has not been read. Meta grid res " << metaRes
<< " vs " << origRes << " current grid size",
1);
readFailure++;
break;
}
if (notZero(metaVoxelSize) && metaVoxelSize != voxelSize) {
debMsg("readObjectsVDB Warning: Grid '"
<< vdbGrid->getName() << "' has not been read. Meta voxel size "
<< metaVoxelSize << " vs " << voxelSize << " current voxel size",
1);
readFailure++;
break;
}
if (metaBBoxMax.x > origRes.x || metaBBoxMax.y > origRes.y || metaBBoxMax.z > origRes.z) {
debMsg("readObjectsVDB Warning: Grid '"
<< vdbGrid->getName() << "' has not been read. Vdb bbox max " << metaBBoxMax
<< " vs " << origRes << " current grid size",
1);
readFailure++;
break;
}
const Vec3i origOrigin(0);
if (metaBBoxMin.x < origOrigin.x || metaBBoxMin.y < origOrigin.y ||
metaBBoxMin.z < origOrigin.z) {
debMsg("readObjectsVDB Warning: Grid '"
<< vdbGrid->getName() << "' has not been read. Vdb bbox min " << metaBBoxMin
<< " vs " << origOrigin << " current grid origin",
1);
readFailure++;
break;
}

if (GridBase *mantaGrid = dynamic_cast<GridBase *>(*iter)) {

if (mantaGrid->getType() & GridBase::TypeInt) {

@@ -788,17 +644,6 @@ int readObjectsVDB(const string &filename, std::vector<PbClass *> *objects, floa
return 0;
}
}
// Do not continue loading objects in this loop if there was a read error
if (readFailure > 0) {
break;
}
}

if (readFailure > 0) {
// Clear all data that has already been loaded into simulation objects
clearAll(objects, pdbBuffer);
pdbBuffer.clear();
return 0;
}

// Give out a warning if pData items were present but could not be read due to missing particle

@@ -873,8 +718,7 @@ int writeObjectsVDB(const string &filename,
int compression,
int precision,
float clip,
const Grid<Real> *clipGrid,
const bool meta)
const Grid<Real> *clipGrid)
{
errMsg("Cannot save to .vdb file. Mantaflow has not been built with OpenVDB support.");
return 0;

@@ -85,8 +85,7 @@ int save(const string &name,
bool precisionHalf = true,
int precision = PRECISION_HALF,
float clip = 1e-4,
const Grid<Real> *clipGrid = nullptr,
const bool meta = false)
const Grid<Real> *clipGrid = nullptr)
{

if (!precisionHalf) {

@@ -106,7 +105,7 @@ int save(const string &name,
return writeGridsVol(name, &objects);
if (ext == ".vdb")
return writeObjectsVDB(
name, &objects, worldSize, skipDeletedParts, compression, precision, clip, clipGrid, meta);
name, &objects, worldSize, skipDeletedParts, compression, precision, clip, clipGrid);
else if (ext == ".npz")
return writeGridsNumpy(name, &objects);
else if (ext == ".txt")

@@ -135,7 +134,6 @@ static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
int precision = _args.getOpt<int>("precision", 6, PRECISION_HALF, &_lock);
float clip = _args.getOpt<float>("clip", 7, 1e-4, &_lock);
const Grid<Real> *clipGrid = _args.getPtrOpt<Grid<Real>>("clipGrid", 8, nullptr, &_lock);
const bool meta = _args.getOpt<bool>("meta", 9, false, &_lock);
_retval = toPy(save(name,
objects,
worldSize,

@@ -144,8 +142,7 @@ static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
precisionHalf,
precision,
clip,
clipGrid,
meta));
clipGrid));
_args.check();
}
pbFinalizePlugin(parent, "save", !noTiming);

@@ -77,8 +77,7 @@ int writeObjectsVDB(const std::string &filename,
int compression = COMPRESSION_ZIP,
int precision = PRECISION_HALF,
float clip = 1e-4,
const Grid<Real> *clipGrid = nullptr,
const bool meta = false);
const Grid<Real> *clipGrid = nullptr);
int readObjectsVDB(const std::string &filename,
std::vector<PbClass *> *objects,
float scale = 1.0);
extern/mantaflow/preprocessed/general.h
@@ -42,7 +42,7 @@ inline void updateQtGui(bool full, int frame, float time, const std::string &cur
# ifdef _DEBUG
# define DEBUG 1
# endif // _DEBUG
#endif // DEBUG
#endif // DEBUG

// Standard exception
class Error : public std::exception {

@@ -242,39 +242,6 @@ inline bool c_isnan(float c)
return d != d;
}

//! Swap so that a<b
template<class T> inline void sort(T &a, T &b)
{
if (a > b)
std::swap(a, b);
}

//! Swap so that a<b<c
template<class T> inline void sort(T &a, T &b, T &c)
{
if (a > b)
std::swap(a, b);
if (a > c)
std::swap(a, c);
if (b > c)
std::swap(b, c);
}

//! Swap so that a<b<c<d
template<class T> inline void sort(T &a, T &b, T &c, T &d)
{
if (a > b)
std::swap(a, b);
if (c > d)
std::swap(c, d);
if (a > c)
std::swap(a, c);
if (b > d)
std::swap(b, d);
if (b > c)
std::swap(b, c);
}

} // namespace Manta

#endif
extern/mantaflow/preprocessed/gitinfo.h
@@ -1,3 +1,3 @@

#define MANTA_GIT_VERSION "commit 1c86d86496e7f7473c36248d12ef07bf4d9d2840"
#define MANTA_GIT_VERSION "commit 327917cd59b03bef3a953b5f58fc1637b3a83e01"
extern/mantaflow/preprocessed/grid.cpp
@@ -508,7 +508,8 @@ struct CompMaxVec : public KernelBase {

template<class T> Grid<T> &Grid<T>::copyFrom(const Grid<T> &a, bool copyType)
{
assertMsg(a.mSize == mSize, "different grid resolutions " << a.mSize << " vs " << this->mSize);
assertMsg(a.mSize.x == mSize.x && a.mSize.y == mSize.y && a.mSize.z == mSize.z,
"different grid resolutions " << a.mSize << " vs " << this->mSize);
memcpy(mData, a.mData, sizeof(T) * mSize.x * mSize.y * mSize.z);
if (copyType)
mType = a.mType; // copy type marker

@@ -3401,7 +3402,8 @@ void PbRegister_markIsolatedFluidCell()
void copyMACData(
const MACGrid &source, MACGrid &target, const FlagGrid &flags, const int flag, const int bnd)
{
assertMsg(source.getSize() == target.getSize(),
assertMsg(source.getSize().x == target.getSize().x && source.getSize().y == target.getSize().y &&
source.getSize().z == target.getSize().z,
"different grid resolutions " << source.getSize() << " vs " << target.getSize());

// Grid<Real> divGrid(target.getParent());
extern/mantaflow/preprocessed/grid.h
@@ -596,7 +596,6 @@ template<class T> class Grid : public GridBase {
//! set data
inline void set(int i, int j, int k, T &val)
{
DEBUG_ONLY(checkIndex(i, j, k));
mData[index(i, j, k)] = val;
}
extern/mantaflow/preprocessed/grid4d.cpp
@@ -491,7 +491,9 @@ template<class T> Grid4d<T> &Grid4d<T>::safeDivide(const Grid4d<T> &a)
}
template<class T> Grid4d<T> &Grid4d<T>::copyFrom(const Grid4d<T> &a, bool copyType)
{
assertMsg(a.mSize == mSize, "different Grid4d resolutions " << a.mSize << " vs " << this->mSize);
assertMsg(a.mSize.x == mSize.x && a.mSize.y == mSize.y && a.mSize.z == mSize.z &&
a.mSize.t == mSize.t,
"different Grid4d resolutions " << a.mSize << " vs " << this->mSize);
memcpy(mData, a.mData, sizeof(T) * mSize.x * mSize.y * mSize.z * mSize.t);
if (copyType)
mType = a.mType; // copy type marker
@@ -1135,27 +1135,26 @@ struct KnAddForceIfLower : public KernelBase {
if (!curFluid && !curEmpty)
return;

Real minVal, maxVal, sum;
if (flags.isFluid(i - 1, j, k) || (curFluid && flags.isEmpty(i - 1, j, k))) {
Real forceMACX = 0.5 * (force(i - 1, j, k).x + force(i, j, k).x);
minVal = min(vel(i, j, k).x, forceMACX);
maxVal = max(vel(i, j, k).x, forceMACX);
sum = vel(i, j, k).x + forceMACX;
vel(i, j, k).x = (forceMACX > 0) ? min(sum, maxVal) : max(sum, minVal);
Real min = std::min(vel(i, j, k).x, forceMACX);
Real max = std::max(vel(i, j, k).x, forceMACX);
Real sum = vel(i, j, k).x + forceMACX;
vel(i, j, k).x = (forceMACX > 0) ? std::min(sum, max) : std::max(sum, min);
}
if (flags.isFluid(i, j - 1, k) || (curFluid && flags.isEmpty(i, j - 1, k))) {
Real forceMACY = 0.5 * (force(i, j - 1, k).y + force(i, j, k).y);
minVal = min(vel(i, j, k).y, forceMACY);
maxVal = max(vel(i, j, k).y, forceMACY);
sum = vel(i, j, k).y + forceMACY;
vel(i, j, k).y = (forceMACY > 0) ? min(sum, maxVal) : max(sum, minVal);
Real min = std::min(vel(i, j, k).y, forceMACY);
Real max = std::max(vel(i, j, k).y, forceMACY);
Real sum = vel(i, j, k).y + forceMACY;
vel(i, j, k).y = (forceMACY > 0) ? std::min(sum, max) : std::max(sum, min);
}
if (vel.is3D() && (flags.isFluid(i, j, k - 1) || (curFluid && flags.isEmpty(i, j, k - 1)))) {
Real forceMACZ = 0.5 * (force(i, j, k - 1).z + force(i, j, k).z);
minVal = min(vel(i, j, k).z, forceMACZ);
maxVal = max(vel(i, j, k).z, forceMACZ);
sum = vel(i, j, k).z + forceMACZ;
vel(i, j, k).z = (forceMACZ > 0) ? min(sum, maxVal) : max(sum, minVal);
Real min = std::min(vel(i, j, k).z, forceMACZ);
Real max = std::max(vel(i, j, k).z, forceMACZ);
Real sum = vel(i, j, k).z + forceMACZ;
vel(i, j, k).z = (forceMACZ > 0) ? std::min(sum, max) : std::max(sum, min);
}
}
inline const FlagGrid &getArg0()

@@ -1138,15 +1138,11 @@ void solvePressureSystem(Grid<Real> &rhs,
// note: the last factor increases the max iterations for 2d, which right now can't use a
// preconditioner
GridCgInterface *gcg;
vector<Grid<Real> *> matA{&A0, &Ai, &Aj};

if (vel.is3D()) {
matA.push_back(&Ak);
gcg = new GridCg<ApplyMatrix>(pressure, rhs, residual, search, flags, tmp, matA);
}
else {
gcg = new GridCg<ApplyMatrix2D>(pressure, rhs, residual, search, flags, tmp, matA);
}
if (vel.is3D())
gcg = new GridCg<ApplyMatrix>(pressure, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);
else
gcg = new GridCg<ApplyMatrix2D>(
pressure, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);

gcg->setAccuracy(cgAccuracy);
gcg->setUseL2Norm(useL2Norm);
extern/mantaflow/preprocessed/plugin/viscosity.cpp
File diff suppressed because it is too large
@@ -576,10 +576,8 @@ void VICintegration(VortexSheetMesh &mesh,

// prepare CG solver
const int maxIter = (int)(cgMaxIterFac * vel.getSize().max());
vector<Grid<Real> *> matA{&A0, &Ai, &Aj, &Ak};

GridCgInterface *gcg = new GridCg<ApplyMatrix>(
solution, rhs, residual, search, flags, temp1, matA);
solution, rhs, residual, search, flags, temp1, &A0, &Ai, &Aj, &Ak);
gcg->setAccuracy(cgAccuracy);
gcg->setUseL2Norm(true);
gcg->setICPreconditioner(
extern/mantaflow/preprocessed/plugin/waves.cpp
@@ -423,15 +423,10 @@ void cgSolveWE(const FlagGrid &flags,

const int maxIter = (int)(cgMaxIterFac * flags.getSize().max()) * (flags.is3D() ? 1 : 4);
GridCgInterface *gcg;
vector<Grid<Real> *> matA{&A0, &Ai, &Aj};

if (flags.is3D()) {
matA.push_back(&Ak);
gcg = new GridCg<ApplyMatrix>(out, rhs, residual, search, flags, tmp, matA);
}
else {
gcg = new GridCg<ApplyMatrix2D>(out, rhs, residual, search, flags, tmp, matA);
}
if (flags.is3D())
gcg = new GridCg<ApplyMatrix>(out, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);
else
gcg = new GridCg<ApplyMatrix2D>(out, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);

gcg->setAccuracy(cgAccuracy);

@@ -145,7 +145,6 @@ extern void PbRegister_flipComputeSurfaceNormals();
extern void PbRegister_flipUpdateNeighborRatio();
extern void PbRegister_particleSurfaceTurbulence();
extern void PbRegister_debugCheckParts();
extern void PbRegister_applyViscosity();
extern void PbRegister_markAsFixed();
extern void PbRegister_texcoordInflow();
extern void PbRegister_meshSmokeInflow();

@@ -343,7 +342,6 @@ void MantaEnsureRegistration()
PbRegister_flipUpdateNeighborRatio();
PbRegister_particleSurfaceTurbulence();
PbRegister_debugCheckParts();
PbRegister_applyViscosity();
PbRegister_markAsFixed();
PbRegister_texcoordInflow();
PbRegister_meshSmokeInflow();
@@ -14,7 +14,7 @@

# Standalone or with Blender
if(NOT WITH_BLENDER AND WITH_CYCLES_STANDALONE)
set(CYCLES_INSTALL_PATH ${CMAKE_INSTALL_PREFIX})
set(CYCLES_INSTALL_PATH "")
else()
set(WITH_CYCLES_BLENDER ON)
# WINDOWS_PYTHON_DEBUG needs to write into the user addons folder since it will

@@ -379,9 +379,6 @@ endif()
# Subdirectories

if(WITH_CYCLES_BLENDER)
# Not needed to make cycles automated tests pass with -march=native.
# However Blender itself needs this flag.
remove_cc_flag("-ffp-contract=off")
add_definitions(-DWITH_BLENDER_GUARDEDALLOC)
add_subdirectory(blender)
endif()
@@ -133,12 +133,12 @@ static void scene_init()

/* Camera width/height override? */
if (!(options.width == 0 || options.height == 0)) {
options.scene->camera->set_full_width(options.width);
options.scene->camera->set_full_height(options.height);
options.scene->camera->width = options.width;
options.scene->camera->height = options.height;
}
else {
options.width = options.scene->camera->get_full_width();
options.height = options.scene->camera->get_full_height();
options.width = options.scene->camera->width;
options.height = options.scene->camera->height;
}

/* Calculate Viewplane */

@@ -233,7 +233,7 @@ static void display()
static void motion(int x, int y, int button)
{
if (options.interactive) {
Transform matrix = options.session->scene->camera->get_matrix();
Transform matrix = options.session->scene->camera->matrix;

/* Translate */
if (button == 0) {

@@ -251,8 +251,8 @@ static void motion(int x, int y, int button)
}

/* Update and Reset */
options.session->scene->camera->set_matrix(matrix);
options.session->scene->camera->need_flags_update = true;
options.session->scene->camera->matrix = matrix;
options.session->scene->camera->need_update = true;
options.session->scene->camera->need_device_update = true;

options.session->reset(session_buffer_params(), options.session_params.samples);

@@ -266,10 +266,10 @@ static void resize(int width, int height)

if (options.session) {
/* Update camera */
options.session->scene->camera->set_full_width(options.width);
options.session->scene->camera->set_full_height(options.height);
options.session->scene->camera->width = width;
options.session->scene->camera->height = height;
options.session->scene->camera->compute_auto_viewplane();
options.session->scene->camera->need_flags_update = true;
options.session->scene->camera->need_update = true;
options.session->scene->camera->need_device_update = true;

options.session->reset(session_buffer_params(), options.session_params.samples);

@@ -302,7 +302,7 @@ static void keyboard(unsigned char key)

/* Navigation */
else if (options.interactive && (key == 'w' || key == 'a' || key == 's' || key == 'd')) {
Transform matrix = options.session->scene->camera->get_matrix();
Transform matrix = options.session->scene->camera->matrix;
float3 translate;

if (key == 'w')

@@ -317,8 +317,8 @@ static void keyboard(unsigned char key)
matrix = matrix * transform_translate(translate);

/* Update and Reset */
options.session->scene->camera->set_matrix(matrix);
options.session->scene->camera->need_flags_update = true;
options.session->scene->camera->matrix = matrix;
options.session->scene->camera->need_update = true;
options.session->scene->camera->need_device_update = true;

options.session->reset(session_buffer_params(), options.session_params.samples);

@@ -345,7 +345,10 @@ static void keyboard(unsigned char key)
break;
}

options.session->scene->integrator->set_max_bounce(bounce);
options.session->scene->integrator->max_bounce = bounce;

/* Update and Reset */
options.session->scene->integrator->need_update = true;

options.session->reset(session_buffer_params(), options.session_params.samples);
}
@@ -190,18 +190,17 @@ static void xml_read_camera(XMLReadState &state, xml_node node)
{
Camera *cam = state.scene->camera;

int width = -1, height = -1;
xml_read_int(&width, node, "width");
xml_read_int(&height, node, "height");
xml_read_int(&cam->width, node, "width");
xml_read_int(&cam->height, node, "height");

cam->set_full_width(width);
cam->set_full_height(height);
cam->full_width = cam->width;
cam->full_height = cam->height;

xml_read_node(state, cam, node);

cam->set_matrix(state.tfm);
cam->matrix = state.tfm;

cam->need_flags_update = true;
cam->need_update = true;
cam->update(state.scene);
}

@@ -339,13 +338,11 @@ static void xml_read_shader_graph(XMLReadState &state, Shader *shader, xml_node

if (node_name == "image_texture") {
ImageTextureNode *img = (ImageTextureNode *)snode;
ustring filename(path_join(state.base, img->get_filename().string()));
img->set_filename(filename);
img->filename = path_join(state.base, img->filename.string());
}
else if (node_name == "environment_texture") {
EnvironmentTextureNode *env = (EnvironmentTextureNode *)snode;
ustring filename(path_join(state.base, env->get_filename().string()));
env->set_filename(filename);
env->filename = path_join(state.base, env->filename.string());
}

if (snode) {

@@ -387,8 +384,8 @@ static Mesh *xml_add_mesh(Scene *scene, const Transform &tfm)

/* create object*/
Object *object = new Object();
object->set_geometry(mesh);
object->set_tfm(tfm);
object->geometry = mesh;
object->tfm = tfm;
scene->objects.push_back(object);

return mesh;

@@ -398,9 +395,7 @@ static void xml_read_mesh(const XMLReadState &state, xml_node node)
{
/* add mesh */
Mesh *mesh = xml_add_mesh(state.scene, state.tfm);
array<Node *> used_shaders = mesh->get_used_shaders();
used_shaders.push_back_slow(state.shader);
mesh->set_used_shaders(used_shaders);
mesh->used_shaders.push_back(state.shader);

/* read state */
int shader = 0;

@@ -416,24 +411,20 @@ static void xml_read_mesh(const XMLReadState &state, xml_node node)
xml_read_int_array(nverts, node, "nverts");

if (xml_equal_string(node, "subdivision", "catmull-clark")) {
mesh->set_subdivision_type(Mesh::SUBDIVISION_CATMULL_CLARK);
mesh->subdivision_type = Mesh::SUBDIVISION_CATMULL_CLARK;
}
else if (xml_equal_string(node, "subdivision", "linear")) {
mesh->set_subdivision_type(Mesh::SUBDIVISION_LINEAR);
mesh->subdivision_type = Mesh::SUBDIVISION_LINEAR;
}

array<float3> P_array;
P_array = P;

if (mesh->get_subdivision_type() == Mesh::SUBDIVISION_NONE) {
if (mesh->subdivision_type == Mesh::SUBDIVISION_NONE) {
/* create vertices */

mesh->set_verts(P_array);
mesh->verts = P;

size_t num_triangles = 0;
for (size_t i = 0; i < nverts.size(); i++)
num_triangles += nverts[i] - 2;
mesh->reserve_mesh(mesh->get_verts().size(), num_triangles);
mesh->reserve_mesh(mesh->verts.size(), num_triangles);

/* create triangles */
int index_offset = 0;

@@ -483,7 +474,7 @@ static void xml_read_mesh(const XMLReadState &state, xml_node node)
}
else {
/* create vertices */
mesh->set_verts(P_array);
mesh->verts = P;

size_t num_ngons = 0;
size_t num_corners = 0;

@@ -522,20 +513,23 @@ static void xml_read_mesh(const XMLReadState &state, xml_node node)
}

/* setup subd params */
float dicing_rate = state.dicing_rate;
xml_read_float(&dicing_rate, node, "dicing_rate");
dicing_rate = std::max(0.1f, dicing_rate);
if (!mesh->subd_params) {
mesh->subd_params = new SubdParams(mesh);
}
SubdParams &sdparams = *mesh->subd_params;

mesh->set_subd_dicing_rate(dicing_rate);
mesh->set_subd_objecttoworld(state.tfm);
sdparams.dicing_rate = state.dicing_rate;
xml_read_float(&sdparams.dicing_rate, node, "dicing_rate");
sdparams.dicing_rate = std::max(0.1f, sdparams.dicing_rate);

sdparams.objecttoworld = state.tfm;
}

/* we don't yet support arbitrary attributes, for now add vertex
* coordinates as generated coordinates if requested */
if (mesh->need_attribute(state.scene, ATTR_STD_GENERATED)) {
Attribute *attr = mesh->attributes.add(ATTR_STD_GENERATED);
memcpy(
attr->data_float3(), mesh->get_verts().data(), sizeof(float3) * mesh->get_verts().size());
memcpy(attr->data_float3(), mesh->verts.data(), sizeof(float3) * mesh->verts.size());
}
}

@@ -545,7 +539,7 @@ static void xml_read_light(XMLReadState &state, xml_node node)
{
Light *light = new Light();

light->set_shader(state.shader);
light->shader = state.shader;
xml_read_node(state, light, node);

state.scene->lights.push_back(light);
@@ -59,7 +59,6 @@ class CyclesRender(bpy.types.RenderEngine):
bl_use_exclude_layers = True
bl_use_save_buffers = True
bl_use_spherical_stereo = True
bl_use_custom_freestyle = True

def __init__(self):
self.session = None

@@ -282,7 +282,7 @@ def list_render_passes(scene, srl):
yield ("CryptoAsset" + '{:02d}'.format(i), "RGBA", 'COLOR')

# Denoising passes.
if (scene.cycles.use_denoising and crl.use_denoising) or crl.denoising_store_passes:
if crl.use_denoising or crl.denoising_store_passes:
yield ("Noisy Image", "RGBA", 'COLOR')
if crl.denoising_store_passes:
yield ("Denoising Normal", "XYZ", 'VECTOR')

@@ -1570,7 +1570,7 @@ class CyclesPreferences(bpy.types.AddonPreferences):
elif entry.type == 'CPU':
cpu_devices.append(entry)
# Extend all GPU devices with CPU.
if compute_device_type in {'CUDA', 'OPTIX', 'OPENCL'}:
if compute_device_type in {'CUDA', 'OPENCL'}:
devices.extend(cpu_devices)
return devices
@@ -1443,7 +1443,6 @@ class CYCLES_LIGHT_PT_nodes(CyclesButtonsPanel, Panel):

class CYCLES_LIGHT_PT_spot(CyclesButtonsPanel, Panel):
bl_label = "Spot Shape"
bl_parent_id = "CYCLES_LIGHT_PT_light"
bl_context = "data"

@classmethod

@@ -1455,6 +1454,7 @@ class CYCLES_LIGHT_PT_spot(CyclesButtonsPanel, Panel):
layout = self.layout
light = context.light
layout.use_property_split = True
layout.use_property_decorate = False

col = layout.column()
col.prop(light, "spot_size", text="Size")

@@ -1969,11 +1969,9 @@ class CYCLES_RENDER_PT_bake_output(CyclesButtonsPanel, Panel):
if rd.bake_type == 'DISPLACEMENT':
layout.prop(rd, "use_bake_lores_mesh")
else:
layout.prop(cbk, "target")

if cbk.target == 'IMAGE_TEXTURES':
layout.prop(cbk, "margin")
layout.prop(cbk, "use_clear", text="Clear Image")
layout.prop(cbk, "margin")
layout.prop(cbk, "use_clear", text="Clear Image")


class CYCLES_RENDER_PT_debug(CyclesButtonsPanel, Panel):
@@ -67,10 +67,11 @@ static bool ObtainCacheParticleData(
Transform tfm = get_transform(b_ob->matrix_world());
Transform itfm = transform_quick_inverse(tfm);

for (BL::Modifier &b_mod : b_ob->modifiers) {
if ((b_mod.type() == b_mod.type_PARTICLE_SYSTEM) &&
(background ? b_mod.show_render() : b_mod.show_viewport())) {
BL::ParticleSystemModifier psmd((const PointerRNA)b_mod.ptr);
BL::Object::modifiers_iterator b_mod;
for (b_ob->modifiers.begin(b_mod); b_mod != b_ob->modifiers.end(); ++b_mod) {
if ((b_mod->type() == b_mod->type_PARTICLE_SYSTEM) &&
(background ? b_mod->show_render() : b_mod->show_viewport())) {
BL::ParticleSystemModifier psmd((const PointerRNA)b_mod->ptr);
BL::ParticleSystem b_psys((const PointerRNA)psmd.particle_system().ptr);
BL::ParticleSettings b_part((const PointerRNA)b_psys.settings().ptr);

@@ -162,10 +163,11 @@ static bool ObtainCacheParticleUV(Hair *hair,

CData->curve_uv.clear();

for (BL::Modifier &b_mod : b_ob->modifiers) {
if ((b_mod.type() == b_mod.type_PARTICLE_SYSTEM) &&
(background ? b_mod.show_render() : b_mod.show_viewport())) {
BL::ParticleSystemModifier psmd((const PointerRNA)b_mod.ptr);
BL::Object::modifiers_iterator b_mod;
for (b_ob->modifiers.begin(b_mod); b_mod != b_ob->modifiers.end(); ++b_mod) {
if ((b_mod->type() == b_mod->type_PARTICLE_SYSTEM) &&
(background ? b_mod->show_render() : b_mod->show_viewport())) {
BL::ParticleSystemModifier psmd((const PointerRNA)b_mod->ptr);
BL::ParticleSystem b_psys((const PointerRNA)psmd.particle_system().ptr);
BL::ParticleSettings b_part((const PointerRNA)b_psys.settings().ptr);

@@ -224,10 +226,11 @@ static bool ObtainCacheParticleVcol(Hair *hair,

CData->curve_vcol.clear();

for (BL::Modifier &b_mod : b_ob->modifiers) {
if ((b_mod.type() == b_mod.type_PARTICLE_SYSTEM) &&
(background ? b_mod.show_render() : b_mod.show_viewport())) {
BL::ParticleSystemModifier psmd((const PointerRNA)b_mod.ptr);
BL::Object::modifiers_iterator b_mod;
for (b_ob->modifiers.begin(b_mod); b_mod != b_ob->modifiers.end(); ++b_mod) {
if ((b_mod->type() == b_mod->type_PARTICLE_SYSTEM) &&
(background ? b_mod->show_render() : b_mod->show_viewport())) {
BL::ParticleSystemModifier psmd((const PointerRNA)b_mod->ptr);
BL::ParticleSystem b_psys((const PointerRNA)psmd.particle_system().ptr);
BL::ParticleSettings b_part((const PointerRNA)b_psys.settings().ptr);

@@ -507,10 +510,11 @@ static void ExportCurveSegmentsMotion(Hair *hair, ParticleCurveData *CData, int
bool BlenderSync::object_has_particle_hair(BL::Object b_ob)
{
/* Test if the object has a particle modifier with hair. */
for (BL::Modifier &b_mod : b_ob.modifiers) {
if ((b_mod.type() == b_mod.type_PARTICLE_SYSTEM) &&
(preview ? b_mod.show_viewport() : b_mod.show_render())) {
BL::ParticleSystemModifier psmd((const PointerRNA)b_mod.ptr);
BL::Object::modifiers_iterator b_mod;
for (b_ob.modifiers.begin(b_mod); b_mod != b_ob.modifiers.end(); ++b_mod) {
if ((b_mod->type() == b_mod->type_PARTICLE_SYSTEM) &&
(preview ? b_mod->show_viewport() : b_mod->show_render())) {
BL::ParticleSystemModifier psmd((const PointerRNA)b_mod->ptr);
BL::ParticleSystem b_psys((const PointerRNA)psmd.particle_system().ptr);
BL::ParticleSettings b_part((const PointerRNA)b_psys.settings().ptr);

@@ -674,7 +678,9 @@ static void export_hair_curves(Scene *scene, Hair *hair, BL::Hair b_hair)
/* Export curves and points. */
vector<float> points_length;

for (BL::HairCurve &b_curve : b_hair.curves) {
BL::Hair::curves_iterator b_curve_iter;
for (b_hair.curves.begin(b_curve_iter); b_curve_iter != b_hair.curves.end(); ++b_curve_iter) {
BL::HairCurve b_curve = *b_curve_iter;
const int first_point_index = b_curve.first_point_index();
const int num_points = b_curve.num_points();

@@ -742,7 +748,9 @@ static void export_hair_curves_motion(Hair *hair, BL::Hair b_hair, int motion_st
int num_motion_keys = 0;
int curve_index = 0;

for (BL::HairCurve &b_curve : b_hair.curves) {
BL::Hair::curves_iterator b_curve_iter;
for (b_hair.curves.begin(b_curve_iter); b_curve_iter != b_hair.curves.end(); ++b_curve_iter) {
BL::HairCurve b_curve = *b_curve_iter;
const int first_point_index = b_curve.first_point_index();
const int num_points = b_curve.num_points();

@@ -847,7 +855,10 @@ void BlenderSync::sync_hair(BL::Depsgraph b_depsgraph, BL::Object b_ob, Hair *ha
hair->set_value(socket, new_hair, socket);
}

hair->attributes.update(std::move(new_hair.attributes));
hair->attributes.clear();
foreach (Attribute &attr, new_hair.attributes.attributes) {
hair->attributes.attributes.push_back(std::move(attr));
}

/* tag update */
@@ -43,39 +43,42 @@ int blender_device_threads(BL::Scene &b_scene)

DeviceInfo blender_device_info(BL::Preferences &b_preferences, BL::Scene &b_scene, bool background)
{
PointerRNA cscene = RNA_pointer_get(&b_scene.ptr, "cycles");

/* Find cycles preferences. */
PointerRNA cpreferences;
for (BL::Addon &b_addon : b_preferences.addons) {
if (b_addon.module() == "cycles") {
cpreferences = b_addon.preferences().ptr;
break;
}
}

/* Default to CPU device. */
DeviceInfo device = Device::available_devices(DEVICE_MASK_CPU).front();

if (BlenderSession::device_override != DEVICE_MASK_ALL) {
vector<DeviceInfo> devices = Device::available_devices(BlenderSession::device_override);

if (devices.empty()) {
device = Device::dummy_device("Found no Cycles device of the specified type");
}
else {
int threads = blender_device_threads(b_scene);
device = Device::get_multi_device(devices, threads, background);
return Device::dummy_device("Found no Cycles device of the specified type");
}

int threads = blender_device_threads(b_scene);
return Device::get_multi_device(devices, threads, background);
}
else if (get_enum(cscene, "device") == 2) {

PointerRNA cscene = RNA_pointer_get(&b_scene.ptr, "cycles");

/* Default to CPU device. */
DeviceInfo device = Device::available_devices(DEVICE_MASK_CPU).front();

if (get_enum(cscene, "device") == 2) {
/* Find network device. */
vector<DeviceInfo> devices = Device::available_devices(DEVICE_MASK_NETWORK);
if (!devices.empty()) {
device = devices.front();
return devices.front();
}
}
else if (get_enum(cscene, "device") == 1) {
/* Find cycles preferences. */
PointerRNA cpreferences;

BL::Preferences::addons_iterator b_addon_iter;
for (b_preferences.addons.begin(b_addon_iter); b_addon_iter != b_preferences.addons.end();
++b_addon_iter) {
if (b_addon_iter->module() == "cycles") {
cpreferences = b_addon_iter->preferences().ptr;
break;
}
}

/* Test if we are using GPU devices. */
ComputeDevice compute_device = (ComputeDevice)get_enum(
cpreferences, "compute_device_type", COMPUTE_DEVICE_NUM, COMPUTE_DEVICE_CPU);

@@ -87,7 +90,8 @@ DeviceInfo blender_device_info(BL::Preferences &b_preferences, BL::Scene &b_scen
mask |= DEVICE_MASK_CUDA;
}
else if (compute_device == COMPUTE_DEVICE_OPTIX) {
mask |= DEVICE_MASK_OPTIX;
/* Cannot use CPU and OptiX device at the same time right now, so replace mask. */
mask = DEVICE_MASK_OPTIX;
}
else if (compute_device == COMPUTE_DEVICE_OPENCL) {
mask |= DEVICE_MASK_OPENCL;

@@ -114,11 +118,11 @@ DeviceInfo blender_device_info(BL::Preferences &b_preferences, BL::Scene &b_scen
device = Device::get_multi_device(used_devices, threads, background);
}
/* Else keep using the CPU device that was set before. */
}
}

if (!get_boolean(cpreferences, "peer_memory")) {
device.has_peer_memory = false;
if (!get_boolean(cpreferences, "peer_memory")) {
device.has_peer_memory = false;
}
}
}

return device;
@@ -42,34 +42,6 @@ static Geometry::Type determine_geom_type(BL::Object &b_ob, bool use_particle_ha
return Geometry::MESH;
}

array<Node *> BlenderSync::find_used_shaders(BL::Object &b_ob)
{
BL::Material material_override = view_layer.material_override;
Shader *default_shader = (b_ob.type() == BL::Object::type_VOLUME) ? scene->default_volume :
scene->default_surface;

array<Node *> used_shaders;

for (BL::MaterialSlot &b_slot : b_ob.material_slots) {
if (material_override) {
find_shader(material_override, used_shaders, default_shader);
}
else {
BL::ID b_material(b_slot.material());
find_shader(b_material, used_shaders, default_shader);
}
}

if (used_shaders.size() == 0) {
if (material_override)
find_shader(material_override, used_shaders, default_shader);
else
used_shaders.push_back_slow(default_shader);
}

return used_shaders;
}

Geometry *BlenderSync::sync_geometry(BL::Depsgraph &b_depsgraph,
BL::Object &b_ob,
BL::Object &b_ob_instance,

@@ -80,11 +52,32 @@ Geometry *BlenderSync::sync_geometry(BL::Depsgraph &b_depsgraph,
/* Test if we can instance or if the object is modified. */
BL::ID b_ob_data = b_ob.data();
BL::ID b_key_id = (BKE_object_is_modified(b_ob)) ? b_ob_instance : b_ob_data;
BL::Material material_override = view_layer.material_override;
Shader *default_shader = (b_ob.type() == BL::Object::type_VOLUME) ? scene->default_volume :
scene->default_surface;
Geometry::Type geom_type = determine_geom_type(b_ob, use_particle_hair);
GeometryKey key(b_key_id.ptr.data, geom_type);

/* Find shader indices. */
array<Node *> used_shaders = find_used_shaders(b_ob);
array<Node *> used_shaders;

BL::Object::material_slots_iterator slot;
for (b_ob.material_slots.begin(slot); slot != b_ob.material_slots.end(); ++slot) {
if (material_override) {
find_shader(material_override, used_shaders, default_shader);
}
else {
BL::ID b_material(slot->material());
find_shader(b_material, used_shaders, default_shader);
}
}

if (used_shaders.size() == 0) {
if (material_override)
find_shader(material_override, used_shaders, default_shader);
else
used_shaders.push_back_slow(default_shader);
}

/* Ensure we only sync instanced geometry once. */
Geometry *geom = geometry_map.find(key);

@@ -131,7 +124,7 @@ Geometry *BlenderSync::sync_geometry(BL::Depsgraph &b_depsgraph,

foreach (Node *node, geom->get_used_shaders()) {
Shader *shader = static_cast<Shader *>(node);
if (shader->need_update_geometry()) {
if (shader->need_update_geometry) {
attribute_recalc = true;
}
}
@@ -285,10 +285,12 @@ static void attr_create_sculpt_vertex_color(Scene *scene,
|
||||
BL::Mesh &b_mesh,
|
||||
bool subdivision)
|
||||
{
|
||||
for (BL::MeshVertColorLayer &l : b_mesh.sculpt_vertex_colors) {
|
||||
const bool active_render = l.active_render();
|
||||
BL::Mesh::sculpt_vertex_colors_iterator l;
|
||||
|
||||
for (b_mesh.sculpt_vertex_colors.begin(l); l != b_mesh.sculpt_vertex_colors.end(); ++l) {
|
||||
const bool active_render = l->active_render();
|
||||
AttributeStandard vcol_std = (active_render) ? ATTR_STD_VERTEX_COLOR : ATTR_STD_NONE;
|
||||
ustring vcol_name = ustring(l.name().c_str());
|
||||
ustring vcol_name = ustring(l->name().c_str());
|
||||
|
||||
const bool need_vcol = mesh->need_attribute(scene, vcol_name) ||
|
||||
mesh->need_attribute(scene, vcol_std);
|
||||
@@ -305,7 +307,7 @@ static void attr_create_sculpt_vertex_color(Scene *scene,
|
||||
int numverts = b_mesh.vertices.length();
|
||||
|
||||
for (int i = 0; i < numverts; i++) {
|
||||
*(cdata++) = get_float4(l.data[i].color());
|
||||
*(cdata++) = get_float4(l->data[i].color());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -313,10 +315,12 @@ static void attr_create_sculpt_vertex_color(Scene *scene,
|
||||
/* Create vertex color attributes. */
|
||||
static void attr_create_vertex_color(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh, bool subdivision)
|
||||
{
|
||||
for (BL::MeshLoopColorLayer &l : b_mesh.vertex_colors) {
|
||||
const bool active_render = l.active_render();
|
||||
BL::Mesh::vertex_colors_iterator l;
|
||||
|
||||
for (b_mesh.vertex_colors.begin(l); l != b_mesh.vertex_colors.end(); ++l) {
|
||||
const bool active_render = l->active_render();
|
||||
AttributeStandard vcol_std = (active_render) ? ATTR_STD_VERTEX_COLOR : ATTR_STD_NONE;
|
||||
ustring vcol_name = ustring(l.name().c_str());
|
||||
ustring vcol_name = ustring(l->name().c_str());
|
||||
|
||||
const bool need_vcol = mesh->need_attribute(scene, vcol_name) ||
|
||||
mesh->need_attribute(scene, vcol_std);
|
||||
@@ -335,12 +339,13 @@ static void attr_create_vertex_color(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh,
|
||||
vcol_attr = mesh->subd_attributes.add(vcol_name, TypeRGBA, ATTR_ELEMENT_CORNER_BYTE);
|
||||
}
|
||||
|
||||
BL::Mesh::polygons_iterator p;
|
||||
uchar4 *cdata = vcol_attr->data_uchar4();
|
||||
|
||||
for (BL::MeshPolygon &p : b_mesh.polygons) {
|
||||
int n = p.loop_total();
|
||||
for (b_mesh.polygons.begin(p); p != b_mesh.polygons.end(); ++p) {
|
||||
int n = p->loop_total();
|
||||
for (int i = 0; i < n; i++) {
|
||||
float4 color = get_float4(l.data[p.loop_start() + i].color());
|
||||
float4 color = get_float4(l->data[p->loop_start() + i].color());
|
||||
/* Compress/encode vertex color using the sRGB curve. */
|
||||
*(cdata++) = color_float4_to_uchar4(color);
|
||||
}
|
||||
@@ -354,13 +359,14 @@ static void attr_create_vertex_color(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh,
|
||||
vcol_attr = mesh->attributes.add(vcol_name, TypeRGBA, ATTR_ELEMENT_CORNER_BYTE);
|
||||
}
|
||||
|
||||
BL::Mesh::loop_triangles_iterator t;
|
||||
uchar4 *cdata = vcol_attr->data_uchar4();
|
||||
|
||||
for (BL::MeshLoopTriangle &t : b_mesh.loop_triangles) {
|
||||
int3 li = get_int3(t.loops());
|
||||
float4 c1 = get_float4(l.data[li[0]].color());
|
||||
float4 c2 = get_float4(l.data[li[1]].color());
|
||||
float4 c3 = get_float4(l.data[li[2]].color());
|
||||
for (b_mesh.loop_triangles.begin(t); t != b_mesh.loop_triangles.end(); ++t) {
|
||||
int3 li = get_int3(t->loops());
|
||||
float4 c1 = get_float4(l->data[li[0]].color());
|
||||
float4 c2 = get_float4(l->data[li[1]].color());
|
||||
float4 c3 = get_float4(l->data[li[2]].color());
|
||||
|
||||
/* Compress/encode vertex color using the sRGB curve. */
|
||||
cdata[0] = color_float4_to_uchar4(c1);
|
||||
@@ -377,12 +383,14 @@ static void attr_create_vertex_color(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh,
|
||||
static void attr_create_uv_map(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh)
|
||||
{
|
||||
if (b_mesh.uv_layers.length() != 0) {
|
||||
for (BL::MeshUVLoopLayer &l : b_mesh.uv_layers) {
|
||||
const bool active_render = l.active_render();
|
||||
BL::Mesh::uv_layers_iterator l;
|
||||
|
||||
for (b_mesh.uv_layers.begin(l); l != b_mesh.uv_layers.end(); ++l) {
|
||||
const bool active_render = l->active_render();
|
||||
AttributeStandard uv_std = (active_render) ? ATTR_STD_UV : ATTR_STD_NONE;
|
||||
ustring uv_name = ustring(l.name().c_str());
|
||||
ustring uv_name = ustring(l->name().c_str());
|
||||
AttributeStandard tangent_std = (active_render) ? ATTR_STD_UV_TANGENT : ATTR_STD_NONE;
|
||||
ustring tangent_name = ustring((string(l.name().c_str()) + ".tangent").c_str());
|
||||
ustring tangent_name = ustring((string(l->name().c_str()) + ".tangent").c_str());
|
||||
|
||||
/* Denotes whether UV map was requested directly. */
|
||||
const bool need_uv = mesh->need_attribute(scene, uv_name) ||
|
||||
@@ -404,13 +412,14 @@ static void attr_create_uv_map(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh)
|
||||
uv_attr = mesh->attributes.add(uv_name, TypeFloat2, ATTR_ELEMENT_CORNER);
|
||||
}
|
||||
|
||||
BL::Mesh::loop_triangles_iterator t;
|
||||
float2 *fdata = uv_attr->data_float2();
|
||||
|
||||
for (BL::MeshLoopTriangle &t : b_mesh.loop_triangles) {
|
||||
int3 li = get_int3(t.loops());
|
||||
fdata[0] = get_float2(l.data[li[0]].uv());
|
||||
fdata[1] = get_float2(l.data[li[1]].uv());
|
||||
fdata[2] = get_float2(l.data[li[2]].uv());
|
||||
for (b_mesh.loop_triangles.begin(t); t != b_mesh.loop_triangles.end(); ++t) {
|
||||
int3 li = get_int3(t->loops());
|
||||
fdata[0] = get_float2(l->data[li[0]].uv());
|
||||
fdata[1] = get_float2(l->data[li[1]].uv());
|
||||
fdata[2] = get_float2(l->data[li[2]].uv());
|
||||
fdata += 3;
|
||||
}
|
||||
}
|
||||
@@ -418,10 +427,10 @@ static void attr_create_uv_map(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh)
|
||||
/* UV tangent */
|
||||
if (need_tangent) {
|
||||
AttributeStandard sign_std = (active_render) ? ATTR_STD_UV_TANGENT_SIGN : ATTR_STD_NONE;
|
||||
ustring sign_name = ustring((string(l.name().c_str()) + ".tangent_sign").c_str());
|
||||
ustring sign_name = ustring((string(l->name().c_str()) + ".tangent_sign").c_str());
|
||||
bool need_sign = (mesh->need_attribute(scene, sign_name) ||
|
||||
mesh->need_attribute(scene, sign_std));
|
||||
mikk_compute_tangents(b_mesh, l.name().c_str(), mesh, need_sign, active_render);
|
||||
mikk_compute_tangents(b_mesh, l->name().c_str(), mesh, need_sign, active_render);
|
||||
}
|
||||
/* Remove temporarily created UV attribute. */
|
||||
if (!need_uv && uv_attr != NULL) {
|
||||
@@ -471,12 +480,13 @@ static void attr_create_subd_uv_map(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh,
|
||||
uv_attr->flags |= ATTR_SUBDIVIDED;
|
||||
}
|
||||
|
||||
BL::Mesh::polygons_iterator p;
|
||||
float2 *fdata = uv_attr->data_float2();
|
||||
|
||||
for (BL::MeshPolygon &p : b_mesh.polygons) {
|
||||
int n = p.loop_total();
|
||||
for (b_mesh.polygons.begin(p); p != b_mesh.polygons.end(); ++p) {
|
||||
int n = p->loop_total();
|
||||
for (int j = 0; j < n; j++) {
|
||||
*(fdata++) = get_float2(l->data[p.loop_start() + j].uv());
|
||||
*(fdata++) = get_float2(l->data[p->loop_start() + j].uv());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -696,8 +706,9 @@ static void attr_create_random_per_island(Scene *scene,
|
||||
|
||||
DisjointSet vertices_sets(number_of_vertices);
|
||||
|
||||
for (BL::MeshEdge &e : b_mesh.edges) {
|
||||
vertices_sets.join(e.vertices()[0], e.vertices()[1]);
|
||||
BL::Mesh::edges_iterator e;
|
||||
for (b_mesh.edges.begin(e); e != b_mesh.edges.end(); ++e) {
|
||||
vertices_sets.join(e->vertices()[0], e->vertices()[1]);
|
||||
}
|
||||
|
||||
AttributeSet &attributes = (subdivision) ? mesh->subd_attributes : mesh->attributes;
|
||||
@@ -705,13 +716,15 @@ static void attr_create_random_per_island(Scene *scene,
|
||||
float *data = attribute->data_float();
|
||||
|
||||
if (!subdivision) {
|
||||
for (BL::MeshLoopTriangle &t : b_mesh.loop_triangles) {
|
||||
data[t.index()] = hash_uint_to_float(vertices_sets.find(t.vertices()[0]));
|
||||
BL::Mesh::loop_triangles_iterator t;
|
||||
for (b_mesh.loop_triangles.begin(t); t != b_mesh.loop_triangles.end(); ++t) {
|
||||
data[t->index()] = hash_uint_to_float(vertices_sets.find(t->vertices()[0]));
|
||||
}
|
||||
}
|
||||
else {
|
||||
for (BL::MeshPolygon &p : b_mesh.polygons) {
|
||||
data[p.index()] = hash_uint_to_float(vertices_sets.find(p.vertices()[0]));
|
||||
BL::Mesh::polygons_iterator p;
|
||||
for (b_mesh.polygons.begin(p); p != b_mesh.polygons.end(); ++p) {
|
||||
data[p->index()] = hash_uint_to_float(vertices_sets.find(p->vertices()[0]));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -743,9 +756,10 @@ static void create_mesh(Scene *scene,
|
||||
numtris = numfaces;
|
||||
}
|
||||
else {
|
||||
for (BL::MeshPolygon &p : b_mesh.polygons) {
|
||||
numngons += (p.loop_total() == 4) ? 0 : 1;
|
||||
numcorners += p.loop_total();
|
||||
BL::Mesh::polygons_iterator p;
|
||||
for (b_mesh.polygons.begin(p); p != b_mesh.polygons.end(); ++p) {
|
||||
numngons += (p->loop_total() == 4) ? 0 : 1;
|
||||
numcorners += p->loop_total();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -789,15 +803,17 @@ static void create_mesh(Scene *scene,
|
||||
|
||||
/* create faces */
|
||||
if (!subdivision) {
|
||||
for (BL::MeshLoopTriangle &t : b_mesh.loop_triangles) {
|
||||
BL::MeshPolygon p = b_mesh.polygons[t.polygon_index()];
|
||||
int3 vi = get_int3(t.vertices());
|
||||
BL::Mesh::loop_triangles_iterator t;
|
||||
|
||||
for (b_mesh.loop_triangles.begin(t); t != b_mesh.loop_triangles.end(); ++t) {
|
||||
BL::MeshPolygon p = b_mesh.polygons[t->polygon_index()];
|
||||
int3 vi = get_int3(t->vertices());
|
||||
|
||||
int shader = clamp(p.material_index(), 0, used_shaders.size() - 1);
|
||||
bool smooth = p.use_smooth() || use_loop_normals;
|
||||
|
||||
if (use_loop_normals) {
|
||||
BL::Array<float, 9> loop_normals = t.split_normals();
|
||||
BL::Array<float, 9> loop_normals = t->split_normals();
|
||||
for (int i = 0; i < 3; i++) {
|
||||
N[vi[i]] = make_float3(
|
||||
loop_normals[i * 3], loop_normals[i * 3 + 1], loop_normals[i * 3 + 2]);
|
||||
@@ -812,17 +828,18 @@ static void create_mesh(Scene *scene,
|
||||
}
|
||||
}
|
||||
else {
|
||||
BL::Mesh::polygons_iterator p;
|
||||
vector<int> vi;
|
||||
|
||||
for (BL::MeshPolygon &p : b_mesh.polygons) {
|
||||
int n = p.loop_total();
|
||||
int shader = clamp(p.material_index(), 0, used_shaders.size() - 1);
|
||||
bool smooth = p.use_smooth() || use_loop_normals;
|
||||
for (b_mesh.polygons.begin(p); p != b_mesh.polygons.end(); ++p) {
|
||||
int n = p->loop_total();
|
||||
int shader = clamp(p->material_index(), 0, used_shaders.size() - 1);
|
||||
bool smooth = p->use_smooth() || use_loop_normals;
|
||||
|
||||
vi.resize(n);
|
||||
for (int i = 0; i < n; i++) {
|
||||
/* NOTE: Autosmooth is already taken care about. */
|
||||
vi[i] = b_mesh.loops[p.loop_start() + i].vertex_index();
|
||||
vi[i] = b_mesh.loops[p->loop_start() + i].vertex_index();
|
||||
}
|
||||
|
||||
/* create subd faces */
|
||||
@@ -874,18 +891,19 @@ static void create_subd_mesh(Scene *scene,

/* export creases */
size_t num_creases = 0;
BL::Mesh::edges_iterator e;

for (BL::MeshEdge &e : b_mesh.edges) {
if (e.crease() != 0.0f) {
for (b_mesh.edges.begin(e); e != b_mesh.edges.end(); ++e) {
if (e->crease() != 0.0f) {
num_creases++;
}
}

mesh->reserve_subd_creases(num_creases);

for (BL::MeshEdge &e : b_mesh.edges) {
if (e.crease() != 0.0f) {
mesh->add_crease(e.vertices()[0], e.vertices()[1], e.crease());
for (b_mesh.edges.begin(e); e != b_mesh.edges.end(); ++e) {
if (e->crease() != 0.0f) {
mesh->add_crease(e->vertices()[0], e->vertices()[1], e->crease());
}
}

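The crease export above counts matching edges first and reserves the final size before filling, so the crease storage is allocated once. A generic sketch of the same idiom with hypothetical `edges`/`creases` containers:

size_t num_creases = 0;
for (const Edge &e : edges) { /* first pass: count */
  if (e.crease != 0.0f)
    num_creases++;
}
creases.reserve(num_creases); /* single allocation */
for (const Edge &e : edges) { /* second pass: fill */
  if (e.crease != 0.0f)
    creases.push_back({e.v0, e.v1, e.crease});
}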
@@ -1057,8 +1075,15 @@ void BlenderSync::sync_mesh(BL::Depsgraph b_depsgraph, BL::Object b_ob, Mesh *me
mesh->set_value(socket, new_mesh, socket);
}

mesh->attributes.update(std::move(new_mesh.attributes));
mesh->subd_attributes.update(std::move(new_mesh.subd_attributes));
mesh->attributes.clear();
foreach (Attribute &attr, new_mesh.attributes.attributes) {
mesh->attributes.attributes.push_back(std::move(attr));
}

mesh->subd_attributes.clear();
foreach (Attribute &attr, new_mesh.subd_attributes.attributes) {
mesh->subd_attributes.attributes.push_back(std::move(attr));
}

mesh->set_num_subd_faces(new_mesh.get_num_subd_faces());

@@ -51,11 +51,10 @@ bool BlenderSync::BKE_object_is_modified(BL::Object &b_ob)
}
else {
/* object level material links */
for (BL::MaterialSlot &b_slot : b_ob.material_slots) {
if (b_slot.link() == BL::MaterialSlot::link_OBJECT) {
BL::Object::material_slots_iterator slot;
for (b_ob.material_slots.begin(slot); slot != b_ob.material_slots.end(); ++slot)
if (slot->link() == BL::MaterialSlot::link_OBJECT)
return true;
}
}
}

return false;
@@ -244,6 +243,9 @@ Object *BlenderSync::sync_object(BL::Depsgraph &b_depsgraph,

/* holdout */
object->set_use_holdout(use_holdout);
if (object->use_holdout_is_modified()) {
scene->object_manager->tag_update(scene);
}

object->set_visibility(visibility);

@@ -349,10 +351,6 @@ static bool lookup_property(BL::ID b_id, const string &name, float4 *r_value)
return false;
}

if (prop == NULL) {
return false;
}

PropertyType type = RNA_property_type(prop);
int arraylen = RNA_property_array_length(&ptr, prop);

@@ -57,7 +57,7 @@ bool BlenderSync::sync_dupli_particle(BL::Object &b_ob,

/* no update needed? */
if (!need_update && !object->get_geometry()->is_modified() &&
!scene->object_manager->need_update())
!scene->object_manager->need_update)
return true;

/* first time used in this sync loop? clear and tag update */
@@ -85,7 +85,7 @@ bool BlenderSync::sync_dupli_particle(BL::Object &b_ob,
object->set_particle_index(psys->particles.size() - 1);

if (object->particle_index_is_modified())
scene->object_manager->tag_update(scene, ObjectManager::PARTICLE_MODIFIED);
scene->object_manager->tag_update(scene);

/* return that this object has particle data */
return true;

@@ -597,19 +597,22 @@ static PyObject *osl_update_node_func(PyObject * /*self*/, PyObject *args)
|
||||
bool removed;
|
||||
|
||||
do {
|
||||
BL::Node::inputs_iterator b_input;
|
||||
BL::Node::outputs_iterator b_output;
|
||||
|
||||
removed = false;
|
||||
|
||||
for (BL::NodeSocket &b_input : b_node.inputs) {
|
||||
if (used_sockets.find(b_input.ptr.data) == used_sockets.end()) {
|
||||
b_node.inputs.remove(b_data, b_input);
|
||||
for (b_node.inputs.begin(b_input); b_input != b_node.inputs.end(); ++b_input) {
|
||||
if (used_sockets.find(b_input->ptr.data) == used_sockets.end()) {
|
||||
b_node.inputs.remove(b_data, *b_input);
|
||||
removed = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
for (BL::NodeSocket &b_output : b_node.outputs) {
|
||||
if (used_sockets.find(b_output.ptr.data) == used_sockets.end()) {
|
||||
b_node.outputs.remove(b_data, b_output);
|
||||
for (b_node.outputs.begin(b_output); b_output != b_node.outputs.end(); ++b_output) {
|
||||
if (used_sockets.find(b_output->ptr.data) == used_sockets.end()) {
|
||||
b_node.outputs.remove(b_data, *b_output);
|
||||
removed = true;
|
||||
break;
|
||||
}
|
||||
|
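Because removing a socket mutates the collection being walked, the loops above delete at most one socket per pass, break out, and the enclosing do-loop rescans until nothing is left to remove. A generic sketch of that restart-on-mutation idiom with hypothetical `items`/`used` containers:

bool removed;
do {
  removed = false;
  for (auto it = items.begin(); it != items.end(); ++it) {
    if (used.find(&*it) == used.end()) {
      items.erase(it); /* invalidates the current iteration */
      removed = true;
      break;           /* restart the scan from the beginning */
    }
  }
} while (removed);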
@@ -358,7 +358,11 @@ void BlenderSession::do_write_update_render_tile(RenderTile &rtile,
|
||||
|
||||
if (do_read_only) {
|
||||
/* copy each pass */
|
||||
for (BL::RenderPass &b_pass : b_rlay.passes) {
|
||||
BL::RenderLayer::passes_iterator b_iter;
|
||||
|
||||
for (b_rlay.passes.begin(b_iter); b_iter != b_rlay.passes.end(); ++b_iter) {
|
||||
BL::RenderPass b_pass(*b_iter);
|
||||
|
||||
/* find matching pass type */
|
||||
PassType pass_type = BlenderSync::get_pass_type(b_pass);
|
||||
int components = b_pass.channels();
|
||||
@@ -548,6 +552,7 @@ void BlenderSession::render(BL::Depsgraph &b_depsgraph_)
|
||||
int seed = scene->integrator->get_seed();
|
||||
seed += hash_uint2(seed, hash_uint2(view_index * 0xdeadbeef, 0));
|
||||
scene->integrator->set_seed(seed);
|
||||
scene->integrator->tag_update(scene);
|
||||
}
|
||||
|
||||
/* Update number of samples per layer. */
|
||||
@@ -731,7 +736,10 @@ void BlenderSession::do_write_update_render_result(BL::RenderLayer &b_rlay,
|
||||
|
||||
if (!do_update_only) {
|
||||
/* copy each pass */
|
||||
for (BL::RenderPass &b_pass : b_rlay.passes) {
|
||||
BL::RenderLayer::passes_iterator b_iter;
|
||||
|
||||
for (b_rlay.passes.begin(b_iter); b_iter != b_rlay.passes.end(); ++b_iter) {
|
||||
BL::RenderPass b_pass(*b_iter);
|
||||
int components = b_pass.channels();
|
||||
|
||||
/* Copy pixels from regular render passes. */
|
||||
@@ -1108,6 +1116,10 @@ void BlenderSession::update_resumable_tile_manager(int num_samples)
|
||||
|
||||
scene->integrator->set_start_sample(rounded_range_start_sample);
|
||||
|
||||
if (scene->integrator->is_modified()) {
|
||||
scene->integrator->tag_update(scene);
|
||||
}
|
||||
|
||||
session->tile_manager.range_start_sample = rounded_range_start_sample;
|
||||
session->tile_manager.range_num_samples = rounded_range_num_samples;
|
||||
}
|
||||
|
@@ -148,13 +148,15 @@ BlenderAttributeType blender_attribute_name_split_type(ustring name, string *r_r
|
||||
|
||||
static BL::NodeSocket get_node_output(BL::Node &b_node, const string &name)
|
||||
{
|
||||
for (BL::NodeSocket &b_out : b_node.outputs) {
|
||||
if (b_out.name() == name) {
|
||||
return b_out;
|
||||
}
|
||||
}
|
||||
BL::Node::outputs_iterator b_out;
|
||||
|
||||
for (b_node.outputs.begin(b_out); b_out != b_node.outputs.end(); ++b_out)
|
||||
if (b_out->name() == name)
|
||||
return *b_out;
|
||||
|
||||
assert(0);
|
||||
return *b_node.outputs.begin();
|
||||
|
||||
return *b_out;
|
||||
}
|
||||
|
||||
static float3 get_node_output_rgba(BL::Node &b_node, const string &name)
|
||||
@@ -721,8 +723,9 @@ static ShaderNode *add_node(Scene *scene,
|
||||
image->set_alpha_type(get_image_alpha_type(b_image));
|
||||
|
||||
array<int> tiles;
|
||||
for (BL::UDIMTile &b_tile : b_image.tiles) {
|
||||
tiles.push_back_slow(b_tile.number());
|
||||
BL::Image::tiles_iterator b_iter;
|
||||
for (b_image.tiles.begin(b_iter); b_iter != b_image.tiles.end(); ++b_iter) {
|
||||
tiles.push_back_slow(b_iter->number());
|
||||
}
|
||||
image->set_tiles(tiles);
|
||||
|
||||
@@ -882,7 +885,7 @@ static ShaderNode *add_node(Scene *scene,
|
||||
sky->set_sun_intensity(b_sky_node.sun_intensity());
|
||||
sky->set_sun_elevation(b_sky_node.sun_elevation());
|
||||
sky->set_sun_rotation(b_sky_node.sun_rotation());
|
||||
sky->set_altitude(b_sky_node.altitude());
|
||||
sky->set_altitude(1000.0f * b_sky_node.altitude());
|
||||
sky->set_air_density(b_sky_node.air_density());
|
||||
sky->set_dust_density(b_sky_node.dust_density());
|
||||
sky->set_ozone_density(b_sky_node.ozone_density());
|
||||
@@ -1009,18 +1012,18 @@ static ShaderInput *node_find_input_by_name(ShaderNode *node,
|
||||
string name = b_socket.name();
|
||||
|
||||
if (node_use_modified_socket_name(node)) {
|
||||
BL::Node::inputs_iterator b_input;
|
||||
bool found = false;
|
||||
int counter = 0, total = 0;
|
||||
|
||||
for (BL::NodeSocket &b_input : b_node.inputs) {
|
||||
if (b_input.name() == name) {
|
||||
if (!found) {
|
||||
for (b_node.inputs.begin(b_input); b_input != b_node.inputs.end(); ++b_input) {
|
||||
if (b_input->name() == name) {
|
||||
if (!found)
|
||||
counter++;
|
||||
}
|
||||
total++;
|
||||
}
|
||||
|
||||
if (b_input.ptr.data == b_socket.ptr.data)
|
||||
if (b_input->ptr.data == b_socket.ptr.data)
|
||||
found = true;
|
||||
}
|
||||
|
||||
@@ -1042,20 +1045,19 @@ static ShaderOutput *node_find_output_by_name(ShaderNode *node,
|
||||
string name = b_socket.name();
|
||||
|
||||
if (node_use_modified_socket_name(node)) {
|
||||
BL::Node::outputs_iterator b_output;
|
||||
bool found = false;
|
||||
int counter = 0, total = 0;
|
||||
|
||||
for (BL::NodeSocket &b_output : b_node.outputs) {
|
||||
if (b_output.name() == name) {
|
||||
if (!found) {
|
||||
for (b_node.outputs.begin(b_output); b_output != b_node.outputs.end(); ++b_output) {
|
||||
if (b_output->name() == name) {
|
||||
if (!found)
|
||||
counter++;
|
||||
}
|
||||
total++;
|
||||
}
|
||||
|
||||
if (b_output.ptr.data == b_socket.ptr.data) {
|
||||
if (b_output->ptr.data == b_socket.ptr.data)
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
|
||||
/* rename if needed */
|
||||
@@ -1080,19 +1082,25 @@ static void add_nodes(Scene *scene,
|
||||
const ProxyMap &proxy_output_map)
|
||||
{
|
||||
/* add nodes */
|
||||
BL::ShaderNodeTree::nodes_iterator b_node;
|
||||
PtrInputMap input_map;
|
||||
PtrOutputMap output_map;
|
||||
|
||||
BL::Node::inputs_iterator b_input;
|
||||
BL::Node::outputs_iterator b_output;
|
||||
|
||||
/* find the node to use for output if there are multiple */
|
||||
BL::ShaderNode output_node = b_ntree.get_output_node(
|
||||
BL::ShaderNodeOutputMaterial::target_CYCLES);
|
||||
|
||||
/* add nodes */
|
||||
for (BL::Node &b_node : b_ntree.nodes) {
|
||||
if (b_node.mute() || b_node.is_a(&RNA_NodeReroute)) {
|
||||
for (b_ntree.nodes.begin(b_node); b_node != b_ntree.nodes.end(); ++b_node) {
|
||||
if (b_node->mute() || b_node->is_a(&RNA_NodeReroute)) {
|
||||
/* replace muted node with internal links */
|
||||
for (BL::NodeLink &b_link : b_node.internal_links) {
|
||||
BL::NodeSocket to_socket(b_link.to_socket());
|
||||
BL::Node::internal_links_iterator b_link;
|
||||
for (b_node->internal_links.begin(b_link); b_link != b_node->internal_links.end();
|
||||
++b_link) {
|
||||
BL::NodeSocket to_socket(b_link->to_socket());
|
||||
SocketType::Type to_socket_type = convert_socket_type(to_socket);
|
||||
if (to_socket_type == SocketType::UNDEFINED) {
|
||||
continue;
|
||||
@@ -1100,22 +1108,22 @@ static void add_nodes(Scene *scene,
|
||||
|
||||
ConvertNode *proxy = graph->create_node<ConvertNode>(to_socket_type, to_socket_type, true);
|
||||
|
||||
input_map[b_link.from_socket().ptr.data] = proxy->inputs[0];
|
||||
output_map[b_link.to_socket().ptr.data] = proxy->outputs[0];
|
||||
input_map[b_link->from_socket().ptr.data] = proxy->inputs[0];
|
||||
output_map[b_link->to_socket().ptr.data] = proxy->outputs[0];
|
||||
|
||||
graph->add(proxy);
|
||||
}
|
||||
}
|
||||
else if (b_node.is_a(&RNA_ShaderNodeGroup) || b_node.is_a(&RNA_NodeCustomGroup) ||
|
||||
b_node.is_a(&RNA_ShaderNodeCustomGroup)) {
|
||||
else if (b_node->is_a(&RNA_ShaderNodeGroup) || b_node->is_a(&RNA_NodeCustomGroup) ||
|
||||
b_node->is_a(&RNA_ShaderNodeCustomGroup)) {
|
||||
|
||||
BL::ShaderNodeTree b_group_ntree(PointerRNA_NULL);
|
||||
if (b_node.is_a(&RNA_ShaderNodeGroup))
|
||||
b_group_ntree = BL::ShaderNodeTree(((BL::NodeGroup)(b_node)).node_tree());
|
||||
else if (b_node.is_a(&RNA_NodeCustomGroup))
|
||||
b_group_ntree = BL::ShaderNodeTree(((BL::NodeCustomGroup)(b_node)).node_tree());
|
||||
if (b_node->is_a(&RNA_ShaderNodeGroup))
|
||||
b_group_ntree = BL::ShaderNodeTree(((BL::NodeGroup)(*b_node)).node_tree());
|
||||
else if (b_node->is_a(&RNA_NodeCustomGroup))
|
||||
b_group_ntree = BL::ShaderNodeTree(((BL::NodeCustomGroup)(*b_node)).node_tree());
|
||||
else
|
||||
b_group_ntree = BL::ShaderNodeTree(((BL::ShaderNodeCustomGroup)(b_node)).node_tree());
|
||||
b_group_ntree = BL::ShaderNodeTree(((BL::ShaderNodeCustomGroup)(*b_node)).node_tree());
|
||||
|
||||
ProxyMap group_proxy_input_map, group_proxy_output_map;
|
||||
|
||||
@@ -1123,8 +1131,8 @@ static void add_nodes(Scene *scene,
|
||||
* Do this even if the node group has no internal tree,
|
||||
* so that links have something to connect to and assert won't fail.
|
||||
*/
|
||||
for (BL::NodeSocket &b_input : b_node.inputs) {
|
||||
SocketType::Type input_type = convert_socket_type(b_input);
|
||||
for (b_node->inputs.begin(b_input); b_input != b_node->inputs.end(); ++b_input) {
|
||||
SocketType::Type input_type = convert_socket_type(*b_input);
|
||||
if (input_type == SocketType::UNDEFINED) {
|
||||
continue;
|
||||
}
|
||||
@@ -1133,14 +1141,14 @@ static void add_nodes(Scene *scene,
|
||||
graph->add(proxy);
|
||||
|
||||
/* register the proxy node for internal binding */
|
||||
group_proxy_input_map[b_input.identifier()] = proxy;
|
||||
group_proxy_input_map[b_input->identifier()] = proxy;
|
||||
|
||||
input_map[b_input.ptr.data] = proxy->inputs[0];
|
||||
input_map[b_input->ptr.data] = proxy->inputs[0];
|
||||
|
||||
set_default_value(proxy->inputs[0], b_input, b_data, b_ntree);
|
||||
set_default_value(proxy->inputs[0], *b_input, b_data, b_ntree);
|
||||
}
|
||||
for (BL::NodeSocket &b_output : b_node.outputs) {
|
||||
SocketType::Type output_type = convert_socket_type(b_output);
|
||||
for (b_node->outputs.begin(b_output); b_output != b_node->outputs.end(); ++b_output) {
|
||||
SocketType::Type output_type = convert_socket_type(*b_output);
|
||||
if (output_type == SocketType::UNDEFINED) {
|
||||
continue;
|
||||
}
|
||||
@@ -1149,9 +1157,9 @@ static void add_nodes(Scene *scene,
|
||||
graph->add(proxy);
|
||||
|
||||
/* register the proxy node for internal binding */
|
||||
group_proxy_output_map[b_output.identifier()] = proxy;
|
||||
group_proxy_output_map[b_output->identifier()] = proxy;
|
||||
|
||||
output_map[b_output.ptr.data] = proxy->outputs[0];
|
||||
output_map[b_output->ptr.data] = proxy->outputs[0];
|
||||
}
|
||||
|
||||
if (b_group_ntree) {
|
||||
@@ -1166,30 +1174,30 @@ static void add_nodes(Scene *scene,
|
||||
group_proxy_output_map);
|
||||
}
|
||||
}
|
||||
else if (b_node.is_a(&RNA_NodeGroupInput)) {
|
||||
else if (b_node->is_a(&RNA_NodeGroupInput)) {
|
||||
/* map each socket to a proxy node */
|
||||
for (BL::NodeSocket &b_output : b_node.outputs) {
|
||||
ProxyMap::const_iterator proxy_it = proxy_input_map.find(b_output.identifier());
|
||||
for (b_node->outputs.begin(b_output); b_output != b_node->outputs.end(); ++b_output) {
|
||||
ProxyMap::const_iterator proxy_it = proxy_input_map.find(b_output->identifier());
|
||||
if (proxy_it != proxy_input_map.end()) {
|
||||
ConvertNode *proxy = proxy_it->second;
|
||||
|
||||
output_map[b_output.ptr.data] = proxy->outputs[0];
|
||||
output_map[b_output->ptr.data] = proxy->outputs[0];
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (b_node.is_a(&RNA_NodeGroupOutput)) {
|
||||
BL::NodeGroupOutput b_output_node(b_node);
|
||||
else if (b_node->is_a(&RNA_NodeGroupOutput)) {
|
||||
BL::NodeGroupOutput b_output_node(*b_node);
|
||||
/* only the active group output is used */
|
||||
if (b_output_node.is_active_output()) {
|
||||
/* map each socket to a proxy node */
|
||||
for (BL::NodeSocket &b_input : b_node.inputs) {
|
||||
ProxyMap::const_iterator proxy_it = proxy_output_map.find(b_input.identifier());
|
||||
for (b_node->inputs.begin(b_input); b_input != b_node->inputs.end(); ++b_input) {
|
||||
ProxyMap::const_iterator proxy_it = proxy_output_map.find(b_input->identifier());
|
||||
if (proxy_it != proxy_output_map.end()) {
|
||||
ConvertNode *proxy = proxy_it->second;
|
||||
|
||||
input_map[b_input.ptr.data] = proxy->inputs[0];
|
||||
input_map[b_input->ptr.data] = proxy->inputs[0];
|
||||
|
||||
set_default_value(proxy->inputs[0], b_input, b_data, b_ntree);
|
||||
set_default_value(proxy->inputs[0], *b_input, b_data, b_ntree);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1197,49 +1205,52 @@ static void add_nodes(Scene *scene,
|
||||
else {
|
||||
ShaderNode *node = NULL;
|
||||
|
||||
if (b_node.ptr.data == output_node.ptr.data) {
|
||||
if (b_node->ptr.data == output_node.ptr.data) {
|
||||
node = graph->output();
|
||||
}
|
||||
else {
|
||||
BL::ShaderNode b_shader_node(b_node);
|
||||
BL::ShaderNode b_shader_node(*b_node);
|
||||
node = add_node(
|
||||
scene, b_engine, b_data, b_depsgraph, b_scene, graph, b_ntree, b_shader_node);
|
||||
}
|
||||
|
||||
if (node) {
|
||||
/* map node sockets for linking */
|
||||
for (BL::NodeSocket &b_input : b_node.inputs) {
|
||||
ShaderInput *input = node_find_input_by_name(node, b_node, b_input);
|
||||
for (b_node->inputs.begin(b_input); b_input != b_node->inputs.end(); ++b_input) {
|
||||
ShaderInput *input = node_find_input_by_name(node, *b_node, *b_input);
|
||||
if (!input) {
|
||||
/* XXX should not happen, report error? */
|
||||
continue;
|
||||
}
|
||||
input_map[b_input.ptr.data] = input;
|
||||
input_map[b_input->ptr.data] = input;
|
||||
|
||||
set_default_value(input, b_input, b_data, b_ntree);
|
||||
set_default_value(input, *b_input, b_data, b_ntree);
|
||||
}
|
||||
for (BL::NodeSocket &b_output : b_node.outputs) {
|
||||
ShaderOutput *output = node_find_output_by_name(node, b_node, b_output);
|
||||
for (b_node->outputs.begin(b_output); b_output != b_node->outputs.end(); ++b_output) {
|
||||
ShaderOutput *output = node_find_output_by_name(node, *b_node, *b_output);
|
||||
if (!output) {
|
||||
/* XXX should not happen, report error? */
|
||||
continue;
|
||||
}
|
||||
output_map[b_output.ptr.data] = output;
|
||||
output_map[b_output->ptr.data] = output;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* connect nodes */
|
||||
for (BL::NodeLink &b_link : b_ntree.links) {
|
||||
BL::NodeTree::links_iterator b_link;
|
||||
|
||||
for (b_ntree.links.begin(b_link); b_link != b_ntree.links.end(); ++b_link) {
|
||||
/* Ignore invalid links to avoid unwanted cycles created in graph.
|
||||
* Also ignore links with unavailable sockets. */
|
||||
if (!(b_link.is_valid() && b_link.from_socket().enabled() && b_link.to_socket().enabled())) {
|
||||
if (!(b_link->is_valid() && b_link->from_socket().enabled() &&
|
||||
b_link->to_socket().enabled())) {
|
||||
continue;
|
||||
}
|
||||
/* get blender link data */
|
||||
BL::NodeSocket b_from_sock = b_link.from_socket();
|
||||
BL::NodeSocket b_to_sock = b_link.to_socket();
|
||||
BL::NodeSocket b_from_sock = b_link->from_socket();
|
||||
BL::NodeSocket b_to_sock = b_link->to_socket();
|
||||
|
||||
ShaderOutput *output = 0;
|
||||
ShaderInput *input = 0;
|
||||
@@ -1287,12 +1298,13 @@ void BlenderSync::sync_materials(BL::Depsgraph &b_depsgraph, bool update_all)
|
||||
TaskPool pool;
|
||||
set<Shader *> updated_shaders;
|
||||
|
||||
for (BL::ID &b_id : b_depsgraph.ids) {
|
||||
if (!b_id.is_a(&RNA_Material)) {
|
||||
BL::Depsgraph::ids_iterator b_id;
|
||||
for (b_depsgraph.ids.begin(b_id); b_id != b_depsgraph.ids.end(); ++b_id) {
|
||||
if (!b_id->is_a(&RNA_Material)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
BL::Material b_mat(b_id);
|
||||
BL::Material b_mat(*b_id);
|
||||
Shader *shader;
|
||||
|
||||
/* test if we need to sync */
|
||||
@@ -1485,6 +1497,7 @@ void BlenderSync::sync_world(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d,
|
||||
|
||||
shader->set_graph(graph);
|
||||
shader->tag_update(scene);
|
||||
background->tag_update(scene);
|
||||
}
|
||||
|
||||
PointerRNA cscene = RNA_pointer_get(&b_scene.ptr, "cycles");
|
||||
@@ -1504,7 +1517,8 @@ void BlenderSync::sync_world(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d,
|
||||
viewport_parameters.custom_viewport_parameters());
|
||||
background->set_use_ao(background->get_use_ao() && view_layer.use_background_ao);
|
||||
|
||||
background->tag_update(scene);
|
||||
if (background->is_modified())
|
||||
background->tag_update(scene);
|
||||
}
|
||||
|
||||
/* Sync Lights */
|
||||
@@ -1513,12 +1527,13 @@ void BlenderSync::sync_lights(BL::Depsgraph &b_depsgraph, bool update_all)
|
||||
{
|
||||
shader_map.set_default(scene->default_light);
|
||||
|
||||
for (BL::ID &b_id : b_depsgraph.ids) {
|
||||
if (!b_id.is_a(&RNA_Light)) {
|
||||
BL::Depsgraph::ids_iterator b_id;
|
||||
for (b_depsgraph.ids.begin(b_id); b_id != b_depsgraph.ids.end(); ++b_id) {
|
||||
if (!b_id->is_a(&RNA_Light)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
BL::Light b_light(b_id);
|
||||
BL::Light b_light(*b_id);
|
||||
Shader *shader;
|
||||
|
||||
/* test if we need to sync */
|
||||
|
@@ -24,7 +24,6 @@
|
||||
#include "render/mesh.h"
|
||||
#include "render/nodes.h"
|
||||
#include "render/object.h"
|
||||
#include "render/procedural.h"
|
||||
#include "render/scene.h"
|
||||
#include "render/shader.h"
|
||||
|
||||
@@ -132,8 +131,9 @@ void BlenderSync::sync_recalc(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d
|
||||
}
|
||||
|
||||
/* Iterate over all IDs in this depsgraph. */
|
||||
for (BL::DepsgraphUpdate &b_update : b_depsgraph.updates) {
|
||||
BL::ID b_id(b_update.id());
|
||||
BL::Depsgraph::updates_iterator b_update;
|
||||
for (b_depsgraph.updates.begin(b_update); b_update != b_depsgraph.updates.end(); ++b_update) {
|
||||
BL::ID b_id(b_update->id());
|
||||
|
||||
/* Material */
|
||||
if (b_id.is_a(&RNA_Material)) {
|
||||
@@ -151,17 +151,17 @@ void BlenderSync::sync_recalc(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d
|
||||
const bool is_geometry = object_is_geometry(b_ob);
|
||||
const bool is_light = !is_geometry && object_is_light(b_ob);
|
||||
|
||||
if (b_ob.is_instancer() && b_update.is_updated_shading()) {
|
||||
if (b_ob.is_instancer() && b_update->is_updated_shading()) {
|
||||
/* Needed for e.g. object color updates on instancer. */
|
||||
object_map.set_recalc(b_ob);
|
||||
}
|
||||
|
||||
if (is_geometry || is_light) {
|
||||
const bool updated_geometry = b_update.is_updated_geometry();
|
||||
const bool updated_geometry = b_update->is_updated_geometry();
|
||||
|
||||
/* Geometry (mesh, hair, volume). */
|
||||
if (is_geometry) {
|
||||
if (b_update.is_updated_transform() || b_update.is_updated_shading()) {
|
||||
if (b_update->is_updated_transform() || b_update->is_updated_shading()) {
|
||||
object_map.set_recalc(b_ob);
|
||||
}
|
||||
|
||||
@@ -181,7 +181,7 @@ void BlenderSync::sync_recalc(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d
|
||||
}
|
||||
/* Light */
|
||||
else if (is_light) {
|
||||
if (b_update.is_updated_transform() || b_update.is_updated_shading()) {
|
||||
if (b_update->is_updated_transform() || b_update->is_updated_shading()) {
|
||||
object_map.set_recalc(b_ob);
|
||||
light_map.set_recalc(b_ob);
|
||||
}
|
||||
@@ -302,6 +302,11 @@ void BlenderSync::sync_integrator()
|
||||
integrator->set_sample_clamp_direct(get_float(cscene, "sample_clamp_direct"));
|
||||
integrator->set_sample_clamp_indirect(get_float(cscene, "sample_clamp_indirect"));
|
||||
if (!preview) {
|
||||
if (integrator->get_motion_blur() != r.use_motion_blur()) {
|
||||
scene->object_manager->tag_update(scene);
|
||||
scene->camera->tag_modified();
|
||||
}
|
||||
|
||||
integrator->set_motion_blur(r.use_motion_blur());
|
||||
}
|
||||
|
||||
@@ -370,8 +375,8 @@ void BlenderSync::sync_integrator()
|
||||
integrator->set_ao_bounces(0);
|
||||
}
|
||||
|
||||
/* UPDATE_NONE as we don't want to tag the integrator as modified, just tag dependent things */
|
||||
integrator->tag_update(scene, Integrator::UPDATE_NONE);
|
||||
if (integrator->is_modified())
|
||||
integrator->tag_update(scene);
|
||||
}
|
||||
|
||||
/* Film */
|
||||
@@ -466,15 +471,16 @@ void BlenderSync::sync_images()
|
||||
return;
|
||||
}
|
||||
/* Free buffers used by images which are not needed for render. */
|
||||
for (BL::Image &b_image : b_data.images) {
|
||||
BL::BlendData::images_iterator b_image;
|
||||
for (b_data.images.begin(b_image); b_image != b_data.images.end(); ++b_image) {
|
||||
/* TODO(sergey): Consider making it an utility function to check
|
||||
* whether image is considered builtin.
|
||||
*/
|
||||
const bool is_builtin = b_image.packed_file() ||
|
||||
b_image.source() == BL::Image::source_GENERATED ||
|
||||
b_image.source() == BL::Image::source_MOVIE || b_engine.is_preview();
|
||||
const bool is_builtin = b_image->packed_file() ||
|
||||
b_image->source() == BL::Image::source_GENERATED ||
|
||||
b_image->source() == BL::Image::source_MOVIE || b_engine.is_preview();
|
||||
if (is_builtin == false) {
|
||||
b_image.buffers_free();
|
||||
b_image->buffers_free();
|
||||
}
|
||||
/* TODO(sergey): Free builtin images not used by any shader. */
|
||||
}
|
||||
@@ -575,7 +581,10 @@ vector<Pass> BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay,
|
||||
vector<Pass> passes;
|
||||
|
||||
/* loop over passes */
|
||||
for (BL::RenderPass &b_pass : b_rlay.passes) {
|
||||
BL::RenderLayer::passes_iterator b_pass_iter;
|
||||
|
||||
for (b_rlay.passes.begin(b_pass_iter); b_pass_iter != b_rlay.passes.end(); ++b_pass_iter) {
|
||||
BL::RenderPass b_pass(*b_pass_iter);
|
||||
PassType pass_type = get_pass_type(b_pass);
|
||||
|
||||
if (pass_type == PASS_MOTION && scene->integrator->get_motion_blur())
|
||||
@@ -720,7 +729,7 @@ vector<Pass> BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay,
|
||||
|
||||
scene->film->set_pass_alpha_threshold(b_view_layer.pass_alpha_threshold());
|
||||
scene->film->tag_passes_update(scene, passes);
|
||||
scene->integrator->tag_update(scene, Integrator::UPDATE_ALL);
|
||||
scene->integrator->tag_update(scene);
|
||||
|
||||
return passes;
|
||||
}
|
||||
@@ -743,8 +752,9 @@ void BlenderSync::free_data_after_sync(BL::Depsgraph &b_depsgraph)
|
||||
/* TODO(sergey): We can actually remove the whole dependency graph,
|
||||
* but that will need some API support first.
|
||||
*/
|
||||
for (BL::Object &b_ob : b_depsgraph.objects) {
|
||||
b_ob.cache_release();
|
||||
BL::Depsgraph::objects_iterator b_ob;
|
||||
for (b_depsgraph.objects.begin(b_ob); b_ob != b_depsgraph.objects.end(); ++b_ob) {
|
||||
b_ob->cache_release();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -843,7 +853,7 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine &b_engine,
|
||||
preview_samples = preview_samples * preview_samples;
|
||||
}
|
||||
|
||||
if (get_enum(cscene, "progressive") == 0 && params.device.has_branched_path) {
|
||||
if (get_enum(cscene, "progressive") == 0 && (params.device.type != DEVICE_OPTIX)) {
|
||||
if (background) {
|
||||
params.samples = aa_samples;
|
||||
}
|
||||
|
@@ -134,7 +134,6 @@ class BlenderSync {
|
||||
void sync_view();
|
||||
|
||||
/* Shader */
|
||||
array<Node *> find_used_shaders(BL::Object &b_ob);
|
||||
void sync_world(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d, bool update_all);
|
||||
void sync_shaders(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d);
|
||||
void sync_nodes(Shader *shader, BL::ShaderNodeTree &b_ntree);
|
||||
|
@@ -538,9 +538,11 @@ static inline bool object_use_deform_motion(BL::Object &b_parent, BL::Object &b_
|
||||
|
||||
static inline BL::FluidDomainSettings object_fluid_liquid_domain_find(BL::Object &b_ob)
|
||||
{
|
||||
for (BL::Modifier &b_mod : b_ob.modifiers) {
|
||||
if (b_mod.is_a(&RNA_FluidModifier)) {
|
||||
BL::FluidModifier b_mmd(b_mod);
|
||||
BL::Object::modifiers_iterator b_mod;
|
||||
|
||||
for (b_ob.modifiers.begin(b_mod); b_mod != b_ob.modifiers.end(); ++b_mod) {
|
||||
if (b_mod->is_a(&RNA_FluidModifier)) {
|
||||
BL::FluidModifier b_mmd(*b_mod);
|
||||
|
||||
if (b_mmd.fluid_type() == BL::FluidModifier::fluid_type_DOMAIN &&
|
||||
b_mmd.domain_settings().domain_type() == BL::FluidDomainSettings::domain_type_LIQUID) {
|
||||
@@ -554,9 +556,11 @@ static inline BL::FluidDomainSettings object_fluid_liquid_domain_find(BL::Object
|
||||
|
||||
static inline BL::FluidDomainSettings object_fluid_gas_domain_find(BL::Object &b_ob)
|
||||
{
|
||||
for (BL::Modifier &b_mod : b_ob.modifiers) {
|
||||
if (b_mod.is_a(&RNA_FluidModifier)) {
|
||||
BL::FluidModifier b_mmd(b_mod);
|
||||
BL::Object::modifiers_iterator b_mod;
|
||||
|
||||
for (b_ob.modifiers.begin(b_mod); b_mod != b_ob.modifiers.end(); ++b_mod) {
|
||||
if (b_mod->is_a(&RNA_FluidModifier)) {
|
||||
BL::FluidModifier b_mmd(*b_mod);
|
||||
|
||||
if (b_mmd.fluid_type() == BL::FluidModifier::fluid_type_DOMAIN &&
|
||||
b_mmd.domain_settings().domain_type() == BL::FluidDomainSettings::domain_type_GAS) {
|
||||
|
@@ -222,7 +222,9 @@ class BlenderVolumeLoader : public VDBImageLoader {
|
||||
b_volume.grids.load(b_data.ptr.data);
|
||||
|
||||
#ifdef WITH_OPENVDB
|
||||
for (BL::VolumeGrid &b_volume_grid : b_volume.grids) {
|
||||
BL::Volume::grids_iterator b_grid_iter;
|
||||
for (b_volume.grids.begin(b_grid_iter); b_grid_iter != b_volume.grids.end(); ++b_grid_iter) {
|
||||
BL::VolumeGrid b_volume_grid(*b_grid_iter);
|
||||
if (b_volume_grid.name() == grid_name) {
|
||||
const bool unload = !b_volume_grid.is_loaded();
|
||||
|
||||
@@ -258,7 +260,9 @@ static void sync_volume_object(BL::BlendData &b_data,
|
||||
volume->set_object_space((b_render.space() == BL::VolumeRender::space_OBJECT));
|
||||
|
||||
/* Find grid with matching name. */
|
||||
for (BL::VolumeGrid &b_grid : b_volume.grids) {
|
||||
BL::Volume::grids_iterator b_grid_iter;
|
||||
for (b_volume.grids.begin(b_grid_iter); b_grid_iter != b_volume.grids.end(); ++b_grid_iter) {
|
||||
BL::VolumeGrid b_grid = *b_grid_iter;
|
||||
ustring name = ustring(b_grid.name());
|
||||
AttributeStandard std = ATTR_STD_NONE;
|
||||
|
||||
|
@@ -25,7 +25,6 @@ set(SRC
|
||||
bvh_binning.cpp
|
||||
bvh_build.cpp
|
||||
bvh_embree.cpp
|
||||
bvh_multi.cpp
|
||||
bvh_node.cpp
|
||||
bvh_optix.cpp
|
||||
bvh_sort.cpp
|
||||
@@ -39,7 +38,6 @@ set(SRC_HEADERS
|
||||
bvh_binning.h
|
||||
bvh_build.h
|
||||
bvh_embree.h
|
||||
bvh_multi.h
|
||||
bvh_node.h
|
||||
bvh_optix.h
|
||||
bvh_params.h
|
||||
|
@@ -17,11 +17,17 @@
|
||||
|
||||
#include "bvh/bvh.h"
|
||||
|
||||
#include "render/hair.h"
|
||||
#include "render/mesh.h"
|
||||
#include "render/object.h"
|
||||
|
||||
#include "bvh/bvh2.h"
|
||||
#include "bvh/bvh_build.h"
|
||||
#include "bvh/bvh_embree.h"
|
||||
#include "bvh/bvh_multi.h"
|
||||
#include "bvh/bvh_node.h"
|
||||
#include "bvh/bvh_optix.h"
|
||||
|
||||
#include "util/util_foreach.h"
|
||||
#include "util/util_logging.h"
|
||||
#include "util/util_progress.h"
|
||||
|
||||
@@ -32,17 +38,14 @@ CCL_NAMESPACE_BEGIN
|
||||
const char *bvh_layout_name(BVHLayout layout)
|
||||
{
|
||||
switch (layout) {
|
||||
case BVH_LAYOUT_NONE:
|
||||
return "NONE";
|
||||
case BVH_LAYOUT_BVH2:
|
||||
return "BVH2";
|
||||
case BVH_LAYOUT_NONE:
|
||||
return "NONE";
|
||||
case BVH_LAYOUT_EMBREE:
|
||||
return "EMBREE";
|
||||
case BVH_LAYOUT_OPTIX:
|
||||
return "OPTIX";
|
||||
case BVH_LAYOUT_MULTI_OPTIX:
|
||||
case BVH_LAYOUT_MULTI_OPTIX_EMBREE:
|
||||
return "MULTI";
|
||||
case BVH_LAYOUT_ALL:
|
||||
return "ALL";
|
||||
}
|
||||
@@ -73,6 +76,17 @@ BVHLayout BVHParams::best_bvh_layout(BVHLayout requested_layout, BVHLayoutMask s
|
||||
return (BVHLayout)(1 << widest_allowed_layout_mask);
|
||||
}
|
||||
|
||||
/* Pack Utility */
|
||||
|
||||
BVHStackEntry::BVHStackEntry(const BVHNode *n, int i) : node(n), idx(i)
|
||||
{
|
||||
}
|
||||
|
||||
int BVHStackEntry::encodeIdx() const
|
||||
{
|
||||
return (node->is_leaf()) ? ~idx : idx;
|
||||
}
|
||||
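/* Illustrative sketch (not part of this diff): encodeIdx() above packs leaf
 * indices as their bitwise complement, so the sign of the packed value tells
 * traversal code whether the entry referenced a leaf or an inner node.
 * `entry` is a BVHStackEntry assumed to be in scope. */
const int packed = entry.encodeIdx();
if (packed < 0) {
  const int leaf_index = ~packed; /* entry pointed at a leaf node */
}
else {
  const int inner_index = packed; /* entry pointed at an inner node */
}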
|
||||
/* BVH */
|
||||
|
||||
BVH::BVH(const BVHParams ¶ms_,
|
||||
@@ -85,27 +99,24 @@ BVH::BVH(const BVHParams ¶ms_,
|
||||
BVH *BVH::create(const BVHParams ¶ms,
|
||||
const vector<Geometry *> &geometry,
|
||||
const vector<Object *> &objects,
|
||||
Device *device)
|
||||
const Device *device)
|
||||
{
|
||||
switch (params.bvh_layout) {
|
||||
case BVH_LAYOUT_BVH2:
|
||||
return new BVH2(params, geometry, objects);
|
||||
case BVH_LAYOUT_EMBREE:
|
||||
#ifdef WITH_EMBREE
|
||||
return new BVHEmbree(params, geometry, objects);
|
||||
#else
|
||||
break;
|
||||
#endif
|
||||
case BVH_LAYOUT_OPTIX:
|
||||
#ifdef WITH_OPTIX
|
||||
return new BVHOptiX(params, geometry, objects, device);
|
||||
return new BVHEmbree(params, geometry, objects, device);
|
||||
#else
|
||||
(void)device;
|
||||
break;
|
||||
#endif
|
||||
case BVH_LAYOUT_MULTI_OPTIX:
|
||||
case BVH_LAYOUT_MULTI_OPTIX_EMBREE:
|
||||
return new BVHMulti(params, geometry, objects);
|
||||
case BVH_LAYOUT_OPTIX:
|
||||
#ifdef WITH_OPTIX
|
||||
return new BVHOptiX(params, geometry, objects);
|
||||
#else
|
||||
break;
|
||||
#endif
|
||||
case BVH_LAYOUT_NONE:
|
||||
case BVH_LAYOUT_ALL:
|
||||
break;
|
||||
@@ -114,4 +125,399 @@ BVH *BVH::create(const BVHParams ¶ms,
|
||||
return NULL;
|
||||
}
|
||||
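/* Rough usage sketch (not part of this diff): how a caller might pick a layout
 * and build a BVH through the factory above. `device`, `geometry`, `objects`
 * and `progress` are assumed to exist in the surrounding code, and the device
 * is assumed to expose get_bvh_layout_mask(). */
BVHParams params;
params.bvh_layout = BVHParams::best_bvh_layout(BVH_LAYOUT_BVH2, device->get_bvh_layout_mask());
BVH *bvh = BVH::create(params, geometry, objects, device);
if (bvh != NULL) {
  bvh->build(progress);
  delete bvh;
}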
|
||||
/* Building */
|
||||
|
||||
void BVH::build(Progress &progress, Stats *)
|
||||
{
|
||||
progress.set_substatus("Building BVH");
|
||||
|
||||
/* build nodes */
|
||||
BVHBuild bvh_build(objects,
|
||||
pack.prim_type,
|
||||
pack.prim_index,
|
||||
pack.prim_object,
|
||||
pack.prim_time,
|
||||
params,
|
||||
progress);
|
||||
BVHNode *bvh2_root = bvh_build.run();
|
||||
|
||||
if (progress.get_cancel()) {
|
||||
if (bvh2_root != NULL) {
|
||||
bvh2_root->deleteSubtree();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
/* BVH builder returns tree in a binary mode (with two children per inner
|
||||
* node. Need to adopt that for a wider BVH implementations. */
|
||||
BVHNode *root = widen_children_nodes(bvh2_root);
|
||||
if (root != bvh2_root) {
|
||||
bvh2_root->deleteSubtree();
|
||||
}
|
||||
|
||||
if (progress.get_cancel()) {
|
||||
if (root != NULL) {
|
||||
root->deleteSubtree();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
/* pack triangles */
|
||||
progress.set_substatus("Packing BVH triangles and strands");
|
||||
pack_primitives();
|
||||
|
||||
if (progress.get_cancel()) {
|
||||
root->deleteSubtree();
|
||||
return;
|
||||
}
|
||||
|
||||
/* pack nodes */
|
||||
progress.set_substatus("Packing BVH nodes");
|
||||
pack_nodes(root);
|
||||
|
||||
/* free build nodes */
|
||||
root->deleteSubtree();
|
||||
}
|
||||
|
||||
/* Refitting */
|
||||
|
||||
void BVH::refit(Progress &progress)
|
||||
{
|
||||
progress.set_substatus("Packing BVH primitives");
|
||||
pack_primitives();
|
||||
|
||||
if (progress.get_cancel())
|
||||
return;
|
||||
|
||||
progress.set_substatus("Refitting BVH nodes");
|
||||
refit_nodes();
|
||||
}
|
||||
|
||||
void BVH::refit_primitives(int start, int end, BoundBox &bbox, uint &visibility)
|
||||
{
|
||||
/* Refit range of primitives. */
|
||||
for (int prim = start; prim < end; prim++) {
|
||||
int pidx = pack.prim_index[prim];
|
||||
int tob = pack.prim_object[prim];
|
||||
Object *ob = objects[tob];
|
||||
|
||||
if (pidx == -1) {
|
||||
/* Object instance. */
|
||||
bbox.grow(ob->bounds);
|
||||
}
|
||||
else {
|
||||
/* Primitives. */
|
||||
if (pack.prim_type[prim] & PRIMITIVE_ALL_CURVE) {
|
||||
/* Curves. */
|
||||
const Hair *hair = static_cast<const Hair *>(ob->get_geometry());
|
||||
int prim_offset = (params.top_level) ? hair->prim_offset : 0;
|
||||
Hair::Curve curve = hair->get_curve(pidx - prim_offset);
|
||||
int k = PRIMITIVE_UNPACK_SEGMENT(pack.prim_type[prim]);
|
||||
|
||||
curve.bounds_grow(k, &hair->get_curve_keys()[0], &hair->get_curve_radius()[0], bbox);
|
||||
|
||||
/* Motion curves. */
|
||||
if (hair->get_use_motion_blur()) {
|
||||
Attribute *attr = hair->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
|
||||
|
||||
if (attr) {
|
||||
size_t hair_size = hair->get_curve_keys().size();
|
||||
size_t steps = hair->get_motion_steps() - 1;
|
||||
float3 *key_steps = attr->data_float3();
|
||||
|
||||
for (size_t i = 0; i < steps; i++)
|
||||
curve.bounds_grow(k, key_steps + i * hair_size, &hair->get_curve_radius()[0], bbox);
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
/* Triangles. */
|
||||
const Mesh *mesh = static_cast<const Mesh *>(ob->get_geometry());
|
||||
int prim_offset = (params.top_level) ? mesh->prim_offset : 0;
|
||||
Mesh::Triangle triangle = mesh->get_triangle(pidx - prim_offset);
|
||||
const float3 *vpos = &mesh->verts[0];
|
||||
|
||||
triangle.bounds_grow(vpos, bbox);
|
||||
|
||||
/* Motion triangles. */
|
||||
if (mesh->use_motion_blur) {
|
||||
Attribute *attr = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
|
||||
|
||||
if (attr) {
|
||||
size_t mesh_size = mesh->verts.size();
|
||||
size_t steps = mesh->motion_steps - 1;
|
||||
float3 *vert_steps = attr->data_float3();
|
||||
|
||||
for (size_t i = 0; i < steps; i++)
|
||||
triangle.bounds_grow(vert_steps + i * mesh_size, bbox);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
visibility |= ob->visibility_for_tracing();
|
||||
}
|
||||
}
|
||||
|
||||
/* Triangles */
|
||||
|
||||
void BVH::pack_triangle(int idx, float4 tri_verts[3])
|
||||
{
|
||||
int tob = pack.prim_object[idx];
|
||||
assert(tob >= 0 && tob < objects.size());
|
||||
const Mesh *mesh = static_cast<const Mesh *>(objects[tob]->get_geometry());
|
||||
|
||||
int tidx = pack.prim_index[idx];
|
||||
Mesh::Triangle t = mesh->get_triangle(tidx);
|
||||
const float3 *vpos = &mesh->verts[0];
|
||||
float3 v0 = vpos[t.v[0]];
|
||||
float3 v1 = vpos[t.v[1]];
|
||||
float3 v2 = vpos[t.v[2]];
|
||||
|
||||
tri_verts[0] = float3_to_float4(v0);
|
||||
tri_verts[1] = float3_to_float4(v1);
|
||||
tri_verts[2] = float3_to_float4(v2);
|
||||
}
|
||||
|
||||
void BVH::pack_primitives()
|
||||
{
|
||||
const size_t tidx_size = pack.prim_index.size();
|
||||
size_t num_prim_triangles = 0;
|
||||
/* Count number of triangles primitives in BVH. */
|
||||
for (unsigned int i = 0; i < tidx_size; i++) {
|
||||
if ((pack.prim_index[i] != -1)) {
|
||||
if ((pack.prim_type[i] & PRIMITIVE_ALL_TRIANGLE) != 0) {
|
||||
++num_prim_triangles;
|
||||
}
|
||||
}
|
||||
}
|
||||
/* Reserve size for arrays. */
|
||||
pack.prim_tri_index.clear();
|
||||
pack.prim_tri_index.resize(tidx_size);
|
||||
pack.prim_tri_verts.clear();
|
||||
pack.prim_tri_verts.resize(num_prim_triangles * 3);
|
||||
pack.prim_visibility.clear();
|
||||
pack.prim_visibility.resize(tidx_size);
|
||||
/* Fill in all the arrays. */
|
||||
size_t prim_triangle_index = 0;
|
||||
for (unsigned int i = 0; i < tidx_size; i++) {
|
||||
if (pack.prim_index[i] != -1) {
|
||||
int tob = pack.prim_object[i];
|
||||
Object *ob = objects[tob];
|
||||
if ((pack.prim_type[i] & PRIMITIVE_ALL_TRIANGLE) != 0) {
|
||||
pack_triangle(i, (float4 *)&pack.prim_tri_verts[3 * prim_triangle_index]);
|
||||
pack.prim_tri_index[i] = 3 * prim_triangle_index;
|
||||
++prim_triangle_index;
|
||||
}
|
||||
else {
|
||||
pack.prim_tri_index[i] = -1;
|
||||
}
|
||||
pack.prim_visibility[i] = ob->visibility_for_tracing();
|
||||
}
|
||||
else {
|
||||
pack.prim_tri_index[i] = -1;
|
||||
pack.prim_visibility[i] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
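/* Illustrative sketch (not part of this diff): pack_primitives() above stores
 * three float4 vertices per triangle and records the starting vertex slot in
 * prim_tri_index, so a triangle's packed vertices can be fetched directly.
 * `i` is a primitive index assumed to be in scope. */
const int base = pack.prim_tri_index[i]; /* -1 for non-triangle primitives */
if (base != -1) {
  const float4 v0 = pack.prim_tri_verts[base + 0];
  const float4 v1 = pack.prim_tri_verts[base + 1];
  const float4 v2 = pack.prim_tri_verts[base + 2];
}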
|
||||
/* Pack Instances */
|
||||
|
||||
void BVH::pack_instances(size_t nodes_size, size_t leaf_nodes_size)
|
||||
{
|
||||
/* Adjust primitive index to point to the triangle in the global array, for
|
||||
* geometry with transform applied and already in the top level BVH.
|
||||
*/
|
||||
for (size_t i = 0; i < pack.prim_index.size(); i++) {
|
||||
if (pack.prim_index[i] != -1) {
|
||||
pack.prim_index[i] += objects[pack.prim_object[i]]->get_geometry()->prim_offset;
|
||||
}
|
||||
}
|
||||
|
||||
/* track offsets of instanced BVH data in global array */
|
||||
size_t prim_offset = pack.prim_index.size();
|
||||
size_t nodes_offset = nodes_size;
|
||||
size_t nodes_leaf_offset = leaf_nodes_size;
|
||||
|
||||
/* clear array that gives the node indexes for instanced objects */
|
||||
pack.object_node.clear();
|
||||
|
||||
/* reserve */
|
||||
size_t prim_index_size = pack.prim_index.size();
|
||||
size_t prim_tri_verts_size = pack.prim_tri_verts.size();
|
||||
|
||||
size_t pack_prim_index_offset = prim_index_size;
|
||||
size_t pack_prim_tri_verts_offset = prim_tri_verts_size;
|
||||
size_t pack_nodes_offset = nodes_size;
|
||||
size_t pack_leaf_nodes_offset = leaf_nodes_size;
|
||||
size_t object_offset = 0;
|
||||
|
||||
foreach (Geometry *geom, geometry) {
|
||||
BVH *bvh = geom->bvh;
|
||||
|
||||
if (geom->need_build_bvh(params.bvh_layout)) {
|
||||
prim_index_size += bvh->pack.prim_index.size();
|
||||
prim_tri_verts_size += bvh->pack.prim_tri_verts.size();
|
||||
nodes_size += bvh->pack.nodes.size();
|
||||
leaf_nodes_size += bvh->pack.leaf_nodes.size();
|
||||
}
|
||||
}
|
||||
|
||||
pack.prim_index.resize(prim_index_size);
|
||||
pack.prim_type.resize(prim_index_size);
|
||||
pack.prim_object.resize(prim_index_size);
|
||||
pack.prim_visibility.resize(prim_index_size);
|
||||
pack.prim_tri_verts.resize(prim_tri_verts_size);
|
||||
pack.prim_tri_index.resize(prim_index_size);
|
||||
pack.nodes.resize(nodes_size);
|
||||
pack.leaf_nodes.resize(leaf_nodes_size);
|
||||
pack.object_node.resize(objects.size());
|
||||
|
||||
if (params.num_motion_curve_steps > 0 || params.num_motion_triangle_steps > 0) {
|
||||
pack.prim_time.resize(prim_index_size);
|
||||
}
|
||||
|
||||
int *pack_prim_index = (pack.prim_index.size()) ? &pack.prim_index[0] : NULL;
|
||||
int *pack_prim_type = (pack.prim_type.size()) ? &pack.prim_type[0] : NULL;
|
||||
int *pack_prim_object = (pack.prim_object.size()) ? &pack.prim_object[0] : NULL;
|
||||
uint *pack_prim_visibility = (pack.prim_visibility.size()) ? &pack.prim_visibility[0] : NULL;
|
||||
float4 *pack_prim_tri_verts = (pack.prim_tri_verts.size()) ? &pack.prim_tri_verts[0] : NULL;
|
||||
uint *pack_prim_tri_index = (pack.prim_tri_index.size()) ? &pack.prim_tri_index[0] : NULL;
|
||||
int4 *pack_nodes = (pack.nodes.size()) ? &pack.nodes[0] : NULL;
|
||||
int4 *pack_leaf_nodes = (pack.leaf_nodes.size()) ? &pack.leaf_nodes[0] : NULL;
|
||||
float2 *pack_prim_time = (pack.prim_time.size()) ? &pack.prim_time[0] : NULL;
|
||||
|
||||
map<Geometry *, int> geometry_map;
|
||||
|
||||
/* merge */
|
||||
foreach (Object *ob, objects) {
|
||||
Geometry *geom = ob->get_geometry();
|
||||
|
||||
/* We assume that if mesh doesn't need own BVH it was already included
|
||||
* into a top-level BVH and no packing here is needed.
|
||||
*/
|
||||
if (!geom->need_build_bvh(params.bvh_layout)) {
|
||||
pack.object_node[object_offset++] = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* if mesh already added once, don't add it again, but used set
|
||||
* node offset for this object */
|
||||
map<Geometry *, int>::iterator it = geometry_map.find(geom);
|
||||
|
||||
if (geometry_map.find(geom) != geometry_map.end()) {
|
||||
int noffset = it->second;
|
||||
pack.object_node[object_offset++] = noffset;
|
||||
continue;
|
||||
}
|
||||
|
||||
BVH *bvh = geom->bvh;
|
||||
|
||||
int noffset = nodes_offset;
|
||||
int noffset_leaf = nodes_leaf_offset;
|
||||
int geom_prim_offset = geom->prim_offset;
|
||||
|
||||
/* fill in node indexes for instances */
|
||||
if (bvh->pack.root_index == -1)
|
||||
pack.object_node[object_offset++] = -noffset_leaf - 1;
|
||||
else
|
||||
pack.object_node[object_offset++] = noffset;
|
||||
|
||||
geometry_map[geom] = pack.object_node[object_offset - 1];
|
||||
|
||||
/* merge primitive, object and triangle indexes */
|
||||
if (bvh->pack.prim_index.size()) {
|
||||
size_t bvh_prim_index_size = bvh->pack.prim_index.size();
|
||||
int *bvh_prim_index = &bvh->pack.prim_index[0];
|
||||
int *bvh_prim_type = &bvh->pack.prim_type[0];
|
||||
uint *bvh_prim_visibility = &bvh->pack.prim_visibility[0];
|
||||
uint *bvh_prim_tri_index = &bvh->pack.prim_tri_index[0];
|
||||
float2 *bvh_prim_time = bvh->pack.prim_time.size() ? &bvh->pack.prim_time[0] : NULL;
|
||||
|
||||
for (size_t i = 0; i < bvh_prim_index_size; i++) {
|
||||
if (bvh->pack.prim_type[i] & PRIMITIVE_ALL_CURVE) {
|
||||
pack_prim_index[pack_prim_index_offset] = bvh_prim_index[i] + geom_prim_offset;
|
||||
pack_prim_tri_index[pack_prim_index_offset] = -1;
|
||||
}
|
||||
else {
|
||||
pack_prim_index[pack_prim_index_offset] = bvh_prim_index[i] + geom_prim_offset;
|
||||
pack_prim_tri_index[pack_prim_index_offset] = bvh_prim_tri_index[i] +
|
||||
pack_prim_tri_verts_offset;
|
||||
}
|
||||
|
||||
pack_prim_type[pack_prim_index_offset] = bvh_prim_type[i];
|
||||
pack_prim_visibility[pack_prim_index_offset] = bvh_prim_visibility[i];
|
||||
pack_prim_object[pack_prim_index_offset] = 0; // unused for instances
|
||||
if (bvh_prim_time != NULL) {
|
||||
pack_prim_time[pack_prim_index_offset] = bvh_prim_time[i];
|
||||
}
|
||||
pack_prim_index_offset++;
|
||||
}
|
||||
}
|
||||
|
||||
/* Merge triangle vertices data. */
|
||||
if (bvh->pack.prim_tri_verts.size()) {
|
||||
const size_t prim_tri_size = bvh->pack.prim_tri_verts.size();
|
||||
memcpy(pack_prim_tri_verts + pack_prim_tri_verts_offset,
|
||||
&bvh->pack.prim_tri_verts[0],
|
||||
prim_tri_size * sizeof(float4));
|
||||
pack_prim_tri_verts_offset += prim_tri_size;
|
||||
}
|
||||
|
||||
/* merge nodes */
|
||||
if (bvh->pack.leaf_nodes.size()) {
|
||||
int4 *leaf_nodes_offset = &bvh->pack.leaf_nodes[0];
|
||||
size_t leaf_nodes_offset_size = bvh->pack.leaf_nodes.size();
|
||||
for (size_t i = 0, j = 0; i < leaf_nodes_offset_size; i += BVH_NODE_LEAF_SIZE, j++) {
|
||||
int4 data = leaf_nodes_offset[i];
|
||||
data.x += prim_offset;
|
||||
data.y += prim_offset;
|
||||
pack_leaf_nodes[pack_leaf_nodes_offset] = data;
|
||||
for (int j = 1; j < BVH_NODE_LEAF_SIZE; ++j) {
|
||||
pack_leaf_nodes[pack_leaf_nodes_offset + j] = leaf_nodes_offset[i + j];
|
||||
}
|
||||
pack_leaf_nodes_offset += BVH_NODE_LEAF_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
if (bvh->pack.nodes.size()) {
|
||||
int4 *bvh_nodes = &bvh->pack.nodes[0];
|
||||
size_t bvh_nodes_size = bvh->pack.nodes.size();
|
||||
|
||||
for (size_t i = 0, j = 0; i < bvh_nodes_size; j++) {
|
||||
size_t nsize, nsize_bbox;
|
||||
if (bvh_nodes[i].x & PATH_RAY_NODE_UNALIGNED) {
|
||||
nsize = BVH_UNALIGNED_NODE_SIZE;
|
||||
nsize_bbox = 0;
|
||||
}
|
||||
else {
|
||||
nsize = BVH_NODE_SIZE;
|
||||
nsize_bbox = 0;
|
||||
}
|
||||
|
||||
memcpy(pack_nodes + pack_nodes_offset, bvh_nodes + i, nsize_bbox * sizeof(int4));
|
||||
|
||||
/* Modify offsets into arrays */
|
||||
int4 data = bvh_nodes[i + nsize_bbox];
|
||||
data.z += (data.z < 0) ? -noffset_leaf : noffset;
|
||||
data.w += (data.w < 0) ? -noffset_leaf : noffset;
|
||||
pack_nodes[pack_nodes_offset + nsize_bbox] = data;
|
||||
|
||||
/* Usually this copies nothing, but we better
|
||||
* be prepared for possible node size extension.
|
||||
*/
|
||||
memcpy(&pack_nodes[pack_nodes_offset + nsize_bbox + 1],
|
||||
&bvh_nodes[i + nsize_bbox + 1],
|
||||
sizeof(int4) * (nsize - (nsize_bbox + 1)));
|
||||
|
||||
pack_nodes_offset += nsize;
|
||||
i += nsize;
|
||||
}
|
||||
}
|
||||
|
||||
nodes_offset += bvh->pack.nodes.size();
|
||||
nodes_leaf_offset += bvh->pack.leaf_nodes.size();
|
||||
prim_offset += bvh->pack.prim_index.size();
|
||||
}
|
||||
}
|
||||
|
||||
CCL_NAMESPACE_END
|
||||
|
@@ -25,16 +25,17 @@
|
||||
|
||||
CCL_NAMESPACE_BEGIN
|
||||
|
||||
class BoundBox;
|
||||
class BVHNode;
|
||||
class BVHParams;
|
||||
class Stats;
|
||||
class Device;
|
||||
class DeviceScene;
|
||||
class Geometry;
|
||||
class BVHNode;
|
||||
struct BVHStackEntry;
|
||||
class BVHParams;
|
||||
class BoundBox;
|
||||
class LeafNode;
|
||||
class Geometry;
|
||||
class Object;
|
||||
class Progress;
|
||||
class Stats;
|
||||
|
||||
#define BVH_ALIGN 4096
|
||||
#define TRI_NODE_SIZE 3
|
||||
@@ -75,10 +76,13 @@ struct PackedBVH {
|
||||
}
|
||||
};
|
||||
|
||||
enum BVH_TYPE { bvh2 };
|
||||
|
||||
/* BVH */
|
||||
|
||||
class BVH {
|
||||
public:
|
||||
PackedBVH pack;
|
||||
BVHParams params;
|
||||
vector<Geometry *> geometry;
|
||||
vector<Object *> objects;
|
||||
@@ -86,15 +90,47 @@ class BVH {
|
||||
static BVH *create(const BVHParams ¶ms,
|
||||
const vector<Geometry *> &geometry,
|
||||
const vector<Object *> &objects,
|
||||
Device *device);
|
||||
const Device *device);
|
||||
virtual ~BVH()
|
||||
{
|
||||
}
|
||||
|
||||
virtual void build(Progress &progress, Stats *stats = NULL);
|
||||
virtual void copy_to_device(Progress & /*progress*/, DeviceScene * /*dscene*/)
|
||||
{
|
||||
}
|
||||
|
||||
void refit(Progress &progress);
|
||||
|
||||
protected:
|
||||
BVH(const BVHParams ¶ms,
|
||||
const vector<Geometry *> &geometry,
|
||||
const vector<Object *> &objects);
|
||||
|
||||
/* Refit range of primitives. */
|
||||
void refit_primitives(int start, int end, BoundBox &bbox, uint &visibility);
|
||||
|
||||
/* triangles and strands */
|
||||
void pack_primitives();
|
||||
void pack_triangle(int idx, float4 storage[3]);
|
||||
|
||||
/* merge instance BVH's */
|
||||
void pack_instances(size_t nodes_size, size_t leaf_nodes_size);
|
||||
|
||||
/* for subclasses to implement */
|
||||
virtual void pack_nodes(const BVHNode *root) = 0;
|
||||
virtual void refit_nodes() = 0;
|
||||
|
||||
virtual BVHNode *widen_children_nodes(const BVHNode *root) = 0;
|
||||
};
|
||||
|
||||
/* Pack Utility */
|
||||
struct BVHStackEntry {
|
||||
const BVHNode *node;
|
||||
int idx;
|
||||
|
||||
BVHStackEntry(const BVHNode *n = 0, int i = 0);
|
||||
int encodeIdx() const;
|
||||
};
|
||||
|
||||
CCL_NAMESPACE_END
|
||||
|
@@ -17,28 +17,14 @@
|
||||
|
||||
#include "bvh/bvh2.h"
|
||||
|
||||
#include "render/hair.h"
|
||||
#include "render/mesh.h"
|
||||
#include "render/object.h"
|
||||
|
||||
#include "bvh/bvh_build.h"
|
||||
#include "bvh/bvh_node.h"
|
||||
#include "bvh/bvh_unaligned.h"
|
||||
|
||||
#include "util/util_foreach.h"
|
||||
#include "util/util_progress.h"
|
||||
|
||||
CCL_NAMESPACE_BEGIN
|
||||
|
||||
BVHStackEntry::BVHStackEntry(const BVHNode *n, int i) : node(n), idx(i)
|
||||
{
|
||||
}
|
||||
|
||||
int BVHStackEntry::encodeIdx() const
|
||||
{
|
||||
return (node->is_leaf()) ? ~idx : idx;
|
||||
}
|
||||
|
||||
BVH2::BVH2(const BVHParams ¶ms_,
|
||||
const vector<Geometry *> &geometry_,
|
||||
const vector<Object *> &objects_)
|
||||
@@ -46,70 +32,6 @@ BVH2::BVH2(const BVHParams ¶ms_,
|
||||
{
|
||||
}
|
||||
|
||||
void BVH2::build(Progress &progress, Stats *)
|
||||
{
|
||||
progress.set_substatus("Building BVH");
|
||||
|
||||
/* build nodes */
|
||||
BVHBuild bvh_build(objects,
|
||||
pack.prim_type,
|
||||
pack.prim_index,
|
||||
pack.prim_object,
|
||||
pack.prim_time,
|
||||
params,
|
||||
progress);
|
||||
BVHNode *bvh2_root = bvh_build.run();
|
||||
|
||||
if (progress.get_cancel()) {
|
||||
if (bvh2_root != NULL) {
|
||||
bvh2_root->deleteSubtree();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
/* BVH builder returns tree in a binary mode (with two children per inner
|
||||
* node. Need to adopt that for a wider BVH implementations. */
|
||||
BVHNode *root = widen_children_nodes(bvh2_root);
|
||||
if (root != bvh2_root) {
|
||||
bvh2_root->deleteSubtree();
|
||||
}
|
||||
|
||||
if (progress.get_cancel()) {
|
||||
if (root != NULL) {
|
||||
root->deleteSubtree();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
/* pack triangles */
|
||||
progress.set_substatus("Packing BVH triangles and strands");
|
||||
pack_primitives();
|
||||
|
||||
if (progress.get_cancel()) {
|
||||
root->deleteSubtree();
|
||||
return;
|
||||
}
|
||||
|
||||
/* pack nodes */
|
||||
progress.set_substatus("Packing BVH nodes");
|
||||
pack_nodes(root);
|
||||
|
||||
/* free build nodes */
|
||||
root->deleteSubtree();
|
||||
}
|
||||
|
||||
void BVH2::refit(Progress &progress)
|
||||
{
|
||||
progress.set_substatus("Packing BVH primitives");
|
||||
pack_primitives();
|
||||
|
||||
if (progress.get_cancel())
|
||||
return;
|
||||
|
||||
progress.set_substatus("Refitting BVH nodes");
|
||||
refit_nodes();
|
||||
}
|
||||
|
||||
BVHNode *BVH2::widen_children_nodes(const BVHNode *root)
|
||||
{
|
||||
return const_cast<BVHNode *>(root);
|
||||
@@ -331,7 +253,7 @@ void BVH2::refit_node(int idx, bool leaf, BoundBox &bbox, uint &visibility)
|
||||
const int c0 = data[0].x;
|
||||
const int c1 = data[0].y;
|
||||
|
||||
refit_primitives(c0, c1, bbox, visibility);
|
||||
BVH::refit_primitives(c0, c1, bbox, visibility);
|
||||
|
||||
/* TODO(sergey): De-duplicate with pack_leaf(). */
|
||||
float4 leaf_data[BVH_NODE_LEAF_SIZE];
|
||||
@@ -370,333 +292,4 @@ void BVH2::refit_node(int idx, bool leaf, BoundBox &bbox, uint &visibility)
|
||||
}
|
||||
}
|
||||
|
||||
/* Refitting */
|
||||
|
||||
void BVH2::refit_primitives(int start, int end, BoundBox &bbox, uint &visibility)
|
||||
{
|
||||
/* Refit range of primitives. */
|
||||
for (int prim = start; prim < end; prim++) {
|
||||
int pidx = pack.prim_index[prim];
|
||||
int tob = pack.prim_object[prim];
|
||||
Object *ob = objects[tob];
|
||||
|
||||
if (pidx == -1) {
|
||||
/* Object instance. */
|
||||
bbox.grow(ob->bounds);
|
||||
}
|
||||
else {
|
||||
/* Primitives. */
|
||||
if (pack.prim_type[prim] & PRIMITIVE_ALL_CURVE) {
|
||||
/* Curves. */
|
||||
const Hair *hair = static_cast<const Hair *>(ob->get_geometry());
|
||||
int prim_offset = (params.top_level) ? hair->prim_offset : 0;
|
||||
Hair::Curve curve = hair->get_curve(pidx - prim_offset);
|
||||
int k = PRIMITIVE_UNPACK_SEGMENT(pack.prim_type[prim]);
|
||||
|
||||
curve.bounds_grow(k, &hair->get_curve_keys()[0], &hair->get_curve_radius()[0], bbox);
|
||||
|
||||
/* Motion curves. */
|
||||
if (hair->get_use_motion_blur()) {
|
||||
Attribute *attr = hair->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
|
||||
          if (attr) {
            size_t hair_size = hair->get_curve_keys().size();
            size_t steps = hair->get_motion_steps() - 1;
            float3 *key_steps = attr->data_float3();

            for (size_t i = 0; i < steps; i++)
              curve.bounds_grow(k, key_steps + i * hair_size, &hair->get_curve_radius()[0], bbox);
          }
        }
      }
      else {
        /* Triangles. */
        const Mesh *mesh = static_cast<const Mesh *>(ob->get_geometry());
        int prim_offset = (params.top_level) ? mesh->prim_offset : 0;
        Mesh::Triangle triangle = mesh->get_triangle(pidx - prim_offset);
        const float3 *vpos = &mesh->verts[0];

        triangle.bounds_grow(vpos, bbox);

        /* Motion triangles. */
        if (mesh->use_motion_blur) {
          Attribute *attr = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);

          if (attr) {
            size_t mesh_size = mesh->verts.size();
            size_t steps = mesh->motion_steps - 1;
            float3 *vert_steps = attr->data_float3();

            for (size_t i = 0; i < steps; i++)
              triangle.bounds_grow(vert_steps + i * mesh_size, bbox);
          }
        }
      }
    }
    visibility |= ob->visibility_for_tracing();
  }
}

/* Triangles */

void BVH2::pack_triangle(int idx, float4 tri_verts[3])
{
  int tob = pack.prim_object[idx];
  assert(tob >= 0 && tob < objects.size());
  const Mesh *mesh = static_cast<const Mesh *>(objects[tob]->get_geometry());

  int tidx = pack.prim_index[idx];
  Mesh::Triangle t = mesh->get_triangle(tidx);
  const float3 *vpos = &mesh->verts[0];
  float3 v0 = vpos[t.v[0]];
  float3 v1 = vpos[t.v[1]];
  float3 v2 = vpos[t.v[2]];

  tri_verts[0] = float3_to_float4(v0);
  tri_verts[1] = float3_to_float4(v1);
  tri_verts[2] = float3_to_float4(v2);
}

void BVH2::pack_primitives()
{
  const size_t tidx_size = pack.prim_index.size();
  size_t num_prim_triangles = 0;
  /* Count number of triangles primitives in BVH. */
  for (unsigned int i = 0; i < tidx_size; i++) {
    if ((pack.prim_index[i] != -1)) {
      if ((pack.prim_type[i] & PRIMITIVE_ALL_TRIANGLE) != 0) {
        ++num_prim_triangles;
      }
    }
  }
  /* Reserve size for arrays. */
  pack.prim_tri_index.clear();
  pack.prim_tri_index.resize(tidx_size);
  pack.prim_tri_verts.clear();
  pack.prim_tri_verts.resize(num_prim_triangles * 3);
  pack.prim_visibility.clear();
  pack.prim_visibility.resize(tidx_size);
  /* Fill in all the arrays. */
  size_t prim_triangle_index = 0;
  for (unsigned int i = 0; i < tidx_size; i++) {
    if (pack.prim_index[i] != -1) {
      int tob = pack.prim_object[i];
      Object *ob = objects[tob];
      if ((pack.prim_type[i] & PRIMITIVE_ALL_TRIANGLE) != 0) {
        pack_triangle(i, (float4 *)&pack.prim_tri_verts[3 * prim_triangle_index]);
        pack.prim_tri_index[i] = 3 * prim_triangle_index;
        ++prim_triangle_index;
      }
      else {
        pack.prim_tri_index[i] = -1;
      }
      pack.prim_visibility[i] = ob->visibility_for_tracing();
    }
    else {
      pack.prim_tri_index[i] = -1;
      pack.prim_visibility[i] = 0;
    }
  }
}

/* Pack Instances */

void BVH2::pack_instances(size_t nodes_size, size_t leaf_nodes_size)
{
  /* Adjust primitive index to point to the triangle in the global array, for
   * geometry with transform applied and already in the top level BVH.
   */
  for (size_t i = 0; i < pack.prim_index.size(); i++) {
    if (pack.prim_index[i] != -1) {
      pack.prim_index[i] += objects[pack.prim_object[i]]->get_geometry()->prim_offset;
    }
  }

  /* track offsets of instanced BVH data in global array */
  size_t prim_offset = pack.prim_index.size();
  size_t nodes_offset = nodes_size;
  size_t nodes_leaf_offset = leaf_nodes_size;

  /* clear array that gives the node indexes for instanced objects */
  pack.object_node.clear();

  /* reserve */
  size_t prim_index_size = pack.prim_index.size();
  size_t prim_tri_verts_size = pack.prim_tri_verts.size();

  size_t pack_prim_index_offset = prim_index_size;
  size_t pack_prim_tri_verts_offset = prim_tri_verts_size;
  size_t pack_nodes_offset = nodes_size;
  size_t pack_leaf_nodes_offset = leaf_nodes_size;
  size_t object_offset = 0;

  foreach (Geometry *geom, geometry) {
    BVH2 *bvh = static_cast<BVH2 *>(geom->bvh);

    if (geom->need_build_bvh(params.bvh_layout)) {
      prim_index_size += bvh->pack.prim_index.size();
      prim_tri_verts_size += bvh->pack.prim_tri_verts.size();
      nodes_size += bvh->pack.nodes.size();
      leaf_nodes_size += bvh->pack.leaf_nodes.size();
    }
  }

  pack.prim_index.resize(prim_index_size);
  pack.prim_type.resize(prim_index_size);
  pack.prim_object.resize(prim_index_size);
  pack.prim_visibility.resize(prim_index_size);
  pack.prim_tri_verts.resize(prim_tri_verts_size);
  pack.prim_tri_index.resize(prim_index_size);
  pack.nodes.resize(nodes_size);
  pack.leaf_nodes.resize(leaf_nodes_size);
  pack.object_node.resize(objects.size());

  if (params.num_motion_curve_steps > 0 || params.num_motion_triangle_steps > 0) {
    pack.prim_time.resize(prim_index_size);
  }

  int *pack_prim_index = (pack.prim_index.size()) ? &pack.prim_index[0] : NULL;
  int *pack_prim_type = (pack.prim_type.size()) ? &pack.prim_type[0] : NULL;
  int *pack_prim_object = (pack.prim_object.size()) ? &pack.prim_object[0] : NULL;
  uint *pack_prim_visibility = (pack.prim_visibility.size()) ? &pack.prim_visibility[0] : NULL;
  float4 *pack_prim_tri_verts = (pack.prim_tri_verts.size()) ? &pack.prim_tri_verts[0] : NULL;
  uint *pack_prim_tri_index = (pack.prim_tri_index.size()) ? &pack.prim_tri_index[0] : NULL;
  int4 *pack_nodes = (pack.nodes.size()) ? &pack.nodes[0] : NULL;
  int4 *pack_leaf_nodes = (pack.leaf_nodes.size()) ? &pack.leaf_nodes[0] : NULL;
  float2 *pack_prim_time = (pack.prim_time.size()) ? &pack.prim_time[0] : NULL;

  unordered_map<Geometry *, int> geometry_map;

  /* merge */
  foreach (Object *ob, objects) {
    Geometry *geom = ob->get_geometry();

    /* We assume that if mesh doesn't need own BVH it was already included
     * into a top-level BVH and no packing here is needed.
     */
    if (!geom->need_build_bvh(params.bvh_layout)) {
      pack.object_node[object_offset++] = 0;
      continue;
    }

    /* if mesh already added once, don't add it again, but used set
     * node offset for this object */
    unordered_map<Geometry *, int>::iterator it = geometry_map.find(geom);

    if (geometry_map.find(geom) != geometry_map.end()) {
      int noffset = it->second;
      pack.object_node[object_offset++] = noffset;
      continue;
    }

    BVH2 *bvh = static_cast<BVH2 *>(geom->bvh);

    int noffset = nodes_offset;
    int noffset_leaf = nodes_leaf_offset;
    int geom_prim_offset = geom->prim_offset;

    /* fill in node indexes for instances */
    if (bvh->pack.root_index == -1)
      pack.object_node[object_offset++] = -noffset_leaf - 1;
    else
      pack.object_node[object_offset++] = noffset;

    geometry_map[geom] = pack.object_node[object_offset - 1];

    /* merge primitive, object and triangle indexes */
    if (bvh->pack.prim_index.size()) {
      size_t bvh_prim_index_size = bvh->pack.prim_index.size();
      int *bvh_prim_index = &bvh->pack.prim_index[0];
      int *bvh_prim_type = &bvh->pack.prim_type[0];
      uint *bvh_prim_visibility = &bvh->pack.prim_visibility[0];
      uint *bvh_prim_tri_index = &bvh->pack.prim_tri_index[0];
      float2 *bvh_prim_time = bvh->pack.prim_time.size() ? &bvh->pack.prim_time[0] : NULL;

      for (size_t i = 0; i < bvh_prim_index_size; i++) {
        if (bvh->pack.prim_type[i] & PRIMITIVE_ALL_CURVE) {
          pack_prim_index[pack_prim_index_offset] = bvh_prim_index[i] + geom_prim_offset;
          pack_prim_tri_index[pack_prim_index_offset] = -1;
        }
        else {
          pack_prim_index[pack_prim_index_offset] = bvh_prim_index[i] + geom_prim_offset;
          pack_prim_tri_index[pack_prim_index_offset] = bvh_prim_tri_index[i] +
                                                        pack_prim_tri_verts_offset;
        }

        pack_prim_type[pack_prim_index_offset] = bvh_prim_type[i];
        pack_prim_visibility[pack_prim_index_offset] = bvh_prim_visibility[i];
        pack_prim_object[pack_prim_index_offset] = 0; // unused for instances
        if (bvh_prim_time != NULL) {
          pack_prim_time[pack_prim_index_offset] = bvh_prim_time[i];
        }
        pack_prim_index_offset++;
      }
    }

    /* Merge triangle vertices data. */
    if (bvh->pack.prim_tri_verts.size()) {
      const size_t prim_tri_size = bvh->pack.prim_tri_verts.size();
      memcpy(pack_prim_tri_verts + pack_prim_tri_verts_offset,
             &bvh->pack.prim_tri_verts[0],
             prim_tri_size * sizeof(float4));
      pack_prim_tri_verts_offset += prim_tri_size;
    }

    /* merge nodes */
    if (bvh->pack.leaf_nodes.size()) {
      int4 *leaf_nodes_offset = &bvh->pack.leaf_nodes[0];
      size_t leaf_nodes_offset_size = bvh->pack.leaf_nodes.size();
      for (size_t i = 0, j = 0; i < leaf_nodes_offset_size; i += BVH_NODE_LEAF_SIZE, j++) {
        int4 data = leaf_nodes_offset[i];
        data.x += prim_offset;
        data.y += prim_offset;
        pack_leaf_nodes[pack_leaf_nodes_offset] = data;
        for (int j = 1; j < BVH_NODE_LEAF_SIZE; ++j) {
          pack_leaf_nodes[pack_leaf_nodes_offset + j] = leaf_nodes_offset[i + j];
        }
        pack_leaf_nodes_offset += BVH_NODE_LEAF_SIZE;
      }
    }

    if (bvh->pack.nodes.size()) {
      int4 *bvh_nodes = &bvh->pack.nodes[0];
      size_t bvh_nodes_size = bvh->pack.nodes.size();

      for (size_t i = 0, j = 0; i < bvh_nodes_size; j++) {
        size_t nsize, nsize_bbox;
        if (bvh_nodes[i].x & PATH_RAY_NODE_UNALIGNED) {
          nsize = BVH_UNALIGNED_NODE_SIZE;
          nsize_bbox = 0;
        }
        else {
          nsize = BVH_NODE_SIZE;
          nsize_bbox = 0;
        }

        memcpy(pack_nodes + pack_nodes_offset, bvh_nodes + i, nsize_bbox * sizeof(int4));

        /* Modify offsets into arrays */
        int4 data = bvh_nodes[i + nsize_bbox];
        data.z += (data.z < 0) ? -noffset_leaf : noffset;
        data.w += (data.w < 0) ? -noffset_leaf : noffset;
        pack_nodes[pack_nodes_offset + nsize_bbox] = data;

        /* Usually this copies nothing, but we better
         * be prepared for possible node size extension.
         */
        memcpy(&pack_nodes[pack_nodes_offset + nsize_bbox + 1],
               &bvh_nodes[i + nsize_bbox + 1],
               sizeof(int4) * (nsize - (nsize_bbox + 1)));

        pack_nodes_offset += nsize;
        i += nsize;
      }
    }

    nodes_offset += bvh->pack.nodes.size();
    nodes_leaf_offset += bvh->pack.leaf_nodes.size();
    prim_offset += bvh->pack.prim_index.size();
  }
}

CCL_NAMESPACE_END
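Aside: the index remapping that pack_instances() performs above is easier to see in isolation. The following is a minimal standalone sketch, not Cycles code: GeomPack, the merge loop and the printed output are invented for illustration, and the real function additionally rewrites visibility, node and leaf-node arrays with their own offsets.

#include <cstdio>
#include <vector>

/* Hypothetical stand-in for the per-geometry packs being merged. */
struct GeomPack {
  std::vector<int> prim_index; /* indices local to this geometry */
};

int main()
{
  std::vector<GeomPack> geoms = {{{0, 1, 2}}, {{0, 1}}};
  std::vector<int> global_prim_index; /* merged array, like PackedBVH::prim_index */
  std::vector<int> object_node;       /* where each geometry starts in the merge */

  int prim_offset = 0;
  for (const GeomPack &g : geoms) {
    object_node.push_back(prim_offset);
    for (int idx : g.prim_index) {
      /* Local index is rebased into the global array, mirroring
       * `bvh_prim_index[i] + geom_prim_offset` in pack_instances(). */
      global_prim_index.push_back(idx + prim_offset);
    }
    prim_offset += (int)g.prim_index.size();
  }

  for (size_t i = 0; i < global_prim_index.size(); i++)
    printf("%zu -> %d\n", i, global_prim_index[i]);
  return 0;
}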
@@ -26,30 +26,23 @@

CCL_NAMESPACE_BEGIN

class BVHNode;
struct BVHStackEntry;
class BVHParams;
class BoundBox;
class LeafNode;
class Object;
class Progress;

#define BVH_NODE_SIZE 4
#define BVH_NODE_LEAF_SIZE 1
#define BVH_UNALIGNED_NODE_SIZE 7

/* Pack Utility */
struct BVHStackEntry {
  const BVHNode *node;
  int idx;

  BVHStackEntry(const BVHNode *n = 0, int i = 0);
  int encodeIdx() const;
};

/* BVH2
 *
 * Typical BVH with each node having two children.
 */
class BVH2 : public BVH {
 public:
  void build(Progress &progress, Stats *stats);
  void refit(Progress &progress);

  PackedBVH pack;

 protected:
  /* constructor */
  friend class BVH;
@@ -58,10 +51,10 @@ class BVH2 : public BVH {
       const vector<Object *> &objects);

  /* Building process. */
  virtual BVHNode *widen_children_nodes(const BVHNode *root);
  virtual BVHNode *widen_children_nodes(const BVHNode *root) override;

  /* pack */
  void pack_nodes(const BVHNode *root);
  void pack_nodes(const BVHNode *root) override;

  void pack_leaf(const BVHStackEntry &e, const LeafNode *leaf);
  void pack_inner(const BVHStackEntry &e, const BVHStackEntry &e0, const BVHStackEntry &e1);
@@ -91,18 +84,8 @@ class BVH2 : public BVH {
                         uint visibility1);

  /* refit */
  void refit_nodes();
  void refit_nodes() override;
  void refit_node(int idx, bool leaf, BoundBox &bbox, uint &visibility);

  /* Refit range of primitives. */
  void refit_primitives(int start, int end, BoundBox &bbox, uint &visibility);

  /* triangles and strands */
  void pack_primitives();
  void pack_triangle(int idx, float4 storage[3]);

  /* merge instance BVH's */
  void pack_instances(size_t nodes_size, size_t leaf_nodes_size);
};

CCL_NAMESPACE_END
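Aside: the object_node entries written by pack_instances() use a sign-based encoding, storing `-noffset_leaf - 1` for leaves so that offset 0 stays usable for inner nodes. A hedged sketch of that encoding; the helper names below are invented for illustration, not Cycles API:

#include <cassert>

/* Inner nodes keep a non-negative offset into pack.nodes, leaves store
 * `-leaf_offset - 1` so the two ranges never collide. */
inline int encode_leaf(int leaf_offset) { return -leaf_offset - 1; }
inline bool is_leaf(int encoded) { return encoded < 0; }
inline int decode_leaf(int encoded) { return -encoded - 1; }

int main()
{
  int e = encode_leaf(5);
  assert(is_leaf(e) && decode_leaf(e) == 5);
  assert(!is_leaf(7)); /* plain inner-node offset */
  return 0;
}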
@@ -298,31 +298,82 @@ static bool rtc_progress_func(void *user_ptr, const double n)
  return !progress->get_cancel();
}

static size_t count_primitives(Geometry *geom)
{
  if (geom->geometry_type == Geometry::MESH || geom->geometry_type == Geometry::VOLUME) {
    Mesh *mesh = static_cast<Mesh *>(geom);
    return mesh->num_triangles();
  }
  else if (geom->geometry_type == Geometry::HAIR) {
    Hair *hair = static_cast<Hair *>(geom);
    return hair->num_segments();
  }

  return 0;
}

BVHEmbree::BVHEmbree(const BVHParams &params_,
                     const vector<Geometry *> &geometry_,
                     const vector<Object *> &objects_)
                     const vector<Object *> &objects_,
                     const Device *device)
    : BVH(params_, geometry_, objects_),
      scene(NULL),
      rtc_device(NULL),
      build_quality(RTC_BUILD_QUALITY_REFIT)
      mem_used(0),
      top_level(NULL),
      rtc_device((RTCDevice)device->bvh_device()),
      stats(NULL),
      curve_subdivisions(params.curve_subdivisions),
      build_quality(RTC_BUILD_QUALITY_REFIT),
      dynamic_scene(true)
{
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);

  rtcSetDeviceErrorFunction(rtc_device, rtc_error_func, NULL);

  pack.root_index = -1;
}

BVHEmbree::~BVHEmbree()
{
  if (scene) {
    rtcReleaseScene(scene);
  if (!params.top_level) {
    destroy(scene);
  }
}

void BVHEmbree::build(Progress &progress, Stats *stats, RTCDevice rtc_device_)
void BVHEmbree::destroy(RTCScene scene)
{
  rtc_device = rtc_device_;
  assert(rtc_device);
  if (scene) {
    rtcReleaseScene(scene);
    scene = NULL;
  }
}

  rtcSetDeviceErrorFunction(rtc_device, rtc_error_func, NULL);
void BVHEmbree::delete_rtcScene()
{
  if (scene) {
    /* When this BVH is used as an instance in a top level BVH, don't delete now
     * Let the top_level BVH know that it should delete it later. */
    if (top_level) {
      top_level->add_delayed_delete_scene(scene);
    }
    else {
      rtcReleaseScene(scene);
      if (delayed_delete_scenes.size()) {
        foreach (RTCScene s, delayed_delete_scenes) {
          rtcReleaseScene(s);
        }
      }
      delayed_delete_scenes.clear();
    }
    scene = NULL;
  }
}

void BVHEmbree::build(Progress &progress, Stats *stats_)
{
  assert(rtc_device);
  stats = stats_;
  rtcSetDeviceMemoryMonitorFunction(rtc_device, rtc_memory_monitor_func, stats);

  progress.set_substatus("Building BVH");
@@ -343,7 +394,35 @@ void BVHEmbree::build(Progress &progress, Stats *stats, RTCDevice rtc_device_)
                                  RTC_BUILD_QUALITY_MEDIUM);
  rtcSetSceneBuildQuality(scene, build_quality);

  /* Count triangles and curves first, reserve arrays once. */
  size_t prim_count = 0;

  foreach (Object *ob, objects) {
    if (params.top_level) {
      if (!ob->is_traceable()) {
        continue;
      }
      if (!ob->get_geometry()->is_instanced()) {
        prim_count += count_primitives(ob->get_geometry());
      }
      else {
        ++prim_count;
      }
    }
    else {
      prim_count += count_primitives(ob->get_geometry());
    }
  }

  pack.prim_object.reserve(prim_count);
  pack.prim_type.reserve(prim_count);
  pack.prim_index.reserve(prim_count);
  pack.prim_tri_index.reserve(prim_count);

  int i = 0;

  pack.object_node.clear();

  foreach (Object *ob, objects) {
    if (params.top_level) {
      if (!ob->is_traceable()) {
@@ -366,11 +445,37 @@ void BVHEmbree::build(Progress &progress, Stats *stats, RTCDevice rtc_device_)
  }

  if (progress.get_cancel()) {
    delete_rtcScene();
    stats = NULL;
    return;
  }

  rtcSetSceneProgressMonitorFunction(scene, rtc_progress_func, &progress);
  rtcCommitScene(scene);

  pack_primitives();

  if (progress.get_cancel()) {
    delete_rtcScene();
    stats = NULL;
    return;
  }

  progress.set_substatus("Packing geometry");
  pack_nodes(NULL);

  stats = NULL;
}

void BVHEmbree::copy_to_device(Progress & /*progress*/, DeviceScene *dscene)
{
  dscene->data.bvh.scene = scene;
}

BVHNode *BVHEmbree::widen_children_nodes(const BVHNode * /*root*/)
{
  assert(!"Must not be called.");
  return NULL;
}

void BVHEmbree::add_object(Object *ob, int i)
@@ -393,8 +498,15 @@ void BVHEmbree::add_object(Object *ob, int i)

void BVHEmbree::add_instance(Object *ob, int i)
{
  if (!ob || !ob->get_geometry()) {
    assert(0);
    return;
  }
  BVHEmbree *instance_bvh = (BVHEmbree *)(ob->get_geometry()->bvh);
  assert(instance_bvh != NULL);

  if (instance_bvh->top_level != this) {
    instance_bvh->top_level = this;
  }

  const size_t num_object_motion_steps = ob->use_motion() ? ob->get_motion().size() : 1;
  const size_t num_motion_steps = min(num_object_motion_steps, RTC_MAX_TIME_STEP_COUNT);
@@ -426,6 +538,11 @@ void BVHEmbree::add_instance(Object *ob, int i)
        geom_id, 0, RTC_FORMAT_FLOAT3X4_ROW_MAJOR, (const float *)&ob->get_tfm());
  }

  pack.prim_index.push_back_slow(-1);
  pack.prim_object.push_back_slow(i);
  pack.prim_type.push_back_slow(PRIMITIVE_NONE);
  pack.prim_tri_index.push_back_slow(-1);

  rtcSetGeometryUserData(geom_id, (void *)instance_bvh->scene);
  rtcSetGeometryMask(geom_id, ob->visibility_for_tracing());

@@ -436,22 +553,20 @@ void BVHEmbree::add_instance(Object *ob, int i)

void BVHEmbree::add_triangles(const Object *ob, const Mesh *mesh, int i)
{
  size_t prim_offset = mesh->optix_prim_offset;

  size_t prim_offset = pack.prim_index.size();
  const Attribute *attr_mP = NULL;
  size_t num_motion_steps = 1;
  size_t num_geometry_motion_steps = 1;
  if (mesh->has_motion_blur()) {
    attr_mP = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
    if (attr_mP) {
      num_motion_steps = mesh->get_motion_steps();
      num_geometry_motion_steps = mesh->get_motion_steps();
    }
  }

  assert(num_motion_steps <= RTC_MAX_TIME_STEP_COUNT);
  num_motion_steps = min(num_motion_steps, RTC_MAX_TIME_STEP_COUNT);
  const size_t num_motion_steps = min(num_geometry_motion_steps, RTC_MAX_TIME_STEP_COUNT);
  assert(num_geometry_motion_steps <= RTC_MAX_TIME_STEP_COUNT);

  const size_t num_triangles = mesh->num_triangles();

  RTCGeometry geom_id = rtcNewGeometry(rtc_device, RTC_GEOMETRY_TYPE_TRIANGLE);
  rtcSetGeometryBuildQuality(geom_id, build_quality);
  rtcSetGeometryTimeStepCount(geom_id, num_motion_steps);
@@ -473,6 +588,22 @@ void BVHEmbree::add_triangles(const Object *ob, const Mesh *mesh, int i)

  set_tri_vertex_buffer(geom_id, mesh, false);

  size_t prim_object_size = pack.prim_object.size();
  pack.prim_object.resize(prim_object_size + num_triangles);
  size_t prim_type_size = pack.prim_type.size();
  pack.prim_type.resize(prim_type_size + num_triangles);
  size_t prim_index_size = pack.prim_index.size();
  pack.prim_index.resize(prim_index_size + num_triangles);
  pack.prim_tri_index.resize(prim_index_size + num_triangles);
  int prim_type = (num_motion_steps > 1 ? PRIMITIVE_MOTION_TRIANGLE : PRIMITIVE_TRIANGLE);

  for (size_t j = 0; j < num_triangles; ++j) {
    pack.prim_object[prim_object_size + j] = i;
    pack.prim_type[prim_type_size + j] = prim_type;
    pack.prim_index[prim_index_size + j] = j;
    pack.prim_tri_index[prim_index_size + j] = j;
  }

  rtcSetGeometryUserData(geom_id, (void *)prim_offset);
  rtcSetGeometryOccludedFilterFunction(geom_id, rtc_filter_occluded_func);
  rtcSetGeometryMask(geom_id, ob->visibility_for_tracing());
@@ -498,12 +629,12 @@ void BVHEmbree::set_tri_vertex_buffer(RTCGeometry geom_id, const Mesh *mesh, con
      }
    }
  }
  const size_t num_verts = mesh->get_verts().size();
  const size_t num_verts = mesh->verts.size();

  for (int t = 0; t < num_motion_steps; ++t) {
    const float3 *verts;
    if (t == t_mid) {
      verts = mesh->get_verts().data();
      verts = &mesh->verts[0];
    }
    else {
      int t_ = (t > t_mid) ? (t - 1) : t;
@@ -605,19 +736,24 @@ void BVHEmbree::set_curve_vertex_buffer(RTCGeometry geom_id, const Hair *hair, c

void BVHEmbree::add_curves(const Object *ob, const Hair *hair, int i)
{
  size_t prim_offset = hair->optix_prim_offset;

  size_t prim_offset = pack.prim_index.size();
  const Attribute *attr_mP = NULL;
  size_t num_motion_steps = 1;
  size_t num_geometry_motion_steps = 1;
  if (hair->has_motion_blur()) {
    attr_mP = hair->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
    if (attr_mP) {
      num_motion_steps = hair->get_motion_steps();
      num_geometry_motion_steps = hair->get_motion_steps();
    }
  }

  assert(num_motion_steps <= RTC_MAX_TIME_STEP_COUNT);
  num_motion_steps = min(num_motion_steps, RTC_MAX_TIME_STEP_COUNT);
  const size_t num_motion_steps = min(num_geometry_motion_steps, RTC_MAX_TIME_STEP_COUNT);
  const PrimitiveType primitive_type =
      (num_motion_steps > 1) ?
          ((hair->curve_shape == CURVE_RIBBON) ? PRIMITIVE_MOTION_CURVE_RIBBON :
                                                 PRIMITIVE_MOTION_CURVE_THICK) :
          ((hair->curve_shape == CURVE_RIBBON) ? PRIMITIVE_CURVE_RIBBON : PRIMITIVE_CURVE_THICK);

  assert(num_geometry_motion_steps <= RTC_MAX_TIME_STEP_COUNT);

  const size_t num_curves = hair->num_curves();
  size_t num_segments = 0;
@@ -627,12 +763,22 @@ void BVHEmbree::add_curves(const Object *ob, const Hair *hair, int i)
    num_segments += c.num_segments();
  }

  /* Make room for Cycles specific data. */
  size_t prim_object_size = pack.prim_object.size();
  pack.prim_object.resize(prim_object_size + num_segments);
  size_t prim_type_size = pack.prim_type.size();
  pack.prim_type.resize(prim_type_size + num_segments);
  size_t prim_index_size = pack.prim_index.size();
  pack.prim_index.resize(prim_index_size + num_segments);
  size_t prim_tri_index_size = pack.prim_index.size();
  pack.prim_tri_index.resize(prim_tri_index_size + num_segments);

  enum RTCGeometryType type = (hair->curve_shape == CURVE_RIBBON ?
                                   RTC_GEOMETRY_TYPE_FLAT_CATMULL_ROM_CURVE :
                                   RTC_GEOMETRY_TYPE_ROUND_CATMULL_ROM_CURVE);

  RTCGeometry geom_id = rtcNewGeometry(rtc_device, type);
  rtcSetGeometryTessellationRate(geom_id, params.curve_subdivisions + 1);
  rtcSetGeometryTessellationRate(geom_id, curve_subdivisions + 1);
  unsigned *rtc_indices = (unsigned *)rtcSetNewGeometryBuffer(
      geom_id, RTC_BUFFER_TYPE_INDEX, 0, RTC_FORMAT_UINT, sizeof(int), num_segments);
  size_t rtc_index = 0;
@@ -642,6 +788,11 @@ void BVHEmbree::add_curves(const Object *ob, const Hair *hair, int i)
      rtc_indices[rtc_index] = c.first_key + k;
      /* Room for extra CVs at Catmull-Rom splines. */
      rtc_indices[rtc_index] += j * 2;
      /* Cycles specific data. */
      pack.prim_object[prim_object_size + rtc_index] = i;
      pack.prim_type[prim_type_size + rtc_index] = (PRIMITIVE_PACK_SEGMENT(primitive_type, k));
      pack.prim_index[prim_index_size + rtc_index] = j;
      pack.prim_tri_index[prim_tri_index_size + rtc_index] = rtc_index;

      ++rtc_index;
    }
@@ -667,10 +818,134 @@ void BVHEmbree::add_curves(const Object *ob, const Hair *hair, int i)
  rtcReleaseGeometry(geom_id);
}

void BVHEmbree::refit(Progress &progress)
void BVHEmbree::pack_nodes(const BVHNode *)
{
  progress.set_substatus("Refitting BVH nodes");
  /* Quite a bit of this code is for compatibility with Cycles' native BVH. */
  if (!params.top_level) {
    return;
  }

  for (size_t i = 0; i < pack.prim_index.size(); ++i) {
    if (pack.prim_index[i] != -1) {
      pack.prim_index[i] += objects[pack.prim_object[i]]->get_geometry()->prim_offset;
    }
  }

  size_t prim_offset = pack.prim_index.size();

  /* reserve */
  size_t prim_index_size = pack.prim_index.size();
  size_t prim_tri_verts_size = pack.prim_tri_verts.size();

  size_t pack_prim_index_offset = prim_index_size;
  size_t pack_prim_tri_verts_offset = prim_tri_verts_size;
  size_t object_offset = 0;

  map<Geometry *, int> geometry_map;

  foreach (Object *ob, objects) {
    Geometry *geom = ob->get_geometry();
    BVH *bvh = geom->bvh;

    if (geom->need_build_bvh(BVH_LAYOUT_EMBREE)) {
      if (geometry_map.find(geom) == geometry_map.end()) {
        prim_index_size += bvh->pack.prim_index.size();
        prim_tri_verts_size += bvh->pack.prim_tri_verts.size();
        geometry_map[geom] = 1;
      }
    }
  }

  geometry_map.clear();

  pack.prim_index.resize(prim_index_size);
  pack.prim_type.resize(prim_index_size);
  pack.prim_object.resize(prim_index_size);
  pack.prim_visibility.clear();
  pack.prim_tri_verts.resize(prim_tri_verts_size);
  pack.prim_tri_index.resize(prim_index_size);
  pack.object_node.resize(objects.size());

  int *pack_prim_index = (pack.prim_index.size()) ? &pack.prim_index[0] : NULL;
  int *pack_prim_type = (pack.prim_type.size()) ? &pack.prim_type[0] : NULL;
  int *pack_prim_object = (pack.prim_object.size()) ? &pack.prim_object[0] : NULL;
  float4 *pack_prim_tri_verts = (pack.prim_tri_verts.size()) ? &pack.prim_tri_verts[0] : NULL;
  uint *pack_prim_tri_index = (pack.prim_tri_index.size()) ? &pack.prim_tri_index[0] : NULL;

  /* merge */
  foreach (Object *ob, objects) {
    Geometry *geom = ob->get_geometry();

    /* We assume that if mesh doesn't need own BVH it was already included
     * into a top-level BVH and no packing here is needed.
     */
    if (!geom->need_build_bvh(BVH_LAYOUT_EMBREE)) {
      pack.object_node[object_offset++] = prim_offset;
      continue;
    }

    /* if geom already added once, don't add it again, but used set
     * node offset for this object */
    map<Geometry *, int>::iterator it = geometry_map.find(geom);

    if (geometry_map.find(geom) != geometry_map.end()) {
      int noffset = it->second;
      pack.object_node[object_offset++] = noffset;
      continue;
    }

    BVHEmbree *bvh = (BVHEmbree *)geom->bvh;

    rtc_memory_monitor_func(stats, unaccounted_mem, true);
    unaccounted_mem = 0;

    int geom_prim_offset = geom->prim_offset;

    /* fill in node indexes for instances */
    pack.object_node[object_offset++] = prim_offset;

    geometry_map[geom] = pack.object_node[object_offset - 1];

    /* merge primitive, object and triangle indexes */
    if (bvh->pack.prim_index.size()) {
      size_t bvh_prim_index_size = bvh->pack.prim_index.size();
      int *bvh_prim_index = &bvh->pack.prim_index[0];
      int *bvh_prim_type = &bvh->pack.prim_type[0];
      uint *bvh_prim_tri_index = &bvh->pack.prim_tri_index[0];

      for (size_t i = 0; i < bvh_prim_index_size; ++i) {
        if (bvh->pack.prim_type[i] & PRIMITIVE_ALL_CURVE) {
          pack_prim_index[pack_prim_index_offset] = bvh_prim_index[i] + geom_prim_offset;
          pack_prim_tri_index[pack_prim_index_offset] = -1;
        }
        else {
          pack_prim_index[pack_prim_index_offset] = bvh_prim_index[i] + geom_prim_offset;
          pack_prim_tri_index[pack_prim_index_offset] = bvh_prim_tri_index[i] +
                                                        pack_prim_tri_verts_offset;
        }

        pack_prim_type[pack_prim_index_offset] = bvh_prim_type[i];
        pack_prim_object[pack_prim_index_offset] = 0;

        ++pack_prim_index_offset;
      }
    }

    /* Merge triangle vertices data. */
    if (bvh->pack.prim_tri_verts.size()) {
      const size_t prim_tri_size = bvh->pack.prim_tri_verts.size();
      memcpy(pack_prim_tri_verts + pack_prim_tri_verts_offset,
             &bvh->pack.prim_tri_verts[0],
             prim_tri_size * sizeof(float4));
      pack_prim_tri_verts_offset += prim_tri_size;
    }

    prim_offset += bvh->pack.prim_index.size();
  }
}

void BVHEmbree::refit_nodes()
{
  /* Update all vertex buffers, then tell Embree to rebuild/-fit the BVHs. */
  unsigned geom_id = 0;
  foreach (Object *ob, objects) {
@@ -682,7 +957,6 @@ void BVHEmbree::refit(Progress &progress)
      if (mesh->num_triangles() > 0) {
        RTCGeometry geom = rtcGetGeometry(scene, geom_id);
        set_tri_vertex_buffer(geom, mesh, true);
        rtcSetGeometryUserData(geom, (void *)mesh->optix_prim_offset);
        rtcCommitGeometry(geom);
      }
    }
@@ -691,17 +965,14 @@ void BVHEmbree::refit(Progress &progress)
      if (hair->num_curves() > 0) {
        RTCGeometry geom = rtcGetGeometry(scene, geom_id + 1);
        set_curve_vertex_buffer(geom, hair, true);
        rtcSetGeometryUserData(geom, (void *)hair->optix_prim_offset);
        rtcCommitGeometry(geom);
      }
    }
    }
    geom_id += 2;
  }

  rtcCommitScene(scene);
}

CCL_NAMESPACE_END

#endif /* WITH_EMBREE */
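Aside: delete_rtcScene() and add_delayed_delete_scene() above implement a deferred release: an instanced scene that is still referenced by a top-level BVH is queued on that top level and only released when the top level itself is torn down. A rough standalone sketch of the idea, using a dummy Scene type instead of RTCScene and invented names throughout:

#include <vector>

struct Scene {
  bool released = false;
};
static void release(Scene *s)
{
  s->released = true; /* stands in for rtcReleaseScene() */
}

struct TopLevelBVH {
  std::vector<Scene *> delayed;
  void add_delayed_delete_scene(Scene *s) { delayed.push_back(s); }
  void release_all()
  {
    for (Scene *s : delayed)
      release(s); /* released only once the top level goes away */
    delayed.clear();
  }
};

struct InstanceBVH {
  Scene *scene = nullptr;
  TopLevelBVH *top_level = nullptr;
  void delete_scene()
  {
    if (!scene)
      return;
    if (top_level)
      top_level->add_delayed_delete_scene(scene); /* still referenced above */
    else
      release(scene);
    scene = nullptr;
  }
};

int main()
{
  Scene s;
  TopLevelBVH top;
  InstanceBVH inst;
  inst.scene = &s;
  inst.top_level = &top;
  inst.delete_scene(); /* queued, not yet released */
  top.release_all();   /* released here */
  return 0;
}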
@@ -31,34 +31,56 @@

CCL_NAMESPACE_BEGIN

class Geometry;
class Hair;
class Mesh;

class BVHEmbree : public BVH {
 public:
  void build(Progress &progress, Stats *stats, RTCDevice rtc_device);
  void refit(Progress &progress);

  virtual void build(Progress &progress, Stats *stats) override;
  virtual void copy_to_device(Progress &progress, DeviceScene *dscene) override;
  virtual ~BVHEmbree();
  RTCScene scene;
  static void destroy(RTCScene);

  /* Building process. */
  virtual BVHNode *widen_children_nodes(const BVHNode *root) override;

 protected:
  friend class BVH;
  BVHEmbree(const BVHParams &params,
            const vector<Geometry *> &geometry,
            const vector<Object *> &objects);
  virtual ~BVHEmbree();
            const vector<Object *> &objects,
            const Device *device);

  virtual void pack_nodes(const BVHNode *) override;
  virtual void refit_nodes() override;

  void add_object(Object *ob, int i);
  void add_instance(Object *ob, int i);
  void add_curves(const Object *ob, const Hair *hair, int i);
  void add_triangles(const Object *ob, const Mesh *mesh, int i);

  ssize_t mem_used;

  void add_delayed_delete_scene(RTCScene scene)
  {
    delayed_delete_scenes.push_back(scene);
  }
  BVHEmbree *top_level;

 private:
  void delete_rtcScene();
  void set_tri_vertex_buffer(RTCGeometry geom_id, const Mesh *mesh, const bool update);
  void set_curve_vertex_buffer(RTCGeometry geom_id, const Hair *hair, const bool update);

  RTCDevice rtc_device;

  Stats *stats;
  vector<RTCScene> delayed_delete_scenes;
  int curve_subdivisions;
  enum RTCBuildQuality build_quality;
  bool dynamic_scene;
};

CCL_NAMESPACE_END
@@ -1,37 +0,0 @@
/*
 * Copyright 2020, Blender Foundation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bvh/bvh_multi.h"

#include "util/util_foreach.h"

CCL_NAMESPACE_BEGIN

BVHMulti::BVHMulti(const BVHParams &params_,
                   const vector<Geometry *> &geometry_,
                   const vector<Object *> &objects_)
    : BVH(params_, geometry_, objects_)
{
}

BVHMulti::~BVHMulti()
{
  foreach (BVH *bvh, sub_bvhs) {
    delete bvh;
  }
}

CCL_NAMESPACE_END
@@ -1,39 +0,0 @@
/*
 * Copyright 2020, Blender Foundation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __BVH_MULTI_H__
#define __BVH_MULTI_H__

#include "bvh/bvh.h"
#include "bvh/bvh_params.h"

CCL_NAMESPACE_BEGIN

class BVHMulti : public BVH {
 public:
  vector<BVH *> sub_bvhs;

 protected:
  friend class BVH;
  BVHMulti(const BVHParams &params,
           const vector<Geometry *> &geometry,
           const vector<Object *> &objects);
  virtual ~BVHMulti();
};

CCL_NAMESPACE_END

#endif /* __BVH_MULTI_H__ */
@@ -19,22 +19,212 @@

# include "bvh/bvh_optix.h"

# include "device/device.h"

# include "render/geometry.h"
# include "render/hair.h"
# include "render/mesh.h"
# include "render/object.h"

# include "util/util_foreach.h"
# include "util/util_logging.h"
# include "util/util_progress.h"

CCL_NAMESPACE_BEGIN

BVHOptiX::BVHOptiX(const BVHParams &params_,
                   const vector<Geometry *> &geometry_,
                   const vector<Object *> &objects_,
                   Device *device)
    : BVH(params_, geometry_, objects_),
      traversable_handle(0),
      as_data(device, params_.top_level ? "optix tlas" : "optix blas"),
      motion_transform_data(device, "optix motion transform")
                   const vector<Object *> &objects_)
    : BVH(params_, geometry_, objects_)
{
  optix_handle = 0;
  optix_data_handle = 0;
  do_refit = false;
}

BVHOptiX::~BVHOptiX()
{
  // Acceleration structure memory is freed via the 'as_data' destructor
}

void BVHOptiX::build(Progress &, Stats *)
{
  if (params.top_level)
    pack_tlas();
  else
    pack_blas();
}

void BVHOptiX::copy_to_device(Progress &progress, DeviceScene *dscene)
{
  progress.set_status("Updating Scene BVH", "Building OptiX acceleration structure");

  Device *const device = dscene->bvh_nodes.device;
  if (!device->build_optix_bvh(this))
    progress.set_error("Failed to build OptiX acceleration structure");
}

void BVHOptiX::pack_blas()
{
  // Bottom-level BVH can contain multiple primitive types, so merge them:
  assert(geometry.size() == 1 && objects.size() == 1); // These are built per-mesh
  Geometry *const geom = geometry[0];

  if (geom->geometry_type == Geometry::HAIR) {
    Hair *const hair = static_cast<Hair *const>(geom);
    if (hair->num_curves() > 0) {
      const size_t num_curves = hair->num_curves();
      const size_t num_segments = hair->num_segments();
      pack.prim_type.reserve(pack.prim_type.size() + num_segments);
      pack.prim_index.reserve(pack.prim_index.size() + num_segments);
      pack.prim_object.reserve(pack.prim_object.size() + num_segments);
      // 'pack.prim_time' is only used in geom_curve_intersect.h
      // It is not needed because of OPTIX_MOTION_FLAG_[START|END]_VANISH

      uint type = (hair->get_use_motion_blur() &&
                   hair->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION)) ?
                      ((hair->curve_shape == CURVE_RIBBON) ? PRIMITIVE_MOTION_CURVE_RIBBON :
                                                             PRIMITIVE_MOTION_CURVE_THICK) :
                      ((hair->curve_shape == CURVE_RIBBON) ? PRIMITIVE_CURVE_RIBBON :
                                                             PRIMITIVE_CURVE_THICK);

      for (size_t j = 0; j < num_curves; ++j) {
        const Hair::Curve curve = hair->get_curve(j);
        for (size_t k = 0; k < curve.num_segments(); ++k) {
          pack.prim_type.push_back_reserved(PRIMITIVE_PACK_SEGMENT(type, k));
          // Each curve segment points back to its curve index
          pack.prim_index.push_back_reserved(j);
          pack.prim_object.push_back_reserved(0);
        }
      }
    }
  }
  else if (geom->geometry_type == Geometry::MESH || geom->geometry_type == Geometry::VOLUME) {
    Mesh *const mesh = static_cast<Mesh *const>(geom);
    if (mesh->num_triangles() > 0) {
      const size_t num_triangles = mesh->num_triangles();
      pack.prim_type.reserve(pack.prim_type.size() + num_triangles);
      pack.prim_index.reserve(pack.prim_index.size() + num_triangles);
      pack.prim_object.reserve(pack.prim_object.size() + num_triangles);

      uint type = PRIMITIVE_TRIANGLE;
      if (mesh->get_use_motion_blur() && mesh->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION))
        type = PRIMITIVE_MOTION_TRIANGLE;

      for (size_t k = 0; k < num_triangles; ++k) {
        pack.prim_type.push_back_reserved(type);
        pack.prim_index.push_back_reserved(k);
        pack.prim_object.push_back_reserved(0);
      }
    }
  }

  // Initialize visibility to zero and later update it during top-level build
  uint prev_visibility = objects[0]->get_visibility();
  objects[0]->set_visibility(0);

  // Update 'pack.prim_tri_index', 'pack.prim_tri_verts' and 'pack.prim_visibility'
  pack_primitives();

  // Reset visibility after packing
  objects[0]->set_visibility(prev_visibility);
}

void BVHOptiX::pack_tlas()
{
  // Calculate total packed size
  size_t prim_index_size = 0;
  size_t prim_tri_verts_size = 0;
  foreach (Geometry *geom, geometry) {
    BVH *const bvh = geom->bvh;
    prim_index_size += bvh->pack.prim_index.size();
    prim_tri_verts_size += bvh->pack.prim_tri_verts.size();
  }

  if (prim_index_size == 0)
    return; // Abort right away if this is an empty BVH

  size_t pack_offset = 0;
  size_t pack_verts_offset = 0;

  pack.prim_type.resize(prim_index_size);
  int *pack_prim_type = pack.prim_type.data();
  pack.prim_index.resize(prim_index_size);
  int *pack_prim_index = pack.prim_index.data();
  pack.prim_object.resize(prim_index_size);
  int *pack_prim_object = pack.prim_object.data();
  pack.prim_visibility.resize(prim_index_size);
  uint *pack_prim_visibility = pack.prim_visibility.data();
  pack.prim_tri_index.resize(prim_index_size);
  uint *pack_prim_tri_index = pack.prim_tri_index.data();
  pack.prim_tri_verts.resize(prim_tri_verts_size);
  float4 *pack_prim_tri_verts = pack.prim_tri_verts.data();

  // Top-level BVH should only contain instances, see 'Geometry::need_build_bvh'
  // Iterate over scene mesh list instead of objects, since the 'prim_offset' is calculated based
  // on that list, which may be ordered differently from the object list.
  foreach (Geometry *geom, geometry) {
    PackedBVH &bvh_pack = geom->bvh->pack;
    int geom_prim_offset = geom->prim_offset;

    // Merge visibility flags of all objects and fix object indices for non-instanced geometry
    int object_index = 0; // Unused for instanced geometry
    int object_visibility = 0;
    foreach (Object *ob, objects) {
      if (ob->get_geometry() == geom) {
        object_visibility |= ob->visibility_for_tracing();
        if (!geom->is_instanced()) {
          object_index = ob->get_device_index();
          break;
        }
      }
    }

    // Merge primitive, object and triangle indexes
    if (!bvh_pack.prim_index.empty()) {
      int *bvh_prim_type = &bvh_pack.prim_type[0];
      int *bvh_prim_index = &bvh_pack.prim_index[0];
      uint *bvh_prim_tri_index = &bvh_pack.prim_tri_index[0];
      uint *bvh_prim_visibility = &bvh_pack.prim_visibility[0];

      for (size_t i = 0; i < bvh_pack.prim_index.size(); i++, pack_offset++) {
        if (bvh_pack.prim_type[i] & PRIMITIVE_ALL_CURVE) {
          pack_prim_index[pack_offset] = bvh_prim_index[i] + geom_prim_offset;
          pack_prim_tri_index[pack_offset] = -1;
        }
        else {
          pack_prim_index[pack_offset] = bvh_prim_index[i] + geom_prim_offset;
          pack_prim_tri_index[pack_offset] = bvh_prim_tri_index[i] + pack_verts_offset;
        }

        pack_prim_type[pack_offset] = bvh_prim_type[i];
        pack_prim_object[pack_offset] = object_index;
        pack_prim_visibility[pack_offset] = bvh_prim_visibility[i] | object_visibility;
      }
    }

    // Merge triangle vertex data
    if (!bvh_pack.prim_tri_verts.empty()) {
      const size_t prim_tri_size = bvh_pack.prim_tri_verts.size();
      memcpy(pack_prim_tri_verts + pack_verts_offset,
             bvh_pack.prim_tri_verts.data(),
             prim_tri_size * sizeof(float4));
      pack_verts_offset += prim_tri_size;
    }
  }
}

void BVHOptiX::pack_nodes(const BVHNode *)
{
}

void BVHOptiX::refit_nodes()
{
  do_refit = true;
}

BVHNode *BVHOptiX::widen_children_nodes(const BVHNode *)
{
  return NULL;
}

CCL_NAMESPACE_END
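Aside: both the Embree and OptiX packing loops store the curve segment number inside the primitive type word via PRIMITIVE_PACK_SEGMENT. A sketch of the pack/unpack idea; the shift value here is an assumption for illustration only, the real macros are defined in Cycles' kernel_types.h:

#include <cassert>

constexpr unsigned SEGMENT_SHIFT = 16; /* assumed bit layout, not the real one */

inline unsigned pack_segment(unsigned type, unsigned segment)
{
  return type | (segment << SEGMENT_SHIFT);
}
inline unsigned unpack_segment(unsigned packed)
{
  return packed >> SEGMENT_SHIFT;
}
inline unsigned unpack_type(unsigned packed)
{
  return packed & ((1u << SEGMENT_SHIFT) - 1);
}

int main()
{
  unsigned p = pack_segment(/*type=*/2, /*segment=*/5);
  assert(unpack_segment(p) == 5 && unpack_type(p) == 2);
  return 0;
}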
@@ -26,19 +26,33 @@

CCL_NAMESPACE_BEGIN

class BVHOptiX : public BVH {
 public:
  uint64_t traversable_handle;
  device_only_memory<char> as_data;
  device_only_memory<char> motion_transform_data;
class Geometry;
class Optix;

 protected:
class BVHOptiX : public BVH {
  friend class BVH;

 public:
  uint64_t optix_handle;
  uint64_t optix_data_handle;
  bool do_refit;

  BVHOptiX(const BVHParams &params,
           const vector<Geometry *> &geometry,
           const vector<Object *> &objects,
           Device *device);
           const vector<Object *> &objects);
  virtual ~BVHOptiX();

  virtual void build(Progress &progress, Stats *) override;
  virtual void copy_to_device(Progress &progress, DeviceScene *dscene) override;

 private:
  void pack_blas();
  void pack_tlas();

  virtual void pack_nodes(const BVHNode *) override;
  virtual void refit_nodes() override;

  virtual BVHNode *widen_children_nodes(const BVHNode *) override;
};

CCL_NAMESPACE_END
@@ -71,7 +71,6 @@ class CUDADevice : public Device {
  };
  typedef map<device_memory *, CUDAMem> CUDAMemMap;
  CUDAMemMap cuda_mem_map;
  thread_mutex cuda_mem_map_mutex;

  struct PixelMem {
    GLuint cuPBO;
@@ -718,10 +718,8 @@ void CUDADevice::init_host_memory()
void CUDADevice::load_texture_info()
{
  if (need_texture_info) {
    /* Unset flag before copying, so this does not loop indefinitely if the copy below calls
     * into 'move_textures_to_host' (which calls 'load_texture_info' again). */
    need_texture_info = false;
    texture_info.copy_to_device();
    need_texture_info = false;
  }
}

@@ -742,7 +740,6 @@ void CUDADevice::move_textures_to_host(size_t size, bool for_texture)
  size_t max_size = 0;
  bool max_is_image = false;

  thread_scoped_lock lock(cuda_mem_map_mutex);
  foreach (CUDAMemMap::value_type &pair, cuda_mem_map) {
    device_memory &mem = *pair.first;
    CUDAMem *cmem = &pair.second;
@@ -774,7 +771,6 @@ void CUDADevice::move_textures_to_host(size_t size, bool for_texture)
      max_mem = &mem;
    }
  }
  lock.unlock();

  /* Move to host memory. This part is mutex protected since
   * multiple CUDA devices could be moving the memory. The
@@ -896,7 +892,6 @@ CUDADevice::CUDAMem *CUDADevice::generic_alloc(device_memory &mem, size_t pitch_
  }

  /* Insert into map of allocations. */
  thread_scoped_lock lock(cuda_mem_map_mutex);
  CUDAMem *cmem = &cuda_mem_map[&mem];
  if (shared_pointer != 0) {
    /* Replace host pointer with our host allocation. Only works if
@@ -938,7 +933,6 @@ void CUDADevice::generic_copy_to(device_memory &mem)
  /* If use_mapped_host of mem is false, the current device only uses device memory allocated by
   * cuMemAlloc regardless of mem.host_pointer and mem.shared_pointer, and should copy data from
   * mem.host_pointer. */
  thread_scoped_lock lock(cuda_mem_map_mutex);
  if (!cuda_mem_map[&mem].use_mapped_host || mem.host_pointer != mem.shared_pointer) {
    const CUDAContextScope scope(this);
    cuda_assert(
@@ -950,7 +944,6 @@ void CUDADevice::generic_free(device_memory &mem)
{
  if (mem.device_pointer) {
    CUDAContextScope scope(this);
    thread_scoped_lock lock(cuda_mem_map_mutex);
    const CUDAMem &cmem = cuda_mem_map[&mem];

    /* If cmem.use_mapped_host is true, reference counting is used
@@ -1016,6 +1009,7 @@ void CUDADevice::mem_copy_to(device_memory &mem)
    if (!mem.device_pointer) {
      generic_alloc(mem);
    }

    generic_copy_to(mem);
  }
}
@@ -1054,7 +1048,6 @@ void CUDADevice::mem_zero(device_memory &mem)

  /* If use_mapped_host of mem is false, mem.device_pointer currently refers to device memory
   * regardless of mem.host_pointer and mem.shared_pointer. */
  thread_scoped_lock lock(cuda_mem_map_mutex);
  if (!cuda_mem_map[&mem].use_mapped_host || mem.host_pointer != mem.shared_pointer) {
    const CUDAContextScope scope(this);
    cuda_assert(cuMemsetD8((CUdeviceptr)mem.device_pointer, 0, mem.memory_size()));
@@ -1178,7 +1171,6 @@ void CUDADevice::tex_alloc(device_texture &mem)
  size_t dst_pitch = src_pitch;

  if (!mem.is_resident(this)) {
    thread_scoped_lock lock(cuda_mem_map_mutex);
    cmem = &cuda_mem_map[&mem];
    cmem->texobject = 0;

@@ -1228,7 +1220,6 @@ void CUDADevice::tex_alloc(device_texture &mem)
    mem.device_size = size;
    stats.mem_alloc(size);

    thread_scoped_lock lock(cuda_mem_map_mutex);
    cmem = &cuda_mem_map[&mem];
    cmem->texobject = 0;
    cmem->array = array_3d;
@@ -1314,9 +1305,6 @@ void CUDADevice::tex_alloc(device_texture &mem)
  texDesc.filterMode = filter_mode;
  texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;

  thread_scoped_lock lock(cuda_mem_map_mutex);
  cmem = &cuda_mem_map[&mem];

  cuda_assert(cuTexObjectCreate(&cmem->texobject, &resDesc, &texDesc, NULL));

  texture_info[slot].data = (uint64_t)cmem->texobject;
@@ -1330,7 +1318,6 @@ void CUDADevice::tex_free(device_texture &mem)
{
  if (mem.device_pointer) {
    CUDAContextScope scope(this);
    thread_scoped_lock lock(cuda_mem_map_mutex);
    const CUDAMem &cmem = cuda_mem_map[&mem];

    if (cmem.texobject) {
@@ -1352,7 +1339,6 @@ void CUDADevice::tex_free(device_texture &mem)
      cuda_mem_map.erase(cuda_mem_map.find(&mem));
    }
    else {
      lock.unlock();
      generic_free(mem);
    }
  }
@@ -1925,19 +1911,18 @@ void CUDADevice::render(DeviceTask &task, RenderTile &rtile, device_vector<WorkT
  }

  uint step_samples = divide_up(min_blocks * num_threads_per_block, wtile->w * wtile->h);
  if (task.adaptive_sampling.use) {
    step_samples = task.adaptive_sampling.align_static_samples(step_samples);
  }

  /* Render all samples. */
  int start_sample = rtile.start_sample;
  int end_sample = rtile.start_sample + rtile.num_samples;

  for (int sample = start_sample; sample < end_sample;) {
  for (int sample = start_sample; sample < end_sample; sample += step_samples) {
    /* Setup and copy work tile to device. */
    wtile->start_sample = sample;
    wtile->num_samples = step_samples;
    if (task.adaptive_sampling.use) {
      wtile->num_samples = task.adaptive_sampling.align_samples(sample, step_samples);
    }
    wtile->num_samples = min(wtile->num_samples, end_sample - sample);
    wtile->num_samples = min(step_samples, end_sample - sample);
    work_tiles.copy_to_device();

    CUdeviceptr d_work_tiles = (CUdeviceptr)work_tiles.device_pointer;
@@ -1959,8 +1944,7 @@ void CUDADevice::render(DeviceTask &task, RenderTile &rtile, device_vector<WorkT
    cuda_assert(cuCtxSynchronize());

    /* Update progress. */
    sample += wtile->num_samples;
    rtile.sample = sample;
    rtile.sample = sample + wtile->num_samples;
    task.update_progress(&rtile, rtile.w * rtile.h * wtile->num_samples);

    if (task.get_cancel()) {
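Aside: the render-loop hunk above moves the sample increment into the for statement and clamps the last batch. The iteration pattern, reduced to a standalone sketch with made-up sample counts:

#include <algorithm>
#include <cstdio>

int main()
{
  /* Illustrative values only. */
  const int start_sample = 0, end_sample = 10, step_samples = 4;

  for (int sample = start_sample; sample < end_sample; sample += step_samples) {
    /* Clamp the final batch, mirroring min(step_samples, end_sample - sample). */
    int num_samples = std::min(step_samples, end_sample - sample);
    printf("render samples [%d, %d)\n", sample, sample + num_samples);
  }
  return 0;
}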
@@ -17,8 +17,6 @@
#include <stdlib.h>
#include <string.h>

#include "bvh/bvh2.h"

#include "device/device.h"
#include "device/device_intern.h"

@@ -366,19 +364,6 @@ void Device::draw_pixels(device_memory &rgba,
  }
}

void Device::build_bvh(BVH *bvh, Progress &progress, bool refit)
{
  assert(bvh->params.bvh_layout == BVH_LAYOUT_BVH2);

  BVH2 *const bvh2 = static_cast<BVH2 *>(bvh);
  if (refit) {
    bvh2->refit(progress);
  }
  else {
    bvh2->build(progress, &stats);
  }
}

Device *Device::create(DeviceInfo &info, Stats &stats, Profiler &profiler, bool background)
{
#ifdef WITH_MULTI
@@ -620,7 +605,6 @@ DeviceInfo Device::get_multi_device(const vector<DeviceInfo> &subdevices,

  info.has_half_images = true;
  info.has_volume_decoupled = true;
  info.has_branched_path = true;
  info.has_adaptive_stop_per_sample = true;
  info.has_osl = true;
  info.has_profiling = true;
@@ -666,7 +650,6 @@ DeviceInfo Device::get_multi_device(const vector<DeviceInfo> &subdevices,
    /* Accumulate device info. */
    info.has_half_images &= device.has_half_images;
    info.has_volume_decoupled &= device.has_volume_decoupled;
    info.has_branched_path &= device.has_branched_path;
    info.has_adaptive_stop_per_sample &= device.has_adaptive_stop_per_sample;
    info.has_osl &= device.has_osl;
    info.has_profiling &= device.has_profiling;

@@ -79,7 +79,6 @@ class DeviceInfo {
  bool display_device;       /* GPU is used as a display device. */
  bool has_half_images;      /* Support half-float textures. */
  bool has_volume_decoupled; /* Decoupled volume shading. */
  bool has_branched_path;    /* Supports branched path tracing. */
  bool has_adaptive_stop_per_sample; /* Per-sample adaptive sampling stopping. */
  bool has_osl;              /* Support Open Shading Language. */
  bool use_split_kernel;     /* Use split or mega kernel. */
@@ -100,7 +99,6 @@ class DeviceInfo {
    display_device = false;
    has_half_images = false;
    has_volume_decoupled = false;
    has_branched_path = true;
    has_adaptive_stop_per_sample = false;
    has_osl = false;
    use_split_kernel = false;
@@ -375,6 +373,12 @@ class Device {
    return NULL;
  }

  /* Device specific pointer for BVH creation. Currently only used by Embree. */
  virtual void *bvh_device() const
  {
    return NULL;
  }

  /* load/compile kernels, must be called before adding tasks */
  virtual bool load_kernels(const DeviceRequestedFeatures & /*requested_features*/)
  {
@@ -423,7 +427,10 @@ class Device {
                           const DeviceDrawParams &draw_params);

  /* acceleration structure building */
  virtual void build_bvh(BVH *bvh, Progress &progress, bool refit);
  virtual bool build_optix_bvh(BVH *)
  {
    return false;
  }

#ifdef WITH_NETWORK
  /* networking */
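Aside: the new Device::bvh_device() hook shown above lets a back-end expose an opaque native handle (Embree's RTCDevice in this branch) while every other device keeps the NULL default. A minimal sketch of that virtual-hook pattern, with illustrative class names rather than the Cycles ones:

#include <cstdio>

class Device {
 public:
  virtual ~Device() {}
  virtual void *bvh_device() const { return nullptr; } /* default: no native builder */
};

class EmbreeLikeDevice : public Device {
 public:
  void *bvh_device() const override { return (void *)&native_handle; }

 private:
  int native_handle = 42; /* stands in for an RTCDevice */
};

int main()
{
  EmbreeLikeDevice d;
  printf("has native BVH device: %s\n", d.bvh_device() ? "yes" : "no");
  return 0;
}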