Compare commits

..

6 Commits

SHA1 Message Date
f240ac1673 Improve formatting 2021-11-24 21:41:25 +01:00
a590474f4c Fix compilation & dependency depth 2021-11-24 20:09:36 +01:00
b2462b6f5c Make shader sources from draw included in the dependency library. 2021-11-24 19:46:00 +01:00
21ee89c52f Fix compilation issues on MSVC and a bug in builder 2021-11-24 18:56:51 +01:00
2c95da88aa GPUShaderDependency: Initial Commit
This is a prototype to support the `#include` directive inside GLSL sources.
The sources are aggregated into a list before being translated to byte_array.
This list is used to generate a mapping between each filename and its
associated byte_array. For each byte_array, we search for any `#include`
and store the file to merge.

At runtime, for one input filename we concatenate all byte_arrays that
are needed, following the include order and avoiding double includes.

This is meant to evolve into a fully supported `#include` system.
2021-11-24 17:58:06 +01:00
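To illustrate the aggregation described in the commit message above, here is a minimal, hypothetical C++ sketch of concatenating sources in include order while skipping files that were already merged. It is not the actual builder code; the names (`SourceMap`, `resolve_includes`, etc.) are invented for this example.

```cpp
#include <set>
#include <string>
#include <unordered_map>
#include <vector>

/* Hypothetical sketch: filename -> source text (the "byte_array" of the
 * commit message), plus a per-file list of #include'd filenames. */
using SourceMap = std::unordered_map<std::string, std::string>;
using DependencyMap = std::unordered_map<std::string, std::vector<std::string>>;

/* Recursively append every dependency of `filename` before the file itself,
 * skipping files that were already emitted (avoids double includes). */
static void append_source(const std::string &filename,
                          const SourceMap &sources,
                          const DependencyMap &deps,
                          std::set<std::string> &emitted,
                          std::string &out)
{
  if (!emitted.insert(filename).second) {
    return; /* Already merged. */
  }
  auto it = deps.find(filename);
  if (it != deps.end()) {
    for (const std::string &dep : it->second) {
      append_source(dep, sources, deps, emitted, out);
    }
  }
  out += sources.at(filename);
}

/* For one input filename, concatenate all sources it depends on, in include
 * order, followed by the file itself. */
std::string resolve_includes(const std::string &filename,
                             const SourceMap &sources,
                             const DependencyMap &deps)
{
  std::set<std::string> emitted;
  std::string out;
  append_source(filename, sources, deps, emitted, out);
  return out;
}
```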
7358a5aba2 GPUShaderDescriptor: Initial Commit
This is a first draft of what the Shader Descriptor system could be.

A shader descriptor provides a way to define shader structure, resources
and interfaces. This makes for a quick way to provide backend-agnostic
binding information while also making shader variations easy to declare.
2021-11-24 17:52:39 +01:00
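Purely as an illustration of the descriptor idea sketched in the commit above (not the actual `GPU_SHADER_DESCRIPTOR` API), a backend-agnostic descriptor could look roughly like the structs below; all field and type names are assumptions for this sketch.

```cpp
#include <string>
#include <vector>

/* Hypothetical sketch of a backend-agnostic shader descriptor: it only
 * declares structure and bindings, leaving GLSL/Metal/Vulkan specifics to
 * each backend. */
struct ShaderResource {
  enum class Type { UniformBuffer, StorageBuffer, Sampler, Image };
  Type type;
  int binding;      /* Backend-agnostic binding slot. */
  std::string name; /* Name exposed to the shader source. */
};

struct StageInterface {
  std::string name;                 /* e.g. "v_out" */
  std::vector<std::string> members; /* e.g. {"vec2 uv", "vec4 color"} */
};

struct ShaderDescriptor {
  std::string name;
  std::vector<ShaderResource> resources;
  std::vector<StageInterface> interfaces;
  std::string vertex_source;   /* Filename resolved by the include system above. */
  std::string fragment_source;
  std::vector<std::string> defines; /* Cheap way to declare shader variations. */
};
```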
3568 changed files with 103724 additions and 149083 deletions

View File

@@ -269,5 +269,9 @@ StatementMacros:
- PyObject_HEAD
- PyObject_VAR_HEAD
StatementMacros:
- GPU_STAGE_INTERFACE_CREATE
- GPU_SHADER_DESCRIPTOR
MacroBlockBegin: "^BSDF_CLOSURE_CLASS_BEGIN$"
MacroBlockEnd: "^BSDF_CLOSURE_CLASS_END$"

View File

@@ -157,9 +157,8 @@ option(WITH_BLENDER "Build blender (disable to build only the blender player)" O
mark_as_advanced(WITH_BLENDER)
if(APPLE)
# In future, can be used with `quicklookthumbnailing/qlthumbnailreply` to create file
# thumbnails for say Finder. Turn it off for now.
option(WITH_BLENDER_THUMBNAILER "Build \"blender-thumbnailer\" thumbnail extraction utility" OFF)
# Currently this causes a build error linking, disable.
set(WITH_BLENDER_THUMBNAILER OFF)
elseif(WIN32)
option(WITH_BLENDER_THUMBNAILER "Build \"BlendThumb.dll\" helper for Windows explorer integration" ON)
else()
@@ -188,13 +187,6 @@ mark_as_advanced(CPACK_OVERRIDE_PACKAGENAME)
mark_as_advanced(BUILDINFO_OVERRIDE_DATE)
mark_as_advanced(BUILDINFO_OVERRIDE_TIME)
if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.16")
option(WITH_UNITY_BUILD "Enable unity build for modules that support it to improve compile times" ON)
mark_as_advanced(WITH_UNITY_BUILD)
else()
set(WITH_UNITY_BUILD OFF)
endif()
option(WITH_IK_ITASC "Enable ITASC IK solver (only disable for development & for incompatible C++ compilers)" ON)
option(WITH_IK_SOLVER "Enable Legacy IK solver (only disable for development)" ON)
option(WITH_FFTW3 "Enable FFTW3 support (Used for smoke, ocean sim, and audio effects)" ON)
@@ -274,13 +266,11 @@ endif()
if(UNIX AND NOT APPLE)
option(WITH_SYSTEM_GLEW "Use GLEW OpenGL wrapper library provided by the operating system" OFF)
option(WITH_SYSTEM_GLEW "Use GLEW OpenGL wrapper library provided by the operating system" OFF)
option(WITH_SYSTEM_FREETYPE "Use the freetype library provided by the operating system" OFF)
option(WITH_SYSTEM_GLES "Use OpenGL ES library provided by the operating system" ON)
else()
# not an option for other OS's
set(WITH_SYSTEM_GLEW OFF)
set(WITH_SYSTEM_GLES OFF)
set(WITH_SYSTEM_FREETYPE OFF)
endif()
@@ -436,40 +426,30 @@ mark_as_advanced(WITH_CYCLES_DEBUG_NAN)
mark_as_advanced(WITH_CYCLES_NATIVE_ONLY)
# NVIDIA CUDA & OptiX
if(NOT APPLE)
option(WITH_CYCLES_DEVICE_CUDA "Enable Cycles NVIDIA CUDA compute support" ON)
option(WITH_CYCLES_DEVICE_OPTIX "Enable Cycles NVIDIA OptiX support" ON)
mark_as_advanced(WITH_CYCLES_DEVICE_CUDA)
option(WITH_CYCLES_DEVICE_CUDA "Enable Cycles NVIDIA CUDA compute support" ON)
option(WITH_CYCLES_DEVICE_OPTIX "Enable Cycles NVIDIA OptiX support" ON)
mark_as_advanced(WITH_CYCLES_DEVICE_CUDA)
option(WITH_CYCLES_CUDA_BINARIES "Build Cycles NVIDIA CUDA binaries" OFF)
set(CYCLES_CUDA_BINARIES_ARCH sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 sm_70 sm_75 sm_86 compute_75 CACHE STRING "CUDA architectures to build binaries for")
option(WITH_CYCLES_CUBIN_COMPILER "Build cubins with nvrtc based compiler instead of nvcc" OFF)
option(WITH_CYCLES_CUDA_BUILD_SERIAL "Build cubins one after another (useful on machines with limited RAM)" OFF)
option(WITH_CUDA_DYNLOAD "Dynamically load CUDA libraries at runtime (for developers, makes cuda-gdb work)" ON)
mark_as_advanced(CYCLES_CUDA_BINARIES_ARCH)
mark_as_advanced(WITH_CYCLES_CUBIN_COMPILER)
mark_as_advanced(WITH_CYCLES_CUDA_BUILD_SERIAL)
mark_as_advanced(WITH_CUDA_DYNLOAD)
endif()
option(WITH_CYCLES_CUDA_BINARIES "Build Cycles NVIDIA CUDA binaries" OFF)
set(CYCLES_CUDA_BINARIES_ARCH sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 sm_70 sm_75 sm_86 compute_75 CACHE STRING "CUDA architectures to build binaries for")
option(WITH_CYCLES_CUBIN_COMPILER "Build cubins with nvrtc based compiler instead of nvcc" OFF)
option(WITH_CYCLES_CUDA_BUILD_SERIAL "Build cubins one after another (useful on machines with limited RAM)" OFF)
option(WITH_CUDA_DYNLOAD "Dynamically load CUDA libraries at runtime (for developers, makes cuda-gdb work)" ON)
mark_as_advanced(CYCLES_CUDA_BINARIES_ARCH)
mark_as_advanced(WITH_CYCLES_CUBIN_COMPILER)
mark_as_advanced(WITH_CYCLES_CUDA_BUILD_SERIAL)
mark_as_advanced(WITH_CUDA_DYNLOAD)
# AMD HIP
if(NOT APPLE)
if(WIN32)
option(WITH_CYCLES_DEVICE_HIP "Enable Cycles AMD HIP support" ON)
else()
option(WITH_CYCLES_DEVICE_HIP "Enable Cycles AMD HIP support" OFF)
endif()
option(WITH_CYCLES_HIP_BINARIES "Build Cycles AMD HIP binaries" OFF)
set(CYCLES_HIP_BINARIES_ARCH gfx1010 gfx1011 gfx1012 gfx1030 gfx1031 gfx1032 gfx1034 CACHE STRING "AMD HIP architectures to build binaries for")
mark_as_advanced(WITH_CYCLES_DEVICE_HIP)
mark_as_advanced(CYCLES_HIP_BINARIES_ARCH)
endif()
# Apple Metal
if(APPLE)
option(WITH_CYCLES_DEVICE_METAL "Enable Cycles Apple Metal compute support" ON)
if(WIN32)
option(WITH_CYCLES_DEVICE_HIP "Enable Cycles AMD HIP support" ON)
else()
option(WITH_CYCLES_DEVICE_HIP "Enable Cycles AMD HIP support" OFF)
endif()
option(WITH_CYCLES_HIP_BINARIES "Build Cycles AMD HIP binaries" OFF)
set(CYCLES_HIP_BINARIES_ARCH gfx1010 gfx1011 gfx1012 gfx1030 gfx1031 gfx1032 gfx1034 CACHE STRING "AMD HIP architectures to build binaries for")
mark_as_advanced(WITH_CYCLES_DEVICE_HIP)
mark_as_advanced(CYCLES_HIP_BINARIES_ARCH)
# Draw Manager
option(WITH_DRAW_DEBUG "Add extra debug capabilities to Draw Manager" OFF)
@@ -514,10 +494,11 @@ if(WIN32)
endif()
# This should be turned off when Blender enter beta/rc/release
if("${BLENDER_VERSION_CYCLE}" STREQUAL "alpha")
set(WITH_EXPERIMENTAL_FEATURES ON)
else()
if("${BLENDER_VERSION_CYCLE}" STREQUAL "release" OR
"${BLENDER_VERSION_CYCLE}" STREQUAL "rc")
set(WITH_EXPERIMENTAL_FEATURES OFF)
else()
set(WITH_EXPERIMENTAL_FEATURES ON)
endif()
# Unit testsing
@@ -539,14 +520,12 @@ option(WITH_OPENGL "When off limits visibility of the opengl header
option(WITH_GLEW_ES "Switches to experimental copy of GLEW that has support for OpenGL ES. (temporary option for development purposes)" OFF)
option(WITH_GL_EGL "Use the EGL OpenGL system library instead of the platform specific OpenGL system library (CGL, glX, or WGL)" OFF)
option(WITH_GL_PROFILE_ES20 "Support using OpenGL ES 2.0. (through either EGL or the AGL/WGL/XGL 'es20' profile)" OFF)
option(WITH_GPU_SHADER_BUILDER "Shader builder is a developer option enabling linting on GLSL during compilation" OFF)
mark_as_advanced(
WITH_OPENGL
WITH_GLEW_ES
WITH_GL_EGL
WITH_GL_PROFILE_ES20
WITH_GPU_SHADER_BUILDER
)
if(WIN32)
@@ -564,18 +543,12 @@ if(WIN32)
set(CPACK_INSTALL_PREFIX ${CMAKE_GENERIC_PROGRAM_FILES}/${})
endif()
# Compiler tool-chain.
if(UNIX AND NOT APPLE)
if(CMAKE_COMPILER_IS_GNUCC)
option(WITH_LINKER_GOLD "Use ld.gold linker which is usually faster than ld.bfd" ON)
mark_as_advanced(WITH_LINKER_GOLD)
option(WITH_LINKER_LLD "Use ld.lld linker which is usually faster than ld.gold" OFF)
mark_as_advanced(WITH_LINKER_LLD)
endif()
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_C_COMPILER_ID MATCHES "Clang")
option(WITH_LINKER_MOLD "Use ld.mold linker which is usually faster than ld.gold & ld.lld." OFF)
mark_as_advanced(WITH_LINKER_MOLD)
endif()
# Compiler toolchain
if(CMAKE_COMPILER_IS_GNUCC)
option(WITH_LINKER_GOLD "Use ld.gold linker which is usually faster than ld.bfd" ON)
mark_as_advanced(WITH_LINKER_GOLD)
option(WITH_LINKER_LLD "Use ld.lld linker which is usually faster than ld.gold" OFF)
mark_as_advanced(WITH_LINKER_LLD)
endif()
option(WITH_COMPILER_ASAN "Build and link against address sanitizer (only for Debug & RelWithDebInfo targets)." OFF)
@@ -686,7 +659,7 @@ if(WIN32 OR XCODE)
option(IDE_GROUP_PROJECTS_IN_FOLDERS "Organize the projects according to source folder structure." ON)
mark_as_advanced(IDE_GROUP_PROJECTS_IN_FOLDERS)
if(IDE_GROUP_PROJECTS_IN_FOLDERS)
if (IDE_GROUP_PROJECTS_IN_FOLDERS)
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
endif()
endif()
@@ -711,12 +684,9 @@ if(UNIX AND NOT APPLE)
endif()
# Installation process.
set(POSTINSTALL_SCRIPT "" CACHE FILEPATH "Run given CMake script after installation process")
option(POSTINSTALL_SCRIPT "Run given CMake script after installation process" OFF)
mark_as_advanced(POSTINSTALL_SCRIPT)
set(POSTCONFIGURE_SCRIPT "" CACHE FILEPATH "Run given CMake script as the last step of CMake configuration")
mark_as_advanced(POSTCONFIGURE_SCRIPT)
# end option(...)
@@ -870,7 +840,7 @@ if(WITH_AUDASPACE)
endif()
# Auto-enable CUDA dynload if toolkit is not found.
if(WITH_CYCLES AND WITH_CYCLES_DEVICE_CUDA AND NOT WITH_CUDA_DYNLOAD)
if(NOT WITH_CUDA_DYNLOAD)
find_package(CUDA)
if(NOT CUDA_FOUND)
message(STATUS "CUDA toolkit not found, using dynamic runtime loading of libraries (WITH_CUDA_DYNLOAD) instead")
@@ -2080,8 +2050,3 @@ endif()
if(0)
print_all_vars()
endif()
# Should be the last step of configuration.
if(POSTCONFIGURE_SCRIPT)
include(${POSTCONFIGURE_SCRIPT})
endif()

View File

@@ -63,7 +63,6 @@ include(cmake/jpeg.cmake)
include(cmake/blosc.cmake)
include(cmake/pthreads.cmake)
include(cmake/openexr.cmake)
include(cmake/brotli.cmake)
include(cmake/freetype.cmake)
include(cmake/freeglut.cmake)
include(cmake/glew.cmake)

View File

@@ -25,13 +25,8 @@ else()
endif()
if(WIN32)
if(MSVC_VERSION GREATER_EQUAL 1920) # 2019
set(BOOST_TOOLSET toolset=msvc-14.2)
set(BOOST_COMPILER_STRING -vc142)
else() # 2017
set(BOOST_TOOLSET toolset=msvc-14.1)
set(BOOST_COMPILER_STRING -vc141)
endif()
set(BOOST_TOOLSET toolset=msvc-14.1)
set(BOOST_COMPILER_STRING -vc141)
set(BOOST_CONFIGURE_COMMAND bootstrap.bat)
set(BOOST_BUILD_COMMAND b2)

View File

@@ -1,38 +0,0 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
set(BROTLI_EXTRA_ARGS
)
ExternalProject_Add(external_brotli
URL file://${PACKAGE_DIR}/${BROTLI_FILE}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH ${BROTLI_HASH_TYPE}=${BROTLI_HASH}
PREFIX ${BUILD_DIR}/brotli
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${LIBDIR}/brotli ${DEFAULT_CMAKE_FLAGS} ${BROTLI_EXTRA_ARGS}
INSTALL_DIR ${LIBDIR}/brotli
)
if(BUILD_MODE STREQUAL Release AND WIN32)
ExternalProject_Add_Step(external_brotli after_install
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/brotli/include ${HARVEST_TARGET}/brotli/include
COMMAND ${CMAKE_COMMAND} -E copy ${LIBDIR}/brotli/lib/brotlidec-static${LIBEXT} ${HARVEST_TARGET}/brotli/lib/brotlidec-static${LIBEXT}
COMMAND ${CMAKE_COMMAND} -E copy ${LIBDIR}/brotli/lib/brotlicommon-static${LIBEXT} ${HARVEST_TARGET}/brotli/lib/brotlicommon-static${LIBEXT}
DEPENDEES install
)
endif()

View File

@@ -94,4 +94,3 @@ download_source(POTRACE)
download_source(HARU)
download_source(ZSTD)
download_source(FLEX)
download_source(BROTLI)

View File

@@ -19,13 +19,13 @@
set(FREETYPE_EXTRA_ARGS
-DCMAKE_RELEASE_POSTFIX:STRING=2ST
-DCMAKE_DEBUG_POSTFIX:STRING=2ST_d
-DFT_DISABLE_BZIP2=ON
-DFT_DISABLE_HARFBUZZ=ON
-DFT_DISABLE_PNG=ON
-DFT_REQUIRE_BROTLI=ON
-DPC_BROTLIDEC_INCLUDEDIR=${LIBDIR}/brotli/include
-DPC_BROTLIDEC_LIBDIR=${LIBDIR}/brotli/lib
)
-DWITH_BZip2=OFF
-DWITH_HarfBuzz=OFF
-DFT_WITH_HARFBUZZ=OFF
-DFT_WITH_BZIP2=OFF
-DCMAKE_DISABLE_FIND_PACKAGE_HarfBuzz=TRUE
-DCMAKE_DISABLE_FIND_PACKAGE_BZip2=TRUE
-DCMAKE_DISABLE_FIND_PACKAGE_BrotliDec=TRUE)
ExternalProject_Add(external_freetype
URL file://${PACKAGE_DIR}/${FREETYPE_FILE}
@@ -36,11 +36,6 @@ ExternalProject_Add(external_freetype
INSTALL_DIR ${LIBDIR}/freetype
)
add_dependencies(
external_freetype
external_brotli
)
if(BUILD_MODE STREQUAL Release AND WIN32)
ExternalProject_Add_Step(external_freetype after_install
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/freetype ${HARVEST_TARGET}/freetype

View File

@@ -38,6 +38,13 @@ elseif(UNIX AND NOT APPLE)
)
endif()
if(BLENDER_PLATFORM_ARM)
set(GMP_OPTIONS
${GMP_OPTIONS}
--disable-assembly
)
endif()
ExternalProject_Add(external_gmp
URL file://${PACKAGE_DIR}/${GMP_FILE}
DOWNLOAD_DIR ${DOWNLOAD_DIR}

View File

@@ -79,8 +79,6 @@ endfunction()
harvest(alembic/include alembic/include "*.h")
harvest(alembic/lib/libAlembic.a alembic/lib/libAlembic.a)
harvest(alembic/bin alembic/bin "*")
harvest(brotli/include brotli/include "*.h")
harvest(brotli/lib brotli/lib "*.a")
harvest(boost/include boost/include "*")
harvest(boost/lib boost/lib "*.a")
harvest(ffmpeg/include ffmpeg/include "*.h")

View File

@@ -31,7 +31,7 @@ ExternalProject_Add(external_python_site_packages
CONFIGURE_COMMAND ${PIP_CONFIGURE_COMMAND}
BUILD_COMMAND ""
PREFIX ${BUILD_DIR}/site_packages
INSTALL_COMMAND ${PYTHON_BINARY} -m pip install --no-cache-dir ${SITE_PACKAGES_EXTRA} cython==${CYTHON_VERSION} idna==${IDNA_VERSION} charset-normalizer==${CHARSET_NORMALIZER_VERSION} urllib3==${URLLIB3_VERSION} certifi==${CERTIFI_VERSION} requests==${REQUESTS_VERSION} zstandard==${ZSTANDARD_VERSION} --no-binary :all:
INSTALL_COMMAND ${PYTHON_BINARY} -m pip install ${SITE_PACKAGES_EXTRA} cython==${CYTHON_VERSION} idna==${IDNA_VERSION} charset-normalizer==${CHARSET_NORMALIZER_VERSION} urllib3==${URLLIB3_VERSION} certifi==${CERTIFI_VERSION} requests==${REQUESTS_VERSION} zstandard==${ZSTANDARD_VERSION} --no-binary :all:
)
if(USE_PIP_NUMPY)

View File

@@ -83,9 +83,9 @@ else()
set(OPENEXR_VERSION_POSTFIX)
endif()
set(FREETYPE_VERSION 2.11.1)
set(FREETYPE_VERSION 2.10.2)
set(FREETYPE_URI http://prdownloads.sourceforge.net/freetype/freetype-${FREETYPE_VERSION}.tar.gz)
set(FREETYPE_HASH bd4e3b007474319909a6b79d50908e85)
set(FREETYPE_HASH b1cb620e4c875cd4d1bfa04945400945)
set(FREETYPE_HASH_TYPE MD5)
set(FREETYPE_FILE freetype-${FREETYPE_VERSION}.tar.gz)
@@ -189,11 +189,11 @@ set(OSL_HASH 1abd7ce40481771a9fa937f19595d2f2)
set(OSL_HASH_TYPE MD5)
set(OSL_FILE OpenShadingLanguage-${OSL_VERSION}.tar.gz)
set(PYTHON_VERSION 3.10.2)
set(PYTHON_SHORT_VERSION 3.10)
set(PYTHON_SHORT_VERSION_NO_DOTS 310)
set(PYTHON_VERSION 3.9.7)
set(PYTHON_SHORT_VERSION 3.9)
set(PYTHON_SHORT_VERSION_NO_DOTS 39)
set(PYTHON_URI https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tar.xz)
set(PYTHON_HASH 14e8c22458ed7779a1957b26cde01db9)
set(PYTHON_HASH fddb060b483bc01850a3f412eea1d954)
set(PYTHON_HASH_TYPE MD5)
set(PYTHON_FILE Python-${PYTHON_VERSION}.tar.xz)
@@ -215,20 +215,18 @@ set(NANOVDB_HASH e7b9e863ec2f3b04ead171dec2322807)
set(NANOVDB_HASH_TYPE MD5)
set(NANOVDB_FILE nano-vdb-${NANOVDB_GIT_UID}.tar.gz)
set(IDNA_VERSION 3.3)
set(CHARSET_NORMALIZER_VERSION 2.0.10)
set(URLLIB3_VERSION 1.26.8)
set(IDNA_VERSION 3.2)
set(CHARSET_NORMALIZER_VERSION 2.0.6)
set(URLLIB3_VERSION 1.26.7)
set(CERTIFI_VERSION 2021.10.8)
set(REQUESTS_VERSION 2.27.1)
set(CYTHON_VERSION 0.29.26)
# The version of the zstd library used to build the Python package should match ZSTD_VERSION defined below.
# At this time of writing, 0.17.0 was already released, but built against zstd 1.5.1, while we use 1.5.0.
set(ZSTANDARD_VERSION 0.16.0)
set(REQUESTS_VERSION 2.26.0)
set(CYTHON_VERSION 0.29.24)
set(ZSTANDARD_VERSION 0.15.2 )
set(NUMPY_VERSION 1.22.0)
set(NUMPY_SHORT_VERSION 1.22)
set(NUMPY_VERSION 1.21.2)
set(NUMPY_SHORT_VERSION 1.21)
set(NUMPY_URI https://github.com/numpy/numpy/releases/download/v${NUMPY_VERSION}/numpy-${NUMPY_VERSION}.zip)
set(NUMPY_HASH 252de134862a27bd66705d29622edbfe)
set(NUMPY_HASH 5638d5dae3ca387be562912312db842e)
set(NUMPY_HASH_TYPE MD5)
set(NUMPY_FILE numpy-${NUMPY_VERSION}.zip)
@@ -476,9 +474,9 @@ set(ISPC_HASH 2e3abedbc0ea9aaec17d6562c632454d)
set(ISPC_HASH_TYPE MD5)
set(ISPC_FILE ispc-${ISPC_VERSION}.tar.gz)
set(GMP_VERSION 6.2.1)
set(GMP_VERSION 6.2.0)
set(GMP_URI https://gmplib.org/download/gmp/gmp-${GMP_VERSION}.tar.xz)
set(GMP_HASH 0b82665c4a92fd2ade7440c13fcaa42b)
set(GMP_HASH a325e3f09e6d91e62101e59f9bda3ec1)
set(GMP_HASH_TYPE MD5)
set(GMP_FILE gmp-${GMP_VERSION}.tar.xz)
@@ -502,9 +500,3 @@ set(ZSTD_FILE zstd-${ZSTD_VERSION}.tar.gz)
set(SSE2NEON_GIT https://github.com/DLTcollab/sse2neon.git)
set(SSE2NEON_GIT_HASH fe5ff00bb8d19b327714a3c290f3e2ce81ba3525)
set(BROTLI_VERSION v1.0.9)
set(BROTLI_URI https://github.com/google/brotli/archive/refs/tags/${BROTLI_VERSION}.tar.gz)
set(BROTLI_HASH f9e8d81d0405ba66d181529af42a3354f838c939095ff99930da6aa9cdf6fe46)
set(BROTLI_HASH_TYPE SHA256)
set(BROTLI_FILE brotli-${BROTLI_VERSION}.tar.gz)

View File

@@ -379,27 +379,27 @@ USE_CXX11=true
CLANG_FORMAT_VERSION_MIN="6.0"
CLANG_FORMAT_VERSION_MEX="10.0"
PYTHON_VERSION="3.10.2"
PYTHON_VERSION_SHORT="3.10"
PYTHON_VERSION_MIN="3.9"
PYTHON_VERSION_MEX="3.12"
PYTHON_VERSION="3.9.7"
PYTHON_VERSION_SHORT="3.9"
PYTHON_VERSION_MIN="3.7"
PYTHON_VERSION_MEX="3.11"
PYTHON_VERSION_INSTALLED=$PYTHON_VERSION_SHORT
PYTHON_FORCE_BUILD=false
PYTHON_FORCE_REBUILD=false
PYTHON_SKIP=false
# Additional Python modules.
PYTHON_IDNA_VERSION="3.3"
PYTHON_IDNA_VERSION="3.2"
PYTHON_IDNA_VERSION_MIN="2.0"
PYTHON_IDNA_VERSION_MEX="4.0"
PYTHON_IDNA_NAME="idna"
PYTHON_CHARSET_NORMALIZER_VERSION="2.0.10"
PYTHON_CHARSET_NORMALIZER_VERSION="2.0.6"
PYTHON_CHARSET_NORMALIZER_VERSION_MIN="2.0.6"
PYTHON_CHARSET_NORMALIZER_VERSION_MEX="2.1.0" # requests uses `charset_normalizer~=2.0.0`
PYTHON_CHARSET_NORMALIZER_NAME="charset-normalizer"
PYTHON_URLLIB3_VERSION="1.26.8"
PYTHON_URLLIB3_VERSION="1.26.7"
PYTHON_URLLIB3_VERSION_MIN="1.0"
PYTHON_URLLIB3_VERSION_MEX="2.0"
PYTHON_URLLIB3_NAME="urllib3"
@@ -409,17 +409,17 @@ PYTHON_CERTIFI_VERSION_MIN="2021.0"
PYTHON_CERTIFI_VERSION_MEX="2023.0"
PYTHON_CERTIFI_NAME="certifi"
PYTHON_REQUESTS_VERSION="2.27.1"
PYTHON_REQUESTS_VERSION="2.23.0"
PYTHON_REQUESTS_VERSION_MIN="2.0"
PYTHON_REQUESTS_VERSION_MEX="3.0"
PYTHON_REQUESTS_NAME="requests"
PYTHON_ZSTANDARD_VERSION="0.16.0"
PYTHON_ZSTANDARD_VERSION="0.15.2"
PYTHON_ZSTANDARD_VERSION_MIN="0.15.2"
PYTHON_ZSTANDARD_VERSION_MEX="0.20.0"
PYTHON_ZSTANDARD_VERSION_MEX="0.16.0"
PYTHON_ZSTANDARD_NAME="zstandard"
PYTHON_NUMPY_VERSION="1.22.0"
PYTHON_NUMPY_VERSION="1.21.2"
PYTHON_NUMPY_VERSION_MIN="1.14"
PYTHON_NUMPY_VERSION_MEX="2.0"
PYTHON_NUMPY_NAME="numpy"
@@ -492,14 +492,14 @@ OIIO_SKIP=false
LLVM_VERSION="12.0.0"
LLVM_VERSION_SHORT="12.0"
LLVM_VERSION_MIN="11.0"
LLVM_VERSION_MEX="14.0"
LLVM_VERSION_MEX="13.0"
LLVM_VERSION_FOUND=""
LLVM_FORCE_BUILD=false
LLVM_FORCE_REBUILD=false
LLVM_SKIP=false
# OSL needs to be compiled for now!
OSL_VERSION="1.11.17.0"
OSL_VERSION="1.11.14.1"
OSL_VERSION_SHORT="1.11"
OSL_VERSION_MIN="1.11"
OSL_VERSION_MEX="2.0"
@@ -2083,9 +2083,9 @@ compile_OIIO() {
cmake_d="$cmake_d -D OPENEXR_VERSION=$OPENEXR_VERSION"
if [ "$_with_built_openexr" = true ]; then
cmake_d="$cmake_d -D ILMBASE_ROOT=$INST/openexr"
cmake_d="$cmake_d -D OPENEXR_ROOT=$INST/openexr"
INFO "Ilmbase_ROOT=$INST/openexr"
cmake_d="$cmake_d -D ILMBASE_HOME=$INST/openexr"
cmake_d="$cmake_d -D OPENEXR_HOME=$INST/openexr"
INFO "ILMBASE_HOME=$INST/openexr"
fi
# ptex is only needed when nicholas bishop is ready
@@ -2374,9 +2374,9 @@ compile_OSL() {
#~ cmake_d="$cmake_d -D ILMBASE_VERSION=$ILMBASE_VERSION"
if [ "$_with_built_openexr" = true ]; then
cmake_d="$cmake_d -D ILMBASE_ROOT=$INST/openexr"
cmake_d="$cmake_d -D OPENEXR_ROOT=$INST/openexr"
INFO "Ilmbase_ROOT=$INST/openexr"
INFO "ILMBASE_HOME=$INST/openexr"
cmake_d="$cmake_d -D OPENEXR_ROOT_DIR=$INST/openexr"
cmake_d="$cmake_d -D ILMBASE_ROOT_DIR=$INST/openexr"
# XXX Temp workaround... sigh, ILMBase really messed the things up by defining their custom names ON by default :(
fi
@@ -3620,8 +3620,8 @@ compile_FFmpeg() {
fi
./configure --cc="gcc -Wl,--as-needed" \
--extra-ldflags="-pthread" \
--prefix=$_inst --enable-shared \
--extra-ldflags="-pthread -static-libgcc" \
--prefix=$_inst --enable-static \
--disable-ffplay --disable-doc \
--enable-gray \
--enable-avfilter --disable-vdpau \
@@ -4036,14 +4036,14 @@ install_DEB() {
INFO "Forced Python building, as requested..."
_do_compile_python=true
else
check_package_version_ge_lt_DEB python${PYTHON_VERSION_SHORT}-dev $PYTHON_VERSION_MIN $PYTHON_VERSION_MEX
check_package_version_ge_lt_DEB python3-dev $PYTHON_VERSION_MIN $PYTHON_VERSION_MEX
if [ $? -eq 0 ]; then
install_packages_DEB python${PYTHON_VERSION_SHORT}-dev
PYTHON_VERSION_INSTALLED=$(echo `get_package_version_DEB python3-dev` | sed -r 's/^([0-9]+\.[0-9]+).*/\1/')
install_packages_DEB python3-dev
clean_Python
PRINT ""
PYTHON_VERSION_INSTALLED=$(echo `get_package_version_DEB python${PYTHON_VERSION_SHORT}-dev` | sed -r 's/^([0-9]+\.[0-9]+).*/\1/')
for module in "${PYTHON_MODULES_PACKAGES[@]}"
do
module=($module)
@@ -4681,11 +4681,11 @@ install_RPM() {
else
check_package_version_ge_lt_RPM python3-devel $PYTHON_VERSION_MIN $PYTHON_VERSION_MEX
if [ $? -eq 0 ]; then
PYTHON_VERSION_INSTALLED=$(echo `get_package_version_RPM python3-devel` | sed -r 's/^([0-9]+\.[0-9]+).*/\1/')
install_packages_RPM python3-devel
clean_Python
PYTHON_VERSION_INSTALLED=$(echo `get_package_version_RPM python3-devel` | sed -r 's/^([0-9]+\.[0-9]+).*/\1/')
for module in "${PYTHON_MODULES_PACKAGES[@]}"
do
module=($module)
@@ -5224,12 +5224,12 @@ install_ARCH() {
else
check_package_version_ge_lt_ARCH python $PYTHON_VERSION_MIN $PYTHON_VERSION_MEX
if [ $? -eq 0 ]; then
PYTHON_VERSION_INSTALLED=$(echo `get_package_version_ARCH python` | sed -r 's/^([0-9]+\.[0-9]+).*/\1/')
install_packages_ARCH python
clean_Python
PRINT ""
PYTHON_VERSION_INSTALLED=$(echo `get_package_version_ARCH python` | sed -r 's/^([0-9]+\.[0-9]+).*/\1/')
for module in "${PYTHON_MODULES_PACKAGES[@]}"
do
module=($module)
@@ -5721,6 +5721,76 @@ install_OTHER() {
# ----------------------------------------------------------------------------
# Printing User Info
print_info_ffmpeglink_DEB() {
dpkg -L $_packages | grep -e ".*\/lib[^\/]\+\.so" | gawk '{ printf(nlines ? "'"$_ffmpeg_list_sep"'%s" : "%s", gensub(/.*lib([^\/]+)\.so/, "\\1", "g", $0)); nlines++ }'
}
print_info_ffmpeglink_RPM() {
rpm -ql $_packages | grep -e ".*\/lib[^\/]\+\.so" | gawk '{ printf(nlines ? "'"$_ffmpeg_list_sep"'%s" : "%s", gensub(/.*lib([^\/]+)\.so/, "\\1", "g", $0)); nlines++ }'
}
print_info_ffmpeglink_ARCH() {
pacman -Ql $_packages | grep -e ".*\/lib[^\/]\+\.so$" | gawk '{ printf(nlines ? "'"$_ffmpeg_list_sep"'%s" : "%s", gensub(/.*lib([^\/]+)\.so/, "\\1", "g", $0)); nlines++ }'
}
print_info_ffmpeglink() {
# This func must only print a ';'-separated list of libs...
if [ -z "$DISTRO" ]; then
ERROR "Failed to detect distribution type"
exit 1
fi
# Create list of packages from which to get libs names...
_packages=""
if [ "$THEORA_USE" = true ]; then
_packages="$_packages $THEORA_DEV"
fi
if [ "$VORBIS_USE" = true ]; then
_packages="$_packages $VORBIS_DEV"
fi
if [ "$OGG_USE" = true ]; then
_packages="$_packages $OGG_DEV"
fi
if [ "$XVID_USE" = true ]; then
_packages="$_packages $XVID_DEV"
fi
if [ "$VPX_USE" = true ]; then
_packages="$_packages $VPX_DEV"
fi
if [ "$OPUS_USE" = true ]; then
_packages="$_packages $OPUS_DEV"
fi
if [ "$MP3LAME_USE" = true ]; then
_packages="$_packages $MP3LAME_DEV"
fi
if [ "$X264_USE" = true ]; then
_packages="$_packages $X264_DEV"
fi
if [ "$OPENJPEG_USE" = true ]; then
_packages="$_packages $OPENJPEG_DEV"
fi
if [ "$DISTRO" = "DEB" ]; then
print_info_ffmpeglink_DEB
elif [ "$DISTRO" = "RPM" ]; then
print_info_ffmpeglink_RPM
elif [ "$DISTRO" = "ARCH" ]; then
print_info_ffmpeglink_ARCH
# XXX TODO!
else
PRINT "<Could not determine additional link libraries needed for ffmpeg, replace this by valid list of libs...>"
fi
}
print_info() {
PRINT ""
PRINT ""
@@ -5731,7 +5801,7 @@ print_info() {
PRINT "If you're using CMake add this to your configuration flags:"
_buildargs="-U *SNDFILE* -U PYTHON* -U *BOOST* -U *Boost* -U *TBB*"
_buildargs="$_buildargs -U *OPENCOLORIO* -U *OPENEXR* -U *OPENIMAGEIO* -U *LLVM* -U *CLANG* -U *CYCLES*"
_buildargs="$_buildargs -U *OPENCOLORIO* -U *OPENEXR* -U *OPENIMAGEIO* -U *LLVM* -U *CYCLES*"
_buildargs="$_buildargs -U *OPENSUBDIV* -U *OPENVDB* -U *BLOSC* -U *COLLADA* -U *FFMPEG* -U *ALEMBIC* -U *USD*"
_buildargs="$_buildargs -U *EMBREE* -U *OPENIMAGEDENOISE* -U *OPENXR*"
@@ -5932,10 +6002,12 @@ print_info() {
if [ "$FFMPEG_SKIP" = false ]; then
_1="-D WITH_CODEC_FFMPEG=ON"
_2="-D FFMPEG_LIBRARIES='avformat;avcodec;avutil;avdevice;swscale;swresample;lzma;rt;`print_info_ffmpeglink`'"
PRINT " $_1"
_buildargs="$_buildargs $_1"
PRINT " $_2"
_buildargs="$_buildargs $_1 $_2"
if [ -d $INST/ffmpeg ]; then
_1="-D FFMPEG_ROOT_DIR=$INST/ffmpeg"
_1="-D FFMPEG=$INST/ffmpeg"
PRINT " $_1"
_buildargs="$_buildargs $_1"
fi

View File

@@ -197,38 +197,3 @@ index 67ec0d15f..6dc3e85a0 100644
#else
#error Unknown architecture.
#endif
diff --git a/pxr/base/arch/demangle.cpp b/pxr/base/arch/demangle.cpp
index 67ec0d15f..6dc3e85a0 100644
--- a/pxr/base/arch/demangle.cpp
+++ b/pxr/base/arch/demangle.cpp
@@ -36,6 +36,7 @@
#if (ARCH_COMPILER_GCC_MAJOR == 3 && ARCH_COMPILER_GCC_MINOR >= 1) || \
ARCH_COMPILER_GCC_MAJOR > 3 || defined(ARCH_COMPILER_CLANG)
#define _AT_LEAST_GCC_THREE_ONE_OR_CLANG
+#include <cxxabi.h>
#endif
PXR_NAMESPACE_OPEN_SCOPE
@@ -138,7 +139,6 @@
#endif
#if defined(_AT_LEAST_GCC_THREE_ONE_OR_CLANG)
-#include <cxxabi.h>
/*
* This routine doesn't work when you get to gcc3.4.
diff --git a/pxr/base/work/singularTask.h b/pxr/base/work/singularTask.h
index 67ec0d15f..6dc3e85a0 100644
--- a/pxr/base/work/singularTask.h
+++ b/pxr/base/work/singularTask.h
@@ -120,7 +120,7 @@
// case we go again to ensure the task can do whatever it
// was awakened to do. Once we successfully take the count
// to zero, we stop.
- size_t old = count;
+ std::size_t old = count;
do { _fn(); } while (
!count.compare_exchange_strong(old, 0));
});

View File

@@ -1,14 +1,21 @@
@echo off
if NOT "%1" == "" (
if "%1" == "2017" (
echo "Building for VS2017"
set VSVER=15.0
set VSVER_SHORT=15
set BuildDir=VS15
if "%1" == "2013" (
echo "Building for VS2013"
set VSVER=12.0
set VSVER_SHORT=12
set BuildDir=VS12
goto par2
)
if "%1" == "2019" (
echo "Building for VS2019"
if "%1" == "2015" (
echo "Building for VS2015"
set VSVER=14.0
set VSVER_SHORT=14
set BuildDir=VS14
goto par2
)
if "%1" == "2017" (
echo "Building for VS2017"
set VSVER=15.0
set VSVER_SHORT=15
set BuildDir=VS15
@@ -18,22 +25,40 @@ if NOT "%1" == "" (
)
:usage
Echo Usage build_deps 2017/2019 x64
Echo Usage build_deps 2013/2015/2017 x64/x86
goto exit
:par2
if NOT "%2" == "" (
if "%2" == "x86" (
echo "Building for x86"
set HARVESTROOT=Windows_vc
set ARCH=86
if "%1" == "2013" (
set CMAKE_BUILDER=Visual Studio 12 2013
)
if "%1" == "2015" (
set CMAKE_BUILDER=Visual Studio 14 2015
)
if "%1" == "2017" (
set CMAKE_BUILDER=Visual Studio 15 2017
)
goto start
)
if "%2" == "x64" (
echo "Building for x64"
set HARVESTROOT=Win64_vc
set ARCH=64
if "%1" == "2019" (
set CMAKE_BUILDER=Visual Studio 16 2019
set CMAKE_BUILD_ARCH=-A x64
if "%1" == "2013" (
set CMAKE_BUILDER=Visual Studio 12 2013 Win64
)
if "%1" == "2015" (
set CMAKE_BUILDER=Visual Studio 14 2015 Win64
)
if "%1" == "2017" (
set CMAKE_BUILDER=Visual Studio 15 2017 Win64
set CMAKE_BUILD_ARCH=
)
goto start
)
)
@@ -95,7 +120,7 @@ set path=%BUILD_DIR%\downloads\mingw\mingw64\msys\1.0\bin\;%BUILD_DIR%\downloads
mkdir %STAGING%\%BuildDir%%ARCH%R
cd %Staging%\%BuildDir%%ARCH%R
echo %DATE% %TIME% : Start > %StatusFile%
cmake -G "%CMAKE_BUILDER%" %CMAKE_BUILD_ARCH% -Thost=x64 %SOURCE_DIR% -DPACKAGE_DIR=%BUILD_DIR%/packages -DDOWNLOAD_DIR=%BUILD_DIR%/downloads -DBUILD_MODE=Release -DHARVEST_TARGET=%HARVEST_DIR%/%HARVESTROOT%%VSVER_SHORT%/
cmake -G "%CMAKE_BUILDER%" -Thost=x64 %SOURCE_DIR% -DPACKAGE_DIR=%BUILD_DIR%/packages -DDOWNLOAD_DIR=%BUILD_DIR%/downloads -DBUILD_MODE=Release -DHARVEST_TARGET=%HARVEST_DIR%/%HARVESTROOT%%VSVER_SHORT%/
echo %DATE% %TIME% : Release Configuration done >> %StatusFile%
if "%dobuild%" == "1" (
msbuild /m "ll.vcxproj" /p:Configuration=Release /fl /flp:logfile=BlenderDeps_llvm.log;Verbosity=normal
@@ -108,7 +133,7 @@ if "%NODEBUG%" == "1" goto exit
cd %BUILD_DIR%
mkdir %STAGING%\%BuildDir%%ARCH%D
cd %Staging%\%BuildDir%%ARCH%D
cmake -G "%CMAKE_BUILDER%" %CMAKE_BUILD_ARCH% -Thost=x64 %SOURCE_DIR% -DPACKAGE_DIR=%BUILD_DIR%/packages -DDOWNLOAD_DIR=%BUILD_DIR%/downloads -DCMAKE_BUILD_TYPE=Debug -DBUILD_MODE=Debug -DHARVEST_TARGET=%HARVEST_DIR%/%HARVESTROOT%%VSVER_SHORT%/ %CMAKE_DEBUG_OPTIONS%
cmake -G "%CMAKE_BUILDER%" -Thost=x64 %SOURCE_DIR% -DPACKAGE_DIR=%BUILD_DIR%/packages -DDOWNLOAD_DIR=%BUILD_DIR%/downloads -DCMAKE_BUILD_TYPE=Debug -DBUILD_MODE=Debug -DHARVEST_TARGET=%HARVEST_DIR%/%HARVESTROOT%%VSVER_SHORT%/ %CMAKE_DEBUG_OPTIONS%
echo %DATE% %TIME% : Debug Configuration done >> %StatusFile%
if "%dobuild%" == "1" (
msbuild /m "ll.vcxproj" /p:Configuration=Debug /fl /flp:logfile=BlenderDeps_llvm.log;;Verbosity=normal

View File

@@ -1,83 +0,0 @@
# - Find Brotli library (compression for freetype/woff2).
# This module defines
# BROTLI_INCLUDE_DIRS, where to find Brotli headers, Set when
# BROTLI_INCLUDE_DIR is found.
# BROTLI_LIBRARIES, libraries to link against to use Brotli.
# BROTLI_ROOT_DIR, The base directory to search for Brotli.
# This can also be an environment variable.
# BROTLI_FOUND, If false, do not try to use Brotli.
#
#=============================================================================
# Copyright 2022 Blender Foundation.
#
# Distributed under the OSI-approved BSD 3-Clause License,
# see accompanying file BSD-3-Clause-license.txt for details.
#=============================================================================
# If BROTLI_ROOT_DIR was defined in the environment, use it.
IF(NOT BROTLI_ROOT_DIR AND NOT $ENV{BROTLI_ROOT_DIR} STREQUAL "")
SET(BROTLI_ROOT_DIR $ENV{BROTLI_ROOT_DIR})
ENDIF()
SET(_BROTLI_SEARCH_DIRS
${BROTLI_ROOT_DIR}
)
FIND_PATH(BROTLI_INCLUDE_DIR
NAMES
brotli/decode.h
HINTS
${_BROTLI_SEARCH_DIRS}
PATH_SUFFIXES
include
DOC "Brotli header files"
)
FIND_LIBRARY(BROTLI_LIBRARY_COMMON
NAMES
# Some builds use a special `-static` postfix in their static libraries names.
brotlicommon-static
brotlicommon
HINTS
${_BROTLI_SEARCH_DIRS}
PATH_SUFFIXES
lib64 lib lib/static
DOC "Brotli static common library"
)
FIND_LIBRARY(BROTLI_LIBRARY_DEC
NAMES
# Some builds use a special `-static` postfix in their static libraries names.
brotlidec-static
brotlidec
HINTS
${_BROTLI_SEARCH_DIRS}
PATH_SUFFIXES
lib64 lib lib/static
DOC "Brotli static decode library"
)
IF(${BROTLI_LIBRARY_COMMON_NOTFOUND} or ${BROTLI_LIBRARY_DEC_NOTFOUND})
set(BROTLI_FOUND FALSE)
ELSE()
# handle the QUIETLY and REQUIRED arguments and set BROTLI_FOUND to TRUE if
# all listed variables are TRUE
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(Brotli DEFAULT_MSG BROTLI_LIBRARY_COMMON BROTLI_LIBRARY_DEC BROTLI_INCLUDE_DIR)
IF(BROTLI_FOUND)
get_filename_component(BROTLI_LIBRARY_DIR ${BROTLI_LIBRARY_COMMON} DIRECTORY)
SET(BROTLI_INCLUDE_DIRS ${BROTLI_INCLUDE_DIR})
SET(BROTLI_LIBRARIES ${BROTLI_LIBRARY_DEC} ${BROTLI_LIBRARY_COMMON})
ENDIF()
ENDIF()
MARK_AS_ADVANCED(
BROTLI_INCLUDE_DIR
BROTLI_LIBRARY_COMMON
BROTLI_LIBRARY_DEC
BROTLI_LIBRARY_DIR
)
UNSET(_BROTLI_SEARCH_DIRS)

View File

@@ -33,8 +33,6 @@ if(NOT FFMPEG_FIND_COMPONENTS)
avfilter
avformat
avutil
swscale
swresample
)
endif()
@@ -52,9 +50,9 @@ foreach(_component ${FFMPEG_FIND_COMPONENTS})
string(TOUPPER ${_component} _upper_COMPONENT)
find_library(FFMPEG_${_upper_COMPONENT}_LIBRARY
NAMES
${_component}
${_upper_COMPONENT}
HINTS
${_ffmpeg_SEARCH_DIRS}
${LIBDIR}/ffmpeg
PATH_SUFFIXES
lib64 lib
)

View File

@@ -21,7 +21,7 @@ ENDIF()
SET(_optix_SEARCH_DIRS
${OPTIX_ROOT_DIR}
"$ENV{PROGRAMDATA}/NVIDIA Corporation/OptiX SDK 7.3.0"
"$ENV{PROGRAMDATA}/NVIDIA Corporation/OptiX SDK 7.0.0"
)
FIND_PATH(OPTIX_INCLUDE_DIR

View File

@@ -34,7 +34,7 @@ IF(NOT PYTHON_ROOT_DIR AND NOT $ENV{PYTHON_ROOT_DIR} STREQUAL "")
SET(PYTHON_ROOT_DIR $ENV{PYTHON_ROOT_DIR})
ENDIF()
SET(PYTHON_VERSION 3.10 CACHE STRING "Python Version (major and minor only)")
SET(PYTHON_VERSION 3.9 CACHE STRING "Python Version (major and minor only)")
MARK_AS_ADVANCED(PYTHON_VERSION)

View File

@@ -114,7 +114,7 @@ def is_c_header(filename: str) -> bool:
def is_c(filename: str) -> bool:
ext = splitext(filename)[1]
return (ext in {".c", ".cpp", ".cxx", ".m", ".mm", ".rc", ".cc", ".inl", ".metal"})
return (ext in {".c", ".cpp", ".cxx", ".m", ".mm", ".rc", ".cc", ".inl"})
def is_c_any(filename: str) -> bool:

View File

@@ -19,6 +19,9 @@ set(WITH_CODEC_SNDFILE OFF CACHE BOOL "" FORCE)
set(WITH_COMPOSITOR OFF CACHE BOOL "" FORCE)
set(WITH_COREAUDIO OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES_DEVICE_OPTIX OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES_EMBREE OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES_OSL OFF CACHE BOOL "" FORCE)
set(WITH_DRACO OFF CACHE BOOL "" FORCE)
set(WITH_FFTW3 OFF CACHE BOOL "" FORCE)
set(WITH_FREESTYLE OFF CACHE BOOL "" FORCE)

View File

@@ -61,7 +61,6 @@ set(WITH_MEM_JEMALLOC ON CACHE BOOL "" FORCE)
# platform dependent options
if(APPLE)
set(WITH_COREAUDIO ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_DEVICE_METAL ON CACHE BOOL "" FORCE)
endif()
if(NOT WIN32)
set(WITH_JACK ON CACHE BOOL "" FORCE)

View File

@@ -488,6 +488,7 @@ function(blender_add_test_executable
include_directories(${includes})
include_directories(${includes_sys})
setup_libdirs()
BLENDER_SRC_GTEST_EX(
NAME ${name}
@@ -524,6 +525,83 @@ function(setup_heavy_lib_pool)
endif()
endfunction()
function(SETUP_LIBDIRS)
# NOTE: For all new libraries, use absolute library paths.
# This should eventually be phased out.
# APPLE platform uses full paths for linking libraries, and avoids link_directories.
if(NOT MSVC AND NOT APPLE)
link_directories(${JPEG_LIBPATH} ${PNG_LIBPATH} ${ZLIB_LIBPATH} ${FREETYPE_LIBPATH})
if(WITH_PYTHON) # AND NOT WITH_PYTHON_MODULE # WIN32 needs
link_directories(${PYTHON_LIBPATH})
endif()
if(WITH_SDL AND NOT WITH_SDL_DYNLOAD)
link_directories(${SDL_LIBPATH})
endif()
if(WITH_CODEC_FFMPEG)
link_directories(${FFMPEG_LIBPATH})
endif()
if(WITH_IMAGE_OPENEXR)
link_directories(${OPENEXR_LIBPATH})
endif()
if(WITH_IMAGE_TIFF)
link_directories(${TIFF_LIBPATH})
endif()
if(WITH_BOOST)
link_directories(${BOOST_LIBPATH})
endif()
if(WITH_OPENIMAGEIO)
link_directories(${OPENIMAGEIO_LIBPATH})
endif()
if(WITH_OPENIMAGEDENOISE)
link_directories(${OPENIMAGEDENOISE_LIBPATH})
endif()
if(WITH_OPENCOLORIO)
link_directories(${OPENCOLORIO_LIBPATH})
endif()
if(WITH_OPENVDB)
link_directories(${OPENVDB_LIBPATH})
endif()
if(WITH_OPENAL)
link_directories(${OPENAL_LIBPATH})
endif()
if(WITH_JACK AND NOT WITH_JACK_DYNLOAD)
link_directories(${JACK_LIBPATH})
endif()
if(WITH_PULSEAUDIO AND NOT WITH_PULSEAUDIO_DYNLOAD)
link_directories(${LIBPULSE_LIBPATH})
endif()
if(WITH_CODEC_SNDFILE)
link_directories(${LIBSNDFILE_LIBPATH})
endif()
if(WITH_FFTW3)
link_directories(${FFTW3_LIBPATH})
endif()
if(WITH_OPENCOLLADA)
link_directories(${OPENCOLLADA_LIBPATH})
# # Never set
# link_directories(${PCRE_LIBPATH})
# link_directories(${EXPAT_LIBPATH})
endif()
if(WITH_LLVM)
link_directories(${LLVM_LIBPATH})
endif()
if(WITH_ALEMBIC)
link_directories(${ALEMBIC_LIBPATH})
endif()
if(WITH_GMP)
link_directories(${GMP_LIBPATH})
endif()
if(WIN32 AND NOT UNIX)
link_directories(${PTHREADS_LIBPATH})
endif()
endif()
endfunction()
# Platform specific linker flags for targets.
function(setup_platform_linker_flags
target)
@@ -1197,20 +1275,43 @@ endfunction()
macro(openmp_delayload
projectname
)
if(MSVC)
if(WITH_OPENMP)
if(MSVC_CLANG)
set(OPENMP_DLL_NAME "libomp")
elseif(MSVC_VERSION EQUAL 1800)
set(OPENMP_DLL_NAME "vcomp120")
else()
set(OPENMP_DLL_NAME "vcomp140")
if(MSVC)
if(WITH_OPENMP)
if(MSVC_CLANG)
set(OPENMP_DLL_NAME "libomp")
elseif(MSVC_VERSION EQUAL 1800)
set(OPENMP_DLL_NAME "vcomp120")
else()
set(OPENMP_DLL_NAME "vcomp140")
endif()
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_RELEASE " /DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_DEBUG " /DELAYLOAD:${OPENMP_DLL_NAME}d.dll delayimp.lib")
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_RELWITHDEBINFO " /DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_MINSIZEREL " /DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
endif()
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_RELEASE " /DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_DEBUG " /DELAYLOAD:${OPENMP_DLL_NAME}d.dll delayimp.lib")
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_RELWITHDEBINFO " /DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
set_property(TARGET ${projectname} APPEND_STRING PROPERTY LINK_FLAGS_MINSIZEREL " /DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
endif()
endmacro()
macro(blender_precompile_headers target cpp header)
if(MSVC)
# get the name for the pch output file
get_filename_component(pchbase ${cpp} NAME_WE)
set(pchfinal "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/${pchbase}.pch")
# mark the cpp as the one outputting the pch
set_property(SOURCE ${cpp} APPEND PROPERTY OBJECT_OUTPUTS "${pchfinal}")
# get all sources for the target
get_target_property(sources ${target} SOURCES)
# make all sources depend on the pch to enforce the build order
foreach(src ${sources})
set_property(SOURCE ${src} APPEND PROPERTY OBJECT_DEPENDS "${pchfinal}")
endforeach()
target_sources(${target} PRIVATE ${cpp} ${header})
set_target_properties(${target} PROPERTIES COMPILE_FLAGS "/Yu${header} /Fp${pchfinal} /FI${header}")
set_source_files_properties(${cpp} PROPERTIES COMPILE_FLAGS "/Yc${header} /Fp${pchfinal}")
endif()
endmacro()

View File

@@ -128,20 +128,25 @@ if(WITH_CODEC_SNDFILE)
endif()
if(WITH_PYTHON)
# Use precompiled libraries by default.
set(PYTHON_VERSION 3.10)
# we use precompiled libraries for py 3.9 and up by default
set(PYTHON_VERSION 3.9)
if(NOT WITH_PYTHON_MODULE AND NOT WITH_PYTHON_FRAMEWORK)
# Normally cached but not since we include them with blender.
# normally cached but not since we include them with blender
set(PYTHON_INCLUDE_DIR "${LIBDIR}/python/include/python${PYTHON_VERSION}")
set(PYTHON_EXECUTABLE "${LIBDIR}/python/bin/python${PYTHON_VERSION}")
set(PYTHON_LIBRARY ${LIBDIR}/python/lib/libpython${PYTHON_VERSION}.a)
set(PYTHON_LIBPATH "${LIBDIR}/python/lib/python${PYTHON_VERSION}")
# set(PYTHON_LINKFLAGS "-u _PyMac_Error") # won't build with this enabled
else()
# Module must be compiled against Python framework.
# module must be compiled against Python framework
set(_py_framework "/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}")
set(PYTHON_INCLUDE_DIR "${_py_framework}/include/python${PYTHON_VERSION}")
set(PYTHON_EXECUTABLE "${_py_framework}/bin/python${PYTHON_VERSION}")
set(PYTHON_LIBPATH "${_py_framework}/lib/python${PYTHON_VERSION}")
# set(PYTHON_LIBRARY python${PYTHON_VERSION})
# set(PYTHON_LINKFLAGS "-u _PyMac_Error -framework Python") # won't build with this enabled
unset(_py_framework)
endif()
@@ -161,18 +166,13 @@ if(WITH_FFTW3)
find_package(Fftw3)
endif()
# FreeType compiled with Brotli compression for woff2.
find_package(Freetype REQUIRED)
list(APPEND FREETYPE_LIBRARIES
${LIBDIR}/brotli/lib/libbrotlicommon-static.a
${LIBDIR}/brotli/lib/libbrotlidec-static.a)
if(WITH_IMAGE_OPENEXR)
find_package(OpenEXR)
endif()
if(WITH_CODEC_FFMPEG)
set(FFMPEG_ROOT_DIR ${LIBDIR}/ffmpeg)
set(FFMPEG_FIND_COMPONENTS
avcodec avdevice avformat avutil
mp3lame ogg opus swresample swscale
@@ -257,6 +257,9 @@ if(WITH_BOOST)
if(WITH_INTERNATIONAL)
list(APPEND _boost_FIND_COMPONENTS locale)
endif()
if(WITH_CYCLES_NETWORK)
list(APPEND _boost_FIND_COMPONENTS serialization)
endif()
if(WITH_OPENVDB)
list(APPEND _boost_FIND_COMPONENTS iostreams)
endif()
@@ -336,7 +339,7 @@ if(WITH_LLVM)
endif()
if(WITH_CYCLES AND WITH_CYCLES_OSL)
if(WITH_CYCLES_OSL)
set(CYCLES_OSL ${LIBDIR}/osl)
find_library(OSL_LIB_EXEC NAMES oslexec PATHS ${CYCLES_OSL}/lib)
@@ -356,7 +359,7 @@ if(WITH_CYCLES AND WITH_CYCLES_OSL)
endif()
endif()
if(WITH_CYCLES AND WITH_CYCLES_EMBREE)
if(WITH_CYCLES_EMBREE)
find_package(Embree 3.8.0 REQUIRED)
# Increase stack size for Embree, only works for executables.
if(NOT WITH_PYTHON_MODULE)
@@ -479,11 +482,8 @@ string(APPEND PLATFORM_LINKFLAGS " -stdlib=libc++")
# Suppress ranlib "has no symbols" warnings (workaround for T48250)
set(CMAKE_C_ARCHIVE_CREATE "<CMAKE_AR> Scr <TARGET> <LINK_FLAGS> <OBJECTS>")
set(CMAKE_CXX_ARCHIVE_CREATE "<CMAKE_AR> Scr <TARGET> <LINK_FLAGS> <OBJECTS>")
# llvm-ranlib doesn't support this flag. Xcode's libtool does.
if(NOT ${CMAKE_RANLIB} MATCHES ".*llvm-ranlib$")
set(CMAKE_C_ARCHIVE_FINISH "<CMAKE_RANLIB> -no_warning_for_no_symbols -c <TARGET>")
set(CMAKE_CXX_ARCHIVE_FINISH "<CMAKE_RANLIB> -no_warning_for_no_symbols -c <TARGET>")
endif()
set(CMAKE_C_ARCHIVE_FINISH "<CMAKE_RANLIB> -no_warning_for_no_symbols -c <TARGET>")
set(CMAKE_CXX_ARCHIVE_FINISH "<CMAKE_RANLIB> -no_warning_for_no_symbols -c <TARGET>")
if(WITH_COMPILER_CCACHE)
if(NOT CMAKE_GENERATOR STREQUAL "Xcode")
@@ -510,6 +510,3 @@ list(APPEND CMAKE_BUILD_RPATH "${OpenMP_LIBRARY_DIR}")
set(CMAKE_SKIP_INSTALL_RPATH FALSE)
list(APPEND CMAKE_INSTALL_RPATH "@loader_path/../Resources/${BLENDER_VERSION}/lib")
# Same as `CFBundleIdentifier` in Info.plist.
set(CMAKE_XCODE_ATTRIBUTE_PRODUCT_BUNDLE_IDENTIFIER "org.blenderfoundation.blender")

View File

@@ -96,7 +96,7 @@ else()
# Detect SDK version to use.
if(NOT DEFINED OSX_SYSTEM)
execute_process(
COMMAND xcrun --sdk macosx --show-sdk-version
COMMAND xcrun --show-sdk-version
OUTPUT_VARIABLE OSX_SYSTEM
OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()

View File

@@ -18,7 +18,7 @@
# All rights reserved.
# ***** END GPL LICENSE BLOCK *****
# Libraries configuration for any *nix system including Linux and Unix (excluding APPLE).
# Libraries configuration for any *nix system including Linux and Unix.
# Detect precompiled library directory
if(NOT DEFINED LIBDIR)
@@ -48,9 +48,6 @@ if(NOT DEFINED LIBDIR)
unset(LIBDIR_CENTOS7_ABI)
endif()
# Support restoring this value once pre-compiled libraries have been handled.
set(WITH_STATIC_LIBS_INIT ${WITH_STATIC_LIBS})
if(EXISTS ${LIBDIR})
message(STATUS "Using pre-compiled LIBDIR: ${LIBDIR}")
@@ -103,22 +100,7 @@ find_package_wrapper(JPEG REQUIRED)
find_package_wrapper(PNG REQUIRED)
find_package_wrapper(ZLIB REQUIRED)
find_package_wrapper(Zstd REQUIRED)
if(NOT WITH_SYSTEM_FREETYPE)
# FreeType compiled with Brotli compression for woff2.
find_package_wrapper(Freetype REQUIRED)
if(EXISTS ${LIBDIR})
find_package_wrapper(Brotli REQUIRED)
# NOTE: This is done on WIN32 & APPLE but fails on some Linux systems.
# See: https://devtalk.blender.org/t/22536
# So `BROTLI_LIBRARIES` need to be added directly after `FREETYPE_LIBRARIES`.
#
# list(APPEND FREETYPE_LIBRARIES
# ${BROTLI_LIBRARIES}
# )
endif()
endif()
find_package_wrapper(Freetype REQUIRED)
if(WITH_PYTHON)
# No way to set py35, remove for now.
@@ -196,30 +178,26 @@ endif()
if(WITH_CODEC_FFMPEG)
if(EXISTS ${LIBDIR})
set(FFMPEG_ROOT_DIR ${LIBDIR}/ffmpeg)
# Override FFMPEG components to also include static library dependencies
# included with precompiled libraries, and to ensure correct link order.
set(FFMPEG_FIND_COMPONENTS
avformat avcodec avdevice avutil swresample swscale
sndfile
FLAC
mp3lame
opus
theora theoradec theoraenc
vorbis vorbisenc vorbisfile ogg
vpx
x264
xvidcore)
elseif(FFMPEG)
# Old cache variable used for root dir, convert to new standard.
set(FFMPEG_ROOT_DIR ${FFMPEG})
# For precompiled lib directory, all ffmpeg dependencies are in the same folder
file(GLOB ffmpeg_libs ${LIBDIR}/ffmpeg/lib/*.a ${LIBDIR}/sndfile/lib/*.a)
set(FFMPEG ${LIBDIR}/ffmpeg CACHE PATH "FFMPEG Directory")
set(FFMPEG_LIBRARIES ${ffmpeg_libs} ${ffmpeg_libs} CACHE STRING "FFMPEG Libraries")
else()
set(FFMPEG /usr CACHE PATH "FFMPEG Directory")
set(FFMPEG_LIBRARIES avformat avcodec avutil avdevice swscale CACHE STRING "FFMPEG Libraries")
endif()
find_package(FFmpeg)
if(NOT FFMPEG_FOUND)
set(WITH_CODEC_FFMPEG OFF)
message(STATUS "FFmpeg not found, disabling it")
mark_as_advanced(FFMPEG)
# lame, but until we have proper find module for ffmpeg
set(FFMPEG_INCLUDE_DIRS ${FFMPEG}/include)
if(EXISTS "${FFMPEG}/include/ffmpeg/")
list(APPEND FFMPEG_INCLUDE_DIRS "${FFMPEG}/include/ffmpeg")
endif()
# end lameness
mark_as_advanced(FFMPEG_LIBRARIES)
set(FFMPEG_LIBPATH ${FFMPEG}/lib)
endif()
if(WITH_FFTW3)
@@ -263,7 +241,7 @@ if(WITH_INPUT_NDOF)
endif()
endif()
if(WITH_CYCLES AND WITH_CYCLES_OSL)
if(WITH_CYCLES_OSL)
set(CYCLES_OSL ${LIBDIR}/osl CACHE PATH "Path to OpenShadingLanguage installation")
if(EXISTS ${CYCLES_OSL} AND NOT OSL_ROOT)
set(OSL_ROOT ${CYCLES_OSL})
@@ -336,7 +314,7 @@ if(WITH_BOOST)
endif()
set(Boost_USE_MULTITHREADED ON)
set(__boost_packages filesystem regex thread date_time)
if(WITH_CYCLES AND WITH_CYCLES_OSL)
if(WITH_CYCLES_OSL)
if(NOT (${OSL_LIBRARY_VERSION_MAJOR} EQUAL "1" AND ${OSL_LIBRARY_VERSION_MINOR} LESS "6"))
list(APPEND __boost_packages wave)
else()
@@ -345,6 +323,9 @@ if(WITH_BOOST)
if(WITH_INTERNATIONAL)
list(APPEND __boost_packages locale)
endif()
if(WITH_CYCLES_NETWORK)
list(APPEND __boost_packages serialization)
endif()
if(WITH_OPENVDB)
list(APPEND __boost_packages iostreams)
endif()
@@ -422,7 +403,7 @@ if(WITH_OPENCOLORIO)
endif()
endif()
if(WITH_CYCLES AND WITH_CYCLES_EMBREE)
if(WITH_CYCLES_EMBREE)
find_package(Embree 3.8.0 REQUIRED)
endif()
@@ -554,21 +535,6 @@ add_definitions(-D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE
#
# Keep last, so indirectly linked libraries don't override our own pre-compiled libs.
if(EXISTS ${LIBDIR})
# Clear the prefix path as it causes the `LIBDIR` to override system locations.
unset(CMAKE_PREFIX_PATH)
# Since the pre-compiled `LIBDIR` directories have been handled, don't prefer static libraries.
set(WITH_STATIC_LIBS ${WITH_STATIC_LIBS_INIT})
endif()
if(WITH_SYSTEM_FREETYPE)
find_package_wrapper(Freetype)
if(NOT FREETYPE_FOUND)
message(FATAL_ERROR "Failed finding system FreeType version!")
endif()
endif()
if(WITH_LZO AND WITH_SYSTEM_LZO)
find_package_wrapper(LZO)
if(NOT LZO_FOUND)
@@ -681,9 +647,6 @@ endif()
# ----------------------------------------------------------------------------
# Compilers
# Only set the linker once.
set(_IS_LINKER_DEFAULT ON)
# GNU Compiler
if(CMAKE_COMPILER_IS_GNUCC)
# ffp-contract=off:
@@ -702,89 +665,26 @@ if(CMAKE_COMPILER_IS_GNUCC)
string(PREPEND CMAKE_CXX_FLAGS_RELWITHDEBINFO "${GCC_EXTRA_FLAGS_RELEASE} ")
unset(GCC_EXTRA_FLAGS_RELEASE)
# NOTE(@campbellbarton): Eventually mold will be able to use `-fuse-ld=mold`,
# however at the moment this only works for GCC 12.1+ (unreleased at time of writing).
# So a workaround is used here "-B" which points to another path to find system commands
# such as `ld`.
if(WITH_LINKER_MOLD AND _IS_LINKER_DEFAULT)
find_program(MOLD_BIN "mold")
mark_as_advanced(MOLD_BIN)
if(NOT MOLD_BIN)
message(STATUS "The \"mold\" binary could not be found, using system linker.")
set(WITH_LINKER_MOLD OFF)
else()
# By default mold installs the binary to:
# - `{PREFIX}/bin/mold` as well as a symbolic-link in...
# - `{PREFIX}/lib/mold/ld`.
# (where `PREFIX` is typically `/usr/`).
#
# This block of code finds `{PREFIX}/lib/mold` from the `mold` binary.
# Other methods of searching for the path could also be made to work,
# we could even make our own directory and symbolic-link, however it's more
# convenient to use the one provided by mold.
#
# Use the binary path to "mold", to find the common prefix which contains "lib/mold".
# The parent directory: e.g. `/usr/bin/mold` -> `/usr/bin/`.
get_filename_component(MOLD_PREFIX "${MOLD_BIN}" DIRECTORY)
# The common prefix path: e.g. `/usr/bin/` -> `/usr/` to use as a hint.
get_filename_component(MOLD_PREFIX "${MOLD_PREFIX}" DIRECTORY)
# Find `{PREFIX}/lib/mold/ld`, store the directory component (without the `ld`).
# Then pass `-B {PREFIX}/lib/mold` to GCC so the `ld` located there overrides the default.
find_path(
MOLD_BIN_DIR "ld"
HINTS "${MOLD_PREFIX}"
# The default path is `libexec`, Arch Linux for e.g.
# replaces this with `lib` so check both.
PATH_SUFFIXES "libexec/mold" "lib/mold" "lib64/mold"
NO_DEFAULT_PATH
NO_CACHE
)
if(NOT MOLD_BIN_DIR)
message(STATUS
"The mold linker could not find the directory containing the linker command "
"(typically "
"\"${MOLD_PREFIX}/libexec/mold/ld\") or "
"\"${MOLD_PREFIX}/lib/mold/ld\") using system linker.")
set(WITH_LINKER_MOLD OFF)
endif()
unset(MOLD_PREFIX)
endif()
if(WITH_LINKER_MOLD)
# GCC will search for `ld` in this directory first.
string(APPEND CMAKE_EXE_LINKER_FLAGS " -B \"${MOLD_BIN_DIR}\"")
string(APPEND CMAKE_SHARED_LINKER_FLAGS " -B \"${MOLD_BIN_DIR}\"")
string(APPEND CMAKE_MODULE_LINKER_FLAGS " -B \"${MOLD_BIN_DIR}\"")
set(_IS_LINKER_DEFAULT OFF)
endif()
unset(MOLD_BIN)
unset(MOLD_BIN_DIR)
endif()
if(WITH_LINKER_GOLD AND _IS_LINKER_DEFAULT)
if(WITH_LINKER_GOLD)
execute_process(
COMMAND ${CMAKE_C_COMPILER} -fuse-ld=gold -Wl,--version
ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
if("${LD_VERSION}" MATCHES "GNU gold")
string(APPEND CMAKE_EXE_LINKER_FLAGS " -fuse-ld=gold")
string(APPEND CMAKE_SHARED_LINKER_FLAGS " -fuse-ld=gold")
string(APPEND CMAKE_MODULE_LINKER_FLAGS " -fuse-ld=gold")
set(_IS_LINKER_DEFAULT OFF)
string(APPEND CMAKE_C_FLAGS " -fuse-ld=gold")
string(APPEND CMAKE_CXX_FLAGS " -fuse-ld=gold")
else()
message(STATUS "GNU gold linker isn't available, using the default system linker.")
endif()
unset(LD_VERSION)
endif()
if(WITH_LINKER_LLD AND _IS_LINKER_DEFAULT)
if(WITH_LINKER_LLD)
execute_process(
COMMAND ${CMAKE_C_COMPILER} -fuse-ld=lld -Wl,--version
ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
if("${LD_VERSION}" MATCHES "LLD")
string(APPEND CMAKE_EXE_LINKER_FLAGS " -fuse-ld=lld")
string(APPEND CMAKE_SHARED_LINKER_FLAGS " -fuse-ld=lld")
string(APPEND CMAKE_MODULE_LINKER_FLAGS " -fuse-ld=lld")
set(_IS_LINKER_DEFAULT OFF)
string(APPEND CMAKE_C_FLAGS " -fuse-ld=lld")
string(APPEND CMAKE_CXX_FLAGS " -fuse-ld=lld")
else()
message(STATUS "LLD linker isn't available, using the default system linker.")
endif()
@@ -794,28 +694,6 @@ if(CMAKE_COMPILER_IS_GNUCC)
# CLang is the same as GCC for now.
elseif(CMAKE_C_COMPILER_ID MATCHES "Clang")
set(PLATFORM_CFLAGS "-pipe -fPIC -funsigned-char -fno-strict-aliasing")
if(WITH_LINKER_MOLD AND _IS_LINKER_DEFAULT)
find_program(MOLD_BIN "mold")
mark_as_advanced(MOLD_BIN)
if(NOT MOLD_BIN)
message(STATUS "The \"mold\" binary could not be found, using system linker.")
set(WITH_LINKER_MOLD OFF)
else()
if(CMAKE_C_COMPILER_VERSION VERSION_GREATER_EQUAL 12.0)
string(APPEND CMAKE_EXE_LINKER_FLAGS " --ld-path=\"${MOLD_BIN}\"")
string(APPEND CMAKE_SHARED_LINKER_FLAGS " --ld-path=\"${MOLD_BIN}\"")
string(APPEND CMAKE_MODULE_LINKER_FLAGS " --ld-path=\"${MOLD_BIN}\"")
else()
string(APPEND CMAKE_EXE_LINKER_FLAGS " -fuse-ld=\"${MOLD_BIN}\"")
string(APPEND CMAKE_SHARED_LINKER_FLAGS " -fuse-ld=\"${MOLD_BIN}\"")
string(APPEND CMAKE_MODULE_LINKER_FLAGS " -fuse-ld=\"${MOLD_BIN}\"")
endif()
set(_IS_LINKER_DEFAULT OFF)
endif()
unset(MOLD_BIN)
endif()
# Intel C++ Compiler
elseif(CMAKE_C_COMPILER_ID MATCHES "Intel")
# think these next two are broken
@@ -839,8 +717,6 @@ elseif(CMAKE_C_COMPILER_ID MATCHES "Intel")
string(APPEND PLATFORM_LINKFLAGS " -static-intel")
endif()
unset(_IS_LINKER_DEFAULT)
# Avoid conflicts with Mesa llvmpipe, Luxrender, and other plug-ins that may
# use the same libraries as Blender with a different version or build options.
set(PLATFORM_LINKFLAGS

View File

@@ -55,10 +55,6 @@ if(CMAKE_C_COMPILER_ID MATCHES "Clang")
message(WARNING "stripped pdb not supported with clang, disabling..")
set(WITH_WINDOWS_STRIPPED_PDB OFF)
endif()
else()
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.28.29921) # MSVC 2019 16.9.16
message(FATAL_ERROR "Compiler is unsupported, MSVC 2019 16.9.16 or newer is required for building blender.")
endif()
endif()
if(NOT WITH_PYTHON_MODULE)
@@ -269,6 +265,12 @@ if(NOT DEFINED LIBDIR)
elseif(MSVC_VERSION GREATER 1919)
message(STATUS "Visual Studio 2019 detected.")
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_BASE}_vc15)
elseif(MSVC_VERSION GREATER 1909)
message(STATUS "Visual Studio 2017 detected.")
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_BASE}_vc15)
elseif(MSVC_VERSION EQUAL 1900)
message(STATUS "Visual Studio 2015 detected.")
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_BASE}_vc15)
endif()
else()
message(STATUS "Using pre-compiled LIBDIR: ${LIBDIR}")
@@ -345,11 +347,7 @@ set(FREETYPE_INCLUDE_DIRS
${LIBDIR}/freetype/include
${LIBDIR}/freetype/include/freetype2
)
set(FREETYPE_LIBRARIES
${LIBDIR}/freetype/lib/freetype2ST.lib
${LIBDIR}/brotli/lib/brotlidec-static.lib
${LIBDIR}/brotli/lib/brotlicommon-static.lib
)
set(FREETYPE_LIBRARY ${LIBDIR}/freetype/lib/freetype2ST.lib)
windows_find_package(freetype REQUIRED)
if(WITH_FFTW3)
@@ -463,7 +461,7 @@ if(WITH_JACK)
endif()
if(WITH_PYTHON)
set(PYTHON_VERSION 3.10) # CACHE STRING)
set(PYTHON_VERSION 3.9) # CACHE STRING)
string(REPLACE "." "" _PYTHON_VERSION_NO_DOTS ${PYTHON_VERSION})
set(PYTHON_LIBRARY ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/libs/python${_PYTHON_VERSION_NO_DOTS}.lib)
@@ -479,7 +477,7 @@ if(WITH_PYTHON)
endif()
if(WITH_BOOST)
if(WITH_CYCLES AND WITH_CYCLES_OSL)
if(WITH_CYCLES_OSL)
set(boost_extra_libs wave)
endif()
if(WITH_INTERNATIONAL)
@@ -522,7 +520,7 @@ if(WITH_BOOST)
debug ${BOOST_LIBPATH}/libboost_thread-${BOOST_DEBUG_POSTFIX}
debug ${BOOST_LIBPATH}/libboost_chrono-${BOOST_DEBUG_POSTFIX}
)
if(WITH_CYCLES AND WITH_CYCLES_OSL)
if(WITH_CYCLES_OSL)
set(BOOST_LIBRARIES ${BOOST_LIBRARIES}
optimized ${BOOST_LIBPATH}/libboost_wave-${BOOST_POSTFIX}
debug ${BOOST_LIBPATH}/libboost_wave-${BOOST_DEBUG_POSTFIX})
@@ -710,7 +708,7 @@ if(WITH_CODEC_SNDFILE)
set(LIBSNDFILE_LIBRARIES ${LIBSNDFILE_LIBPATH}/libsndfile-1.lib)
endif()
if(WITH_CYCLES AND WITH_CYCLES_OSL)
if(WITH_CYCLES_OSL)
set(CYCLES_OSL ${LIBDIR}/osl CACHE PATH "Path to OpenShadingLanguage installation")
set(OSL_SHADER_DIR ${CYCLES_OSL}/shaders)
# Shaders have moved around a bit between OSL versions, check multiple locations
@@ -743,7 +741,7 @@ if(WITH_CYCLES AND WITH_CYCLES_OSL)
endif()
endif()
if(WITH_CYCLES AND WITH_CYCLES_EMBREE)
if(WITH_CYCLES_EMBREE)
windows_find_package(Embree)
if(NOT EMBREE_FOUND)
set(EMBREE_INCLUDE_DIRS ${LIBDIR}/embree/include)

View File

@@ -3,6 +3,9 @@ echo No explicit msvc version requested, autodetecting version.
call "%~dp0\detect_msvc2019.cmd"
if %ERRORLEVEL% EQU 0 goto DetectionComplete
call "%~dp0\detect_msvc2017.cmd"
if %ERRORLEVEL% EQU 0 goto DetectionComplete
call "%~dp0\detect_msvc2022.cmd"
if %ERRORLEVEL% EQU 0 goto DetectionComplete

View File

@@ -1,3 +1,4 @@
if "%BUILD_VS_YEAR%"=="2017" set BUILD_VS_LIBDIRPOST=vc15
if "%BUILD_VS_YEAR%"=="2019" set BUILD_VS_LIBDIRPOST=vc15
if "%BUILD_VS_YEAR%"=="2022" set BUILD_VS_LIBDIRPOST=vc15

View File

@@ -19,6 +19,12 @@ if "%WITH_PYDEBUG%"=="1" (
set PYDEBUG_CMAKE_ARGS=-DWINDOWS_PYTHON_DEBUG=On
)
if "%BUILD_VS_YEAR%"=="2017" (
set BUILD_GENERATOR_POST=%WINDOWS_ARCH%
) else (
set BUILD_PLATFORM_SELECT=-A %MSBUILD_PLATFORM%
)
set BUILD_CMAKE_ARGS=%BUILD_CMAKE_ARGS% -G "Visual Studio %BUILD_VS_VER% %BUILD_VS_YEAR%%BUILD_GENERATOR_POST%" %BUILD_PLATFORM_SELECT% %TESTS_CMAKE_ARGS% %CLANG_CMAKE_ARGS% %ASAN_CMAKE_ARGS% %PYDEBUG_CMAKE_ARGS%
if NOT EXIST %BUILD_DIR%\nul (

View File

@@ -37,9 +37,15 @@ set LLVM_DIR=
:DetectionComplete
set CC=%LLVM_DIR%\bin\clang-cl
set CXX=%LLVM_DIR%\bin\clang-cl
rem build and tested against 2019 16.2
set CFLAGS=-m64 -fmsc-version=1922
set CXXFLAGS=-m64 -fmsc-version=1922
if "%BUILD_VS_YEAR%" == "2019" (
rem build and tested against 2019 16.2
set CFLAGS=-m64 -fmsc-version=1922
set CXXFLAGS=-m64 -fmsc-version=1922
) else (
rem build and tested against 2017 15.7
set CFLAGS=-m64 -fmsc-version=1914
set CXXFLAGS=-m64 -fmsc-version=1914
)
)
if "%WITH_ASAN%"=="1" (

View File

@@ -0,0 +1,3 @@
set BUILD_VS_VER=15
set BUILD_VS_YEAR=2017
call "%~dp0\detect_msvc_vswhere.cmd"

View File

@@ -3,32 +3,7 @@ for %%X in (svn.exe) do (set SVN=%%~$PATH:X)
for %%X in (cmake.exe) do (set CMAKE=%%~$PATH:X)
for %%X in (ctest.exe) do (set CTEST=%%~$PATH:X)
for %%X in (git.exe) do (set GIT=%%~$PATH:X)
REM For python, default on 39 but if that does not exist also check
REM the 310,311 and 312 folders to see if those are there, it checks
REM this far ahead to ensure good lib folder compatiblity in the future.
set PYTHON=%BLENDER_DIR%\..\lib\win64_vc15\python\39\bin\python.exe
if EXIST %PYTHON% (
goto detect_python_done
)
set PYTHON=%BLENDER_DIR%\..\lib\win64_vc15\python\310\bin\python.exe
if EXIST %PYTHON% (
goto detect_python_done
)
set PYTHON=%BLENDER_DIR%\..\lib\win64_vc15\python\311\bin\python.exe
if EXIST %PYTHON% (
goto detect_python_done
)
set PYTHON=%BLENDER_DIR%\..\lib\win64_vc15\python\312\bin\python.exe
if EXIST %PYTHON% (
goto detect_python_done
)
if NOT EXIST %PYTHON% (
echo Warning: Python not found, there is likely an issue with the library folder
set PYTHON=""
)
:detect_python_done
if NOT "%verbose%" == "" (
echo svn : "%SVN%"
echo cmake : "%CMAKE%"
@@ -36,3 +11,7 @@ if NOT "%verbose%" == "" (
echo git : "%GIT%"
echo python : "%PYTHON%"
)
if "%CMAKE%" == "" (
echo Cmake not found in path, required for building, exiting...
exit /b 1
)

View File

@@ -9,11 +9,17 @@ exit /b 1
:detect_done
echo found clang-format in %CF_PATH%
if NOT EXIST %PYTHON% (
echo python not found, required for this operation
exit /b 1
if EXIST %PYTHON% (
set PYTHON=%BLENDER_DIR%\..\lib\win64_vc15\python\39\bin\python.exe
goto detect_python_done
)
echo python not found in lib folder
exit /b 1
:detect_python_done
echo found python (%PYTHON%)
set FORMAT_PATHS=%BLENDER_DIR%\source\tools\utils_maintenance\clang_format_paths.py
REM The formatting script expects clang-format to be in the current PATH.

View File

@@ -1,8 +1,18 @@
if NOT EXIST %PYTHON% (
echo python not found, required for this operation
exit /b 1
if EXIST "%PYTHON%" (
goto detect_python_done
)
set PYTHON=%BLENDER_DIR%\..\lib\win64_vc15\python\39\bin\python.exe
if EXIST %PYTHON% (
goto detect_python_done
)
echo python not found at %PYTHON%
exit /b 1
:detect_python_done
echo found python (%PYTHON%)
call "%~dp0\find_inkscape.cmd"
if EXIST "%INKSCAPE_BIN%" (

View File

@@ -1,8 +1,18 @@
if NOT EXIST %PYTHON% (
echo python not found, required for this operation
exit /b 1
if EXIST %PYTHON% (
goto detect_python_done
)
set PYTHON=%BLENDER_DIR%\..\lib\win64_vc15\python\39\bin\python.exe
if EXIST %PYTHON% (
goto detect_python_done
)
echo python not found at %PYTHON%
exit /b 1
:detect_python_done
echo found python (%PYTHON%)
call "%~dp0\find_blender.cmd"
if EXIST "%BLENDER_BIN%" (

View File

@@ -50,6 +50,14 @@ if NOT "%1" == "" (
goto ERR
) else if "%1" == "x64" (
set BUILD_ARCH=x64
) else if "%1" == "2017" (
set BUILD_VS_YEAR=2017
) else if "%1" == "2017pre" (
set BUILD_VS_YEAR=2017
set VSWHERE_ARGS=-prerelease
) else if "%1" == "2017b" (
set BUILD_VS_YEAR=2017
set VSWHERE_ARGS=-products Microsoft.VisualStudio.Product.BuildTools
) else if "%1" == "2019" (
set BUILD_VS_YEAR=2019
) else if "%1" == "2019pre" (

View File

@@ -24,12 +24,12 @@ echo - nobuildinfo ^(disable buildinfo^)
echo - debug ^(Build an unoptimized debuggable build^)
echo - packagename [newname] ^(override default cpack package name^)
echo - builddir [newdir] ^(override default build folder^)
echo - 2017 ^(build with visual studio 2017^)
echo - 2017pre ^(build with visual studio 2017 pre-release^)
echo - 2017b ^(build with visual studio 2017 Build Tools^)
echo - 2019 ^(build with visual studio 2019^)
echo - 2019pre ^(build with visual studio 2019 pre-release^)
echo - 2019b ^(build with visual studio 2019 Build Tools^)
echo - 2022 ^(build with visual studio 2022^)
echo - 2022pre ^(build with visual studio 2022 pre-release^)
echo - 2022b ^(build with visual studio 2022 Build Tools^)
echo.
echo Documentation Targets ^(Not associated with building^)

View File

@@ -1,3 +1,4 @@
if "%BUILD_VS_YEAR%"=="2017" set BUILD_VS_LIBDIRPOST=vc15
if "%BUILD_VS_YEAR%"=="2019" set BUILD_VS_LIBDIRPOST=vc15
if "%BUILD_VS_YEAR%"=="2022" set BUILD_VS_LIBDIRPOST=vc15

View File

@@ -1,7 +1,10 @@
if NOT EXIST %PYTHON% (
echo python not found, required for this operation
exit /b 1
if EXIST %PYTHON% (
goto detect_python_done
)
echo python not found in lib folder
exit /b 1
:detect_python_done
REM Use -B to avoid writing __pycache__ in lib directory and causing update conflicts.

View File

@@ -38,7 +38,7 @@ PROJECT_NAME = Blender
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER = V3.2
PROJECT_NUMBER = V3.1
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a

View File

@@ -6,87 +6,91 @@
* as part of the normal development process.
*/
/* TODO: other modules.
* - `libmv`
* - `cycles`
* - `opencolorio`
* - `opensubdiv`
* - `openvdb`
* - `quadriflow`
/** \defgroup MEM Guarded memory (de)allocation
* \ingroup intern
*/
/** \defgroup intern_atomic Atomic Operations
* \ingroup intern */
/** \defgroup clog C-Logging (CLOG)
* \ingroup intern
*/
/** \defgroup intern_clog C-Logging (CLOG)
* \ingroup intern */
/** \defgroup ctr container
* \ingroup intern
*/
/** \defgroup intern_eigen Eigen
* \ingroup intern */
/** \defgroup iksolver iksolver
* \ingroup intern
*/
/** \defgroup intern_glew-mx GLEW with Multiple Rendering Context's
* \ingroup intern */
/** \defgroup itasc itasc
* \ingroup intern
*/
/** \defgroup intern_iksolver Inverse Kinematics (Solver)
* \ingroup intern */
/** \defgroup memutil memutil
* \ingroup intern
*/
/** \defgroup intern_itasc Inverse Kinematics (ITASC)
* \ingroup intern */
/** \defgroup mikktspace mikktspace
* \ingroup intern
*/
/** \defgroup intern_libc_compat libc Compatibility For Linux
* \ingroup intern */
/** \defgroup moto moto
* \ingroup intern
*/
/** \defgroup intern_locale Locale
* \ingroup intern */
/** \defgroup eigen eigen
* \ingroup intern
*/
/** \defgroup intern_mantaflow Manta-Flow Fluid Simulation
* \ingroup intern */
/** \defgroup smoke smoke
* \ingroup intern
*/
/** \defgroup intern_mem Guarded Memory (de)allocation
* \ingroup intern */
/** \defgroup intern_memutil Memory Utilities (memutil)
* \ingroup intern */
/** \defgroup intern_mikktspace MikktSpace
* \ingroup intern */
/** \defgroup intern_rigidbody Rigid-Body C-API
* \ingroup intern */
/** \defgroup intern_sky_model Sky Model
* \ingroup intern */
/** \defgroup intern_utf_conv UTF-8/16 Conversion (utfconv)
* \ingroup intern */
/** \defgroup string string
* \ingroup intern
*/
/** \defgroup audaspace Audaspace
* \ingroup intern undoc
* \todo add to doxygen */
* \todo add to doxygen
*/
/** \defgroup audcoreaudio Audaspace CoreAudio
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audfx Audaspace FX
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audopenal Audaspace OpenAL
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audpulseaudio Audaspace PulseAudio
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audwasapi Audaspace WASAPI
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audpython Audaspace Python
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audsdl Audaspace SDL
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audsrc Audaspace SRC
* \ingroup audaspace */
*
* \ingroup audaspace
*/
/** \defgroup audffmpeg Audaspace FFMpeg
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audfftw Audaspace FFTW
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audjack Audaspace Jack
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup audsndfile Audaspace sndfile
* \ingroup audaspace */
* \ingroup audaspace
*/
/** \defgroup GHOST GHOST API
* \ingroup intern GUI

View File

@@ -5,8 +5,7 @@
/** \defgroup bmesh BMesh
* \ingroup blender
*/
/** \defgroup compositor Compositing
* \ingroup blender */
/** \defgroup compositor Compositing */
/** \defgroup python Python
* \ingroup blender
@@ -79,8 +78,7 @@
* \ingroup blender
*/
/** \defgroup data DNA, RNA and .blend access
* \ingroup blender */
/** \defgroup data DNA, RNA and .blend access*/
/** \defgroup gpu GPU
* \ingroup blender
@@ -103,12 +101,11 @@
* merged in docs.
*/
/**
* \defgroup gui GUI
* \ingroup blender */
/** \defgroup gui GUI */
/** \defgroup wm Window Manager
* \ingroup gui */
* \ingroup blender gui
*/
/* ================================ */
@@ -282,8 +279,7 @@
* \ingroup gui
*/
/** \defgroup externformats External Formats
* \ingroup blender */
/** \defgroup externformats External Formats */
/** \defgroup collada COLLADA
* \ingroup externformats
@@ -312,7 +308,4 @@
/* ================================ */
/** \defgroup undoc Undocumented
*
* \brief Modules and libraries that are still undocumented,
* or lacking proper integration into the doxygen system, are marked in this group.
*/
* \brief Modules and libraries that are still undocumented, or lacking proper integration into the doxygen system, are marked in this group. */

View File

@@ -61,7 +61,7 @@ def blender_extract_info(blender_bin: str) -> Dict[str, str]:
stdout=subprocess.PIPE,
).stdout.decode(encoding="utf-8")
blender_version_output = subprocess.run(
blender_version_ouput = subprocess.run(
[blender_bin, "--version"],
env=blender_env,
check=True,
@@ -73,7 +73,7 @@ def blender_extract_info(blender_bin: str) -> Dict[str, str]:
# check for each lines prefix to ensure these aren't included.
blender_version = ""
blender_date = ""
for l in blender_version_output.split("\n"):
for l in blender_version_ouput.split("\n"):
if l.startswith("Blender "):
# Remove 'Blender' prefix.
blender_version = l.split(" ", 1)[1].strip()

View File

@@ -8,42 +8,27 @@ def set_pose_matrices(obj, matrix_map):
"Assign pose space matrices of all bones at once, ignoring constraints."
def rec(pbone, parent_matrix):
if pbone.name in matrix_map:
matrix = matrix_map[pbone.name]
matrix = matrix_map[pbone.name]
## Instead of:
# pbone.matrix = matrix
# bpy.context.view_layer.update()
## Instead of:
# pbone.matrix = matrix
# bpy.context.view_layer.update()
# Compute and assign local matrix, using the new parent matrix
if pbone.parent:
pbone.matrix_basis = pbone.bone.convert_local_to_pose(
matrix,
pbone.bone.matrix_local,
parent_matrix=parent_matrix,
parent_matrix_local=pbone.parent.bone.matrix_local,
invert=True
)
else:
pbone.matrix_basis = pbone.bone.convert_local_to_pose(
matrix,
pbone.bone.matrix_local,
invert=True
)
# Compute and assign local matrix, using the new parent matrix
if pbone.parent:
pbone.matrix_basis = pbone.bone.convert_local_to_pose(
matrix,
pbone.bone.matrix_local,
parent_matrix=parent_matrix,
parent_matrix_local=pbone.parent.bone.matrix_local,
invert=True
)
else:
# Compute the updated pose matrix from local and new parent matrix
if pbone.parent:
matrix = pbone.bone.convert_local_to_pose(
pbone.matrix_basis,
pbone.bone.matrix_local,
parent_matrix=parent_matrix,
parent_matrix_local=pbone.parent.bone.matrix_local,
)
else:
matrix = pbone.bone.convert_local_to_pose(
pbone.matrix_basis,
pbone.bone.matrix_local,
)
pbone.matrix_basis = pbone.bone.convert_local_to_pose(
matrix,
pbone.bone.matrix_local,
invert=True
)
# Recursively process children, passing the new matrix through
for child in pbone.children:

View File

@@ -22,7 +22,7 @@ Data Access
===========
The most common case for using the reference API is to find out how to access data in the blend-file.
Before going any further it's best to be aware of ID data-blocks in Blender since you will often find properties
Before going any further its best to be aware of ID data-blocks in Blender since you will often find properties
relative to them.
@@ -55,9 +55,9 @@ Start by collecting the information where the data is located.
First find this setting in the interface ``Properties editor -> Object -> Transform -> Location``.
From the button context menu select *Online Python Reference*, this will link you to:
:class:`bpy.types.Object.location`.
Being an API reference, this link often gives little more information than the tooltip, though some of the pages
Being an API reference, this link often gives little more information then the tooltip, though some of the pages
include examples (normally at the top of the page).
But you now know that you have to use ``.location`` and that it's an array of three floats.
But you now know that you have to use ``.location`` and that its an array of three floats.
So the next step is to find out where to access objects, go down to the bottom of the page to the references section,
for objects there are many references, but one of the most common places to access objects is via the context.
@@ -154,7 +154,7 @@ The tooltip includes :class:`bpy.types.SubsurfModifier.levels` but you want the
Note that the text copied won't include the ``bpy.data.collection["name"].`` component since its assumed that
you won't be doing collection look-ups on every access and typically you'll want to use the context rather
than access each :class:`bpy.types.ID` instance by name.
then access each :class:`bpy.types.ID` instance by name.
Type in the ID path into a Python console :mod:`bpy.context.active_object`.
Include the trailing dot and don't execute the code, yet.
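For example, a minimal sketch putting this lookup together (assuming an object is active in the scene):

.. code-block:: python

   import bpy

   obj = bpy.context.active_object
   print(obj.location)       # a Vector of three floats
   obj.location.x += 1.0     # nudge the object along the X axis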
@@ -252,6 +252,6 @@ Each entry can be selected, then copied :kbd:`Ctrl-C`, usually to paste in the t
.. note::
Not all operators get registered for display,
zooming the view for example isn't so useful to repeat so it's excluded from the output.
zooming the view for example isn't so useful to repeat so its excluded from the output.
To display *every* operator that runs see :ref:`Show All Operators <info_show_all_operators>`.

View File

@@ -229,7 +229,7 @@ removing the last items first, which is faster (as explained above):
This example shows a fast way of removing items,
for use in cases where you can alter the list order without breaking the script's functionality.
for use in cases where you can alter the list order without breaking the scripts functionality.
This works by swapping two list items, so the item you remove is always last:
.. code-block:: python
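   # A minimal sketch of the swap-with-last idea described above
   # (one possible way to write it): overwrite the item to remove
   # with the last item, then pop the now-duplicated tail.
   def remove_item_fast(seq, index):
       seq[index] = seq[-1]
       seq.pop()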
@@ -278,7 +278,7 @@ Here are three ways of joining multiple strings into one string for writing.
This also applies to any area of your code that involves a lot of string joining:
String concatenation
This is the slowest option, do **not** use this if you can avoid it, especially when writing data in a loop.
This is the slowest option, do **not** use if you can avoid it, especially when writing data in a loop.
>>> file.write(str1 + " " + str2 + " " + str3 + "\n")
@@ -288,7 +288,7 @@ String formatting
>>> file.write("%s %s %s\n" % (str1, str2, str3))
String joining
Use this to join a list of strings (the list may be temporary). In the following example, the strings are joined with
Use to join a list of strings (the list may be temporary). In the following example, the strings are joined with
a space " " in between, other examples are "" or ", ".
>>> file.write(" ".join((str1, str2, str3, "\n")))

View File

@@ -12,7 +12,7 @@ that can be troublesome and avoid practices that are known to cause instability.
Using Operators
===============
Blender's operators are tools for users to access, that can be accessed with Python too which is very useful.
Blender's operators are tools for users to access, that can access with Python too which is very useful.
Still operators have limitations that can make them cumbersome to script.
The main limits are:
@@ -20,13 +20,13 @@ The main limits are:
- Can't pass data such as objects, meshes or materials to operate on (operators use the context instead).
- The return value from calling an operator is the success (if it finished or was canceled),
in some cases it would be more logical from an API perspective to return the result of the operation.
- Operators' poll function can fail where an API function would raise an exception giving details on exactly why.
- Operators poll function can fail where an API function would raise an exception giving details on exactly why.
Why does an operator's poll fail?
---------------------------------
When calling an operator it gives an error like this:
When calling an operator gives an error like this:
>>> bpy.ops.action.clean(threshold=0.001)
RuntimeError: Operator bpy.ops.action.clean.poll() failed, context is incorrect
@@ -49,9 +49,9 @@ you should be able to find the poll function with no knowledge of C.
.. note::
Blender does have the functionality for poll functions to describe why they fail,
but it's currently not used much, if you're interested to help improve the API
but its currently not used much, if you're interested to help improve the API
feel free to add calls to :class:`bpy.types.Operator.poll_message_set` (``CTX_wm_operator_poll_msg_set`` in C)
where it's not obvious why poll fails, e.g:
where its not obvious why poll fails, e.g:
>>> bpy.ops.gpencil.draw()
RuntimeError: Operator bpy.ops.gpencil.draw.poll() Failed to find Grease Pencil data to draw into
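A simple way to guard a script against such failures is to test the operator's poll yourself before calling it, for example:

.. code-block:: python

   import bpy

   # Only run the operator when its poll passes in the current context.
   if bpy.ops.action.clean.poll():
       bpy.ops.action.clean(threshold=0.001)
   else:
       print("action.clean can't run in this context")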
@@ -107,7 +107,7 @@ In this case you need to call :class:`bpy.types.ViewLayer.update` after modifyin
Now all dependent data (child objects, modifiers, drivers, etc.)
have been recalculated and are available to the script within the active view layer.
has been recalculated and is available to the script within active view layer.
Can I redraw during script execution?
@@ -116,13 +116,13 @@ Can I redraw during script execution?
The official answer to this is no, or... *"You don't want to do that"*.
To give some background on the topic:
While a script executes, Blender waits for it to finish and is effectively locked until it's done;
While a script executes Blender waits for it to finish and is effectively locked until its done,
while in this state Blender won't redraw or respond to user input.
Normally this is not such a problem because scripts distributed with Blender
tend not to run for an extended period of time,
nevertheless scripts *can* take a long time to complete and it would be nice to see progress in the viewport.
Tools that lock Blender in a loop redraw are highly discouraged
When tools lock Blender in a loop redraw are highly discouraged
since they conflict with Blender's ability to run multiple operators
at once and update different parts of the interface as the tool runs.
@@ -130,7 +130,7 @@ So the solution here is to write a **modal** operator, which is an operator that
See the modal operator template in the text editor.
Modal operators execute on user input or setup their own timers to run frequently,
they can handle the events or pass through to be handled by the keymap or other modal operators.
Examples of modal operators are Transform, Painting, Fly Navigation and File Select.
Examples of a modal operators are Transform, Painting, Fly Navigation and File Select.
Writing modal operators takes more effort than a simple ``for`` loop
that contains draw calls but is more flexible and integrates better with Blender's design.
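As a rough sketch of the shape such an operator takes (identifiers here are made up, see the bundled template for a complete example), a timer-driven modal operator looks something like this:

.. code-block:: python

   import bpy

   class WM_OT_slow_task(bpy.types.Operator):
       """Sketch of a timer-driven modal operator."""
       bl_idname = "wm.slow_task"
       bl_label = "Slow Task"

       _timer = None
       _steps = 0

       def modal(self, context, event):
           if event.type == 'ESC':
               context.window_manager.event_timer_remove(self._timer)
               return {'CANCELLED'}
           if event.type == 'TIMER':
               self._steps += 1      # do one small slice of work here
               if self._steps >= 100:
                   context.window_manager.event_timer_remove(self._timer)
                   return {'FINISHED'}
           return {'PASS_THROUGH'}

       def execute(self, context):
           wm = context.window_manager
           self._timer = wm.event_timer_add(0.1, window=context.window)
           wm.modal_handler_add(self)
           return {'RUNNING_MODAL'}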
@@ -240,7 +240,7 @@ Editing
Editing is where the three data types vary most.
- Polygons are very limited for editing,
changing materials and options like smooth works, but for anything else
changing materials and options like smooth works but for anything else
they are too inflexible and are only intended for storage.
- Tessfaces should not be used for editing geometry because doing so will cause existing n-gons to be tessellated.
- BMesh-faces are by far the best way to manipulate geometry.
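To illustrate the last point, a minimal BMesh edit (the mesh name is hypothetical) can be as simple as:

.. code-block:: python

   import bpy
   import bmesh

   mesh = bpy.data.meshes["Cube"]
   bm = bmesh.new()
   bm.from_mesh(mesh)
   for vert in bm.verts:
       vert.co.z += 0.1     # raise every vertex slightly
   bm.to_mesh(mesh)
   bm.free()
   mesh.update()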
@@ -256,7 +256,7 @@ the choice mostly depends on whether the target format supports n-gons or not.
- Tessfaces work well for exporting to formats which don't support n-gons,
in fact this is the only place where their use is encouraged.
- BMesh-Faces can work for exporting too but may not be necessary if polygons can be used
since using BMesh gives some overhead because it's not the native storage format in Object-Mode.
since using BMesh gives some overhead because its not the native storage format in Object-Mode.
Edit Bones, Pose Bones, Bone... Bones
@@ -348,7 +348,7 @@ Armature Mode Switching
While writing scripts that deal with armatures you may find you have to switch between modes,
when doing so take care when switching out of Edit-Mode not to keep references
to the edit bones or their head/tail vectors.
Further access to these will crash Blender so it's important that the script
Further access to these will crash Blender so its important the script
clearly separates sections of the code which operate in different modes.
This is mainly an issue with Edit-Mode since pose data can be manipulated without having to be in Pose-Mode,
@@ -386,11 +386,11 @@ Or with name assignment:
Data names may not match the assigned values if they exceed the maximum length, are already used or an empty string.
It's better practice not to reference objects by names at all,
Its better practice not to reference objects by names at all,
once created you can store the data in a list, dictionary, on a class, etc;
there is rarely a reason to have to keep searching for the same data by name.
If you do need to use name references, it's best to use a dictionary to maintain
If you do need to use name references, its best to use a dictionary to maintain
a mapping between the names of the imported assets and the newly created data,
this way you don't run this risk of referencing existing data from the blend-file, or worse modifying it.
@@ -414,11 +414,11 @@ Library Collisions
Blender keeps data names unique (:class:`bpy.types.ID.name`) so you can't name two objects,
meshes, scenes, etc., the same by accident.
However, when linking in library data from another blend-file naming collisions can occur,
so it's best to avoid referencing data by name at all.
so its best to avoid referencing data by name at all.
This can be tricky at times and not even Blender handles this correctly in some cases
This can be tricky at times and not even Blender handles this correctly in some case
(when selecting the modifier object for e.g. you can't select between multiple objects with the same name),
but it's still good to try avoiding these problems in this area.
but its still good to try avoiding these problems in this area.
If you need to select between local and library data, there is a feature in ``bpy.data`` members to allow for this.
.. code-block:: python
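   # Illustrative sketch only; the object and library names are made up.
   # bpy.data collections accept a (name, library_filepath) key,
   # where a library of None selects local data.
   local_cube = bpy.data.objects.get(("Cube", None))
   linked_cube = bpy.data.objects.get(("Cube", "//library.blend"))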
@@ -467,11 +467,11 @@ writing a script in ``latin1`` or ``iso-8859-15``.
See `PEP 263 <https://www.python.org/dev/peps/pep-0263/>`__.
However, this complicates matters for Blender's Python API because ``.blend`` files don't have an explicit encoding.
To avoid the problem for Python integration and script authors we have decided that all strings in blend-files
To avoid the problem for Python integration and script authors we have decided all strings in blend-files
**must** be ``UTF-8``, ``ASCII`` compatible.
This means assigning strings with different encodings to an object name, for instance, will raise an error.
This means assigning strings with different encodings to an object names for instance will raise an error.
Paths are an exception to this rule since the existence of non-UTF-8 paths on the user's file system cannot be ignored.
Paths are an exception to this rule since the existence of non-UTF-8 paths on user's file system cannot be ignored.
This means seemingly harmless expressions can raise errors, e.g:
>>> print(bpy.data.filepath)
@@ -505,7 +505,7 @@ to keep it short about encoding problems -- here are some suggestions:
.. note::
Sometimes it's preferable to avoid string encoding issues by using bytes instead of Python strings,
when reading some input it's less trouble to read it as binary data
when reading some input its less trouble to read it as binary data
though you will still need to decide how to treat any strings you want to use with Blender,
some importers do this.
@@ -679,7 +679,7 @@ Undo/Redo
---------
For safety, you should assume that undo and redo always invalidates all :class:`bpy.types.ID`
instances (Object, Scene, Mesh, Light, etc.), as well obviously as all of their sub-data.
instances (Object, Scene, Mesh, Light, etc.), as weel obviously as all of their sub-data.
This example shows how you can tell undo changes the memory locations:
@@ -716,7 +716,7 @@ Tools in Blender are not allowed to modify library data.
But Python does not enforce this restriction.
This can be useful in some cases, using a script to adjust material values for example.
But it's also possible to use a script to make library data point to newly created local data,
But its also possible to use a script to make library data point to newly created local data,
which is not supported since a call to undo will remove the local data
but leave the library referencing it and likely crash.
@@ -743,7 +743,7 @@ will re-allocate objects data,
any references to a meshes vertices/polygons/UVs, armatures bones,
curves points, etc. cannot be accessed after switching mode.
Only the reference to the data itself can be re-accessed, the following example will crash.
Only the reference to the data its self can be re-accessed, the following example will crash.
.. code-block:: python

View File

@@ -81,7 +81,7 @@ but reference an external file rather than including it directly.
Executing External Scripts
--------------------------
This is the equivalent to running the script directly, referencing a script's path from a two line code block.
This is the equivalent to running the script directly, referencing a scripts path from a two line code block.
.. code-block:: python
@@ -124,7 +124,7 @@ small script which is often useful for testing different settings quickly.
The other issue with this is the script has to be in Python's module search path.
While this is not best practice -- for testing purposes you can extend the search path,
this following example adds the current blend-file's directory to the search path
this following example adds the current blend-files directory to the search path
and then loads the script as a module.
.. code-block:: python
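   # Minimal sketch, assuming a "myscript.py" module sits next to the blend-file.
   import os
   import sys
   import importlib

   import bpy

   blend_dir = os.path.dirname(bpy.data.filepath)
   if blend_dir not in sys.path:
       sys.path.append(blend_dir)

   import myscript
   importlib.reload(myscript)   # pick up edits on re-run
   myscript.main()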
@@ -302,7 +302,7 @@ Python Safety (Build Option)
----------------------------
Since it's possible to access data which has been removed (see :doc:`Gotchas <info_gotcha>`),
it can be hard to track down the cause of crashes.
can make it hard to track down the cause of crashes.
To raise Python exceptions on accessing freed data (rather than crashing),
enable the CMake build option ``WITH_PYTHON_SAFETY``.
This enables data tracking which makes data access about two times slower

View File

@@ -417,8 +417,7 @@ MODULE_GROUPING = {
BLENDER_REVISION = str(bpy.app.build_hash, 'utf_8')
# '2.83.0 Beta' or '2.83.0' or '2.83.1'
BLENDER_VERSION_STRING = bpy.app.version_string
BLENDER_VERSION_DOTS = "%d.%d" % (bpy.app.version[0], bpy.app.version[1])
BLENDER_VERSION_DOTS = bpy.app.version_string
if BLENDER_REVISION != "Unknown":
# SHA1 Git hash
@@ -1104,7 +1103,6 @@ context_type_map = {
"selectable_objects": ("Object", True),
"selected_asset_files": ("FileSelectEntry", True),
"selected_bones": ("EditBone", True),
"selected_editable_actions": ("Action", True),
"selected_editable_bones": ("EditBone", True),
"selected_editable_fcurves": ("FCurve", True),
"selected_editable_keyframes": ("Keyframe", True),
@@ -1120,13 +1118,12 @@ context_type_map = {
"selected_pose_bones": ("PoseBone", True),
"selected_pose_bones_from_active_object": ("PoseBone", True),
"selected_sequences": ("Sequence", True),
"selected_visible_actions": ("Action", True),
"selected_visible_fcurves": ("FCurve", True),
"sequences": ("Sequence", True),
"soft_body": ("SoftBodyModifier", False),
"speaker": ("Speaker", False),
"texture": ("Texture", False),
"texture_slot": ("TextureSlot", False),
"texture_slot": ("MaterialTextureSlot", False),
"texture_user": ("ID", False),
"texture_user_property": ("Property", False),
"ui_list": ("UIList", False),
@@ -1725,11 +1722,11 @@ def write_sphinx_conf_py(basepath):
fw("import sys, os\n\n")
fw("extensions = ['sphinx.ext.intersphinx']\n\n")
fw("intersphinx_mapping = {'blender_manual': ('https://docs.blender.org/manual/en/dev/', None)}\n\n")
fw("project = 'Blender %s Python API'\n" % BLENDER_VERSION_STRING)
fw("project = 'Blender %s Python API'\n" % BLENDER_VERSION_DOTS)
fw("master_doc = 'index'\n")
fw("copyright = u'Blender Foundation'\n")
fw("version = '%s'\n" % BLENDER_VERSION_DOTS)
fw("release = '%s'\n" % BLENDER_VERSION_DOTS)
fw("version = '%s'\n" % BLENDER_VERSION_HASH)
fw("release = '%s'\n" % BLENDER_VERSION_HASH)
# Quiet file not in table-of-contents warnings.
fw("exclude_patterns = [\n")
@@ -1750,7 +1747,6 @@ except ModuleNotFoundError:
fw("if html_theme == 'sphinx_rtd_theme':\n")
fw(" html_theme_options = {\n")
fw(" 'display_version': False,\n")
# fw(" 'analytics_id': '',\n")
# fw(" 'collapse_navigation': True,\n")
fw(" 'sticky_navigation': False,\n")
@@ -1764,18 +1760,12 @@ except ModuleNotFoundError:
fw("html_show_sphinx = False\n")
fw("html_baseurl = 'https://docs.blender.org/api/current/'\n")
fw("html_use_opensearch = 'https://docs.blender.org/api/current'\n")
fw("html_show_search_summary = True\n")
fw("html_split_index = True\n")
fw("html_static_path = ['static']\n")
fw("templates_path = ['templates']\n")
fw("html_context = {'commit': '%s'}\n" % BLENDER_VERSION_HASH)
fw("html_extra_path = ['static/favicon.ico', 'static/blender_logo.svg']\n")
fw("html_favicon = 'static/favicon.ico'\n")
fw("html_logo = 'static/blender_logo.svg'\n")
fw("html_last_updated_fmt = '%m/%d/%Y'\n\n")
fw("if html_theme == 'sphinx_rtd_theme':\n")
fw(" html_css_files = ['css/version_switch.css']\n")
fw(" html_js_files = ['js/version_switch.js']\n")
# needed for latex, pdf gen
fw("latex_elements = {\n")
@@ -2132,9 +2122,6 @@ def copy_theme_assets(basepath):
shutil.copytree(os.path.join(SCRIPT_DIR, "static"),
os.path.join(basepath, "static"),
copy_function=shutil.copy)
shutil.copytree(os.path.join(SCRIPT_DIR, "templates"),
os.path.join(basepath, "templates"),
copy_function=shutil.copy)
def rna2sphinx(basepath):

View File

@@ -1,127 +0,0 @@
/* Override RTD theme */
.rst-versions {
border-top: 0px;
overflow: visible;
}
.version-btn.vdeact {
cursor: default;
color: dimgray;
}
.version-btn.vdeact::after {
content: "";
}
#versionwrap {
display: flex;
padding-top: 2px;
font-size: 90%;
justify-content: center;
flex-wrap: wrap;
}
.version-btn {
display: inline-block;
background-color: #272525;
width: 140px;
text-align: center;
padding: 3px 10px;
margin: 0px 5px 4px;
vertical-align: middle;
color: #27AE60;
border: solid 1px #444444;
border-radius: 3px;
cursor: pointer;
z-index: 400;
transition: border-color 0.4s;
}
.version-btn::after {
content:"\f0d8";
display: inline;
font: normal normal normal 16px/1 FontAwesome;
color: #8d8c8c;
vertical-align: top;
padding-left: 0.5em;
}
.version-btn-open::after {
color: gray;
}
.version-btn:hover, .version-btn:focus {
border-color: #525252;
}
.version-btn-open {
color: gray;
border: solid 1px gray;
}
.version-btn.wait {
cursor: wait;
}
.version-btn.disabled {
cursor: not-allowed;
color: dimgray;
}
.version-dialog {
display: none;
position: absolute;
bottom: 28px;
width: 140px;
margin: 0 5px;
padding-bottom: 4px;
background-color: #0003;
border-radius: 3px;
box-shadow: 0 0 6px #000C;
z-index: 999;
max-height: calc(100vh - 30px);
overflow-y: auto;
cursor: default;
}
.version-title {
padding: 5px;
color: black;
text-align: center;
font-size: 102%;
background-color: #27ae60;
border-bottom: solid 1.5px #444;
}
.version-list {
margin-bottom: 4px;
text-align: center;
background-color: #000C;
border: solid 1px gray;
border-radius: 0px 0px 3px 3px;
}
.version-list a, .version-list span, .version-list li {
position: relative;
display: block;
font-size: 98%;
line-height: 1.15;
width: 100%;
margin: 0;
padding: 4px 0px;
color: #404040;
}
.version-list li {
background-color: #ede9e9;
color: #404040;
padding: 1px;
}
.version-list li:hover, .version-list li a:focus {
background-color: #b9cfda;
}
.version-list li.selected, .version-list li.selected:hover {
background-color: #8d8c8c;
}
.version-list li.selected span {
cursor: default;
outline-color: red;
}
.version-arrow {
position: absolute;
width: 8px;
height: 8px;
left: 50%;
bottom: 4px;
margin-left: -4px;
transform: rotate(225deg);
background: #ede9e9;
border: 1px solid gray;
border-width: 1px 0 0 1px;
}

View File

@@ -1,323 +0,0 @@
(function() { // switch: v1.2
"use strict";
var versionsFileUrl = "https://docs.blender.org/PROD/versions.json"
var all_versions;
var Popover = function() {
function Popover(id)
{
this.isOpen = false;
this.type = (id === "version-popover");
this.$btn = $('#' + id);
this.$dialog = this.$btn.next();
this.$list = this.$dialog.children("ul");
this.sel = null;
this.beforeInit();
}
Popover.prototype = {
beforeInit : function() {
var that = this;
this.$btn.on("click", function(e) {
that.init();
e.preventDefault();
e.stopPropagation();
});
this.$btn.on("keydown", function(e) {
if (that.btnKeyFilter(e)) {
that.init();
e.preventDefault();
e.stopPropagation();
}
});
},
init : function() {
this.$btn.off("click");
this.$btn.off("keydown");
if (all_versions === undefined) {
this.$btn.addClass("wait");
this.loadVL(this);
}
else {
this.afterLoad();
}
},
loadVL : function(that) {
$.getJSON(versionsFileUrl, function(data) {
all_versions = data;
that.afterLoad();
return true;
}).fail(function() {
console.log("Version Switch Error: versions.json could not be loaded.");
that.$btn.addClass("disabled");
return false;
});
},
afterLoad : function() {
var release = DOCUMENTATION_OPTIONS.VERSION;
const m = release.match(/\d\.\d+/g);
if (m) {
release = m[0];
}
this.warnOld(release, all_versions);
var version = this.getNamed(release);
var list = this.buildList(version);
this.$list.children(":first-child").remove();
this.$list.append(list);
var that = this;
this.$list.on("keydown", function(e) {
that.keyMove(e);
});
this.$btn.removeClass("wait");
this.btnOpenHandler();
this.$btn.on("mousedown", function(e) {
that.btnOpenHandler();
e.preventDefault()
});
this.$btn.on("keydown", function(e) {
if (that.btnKeyFilter(e)) {
that.btnOpenHandler();
}
});
},
warnOld : function(release, all_versions) {
// Note this is effectively disabled now, two issues must fixed:
// * versions.js does not contain a current entry, because that leads to
// duplicate version numbers in the menu. These need to be deduplicated.
// * It only shows the warning after opening the menu to switch version
// when versions.js is loaded. This is too late to be useful.
var current = all_versions.current
if (!current)
{
// console.log("Version Switch Error: no 'current' in version.json.");
return;
}
const m = current.match(/\d\.\d+/g);
if (m) {
current = parseFloat(m[0]);
}
if (release < current) {
var currentURL = window.location.pathname.replace(release, current);
var warning = $('<div class="admonition warning"> ' +
'<p class="first admonition-title">Note</p> ' +
'<p class="last"> ' +
'You are not using the most up to date version of the documentation. ' +
'<a href="#"></a> is the newest version.' +
'</p>' +
'</div>');
warning.find('a').attr('href', currentURL).text(current);
var body = $("div.body");
if (!body.length) {
body = $("div.document");
}
body.prepend(warning);
}
},
buildList : function(v) {
var url = new URL(window.location.href);
let pathSplit = [ "", "api", v ];
if (url.pathname.startsWith("/api/")) {
pathSplit.push(url.pathname.split('/').slice(3).join('/'));
}
else {
pathSplit.push(url.pathname.substring(1));
}
if (this.type) {
var dyn = all_versions;
var cur = v;
}
var buf = [];
var that = this;
$.each(dyn, function(ix, title) {
buf.push("<li");
if (ix === cur) {
buf.push(
' class="selected" tabindex="-1" role="presentation"><span tabindex="-1" role="menuitem" aria-current="page">' +
title + '</spanp></li>');
}
else {
pathSplit[2 + that.type] = ix;
var href = new URL(url);
href.pathname = pathSplit.join('/');
buf.push(' tabindex="-1" role="presentation"><a href ="' + href + '" tabindex="-1">' +
title + '</a></li>');
}
});
return buf.join('');
},
getNamed : function(v) {
$.each(all_versions, function(ix, title) {
if (ix === "master" || ix === "latest") {
var m = title.match(/\d\.\d[\w\d\.]*/)[0];
if (parseFloat(m) == v) {
v = ix;
return false;
}
}
});
return v;
},
dialogToggle : function(speed) {
var wasClose = !this.isOpen;
var that = this;
if (!this.isOpen) {
this.$btn.addClass("version-btn-open");
this.$btn.attr("aria-pressed", true);
this.$dialog.attr("aria-hidden", false);
this.$dialog.fadeIn(speed, function() {
that.$btn.parent().on("focusout", function(e) {
that.focusoutHandler();
e.stopImmediatePropagation();
})
that.$btn.parent().on("mouseleave", function(e) {
that.mouseoutHandler();
e.stopImmediatePropagation();
});
});
this.isOpen = true;
}
else {
this.$btn.removeClass("version-btn-open");
this.$btn.attr("aria-pressed", false);
this.$dialog.attr("aria-hidden", true);
this.$btn.parent().off("focusout");
this.$btn.parent().off("mouseleave");
this.$dialog.fadeOut(speed, function() {
if (this.$sel) {
this.$sel.attr("tabindex", -1);
}
that.$btn.attr("tabindex", 0);
if (document.activeElement !== null && document.activeElement !== document &&
document.activeElement !== document.body) {
that.$btn.focus();
}
});
this.isOpen = false;
}
if (wasClose) {
if (this.$sel) {
this.$sel.attr("tabindex", -1);
}
if (document.activeElement !== null && document.activeElement !== document &&
document.activeElement !== document.body) {
var $nw = this.listEnter();
$nw.attr("tabindex", 0);
$nw.focus();
this.$sel = $nw;
}
}
},
btnOpenHandler : function() {
this.dialogToggle(300);
},
focusoutHandler : function() {
var list = this.$list;
var that = this;
setTimeout(function() {
if (list.find(":focus").length === 0) {
that.dialogToggle(200);
}
}, 200);
},
mouseoutHandler : function() {
this.dialogToggle(200);
},
btnKeyFilter : function(e) {
if (e.ctrlKey || e.shiftKey) {
return false;
}
if (e.key === " " || e.key === "Enter" || (e.key === "ArrowDown" && e.altKey) ||
e.key === "ArrowDown" || e.key === "ArrowUp") {
return true;
}
return false;
},
keyMove : function(e) {
if (e.ctrlKey || e.shiftKey) {
return true;
}
var p = true;
var $nw = $(e.target);
switch (e.key) {
case "ArrowUp":
$nw = this.listPrev($nw);
break;
case "ArrowDown":
$nw = this.listNext($nw);
break;
case "Home":
$nw = this.listFirst();
break;
case "End":
$nw = this.listLast();
break;
case "Escape":
$nw = this.listExit();
break;
case "ArrowLeft":
$nw = this.listExit();
break;
case "ArrowRight":
$nw = this.listExit();
break;
default:
p = false;
}
if (p) {
$nw.attr("tabindex", 0);
$nw.focus();
if (this.$sel) {
this.$sel.attr("tabindex", -1);
}
this.$sel = $nw;
e.preventDefault();
e.stopPropagation();
}
},
listPrev : function($nw) {
if ($nw.parent().prev().length !== 0) {
return $nw.parent().prev().children(":first-child");
}
else {
return this.listLast();
}
},
listNext : function($nw) {
if ($nw.parent().next().length !== 0) {
return $nw.parent().next().children(":first-child");
}
else {
return this.listFirst();
}
},
listFirst : function() {
return this.$list.children(":first-child").children(":first-child");
},
listLast : function() {
return this.$list.children(":last-child").children(":first-child");
},
listExit : function() {
this.mouseoutHandler();
return this.$btn;
},
listEnter : function() {
return this.$list.children(":first-child").children(":first-child");
}
};
return Popover
}();
$(document).ready(function() {
var lng_popover = new Popover("version-popover");
});
})();

View File

@@ -1,17 +0,0 @@
<div class="rst-versions" data-toggle="rst-versions" role="note" aria-label="document versions">
<ul id="versionwrap" role="presentation">
<li role="presentation">
<span id="version-popover" class="version-btn" tabindex="0" role="button" aria-label="versions selector" aria-haspopup="true" aria-controls="version-vsnlist" aria-disabled="true">
{{ release }}
</span>
<div class="version-dialog" aria-hidden="true">
<div class="version-arrow" aria-hidden="true"></div>
<div class="version-title">Versions</div>
<ul id="version-vsnlist" class="version-list" role="menu" aria-labelledby="version-popover" aria-hidden="true">
<li role="presentation">Loading...</li>
</ul>
</div>
</li>
</ul>
</div>

View File

@@ -113,6 +113,6 @@ if(WITH_MOD_FLUID)
add_subdirectory(mantaflow)
endif()
if(WITH_COMPOSITOR)
if (WITH_COMPOSITOR)
add_subdirectory(smaa_areatex)
endif()

View File

@@ -1092,12 +1092,12 @@ if(WITH_PYTHON)
configure_file(${PYTHON_SOURCE_DIRECTORY}/setup.py.in ${CMAKE_CURRENT_BINARY_DIR}/setup.py ESCAPE_QUOTES @ONLY)
if(APPLE)
add_custom_command(OUTPUT build COMMAND MACOSX_DEPLOYMENT_TARGET=${CMAKE_OSX_DEPLOYMENT_TARGET} ${PYTHON_EXECUTABLE} setup.py build DEPENDS ${PYTHON_SRC} ${PYTHON_HDR} setup.py)
add_custom_command(OUTPUT build COMMAND MACOSX_DEPLOYMENT_TARGET=${CMAKE_OSX_DEPLOYMENT_TARGET} ${PYTHON_EXECUTABLE} setup.py build DEPENDS ${PYTHON_SRC} ${PYTHON_HDR})
elseif(WIN32)
set(ENV{VS100COMNTOOLS} $ENV{VS120COMNTOOLS})
add_custom_command(OUTPUT build COMMAND ${PYTHON_EXECUTABLE} setup.py build DEPENDS ${PYTHON_SRC} ${PYTHON_HDR} setup.py)
add_custom_command(OUTPUT build COMMAND ${PYTHON_EXECUTABLE} setup.py build DEPENDS ${PYTHON_SRC} ${PYTHON_HDR})
else()
add_custom_command(OUTPUT build COMMAND ${PYTHON_EXECUTABLE} setup.py build DEPENDS ${PYTHON_SRC} ${PYTHON_HDR} setup.py)
add_custom_command(OUTPUT build COMMAND ${PYTHON_EXECUTABLE} setup.py build DEPENDS ${PYTHON_SRC} ${PYTHON_HDR})
endif()
add_custom_target(pythonmodule ALL DEPENDS build SOURCES ${PYTHON_SOURCE_DIRECTORY}/setup.py.in ${PYTHON_SRC} ${PYTHON_HDR})
add_dependencies(pythonmodule audaspace)

View File

@@ -8,20 +8,20 @@ import numpy
from distutils.core import setup, Extension
if len(sys.argv) > 2 and sys.argv[1] == '--build-docs':
import subprocess
from distutils.core import Distribution
from distutils.command.build import build
import subprocess
from distutils.core import Distribution
from distutils.command.build import build
dist = Distribution()
cmd = build(dist)
cmd.finalize_options()
#print(cmd.build_platlib)
dist = Distribution()
cmd = build(dist)
cmd.finalize_options()
#print(cmd.build_platlib)
os.environ['PYTHONPATH'] = os.path.join(os.getcwd(), cmd.build_platlib)
os.environ['LD_LIBRARY_PATH'] = os.getcwd()
os.environ['PYTHONPATH'] = os.path.join(os.getcwd(), cmd.build_platlib)
os.environ['LD_LIBRARY_PATH'] = os.getcwd()
ret = subprocess.call(sys.argv[2:])
sys.exit(ret)
ret = subprocess.call(sys.argv[2:])
sys.exit(ret)
# the following line is not working due to https://bugs.python.org/issue9023
@@ -43,8 +43,7 @@ audaspace = Extension(
library_dirs = ['.', 'Release', 'Debug'],
language = 'c++',
extra_compile_args = extra_args,
define_macros = [('WITH_CONVOLUTION', None)] if '@WITH_FFTW@' == 'ON' else [],
sources = [os.path.join(source_directory, file) for file in ['PyAPI.cpp', 'PyDevice.cpp', 'PyHandle.cpp', 'PySound.cpp', 'PySequenceEntry.cpp', 'PySequence.cpp', 'PyPlaybackManager.cpp', 'PyDynamicMusic.cpp', 'PyThreadPool.cpp', 'PySource.cpp'] + (['PyImpulseResponse.cpp', 'PyHRTF.cpp'] if '@WITH_FFTW@' == 'ON' else [])]
sources = [os.path.join(source_directory, file) for file in ['PyAPI.cpp', 'PyDevice.cpp', 'PyHandle.cpp', 'PySound.cpp', 'PySequenceEntry.cpp', 'PySequence.cpp', 'PyPlaybackManager.cpp', 'PyDynamicMusic.cpp', 'PyThreadPool.cpp', 'PySource.cpp'] + (['PyImpulseResponse.cpp', 'PyHRTF.cpp'] if '@WITH_FFTW@' == 'ON' else [])]
)
setup(
@@ -57,6 +56,6 @@ setup(
license = 'Apache License 2.0',
long_description = codecs.open(os.path.join(source_directory, '../../README.md'), 'r', 'utf-8').read(),
ext_modules = [audaspace],
headers = [os.path.join(source_directory, file) for file in ['PyAPI.h', 'PyDevice.h', 'PyHandle.h', 'PySound.h', 'PySequenceEntry.h', 'PySequence.h', 'PyPlaybackManager.h', 'PyDynamicMusic.h', 'PyThreadPool.h', 'PySource.h'] + (['PyImpulseResponse.h', 'PyHRTF.h'] if '@WITH_FFTW@' == 'ON' else [])] + ['Audaspace.h']
headers = [os.path.join(source_directory, file) for file in ['PyAPI.h', 'PyDevice.h', 'PyHandle.h', 'PySound.h', 'PySequenceEntry.h', 'PySequence.h', 'PyPlaybackManager.h', 'PyDynamicMusic.h', 'PyThreadPool.h', 'PySource.h'] + (['PyImpulseResponse.h', 'PyHRTF.h'] if '@WITH_FFTW@' == 'ON' else [])] + ['Audaspace.h']
)

View File

@@ -95,13 +95,6 @@ void WASAPIDevice::runMixingThread()
sleep_duration = std::chrono::milliseconds(buffer_size * 1000 / int(m_specs.rate) / 2);
}
if(m_default_device_changed)
{
m_default_device_changed = false;
result = AUDCLNT_E_DEVICE_INVALIDATED;
goto stop_thread;
}
if(FAILED(result = m_audio_client->GetCurrentPadding(&padding)))
goto stop_thread;
@@ -303,78 +296,13 @@ bool WASAPIDevice::setupDevice(DeviceSpecs &specs)
return true;
}
ULONG WASAPIDevice::AddRef()
{
return InterlockedIncrement(&m_reference_count);
}
ULONG WASAPIDevice::Release()
{
ULONG reference_count = InterlockedDecrement(&m_reference_count);
if(0 == reference_count)
delete this;
return reference_count;
}
HRESULT WASAPIDevice::QueryInterface(REFIID riid, void **ppvObject)
{
if(riid == __uuidof(IMMNotificationClient))
{
*ppvObject = reinterpret_cast<IMMNotificationClient*>(this);
AddRef();
}
else if(riid == IID_IUnknown)
{
*ppvObject = reinterpret_cast<IUnknown*>(this);
AddRef();
}
else
{
*ppvObject = nullptr;
return E_NOINTERFACE;
}
return S_OK;
}
HRESULT WASAPIDevice::OnDeviceStateChanged(LPCWSTR pwstrDeviceId, DWORD dwNewState)
{
return S_OK;
}
HRESULT WASAPIDevice::OnDeviceAdded(LPCWSTR pwstrDeviceId)
{
return S_OK;
}
HRESULT WASAPIDevice::OnDeviceRemoved(LPCWSTR pwstrDeviceId)
{
return S_OK;
}
HRESULT WASAPIDevice::OnDefaultDeviceChanged(EDataFlow flow, ERole role, LPCWSTR pwstrDeviceId)
{
if(flow != EDataFlow::eCapture)
m_default_device_changed = true;
return S_OK;
}
HRESULT WASAPIDevice::OnPropertyValueChanged(LPCWSTR pwstrDeviceId, const PROPERTYKEY key)
{
return S_OK;
}
WASAPIDevice::WASAPIDevice(DeviceSpecs specs, int buffersize) :
m_buffersize(buffersize),
m_imm_device_enumerator(nullptr),
m_imm_device(nullptr),
m_audio_client(nullptr),
m_wave_format_extensible({}),
m_default_device_changed(false),
m_reference_count(1)
m_wave_format_extensible({})
{
// initialize COM if it hasn't happened yet
CoInitializeEx(nullptr, COINIT_MULTITHREADED);
@@ -399,8 +327,6 @@ WASAPIDevice::WASAPIDevice(DeviceSpecs specs, int buffersize) :
create();
m_imm_device_enumerator->RegisterEndpointNotificationCallback(this);
return;
error:
@@ -414,8 +340,6 @@ WASAPIDevice::~WASAPIDevice()
{
stopMixingThread();
m_imm_device_enumerator->UnregisterEndpointNotificationCallback(this);
SafeRelease(&m_audio_client);
SafeRelease(&m_imm_device);
SafeRelease(&m_imm_device_enumerator);

View File

@@ -40,7 +40,7 @@ AUD_NAMESPACE_BEGIN
/**
* This device plays back through WASAPI, the Windows audio API.
*/
class AUD_PLUGIN_API WASAPIDevice : IMMNotificationClient, public ThreadedDevice
class AUD_PLUGIN_API WASAPIDevice : public ThreadedDevice
{
private:
int m_buffersize;
@@ -48,8 +48,6 @@ private:
IMMDevice* m_imm_device;
IAudioClient* m_audio_client;
WAVEFORMATEXTENSIBLE m_wave_format_extensible;
bool m_default_device_changed;
LONG m_reference_count;
AUD_LOCAL HRESULT setupRenderClient(IAudioRenderClient*& render_client, UINT32& buffer_size);
@@ -60,17 +58,6 @@ private:
AUD_LOCAL bool setupDevice(DeviceSpecs& specs);
// IUnknown implementation
ULONG STDMETHODCALLTYPE AddRef();
ULONG STDMETHODCALLTYPE Release();
HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void **ppvObject);
// IMMNotificationClient implementation
HRESULT STDMETHODCALLTYPE OnDeviceStateChanged(LPCWSTR pwstrDeviceId, DWORD dwNewState);
HRESULT STDMETHODCALLTYPE OnDeviceAdded(LPCWSTR pwstrDeviceId);
HRESULT STDMETHODCALLTYPE OnDeviceRemoved(LPCWSTR pwstrDeviceId);
HRESULT STDMETHODCALLTYPE OnDefaultDeviceChanged(EDataFlow flow, ERole role, LPCWSTR pwstrDeviceId);
HRESULT STDMETHODCALLTYPE OnPropertyValueChanged(LPCWSTR pwstrDeviceId, const PROPERTYKEY key);
// delete copy constructor and operator=
WASAPIDevice(const WASAPIDevice&) = delete;
WASAPIDevice& operator=(const WASAPIDevice&) = delete;

extern/hipew/README vendored
View File

@@ -1,12 +0,0 @@
The HIP Extension Wrangler Library (HIPEW) is a cross-platform open-source
C/C++ library to dynamically load the HIP library.
HIP (Heterogeneous-Compute Interface for Portability) is an API for C++
programming on AMD GPUs.
It is maintained as part of the Blender project, but included in extern/
for consistency with CUEW and CLEW libraries.
LICENSE
HIPEW is released under the Apache 2.0 license.

View File

@@ -1,5 +0,0 @@
Project: Blender
URL: https://git.blender.org/blender.git
License: Apache 2.0
Upstream version: N/A
Local modifications: None

View File

@@ -219,17 +219,17 @@ static int hipewHasOldDriver(const char *hip_path) {
DWORD verHandle = 0;
DWORD verSize = GetFileVersionInfoSize(hip_path, &verHandle);
int old_driver = 0;
if (verSize != 0) {
if(verSize != 0) {
LPSTR verData = (LPSTR)malloc(verSize);
if (GetFileVersionInfo(hip_path, verHandle, verSize, verData)) {
if(GetFileVersionInfo(hip_path, verHandle, verSize, verData)) {
LPBYTE lpBuffer = NULL;
UINT size = 0;
if (VerQueryValue(verData, "\\", (VOID FAR * FAR *)&lpBuffer, &size)) {
if (size) {
if(VerQueryValue(verData, "\\", (VOID FAR * FAR *)&lpBuffer, &size)) {
if(size) {
VS_FIXEDFILEINFO *verInfo = (VS_FIXEDFILEINFO *)lpBuffer;
/* Magic value from
* https://docs.microsoft.com/en-us/windows/win32/api/verrsrc/ns-verrsrc-vs_fixedfileinfo */
if (verInfo->dwSignature == 0xfeef04bd) {
if(verInfo->dwSignature == 0xfeef04bd) {
unsigned int fileVersionLS0 = (verInfo->dwFileVersionLS >> 16) & 0xffff;
unsigned int fileversionLS1 = (verInfo->dwFileVersionLS >> 0) & 0xffff;
/* Corresponds to versions older than AMD Radeon Pro 21.Q4. */
@@ -257,7 +257,7 @@ static int hipewHipInit(void) {
#endif
static int initialized = 0;
static int result = 0;
int error;
int error, driver_version;
if (initialized) {
return result;
@@ -565,6 +565,8 @@ int hipewCompilerVersion(void) {
const char *path = hipewCompilerPath();
const char *marker = "Hip compilation tools, release ";
FILE *pipe;
int major, minor;
char *versionstr;
char buf[128];
char output[65536] = "\0";
char command[65536] = "\0";

View File

@@ -1,7 +1,7 @@
Project: NanoSVG
URL: https://github.com/memononen/nanosvg
License: zlib
Upstream version: 3cdd4a9d7886
Upstream version:
Local modifications: Added some functionality to manage grease pencil layers
Added a fix to SVG import arc and float errors (https://developer.blender.org/rB11dc674c78b49fc4e0b7c134c375b6c8b8eacbcc)

View File

@@ -25,6 +25,7 @@ add_subdirectory(ghost)
add_subdirectory(guardedalloc)
add_subdirectory(libmv)
add_subdirectory(memutil)
add_subdirectory(numaapi)
add_subdirectory(opencolorio)
add_subdirectory(opensubdiv)
add_subdirectory(mikktspace)

View File

@@ -45,7 +45,7 @@
*/
/** \file
* \ingroup intern_atomic
* \ingroup Atomic
*
* \brief Provides wrapper around system-specific atomic primitives,
* and some extensions (faked-atomic operations over float numbers).

View File

@@ -44,10 +44,6 @@
* The Original Code is: adapted from jemalloc.
*/
/** \file
* \ingroup intern_atomic
*/
#ifndef __ATOMIC_OPS_EXT_H__
#define __ATOMIC_OPS_EXT_H__

View File

@@ -5,7 +5,7 @@
* All rights reserved.
* Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
* Copyright (C) 2009-2013 Facebook, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice(s),
@@ -13,7 +13,7 @@
* 2. Redistributions in binary form must reproduce the above copyright notice(s),
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
@@ -26,10 +26,6 @@
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
* \ingroup intern_atomic
*/
#ifndef __ATOMIC_OPS_MSVC_H__
#define __ATOMIC_OPS_MSVC_H__

View File

@@ -44,10 +44,6 @@
* The Original Code is: adapted from jemalloc.
*/
/** \file
* \ingroup intern_atomic
*/
#ifndef __ATOMIC_OPS_UNIX_H__
#define __ATOMIC_OPS_UNIX_H__

View File

@@ -44,10 +44,6 @@
* The Original Code is: adapted from jemalloc.
*/
/** \file
* \ingroup intern_atomic
*/
#ifndef __ATOMIC_OPS_UTILS_H__
#define __ATOMIC_OPS_UTILS_H__

View File

@@ -14,8 +14,11 @@
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __CLG_LOG_H__
#define __CLG_LOG_H__
/** \file
* \ingroup intern_clog
* \ingroup clog
*
* C Logging Library (clog)
* ========================
@@ -65,9 +68,6 @@
* - 4+: May be used for more details than 3, should be avoided but not prevented.
*/
#ifndef __CLG_LOG_H__
#define __CLG_LOG_H__
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

View File

@@ -15,7 +15,7 @@
*/
/** \file
* \ingroup intern_clog
* \ingroup clog
*/
#include <assert.h>
@@ -388,7 +388,7 @@ static void clg_ctx_fatal_action(CLogContext *ctx)
static void clg_ctx_backtrace(CLogContext *ctx)
{
/* NOTE: we avoid writing to 'FILE', for back-trace we make an exception,
/* Note: we avoid writing to 'FILE', for back-trace we make an exception,
* if necessary we could have a version of the callback that writes to file
* descriptor all at once. */
ctx->callbacks.backtrace_fn(ctx->output_file);

View File

@@ -51,6 +51,8 @@ list(APPEND LIBRARIES ${CYCLES_GL_LIBRARIES})
# Common configuration.
cycles_link_directories()
add_definitions(${GL_DEFINITIONS})
include_directories(${INC})

View File

@@ -82,7 +82,7 @@ static void session_print_status()
string status, substatus;
/* get status */
double progress = options.session->progress.get_progress();
float progress = options.session->progress.get_progress();
options.session->progress.get_status(status, substatus);
if (substatus != "")
@@ -183,7 +183,7 @@ static void display_info(Progress &progress)
progress.get_time(total_time, sample_time);
progress.get_status(status, substatus);
double progress_val = progress.get_progress();
float progress_val = progress.get_progress();
if (substatus != "")
status += ": " + substatus;

View File

@@ -40,7 +40,6 @@ set(SRC
object_cull.cpp
output_driver.cpp
particles.cpp
pointcloud.cpp
curves.cpp
logging.cpp
python.cpp
@@ -88,7 +87,6 @@ endif()
set(ADDON_FILES
addon/__init__.py
addon/camera.py
addon/engine.py
addon/operators.py
addon/osl.py
@@ -103,11 +101,6 @@ add_definitions(${GL_DEFINITIONS})
if(WITH_CYCLES_DEVICE_HIP)
add_definitions(-DWITH_HIP)
endif()
if(WITH_CYCLES_DEVICE_METAL)
add_definitions(-DWITH_METAL)
endif()
if(WITH_MOD_FLUID)
add_definitions(-DWITH_FLUID)
endif()

View File

@@ -1,84 +0,0 @@
#
# Copyright 2011-2021 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# <pep8 compliant>
# Fit to match default projective camera with focal_length 50 and sensor_width 36.
default_fisheye_polynomial = [-1.1735143712967577e-05,
-0.019988736953434998,
-3.3525322965709175e-06,
3.099275275886036e-06,
-2.6064646454854524e-08]
# Utilities to generate lens polynomials to match built-in camera types, only here
# for reference at the moment, not used by the code.
def create_grid(sensor_height, sensor_width):
import numpy as np
if sensor_height is None:
sensor_height = sensor_width / (16 / 9) # Default aspect ration 16:9
uu, vv = np.meshgrid(np.linspace(0, 1, 100), np.linspace(0, 1, 100))
uu = (uu - 0.5) * sensor_width
vv = (vv - 0.5) * sensor_height
rr = np.sqrt(uu ** 2 + vv ** 2)
return rr
def fisheye_lens_polynomial_from_projective(focal_length=50, sensor_width=36, sensor_height=None):
import numpy as np
rr = create_grid(sensor_height, sensor_width)
polynomial = np.polyfit(rr.flat, (-np.arctan(rr / focal_length)).flat, 4)
return list(reversed(polynomial))
def fisheye_lens_polynomial_from_projective_fov(fov, sensor_width=36, sensor_height=None):
import numpy as np
f = sensor_width / 2 / np.tan(fov / 2)
return fisheye_lens_polynomial_from_projective(f, sensor_width, sensor_height)
def fisheye_lens_polynomial_from_equisolid(lens=10.5, sensor_width=36, sensor_height=None):
import numpy as np
rr = create_grid(sensor_height, sensor_width)
x = rr.reshape(-1)
x = np.stack([x**i for i in [1, 2, 3, 4]])
y = (-2 * np.arcsin(rr / (2 * lens))).reshape(-1)
polynomial = np.linalg.lstsq(x.T, y.T, rcond=None)[0]
return [0] + list(polynomial)
def fisheye_lens_polynomial_from_equidistant(fov=180, sensor_width=36, sensor_height=None):
import numpy as np
return [0, -np.radians(fov) / sensor_width, 0, 0, 0]
def fisheye_lens_polynomial_from_distorted_projective_polynomial(k1, k2, k3, focal_length=50, sensor_width=36, sensor_height=None):
import numpy as np
rr = create_grid(sensor_height, sensor_width)
r2 = (rr / focal_length) ** 2
r4 = r2 * r2
r6 = r4 * r2
r_coeff = 1 + k1 * r2 + k2 * r4 + k3 * r6
polynomial = np.polyfit(rr.flat, (-np.arctan(rr / focal_length * r_coeff)).flat, 4)
return list(reversed(polynomial))
def fisheye_lens_polynomial_from_distorted_projective_divisions(k1, k2, focal_length=50, sensor_width=36, sensor_height=None):
import numpy as np
rr = create_grid(sensor_height, sensor_width)
r2 = (rr / focal_length) ** 2
r4 = r2 * r2
r_coeff = 1 + k1 * r2 + k2 * r4
polynomial = np.polyfit(rr.flat, (-np.arctan(rr / focal_length / r_coeff)).flat, 4)
return list(reversed(polynomial))
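
For reference, a minimal usage sketch of the helpers above (plain Python; evaluate_lens_polynomial is a hypothetical name and not part of this file, and it assumes the functions above plus NumPy are in scope): the returned coefficients [k0..k4] approximate the mapping from sensor radius r (in mm) to the fitted angle, which for the projective case is -arctan(r / focal_length).

    import numpy as np

    def evaluate_lens_polynomial(coefficients, radius_mm):
        # theta ~ k0 + k1*r + k2*r^2 + k3*r^3 + k4*r^4
        return sum(k * radius_mm ** i for i, k in enumerate(coefficients))

    # Regenerate the default coefficients and compare against the exact model.
    coeffs = fisheye_lens_polynomial_from_projective(focal_length=50, sensor_width=36)
    r = 10.0  # mm from the sensor centre
    print(evaluate_lens_polynomial(coeffs, r), -np.arctan(r / 50))  # the two values should agree closely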

View File

@@ -28,7 +28,7 @@ def _configure_argument_parser():
action='store_true')
parser.add_argument("--cycles-device",
help="Set the device to use for Cycles, overriding user preferences and the scene setting."
"Valid options are 'CPU', 'CUDA', 'OPTIX', 'HIP' or 'METAL'."
"Valid options are 'CPU', 'CUDA', 'OPTIX', or 'HIP'"
"Additionally, you can append '+CPU' to any GPU type for hybrid rendering.",
default=None)
return parser
@@ -60,8 +60,9 @@ def init():
path = os.path.dirname(__file__)
user_path = os.path.dirname(os.path.abspath(bpy.utils.user_resource('CONFIG', path='')))
temp_path = bpy.app.tempdir
_cycles.init(path, user_path, bpy.app.background)
_cycles.init(path, user_path, temp_path, bpy.app.background)
_parse_command_line()
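
As an aside on the "+CPU" convention described in the --cycles-device help text above, a purely illustrative Python sketch of how such a value could be split into a GPU backend and a hybrid-CPU flag (parse_cycles_device is a hypothetical helper, not the add-on's actual parsing code):

    def parse_cycles_device(value):
        # "OPTIX+CPU" -> backend "OPTIX" plus hybrid CPU rendering.
        parts = value.split('+')
        return parts[0], 'CPU' in parts[1:]

    print(parse_cycles_device('OPTIX+CPU'))  # ('OPTIX', True)
    print(parse_cycles_device('HIP'))        # ('HIP', False)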

View File

@@ -33,7 +33,6 @@ from math import pi
# enums
from . import engine
from . import camera
enum_devices = (
('CPU', "CPU", "Use CPU for rendering"),
@@ -73,8 +72,6 @@ enum_panorama_types = (
('FISHEYE_EQUISOLID', "Fisheye Equisolid",
"Similar to most fisheye modern lens, takes sensor dimensions into consideration"),
('MIRRORBALL', "Mirror Ball", "Uses the mirror ball mapping"),
('FISHEYE_LENS_POLYNOMIAL', "Fisheye Lens Polynomial",
"Defines the lens projection as polynomial to allow real world camera lenses to be mimicked"),
)
enum_curve_shape = (
@@ -114,8 +111,7 @@ enum_device_type = (
('CPU', "CPU", "CPU", 0),
('CUDA', "CUDA", "CUDA", 1),
('OPTIX', "OptiX", "OptiX", 3),
('HIP', "HIP", "HIP", 4),
('METAL', "Metal", "Metal", 5)
("HIP", "HIP", "HIP", 4)
)
enum_texture_limit = (
@@ -433,7 +429,7 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
)
direct_light_sampling_type: EnumProperty(
name="Direct Light Sampling",
name="Direct Light Sampling Type",
description="The type of strategy used for sampling direct light contributions",
items=enum_direct_light_sampling_type,
default='MULTIPLE_IMPORTANCE_SAMPLING',
@@ -667,11 +663,6 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
description="Use special type BVH optimized for hair (uses more ram but renders faster)",
default=True,
)
debug_use_compact_bvh: BoolProperty(
name="Use Compact BVH",
description="Use compact BVH structure (uses less ram but renders slower)",
default=True,
)
debug_bvh_time_steps: IntProperty(
name="BVH Time Steps",
description="Split BVH primitives by this number of time steps to speed up render time in cost of memory",
@@ -799,7 +790,7 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
)
use_auto_tile: BoolProperty(
name="Use Tiling",
name="Using Tiling",
description="Render high resolution images in tiles to reduce memory usage, using the specified tile size. Tiles are cached to disk while rendering to save memory",
default=True,
)
@@ -807,7 +798,7 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
name="Tile Size",
default=2048,
description="",
min=8, max=8192,
min=8, max=16384,
)
# Various fine-tuning debug flags
@@ -899,32 +890,6 @@ class CyclesCameraSettings(bpy.types.PropertyGroup):
default=pi,
)
fisheye_polynomial_k0: FloatProperty(
name="Fisheye Polynomial K0",
description="Coefficient K0 of the lens polynomial",
default=camera.default_fisheye_polynomial[0], precision=6, step=0.1, subtype='ANGLE',
)
fisheye_polynomial_k1: FloatProperty(
name="Fisheye Polynomial K1",
description="Coefficient K1 of the lens polynomial",
default=camera.default_fisheye_polynomial[1], precision=6, step=0.1, subtype='ANGLE',
)
fisheye_polynomial_k2: FloatProperty(
name="Fisheye Polynomial K2",
description="Coefficient K2 of the lens polynomial",
default=camera.default_fisheye_polynomial[2], precision=6, step=0.1, subtype='ANGLE',
)
fisheye_polynomial_k3: FloatProperty(
name="Fisheye Polynomial K3",
description="Coefficient K3 of the lens polynomial",
default=camera.default_fisheye_polynomial[3], precision=6, step=0.1, subtype='ANGLE',
)
fisheye_polynomial_k4: FloatProperty(
name="Fisheye Polynomial K4",
description="Coefficient K4 of the lens polynomial",
default=camera.default_fisheye_polynomial[4], precision=6, step=0.1, subtype='ANGLE',
)
@classmethod
def register(cls):
bpy.types.Camera.cycles = PointerProperty(
@@ -1347,7 +1312,8 @@ class CyclesPreferences(bpy.types.AddonPreferences):
def get_device_types(self, context):
import _cycles
has_cuda, has_optix, has_hip, has_metal = _cycles.get_device_types()
has_cuda, has_optix, has_hip = _cycles.get_device_types()
list = [('NONE', "None", "Don't use compute device", 0)]
if has_cuda:
list.append(('CUDA', "CUDA", "Use CUDA for GPU acceleration", 1))
@@ -1355,8 +1321,6 @@ class CyclesPreferences(bpy.types.AddonPreferences):
list.append(('OPTIX', "OptiX", "Use OptiX for GPU acceleration", 3))
if has_hip:
list.append(('HIP', "HIP", "Use HIP for GPU acceleration", 4))
if has_metal:
list.append(('METAL', "Metal", "Use Metal for GPU acceleration", 5))
return list
@@ -1382,7 +1346,7 @@ class CyclesPreferences(bpy.types.AddonPreferences):
def update_device_entries(self, device_list):
for device in device_list:
if not device[1] in {'CUDA', 'OPTIX', 'CPU', 'HIP', 'METAL'}:
if not device[1] in {'CUDA', 'OPTIX', 'CPU', 'HIP'}:
continue
# Try to find existing Device entry
entry = self.find_existing_device_entry(device)
@@ -1426,7 +1390,7 @@ class CyclesPreferences(bpy.types.AddonPreferences):
import _cycles
# Ensure `self.devices` is not re-allocated when the second call to
# get_devices_for_type is made, freeing items from the first list.
for device_type in ('CUDA', 'OPTIX', 'HIP', 'METAL'):
for device_type in ('CUDA', 'OPTIX', 'HIP'):
self.update_device_entries(_cycles.available_devices(device_type))
# Deprecated: use refresh_devices instead.
@@ -1452,19 +1416,6 @@ class CyclesPreferences(bpy.types.AddonPreferences):
num += 1
return num
def has_multi_device(self):
import _cycles
compute_device_type = self.get_compute_device_type()
device_list = _cycles.available_devices(compute_device_type)
for device in device_list:
if device[1] == compute_device_type:
continue
for dev in self.devices:
if dev.use and dev.id == device[2]:
return True
return False
def has_active_device(self):
return self.get_num_gpu_devices() > 0
@@ -1491,8 +1442,6 @@ class CyclesPreferences(bpy.types.AddonPreferences):
col.label(text="Requires discrete AMD GPU with RDNA architecture", icon='BLANK1')
if sys.platform[:3] == "win":
col.label(text="and AMD Radeon Pro 21.Q4 driver or newer", icon='BLANK1')
elif device_type == 'METAL':
col.label(text="Requires Apple Silicon and macOS 12.0 or newer", icon='BLANK1')
return
for device in devices:

View File

@@ -97,11 +97,6 @@ def use_cpu(context):
return (get_device_type(context) == 'NONE' or cscene.device == 'CPU')
def use_metal(context):
cscene = context.scene.cycles
return (get_device_type(context) == 'METAL' and cscene.device == 'GPU')
def use_cuda(context):
cscene = context.scene.cycles
@@ -118,11 +113,11 @@ def use_optix(context):
return (get_device_type(context) == 'OPTIX' and cscene.device == 'GPU')
def use_multi_device(context):
def use_sample_all_lights(context):
cscene = context.scene.cycles
if cscene.device != 'GPU':
return False
return context.preferences.addons[__package__].preferences.has_multi_device()
return cscene.sample_all_lights_direct or cscene.sample_all_lights_indirect
def show_device_active(context):
@@ -667,10 +662,6 @@ class CYCLES_RENDER_PT_performance_acceleration_structure(CyclesButtonsPanel, Pa
bl_label = "Acceleration Structure"
bl_parent_id = "CYCLES_RENDER_PT_performance"
@classmethod
def poll(cls, context):
return not use_optix(context) or use_multi_device(context)
def draw(self, context):
import _cycles
@@ -683,33 +674,21 @@ class CYCLES_RENDER_PT_performance_acceleration_structure(CyclesButtonsPanel, Pa
col = layout.column()
use_embree = _cycles.with_embree
use_embree = False
if use_cpu(context):
col.prop(cscene, "debug_use_spatial_splits")
if use_embree:
col.prop(cscene, "debug_use_compact_bvh")
else:
sub = col.column()
sub.active = not cscene.debug_use_spatial_splits
sub.prop(cscene, "debug_bvh_time_steps")
col.prop(cscene, "debug_use_hair_bvh")
use_embree = _cycles.with_embree
if not use_embree:
sub = col.column(align=True)
sub.label(text="Cycles built without Embree support")
sub.label(text="CPU raytracing performance will be poor")
else:
col.prop(cscene, "debug_use_spatial_splits")
sub = col.column()
sub.active = not cscene.debug_use_spatial_splits
sub.prop(cscene, "debug_bvh_time_steps")
col.prop(cscene, "debug_use_hair_bvh")
# CPU is used in addition to a GPU
if use_multi_device(context) and use_embree:
col.prop(cscene, "debug_use_compact_bvh")
col.prop(cscene, "debug_use_spatial_splits")
sub = col.column()
sub.active = not use_embree
sub.prop(cscene, "debug_use_hair_bvh")
sub = col.column()
sub.active = not cscene.debug_use_spatial_splits and not use_embree
sub.prop(cscene, "debug_bvh_time_steps")
class CYCLES_RENDER_PT_performance_final_render(CyclesButtonsPanel, Panel):
@@ -1036,7 +1015,7 @@ class CYCLES_OBJECT_PT_motion_blur(CyclesButtonsPanel, Panel):
def poll(cls, context):
ob = context.object
if CyclesButtonsPanel.poll(context) and ob:
if ob.type in {'MESH', 'CURVE', 'CURVE', 'SURFACE', 'FONT', 'META', 'CAMERA', 'HAIR', 'POINTCLOUD'}:
if ob.type in {'MESH', 'CURVE', 'CURVE', 'SURFACE', 'FONT', 'META', 'CAMERA'}:
return True
if ob.instance_type == 'COLLECTION' and ob.instance_collection:
return True
@@ -1819,45 +1798,18 @@ class CYCLES_RENDER_PT_bake_output(CyclesButtonsPanel, Panel):
rd = scene.render
if rd.use_bake_multires:
layout.prop(rd, "bake_margin")
layout.prop(rd, "use_bake_clear", text="Clear Image")
if rd.bake_type == 'DISPLACEMENT':
layout.prop(rd, "use_bake_lores_mesh")
else:
layout.prop(cbk, "target")
if cbk.target == 'IMAGE_TEXTURES':
layout.prop(cbk, "margin")
layout.prop(cbk, "use_clear", text="Clear Image")
class CYCLES_RENDER_PT_bake_output_margin(CyclesButtonsPanel, Panel):
bl_label = "Margin"
bl_context = "render"
bl_parent_id = "CYCLES_RENDER_PT_bake_output"
COMPAT_ENGINES = {'CYCLES'}
@classmethod
def poll(cls, context):
scene = context.scene
cbk = scene.render.bake
return cbk.target == 'IMAGE_TEXTURES'
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False # No animation.
scene = context.scene
cscene = scene.cycles
cbk = scene.render.bake
rd = scene.render
if rd.use_bake_multires:
layout.prop(rd, "bake_margin_type", text="Type")
layout.prop(rd, "bake_margin", text="Size")
else:
if cbk.target == 'IMAGE_TEXTURES':
layout.prop(cbk, "margin_type", text="Type")
layout.prop(cbk, "margin", text="Size")
class CYCLES_RENDER_PT_debug(CyclesDebugButtonsPanel, Panel):
bl_label = "Debug"
@@ -1867,38 +1819,37 @@ class CYCLES_RENDER_PT_debug(CyclesDebugButtonsPanel, Panel):
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False # No animation.
scene = context.scene
cscene = scene.cycles
col = layout.column(heading="CPU")
col = layout.column()
col.label(text="CPU Flags:")
row = col.row(align=True)
row.prop(cscene, "debug_use_cpu_sse2", toggle=True)
row.prop(cscene, "debug_use_cpu_sse3", toggle=True)
row.prop(cscene, "debug_use_cpu_sse41", toggle=True)
row.prop(cscene, "debug_use_cpu_avx", toggle=True)
row.prop(cscene, "debug_use_cpu_avx2", toggle=True)
col.prop(cscene, "debug_bvh_layout", text="BVH")
col.prop(cscene, "debug_bvh_layout")
col.separator()
col = layout.column(heading="CUDA")
col = layout.column()
col.label(text="CUDA Flags:")
col.prop(cscene, "debug_use_cuda_adaptive_compile")
col = layout.column(heading="OptiX")
col.prop(cscene, "debug_use_optix_debug", text="Module Debug")
col.separator()
col.prop(cscene, "debug_bvh_type", text="Viewport BVH")
col = layout.column()
col.label(text="OptiX Flags:")
col.prop(cscene, "debug_use_optix_debug")
col.separator()
import _cycles
if _cycles.with_debug:
col.prop(cscene, "direct_light_sampling_type")
col = layout.column()
col.prop(cscene, "debug_bvh_type")
class CYCLES_RENDER_PT_simplify(CyclesButtonsPanel, Panel):
@@ -2226,7 +2177,6 @@ classes = (
CYCLES_RENDER_PT_bake_influence,
CYCLES_RENDER_PT_bake_selected_to_active,
CYCLES_RENDER_PT_bake_output,
CYCLES_RENDER_PT_bake_output_margin,
CYCLES_RENDER_PT_debug,
node_panel(CYCLES_MATERIAL_PT_settings),
node_panel(CYCLES_MATERIAL_PT_settings_surface),

View File

@@ -69,12 +69,6 @@ struct BlenderCamera {
float pole_merge_angle_from;
float pole_merge_angle_to;
float fisheye_polynomial_k0;
float fisheye_polynomial_k1;
float fisheye_polynomial_k2;
float fisheye_polynomial_k3;
float fisheye_polynomial_k4;
enum { AUTO, HORIZONTAL, VERTICAL } sensor_fit;
float sensor_width;
float sensor_height;
@@ -206,12 +200,6 @@ static void blender_camera_from_object(BlenderCamera *bcam,
bcam->longitude_min = RNA_float_get(&ccamera, "longitude_min");
bcam->longitude_max = RNA_float_get(&ccamera, "longitude_max");
bcam->fisheye_polynomial_k0 = RNA_float_get(&ccamera, "fisheye_polynomial_k0");
bcam->fisheye_polynomial_k1 = RNA_float_get(&ccamera, "fisheye_polynomial_k1");
bcam->fisheye_polynomial_k2 = RNA_float_get(&ccamera, "fisheye_polynomial_k2");
bcam->fisheye_polynomial_k3 = RNA_float_get(&ccamera, "fisheye_polynomial_k3");
bcam->fisheye_polynomial_k4 = RNA_float_get(&ccamera, "fisheye_polynomial_k4");
bcam->interocular_distance = b_camera.stereo().interocular_distance();
if (b_camera.stereo().convergence_mode() == BL::CameraStereoData::convergence_mode_PARALLEL) {
bcam->convergence_distance = FLT_MAX;
@@ -434,8 +422,7 @@ static void blender_camera_sync(Camera *cam,
cam->set_full_height(height);
/* panorama sensor */
if (bcam->type == CAMERA_PANORAMA && (bcam->panorama_type == PANORAMA_FISHEYE_EQUISOLID ||
bcam->panorama_type == PANORAMA_FISHEYE_LENS_POLYNOMIAL)) {
if (bcam->type == CAMERA_PANORAMA && bcam->panorama_type == PANORAMA_FISHEYE_EQUISOLID) {
float fit_xratio = (float)bcam->render_width * bcam->pixelaspect.x;
float fit_yratio = (float)bcam->render_height * bcam->pixelaspect.y;
bool horizontal_fit;
@@ -478,12 +465,6 @@ static void blender_camera_sync(Camera *cam,
cam->set_latitude_min(bcam->latitude_min);
cam->set_latitude_max(bcam->latitude_max);
cam->set_fisheye_polynomial_k0(bcam->fisheye_polynomial_k0);
cam->set_fisheye_polynomial_k1(bcam->fisheye_polynomial_k1);
cam->set_fisheye_polynomial_k2(bcam->fisheye_polynomial_k2);
cam->set_fisheye_polynomial_k3(bcam->fisheye_polynomial_k3);
cam->set_fisheye_polynomial_k4(bcam->fisheye_polynomial_k4);
cam->set_longitude_min(bcam->longitude_min);
cam->set_longitude_max(bcam->longitude_max);

View File

@@ -14,8 +14,6 @@
* limitations under the License.
*/
#include <optional>
#include "blender/sync.h"
#include "blender/util.h"
@@ -626,36 +624,15 @@ void BlenderSync::sync_particle_hair(
}
}
#ifdef WITH_NEW_CURVES_TYPE
static std::optional<BL::FloatAttribute> find_curves_radius_attribute(BL::Curves b_curves)
#ifdef WITH_HAIR_NODES
static float4 hair_point_as_float4(BL::HairPoint b_point)
{
for (BL::Attribute &b_attribute : b_curves.attributes) {
if (b_attribute.name() != "radius") {
continue;
}
if (b_attribute.domain() != BL::Attribute::domain_POINT) {
continue;
}
if (b_attribute.data_type() != BL::Attribute::data_type_FLOAT) {
continue;
}
return BL::FloatAttribute{b_attribute};
}
return std::nullopt;
}
static float4 hair_point_as_float4(BL::Curves b_curves,
std::optional<BL::FloatAttribute> b_attr_radius,
const int index)
{
float4 mP = float3_to_float4(get_float3(b_curves.position_data[index].vector()));
mP.w = b_attr_radius ? b_attr_radius->data[index].value() : 0.0f;
float4 mP = float3_to_float4(get_float3(b_point.co()));
mP.w = b_point.radius();
return mP;
}
static float4 interpolate_hair_points(BL::Curves b_curves,
std::optional<BL::FloatAttribute> b_attr_radius,
static float4 interpolate_hair_points(BL::Hair b_hair,
const int first_point_index,
const int num_points,
const float step)
@@ -664,12 +641,12 @@ static float4 interpolate_hair_points(BL::Curves b_curves,
const int point_a = clamp((int)curve_t, 0, num_points - 1);
const int point_b = min(point_a + 1, num_points - 1);
const float t = curve_t - (float)point_a;
return lerp(hair_point_as_float4(b_curves, b_attr_radius, first_point_index + point_a),
hair_point_as_float4(b_curves, b_attr_radius, first_point_index + point_b),
return lerp(hair_point_as_float4(b_hair.points[first_point_index + point_a]),
hair_point_as_float4(b_hair.points[first_point_index + point_b]),
t);
}
static void export_hair_curves(Scene *scene, Hair *hair, BL::Curves b_curves)
static void export_hair_curves(Scene *scene, Hair *hair, BL::Hair b_hair)
{
/* TODO: optimize so we can straight memcpy arrays from Blender? */
@@ -689,19 +666,17 @@ static void export_hair_curves(Scene *scene, Hair *hair, BL::Curves b_curves)
}
/* Reserve memory. */
const int num_keys = b_curves.points.length();
const int num_curves = b_curves.curves.length();
const int num_keys = b_hair.points.length();
const int num_curves = b_hair.curves.length();
hair->reserve_curves(num_curves, num_keys);
std::optional<BL::FloatAttribute> b_attr_radius = find_curves_radius_attribute(b_curves);
/* Export curves and points. */
vector<float> points_length;
for (int i = 0; i < num_curves; i++) {
const int first_point_index = b_curves.curve_offset_data[i].value();
const int num_points = b_curves.curve_offset_data[i + 1].value() - first_point_index;
for (BL::HairCurve &b_curve : b_hair.curves) {
const int first_point_index = b_curve.first_point_index();
const int num_points = b_curve.num_points();
float3 prev_co = zero_float3();
float length = 0.0f;
@@ -712,9 +687,10 @@ static void export_hair_curves(Scene *scene, Hair *hair, BL::Curves b_curves)
/* Position and radius. */
for (int i = 0; i < num_points; i++) {
const float3 co = get_float3(b_curves.position_data[first_point_index + i].vector());
const float radius = b_attr_radius ? b_attr_radius->data[first_point_index + i].value() :
0.0f;
BL::HairPoint b_point = b_hair.points[first_point_index + i];
const float3 co = get_float3(b_point.co());
const float radius = b_point.radius();
hair->add_curve_key(co, radius);
if (attr_intercept) {
@@ -739,7 +715,7 @@ static void export_hair_curves(Scene *scene, Hair *hair, BL::Curves b_curves)
/* Random number per curve. */
if (attr_random != NULL) {
attr_random->add(hash_uint2_to_float(i, 0));
attr_random->add(hash_uint2_to_float(b_curve.index(), 0));
}
/* Curve. */
@@ -748,7 +724,7 @@ static void export_hair_curves(Scene *scene, Hair *hair, BL::Curves b_curves)
}
}
static void export_hair_curves_motion(Hair *hair, BL::Curves b_curves, int motion_step)
static void export_hair_curves_motion(Hair *hair, BL::Hair b_hair, int motion_step)
{
/* Find or add attribute. */
Attribute *attr_mP = hair->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
@@ -761,17 +737,14 @@ static void export_hair_curves_motion(Hair *hair, BL::Curves b_curves, int motio
/* Export motion keys. */
const int num_keys = hair->get_curve_keys().size();
const int num_curves = b_curves.curves.length();
float4 *mP = attr_mP->data_float4() + motion_step * num_keys;
bool have_motion = false;
int num_motion_keys = 0;
int curve_index = 0;
std::optional<BL::FloatAttribute> b_attr_radius = find_curves_radius_attribute(b_curves);
for (int i = 0; i < num_curves; i++) {
const int first_point_index = b_curves.curve_offset_data[i].value();
const int num_points = b_curves.curve_offset_data[i + 1].value() - first_point_index;
for (BL::HairCurve &b_curve : b_hair.curves) {
const int first_point_index = b_curve.first_point_index();
const int num_points = b_curve.num_points();
Hair::Curve curve = hair->get_curve(curve_index);
curve_index++;
@@ -782,7 +755,7 @@ static void export_hair_curves_motion(Hair *hair, BL::Curves b_curves, int motio
int point_index = first_point_index + i;
if (point_index < num_keys) {
mP[num_motion_keys] = hair_point_as_float4(b_curves, b_attr_radius, point_index);
mP[num_motion_keys] = hair_point_as_float4(b_hair.points[point_index]);
num_motion_keys++;
if (!have_motion) {
@@ -801,8 +774,7 @@ static void export_hair_curves_motion(Hair *hair, BL::Curves b_curves, int motio
const float step_size = curve.num_keys > 1 ? 1.0f / (curve.num_keys - 1) : 0.0f;
for (int i = 0; i < curve.num_keys; i++) {
const float step = i * step_size;
mP[num_motion_keys] = interpolate_hair_points(
b_curves, b_attr_radius, first_point_index, num_points, step);
mP[num_motion_keys] = interpolate_hair_points(b_hair, first_point_index, num_points, step);
num_motion_keys++;
}
have_motion = true;
@@ -819,12 +791,12 @@ static void export_hair_curves_motion(Hair *hair, BL::Curves b_curves, int motio
void BlenderSync::sync_hair(Hair *hair, BObjectInfo &b_ob_info, bool motion, int motion_step)
{
/* Convert Blender hair to Cycles curves. */
BL::Curves b_curves(b_ob_info.object_data);
BL::Hair b_hair(b_ob_info.object_data);
if (motion) {
export_hair_curves_motion(hair, b_curves, motion_step);
export_hair_curves_motion(hair, b_hair, motion_step);
}
else {
export_hair_curves(scene, hair, b_curves);
export_hair_curves(scene, hair, b_hair);
}
}
#else
@@ -847,14 +819,11 @@ void BlenderSync::sync_hair(BL::Depsgraph b_depsgraph, BObjectInfo &b_ob_info, H
new_hair.set_used_shaders(used_shaders);
if (view_layer.use_hair) {
#ifdef WITH_NEW_CURVES_TYPE
if (b_ob_info.object_data.is_a(&RNA_Curves)) {
if (b_ob_info.object_data.is_a(&RNA_Hair)) {
/* Hair object. */
sync_hair(&new_hair, b_ob_info, false);
}
else
#endif
{
else {
/* Particle hair. */
bool need_undeformed = new_hair.need_attribute(scene, ATTR_STD_GENERATED);
BL::Mesh b_mesh = object_to_mesh(
@@ -901,15 +870,12 @@ void BlenderSync::sync_hair_motion(BL::Depsgraph b_depsgraph,
/* Export deformed coordinates. */
if (ccl::BKE_object_is_deform_modified(b_ob_info, b_scene, preview)) {
#ifdef WITH_NEW_CURVES_TYPE
if (b_ob_info.object_data.is_a(&RNA_Curves)) {
if (b_ob_info.object_data.is_a(&RNA_Hair)) {
/* Hair object. */
sync_hair(hair, b_ob_info, true, motion_step);
return;
}
else
#endif
{
else {
/* Particle hair. */
BL::Mesh b_mesh = object_to_mesh(
b_data, b_ob_info, b_depsgraph, false, Mesh::SUBDIVISION_NONE);

View File

@@ -27,7 +27,6 @@ enum ComputeDevice {
COMPUTE_DEVICE_CUDA = 1,
COMPUTE_DEVICE_OPTIX = 3,
COMPUTE_DEVICE_HIP = 4,
COMPUTE_DEVICE_METAL = 5,
COMPUTE_DEVICE_NUM
};
@@ -86,9 +85,6 @@ DeviceInfo blender_device_info(BL::Preferences &b_preferences, BL::Scene &b_scen
else if (compute_device == COMPUTE_DEVICE_HIP) {
mask |= DEVICE_MASK_HIP;
}
else if (compute_device == COMPUTE_DEVICE_METAL) {
mask |= DEVICE_MASK_METAL;
}
vector<DeviceInfo> devices = Device::available_devices(mask);
/* Match device preferences and available devices. */

View File

@@ -272,300 +272,12 @@ uint BlenderDisplaySpaceShader::get_shader_program()
return shader_program_;
}
/* --------------------------------------------------------------------
* DrawTile.
*/
/* Higher level representation of a texture from the graphics library. */
class GLTexture {
public:
/* Global counter for all allocated OpenGL textures used by instances of this class. */
static inline std::atomic<int> num_used = 0;
GLTexture() = default;
~GLTexture()
{
assert(gl_id == 0);
}
GLTexture(const GLTexture &other) = delete;
GLTexture &operator=(GLTexture &other) = delete;
GLTexture(GLTexture &&other) noexcept
: gl_id(other.gl_id), width(other.width), height(other.height)
{
other.reset();
}
GLTexture &operator=(GLTexture &&other)
{
if (this == &other) {
return *this;
}
gl_id = other.gl_id;
width = other.width;
height = other.height;
other.reset();
return *this;
}
bool gl_resources_ensure()
{
if (gl_id) {
return true;
}
/* Create texture. */
glGenTextures(1, &gl_id);
if (!gl_id) {
LOG(ERROR) << "Error creating texture.";
return false;
}
/* Configure the texture. */
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, gl_id);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
/* Clamp to edge so that precision issues when zoomed out (which forces linear interpolation)
* does not cause unwanted repetition. */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glBindTexture(GL_TEXTURE_2D, 0);
++num_used;
return true;
}
void gl_resources_destroy()
{
if (!gl_id) {
return;
}
glDeleteTextures(1, &gl_id);
reset();
--num_used;
}
/* OpenGL resource IDs of the texture.
*
* NOTE: Allocated on the render engine's context. */
uint gl_id = 0;
/* Dimensions of the texture in pixels. */
int width = 0;
int height = 0;
protected:
void reset()
{
gl_id = 0;
width = 0;
height = 0;
}
};
/* Higher level representation of a Pixel Buffer Object (PBO) from the graphics library. */
class GLPixelBufferObject {
public:
/* Global counter for all allocated OpenGL PBOs used by instances of this class. */
static inline std::atomic<int> num_used = 0;
GLPixelBufferObject() = default;
~GLPixelBufferObject()
{
assert(gl_id == 0);
}
GLPixelBufferObject(const GLPixelBufferObject &other) = delete;
GLPixelBufferObject &operator=(GLPixelBufferObject &other) = delete;
GLPixelBufferObject(GLPixelBufferObject &&other) noexcept
: gl_id(other.gl_id), width(other.width), height(other.height)
{
other.reset();
}
GLPixelBufferObject &operator=(GLPixelBufferObject &&other)
{
if (this == &other) {
return *this;
}
gl_id = other.gl_id;
width = other.width;
height = other.height;
other.reset();
return *this;
}
bool gl_resources_ensure()
{
if (gl_id) {
return true;
}
glGenBuffers(1, &gl_id);
if (!gl_id) {
LOG(ERROR) << "Error creating texture pixel buffer object.";
return false;
}
++num_used;
return true;
}
void gl_resources_destroy()
{
if (!gl_id) {
return;
}
glDeleteBuffers(1, &gl_id);
reset();
--num_used;
}
/* OpenGL resource IDs of the PBO.
*
* NOTE: Allocated on the render engine's context. */
uint gl_id = 0;
/* Dimensions of the PBO. */
int width = 0;
int height = 0;
protected:
void reset()
{
gl_id = 0;
width = 0;
height = 0;
}
};
class DrawTile {
public:
DrawTile() = default;
~DrawTile() = default;
DrawTile(const DrawTile &other) = delete;
DrawTile &operator=(const DrawTile &other) = delete;
DrawTile(DrawTile &&other) noexcept = default;
DrawTile &operator=(DrawTile &&other) = default;
bool gl_resources_ensure()
{
if (!texture.gl_resources_ensure()) {
gl_resources_destroy();
return false;
}
if (!gl_vertex_buffer) {
glGenBuffers(1, &gl_vertex_buffer);
if (!gl_vertex_buffer) {
LOG(ERROR) << "Error allocating tile VBO.";
gl_resources_destroy();
return false;
}
}
return true;
}
void gl_resources_destroy()
{
texture.gl_resources_destroy();
if (gl_vertex_buffer) {
glDeleteBuffers(1, &gl_vertex_buffer);
gl_vertex_buffer = 0;
}
}
inline bool ready_to_draw() const
{
return texture.gl_id != 0;
}
/* Texture which contains pixels of the tile. */
GLTexture texture;
/* Display parameters the texture of this tile has been updated for. */
BlenderDisplayDriver::Params params;
/* OpenGL resources needed for drawing. */
uint gl_vertex_buffer = 0;
};
class DrawTileAndPBO {
public:
bool gl_resources_ensure()
{
if (!tile.gl_resources_ensure() || !buffer_object.gl_resources_ensure()) {
gl_resources_destroy();
return false;
}
return true;
}
void gl_resources_destroy()
{
tile.gl_resources_destroy();
buffer_object.gl_resources_destroy();
}
DrawTile tile;
GLPixelBufferObject buffer_object;
};
/* --------------------------------------------------------------------
* BlenderDisplayDriver.
*/
struct BlenderDisplayDriver::Tiles {
/* Resources of a tile which is being currently rendered. */
DrawTileAndPBO current_tile;
/* All tiles which rendering is finished and which content will not be changed. */
struct {
vector<DrawTile> tiles;
void gl_resources_destroy_and_clear()
{
for (DrawTile &tile : tiles) {
tile.gl_resources_destroy();
}
tiles.clear();
}
} finished_tiles;
};
BlenderDisplayDriver::BlenderDisplayDriver(BL::RenderEngine &b_engine, BL::Scene &b_scene)
: b_engine_(b_engine),
display_shader_(BlenderDisplayShader::create(b_engine, b_scene)),
tiles_(make_unique<Tiles>())
: b_engine_(b_engine), display_shader_(BlenderDisplayShader::create(b_engine, b_scene))
{
/* Create context while on the main thread. */
gl_context_create();
@@ -580,21 +292,6 @@ BlenderDisplayDriver::~BlenderDisplayDriver()
* Update procedure.
*/
void BlenderDisplayDriver::next_tile_begin()
{
if (!tiles_->current_tile.tile.ready_to_draw()) {
LOG(ERROR)
<< "Unexpectedly moving to the next tile without any data provided for current tile.";
return;
}
/* Moving to the next tile without giving render data for the current tile is not an expected
* situation. */
DCHECK(!need_clear_);
tiles_->finished_tiles.tiles.emplace_back(std::move(tiles_->current_tile.tile));
}
bool BlenderDisplayDriver::update_begin(const Params &params,
int texture_width,
int texture_height)
@@ -615,33 +312,24 @@ bool BlenderDisplayDriver::update_begin(const Params &params,
glWaitSync((GLsync)gl_render_sync_, 0, GL_TIMEOUT_IGNORED);
}
DrawTile &current_tile = tiles_->current_tile.tile;
GLPixelBufferObject &current_tile_buffer_object = tiles_->current_tile.buffer_object;
/* Clear storage of all finished tiles when display clear is requested.
* Do it when new tile data is provided to handle the display clear flag in a single place.
* It also makes the logic reliable from the whether drawing did happen or not point of view. */
if (need_clear_) {
tiles_->finished_tiles.gl_resources_destroy_and_clear();
need_clear_ = false;
}
if (!tiles_->current_tile.gl_resources_ensure()) {
tiles_->current_tile.gl_resources_destroy();
if (!gl_texture_resources_ensure()) {
gl_context_disable();
return false;
}
/* Update texture dimensions if needed. */
if (current_tile.texture.width != texture_width ||
current_tile.texture.height != texture_height) {
if (texture_.width != texture_width || texture_.height != texture_height) {
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, current_tile.texture.gl_id);
glBindTexture(GL_TEXTURE_2D, texture_.gl_id);
glTexImage2D(
GL_TEXTURE_2D, 0, GL_RGBA16F, texture_width, texture_height, 0, GL_RGBA, GL_HALF_FLOAT, 0);
current_tile.texture.width = texture_width;
current_tile.texture.height = texture_height;
texture_.width = texture_width;
texture_.height = texture_height;
glBindTexture(GL_TEXTURE_2D, 0);
/* Texture did change, and no pixel storage was provided. Tag for an explicit zeroing out to
* avoid undefined content. */
texture_.need_clear = true;
}
/* Update PBO dimensions if needed.
@@ -653,58 +341,29 @@ bool BlenderDisplayDriver::update_begin(const Params &params,
* sending too much data to GPU when resolution divider is not 1. */
/* TODO(sergey): Investigate whether keeping the PBO exact size of the texture makes non-interop
* mode faster. */
const int buffer_width = params.size.x;
const int buffer_height = params.size.y;
if (current_tile_buffer_object.width != buffer_width ||
current_tile_buffer_object.height != buffer_height) {
const int buffer_width = params.full_size.x;
const int buffer_height = params.full_size.y;
if (texture_.buffer_width != buffer_width || texture_.buffer_height != buffer_height) {
const size_t size_in_bytes = sizeof(half4) * buffer_width * buffer_height;
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, current_tile_buffer_object.gl_id);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, texture_.gl_pbo_id);
glBufferData(GL_PIXEL_UNPACK_BUFFER, size_in_bytes, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
current_tile_buffer_object.width = buffer_width;
current_tile_buffer_object.height = buffer_height;
texture_.buffer_width = buffer_width;
texture_.buffer_height = buffer_height;
}
/* Store an updated parameters of the current tile.
* In theory it is only needed once per update of the tile, but doing it on every update is
* the easiest and is not expensive. */
tiles_->current_tile.tile.params = params;
/* New content will be provided to the texture in one way or another, so mark this in a
* centralized place. */
texture_.need_update = true;
texture_.params = params;
return true;
}
static void update_tile_texture_pixels(const DrawTileAndPBO &tile)
{
const GLTexture &texture = tile.tile.texture;
DCHECK_NE(tile.buffer_object.gl_id, 0);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture.gl_id);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, tile.buffer_object.gl_id);
glTexSubImage2D(
GL_TEXTURE_2D, 0, 0, 0, texture.width, texture.height, GL_RGBA, GL_HALF_FLOAT, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
}
void BlenderDisplayDriver::update_end()
{
/* Unpack the PBO into the texture as soon as the new content is provided.
*
* This allows to ensure that the unpacking happens while resources like graphics interop (which
* lifetime is outside of control of the display driver) are still valid, as well as allows to
* move the tile from being current to finished immediately after this call.
*
* One concern with this approach is that if the update happens more often than drawing then
* doing the unpack here occupies GPU transfer for no good reason. However, the render scheduler
* takes care of ensuring updates don't happen that often. In regular applications redraw will
* happen much more often than this update. */
update_tile_texture_pixels(tiles_->current_tile);
gl_upload_sync_ = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glFlush();
@@ -717,11 +376,7 @@ void BlenderDisplayDriver::update_end()
half4 *BlenderDisplayDriver::map_texture_buffer()
{
const uint pbo_gl_id = tiles_->current_tile.buffer_object.gl_id;
DCHECK_NE(pbo_gl_id, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo_gl_id);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, texture_.gl_pbo_id);
half4 *mapped_rgba_pixels = reinterpret_cast<half4 *>(
glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY));
@@ -729,6 +384,15 @@ half4 *BlenderDisplayDriver::map_texture_buffer()
LOG(ERROR) << "Error mapping BlenderDisplayDriver pixel buffer object.";
}
if (texture_.need_clear) {
const int64_t texture_width = texture_.width;
const int64_t texture_height = texture_.height;
memset(reinterpret_cast<void *>(mapped_rgba_pixels),
0,
texture_width * texture_height * sizeof(half4));
texture_.need_clear = false;
}
return mapped_rgba_pixels;
}
@@ -747,9 +411,12 @@ BlenderDisplayDriver::GraphicsInterop BlenderDisplayDriver::graphics_interop_get
{
GraphicsInterop interop_dst;
interop_dst.buffer_width = tiles_->current_tile.buffer_object.width;
interop_dst.buffer_height = tiles_->current_tile.buffer_object.height;
interop_dst.opengl_pbo_id = tiles_->current_tile.buffer_object.gl_id;
interop_dst.buffer_width = texture_.buffer_width;
interop_dst.buffer_height = texture_.buffer_height;
interop_dst.opengl_pbo_id = texture_.gl_pbo_id;
interop_dst.need_clear = texture_.need_clear;
texture_.need_clear = false;
return interop_dst;
}
@@ -770,7 +437,7 @@ void BlenderDisplayDriver::graphics_interop_deactivate()
void BlenderDisplayDriver::clear()
{
need_clear_ = true;
texture_.need_clear = true;
}
void BlenderDisplayDriver::set_zoom(float zoom_x, float zoom_y)
@@ -778,155 +445,26 @@ void BlenderDisplayDriver::set_zoom(float zoom_x, float zoom_y)
zoom_ = make_float2(zoom_x, zoom_y);
}
/* Update vertex buffer with new coordinates of vertex positions and texture coordinates.
* This buffer is used to render texture in the viewport.
*
* NOTE: The buffer needs to be bound. */
static void vertex_buffer_update(const DisplayDriver::Params &params)
{
const int x = params.full_offset.x;
const int y = params.full_offset.y;
const int width = params.size.x;
const int height = params.size.y;
/* Invalidate old contents - avoids stalling if the buffer is still waiting in queue to be
* rendered. */
glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
float *vpointer = reinterpret_cast<float *>(glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY));
if (!vpointer) {
return;
}
vpointer[0] = 0.0f;
vpointer[1] = 0.0f;
vpointer[2] = x;
vpointer[3] = y;
vpointer[4] = 1.0f;
vpointer[5] = 0.0f;
vpointer[6] = x + width;
vpointer[7] = y;
vpointer[8] = 1.0f;
vpointer[9] = 1.0f;
vpointer[10] = x + width;
vpointer[11] = y + height;
vpointer[12] = 0.0f;
vpointer[13] = 1.0f;
vpointer[14] = x;
vpointer[15] = y + height;
glUnmapBuffer(GL_ARRAY_BUFFER);
}
static void draw_tile(const float2 &zoom,
const int texcoord_attribute,
const int position_attribute,
const DrawTile &draw_tile)
{
if (!draw_tile.ready_to_draw()) {
return;
}
const GLTexture &texture = draw_tile.texture;
DCHECK_NE(texture.gl_id, 0);
DCHECK_NE(draw_tile.gl_vertex_buffer, 0);
glBindBuffer(GL_ARRAY_BUFFER, draw_tile.gl_vertex_buffer);
/* Draw at the parameters for which the texture has been updated for. This allows to always draw
* texture during bordered-rendered camera view without flickering. The validness of the display
* parameters for a texture is guaranteed by the initial "clear" state which makes drawing to
* have an early output.
*
* Such approach can cause some extra "jelly" effect during panning, but it is not more jelly
* than overlay of selected objects. Also, it's possible to redraw texture at an intersection of
   * the texture draw parameters and the latest updated draw parameters (although the complexity of
   * doing it might not be worth it). */
vertex_buffer_update(draw_tile.params);
glBindTexture(GL_TEXTURE_2D, texture.gl_id);
/* Trick to keep sharp rendering without jagged edges on all GPUs.
*
* The idea here is to enforce driver to use linear interpolation when the image is not zoomed
* in.
* For the render result with a resolution divider in effect we always use nearest interpolation.
*
* Use explicit MIN assignment to make sure the driver does not have an undefined behavior at
* the zoom level 1. The MAG filter is always NEAREST. */
const float zoomed_width = draw_tile.params.size.x * zoom.x;
const float zoomed_height = draw_tile.params.size.y * zoom.y;
if (texture.width != draw_tile.params.size.x || texture.height != draw_tile.params.size.y) {
/* Resolution divider is different from 1, force nearest interpolation. */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
else if (zoomed_width - draw_tile.params.size.x > 0.5f ||
zoomed_height - draw_tile.params.size.y > 0.5f) {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
else {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
}
glVertexAttribPointer(
texcoord_attribute, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (const GLvoid *)0);
glVertexAttribPointer(position_attribute,
2,
GL_FLOAT,
GL_FALSE,
4 * sizeof(float),
(const GLvoid *)(sizeof(float) * 2));
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
}
void BlenderDisplayDriver::flush()
{
/* This is called from the render thread that also calls update_begin/end, right before ending
* the render loop. We wait for any queued PBO and render commands to be done, before destroying
* the render thread and activating the context in the main thread to destroy resources.
*
   * If we don't do this, the NVIDIA driver hangs for a few seconds when ending 3D viewport
* rendering, for unknown reasons. This was found with NVIDIA driver version 470.73 and a Quadro
* RTX 6000 on Linux. */
if (!gl_context_enable()) {
return;
}
if (gl_upload_sync_) {
glWaitSync((GLsync)gl_upload_sync_, 0, GL_TIMEOUT_IGNORED);
}
if (gl_render_sync_) {
glWaitSync((GLsync)gl_render_sync_, 0, GL_TIMEOUT_IGNORED);
}
gl_context_disable();
}
void BlenderDisplayDriver::draw(const Params &params)
{
/* See do_update_begin() for why no locking is required here. */
const bool transparent = true; // TODO(sergey): Derive this from Film.
if (!gl_draw_resources_ensure()) {
return;
}
if (use_gl_context_) {
gl_context_mutex_.lock();
}
if (need_clear_) {
if (texture_.need_clear) {
/* Texture is requested to be cleared and was not yet cleared.
*
* Do early return which should be equivalent of drawing all-zero texture.
* Watch out for the lock though so that the clear happening during update is properly
* synchronized here. */
if (use_gl_context_) {
gl_context_mutex_.unlock();
}
gl_context_mutex_.unlock();
return;
}
@@ -939,37 +477,66 @@ void BlenderDisplayDriver::draw(const Params &params)
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
}
glActiveTexture(GL_TEXTURE0);
display_shader_->bind(params.full_size.x, params.full_size.y);
/* NOTE: The VAO is to be allocated on the drawing context as it is not shared across contexts.
* Simplest is to allocate it on every redraw so that it is possible to destroy it from a
* correct context. */
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture_.gl_id);
/* Trick to keep sharp rendering without jagged edges on all GPUs.
*
* The idea here is to enforce driver to use linear interpolation when the image is not zoomed
* in.
* For the render result with a resolution divider in effect we always use nearest interpolation.
*
* Use explicit MIN assignment to make sure the driver does not have an undefined behavior at
* the zoom level 1. The MAG filter is always NEAREST. */
const float zoomed_width = params.size.x * zoom_.x;
const float zoomed_height = params.size.y * zoom_.y;
if (texture_.width != params.size.x || texture_.height != params.size.y) {
/* Resolution divider is different from 1, force nearest interpolation. */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
else if (zoomed_width - params.size.x > 0.5f || zoomed_height - params.size.y > 0.5f) {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
else {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
}
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer_);
texture_update_if_needed();
vertex_buffer_update(params);
/* TODO(sergey): Does it make sense/possible to cache/reuse the VAO? */
GLuint vertex_array_object;
glGenVertexArrays(1, &vertex_array_object);
glBindVertexArray(vertex_array_object);
display_shader_->bind(params.full_size.x, params.full_size.y);
const int texcoord_attribute = display_shader_->get_tex_coord_attrib_location();
const int position_attribute = display_shader_->get_position_attrib_location();
glEnableVertexAttribArray(texcoord_attribute);
glEnableVertexAttribArray(position_attribute);
draw_tile(zoom_, texcoord_attribute, position_attribute, tiles_->current_tile.tile);
glVertexAttribPointer(
texcoord_attribute, 2, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (const GLvoid *)0);
glVertexAttribPointer(position_attribute,
2,
GL_FLOAT,
GL_FALSE,
4 * sizeof(float),
(const GLvoid *)(sizeof(float) * 2));
for (const DrawTile &tile : tiles_->finished_tiles.tiles) {
draw_tile(zoom_, texcoord_attribute, position_attribute, tile);
}
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
display_shader_->unbind();
glBindTexture(GL_TEXTURE_2D, 0);
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glDeleteVertexArrays(1, &vertex_array_object);
display_shader_->unbind();
if (transparent) {
glDisable(GL_BLEND);
}
@@ -977,11 +544,6 @@ void BlenderDisplayDriver::draw(const Params &params)
gl_render_sync_ = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glFlush();
if (VLOG_IS_ON(5)) {
VLOG(5) << "Number of textures: " << GLTexture::num_used;
VLOG(5) << "Number of PBOs: " << GLPixelBufferObject::num_used;
}
if (use_gl_context_) {
gl_context_mutex_.unlock();
}
@@ -1056,16 +618,154 @@ void BlenderDisplayDriver::gl_context_dispose()
}
}
bool BlenderDisplayDriver::gl_draw_resources_ensure()
{
if (!texture_.gl_id) {
  /* If there is no texture allocated, there is nothing to draw. Inform the draw call that it
   * can not continue. Note that this is not an unrecoverable error, so once the texture is known
* we will come back here and create all the GPU resources needed for draw. */
return false;
}
if (gl_draw_resource_creation_attempted_) {
return gl_draw_resources_created_;
}
gl_draw_resource_creation_attempted_ = true;
if (!vertex_buffer_) {
glGenBuffers(1, &vertex_buffer_);
if (!vertex_buffer_) {
LOG(ERROR) << "Error creating vertex buffer.";
return false;
}
}
gl_draw_resources_created_ = true;
return true;
}
void BlenderDisplayDriver::gl_resources_destroy()
{
gl_context_enable();
tiles_->current_tile.gl_resources_destroy();
tiles_->finished_tiles.gl_resources_destroy_and_clear();
if (vertex_buffer_ != 0) {
glDeleteBuffers(1, &vertex_buffer_);
}
if (texture_.gl_pbo_id) {
glDeleteBuffers(1, &texture_.gl_pbo_id);
texture_.gl_pbo_id = 0;
}
if (texture_.gl_id) {
glDeleteTextures(1, &texture_.gl_id);
texture_.gl_id = 0;
}
gl_context_disable();
gl_context_dispose();
}
bool BlenderDisplayDriver::gl_texture_resources_ensure()
{
if (texture_.creation_attempted) {
return texture_.is_created;
}
texture_.creation_attempted = true;
DCHECK(!texture_.gl_id);
DCHECK(!texture_.gl_pbo_id);
/* Create texture. */
glGenTextures(1, &texture_.gl_id);
if (!texture_.gl_id) {
LOG(ERROR) << "Error creating texture.";
return false;
}
/* Configure the texture. */
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture_.gl_id);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, 0);
/* Create PBO for the texture. */
glGenBuffers(1, &texture_.gl_pbo_id);
if (!texture_.gl_pbo_id) {
LOG(ERROR) << "Error creating texture pixel buffer object.";
return false;
}
/* Creation finished with a success. */
texture_.is_created = true;
return true;
}
void BlenderDisplayDriver::texture_update_if_needed()
{
if (!texture_.need_update) {
return;
}
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, texture_.gl_pbo_id);
glTexSubImage2D(
GL_TEXTURE_2D, 0, 0, 0, texture_.width, texture_.height, GL_RGBA, GL_HALF_FLOAT, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
texture_.need_update = false;
}
void BlenderDisplayDriver::vertex_buffer_update(const Params & /*params*/)
{
/* Draw at the parameters for which the texture has been updated for. This allows to always draw
* texture during bordered-rendered camera view without flickering. The validness of the display
* parameters for a texture is guaranteed by the initial "clear" state which makes drawing to
* have an early output.
*
* Such approach can cause some extra "jelly" effect during panning, but it is not more jelly
* than overlay of selected objects. Also, it's possible to redraw texture at an intersection of
   * the texture draw parameters and the latest updated draw parameters (although the complexity of
   * doing it might not be worth it). */
const int x = texture_.params.full_offset.x;
const int y = texture_.params.full_offset.y;
const int width = texture_.params.size.x;
const int height = texture_.params.size.y;
/* Invalidate old contents - avoids stalling if the buffer is still waiting in queue to be
* rendered. */
glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
float *vpointer = reinterpret_cast<float *>(glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY));
if (!vpointer) {
return;
}
vpointer[0] = 0.0f;
vpointer[1] = 0.0f;
vpointer[2] = x;
vpointer[3] = y;
vpointer[4] = 1.0f;
vpointer[5] = 0.0f;
vpointer[6] = x + width;
vpointer[7] = y;
vpointer[8] = 1.0f;
vpointer[9] = 1.0f;
vpointer[10] = x + width;
vpointer[11] = y + height;
vpointer[12] = 0.0f;
vpointer[13] = 1.0f;
vpointer[14] = x;
vpointer[15] = y + height;
glUnmapBuffer(GL_ARRAY_BUFFER);
}
CCL_NAMESPACE_END
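
For clarity, the 16 floats written by vertex_buffer_update above describe a four-vertex triangle fan with interleaved (u, v, x, y) attributes covering the tile rectangle; a small Python sketch of that layout (illustrative only):

    def quad_vertex_data(x, y, width, height):
        # Each vertex is (texture u, texture v, position x, position y).
        return [
            0.0, 0.0, x,         y,            # bottom-left
            1.0, 0.0, x + width, y,            # bottom-right
            1.0, 1.0, x + width, y + height,   # top-right
            0.0, 1.0, x,         y + height,   # top-left
        ]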

View File

@@ -26,7 +26,6 @@
#include "util/thread.h"
#include "util/unique_ptr.h"
#include "util/vector.h"
CCL_NAMESPACE_BEGIN
@@ -113,8 +112,6 @@ class BlenderDisplayDriver : public DisplayDriver {
void set_zoom(float zoom_x, float zoom_y);
protected:
virtual void next_tile_begin() override;
virtual bool update_begin(const Params &params, int texture_width, int texture_height) override;
virtual void update_end() override;
@@ -125,17 +122,33 @@ class BlenderDisplayDriver : public DisplayDriver {
virtual void draw(const Params &params) override;
virtual void flush() override;
/* Helper function which allocates new GPU context. */
void gl_context_create();
bool gl_context_enable();
void gl_context_disable();
void gl_context_dispose();
/* Make sure texture is allocated and its initial configuration is performed. */
bool gl_texture_resources_ensure();
/* Ensure all runtime GPU resources needed for drawing are allocated.
* Returns true if all resources needed for drawing are available. */
bool gl_draw_resources_ensure();
/* Destroy all GPU resources which are being used by this object. */
void gl_resources_destroy();
/* Update GPU texture dimensions and content if needed (new pixel data was provided).
*
* NOTE: The texture needs to be bound. */
void texture_update_if_needed();
/* Update vertex buffer with new coordinates of vertex positions and texture coordinates.
* This buffer is used to render texture in the viewport.
*
* NOTE: The buffer needs to be bound. */
void vertex_buffer_update(const Params &params);
BL::RenderEngine b_engine_;
/* OpenGL context which is used the render engine doesn't have its own. */
@@ -146,14 +159,50 @@ class BlenderDisplayDriver : public DisplayDriver {
/* Mutex used to guard the `gl_context_`. */
thread_mutex gl_context_mutex_;
/* Content of the display is to be filled with zeroes. */
std::atomic<bool> need_clear_ = true;
/* Texture which contains pixels of the render result. */
struct {
/* Indicates whether texture creation was attempted and succeeded.
* Used to avoid multiple attempts of texture creation on GPU issues or GPU context
* misconfiguration. */
bool creation_attempted = false;
bool is_created = false;
/* OpenGL resource IDs of the texture itself and Pixel Buffer Object (PBO) used to write
* pixels to it.
*
* NOTE: Allocated on the engine's context. */
uint gl_id = 0;
uint gl_pbo_id = 0;
/* Is true when new data was written to the PBO, meaning, the texture might need to be resized
* and new data is to be uploaded to the GPU. */
bool need_update = false;
/* Content of the texture is to be filled with zeroes. */
std::atomic<bool> need_clear = true;
/* Dimensions of the texture in pixels. */
int width = 0;
int height = 0;
/* Dimensions of the underlying PBO. */
int buffer_width = 0;
int buffer_height = 0;
/* Display parameters the texture has been updated for. */
Params params;
} texture_;
unique_ptr<BlenderDisplayShader> display_shader_;
/* Opaque storage for an internal state and data for tiles. */
struct Tiles;
unique_ptr<Tiles> tiles_;
/* Special track of whether GPU resources were attempted to be created, to avoid attempts of
* their re-creation on failure on every redraw. */
bool gl_draw_resource_creation_attempted_ = false;
bool gl_draw_resources_created_ = false;
/* Vertex buffer which hold vertices of a triangle fan which is textures with the texture
* holding the render result. */
uint vertex_buffer_ = 0;
void *gl_render_sync_ = nullptr;
void *gl_upload_sync_ = nullptr;

View File

@@ -19,7 +19,6 @@
#include "scene/hair.h"
#include "scene/mesh.h"
#include "scene/object.h"
#include "scene/pointcloud.h"
#include "scene/volume.h"
#include "blender/sync.h"
@@ -32,18 +31,10 @@ CCL_NAMESPACE_BEGIN
static Geometry::Type determine_geom_type(BObjectInfo &b_ob_info, bool use_particle_hair)
{
#ifdef WITH_NEW_CURVES_TYPE
if (b_ob_info.object_data.is_a(&RNA_Curves) || use_particle_hair) {
#else
if (use_particle_hair) {
#endif
if (b_ob_info.object_data.is_a(&RNA_Hair) || use_particle_hair) {
return Geometry::HAIR;
}
if (b_ob_info.object_data.is_a(&RNA_PointCloud)) {
return Geometry::POINTCLOUD;
}
if (b_ob_info.object_data.is_a(&RNA_Volume) ||
(b_ob_info.object_data == b_ob_info.real_object.data() &&
object_fluid_gas_domain_find(b_ob_info.real_object))) {
@@ -116,9 +107,6 @@ Geometry *BlenderSync::sync_geometry(BL::Depsgraph &b_depsgraph,
else if (geom_type == Geometry::VOLUME) {
geom = scene->create_node<Volume>();
}
else if (geom_type == Geometry::POINTCLOUD) {
geom = scene->create_node<PointCloud>();
}
else {
geom = scene->create_node<Mesh>();
}
@@ -178,10 +166,6 @@ Geometry *BlenderSync::sync_geometry(BL::Depsgraph &b_depsgraph,
Volume *volume = static_cast<Volume *>(geom);
sync_volume(b_ob_info, volume);
}
else if (geom_type == Geometry::POINTCLOUD) {
PointCloud *pointcloud = static_cast<PointCloud *>(geom);
sync_pointcloud(pointcloud, b_ob_info);
}
else {
Mesh *mesh = static_cast<Mesh *>(geom);
sync_mesh(b_depsgraph, b_ob_info, mesh);
@@ -231,11 +215,7 @@ void BlenderSync::sync_geometry_motion(BL::Depsgraph &b_depsgraph,
if (progress.get_cancel())
return;
#ifdef WITH_NEW_CURVES_TYPE
if (b_ob_info.object_data.is_a(&RNA_Curves) || use_particle_hair) {
#else
if (use_particle_hair) {
#endif
if (b_ob_info.object_data.is_a(&RNA_Hair) || use_particle_hair) {
Hair *hair = static_cast<Hair *>(geom);
sync_hair_motion(b_depsgraph, b_ob_info, hair, motion_step);
}
@@ -243,10 +223,6 @@ void BlenderSync::sync_geometry_motion(BL::Depsgraph &b_depsgraph,
object_fluid_gas_domain_find(b_ob_info.real_object)) {
/* No volume motion blur support yet. */
}
else if (b_ob_info.object_data.is_a(&RNA_PointCloud)) {
PointCloud *pointcloud = static_cast<PointCloud *>(geom);
sync_pointcloud_motion(pointcloud, b_ob_info, motion_step);
}
else {
Mesh *mesh = static_cast<Mesh *>(geom);
sync_mesh_motion(b_depsgraph, b_ob_info, mesh, motion_step);

View File

@@ -24,14 +24,8 @@ CCL_NAMESPACE_BEGIN
/* Packed Images */
BlenderImageLoader::BlenderImageLoader(BL::Image b_image,
const int frame,
const bool is_preview_render)
: b_image(b_image),
frame(frame),
/* Don't free cache for preview render to avoid race condition from T93560, to be fixed
properly later as we are close to release. */
free_cache(!is_preview_render && !b_image.has_data())
BlenderImageLoader::BlenderImageLoader(BL::Image b_image, int frame)
: b_image(b_image), frame(frame), free_cache(!b_image.has_data())
{
}

View File

@@ -25,7 +25,7 @@ CCL_NAMESPACE_BEGIN
class BlenderImageLoader : public ImageLoader {
public:
BlenderImageLoader(BL::Image b_image, const int frame, const bool is_preview_render);
BlenderImageLoader(BL::Image b_image, int frame);
bool load_metadata(const ImageDeviceFeatures &features, ImageMetaData &metadata) override;
bool load_pixels(const ImageMetaData &metadata,

View File

@@ -1071,15 +1071,7 @@ static void create_subd_mesh(Scene *scene,
for (BL::MeshEdge &e : b_mesh.edges) {
if (e.crease() != 0.0f) {
mesh->add_edge_crease(e.vertices()[0], e.vertices()[1], e.crease());
}
}
for (BL::MeshVertexCreaseLayer &c : b_mesh.vertex_creases) {
for (int i = 0; i < c.data.length(); ++i) {
if (c.data[i].value() != 0.0f) {
mesh->add_vertex_crease(i, c.data[i].value());
}
mesh->add_crease(e.vertices()[0], e.vertices()[1], e.crease());
}
}
@@ -1094,6 +1086,40 @@ static void create_subd_mesh(Scene *scene,
/* Sync */
/* Check whether some of "built-in" motion-related attributes are needed to be exported (includes
* things like velocity from cache modifier, fluid simulation).
*
 * NOTE: This code is run prior to object motion blur initialization, so it can not access properties
* set by `sync_object_motion_init()`. */
static bool mesh_need_motion_attribute(BObjectInfo &b_ob_info, Scene *scene)
{
const Scene::MotionType need_motion = scene->need_motion();
if (need_motion == Scene::MOTION_NONE) {
/* Simple case: neither motion pass nor motion blur is needed, no need in the motion related
* attributes. */
return false;
}
if (need_motion == Scene::MOTION_BLUR) {
/* A bit tricky and implicit case:
* - Motion blur is enabled in the scene, which implies specific number of time steps for
* objects.
* - If the object has motion blur disabled on it, it will have 0 time steps.
* - Motion attribute expects non-zero time steps.
*
* Avoid adding motion attributes if the motion blur will enforce 0 motion steps. */
PointerRNA cobject = RNA_pointer_get(&b_ob_info.real_object.ptr, "cycles");
const bool use_motion = get_boolean(cobject, "use_motion_blur");
if (!use_motion) {
return false;
}
}
/* Motion pass which implies 3 motion steps, or motion blur which is not disabled on object
* level. */
return true;
}
void BlenderSync::sync_mesh(BL::Depsgraph b_depsgraph, BObjectInfo &b_ob_info, Mesh *mesh)
{
/* make a copy of the shaders as the caller in the main thread still needs them for syncing the
@@ -1118,7 +1144,7 @@ void BlenderSync::sync_mesh(BL::Depsgraph b_depsgraph, BObjectInfo &b_ob_info, M
if (b_mesh) {
/* Motion blur attribute is relative to seconds, we need it relative to frames. */
const bool need_motion = object_need_motion_attribute(b_ob_info, scene);
const bool need_motion = mesh_need_motion_attribute(b_ob_info, scene);
const float motion_scale = (need_motion) ?
scene->motion_shutter_time() /
(b_scene.render().fps() / b_scene.render().fps_base()) :

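The hunk above moves the check for whether the "built-in" motion attributes (velocity and friends) should be exported for a mesh. Below is a minimal standalone sketch of that decision logic; the enum, struct and function names are illustrative stand-ins, not the actual Cycles API.

#include <cstdio>

/* Illustrative stand-ins for the scene motion state and for the per-object settings
 * that the real code reads through the "cycles" RNA pointer. */
enum class MotionType { None, Pass, Blur };

struct ObjectSettings {
  bool use_motion_blur;
};

/* Mirrors the reasoning in the comments above: no motion -> no attribute needed;
 * motion blur -> only when the object itself has motion blur enabled, because a
 * disabled object gets zero motion steps and the attribute expects non-zero steps;
 * motion pass -> always needed. */
static bool need_motion_attribute(MotionType need_motion, const ObjectSettings &object)
{
  if (need_motion == MotionType::None) {
    return false;
  }
  if (need_motion == MotionType::Blur && !object.use_motion_blur) {
    return false;
  }
  return true;
}

int main()
{
  const ObjectSettings object = {false};
  std::printf("%d\n", need_motion_attribute(MotionType::Blur, object)); /* 0 */
  std::printf("%d\n", need_motion_attribute(MotionType::Pass, object)); /* 1 */
  return 0;
}
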
View File

@@ -72,8 +72,7 @@ bool BlenderSync::object_is_geometry(BObjectInfo &b_ob_info)
BL::Object::type_enum type = b_ob_info.iter_object.type();
if (type == BL::Object::type_VOLUME || type == BL::Object::type_CURVES ||
type == BL::Object::type_POINTCLOUD) {
if (type == BL::Object::type_VOLUME || type == BL::Object::type_HAIR) {
/* Will be exported attached to mesh. */
return true;
}
@@ -97,7 +96,7 @@ bool BlenderSync::object_can_have_geometry(BL::Object &b_ob)
case BL::Object::type_SURFACE:
case BL::Object::type_META:
case BL::Object::type_FONT:
case BL::Object::type_CURVES:
case BL::Object::type_HAIR:
case BL::Object::type_POINTCLOUD:
case BL::Object::type_VOLUME:
return true;
@@ -207,7 +206,7 @@ Object *BlenderSync::sync_object(BL::Depsgraph &b_depsgraph,
return NULL;
}
/* only interested in object that we can create geometry from */
/* only interested in object that we can create meshes from */
if (!object_is_geometry(b_ob_info)) {
return NULL;
}
@@ -529,17 +528,6 @@ void BlenderSync::sync_procedural(BL::Object &b_ob,
string absolute_path = blender_absolute_path(b_data, b_ob, b_mesh_cache.cache_file().filepath());
procedural->set_filepath(ustring(absolute_path));
array<ustring> layers;
for (BL::CacheFileLayer &layer : cache_file.layers) {
if (layer.hide_layer()) {
continue;
}
absolute_path = blender_absolute_path(b_data, b_ob, layer.filepath());
layers.push_back_slow(ustring(absolute_path));
}
procedural->set_layers(layers);
procedural->set_scale(cache_file.scale());
procedural->set_use_prefetch(cache_file.use_prefetch());

View File

@@ -51,6 +51,8 @@ bool BlenderOutputDriver::read_render_tile(const Tile &tile)
BL::RenderLayer b_rlay = *b_single_rlay;
vector<float> pixels(tile.size.x * tile.size.y * 4);
/* Copy each pass.
* TODO: copy only the required ones for better performance? */
for (BL::RenderPass &b_pass : b_rlay.passes) {
@@ -64,7 +66,7 @@ bool BlenderOutputDriver::read_render_tile(const Tile &tile)
bool BlenderOutputDriver::update_render_tile(const Tile &tile)
{
/* Use final write for preview renders, otherwise render result wouldn't be updated
/* Use final write for preview renders, otherwise render result wouldn't be be updated
* quickly on Blender side. For all other cases we use the display driver. */
if (b_engine_.is_preview()) {
write_render_tile(tile);
@@ -107,7 +109,7 @@ void BlenderOutputDriver::write_render_tile(const Tile &tile)
BL::RenderLayer b_rlay = *b_single_rlay;
vector<float> pixels(static_cast<size_t>(tile.size.x) * tile.size.y * 4);
vector<float> pixels(tile.size.x * tile.size.y * 4);
/* Copy each pass. */
for (BL::RenderPass &b_pass : b_rlay.passes) {
@@ -118,7 +120,7 @@ void BlenderOutputDriver::write_render_tile(const Tile &tile)
b_pass.rect(&pixels[0]);
}
b_engine_.end_result(b_rr, false, false, true);
b_engine_.end_result(b_rr, true, false, true);
}
CCL_NAMESPACE_END

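One of the hunks above widens the pixel buffer size to size_t before multiplying (static_cast<size_t>(tile.size.x) * tile.size.y * 4). A small standalone sketch of why widening before the multiply matters; the tile dimensions are made up for illustration and a 64-bit size_t is assumed.

#include <cstddef>
#include <cstdio>

int main()
{
  /* Hypothetical very large tile, not taken from the diff. */
  const unsigned int x = 50000, y = 50000;

  /* Multiplying in 32 bits first wraps around (10,000,000,000 mod 2^32). */
  const unsigned int narrow = x * y * 4u;
  /* Widening one operand first keeps the full 64-bit product. */
  const size_t wide = static_cast<size_t>(x) * y * 4u;

  std::printf("32-bit product: %u\n", narrow); /* 1410065408 */
  std::printf("64-bit product: %zu\n", wide);  /* 10000000000 */
  return 0;
}
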
View File

@@ -1,303 +0,0 @@
/*
* Copyright 2011-2013 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "scene/pointcloud.h"
#include "scene/attribute.h"
#include "scene/scene.h"
#include "blender/sync.h"
#include "blender/util.h"
#include "util/foreach.h"
#include "util/hash.h"
CCL_NAMESPACE_BEGIN
template<typename TypeInCycles, typename GetValueAtIndex>
static void fill_generic_attribute(BL::PointCloud &b_pointcloud,
TypeInCycles *data,
const GetValueAtIndex &get_value_at_index)
{
const int num_points = b_pointcloud.points.length();
for (int i = 0; i < num_points; i++) {
data[i] = get_value_at_index(i);
}
}
static void attr_create_motion(PointCloud *pointcloud,
BL::Attribute &b_attribute,
const float motion_scale)
{
if (!(b_attribute.domain() == BL::Attribute::domain_POINT) &&
(b_attribute.data_type() == BL::Attribute::data_type_FLOAT_VECTOR)) {
return;
}
BL::FloatVectorAttribute b_vector_attribute(b_attribute);
const int num_points = pointcloud->get_points().size();
/* Find or add attribute */
float3 *P = &pointcloud->get_points()[0];
Attribute *attr_mP = pointcloud->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
if (!attr_mP) {
attr_mP = pointcloud->attributes.add(ATTR_STD_MOTION_VERTEX_POSITION);
}
/* Only export previous and next frame, we don't have any in between data. */
float motion_times[2] = {-1.0f, 1.0f};
for (int step = 0; step < 2; step++) {
const float relative_time = motion_times[step] * 0.5f * motion_scale;
float3 *mP = attr_mP->data_float3() + step * num_points;
for (int i = 0; i < num_points; i++) {
mP[i] = P[i] + get_float3(b_vector_attribute.data[i].vector()) * relative_time;
}
}
}
static void copy_attributes(PointCloud *pointcloud,
BL::PointCloud b_pointcloud,
const bool need_motion,
const float motion_scale)
{
AttributeSet &attributes = pointcloud->attributes;
static const ustring u_velocity("velocity");
for (BL::Attribute &b_attribute : b_pointcloud.attributes) {
const ustring name{b_attribute.name().c_str()};
if (need_motion && name == u_velocity) {
attr_create_motion(pointcloud, b_attribute, motion_scale);
}
if (attributes.find(name)) {
continue;
}
const AttributeElement element = ATTR_ELEMENT_VERTEX;
const BL::Attribute::data_type_enum b_data_type = b_attribute.data_type();
switch (b_data_type) {
case BL::Attribute::data_type_FLOAT: {
BL::FloatAttribute b_float_attribute{b_attribute};
Attribute *attr = attributes.add(name, TypeFloat, element);
float *data = attr->data_float();
fill_generic_attribute(
b_pointcloud, data, [&](int i) { return b_float_attribute.data[i].value(); });
break;
}
case BL::Attribute::data_type_BOOLEAN: {
BL::BoolAttribute b_bool_attribute{b_attribute};
Attribute *attr = attributes.add(name, TypeFloat, element);
float *data = attr->data_float();
fill_generic_attribute(
b_pointcloud, data, [&](int i) { return (float)b_bool_attribute.data[i].value(); });
break;
}
case BL::Attribute::data_type_INT: {
BL::IntAttribute b_int_attribute{b_attribute};
Attribute *attr = attributes.add(name, TypeFloat, element);
float *data = attr->data_float();
fill_generic_attribute(
b_pointcloud, data, [&](int i) { return (float)b_int_attribute.data[i].value(); });
break;
}
case BL::Attribute::data_type_FLOAT_VECTOR: {
BL::FloatVectorAttribute b_vector_attribute{b_attribute};
Attribute *attr = attributes.add(name, TypeVector, element);
float3 *data = attr->data_float3();
fill_generic_attribute(b_pointcloud, data, [&](int i) {
BL::Array<float, 3> v = b_vector_attribute.data[i].vector();
return make_float3(v[0], v[1], v[2]);
});
break;
}
case BL::Attribute::data_type_FLOAT_COLOR: {
BL::FloatColorAttribute b_color_attribute{b_attribute};
Attribute *attr = attributes.add(name, TypeRGBA, element);
float4 *data = attr->data_float4();
fill_generic_attribute(b_pointcloud, data, [&](int i) {
BL::Array<float, 4> v = b_color_attribute.data[i].color();
return make_float4(v[0], v[1], v[2], v[3]);
});
break;
}
case BL::Attribute::data_type_FLOAT2: {
BL::Float2Attribute b_float2_attribute{b_attribute};
Attribute *attr = attributes.add(name, TypeFloat2, element);
float2 *data = attr->data_float2();
fill_generic_attribute(b_pointcloud, data, [&](int i) {
BL::Array<float, 2> v = b_float2_attribute.data[i].vector();
return make_float2(v[0], v[1]);
});
break;
}
default:
/* Not supported. */
break;
}
}
}
static void export_pointcloud(Scene *scene,
PointCloud *pointcloud,
BL::PointCloud b_pointcloud,
const bool need_motion,
const float motion_scale)
{
/* TODO: optimize so we can straight memcpy arrays from Blender? */
/* Add requested attributes. */
Attribute *attr_random = NULL;
if (pointcloud->need_attribute(scene, ATTR_STD_POINT_RANDOM)) {
attr_random = pointcloud->attributes.add(ATTR_STD_POINT_RANDOM);
}
/* Reserve memory. */
const int num_points = b_pointcloud.points.length();
pointcloud->reserve(num_points);
/* Export points. */
BL::PointCloud::points_iterator b_point_iter;
for (b_pointcloud.points.begin(b_point_iter); b_point_iter != b_pointcloud.points.end();
++b_point_iter) {
BL::Point b_point = *b_point_iter;
const float3 co = get_float3(b_point.co());
const float radius = b_point.radius();
pointcloud->add_point(co, radius);
/* Random number per point. */
if (attr_random != NULL) {
attr_random->add(hash_uint2_to_float(b_point.index(), 0));
}
}
/* Export attributes */
copy_attributes(pointcloud, b_pointcloud, need_motion, motion_scale);
}
static void export_pointcloud_motion(PointCloud *pointcloud,
BL::PointCloud b_pointcloud,
int motion_step)
{
/* Find or add attribute. */
Attribute *attr_mP = pointcloud->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
bool new_attribute = false;
if (!attr_mP) {
attr_mP = pointcloud->attributes.add(ATTR_STD_MOTION_VERTEX_POSITION);
new_attribute = true;
}
/* Export motion points. */
const int num_points = pointcloud->num_points();
float3 *mP = attr_mP->data_float3() + motion_step * num_points;
bool have_motion = false;
int num_motion_points = 0;
const array<float3> &pointcloud_points = pointcloud->get_points();
BL::PointCloud::points_iterator b_point_iter;
for (b_pointcloud.points.begin(b_point_iter); b_point_iter != b_pointcloud.points.end();
++b_point_iter) {
BL::Point b_point = *b_point_iter;
if (num_motion_points < num_points) {
float3 P = get_float3(b_point.co());
P.w = b_point.radius();
mP[num_motion_points] = P;
have_motion = have_motion || (P != pointcloud_points[num_motion_points]);
num_motion_points++;
}
}
/* In case of new attribute, we verify if there really was any motion. */
if (new_attribute) {
if (num_motion_points != num_points || !have_motion) {
pointcloud->attributes.remove(ATTR_STD_MOTION_VERTEX_POSITION);
}
else if (motion_step > 0) {
/* Motion, fill up previous steps that we might have skipped because
* they had no motion, but we need them anyway now. */
for (int step = 0; step < motion_step; step++) {
pointcloud->copy_center_to_motion_step(step);
}
}
}
/* Export attributes */
copy_attributes(pointcloud, b_pointcloud, false, 0.0f);
}
void BlenderSync::sync_pointcloud(PointCloud *pointcloud, BObjectInfo &b_ob_info)
{
size_t old_numpoints = pointcloud->num_points();
array<Node *> used_shaders = pointcloud->get_used_shaders();
PointCloud new_pointcloud;
new_pointcloud.set_used_shaders(used_shaders);
/* TODO: add option to filter out points in the view layer. */
BL::PointCloud b_pointcloud(b_ob_info.object_data);
/* Motion blur attribute is relative to seconds, we need it relative to frames. */
const bool need_motion = object_need_motion_attribute(b_ob_info, scene);
const float motion_scale = (need_motion) ?
scene->motion_shutter_time() /
(b_scene.render().fps() / b_scene.render().fps_base()) :
0.0f;
export_pointcloud(scene, &new_pointcloud, b_pointcloud, need_motion, motion_scale);
/* update original sockets */
for (const SocketType &socket : new_pointcloud.type->inputs) {
/* Those sockets are updated in sync_object, so do not modify them. */
if (socket.name == "use_motion_blur" || socket.name == "motion_steps" ||
socket.name == "used_shaders") {
continue;
}
pointcloud->set_value(socket, new_pointcloud, socket);
}
pointcloud->attributes.clear();
foreach (Attribute &attr, new_pointcloud.attributes.attributes) {
pointcloud->attributes.attributes.push_back(std::move(attr));
}
/* tag update */
const bool rebuild = (pointcloud && old_numpoints != pointcloud->num_points());
pointcloud->tag_update(scene, rebuild);
}
void BlenderSync::sync_pointcloud_motion(PointCloud *pointcloud,
BObjectInfo &b_ob_info,
int motion_step)
{
/* Skip if nothing exported. */
if (pointcloud->num_points() == 0) {
return;
}
/* Export deformed coordinates. */
if (ccl::BKE_object_is_deform_modified(b_ob_info, b_scene, preview)) {
/* PointCloud object. */
BL::PointCloud b_pointcloud(b_ob_info.object_data);
export_pointcloud_motion(pointcloud, b_pointcloud, motion_step);
}
else {
/* No deformation on this frame, copy coordinates if other frames did have it. */
pointcloud->copy_center_to_motion_step(motion_step);
}
}
CCL_NAMESPACE_END

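The point cloud exporter above funnels every attribute type through one templated fill_generic_attribute helper plus a per-type lambda. A standalone sketch of that pattern using std::vector; the names mirror the file above, but the data source is invented for illustration.

#include <cstdio>
#include <vector>

/* One templated helper fills a destination array from any per-index getter,
 * so each attribute type only has to supply a small lambda. */
template<typename T, typename GetValueAtIndex>
static void fill_generic_attribute(const int num_points,
                                   T *data,
                                   const GetValueAtIndex &get_value_at_index)
{
  for (int i = 0; i < num_points; i++) {
    data[i] = get_value_at_index(i);
  }
}

int main()
{
  /* Stand-in for an integer attribute coming from Blender. */
  const std::vector<int> source = {1, 0, 3, 5};
  std::vector<float> converted(source.size());

  /* Convert to float on the fly, as the BOOLEAN/INT cases above do. */
  fill_generic_attribute(
      int(source.size()), converted.data(), [&](int i) { return float(source[i]); });

  for (const float value : converted) {
    std::printf("%.1f ", value);
  }
  std::printf("\n");
  return 0;
}
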
View File

@@ -138,18 +138,20 @@ static const char *PyC_UnicodeAsByte(PyObject *py_str, PyObject **coerce)
static PyObject *init_func(PyObject * /*self*/, PyObject *args)
{
PyObject *path, *user_path;
PyObject *path, *user_path, *temp_path;
int headless;
if (!PyArg_ParseTuple(args, "OOi", &path, &user_path, &headless)) {
if (!PyArg_ParseTuple(args, "OOOi", &path, &user_path, &temp_path, &headless)) {
return nullptr;
}
PyObject *path_coerce = nullptr, *user_path_coerce = nullptr;
PyObject *path_coerce = nullptr, *user_path_coerce = nullptr, *temp_path_coerce = nullptr;
path_init(PyC_UnicodeAsByte(path, &path_coerce),
PyC_UnicodeAsByte(user_path, &user_path_coerce));
PyC_UnicodeAsByte(user_path, &user_path_coerce),
PyC_UnicodeAsByte(temp_path, &temp_path_coerce));
Py_XDECREF(path_coerce);
Py_XDECREF(user_path_coerce);
Py_XDECREF(temp_path_coerce);
BlenderSession::headless = headless;
@@ -733,20 +735,27 @@ static bool image_parse_filepaths(PyObject *pyfilepaths, vector<string> &filepat
static PyObject *denoise_func(PyObject * /*self*/, PyObject *args, PyObject *keywords)
{
#if 1
(void)args;
(void)keywords;
#else
static const char *keyword_list[] = {
"preferences", "scene", "view_layer", "input", "output", NULL};
"preferences", "scene", "view_layer", "input", "output", "tile_size", "samples", NULL};
PyObject *pypreferences, *pyscene, *pyviewlayer;
PyObject *pyinput, *pyoutput = NULL;
int tile_size = 0, samples = 0;
if (!PyArg_ParseTupleAndKeywords(args,
keywords,
"OOOO|O",
"OOOO|Oii",
(char **)keyword_list,
&pypreferences,
&pyscene,
&pyviewlayer,
&pyinput,
&pyoutput)) {
&pyoutput,
&tile_size,
&samples)) {
return NULL;
}
@@ -768,10 +777,14 @@ static PyObject *denoise_func(PyObject * /*self*/, PyObject *args, PyObject *key
&RNA_ViewLayer,
PyLong_AsVoidPtr(pyviewlayer),
&viewlayerptr);
BL::ViewLayer b_view_layer(viewlayerptr);
PointerRNA cviewlayer = RNA_pointer_get(&viewlayerptr, "cycles");
DenoiseParams params = BlenderSync::get_denoise_params(b_scene, b_view_layer, true);
params.use = true;
DenoiseParams params;
params.radius = get_int(cviewlayer, "denoising_radius");
params.strength = get_float(cviewlayer, "denoising_strength");
params.feature_strength = get_float(cviewlayer, "denoising_feature_strength");
params.relative_pca = get_boolean(cviewlayer, "denoising_relative_pca");
params.neighbor_frames = get_int(cviewlayer, "denoising_neighbor_frames");
/* Parse file paths list. */
vector<string> input, output;
@@ -799,15 +812,24 @@ static PyObject *denoise_func(PyObject * /*self*/, PyObject *args, PyObject *key
}
/* Create denoiser. */
DenoiserPipeline denoiser(device, params);
DenoiserPipeline denoiser(device);
denoiser.params = params;
denoiser.input = input;
denoiser.output = output;
if (tile_size > 0) {
denoiser.tile_size = make_int2(tile_size, tile_size);
}
if (samples > 0) {
denoiser.samples_override = samples;
}
/* Run denoiser. */
if (!denoiser.run()) {
PyErr_SetString(PyExc_ValueError, denoiser.error.c_str());
return NULL;
}
#endif
Py_RETURN_NONE;
}
@@ -884,18 +906,16 @@ static PyObject *enable_print_stats_func(PyObject * /*self*/, PyObject * /*args*
static PyObject *get_device_types_func(PyObject * /*self*/, PyObject * /*args*/)
{
vector<DeviceType> device_types = Device::available_types();
bool has_cuda = false, has_optix = false, has_hip = false, has_metal = false;
bool has_cuda = false, has_optix = false, has_hip = false;
foreach (DeviceType device_type, device_types) {
has_cuda |= (device_type == DEVICE_CUDA);
has_optix |= (device_type == DEVICE_OPTIX);
has_hip |= (device_type == DEVICE_HIP);
has_metal |= (device_type == DEVICE_METAL);
}
PyObject *list = PyTuple_New(4);
PyObject *list = PyTuple_New(3);
PyTuple_SET_ITEM(list, 0, PyBool_FromLong(has_cuda));
PyTuple_SET_ITEM(list, 1, PyBool_FromLong(has_optix));
PyTuple_SET_ITEM(list, 2, PyBool_FromLong(has_hip));
PyTuple_SET_ITEM(list, 3, PyBool_FromLong(has_metal));
return list;
}
@@ -924,9 +944,6 @@ static PyObject *set_device_override_func(PyObject * /*self*/, PyObject *arg)
else if (override == "HIP") {
BlenderSession::device_override = DEVICE_MASK_HIP;
}
else if (override == "METAL") {
BlenderSession::device_override = DEVICE_MASK_METAL;
}
else {
printf("\nError: %s is not a valid Cycles device.\n", override.c_str());
Py_RETURN_FALSE;
@@ -1037,13 +1054,5 @@ void *CCL_python_module_init()
Py_INCREF(Py_False);
}
#ifdef WITH_CYCLES_DEBUG
PyModule_AddObject(mod, "with_debug", Py_True);
Py_INCREF(Py_True);
#else /* WITH_CYCLES_DEBUG */
PyModule_AddObject(mod, "with_debug", Py_False);
Py_INCREF(Py_False);
#endif /* WITH_CYCLES_DEBUG */
return (void *)mod;
}

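The denoise_func hunk above relies on PyArg_ParseTupleAndKeywords with a format string where everything after "|" is optional ("OOOO|Oii"). A minimal extension-module sketch of that pattern; the module and function names are hypothetical, and only the CPython C API calls themselves are real.

#include <Python.h>

/* "Oi|i": one object and one int are required, the trailing int is optional and
 * keeps its default value when the caller omits it. */
static PyObject *demo_func(PyObject * /*self*/, PyObject *args, PyObject *keywords)
{
  static const char *keyword_list[] = {"input", "tile_size", "samples", NULL};
  PyObject *input = NULL;
  int tile_size = 0;
  int samples = 0;

  if (!PyArg_ParseTupleAndKeywords(
          args, keywords, "Oi|i", (char **)keyword_list, &input, &tile_size, &samples)) {
    return NULL;
  }
  return Py_BuildValue("ii", tile_size, samples);
}

static PyMethodDef demo_methods[] = {
    {"demo", (PyCFunction)demo_func, METH_VARARGS | METH_KEYWORDS, "demo"},
    {NULL, NULL, 0, NULL},
};

static struct PyModuleDef demo_module = {
    PyModuleDef_HEAD_INIT, "demo_module", NULL, -1, demo_methods};

PyMODINIT_FUNC PyInit_demo_module(void)
{
  return PyModule_Create(&demo_module);
}
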
View File

@@ -396,13 +396,6 @@ void BlenderSession::render(BL::Depsgraph &b_depsgraph_)
/* set the current view */
b_engine.active_view_set(b_rview_name.c_str());
/* Force update in this case, since the camera transform on each frame changes
* in different views. This could be optimized by somehow storing the animated
* camera transforms separate from the fixed stereo transform. */
if ((scene->need_motion() != Scene::MOTION_NONE) && view_index > 0) {
sync->tag_update();
}
/* update scene */
BL::Object b_camera_override(b_engine.camera_override());
sync->sync_camera(b_render, b_camera_override, width, height, b_rview_name.c_str());
@@ -502,15 +495,10 @@ void BlenderSession::render_frame_finish()
path_remove(filename);
}
/* Clear output driver. */
/* Clear driver. */
session->set_output_driver(nullptr);
session->full_buffer_written_cb = function_null;
/* The display driver holds OpenGL resources which belong to an OpenGL context held by the render
* engine on Blender side. Force destruction of those resources. */
display_driver_ = nullptr;
session->set_display_driver(nullptr);
/* All the files are handled.
* Clear the list so that this session can be re-used by Persistent Data. */
full_buffer_files_.clear();
@@ -641,7 +629,7 @@ void BlenderSession::bake(BL::Depsgraph &b_depsgraph_,
integrator->set_use_emission((bake_filter & BL::BakeSettings::pass_filter_EMIT) != 0);
}
/* Always use transparent background for baking. */
/* Always use transpanent background for baking. */
scene->background->set_transparent(true);
/* Load built-in images from Blender. */

View File

@@ -378,19 +378,10 @@ static ShaderNode *add_node(Scene *scene,
}
else if (b_node.is_a(&RNA_ShaderNodeMapRange)) {
BL::ShaderNodeMapRange b_map_range_node(b_node);
if (b_map_range_node.data_type() == BL::ShaderNodeMapRange::data_type_FLOAT_VECTOR) {
VectorMapRangeNode *vector_map_range_node = graph->create_node<VectorMapRangeNode>();
vector_map_range_node->set_use_clamp(b_map_range_node.clamp());
vector_map_range_node->set_range_type(
(NodeMapRangeType)b_map_range_node.interpolation_type());
node = vector_map_range_node;
}
else {
MapRangeNode *map_range_node = graph->create_node<MapRangeNode>();
map_range_node->set_clamp(b_map_range_node.clamp());
map_range_node->set_range_type((NodeMapRangeType)b_map_range_node.interpolation_type());
node = map_range_node;
}
MapRangeNode *map_range_node = graph->create_node<MapRangeNode>();
map_range_node->set_clamp(b_map_range_node.clamp());
map_range_node->set_range_type((NodeMapRangeType)b_map_range_node.interpolation_type());
node = map_range_node;
}
else if (b_node.is_a(&RNA_ShaderNodeClamp)) {
BL::ShaderNodeClamp b_clamp_node(b_node);
@@ -689,9 +680,6 @@ static ShaderNode *add_node(Scene *scene,
else if (b_node.is_a(&RNA_ShaderNodeHairInfo)) {
node = graph->create_node<HairInfoNode>();
}
else if (b_node.is_a(&RNA_ShaderNodePointInfo)) {
node = graph->create_node<PointInfoNode>();
}
else if (b_node.is_a(&RNA_ShaderNodeVolumeInfo)) {
node = graph->create_node<VolumeInfoNode>();
}
@@ -774,12 +762,11 @@ static ShaderNode *add_node(Scene *scene,
int scene_frame = b_scene.frame_current();
int image_frame = image_user_frame_number(b_image_user, b_image, scene_frame);
image->handle = scene->image_manager->add_image(
new BlenderImageLoader(b_image, image_frame, b_engine.is_preview()),
image->image_params());
new BlenderImageLoader(b_image, image_frame), image->image_params());
}
else {
ustring filename = ustring(
image_user_file_path(b_image_user, b_image, b_scene.frame_current()));
image_user_file_path(b_image_user, b_image, b_scene.frame_current(), true));
image->set_filename(filename);
}
}
@@ -810,13 +797,12 @@ static ShaderNode *add_node(Scene *scene,
if (is_builtin) {
int scene_frame = b_scene.frame_current();
int image_frame = image_user_frame_number(b_image_user, b_image, scene_frame);
env->handle = scene->image_manager->add_image(
new BlenderImageLoader(b_image, image_frame, b_engine.is_preview()),
env->image_params());
env->handle = scene->image_manager->add_image(new BlenderImageLoader(b_image, image_frame),
env->image_params());
}
else {
env->set_filename(
ustring(image_user_file_path(b_image_user, b_image, b_scene.frame_current())));
ustring(image_user_file_path(b_image_user, b_image, b_scene.frame_current(), false)));
}
}
node = env;

View File

@@ -95,11 +95,6 @@ void BlenderSync::reset(BL::BlendData &b_data, BL::Scene &b_scene)
this->b_scene = b_scene;
}
void BlenderSync::tag_update()
{
has_updates_ = true;
}
/* Sync */
void BlenderSync::sync_recalc(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d)
@@ -787,7 +782,6 @@ SceneParams BlenderSync::get_scene_params(BL::Scene &b_scene, bool background)
params.bvh_type = BVH_TYPE_DYNAMIC;
params.use_bvh_spatial_split = RNA_boolean_get(&cscene, "debug_use_spatial_splits");
params.use_bvh_compact_structure = RNA_boolean_get(&cscene, "debug_use_compact_bvh");
params.use_bvh_unaligned_nodes = RNA_boolean_get(&cscene, "debug_use_hair_bvh");
params.num_bvh_time_steps = RNA_int_get(&cscene, "debug_bvh_time_steps");
@@ -833,14 +827,6 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine &b_engine,
SessionParams params;
PointerRNA cscene = RNA_pointer_get(&b_scene.ptr, "cycles");
if (background && !b_engine.is_preview()) {
/* Viewport and preview renders do not require temp directory and do request session
* parameters more often than the background render.
* Optimize RNA-C++ usage and memory allocation a bit by saving string access which we know is
* not needed for viewport render. */
params.temp_dir = b_engine.temporary_directory();
}
/* feature set */
params.experimental = (get_enum(cscene, "feature_set") != 0);

View File

@@ -66,8 +66,6 @@ class BlenderSync {
void reset(BL::BlendData &b_data, BL::Scene &b_scene);
void tag_update();
/* sync */
void sync_recalc(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d);
void sync_data(BL::RenderSettings &b_render,
@@ -105,11 +103,11 @@ class BlenderSync {
static BufferParams get_buffer_params(
BL::SpaceView3D &b_v3d, BL::RegionView3D &b_rv3d, Camera *cam, int width, int height);
private:
static DenoiseParams get_denoise_params(BL::Scene &b_scene,
BL::ViewLayer &b_view_layer,
bool background);
private:
/* sync */
void sync_lights(BL::Depsgraph &b_depsgraph, bool update_all);
void sync_materials(BL::Depsgraph &b_depsgraph, bool update_all);
@@ -169,16 +167,12 @@ class BlenderSync {
Hair *hair, BL::Mesh &b_mesh, BObjectInfo &b_ob_info, bool motion, int motion_step = 0);
bool object_has_particle_hair(BL::Object b_ob);
/* Point Cloud */
void sync_pointcloud(PointCloud *pointcloud, BObjectInfo &b_ob_info);
void sync_pointcloud_motion(PointCloud *pointcloud, BObjectInfo &b_ob_info, int motion_step = 0);
/* Camera */
void sync_camera_motion(
BL::RenderSettings &b_render, BL::Object &b_ob, int width, int height, float motion_time);
/* Geometry */
Geometry *sync_geometry(BL::Depsgraph &b_depsgraph,
Geometry *sync_geometry(BL::Depsgraph &b_depsgrpah,
BObjectInfo &b_ob_info,
bool object_updated,
bool use_particle_hair,
@@ -273,6 +267,7 @@ class BlenderSync {
Progress &progress;
protected:
/* Indicates that `sync_recalc()` detected changes in the scene.
* If this flag is false then the data is considered to be up-to-date and will not be
* synchronized at all. */

View File

@@ -18,7 +18,6 @@
#define __BLENDER_UTIL_H__
#include "scene/mesh.h"
#include "scene/scene.h"
#include "util/algorithm.h"
#include "util/array.h"
@@ -34,7 +33,7 @@
extern "C" {
void BKE_image_user_frame_calc(void *ima, void *iuser, int cfra);
void BKE_image_user_file_path_ex(void *iuser, void *ima, char *path, bool resolve_udim);
void BKE_image_user_file_path(void *iuser, void *ima, char *path);
unsigned char *BKE_image_get_pixels_for_frame(void *image, int frame, int tile);
float *BKE_image_get_float_pixels_for_frame(void *image, int frame, int tile);
}
@@ -291,14 +290,25 @@ static inline int render_resolution_y(BL::RenderSettings &b_render)
return b_render.resolution_y() * b_render.resolution_percentage() / 100;
}
static inline string image_user_file_path(BL::ImageUser &iuser, BL::Image &ima, int cfra)
static inline string image_user_file_path(BL::ImageUser &iuser,
BL::Image &ima,
int cfra,
bool load_tiled)
{
char filepath[1024];
iuser.tile(0);
BKE_image_user_frame_calc(ima.ptr.data, iuser.ptr.data, cfra);
BKE_image_user_file_path_ex(iuser.ptr.data, ima.ptr.data, filepath, false);
BKE_image_user_file_path(iuser.ptr.data, ima.ptr.data, filepath);
return string(filepath);
string filepath_str = string(filepath);
if (load_tiled && ima.source() == BL::Image::source_TILED) {
string udim;
if (!ima.tiles.empty()) {
udim = to_string(ima.tiles[0].number());
}
string_replace(filepath_str, udim, "<UDIM>");
}
return filepath_str;
}
static inline int image_user_frame_number(BL::ImageUser &iuser, BL::Image &ima, int cfra)
@@ -671,40 +681,6 @@ static inline uint object_ray_visibility(BL::Object &b_ob)
return flag;
}
/* Check whether some of the "built-in" motion-related attributes need to be exported (this
 * includes things like velocity from a cache modifier or a fluid simulation).
 *
 * NOTE: This code runs prior to object motion blur initialization, so it cannot access properties
 * set by `sync_object_motion_init()`. */
static inline bool object_need_motion_attribute(BObjectInfo &b_ob_info, Scene *scene)
{
const Scene::MotionType need_motion = scene->need_motion();
if (need_motion == Scene::MOTION_NONE) {
/* Simple case: neither motion pass nor motion blur is needed, no need in the motion related
* attributes. */
return false;
}
if (need_motion == Scene::MOTION_BLUR) {
/* A bit tricky and implicit case:
* - Motion blur is enabled in the scene, which implies specific number of time steps for
* objects.
* - If the object has motion blur disabled on it, it will have 0 time steps.
* - Motion attribute expects non-zero time steps.
*
* Avoid adding motion attributes if the motion blur will enforce 0 motion steps. */
PointerRNA cobject = RNA_pointer_get(&b_ob_info.real_object.ptr, "cycles");
const bool use_motion = get_boolean(cobject, "use_motion_blur");
if (!use_motion) {
return false;
}
}
/* Motion pass which implies 3 motion steps, or motion blur which is not disabled on object
* level. */
return true;
}
class EdgeMap {
public:
EdgeMap()

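The image_user_file_path change above rewrites the first tile number of a tiled image path to the <UDIM> token before handing it to the image loader. A standalone sketch of that substitution; the helper name and the example path are made up, and unlike Cycles' string_replace it only touches the first occurrence.

#include <cstdio>
#include <string>

/* Replace the first occurrence of `from` with `to` inside `path`. */
static void replace_first(std::string &path, const std::string &from, const std::string &to)
{
  const size_t pos = path.find(from);
  if (pos != std::string::npos) {
    path.replace(pos, from.size(), to);
  }
}

int main()
{
  /* Hypothetical path of the first tile of a UDIM set. */
  std::string filepath = "/textures/wood.1001.png";
  const std::string first_tile_number = "1001";

  replace_first(filepath, first_tile_number, "<UDIM>");
  std::printf("%s\n", filepath.c_str()); /* /textures/wood.<UDIM>.png */
  return 0;
}
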
View File

@@ -33,17 +33,6 @@ set(SRC
unaligned.cpp
)
set(SRC_METAL
metal.mm
)
if(WITH_CYCLES_DEVICE_METAL)
list(APPEND SRC
${SRC_METAL}
)
add_definitions(-DWITH_METAL)
endif()
set(SRC_HEADERS
bvh.h
bvh2.h
@@ -57,7 +46,6 @@ set(SRC_HEADERS
sort.h
split.h
unaligned.h
metal.h
)
set(LIB

View File

@@ -26,7 +26,6 @@
#include "scene/hair.h"
#include "scene/mesh.h"
#include "scene/object.h"
#include "scene/pointcloud.h"
#include "scene/scene.h"
#include "util/algorithm.h"
@@ -114,9 +113,9 @@ void BVHBuild::add_reference_triangles(BoundBox &root,
else {
/* Motion triangles, trace optimized case: we split triangle
* primitives into separate nodes for each of the time steps.
* This way we minimize overlap of neighbor triangle primitives.
* This way we minimize overlap of neighbor curve primitives.
*/
const int num_bvh_steps = params.num_motion_triangle_steps * 2 + 1;
const int num_bvh_steps = params.num_motion_curve_steps * 2 + 1;
const float num_bvh_steps_inv_1 = 1.0f / (num_bvh_steps - 1);
const size_t num_verts = mesh->verts.size();
const size_t num_steps = mesh->motion_steps;
@@ -270,101 +269,6 @@ void BVHBuild::add_reference_curves(BoundBox &root, BoundBox &center, Hair *hair
}
}
void BVHBuild::add_reference_points(BoundBox &root,
BoundBox &center,
PointCloud *pointcloud,
int i)
{
const Attribute *point_attr_mP = NULL;
if (pointcloud->has_motion_blur()) {
point_attr_mP = pointcloud->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
}
const float3 *points_data = &pointcloud->points[0];
const float *radius_data = &pointcloud->radius[0];
const size_t num_points = pointcloud->num_points();
const float3 *motion_data = (point_attr_mP) ? point_attr_mP->data_float3() : NULL;
const size_t num_steps = pointcloud->get_motion_steps();
if (point_attr_mP == NULL) {
/* Really simple logic for static points. */
for (uint j = 0; j < num_points; j++) {
const PointCloud::Point point = pointcloud->get_point(j);
BoundBox bounds = BoundBox::empty;
point.bounds_grow(points_data, radius_data, bounds);
if (bounds.valid()) {
references.push_back(BVHReference(bounds, j, i, PRIMITIVE_POINT));
root.grow(bounds);
center.grow(bounds.center2());
}
}
}
else if (params.num_motion_point_steps == 0 || params.use_spatial_split) {
/* Simple case of motion points: single node for the whole
* shutter time. Lowest memory usage but less optimal
* rendering.
*/
/* TODO(sergey): Support motion steps for spatially split BVH. */
for (uint j = 0; j < num_points; j++) {
const PointCloud::Point point = pointcloud->get_point(j);
BoundBox bounds = BoundBox::empty;
point.bounds_grow(points_data, radius_data, bounds);
for (size_t step = 0; step < num_steps - 1; step++) {
point.bounds_grow(motion_data + step * num_points, radius_data, bounds);
}
if (bounds.valid()) {
references.push_back(BVHReference(bounds, j, i, PRIMITIVE_MOTION_POINT));
root.grow(bounds);
center.grow(bounds.center2());
}
}
}
else {
/* Motion points, trace optimized case: we split point
* primitives into separate nodes for each of the time steps.
* This way we minimize overlap of neighbor point primitives.
*/
const int num_bvh_steps = params.num_motion_point_steps * 2 + 1;
const float num_bvh_steps_inv_1 = 1.0f / (num_bvh_steps - 1);
for (uint j = 0; j < num_points; j++) {
const PointCloud::Point point = pointcloud->get_point(j);
const size_t num_steps = pointcloud->get_motion_steps();
const float3 *point_steps = point_attr_mP->data_float3();
/* Calculate bounding box of the previous time step.
* Will be reused later to avoid duplicated work on
* calculating BVH time step boundbox.
*/
float4 prev_key = point.motion_key(
points_data, radius_data, point_steps, num_points, num_steps, 0.0f, j);
BoundBox prev_bounds = BoundBox::empty;
point.bounds_grow(prev_key, prev_bounds);
/* Create all primitive time steps. */
for (int bvh_step = 1; bvh_step < num_bvh_steps; ++bvh_step) {
const float curr_time = (float)(bvh_step)*num_bvh_steps_inv_1;
float4 curr_key = point.motion_key(
points_data, radius_data, point_steps, num_points, num_steps, curr_time, j);
BoundBox curr_bounds = BoundBox::empty;
point.bounds_grow(curr_key, curr_bounds);
BoundBox bounds = prev_bounds;
bounds.grow(curr_bounds);
if (bounds.valid()) {
const float prev_time = (float)(bvh_step - 1) * num_bvh_steps_inv_1;
references.push_back(
BVHReference(bounds, j, i, PRIMITIVE_MOTION_POINT, prev_time, curr_time));
root.grow(bounds);
center.grow(bounds.center2());
}
/* Current time boundbox becomes previous one for the
* next time step.
*/
prev_bounds = curr_bounds;
}
}
}
}
void BVHBuild::add_reference_geometry(BoundBox &root,
BoundBox &center,
Geometry *geom,
@@ -378,10 +282,6 @@ void BVHBuild::add_reference_geometry(BoundBox &root,
Hair *hair = static_cast<Hair *>(geom);
add_reference_curves(root, center, hair, object_index);
}
else if (geom->geometry_type == Geometry::POINTCLOUD) {
PointCloud *pointcloud = static_cast<PointCloud *>(geom);
add_reference_points(root, center, pointcloud, object_index);
}
}
void BVHBuild::add_reference_object(BoundBox &root, BoundBox &center, Object *ob, int i)
@@ -411,10 +311,6 @@ static size_t count_primitives(Geometry *geom)
Hair *hair = static_cast<Hair *>(geom);
return count_curve_segments(hair);
}
else if (geom->geometry_type == Geometry::POINTCLOUD) {
PointCloud *pointcloud = static_cast<PointCloud *>(geom);
return pointcloud->num_points();
}
return 0;
}
@@ -432,9 +328,8 @@ void BVHBuild::add_references(BVHRange &root)
if (!ob->get_geometry()->is_instanced()) {
num_alloc_references += count_primitives(ob->get_geometry());
}
else {
else
num_alloc_references++;
}
}
else {
num_alloc_references += count_primitives(ob->get_geometry());
@@ -499,7 +394,7 @@ BVHNode *BVHBuild::run()
spatial_min_overlap = root.bounds().safe_area() * params.spatial_split_alpha;
spatial_free_index = 0;
need_prim_time = params.use_motion_steps();
need_prim_time = params.num_motion_curve_steps > 0 || params.num_motion_triangle_steps > 0;
/* init progress updates */
double build_start_time;
@@ -640,8 +535,7 @@ bool BVHBuild::range_within_max_leaf_size(const BVHRange &range,
const vector<BVHReference> &references) const
{
size_t size = range.size();
size_t max_leaf_size = max(max(params.max_triangle_leaf_size, params.max_curve_leaf_size),
params.max_point_leaf_size);
size_t max_leaf_size = max(params.max_triangle_leaf_size, params.max_curve_leaf_size);
if (size > max_leaf_size)
return false;
@@ -650,44 +544,32 @@ bool BVHBuild::range_within_max_leaf_size(const BVHRange &range,
size_t num_motion_triangles = 0;
size_t num_curves = 0;
size_t num_motion_curves = 0;
size_t num_points = 0;
size_t num_motion_points = 0;
for (int i = 0; i < size; i++) {
const BVHReference &ref = references[range.start() + i];
if (ref.prim_type() & PRIMITIVE_CURVE) {
if (ref.prim_type() & PRIMITIVE_MOTION) {
if (ref.prim_type() & PRIMITIVE_ALL_CURVE) {
if (ref.prim_type() & PRIMITIVE_ALL_MOTION) {
num_motion_curves++;
}
else {
num_curves++;
}
}
else if (ref.prim_type() & PRIMITIVE_TRIANGLE) {
if (ref.prim_type() & PRIMITIVE_MOTION) {
else if (ref.prim_type() & PRIMITIVE_ALL_TRIANGLE) {
if (ref.prim_type() & PRIMITIVE_ALL_MOTION) {
num_motion_triangles++;
}
else {
num_triangles++;
}
}
else if (ref.prim_type() & PRIMITIVE_POINT) {
if (ref.prim_type() & PRIMITIVE_MOTION) {
num_motion_points++;
}
else {
num_points++;
}
}
}
return (num_triangles <= params.max_triangle_leaf_size) &&
(num_motion_triangles <= params.max_motion_triangle_leaf_size) &&
(num_curves <= params.max_curve_leaf_size) &&
(num_motion_curves <= params.max_motion_curve_leaf_size) &&
(num_points <= params.max_point_leaf_size) &&
(num_motion_points <= params.max_motion_point_leaf_size);
(num_motion_curves <= params.max_motion_curve_leaf_size);
}
/* multithreaded binning builder */
@@ -935,7 +817,7 @@ BVHNode *BVHBuild::create_object_leaf_nodes(const BVHReference *ref, int start,
BVHNode *BVHBuild::create_leaf_node(const BVHRange &range, const vector<BVHReference> &references)
{
/* This is a bit over-allocating here (considering leaf size into account),
/* This is a bit overallocating here (considering leaf size into account),
* but chunk-based re-allocation in vector makes it difficult to use small
* size of stack storage here. Some tweaks are possible tho.
*
@@ -973,7 +855,7 @@ BVHNode *BVHBuild::create_leaf_node(const BVHRange &range, const vector<BVHRefer
for (int i = 0; i < range.size(); i++) {
const BVHReference &ref = references[range.start() + i];
if (ref.prim_index() != -1) {
uint32_t type_index = PRIMITIVE_INDEX(ref.prim_type() & PRIMITIVE_ALL);
uint32_t type_index = bitscan((uint32_t)(ref.prim_type() & PRIMITIVE_ALL));
p_ref[type_index].push_back(ref);
p_type[type_index].push_back(ref.prim_type());
p_index[type_index].push_back(ref.prim_index());

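range_within_max_leaf_size above counts each primitive kind separately from its type flags and only accepts a leaf when every kind stays under its own limit. A standalone sketch of that bookkeeping; the flag and limit names are illustrative, not the actual Cycles enums.

#include <cstdio>
#include <vector>

enum : unsigned int {
  PRIM_TRIANGLE = 1u << 0,
  PRIM_CURVE = 1u << 1,
  PRIM_MOTION = 1u << 2,
};

struct LeafLimits {
  int max_triangles, max_motion_triangles, max_curves, max_motion_curves;
};

static bool within_max_leaf_size(const std::vector<unsigned int> &prim_types,
                                 const LeafLimits &limits)
{
  int num_triangles = 0, num_motion_triangles = 0, num_curves = 0, num_motion_curves = 0;
  for (const unsigned int type : prim_types) {
    if (type & PRIM_CURVE) {
      if (type & PRIM_MOTION) {
        num_motion_curves++;
      }
      else {
        num_curves++;
      }
    }
    else if (type & PRIM_TRIANGLE) {
      if (type & PRIM_MOTION) {
        num_motion_triangles++;
      }
      else {
        num_triangles++;
      }
    }
  }
  return (num_triangles <= limits.max_triangles) &&
         (num_motion_triangles <= limits.max_motion_triangles) &&
         (num_curves <= limits.max_curves) && (num_motion_curves <= limits.max_motion_curves);
}

int main()
{
  const std::vector<unsigned int> prims = {PRIM_TRIANGLE, PRIM_TRIANGLE, PRIM_CURVE | PRIM_MOTION};
  const LeafLimits limits = {8, 8, 2, 2};
  std::printf("fits in one leaf: %d\n", within_max_leaf_size(prims, limits)); /* 1 */
  return 0;
}
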
View File

@@ -39,7 +39,6 @@ class Geometry;
class Hair;
class Mesh;
class Object;
class PointCloud;
class Progress;
/* BVH Builder */
@@ -69,7 +68,6 @@ class BVHBuild {
/* Adding references. */
void add_reference_triangles(BoundBox &root, BoundBox &center, Mesh *mesh, int i);
void add_reference_curves(BoundBox &root, BoundBox &center, Hair *hair, int i);
void add_reference_points(BoundBox &root, BoundBox &center, PointCloud *pointcloud, int i);
void add_reference_geometry(BoundBox &root, BoundBox &center, Geometry *geom, int i);
void add_reference_object(BoundBox &root, BoundBox &center, Object *ob, int i);
void add_references(BVHRange &root);

View File

@@ -19,7 +19,6 @@
#include "bvh/bvh2.h"
#include "bvh/embree.h"
#include "bvh/metal.h"
#include "bvh/multi.h"
#include "bvh/optix.h"
@@ -41,12 +40,8 @@ const char *bvh_layout_name(BVHLayout layout)
return "EMBREE";
case BVH_LAYOUT_OPTIX:
return "OPTIX";
case BVH_LAYOUT_METAL:
return "METAL";
case BVH_LAYOUT_MULTI_OPTIX:
case BVH_LAYOUT_MULTI_METAL:
case BVH_LAYOUT_MULTI_OPTIX_EMBREE:
case BVH_LAYOUT_MULTI_METAL_EMBREE:
return "MULTI";
case BVH_LAYOUT_ALL:
return "ALL";
@@ -107,18 +102,9 @@ BVH *BVH::create(const BVHParams &params,
#else
(void)device;
break;
#endif
case BVH_LAYOUT_METAL:
#ifdef WITH_METAL
return bvh_metal_create(params, geometry, objects, device);
#else
(void)device;
break;
#endif
case BVH_LAYOUT_MULTI_OPTIX:
case BVH_LAYOUT_MULTI_METAL:
case BVH_LAYOUT_MULTI_OPTIX_EMBREE:
case BVH_LAYOUT_MULTI_METAL_EMBREE:
return new BVHMulti(params, geometry, objects);
case BVH_LAYOUT_NONE:
case BVH_LAYOUT_ALL:

View File

@@ -20,7 +20,6 @@
#include "scene/hair.h"
#include "scene/mesh.h"
#include "scene/object.h"
#include "scene/pointcloud.h"
#include "bvh/build.h"
#include "bvh/node.h"
@@ -387,7 +386,7 @@ void BVH2::refit_primitives(int start, int end, BoundBox &bbox, uint &visibility
}
else {
/* Primitives. */
if (pack.prim_type[prim] & PRIMITIVE_CURVE) {
if (pack.prim_type[prim] & PRIMITIVE_ALL_CURVE) {
/* Curves. */
const Hair *hair = static_cast<const Hair *>(ob->get_geometry());
int prim_offset = (params.top_level) ? hair->prim_offset : 0;
@@ -410,30 +409,6 @@ void BVH2::refit_primitives(int start, int end, BoundBox &bbox, uint &visibility
}
}
}
else if (pack.prim_type[prim] & PRIMITIVE_POINT) {
/* Points. */
const PointCloud *pointcloud = static_cast<const PointCloud *>(ob->get_geometry());
int prim_offset = (params.top_level) ? pointcloud->prim_offset : 0;
const float3 *points = &pointcloud->points[0];
const float *radius = &pointcloud->radius[0];
PointCloud::Point point = pointcloud->get_point(pidx - prim_offset);
point.bounds_grow(points, radius, bbox);
/* Motion points. */
if (pointcloud->get_use_motion_blur()) {
Attribute *attr = pointcloud->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
if (attr) {
size_t pointcloud_size = pointcloud->points.size();
size_t steps = pointcloud->get_motion_steps() - 1;
float3 *point_steps = attr->data_float3();
for (size_t i = 0; i < steps; i++)
point.bounds_grow(point_steps + i * pointcloud_size, radius, bbox);
}
}
}
else {
/* Triangles. */
const Mesh *mesh = static_cast<const Mesh *>(ob->get_geometry());
@@ -530,8 +505,7 @@ void BVH2::pack_instances(size_t nodes_size, size_t leaf_nodes_size)
pack.leaf_nodes.resize(leaf_nodes_size);
pack.object_node.resize(objects.size());
if (params.num_motion_curve_steps > 0 || params.num_motion_triangle_steps > 0 ||
params.num_motion_point_steps > 0) {
if (params.num_motion_curve_steps > 0 || params.num_motion_triangle_steps > 0) {
pack.prim_time.resize(prim_index_size);
}
@@ -590,7 +564,13 @@ void BVH2::pack_instances(size_t nodes_size, size_t leaf_nodes_size)
float2 *bvh_prim_time = bvh->pack.prim_time.size() ? &bvh->pack.prim_time[0] : NULL;
for (size_t i = 0; i < bvh_prim_index_size; i++) {
pack_prim_index[pack_prim_index_offset] = bvh_prim_index[i] + geom_prim_offset;
if (bvh->pack.prim_type[i] & PRIMITIVE_ALL_CURVE) {
pack_prim_index[pack_prim_index_offset] = bvh_prim_index[i] + geom_prim_offset;
}
else {
pack_prim_index[pack_prim_index_offset] = bvh_prim_index[i] + geom_prim_offset;
}
pack_prim_type[pack_prim_index_offset] = bvh_prim_type[i];
pack_prim_visibility[pack_prim_index_offset] = bvh_prim_visibility[i];
pack_prim_object[pack_prim_index_offset] = 0; // unused for instances

View File

@@ -45,7 +45,6 @@
# include "scene/hair.h"
# include "scene/mesh.h"
# include "scene/object.h"
# include "scene/pointcloud.h"
# include "util/foreach.h"
# include "util/log.h"
@@ -61,26 +60,6 @@ static_assert(Object::MAX_MOTION_STEPS == Geometry::MAX_MOTION_STEPS,
# define IS_HAIR(x) (x & 1)
/* This gets called by Embree at every valid ray/object intersection.
* Things like recording subsurface or shadow hits for later evaluation
* as well as filtering for volume objects happen here.
* Cycles' own BVH does that directly inside the traversal calls.
*/
static void rtc_filter_intersection_func(const RTCFilterFunctionNArguments *args)
{
/* Current implementation in Cycles assumes only single-ray intersection queries. */
assert(args->N == 1);
RTCHit *hit = (RTCHit *)args->hit;
CCLIntersectContext *ctx = ((IntersectContext *)args->context)->userRayExt;
const KernelGlobalsCPU *kg = ctx->kg;
const Ray *cray = ctx->ray;
if (kernel_embree_is_self_intersection(kg, hit, cray)) {
*args->valid = 0;
}
}
/* This gets called by Embree at every valid ray/object intersection.
* Things like recording subsurface or shadow hits for later evaluation
* as well as filtering for volume objects happen here.
@@ -95,16 +74,12 @@ static void rtc_filter_occluded_func(const RTCFilterFunctionNArguments *args)
RTCHit *hit = (RTCHit *)args->hit;
CCLIntersectContext *ctx = ((IntersectContext *)args->context)->userRayExt;
const KernelGlobalsCPU *kg = ctx->kg;
const Ray *cray = ctx->ray;
switch (ctx->type) {
case CCLIntersectContext::RAY_SHADOW_ALL: {
Intersection current_isect;
kernel_embree_convert_hit(kg, ray, hit, &current_isect);
if (intersection_skip_self_shadow(cray->self, current_isect.object, current_isect.prim)) {
*args->valid = 0;
return;
}
/* If no transparent shadows or max number of hits exceeded, all light is blocked. */
const int flags = intersection_get_shader_flags(kg, current_isect.prim, current_isect.type);
if (!(flags & (SD_HAS_TRANSPARENT_SHADOW)) || ctx->num_hits >= ctx->max_hits) {
@@ -115,7 +90,7 @@ static void rtc_filter_occluded_func(const RTCFilterFunctionNArguments *args)
++ctx->num_hits;
/* Always use baked shadow transparency for curves. */
if (current_isect.type & PRIMITIVE_CURVE) {
if (current_isect.type & PRIMITIVE_ALL_CURVE) {
ctx->throughput *= intersection_curve_shadow_transparency(
kg, current_isect.object, current_isect.prim, current_isect.u);
@@ -184,10 +159,6 @@ static void rtc_filter_occluded_func(const RTCFilterFunctionNArguments *args)
break;
}
}
if (intersection_skip_self_local(cray->self, current_isect.prim)) {
*args->valid = 0;
return;
}
/* No intersection information requested, just return a hit. */
if (ctx->max_hits == 0) {
@@ -253,11 +224,6 @@ static void rtc_filter_occluded_func(const RTCFilterFunctionNArguments *args)
if (ctx->num_hits < ctx->max_hits) {
Intersection current_isect;
kernel_embree_convert_hit(kg, ray, hit, &current_isect);
if (intersection_skip_self(cray->self, current_isect.object, current_isect.prim)) {
*args->valid = 0;
return;
}
Intersection *isect = &ctx->isect_s[ctx->num_hits];
++ctx->num_hits;
*isect = current_isect;
@@ -269,20 +235,17 @@ static void rtc_filter_occluded_func(const RTCFilterFunctionNArguments *args)
}
/* This tells Embree to continue tracing. */
*args->valid = 0;
break;
}
break;
}
case CCLIntersectContext::RAY_REGULAR:
default:
if (kernel_embree_is_self_intersection(kg, hit, cray)) {
*args->valid = 0;
return;
}
/* Nothing to do here. */
break;
}
}
static void rtc_filter_func_backface_cull(const RTCFilterFunctionNArguments *args)
static void rtc_filter_func_thick_curve(const RTCFilterFunctionNArguments *args)
{
const RTCRay *ray = (RTCRay *)args->ray;
RTCHit *hit = (RTCHit *)args->hit;
@@ -293,17 +256,9 @@ static void rtc_filter_func_backface_cull(const RTCFilterFunctionNArguments *arg
*args->valid = 0;
return;
}
CCLIntersectContext *ctx = ((IntersectContext *)args->context)->userRayExt;
const KernelGlobalsCPU *kg = ctx->kg;
const Ray *cray = ctx->ray;
if (kernel_embree_is_self_intersection(kg, hit, cray)) {
*args->valid = 0;
}
}
static void rtc_filter_occluded_func_backface_cull(const RTCFilterFunctionNArguments *args)
static void rtc_filter_occluded_func_thick_curve(const RTCFilterFunctionNArguments *args)
{
const RTCRay *ray = (RTCRay *)args->ray;
RTCHit *hit = (RTCHit *)args->hit;
@@ -399,12 +354,10 @@ void BVHEmbree::build(Progress &progress, Stats *stats, RTCDevice rtc_device_)
}
const bool dynamic = params.bvh_type == BVH_TYPE_DYNAMIC;
const bool compact = params.use_compact_structure;
scene = rtcNewScene(rtc_device);
const RTCSceneFlags scene_flags = (dynamic ? RTC_SCENE_FLAG_DYNAMIC : RTC_SCENE_FLAG_NONE) |
(compact ? RTC_SCENE_FLAG_COMPACT : RTC_SCENE_FLAG_NONE) |
RTC_SCENE_FLAG_ROBUST;
RTC_SCENE_FLAG_COMPACT | RTC_SCENE_FLAG_ROBUST;
rtcSetSceneFlags(scene, scene_flags);
build_quality = dynamic ? RTC_BUILD_QUALITY_LOW :
(params.use_spatial_split ? RTC_BUILD_QUALITY_HIGH :
@@ -457,12 +410,6 @@ void BVHEmbree::add_object(Object *ob, int i)
add_curves(ob, hair, i);
}
}
else if (geom->geometry_type == Geometry::POINTCLOUD) {
PointCloud *pointcloud = static_cast<PointCloud *>(geom);
if (pointcloud->num_points() > 0) {
add_points(ob, pointcloud, i);
}
}
}
void BVHEmbree::add_instance(Object *ob, int i)
@@ -549,7 +496,6 @@ void BVHEmbree::add_triangles(const Object *ob, const Mesh *mesh, int i)
rtcSetGeometryUserData(geom_id, (void *)prim_offset);
rtcSetGeometryOccludedFilterFunction(geom_id, rtc_filter_occluded_func);
rtcSetGeometryIntersectFilterFunction(geom_id, rtc_filter_intersection_func);
rtcSetGeometryMask(geom_id, ob->visibility_for_tracing());
rtcCommitGeometry(geom_id);
@@ -678,89 +624,6 @@ void BVHEmbree::set_curve_vertex_buffer(RTCGeometry geom_id, const Hair *hair, c
}
}
void BVHEmbree::set_point_vertex_buffer(RTCGeometry geom_id,
const PointCloud *pointcloud,
const bool update)
{
const Attribute *attr_mP = NULL;
size_t num_motion_steps = 1;
if (pointcloud->has_motion_blur()) {
attr_mP = pointcloud->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
if (attr_mP) {
num_motion_steps = pointcloud->get_motion_steps();
}
}
const size_t num_points = pointcloud->num_points();
/* Copy the point data to Embree */
const int t_mid = (num_motion_steps - 1) / 2;
const float *radius = pointcloud->get_radius().data();
for (int t = 0; t < num_motion_steps; ++t) {
const float3 *verts;
if (t == t_mid || attr_mP == NULL) {
verts = pointcloud->get_points().data();
}
else {
int t_ = (t > t_mid) ? (t - 1) : t;
verts = &attr_mP->data_float3()[t_ * num_points];
}
float4 *rtc_verts = (update) ? (float4 *)rtcGetGeometryBufferData(
geom_id, RTC_BUFFER_TYPE_VERTEX, t) :
(float4 *)rtcSetNewGeometryBuffer(geom_id,
RTC_BUFFER_TYPE_VERTEX,
t,
RTC_FORMAT_FLOAT4,
sizeof(float) * 4,
num_points);
assert(rtc_verts);
if (rtc_verts) {
for (size_t j = 0; j < num_points; ++j) {
rtc_verts[j] = float3_to_float4(verts[j]);
rtc_verts[j].w = radius[j];
}
}
if (update) {
rtcUpdateGeometryBuffer(geom_id, RTC_BUFFER_TYPE_VERTEX, t);
}
}
}
void BVHEmbree::add_points(const Object *ob, const PointCloud *pointcloud, int i)
{
size_t prim_offset = pointcloud->prim_offset;
const Attribute *attr_mP = NULL;
size_t num_motion_steps = 1;
if (pointcloud->has_motion_blur()) {
attr_mP = pointcloud->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
if (attr_mP) {
num_motion_steps = pointcloud->get_motion_steps();
}
}
enum RTCGeometryType type = RTC_GEOMETRY_TYPE_SPHERE_POINT;
RTCGeometry geom_id = rtcNewGeometry(rtc_device, type);
rtcSetGeometryBuildQuality(geom_id, build_quality);
rtcSetGeometryTimeStepCount(geom_id, num_motion_steps);
set_point_vertex_buffer(geom_id, pointcloud, false);
rtcSetGeometryUserData(geom_id, (void *)prim_offset);
rtcSetGeometryIntersectFilterFunction(geom_id, rtc_filter_func_backface_cull);
rtcSetGeometryOccludedFilterFunction(geom_id, rtc_filter_occluded_func_backface_cull);
rtcSetGeometryMask(geom_id, ob->visibility_for_tracing());
rtcCommitGeometry(geom_id);
rtcAttachGeometryByID(scene, geom_id, i * 2);
rtcReleaseGeometry(geom_id);
}
void BVHEmbree::add_curves(const Object *ob, const Hair *hair, int i)
{
size_t prim_offset = hair->curve_segment_offset;
@@ -812,12 +675,11 @@ void BVHEmbree::add_curves(const Object *ob, const Hair *hair, int i)
rtcSetGeometryUserData(geom_id, (void *)prim_offset);
if (hair->curve_shape == CURVE_RIBBON) {
rtcSetGeometryIntersectFilterFunction(geom_id, rtc_filter_intersection_func);
rtcSetGeometryOccludedFilterFunction(geom_id, rtc_filter_occluded_func);
}
else {
rtcSetGeometryIntersectFilterFunction(geom_id, rtc_filter_func_backface_cull);
rtcSetGeometryOccludedFilterFunction(geom_id, rtc_filter_occluded_func_backface_cull);
rtcSetGeometryIntersectFilterFunction(geom_id, rtc_filter_func_thick_curve);
rtcSetGeometryOccludedFilterFunction(geom_id, rtc_filter_occluded_func_thick_curve);
}
rtcSetGeometryMask(geom_id, ob->visibility_for_tracing());
@@ -854,14 +716,6 @@ void BVHEmbree::refit(Progress &progress)
rtcCommitGeometry(geom);
}
}
else if (geom->geometry_type == Geometry::POINTCLOUD) {
PointCloud *pointcloud = static_cast<PointCloud *>(geom);
if (pointcloud->num_points() > 0) {
RTCGeometry geom = rtcGetGeometry(scene, geom_id);
set_point_vertex_buffer(geom, pointcloud, true);
rtcCommitGeometry(geom);
}
}
}
geom_id += 2;
}

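The occlusion filter above keeps a shadow ray going through transparent hits, multiplying a throughput value and stopping once an opaque surface or the hit budget is reached. A plain C++ sketch of that accumulation with no Embree types; the Hit struct and the numbers are invented for illustration.

#include <cstdio>
#include <vector>

struct Hit {
  bool transparent;
  float transparency; /* fraction of light passing through, used when transparent */
};

/* Walk the hits along one shadow ray: an opaque hit (or exceeding the hit budget)
 * blocks all light, otherwise the transparencies multiply together. */
static float shadow_throughput(const std::vector<Hit> &hits_along_ray, const int max_hits)
{
  float throughput = 1.0f;
  int num_hits = 0;
  for (const Hit &hit : hits_along_ray) {
    if (!hit.transparent || num_hits >= max_hits) {
      return 0.0f;
    }
    throughput *= hit.transparency;
    num_hits++;
  }
  return throughput;
}

int main()
{
  const std::vector<Hit> hits = {{true, 0.5f}, {true, 0.25f}};
  std::printf("throughput: %.3f\n", shadow_throughput(hits, 8)); /* 0.125 */
  return 0;
}
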
Some files were not shown because too many files have changed in this diff.