
Merge branch 'asset-browser-grid-view' into file-browser-grid-view

2022-07-20 17:25:31 +02:00
2407 changed files with 88745 additions and 47369 deletions

View File

@@ -265,6 +265,7 @@ ForEachMacros:
- SET_SLOT_PROBING_BEGIN
- MAP_SLOT_PROBING_BEGIN
- VECTOR_SET_SLOT_PROBING_BEGIN
- WL_ARRAY_FOR_EACH
StatementMacros:
- PyObject_HEAD

View File

@@ -222,6 +222,17 @@ if(UNIX AND NOT (APPLE OR HAIKU))
option(WITH_GHOST_WAYLAND "Enable building Blender against Wayland for windowing (under development)" OFF)
mark_as_advanced(WITH_GHOST_WAYLAND)
if (WITH_GHOST_WAYLAND)
option(WITH_GHOST_WAYLAND_LIBDECOR "Optionally build with LibDecor window decorations" OFF)
mark_as_advanced(WITH_GHOST_WAYLAND_LIBDECOR)
option(WITH_GHOST_WAYLAND_DBUS "Optionally build with DBUS support (used for Cursor themes). May hang on startup on systems where DBUS is not used." OFF)
mark_as_advanced(WITH_GHOST_WAYLAND_DBUS)
option(WITH_GHOST_WAYLAND_DYNLOAD "Enable runtime dynamic WAYLAND libraries loading" OFF)
mark_as_advanced(WITH_GHOST_WAYLAND_DYNLOAD)
endif()
endif()
if(WITH_GHOST_X11)
@@ -255,19 +266,11 @@ if(WITH_GHOST_X11)
endif()
if(UNIX AND NOT APPLE)
option(WITH_SYSTEM_GLEW "Use GLEW OpenGL wrapper library provided by the operating system" OFF)
option(WITH_SYSTEM_GLES "Use OpenGL ES library provided by the operating system" ON)
option(WITH_SYSTEM_FREETYPE "Use the freetype library provided by the operating system" OFF)
else()
# Not an option on other platforms.
set(WITH_SYSTEM_GLEW OFF)
set(WITH_SYSTEM_GLES OFF)
set(WITH_SYSTEM_FREETYPE OFF)
endif()
if(UNIX AND NOT APPLE)
option(WITH_SYSTEM_EIGEN3 "Use the system's Eigen3 library" OFF)
else()
set(WITH_SYSTEM_FREETYPE OFF)
set(WITH_SYSTEM_EIGEN3 OFF)
endif()
@@ -300,6 +303,9 @@ option(WITH_USD "Enable Universal Scene Description (USD) Suppor
# 3D format support
# Disable opencollada when we don't have precompiled libs
option(WITH_OPENCOLLADA "Enable OpenCollada Support (http://www.opencollada.org)" ON)
option(WITH_IO_WAVEFRONT_OBJ "Enable Wavefront-OBJ 3D file format support (*.obj)" ON)
option(WITH_IO_STL "Enable STL 3D file format support (*.stl)" ON)
option(WITH_IO_GPENCIL "Enable grease-pencil file format IO (*.svg, *.pdf)" ON)
# Sound output
option(WITH_SDL "Enable SDL for sound" ON)
@@ -441,7 +447,7 @@ endif()
if(NOT APPLE)
option(WITH_CYCLES_DEVICE_HIP "Enable Cycles AMD HIP support" ON)
option(WITH_CYCLES_HIP_BINARIES "Build Cycles AMD HIP binaries" OFF)
set(CYCLES_HIP_BINARIES_ARCH gfx1010 gfx1011 gfx1012 gfx1030 gfx1031 gfx1032 gfx1034 CACHE STRING "AMD HIP architectures to build binaries for")
set(CYCLES_HIP_BINARIES_ARCH gfx900 gfx906 gfx90c gfx902 gfx1010 gfx1011 gfx1012 gfx1030 gfx1031 gfx1032 gfx1034 gfx1035 CACHE STRING "AMD HIP architectures to build binaries for")
mark_as_advanced(WITH_CYCLES_DEVICE_HIP)
mark_as_advanced(CYCLES_HIP_BINARIES_ARCH)
endif()
@@ -451,6 +457,21 @@ if(APPLE)
option(WITH_CYCLES_DEVICE_METAL "Enable Cycles Apple Metal compute support" ON)
endif()
# oneAPI
if(NOT APPLE)
option(WITH_CYCLES_DEVICE_ONEAPI "Enable Cycles oneAPI compute support" OFF)
option(WITH_CYCLES_ONEAPI_BINARIES "Enable Ahead-Of-Time compilation for Cycles oneAPI device" OFF)
option(WITH_CYCLES_ONEAPI_SYCL_HOST_ENABLED "Enable use of SYCL host (CPU) device execution by oneAPI implementation. This option is for debugging purposes and impacts GPU execution." OFF)
# https://www.intel.com/content/www/us/en/develop/documentation/oneapi-dpcpp-cpp-compiler-dev-guide-and-reference/top/compilation/ahead-of-time-compilation.html
SET (CYCLES_ONEAPI_SPIR64_GEN_DEVICES "dg2" CACHE STRING "oneAPI Intel GPU architectures to build binaries for")
SET (CYCLES_ONEAPI_SYCL_TARGETS spir64 spir64_gen CACHE STRING "oneAPI targets to build AOT binaries for")
mark_as_advanced(WITH_CYCLES_ONEAPI_SYCL_HOST_ENABLED)
mark_as_advanced(CYCLES_ONEAPI_SPIR64_GEN_DEVICES)
mark_as_advanced(CYCLES_ONEAPI_SYCL_TARGETS)
endif()
# Draw Manager
option(WITH_DRAW_DEBUG "Add extra debug capabilities to Draw Manager" OFF)
mark_as_advanced(WITH_DRAW_DEBUG)
@@ -515,20 +536,48 @@ endif()
# OpenGL
# Experimental EGL option.
option(WITH_GL_EGL "Use the EGL OpenGL system library instead of the platform specific OpenGL system library (CGL, GLX or WGL)" OFF)
mark_as_advanced(WITH_GL_EGL)
if(WITH_GHOST_WAYLAND)
# Wayland can only use EGL to create OpenGL contexts, not GLX.
set(WITH_GL_EGL ON)
endif()
if(UNIX AND NOT APPLE)
if(WITH_GL_EGL)
# GLEW can only be built with either GLX or EGL support. Most binary distributions are
# built with GLX support and we have no automated way to detect this. So always build
# GLEW from source to be sure it has EGL support.
set(WITH_SYSTEM_GLEW OFF)
else()
option(WITH_SYSTEM_GLEW "Use GLEW OpenGL wrapper library provided by the operating system" OFF)
endif()
option(WITH_SYSTEM_GLES "Use OpenGL ES library provided by the operating system" ON)
else()
# System GLEW and GLES not an option on other platforms.
set(WITH_SYSTEM_GLEW OFF)
set(WITH_SYSTEM_GLES OFF)
endif()
option(WITH_OPENGL "When off, limits visibility of the OpenGL headers to just bf_gpu and gawain (temporary option for development purposes)" ON)
option(WITH_GLEW_ES "Switches to experimental copy of GLEW that has support for OpenGL ES. (temporary option for development purposes)" OFF)
option(WITH_GL_EGL "Use the EGL OpenGL system library instead of the platform specific OpenGL system library (CGL, glX, or WGL)" OFF)
option(WITH_GL_PROFILE_ES20 "Support using OpenGL ES 2.0. (through either EGL or the AGL/WGL/XGL 'es20' profile)" OFF)
option(WITH_GPU_SHADER_BUILDER "Shader builder is a developer option enabling linting on GLSL during compilation" OFF)
option(WITH_GPU_BUILDTIME_SHADER_BUILDER "Shader builder is a developer option enabling linting on GLSL during compilation" OFF)
mark_as_advanced(
WITH_OPENGL
WITH_GLEW_ES
WITH_GL_EGL
WITH_GL_PROFILE_ES20
WITH_GPU_SHADER_BUILDER
WITH_GPU_BUILDTIME_SHADER_BUILDER
)
if(WITH_HEADLESS)
set(WITH_OPENGL OFF)
endif()
# Metal
if (APPLE)

View File

@@ -29,10 +29,12 @@ cmake_minimum_required(VERSION 3.5)
include(ExternalProject)
include(cmake/check_software.cmake)
include(cmake/versions.cmake)
include(cmake/options.cmake)
# versions.cmake needs to be included after options.cmake because it relies on the BLENDER_PLATFORM_ARM variable.
include(cmake/versions.cmake)
include(cmake/boost_build_options.cmake)
include(cmake/download.cmake)
include(cmake/macros.cmake)
if(ENABLE_MINGW64)
include(cmake/setup_mingw64.cmake)
@@ -57,7 +59,6 @@ include(cmake/alembic.cmake)
include(cmake/opensubdiv.cmake)
include(cmake/sdl.cmake)
include(cmake/opencollada.cmake)
include(cmake/llvm.cmake)
if(APPLE)
include(cmake/openmp.cmake)
endif()
@@ -75,6 +76,7 @@ include(cmake/osl.cmake)
include(cmake/tbb.cmake)
include(cmake/openvdb.cmake)
include(cmake/python.cmake)
include(cmake/llvm.cmake)
option(USE_PIP_NUMPY "Install NumPy using pip wheel instead of building from source" OFF)
if(APPLE AND ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64"))
set(USE_PIP_NUMPY ON)
@@ -96,6 +98,15 @@ include(cmake/fmt.cmake)
include(cmake/robinmap.cmake)
if(NOT APPLE)
include(cmake/xr_openxr.cmake)
if(NOT WIN32 OR BUILD_MODE STREQUAL Release)
include(cmake/dpcpp.cmake)
include(cmake/dpcpp_deps.cmake)
endif()
if(NOT WIN32)
include(cmake/igc.cmake)
include(cmake/gmmlib.cmake)
include(cmake/ocloc.cmake)
endif()
endif()
# OpenColorIO and dependencies.

View File

@@ -56,10 +56,7 @@ if(UNIX)
"On Debian and Ubuntu:\n"
" apt install autoconf automake libtool yasm tcl ninja-build meson python3-mako\n"
"\n"
"On macOS Intel (with homebrew):\n"
" brew install autoconf automake bison libtool pkg-config yasm\n"
"\n"
"On macOS ARM (with homebrew):\n"
"On macOS (with homebrew):\n"
" brew install autoconf automake bison flex libtool pkg-config yasm\n"
"\n"
"Other platforms:\n"

View File

@@ -101,3 +101,18 @@ download_source(ROBINMAP)
download_source(IMATH)
download_source(PYSTRING)
download_source(LEVEL_ZERO)
download_source(DPCPP)
download_source(VCINTRINSICS)
download_source(OPENCLHEADERS)
download_source(ICDLOADER)
download_source(MP11)
download_source(SPIRV_HEADERS)
download_source(IGC)
download_source(IGC_LLVM)
download_source(IGC_OPENCL_CLANG)
download_source(IGC_VCINTRINSICS)
download_source(IGC_SPIRV_HEADERS)
download_source(IGC_SPIRV_TOOLS)
download_source(IGC_SPIRV_TRANSLATOR)
download_source(GMMLIB)
download_source(OCLOC)

View File

@@ -0,0 +1,109 @@
# SPDX-License-Identifier: GPL-2.0-or-later
if(WIN32)
set(LLVM_GENERATOR "Ninja")
else()
set(LLVM_GENERATOR "Unix Makefiles")
endif()
set(DPCPP_CONFIGURE_ARGS
# When the external deps dpcpp needs are not found, it will automatically
# download them during the configure stage using FetchContent. Given we
# need to keep an archive of all sources used during the build for
# compliance reasons, it CANNOT download anything we do not know about.
# By setting this property to ON, all downloads are disabled and we will
# have to provide the missing deps some other way; a build error beats a
# compliance violation.
--cmake-opt FETCHCONTENT_FULLY_DISCONNECTED=ON
)
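For reference, a minimal sketch (hypothetical dependency name, not part of this change) of what the property does to a FetchContent-based download:

include(FetchContent)
# With downloads fully disconnected, FetchContent performs no network access and
# assumes the source directory of every declared dependency is already populated
# (by default <build>/_deps/<name>-src); configuration fails if it is not.
set(FETCHCONTENT_FULLY_DISCONNECTED ON)
FetchContent_Declare(demo_dep
  URL https://example.invalid/demo_dep.tar.gz  # never contacted while disconnected
)
FetchContent_MakeAvailable(demo_dep)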
set(DPCPP_SOURCE_ROOT ${BUILD_DIR}/dpcpp/src/external_dpcpp/)
set(DPCPP_EXTRA_ARGS
# When the external deps dpcpp needs are not found, it will automatically
# download them during the configure stage using FetchContent. Given we
# need to keep an archive of all sources used during the build for
# compliance reasons, it CANNOT download anything we do not know about.
# By setting this property to ON, all downloads are disabled and we will
# have to provide the missing deps some other way; a build or configure
# error beats a compliance violation.
-DFETCHCONTENT_FULLY_DISCONNECTED=ON
-DLLVMGenXIntrinsics_SOURCE_DIR=${BUILD_DIR}/vcintrinsics/src/external_vcintrinsics/
-DOpenCL_HEADERS=file://${PACKAGE_DIR}/${OPENCLHEADERS_FILE}
-DOpenCL_LIBRARY_SRC=file://${PACKAGE_DIR}/${ICDLOADER_FILE}
-DBOOST_MP11_SOURCE_DIR=${BUILD_DIR}/mp11/src/external_mp11/
-DLEVEL_ZERO_LIBRARY=${LIBDIR}/level-zero/lib/${LIBPREFIX}ze_loader${SHAREDLIBEXT}
-DLEVEL_ZERO_INCLUDE_DIR=${LIBDIR}/level-zero/include
-DLLVM_EXTERNAL_SPIRV_HEADERS_SOURCE_DIR=${BUILD_DIR}/spirvheaders/src/external_spirvheaders/
# Below here is copied from an invocation of buildbot/config.py
-DLLVM_ENABLE_ASSERTIONS=ON
-DLLVM_TARGETS_TO_BUILD=X86
-DLLVM_EXTERNAL_PROJECTS=sycl^^llvm-spirv^^opencl^^libdevice^^xpti^^xptifw
-DLLVM_EXTERNAL_SYCL_SOURCE_DIR=${DPCPP_SOURCE_ROOT}/sycl
-DLLVM_EXTERNAL_LLVM_SPIRV_SOURCE_DIR=${DPCPP_SOURCE_ROOT}/llvm-spirv
-DLLVM_EXTERNAL_XPTI_SOURCE_DIR=${DPCPP_SOURCE_ROOT}/xpti
-DXPTI_SOURCE_DIR=${DPCPP_SOURCE_ROOT}/xpti
-DLLVM_EXTERNAL_XPTIFW_SOURCE_DIR=${DPCPP_SOURCE_ROOT}/xptifw
-DLLVM_EXTERNAL_LIBDEVICE_SOURCE_DIR=${DPCPP_SOURCE_ROOT}/libdevice
-DLLVM_ENABLE_PROJECTS=clang^^sycl^^llvm-spirv^^opencl^^libdevice^^xpti^^xptifw
-DLIBCLC_TARGETS_TO_BUILD=
-DLIBCLC_GENERATE_REMANGLED_VARIANTS=OFF
-DSYCL_BUILD_PI_HIP_PLATFORM=AMD
-DLLVM_BUILD_TOOLS=ON
-DSYCL_ENABLE_WERROR=OFF
-DSYCL_INCLUDE_TESTS=ON
-DLLVM_ENABLE_DOXYGEN=OFF
-DLLVM_ENABLE_SPHINX=OFF
-DBUILD_SHARED_LIBS=OFF
-DSYCL_ENABLE_XPTI_TRACING=ON
-DLLVM_ENABLE_LLD=OFF
-DXPTI_ENABLE_WERROR=OFF
-DSYCL_CLANG_EXTRA_FLAGS=
-DSYCL_ENABLE_PLUGINS=level_zero
-DCMAKE_INSTALL_RPATH=\$ORIGIN
-DPython3_ROOT_DIR=${LIBDIR}/python/
-DPython3_EXECUTABLE=${PYTHON_BINARY}
-DPYTHON_EXECUTABLE=${PYTHON_BINARY}
-DLLDB_ENABLE_CURSES=OFF
-DLLVM_ENABLE_TERMINFO=OFF
)
if(WIN32)
list(APPEND DPCPP_EXTRA_ARGS -DPython3_FIND_REGISTRY=NEVER)
endif()
ExternalProject_Add(external_dpcpp
URL file://${PACKAGE_DIR}/${DPCPP_FILE}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH ${DPCPP_HASH_TYPE}=${DPCPP_HASH}
PREFIX ${BUILD_DIR}/dpcpp
CMAKE_GENERATOR ${LLVM_GENERATOR}
SOURCE_SUBDIR llvm
LIST_SEPARATOR ^^
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${LIBDIR}/dpcpp ${DEFAULT_CMAKE_FLAGS} ${DPCPP_EXTRA_ARGS}
#CONFIGURE_COMMAND ${PYTHON_BINARY} ${BUILD_DIR}/dpcpp/src/external_dpcpp/buildbot/configure.py ${DPCPP_CONFIGURE_ARGS}
#BUILD_COMMAND echo "." #${PYTHON_BINARY} ${BUILD_DIR}/dpcpp/src/external_dpcpp/buildbot/compile.py
INSTALL_COMMAND ${CMAKE_COMMAND} --build . -- deploy-sycl-toolchain
PATCH_COMMAND ${PATCH_CMD} -p 1 -d ${BUILD_DIR}/dpcpp/src/external_dpcpp < ${PATCH_DIR}/dpcpp.diff
INSTALL_DIR ${LIBDIR}/dpcpp
)
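The ^^ tokens above stand in for CMake's ';' list separator, which would otherwise make ExternalProject split the value; a minimal sketch (hypothetical project name) of the mechanism:

include(ExternalProject)
# CMAKE_ARGS is itself a ';'-separated list, so a value that is a list must be
# written with a placeholder separator; LIST_SEPARATOR makes ExternalProject
# turn every '^^' back into ';' before invoking the sub-project's cmake.
ExternalProject_Add(demo_llvm
  URL file://${PACKAGE_DIR}/demo.tar.gz
  LIST_SEPARATOR ^^
  CMAKE_ARGS -DLLVM_ENABLE_PROJECTS=clang^^sycl^^llvm-spirv
)
# The sub-project receives LLVM_ENABLE_PROJECTS=clang;sycl;llvm-spirv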
add_dependencies(
external_dpcpp
external_python
external_python_site_packages
external_vcintrinsics
external_openclheaders
external_icdloader
external_mp11
external_level-zero
external_spirvheaders
)
if(BUILD_MODE STREQUAL Release AND WIN32)
ExternalProject_Add_Step(external_dpcpp after_install
COMMAND ${CMAKE_COMMAND} -E rm -f ${LIBDIR}/dpcpp/bin/clang-cl.exe
COMMAND ${CMAKE_COMMAND} -E rm -f ${LIBDIR}/dpcpp/bin/clang-cpp.exe
COMMAND ${CMAKE_COMMAND} -E rm -f ${LIBDIR}/dpcpp/bin/clang.exe
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/dpcpp ${HARVEST_TARGET}/dpcpp
)
endif()

View File

@@ -0,0 +1,61 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# These are build-time requirements for dpcpp.
# We only have to unpack these; dpcpp will build them.
ExternalProject_Add(external_vcintrinsics
URL file://${PACKAGE_DIR}/${VCINTRINSICS_FILE}
URL_HASH ${VCINTRINSICS_HASH_TYPE}=${VCINTRINSICS_HASH}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
PREFIX ${BUILD_DIR}/vcintrinsics
CONFIGURE_COMMAND echo .
BUILD_COMMAND echo .
INSTALL_COMMAND echo .
)
# The OpenCL headers do not have to be unpacked, dpcpp will do it,
# but it wouldn't hurt to do it anyway as an opportunity to validate
# that the hash is correct.
ExternalProject_Add(external_openclheaders
URL file://${PACKAGE_DIR}/${OPENCLHEADERS_FILE}
URL_HASH ${OPENCLHEADERS_HASH_TYPE}=${OPENCLHEADERS_HASH}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
PREFIX ${BUILD_DIR}/openclheaders
CONFIGURE_COMMAND echo .
BUILD_COMMAND echo .
INSTALL_COMMAND echo .
)
# The icdloader does not have to be unpacked, dpcpp will do it,
# but it wouldn't hurt to do it anyway as an opportunity to validate
# that the hash is correct.
ExternalProject_Add(external_icdloader
URL file://${PACKAGE_DIR}/${ICDLOADER_FILE}
URL_HASH ${ICDLOADER_HASH_TYPE}=${ICDLOADER_HASH}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
PREFIX ${BUILD_DIR}/icdloader
CONFIGURE_COMMAND echo .
BUILD_COMMAND echo .
INSTALL_COMMAND echo .
)
ExternalProject_Add(external_mp11
URL file://${PACKAGE_DIR}/${MP11_FILE}
URL_HASH ${MP11_HASH_TYPE}=${MP11_HASH}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
PREFIX ${BUILD_DIR}/mp11
CONFIGURE_COMMAND echo .
BUILD_COMMAND echo .
INSTALL_COMMAND echo .
)
ExternalProject_Add(external_spirvheaders
URL file://${PACKAGE_DIR}/${SPIRV_HEADERS_FILE}
URL_HASH ${SPIRV_HEADERS_HASH_TYPE}=${SPIRV_HEADERS_HASH}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
PREFIX ${BUILD_DIR}/spirvheaders
CONFIGURE_COMMAND echo .
BUILD_COMMAND echo .
INSTALL_COMMAND echo .
)

View File

@@ -10,18 +10,12 @@ set(EMBREE_EXTRA_ARGS
-DEMBREE_RAY_MASK=ON
-DEMBREE_FILTER_FUNCTION=ON
-DEMBREE_BACKFACE_CULLING=OFF
-DEMBREE_MAX_ISA=AVX2
-DEMBREE_TASKING_SYSTEM=TBB
-DEMBREE_TBB_ROOT=${LIBDIR}/tbb
-DTBB_ROOT=${LIBDIR}/tbb
-DTBB_STATIC_LIB=${TBB_STATIC_LIBRARY}
)
if(BLENDER_PLATFORM_ARM)
set(EMBREE_EXTRA_ARGS
${EMBREE_EXTRA_ARGS}
-DEMBREE_MAX_ISA=NEON)
else()
if (NOT BLENDER_PLATFORM_ARM)
set(EMBREE_EXTRA_ARGS
${EMBREE_EXTRA_ARGS}
-DEMBREE_MAX_ISA=AVX2)
@@ -30,23 +24,10 @@ endif()
if(TBB_STATIC_LIBRARY)
set(EMBREE_EXTRA_ARGS
${EMBREE_EXTRA_ARGS}
-DEMBREE_TBB_LIBRARY_NAME=tbb_static
-DEMBREE_TBBMALLOC_LIBRARY_NAME=tbbmalloc_static
-DEMBREE_TBB_COMPONENT=tbb_static
)
endif()
if(WIN32)
set(EMBREE_BUILD_DIR ${BUILD_MODE}/)
if(BUILD_MODE STREQUAL Debug)
list(APPEND EMBREE_EXTRA_ARGS
-DEMBREE_TBBMALLOC_LIBRARY_NAME=tbbmalloc_debug
-DEMBREE_TBB_LIBRARY_NAME=tbb_debug
)
endif()
else()
set(EMBREE_BUILD_DIR)
endif()
ExternalProject_Add(external_embree
URL file://${PACKAGE_DIR}/${EMBREE_FILE}
DOWNLOAD_DIR ${DOWNLOAD_DIR}

View File

@@ -5,6 +5,8 @@ ExternalProject_Add(external_flex
URL_HASH ${FLEX_HASH_TYPE}=${FLEX_HASH}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
PREFIX ${BUILD_DIR}/flex
# This patch fixes the build with some versions of glibc (https://github.com/westes/flex/commit/24fd0551333e7eded87b64dd36062da3df2f6380)
PATCH_COMMAND ${PATCH_CMD} -d ${BUILD_DIR}/flex/src/external_flex < ${PATCH_DIR}/flex.diff
CONFIGURE_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/flex/src/external_flex/ && ${CONFIGURE_COMMAND} --prefix=${LIBDIR}/flex
BUILD_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/flex/src/external_flex/ && make -j${MAKE_THREADS}
INSTALL_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/flex/src/external_flex/ && make install

View File

@@ -0,0 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-or-later
set(GMMLIB_EXTRA_ARGS
)
ExternalProject_Add(external_gmmlib
URL file://${PACKAGE_DIR}/${GMMLIB_FILE}
URL_HASH ${GMMLIB_HASH_TYPE}=${GMMLIB_HASH}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
PREFIX ${BUILD_DIR}/gmmlib
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${LIBDIR}/gmmlib ${DEFAULT_CMAKE_FLAGS} ${GMMLIB_EXTRA_ARGS}
INSTALL_DIR ${LIBDIR}/gmmlib
)

View File

@@ -192,6 +192,10 @@ harvest(zstd/lib zstd/lib "*.a")
if(UNIX AND NOT APPLE)
harvest(libglu/lib mesa/lib "*.so*")
harvest(mesa/lib64 mesa/lib "*.so*")
endif()
harvest(dpcpp dpcpp "*")
harvest(igc dpcpp/lib/igc "*")
harvest(ocloc dpcpp/lib/ocloc "*")
endif()
endif()

View File

@@ -0,0 +1,126 @@
# SPDX-License-Identifier: GPL-2.0-or-later
unpack_only(igc_vcintrinsics)
unpack_only(igc_spirv_headers)
unpack_only(igc_spirv_tools)
#
# igc_opencl_clang contains patches that need to be applied
# to external_igc_llvm and igc_spirv_translator; we unpack
# igc_opencl_clang first, then have the patch stages of
# external_igc_llvm and igc_spirv_translator apply them.
#
ExternalProject_Add(external_igc_opencl_clang
URL file://${PACKAGE_DIR}/${IGC_OPENCL_CLANG_FILE}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH ${IGC_OPENCL_CLANG_HASH_TYPE}=${IGC_OPENCL_CLANG_HASH}
PREFIX ${BUILD_DIR}/igc_opencl_clang
CONFIGURE_COMMAND echo .
BUILD_COMMAND echo .
INSTALL_COMMAND echo .
PATCH_COMMAND ${PATCH_CMD} -p 1 -d ${BUILD_DIR}/igc_opencl_clang/src/external_igc_opencl_clang/ < ${PATCH_DIR}/igc_opencl_clang.diff
)
set(IGC_OPENCL_CLANG_PATCH_DIR ${BUILD_DIR}/igc_opencl_clang/src/external_igc_opencl_clang/patches)
set(IGC_LLVM_SOURCE_DIR ${BUILD_DIR}/igc_llvm/src/external_igc_llvm)
set(IGC_SPIRV_TRANSLATOR_SOURCE_DIR ${BUILD_DIR}/igc_spirv_translator/src/external_igc_spirv_translator)
ExternalProject_Add(external_igc_llvm
URL file://${PACKAGE_DIR}/${IGC_LLVM_FILE}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH ${IGC_LLVM_HASH_TYPE}=${IGC_LLVM_HASH}
PREFIX ${BUILD_DIR}/igc_llvm
CONFIGURE_COMMAND echo .
BUILD_COMMAND echo .
INSTALL_COMMAND echo .
PATCH_COMMAND ${PATCH_CMD} -p 1 -d ${IGC_LLVM_SOURCE_DIR} < ${IGC_OPENCL_CLANG_PATCH_DIR}/clang/0001-OpenCL-3.0-support.patch &&
${PATCH_CMD} -p 1 -d ${IGC_LLVM_SOURCE_DIR} < ${IGC_OPENCL_CLANG_PATCH_DIR}/clang/0002-Remove-__IMAGE_SUPPORT__-macro-for-SPIR.patch &&
${PATCH_CMD} -p 1 -d ${IGC_LLVM_SOURCE_DIR} < ${IGC_OPENCL_CLANG_PATCH_DIR}/clang/0003-Avoid-calling-ParseCommandLineOptions-in-BackendUtil.patch &&
${PATCH_CMD} -p 1 -d ${IGC_LLVM_SOURCE_DIR} < ${IGC_OPENCL_CLANG_PATCH_DIR}/clang/0004-OpenCL-support-cl_ext_float_atomics.patch &&
${PATCH_CMD} -p 1 -d ${IGC_LLVM_SOURCE_DIR} < ${IGC_OPENCL_CLANG_PATCH_DIR}/clang/0005-OpenCL-Add-cl_khr_integer_dot_product.patch &&
${PATCH_CMD} -p 1 -d ${IGC_LLVM_SOURCE_DIR} < ${IGC_OPENCL_CLANG_PATCH_DIR}/llvm/0001-Memory-leak-fix-for-Managed-Static-Mutex.patch &&
${PATCH_CMD} -p 1 -d ${IGC_LLVM_SOURCE_DIR} < ${IGC_OPENCL_CLANG_PATCH_DIR}/llvm/0002-Remove-repo-name-in-LLVM-IR.patch
)
add_dependencies(
external_igc_llvm
external_igc_opencl_clang
)
ExternalProject_Add(external_igc_spirv_translator
URL file://${PACKAGE_DIR}/${IGC_SPIRV_TRANSLATOR_FILE}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH ${IGC_SPIRV_TRANSLATOR_HASH_TYPE}=${IGC_SPIRV_TRANSLATOR_HASH}
PREFIX ${BUILD_DIR}/igc_spirv_translator
CONFIGURE_COMMAND echo .
BUILD_COMMAND echo .
INSTALL_COMMAND echo .
PATCH_COMMAND ${PATCH_CMD} -p 1 -d ${IGC_SPIRV_TRANSLATOR_SOURCE_DIR} < ${IGC_OPENCL_CLANG_PATCH_DIR}/spirv/0001-update-SPIR-V-headers-for-SPV_INTEL_split_barrier.patch &&
${PATCH_CMD} -p 1 -d ${IGC_SPIRV_TRANSLATOR_SOURCE_DIR} < ${IGC_OPENCL_CLANG_PATCH_DIR}/spirv/0002-Add-support-for-split-barriers-extension-SPV_INTEL_s.patch &&
${PATCH_CMD} -p 1 -d ${IGC_SPIRV_TRANSLATOR_SOURCE_DIR} < ${IGC_OPENCL_CLANG_PATCH_DIR}/spirv/0003-Support-cl_bf16_conversions.patch
)
add_dependencies(
external_igc_spirv_translator
external_igc_opencl_clang
)
if(WIN32)
set(IGC_GENERATOR "Ninja")
set(IGC_TARGET Windows64)
else()
set(IGC_GENERATOR "Unix Makefiles")
set(IGC_TARGET Linux64)
endif()
set(IGC_EXTRA_ARGS
-DIGC_OPTION__ARCHITECTURE_TARGET=${IGC_TARGET}
-DIGC_OPTION__ARCHITECTURE_HOST=${IGC_TARGET}
)
if(UNIX AND NOT APPLE)
list(APPEND IGC_EXTRA_ARGS
-DFLEX_EXECUTABLE=${LIBDIR}/flex/bin/flex
-DFLEX_INCLUDE_DIR=${LIBDIR}/flex/include
)
endif()
ExternalProject_Add(external_igc
URL file://${PACKAGE_DIR}/${IGC_FILE}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH ${IGC_HASH_TYPE}=${IGC_HASH}
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${LIBDIR}/igc ${DEFAULT_CMAKE_FLAGS} ${IGC_EXTRA_ARGS}
# IGC is pretty set in its ways about where sub-projects ought to live. For some
# it offers hooks to supply alternative folders, others are just hardcoded with no
# way to configure. We symlink everything here, since it's less work than trying
# to convince the cmake scripts to accept alternative locations.
#
PATCH_COMMAND ${CMAKE_COMMAND} -E create_symlink ${BUILD_DIR}/igc_llvm/src/external_igc_llvm/ ${BUILD_DIR}/igc/src/llvm-project &&
${CMAKE_COMMAND} -E create_symlink ${BUILD_DIR}/igc_opencl_clang/src/external_igc_opencl_clang/ ${BUILD_DIR}/igc/src/llvm-project/llvm/projects/opencl-clang &&
${CMAKE_COMMAND} -E create_symlink ${BUILD_DIR}/igc_spirv_translator/src/external_igc_spirv_translator/ ${BUILD_DIR}/igc/src/llvm-project/llvm/projects/llvm-spirv &&
${CMAKE_COMMAND} -E create_symlink ${BUILD_DIR}/igc_spirv_tools/src/external_igc_spirv_tools/ ${BUILD_DIR}/igc/src/SPIRV-Tools &&
${CMAKE_COMMAND} -E create_symlink ${BUILD_DIR}/igc_spirv_headers/src/external_igc_spirv_headers/ ${BUILD_DIR}/igc/src/SPIRV-Headers &&
${CMAKE_COMMAND} -E create_symlink ${BUILD_DIR}/igc_vcintrinsics/src/external_igc_vcintrinsics/ ${BUILD_DIR}/igc/src/vc-intrinsics
PREFIX ${BUILD_DIR}/igc
INSTALL_DIR ${LIBDIR}/igc
INSTALL_COMMAND ${CMAKE_COMMAND} --install . --strip
CMAKE_GENERATOR ${IGC_GENERATOR}
)
add_dependencies(
external_igc
external_igc_vcintrinsics
external_igc_llvm
external_igc_opencl_clang
external_igc_spirv_headers
external_igc_spirv_tools
external_igc_spirv_translator
)
if(UNIX AND NOT APPLE)
add_dependencies(
external_igc
external_flex
)
endif()

View File

@@ -6,6 +6,7 @@ if(WIN32)
-DBISON_EXECUTABLE=${LIBDIR}/flexbison/win_bison.exe
-DM4_EXECUTABLE=${DOWNLOAD_DIR}/mingw/mingw64/msys/1.0/bin/m4.exe
-DARM_ENABLED=Off
-DPython3_FIND_REGISTRY=NEVER
)
elseif(APPLE)
# Use bison and flex installed via Homebrew.
@@ -27,7 +28,7 @@ elseif(UNIX)
set(ISPC_EXTRA_ARGS_UNIX
-DCMAKE_C_COMPILER=${LIBDIR}/llvm/bin/clang
-DCMAKE_CXX_COMPILER=${LIBDIR}/llvm/bin/clang++
-DARM_ENABLED=Off
-DARM_ENABLED=${BLENDER_PLATFORM_ARM}
-DFLEX_EXECUTABLE=${LIBDIR}/flex/bin/flex
)
endif()
@@ -43,6 +44,8 @@ set(ISPC_EXTRA_ARGS
-DISPC_INCLUDE_TESTS=Off
-DCLANG_LIBRARY_DIR=${LIBDIR}/llvm/lib
-DCLANG_INCLUDE_DIRS=${LIBDIR}/llvm/include
-DPython3_ROOT_DIR=${LIBDIR}/python/
-DPython3_EXECUTABLE=${PYTHON_BINARY}
${ISPC_EXTRA_ARGS_WIN}
${ISPC_EXTRA_ARGS_APPLE}
${ISPC_EXTRA_ARGS_UNIX}
@@ -61,6 +64,7 @@ ExternalProject_Add(external_ispc
add_dependencies(
external_ispc
ll
external_python
)
if(WIN32)

View File

@@ -25,11 +25,14 @@ set(LLVM_EXTRA_ARGS
-DLLVM_BUILD_LLVM_C_DYLIB=OFF
-DLLVM_ENABLE_UNWIND_TABLES=OFF
-DLLVM_ENABLE_PROJECTS=clang${LLVM_BUILD_CLANG_TOOLS_EXTRA}
-DPython3_ROOT_DIR=${LIBDIR}/python/
-DPython3_EXECUTABLE=${PYTHON_BINARY}
${LLVM_XML2_ARGS}
)
if(WIN32)
set(LLVM_GENERATOR "Ninja")
list(APPEND LLVM_EXTRA_ARGS -DPython3_FIND_REGISTRY=NEVER)
else()
set(LLVM_GENERATOR "Unix Makefiles")
endif()
@@ -74,3 +77,8 @@ if(APPLE)
external_xml2
)
endif()
add_dependencies(
ll
external_python
)

View File

@@ -0,0 +1,18 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# shorthand to only unpack a certain dependency
macro(unpack_only name)
string(TOUPPER ${name} UPPER_NAME)
set(TARGET_FILE ${${UPPER_NAME}_FILE})
set(TARGET_HASH_TYPE ${${UPPER_NAME}_HASH_TYPE})
set(TARGET_HASH ${${UPPER_NAME}_HASH})
ExternalProject_Add(external_${name}
URL file://${PACKAGE_DIR}/${TARGET_FILE}
URL_HASH ${TARGET_HASH_TYPE}=${TARGET_HASH}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
PREFIX ${BUILD_DIR}/${name}
CONFIGURE_COMMAND echo .
BUILD_COMMAND echo .
INSTALL_COMMAND echo .
)
endmacro()
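For reference, this is how igc.cmake above consumes the macro; the echo . no-op commands leave only the download, hash check, and unpack steps active:

# Downloads ${IGC_SPIRV_HEADERS_FILE}, verifies it against
# ${IGC_SPIRV_HEADERS_HASH}, and unpacks it under ${BUILD_DIR}/igc_spirv_headers
# without configuring, building, or installing anything.
unpack_only(igc_spirv_headers)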

View File

@@ -0,0 +1,24 @@
# SPDX-License-Identifier: GPL-2.0-or-later
set(OCLOC_EXTRA_ARGS
-DNEO_SKIP_UNIT_TESTS=1
-DNEO_BUILD_WITH_OCL=0
-DBUILD_WITH_L0=0
-DIGC_DIR=${LIBDIR}/igc
-DGMM_DIR=${LIBDIR}/gmmlib
)
ExternalProject_Add(external_ocloc
URL file://${PACKAGE_DIR}/${OCLOC_FILE}
URL_HASH ${OCLOC_HASH_TYPE}=${OCLOC_HASH}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
PREFIX ${BUILD_DIR}/ocloc
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${LIBDIR}/ocloc ${DEFAULT_CMAKE_FLAGS} ${OCLOC_EXTRA_ARGS}
INSTALL_DIR ${LIBDIR}/ocloc
)
add_dependencies(
external_ocloc
external_igc
external_gmmlib
)

View File

@@ -9,6 +9,7 @@ set(OIDN_EXTRA_ARGS
-DOIDN_STATIC_RUNTIME=OFF
-DISPC_EXECUTABLE=${LIBDIR}/ispc/bin/ispc
-DOIDN_FILTER_RTLIGHTMAP=OFF
-DPYTHON_EXECUTABLE=${PYTHON_BINARY}
)
if(WIN32)
@@ -38,6 +39,7 @@ add_dependencies(
external_openimagedenoise
external_tbb
external_ispc
external_python
)
if(WIN32)

View File

@@ -38,6 +38,7 @@ message("BUILD_DIR = ${BUILD_DIR}")
if(WIN32)
set(PATCH_CMD ${DOWNLOAD_DIR}/mingw/mingw64/msys/1.0/bin/patch.exe)
set(LIBEXT ".lib")
set(SHAREDLIBEXT ".lib")
set(LIBPREFIX "")
# For OIIO and OSL
@@ -96,6 +97,7 @@ if(WIN32)
else()
set(PATCH_CMD patch)
set(LIBEXT ".a")
set(SHAREDLIBEXT ".so")
set(LIBPREFIX "lib")
if(APPLE)
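A quick illustration of how these variables compose into a platform-correct library path; this exact pattern appears in dpcpp.cmake above for the Level Zero loader:

# Resolves to ${LIBDIR}/level-zero/lib/libze_loader.so on Linux and to
# ${LIBDIR}/level-zero/lib/ze_loader.lib (the import library) on Windows.
set(ZE_LOADER ${LIBDIR}/level-zero/lib/${LIBPREFIX}ze_loader${SHAREDLIBEXT})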

View File

@@ -147,7 +147,7 @@ set(OPENIMAGEIO_HASH de45fb38501c4581062b522b53b6141c)
set(OPENIMAGEIO_HASH_TYPE MD5)
set(OPENIMAGEIO_FILE OpenImageIO-${OPENIMAGEIO_VERSION}.tar.gz)
# 8.0.0 is currently oiio's preferred vesion although never versions may be available.
# 8.0.0 is currently oiio's preferred version although newer versions may be available.
# the preferred version can be found in oiio's externalpackages.cmake
set(FMT_VERSION 8.0.0)
set(FMT_URI https://github.com/fmtlib/fmt/archive/refs/tags/${FMT_VERSION}.tar.gz)
@@ -155,7 +155,7 @@ set(FMT_HASH 7bce0e9e022e586b178b150002e7c2339994e3c2bbe44027e9abb0d60f9cce83)
set(FMT_HASH_TYPE SHA256)
set(FMT_FILE fmt-${FMT_VERSION}.tar.gz)
# 0.6.2 is currently oiio's preferred vesion although never versions may be available.
# 0.6.2 is currently oiio's preferred version although newer versions may be available.
# the preferred version can be found in oiio's externalpackages.cmake
set(ROBINMAP_VERSION v0.6.2)
set(ROBINMAP_URI https://github.com/Tessil/robin-map/archive/refs/tags/${ROBINMAP_VERSION}.tar.gz)
@@ -410,9 +410,9 @@ set(SQLITE_HASH fb558c49ee21a837713c4f1e7e413309aabdd9c7)
set(SQLITE_HASH_TYPE SHA1)
set(SQLITE_FILE sqlite-src-3240000.zip)
set(EMBREE_VERSION 3.13.3)
set(EMBREE_VERSION 3.13.4)
set(EMBREE_URI https://github.com/embree/embree/archive/v${EMBREE_VERSION}.zip)
set(EMBREE_HASH f62766ba54e48a2f327c3a22596e7133)
set(EMBREE_HASH 52d0be294d6c88ba7a6c9e046796e7be)
set(EMBREE_HASH_TYPE MD5)
set(EMBREE_FILE embree-v${EMBREE_VERSION}.zip)
@@ -502,3 +502,134 @@ set(LEVEL_ZERO_URI https://github.com/oneapi-src/level-zero/archive/refs/tags/${
set(LEVEL_ZERO_HASH c39bb05a8e5898aa6c444e1704105b93d3f1888b9c333f8e7e73825ffbfb2617)
set(LEVEL_ZERO_HASH_TYPE SHA256)
set(LEVEL_ZERO_FILE level-zero-${LEVEL_ZERO_VERSION}.tar.gz)
set(DPCPP_VERSION 20220620)
set(DPCPP_URI https://github.com/intel/llvm/archive/refs/tags/sycl-nightly/${DPCPP_VERSION}.tar.gz)
set(DPCPP_HASH a5f41abd5229d28afa92cbd8a5d8d786ee698bf239f722929fd686276bad692c)
set(DPCPP_HASH_TYPE SHA256)
set(DPCPP_FILE DPCPP-${DPCPP_VERSION}.tar.gz)
########################
### DPCPP DEPS BEGIN ###
########################
# The following deps are build-time requirements for dpcpp. Where possible,
# the source location in the dpcpp source tree for the chosen version is
# documented by each dep. These will only have to be downloaded and unpacked;
# dpcpp will take care of building them. Unpacking is done in dpcpp_deps.cmake.
# Source llvm/lib/SYCLLowerIR/CMakeLists.txt
set(VCINTRINSICS_VERSION 984bb27baacce6ee5c716c2e64845f2a1928025b)
set(VCINTRINSICS_URI https://github.com/intel/vc-intrinsics/archive/${VCINTRINSICS_VERSION}.tar.gz)
set(VCINTRINSICS_HASH abea415a15a0dd11fdc94dee8fb462910f2548311b787e02f42509789e1b0d7b)
set(VCINTRINSICS_HASH_TYPE SHA256)
set(VCINTRINSICS_FILE vc-intrinsics-${VCINTRINSICS_VERSION}.tar.gz)
# Source opencl/CMakeLists.txt
set(OPENCLHEADERS_VERSION dcd5bede6859d26833cd85f0d6bbcee7382dc9b3)
set(OPENCLHEADERS_URI https://github.com/KhronosGroup/OpenCL-Headers/archive/${OPENCLHEADERS_VERSION}.tar.gz)
set(OPENCLHEADERS_HASH ca8090359654e94f2c41e946b7e9d826253d795ae809ce7c83a7d3c859624693)
set(OPENCLHEADERS_HASH_TYPE SHA256)
set(OPENCLHEADERS_FILE opencl_headers-${OPENCLHEADERS_VERSION}.tar.gz)
# Source opencl/CMakeLists.txt
set(ICDLOADER_VERSION aec3952654832211636fc4af613710f80e203b0a)
set(ICDLOADER_URI https://github.com/KhronosGroup/OpenCL-ICD-Loader/archive/${ICDLOADER_VERSION}.tar.gz)
set(ICDLOADER_HASH e1880551d67bd8dc31d13de63b94bbfd6b1f315b6145dad1ffcd159b89bda93c)
set(ICDLOADER_HASH_TYPE SHA256)
set(ICDLOADER_FILE icdloader-${ICDLOADER_VERSION}.tar.gz)
# Source sycl/cmake/modules/AddBoostMp11Headers.cmake
# Using external MP11 here, getting AddBoostMp11Headers.cmake to recognize
# our copy in boost directly was more trouble than it was worth.
set(MP11_VERSION 7bc4e1ae9b36ec8ee635c3629b59ec525bbe82b9)
set(MP11_URI https://github.com/boostorg/mp11/archive/${MP11_VERSION}.tar.gz)
set(MP11_HASH 071ee2bd3952ec89882edb3af25dd1816f6b61723f66e42eea32f4d02ceef426)
set(MP11_HASH_TYPE SHA256)
set(MP11_FILE mp11-${MP11_VERSION}.tar.gz)
# Source llvm-spirv/CMakeLists.txt (repo)
# Source llvm-spirv/spirv-headers-tag.conf (hash)
set(SPIRV_HEADERS_VERSION 36c0c1596225e728bd49abb7ef56a3953e7ed468)
set(SPIRV_HEADERS_URI https://github.com/KhronosGroup/SPIRV-Headers/archive/${SPIRV_HEADERS_VERSION}.tar.gz)
set(SPIRV_HEADERS_HASH 7a5c89633f8740456fe8adee052033e134476d267411d1336c0cb1e587a9229a)
set(SPIRV_HEADERS_HASH_TYPE SHA256)
set(SPIRV_HEADERS_FILE SPIR-V-Headers-${SPIRV_HEADERS_VERSION}.tar.gz)
######################
### DPCPP DEPS END ###
######################
##########################################
### Intel Graphics Compiler DEPS BEGIN ###
##########################################
# The following deps are build-time requirements for the Intel Graphics
# Compiler; the versions used are taken from the following location:
# https://github.com/intel/intel-graphics-compiler/releases
set(IGC_VERSION 1.0.11222)
set(IGC_URI https://github.com/intel/intel-graphics-compiler/archive/refs/tags/igc-${IGC_VERSION}.tar.gz)
set(IGC_HASH d92f0608dcbb52690855685f9447282e5c09c0ba98ae35fabf114fcf8b1e9fcf)
set(IGC_HASH_TYPE SHA256)
set(IGC_FILE igc-${IGC_VERSION}.tar.gz)
set(IGC_LLVM_VERSION llvmorg-11.1.0)
set(IGC_LLVM_URI https://github.com/llvm/llvm-project/archive/refs/tags/${IGC_LLVM_VERSION}.tar.gz)
set(IGC_LLVM_HASH 53a0719f3f4b0388013cfffd7b10c7d5682eece1929a9553c722348d1f866e79)
set(IGC_LLVM_HASH_TYPE SHA256)
set(IGC_LLVM_FILE ${IGC_LLVM_VERSION}.tar.gz)
# WARNING WARNING WARNING
#
# IGC_OPENCL_CLANG contains patches for some of its dependencies.
#
# Whenever IGC_OPENCL_CLANG_VERSION changes, one *MUST* inspect
# IGC_OPENCL_CLANG's patches folder and update igc.cmake to account for
# any added or removed patches.
#
# WARNING WARNING WARNING
set(IGC_OPENCL_CLANG_VERSION bbdd1587f577397a105c900be114b56755d1f7dc)
set(IGC_OPENCL_CLANG_URI https://github.com/intel/opencl-clang/archive/${IGC_OPENCL_CLANG_VERSION}.tar.gz)
set(IGC_OPENCL_CLANG_HASH d08315f1b0d8a6fef33de2b3e6aa7356534c324910634962c72523d970773efc)
set(IGC_OPENCL_CLANG_HASH_TYPE SHA256)
set(IGC_OPENCL_CLANG_FILE opencl-clang-${IGC_OPENCL_CLANG_VERSION}.tar.gz)
set(IGC_VCINTRINSICS_VERSION v0.4.0)
set(IGC_VCINTRINSICS_URI https://github.com/intel/vc-intrinsics/archive/refs/tags/${IGC_VCINTRINSICS_VERSION}.tar.gz)
set(IGC_VCINTRINSICS_HASH c8b92682ad5031cf9d5b82a40e7d5c0e763cd9278660adbcaa69aab988e4b589)
set(IGC_VCINTRINSICS_HASH_TYPE SHA256)
set(IGC_VCINTRINSICS_FILE vc-intrinsics-${IGC_VCINTRINSICS_VERSION}.tar.gz)
set(IGC_SPIRV_HEADERS_VERSION sdk-1.3.204.1)
set(IGC_SPIRV_HEADERS_URI https://github.com/KhronosGroup/SPIRV-Headers/archive/refs/tags/${IGC_SPIRV_HEADERS_VERSION}.tar.gz)
set(IGC_SPIRV_HEADERS_HASH 262864053968c217d45b24b89044a7736a32361894743dd6cfe788df258c746c)
set(IGC_SPIRV_HEADERS_HASH_TYPE SHA256)
set(IGC_SPIRV_HEADERS_FILE SPIR-V-Headers-${IGC_SPIRV_HEADERS_VERSION}.tar.gz)
set(IGC_SPIRV_TOOLS_VERSION sdk-1.3.204.1)
set(IGC_SPIRV_TOOLS_URI https://github.com/KhronosGroup/SPIRV-Tools/archive/refs/tags/${IGC_SPIRV_TOOLS_VERSION}.tar.gz)
set(IGC_SPIRV_TOOLS_HASH 6e19900e948944243024aedd0a201baf3854b377b9cc7a386553bc103b087335)
set(IGC_SPIRV_TOOLS_HASH_TYPE SHA256)
set(IGC_SPIRV_TOOLS_FILE SPIR-V-Tools-${IGC_SPIRV_TOOLS_VERSION}.tar.gz)
set(IGC_SPIRV_TRANSLATOR_VERSION 99420daab98998a7e36858befac9c5ed109d4920)
set(IGC_SPIRV_TRANSLATOR_URI https://github.com/KhronosGroup/SPIRV-LLVM-Translator/archive/${IGC_SPIRV_TRANSLATOR_VERSION}.tar.gz)
set(IGC_SPIRV_TRANSLATOR_HASH 77dfb4ddb6bfb993535562c02ddea23f0a0d1c5a0258c1afe7e27c894ff783a8)
set(IGC_SPIRV_TRANSLATOR_HASH_TYPE SHA256)
set(IGC_SPIRV_TRANSLATOR_FILE SPIR-V-Translator-${IGC_SPIRV_TRANSLATOR_VERSION}.tar.gz)
########################################
### Intel Graphics Compiler DEPS END ###
########################################
set(GMMLIB_VERSION intel-gmmlib-22.1.2)
set(GMMLIB_URI https://github.com/intel/gmmlib/archive/refs/tags/${GMMLIB_VERSION}.tar.gz)
set(GMMLIB_HASH 3b9a6d5e7e3f5748b3d0a2fb0e980ae943907fece0980bd9c0508e71c838e334)
set(GMMLIB_HASH_TYPE SHA256)
set(GMMLIB_FILE ${GMMLIB_VERSION}.tar.gz)
set(OCLOC_VERSION 22.20.23198)
set(OCLOC_URI https://github.com/intel/compute-runtime/archive/refs/tags/${OCLOC_VERSION}.tar.gz)
set(OCLOC_HASH ab22b8bf2560a57fdd3def0e35a62ca75991406f959c0263abb00cd6cd9ae998)
set(OCLOC_HASH_TYPE SHA256)
set(OCLOC_FILE ocloc-${OCLOC_VERSION}.tar.gz)

View File

@@ -36,19 +36,19 @@ getopt \
-o s:i:t:h \
--long source:,install:,tmp:,info:,threads:,help,show-deps,no-sudo,no-build,no-confirm,\
with-all,with-opencollada,with-jack,with-pulseaudio,with-embree,with-oidn,with-nanovdb,\
ver-ocio:,ver-oiio:,ver-llvm:,ver-osl:,ver-osd:,ver-openvdb:,ver-xr-openxr:,\
ver-ocio:,ver-oiio:,ver-llvm:,ver-osl:,ver-osd:,ver-openvdb:,ver-xr-openxr:,ver-level-zero:,\
force-all,force-python,force-boost,force-tbb,\
force-ocio,force-imath,force-openexr,force-oiio,force-llvm,force-osl,force-osd,force-openvdb,\
force-ffmpeg,force-opencollada,force-alembic,force-embree,force-oidn,force-usd,\
force-xr-openxr,\
force-xr-openxr,force-level-zero,\
build-all,build-python,build-boost,build-tbb,\
build-ocio,build-imath,build-openexr,build-oiio,build-llvm,build-osl,build-osd,build-openvdb,\
build-ffmpeg,build-opencollada,build-alembic,build-embree,build-oidn,build-usd,\
build-xr-openxr,\
build-xr-openxr,build-level-zero,\
skip-python,skip-boost,skip-tbb,\
skip-ocio,skip-imath,skip-openexr,skip-oiio,skip-llvm,skip-osl,skip-osd,skip-openvdb,\
skip-ffmpeg,skip-opencollada,skip-alembic,skip-embree,skip-oidn,skip-usd,\
skip-xr-openxr \
skip-xr-openxr,skip-level-zero \
-- "$@" \
)
@@ -165,6 +165,9 @@ ARGUMENTS_INFO="\"COMMAND LINE ARGUMENTS:
--ver-xr-openxr=<ver>
Force version of OpenXR-SDK.
--ver-level-zero=<ver>
Force version of OneAPI Level Zero library.
Note about the --ver-foo options:
It may not always work as expected (some libs are actually checked out from a git rev...), yet it might help
to fix some build issues (like LLVM mismatch with the version used by your graphics system).
@@ -226,6 +229,9 @@ ARGUMENTS_INFO="\"COMMAND LINE ARGUMENTS:
--build-xr-openxr
Force the build of OpenXR-SDK.
--build-level-zero
Force the build of OneAPI Level Zero library.
Note about the --build-foo options:
* They force the script to prefer building dependencies rather than using available packages.
This may make things simpler and allow working around some distribution bugs, but on the other hand it will
@@ -293,6 +299,9 @@ ARGUMENTS_INFO="\"COMMAND LINE ARGUMENTS:
--force-xr-openxr
Force the rebuild of OpenXR-SDK.
--force-level-zero
Force the rebuild of OneAPI Level Zero library.
Note about the --force-foo options:
* They obviously only have an effect if those libraries are built by this script
(i.e. if there is no available and satisfactory package)!
@@ -351,7 +360,10 @@ ARGUMENTS_INFO="\"COMMAND LINE ARGUMENTS:
Unconditionally skip Universal Scene Description installation/building.
--skip-xr-openxr
Unconditionally skip OpenXR-SDK installation/building.\""
Unconditionally skip OpenXR-SDK installation/building.
--skip-level-zero
Unconditionally skip OneAPI Level Zero installation/building.\""
# ----------------------------------------------------------------------------
# Main Vars
@@ -453,7 +465,7 @@ TBB_VERSION="2020"
TBB_VERSION_SHORT="2020"
TBB_VERSION_UPDATE="_U3" # Used for source packages...
TBB_VERSION_MIN="2018"
TBB_VERSION_MEX="2022"
TBB_VERSION_MEX="2021" # 2021 introduces 'oneTBB', which has lots of compatibility breakage with previous versions
TBB_FORCE_BUILD=false
TBB_FORCE_REBUILD=false
TBB_SKIP=false
@@ -555,7 +567,7 @@ OPENCOLLADA_FORCE_BUILD=false
OPENCOLLADA_FORCE_REBUILD=false
OPENCOLLADA_SKIP=false
EMBREE_VERSION="3.13.3"
EMBREE_VERSION="3.13.4"
EMBREE_VERSION_SHORT="3.13"
EMBREE_VERSION_MIN="3.13"
EMBREE_VERSION_MEX="4.0"
@@ -573,14 +585,13 @@ OIDN_SKIP=false
ISPC_VERSION="1.17.0"
FFMPEG_VERSION="4.4"
FFMPEG_VERSION_SHORT="4.4"
FFMPEG_VERSION_MIN="3.0"
FFMPEG_VERSION_MEX="5.0"
FFMPEG_FORCE_BUILD=false
FFMPEG_FORCE_REBUILD=false
FFMPEG_SKIP=false
_ffmpeg_list_sep=";"
LEVEL_ZERO_VERSION="1.7.15"
LEVEL_ZERO_VERSION_SHORT="1.7"
LEVEL_ZERO_VERSION_MIN="1.7"
LEVEL_ZERO_VERSION_MEX="2.0"
LEVEL_ZERO_FORCE_BUILD=false
LEVEL_ZERO_FORCE_REBUILD=false
LEVEL_ZERO_SKIP=false
XR_OPENXR_VERSION="1.0.22"
XR_OPENXR_VERSION_SHORT="1.0"
@@ -590,6 +601,15 @@ XR_OPENXR_FORCE_BUILD=false
XR_OPENXR_FORCE_REBUILD=false
XR_OPENXR_SKIP=false
FFMPEG_VERSION="5.0"
FFMPEG_VERSION_SHORT="5.0"
FFMPEG_VERSION_MIN="4.0"
FFMPEG_VERSION_MEX="6.0"
FFMPEG_FORCE_BUILD=false
FFMPEG_FORCE_REBUILD=false
FFMPEG_SKIP=false
_ffmpeg_list_sep=";"
# FFMPEG optional libs.
VORBIS_USE=false
VORBIS_DEV=""
@@ -615,9 +635,6 @@ MP3LAME_DEV=""
OPENJPEG_USE=false
OPENJPEG_DEV=""
# Whether to use system GLEW or not (OpenSubDiv needs recent glew to work).
NO_SYSTEM_GLEW=false
# Switch to english language, else some things (like check_package_DEB()) won't work!
LANG_BACK=$LANG
LANG=""
@@ -781,6 +798,12 @@ while true; do
XR_OPENXR_VERSION_SHORT=$XR_OPENXR_VERSION
shift; shift; continue
;;
--ver-level-zero)
LEVEL_ZERO_VERSION="$2"
LEVEL_ZERO_VERSION_MIN=$LEVEL_ZERO_VERSION
LEVEL_ZERO_VERSION_SHORT=$LEVEL_ZERO_VERSION
shift; shift; continue
;;
--build-all)
PYTHON_FORCE_BUILD=true
BOOST_FORCE_BUILD=true
@@ -800,6 +823,7 @@ while true; do
ALEMBIC_FORCE_BUILD=true
USD_FORCE_BUILD=true
XR_OPENXR_FORCE_BUILD=true
LEVEL_ZERO_FORCE_BUILD=true
shift; continue
;;
--build-python)
@@ -857,6 +881,9 @@ while true; do
--build-xr-openxr)
XR_OPENXR_FORCE_BUILD=true; shift; continue
;;
--build-level-zero)
LEVEL_ZERO_FORCE_BUILD=true; shift; continue
;;
--force-all)
PYTHON_FORCE_REBUILD=true
BOOST_FORCE_REBUILD=true
@@ -876,6 +903,7 @@ while true; do
ALEMBIC_FORCE_REBUILD=true
USD_FORCE_REBUILD=true
XR_OPENXR_FORCE_REBUILD=true
LEVEL_ZERO_FORCE_REBUILD=true
shift; continue
;;
--force-python)
@@ -933,6 +961,9 @@ while true; do
--force-xr-openxr)
XR_OPENXR_FORCE_REBUILD=true; shift; continue
;;
--force-level-zero)
LEVEL_ZERO_FORCE_REBUILD=true; shift; continue
;;
--skip-python)
PYTHON_SKIP=true; shift; continue
;;
@@ -987,6 +1018,9 @@ while true; do
--skip-xr-openxr)
XR_OPENXR_SKIP=true; shift; continue
;;
--skip-level-zero)
LEVEL_ZERO_SKIP=true; shift; continue
;;
--)
# no more arguments to parse
break
@@ -1128,14 +1162,16 @@ OIDN_SOURCE=( "https://github.com/OpenImageDenoise/oidn/releases/download/v${OID
ISPC_BINARY=( "https://github.com/ispc/ispc/releases/download/v${ISPC_VERSION}/ispc-v${ISPC_VERSION}-linux.tar.gz" )
FFMPEG_SOURCE=( "http://ffmpeg.org/releases/ffmpeg-$FFMPEG_VERSION.tar.bz2" )
XR_OPENXR_USE_REPO=false
XR_OPENXR_SOURCE=("https://github.com/KhronosGroup/OpenXR-SDK/archive/release-${XR_OPENXR_VERSION}.tar.gz")
XR_OPENXR_SOURCE_REPO=("https://github.com/KhronosGroup/OpenXR-SDK.git")
XR_OPENXR_REPO_UID="458984d7f59d1ae6dc1b597d94b02e4f7132eaba"
XR_OPENXR_REPO_BRANCH="master"
LEVEL_ZERO_SOURCE=("https://github.com/oneapi-src/level-zero/archive/refs/tags/v${LEVEL_ZERO_VERSION}.tar.gz")
FFMPEG_SOURCE=( "http://ffmpeg.org/releases/ffmpeg-$FFMPEG_VERSION.tar.bz2" )
# C++11 is required now
CXXFLAGS_BACK=$CXXFLAGS
CXXFLAGS="$CXXFLAGS -std=c++11"
@@ -1154,7 +1190,7 @@ Those libraries should be available as packages in all recent distributions (opt
* libx11, libxcursor, libxi, libxrandr, libxinerama (and other libx... as needed).
* libwayland-client0, libwayland-cursor0, libwayland-egl1, libxkbcommon0, libdbus-1-3, libegl1 (Wayland)
* libsqlite3, libzstd, libbz2, libssl, libfftw3, libxml2, libtinyxml, yasm, libyaml-cpp, flex.
* libsdl2, libglew, libpugixml, libpotrace, [libgmp], [libglewmx], fontconfig, [libharu/libhpdf].\""
* libsdl2, libglew, libpugixml, libpotrace, [libgmp], fontconfig, [libharu/libhpdf].\""
DEPS_SPECIFIC_INFO="\"BUILDABLE DEPENDENCIES:
@@ -1187,7 +1223,8 @@ You may also want to build them yourself (optional ones are [between brackets]):
* [OpenImageDenoise $OIDN_VERSION] (from $OIDN_SOURCE).
* [Alembic $ALEMBIC_VERSION] (from $ALEMBIC_SOURCE).
* [Universal Scene Description $USD_VERSION] (from $USD_SOURCE).
* [OpenXR-SDK $XR_OPENXR_VERSION] (from $XR_OPENXR_SOURCE).\""
* [OpenXR-SDK $XR_OPENXR_VERSION] (from $XR_OPENXR_SOURCE).
* [OneAPI Level Zero $LEVEL_ZERO_VERSION] (from $LEVEL_ZERO_SOURCE).\""
if [ "$DO_SHOW_DEPS" = true ]; then
PRINT ""
@@ -1647,7 +1684,7 @@ compile_TBB() {
fi
# To be changed each time we make edits that would modify the compiled result!
tbb_magic=0
tbb_magic=1
_init_tbb
# Force having own builds for the dependencies.
@@ -2656,14 +2693,13 @@ compile_OSD() {
mkdir build
cd build
if [ -d $INST/tbb ]; then
cmake_d="$cmake_d $cmake_d -D TBB_LOCATION=$INST/tbb"
fi
cmake_d="-D CMAKE_BUILD_TYPE=Release"
if [ -d $INST/tbb ]; then
cmake_d="$cmake_d -D TBB_LOCATION=$INST/tbb"
fi
cmake_d="$cmake_d -D CMAKE_INSTALL_PREFIX=$_inst"
# ptex is only needed when nicholas bishop is ready
cmake_d="$cmake_d -D NO_PTEX=1"
cmake_d="$cmake_d -D NO_CLEW=1 -D NO_CUDA=1 -D NO_OPENCL=1"
cmake_d="$cmake_d -D NO_CLEW=1 -D NO_CUDA=1 -D NO_OPENCL=1 -D NO_GLEW=1"
# maya plugin, docs, tutorials, regression tests and examples are not needed
cmake_d="$cmake_d -D NO_MAYA=1 -D NO_DOC=1 -D NO_TUTORIALS=1 -D NO_REGRESSION=1 -DNO_EXAMPLES=1"
@@ -3286,7 +3322,7 @@ compile_Embree() {
fi
# To be changed each time we make edits that would modify the compiled results!
embree_magic=10
embree_magic=11
_init_embree
# Force having own builds for the dependencies.
@@ -3346,7 +3382,7 @@ compile_Embree() {
cmake_d="$cmake_d -D EMBREE_TASKING_SYSTEM=TBB"
if [ -d $INST/tbb ]; then
make_d="$make_d EMBREE_TBB_ROOT=$INST/tbb"
cmake_d="$cmake_d -D EMBREE_TBB_ROOT=$INST/tbb"
fi
cmake $cmake_d ../
@@ -3485,7 +3521,7 @@ compile_OIDN() {
install_ISPC
# To be changed each time we make edits that would modify the compiled results!
oidn_magic=9
oidn_magic=10
_init_oidn
# Force having own builds for the dependencies.
@@ -3541,7 +3577,7 @@ compile_OIDN() {
cmake_d="$cmake_d -D ISPC_DIR_HINT=$_ispc_path_bin"
if [ -d $INST/tbb ]; then
make_d="$make_d TBB_ROOT=$INST/tbb"
cmake_d="$cmake_d -D TBB_ROOT=$INST/tbb"
fi
cmake $cmake_d ../
@@ -3822,6 +3858,103 @@ compile_XR_OpenXR_SDK() {
}
# ----------------------------------------------------------------------------
# Build OneAPI Level Zero library.
_init_level_zero() {
_src=$SRC/level-zero-$LEVEL_ZERO_VERSION
_git=false
_inst=$INST/level-zero-$LEVEL_ZERO_VERSION_SHORT
_inst_shortcut=$INST/level-zero
}
_update_deps_level_zero() {
:
}
clean_Level_Zero() {
_init_level_zero
if [ -d $_inst ]; then
# Force rebuilding the dependencies if needed.
_update_deps_level_zero false true
fi
_clean
}
compile_Level_Zero() {
if [ "$NO_BUILD" = true ]; then
WARNING "--no-build enabled, Level Zero will not be compiled!"
return
fi
# To be changed each time we make edits that would modify the compiled result!
level_zero_magic=1
_init_level_zero
# Force having own builds for the dependencies.
_update_deps_level_zero true false
# Clean install if needed!
magic_compile_check level-zero-$LEVEL_ZERO_VERSION $level_zero_magic
if [ $? -eq 1 -o "$LEVEL_ZERO_FORCE_REBUILD" = true ]; then
clean_Level_Zero
fi
if [ ! -d $_inst ]; then
INFO "Building Level-Zero-$LEVEL_ZERO_VERSION"
# Force rebuilding the dependencies.
_update_deps_level_zero true true
prepare_inst
if [ ! -d $_src ]; then
mkdir -p $SRC
download LEVEL_ZERO_SOURCE[@] "$_src.tar.gz"
INFO "Unpacking Level-Zero-$LEVEL_ZERO_VERSION"
tar -C $SRC -xf $_src.tar.gz
fi
cd $_src
# Always refresh the whole build!
if [ -d build ]; then
rm -rf build
fi
mkdir build
cd build
# Keep flags in sync with LEVEL_ZERO_EXTRA_ARGS in level-zero.cmake!
cmake_d="-D CMAKE_BUILD_TYPE=Release"
cmake_d="$cmake_d -D CMAKE_INSTALL_PREFIX=$_inst"
cmake $cmake_d ..
make -j$THREADS && make install
make clean
if [ ! -d $_inst ]; then
ERROR "Level-Zero-$LEVEL_ZERO_VERSION failed to compile, exiting"
exit 1
fi
magic_compile_set level-zero-$LEVEL_ZERO_VERSION $level_zero_magic
cd $CWD
INFO "Done compiling Level-Zero-$LEVEL_ZERO_VERSION!"
else
INFO "Own Level-Zero-$LEVEL_ZERO_VERSION is up to date, nothing to do!"
INFO "If you want to force rebuild of this lib, use the --force-level-zero option."
fi
if [ -d $_inst ]; then
_create_inst_shortcut
fi
run_ldconfig "level-zero"
}
# ----------------------------------------------------------------------------
# Install on DEB-like
@@ -3925,7 +4058,6 @@ install_DEB() {
libopenal-dev libglew-dev yasm \
libsdl2-dev libfftw3-dev patch bzip2 libxml2-dev libtinyxml-dev libjemalloc-dev \
libgmp-dev libpugixml-dev libpotrace-dev libhpdf-dev libzstd-dev libpystring-dev"
# libglewmx-dev (broken in deb testing currently...)
VORBIS_USE=true
OGG_USE=true
@@ -4034,7 +4166,7 @@ install_DEB() {
fi
fi
# Check cmake/glew versions and disable features for older distros.
# Check cmake version and disable features for older distros.
# This is so Blender can at least compile.
PRINT ""
_cmake=`get_package_version_DEB cmake`
@@ -4051,28 +4183,6 @@ install_DEB() {
fi
fi
PRINT ""
_glew=`get_package_version_DEB libglew-dev`
if [ -z $_glew ]; then
# Stupid virtual package in Ubuntu 12.04 doesn't show version number...
_glew=`apt-cache showpkg libglew-dev|tail -n1|awk '{print $2}'|sed 's/-.*//'`
fi
version_ge $_glew "1.9.0"
if [ $? -eq 1 ]; then
version_ge $_glew "1.7.0"
if [ $? -eq 1 ]; then
WARNING "OpenSubdiv disabled because GLEW-$_glew is not enough"
WARNING "Blender will not use system GLEW library"
OSD_SKIP=true
NO_SYSTEM_GLEW=true
else
WARNING "OpenSubdiv will compile with GLEW-$_glew but with limited capability"
WARNING "Blender will not use system GLEW library"
NO_SYSTEM_GLEW=true
fi
fi
PRINT ""
_do_compile_python=false
if [ "$PYTHON_SKIP" = true ]; then
@@ -4458,6 +4568,18 @@ install_DEB() {
PRINT ""
compile_XR_OpenXR_SDK
fi
PRINT ""
if [ "$LEVEL_ZERO_SKIP" = true ]; then
WARNING "Skipping Level Zero installation, as requested..."
elif [ "$LEVEL_ZERO_FORCE_BUILD" = true ]; then
INFO "Forced Level Zero building, as requested..."
compile_Level_Zero
else
# No package currently!
PRINT ""
compile_Level_Zero
fi
}
@@ -5144,6 +5266,18 @@ install_RPM() {
# No package currently!
compile_XR_OpenXR_SDK
fi
PRINT ""
if [ "$LEVEL_ZERO_SKIP" = true ]; then
WARNING "Skipping Level Zero installation, as requested..."
elif [ "$LEVEL_ZERO_FORCE_BUILD" = true ]; then
INFO "Forced Level Zero building, as requested..."
compile_Level_Zero
else
# No package currently!
PRINT ""
compile_Level_Zero
fi
}
@@ -5721,6 +5855,18 @@ install_ARCH() {
# No package currently!
compile_XR_OpenXR_SDK
fi
PRINT ""
if [ "$LEVEL_ZERO_SKIP" = true ]; then
WARNING "Skipping Level Zero installation, as requested..."
elif [ "$LEVEL_ZERO_FORCE_BUILD" = true ]; then
INFO "Forced Level Zero building, as requested..."
compile_Level_Zero
else
# No package currently!
PRINT ""
compile_Level_Zero
fi
}
@@ -5895,6 +6041,14 @@ install_OTHER() {
INFO "Forced OpenXR-SDK building, as requested..."
compile_XR_OpenXR_SDK
fi
PRINT ""
if [ "$LEVEL_ZERO_SKIP" = true ]; then
WARNING "Skipping Level Zero installation, as requested..."
elif [ "$LEVEL_ZERO_FORCE_BUILD" = true ]; then
INFO "Forced Level Zero building, as requested..."
compile_Level_Zero
fi
}
# ----------------------------------------------------------------------------
@@ -6109,12 +6263,6 @@ print_info() {
fi
fi
if [ "$NO_SYSTEM_GLEW" = true ]; then
_1="-D WITH_SYSTEM_GLEW=OFF"
PRINT " $_1"
_buildargs="$_buildargs $_1"
fi
if [ "$FFMPEG_SKIP" = false ]; then
_1="-D WITH_CODEC_FFMPEG=ON"
PRINT " $_1"
@@ -6137,6 +6285,18 @@ print_info() {
fi
fi
# Not yet available in Blender.
#~ if [ "$LEVEL_ZERO_SKIP" = false ]; then
#~ _1="-D WITH_LEVEL_ZERO=ON"
#~ PRINT " $_1"
#~ _buildargs="$_buildargs $_1"
#~ if [ -d $INST/level-zero ]; then
#~ _1="-D LEVEL_ZERO_ROOT_DIR=$INST/level-zero"
#~ PRINT " $_1"
#~ _buildargs="$_buildargs $_1"
#~ fi
#~ fi
PRINT ""
PRINT "Or even simpler, just run (in your blender-source dir):"
PRINT " make -j$THREADS BUILD_CMAKE_ARGS=\"$_buildargs\""

View File

@@ -0,0 +1,54 @@
diff -Naur external_dpcpp.orig/sycl/source/CMakeLists.txt external_dpcpp/sycl/source/CMakeLists.txt
--- external_dpcpp.orig/sycl/source/CMakeLists.txt 2022-05-20 04:19:45.067771362 +0000
+++ external_dpcpp/sycl/source/CMakeLists.txt 2022-05-20 04:21:49.708025048 +0000
@@ -66,10 +66,10 @@
target_compile_options(${LIB_OBJ_NAME} PUBLIC
-fvisibility=hidden -fvisibility-inlines-hidden)
set(linker_script "${CMAKE_CURRENT_SOURCE_DIR}/ld-version-script.txt")
- set(abi_linker_script "${CMAKE_CURRENT_SOURCE_DIR}/abi_replacements_linux.txt")
- target_link_libraries(
- ${LIB_NAME} PRIVATE "-Wl,${abi_linker_script}")
- set_target_properties(${LIB_NAME} PROPERTIES LINK_DEPENDS ${abi_linker_script})
+# set(abi_linker_script "${CMAKE_CURRENT_SOURCE_DIR}/abi_replacements_linux.txt")
+# target_link_libraries(
+# ${LIB_NAME} PRIVATE "-Wl,${abi_linker_script}")
+# set_target_properties(${LIB_NAME} PROPERTIES LINK_DEPENDS ${abi_linker_script})
target_link_libraries(
${LIB_NAME} PRIVATE "-Wl,--version-script=${linker_script}")
set_target_properties(${LIB_NAME} PROPERTIES LINK_DEPENDS ${linker_script})
diff -Naur llvm-sycl-nightly-20220501.orig\opencl/CMakeLists.txt llvm-sycl-nightly-20220501\opencl/CMakeLists.txt
--- llvm-sycl-nightly-20220501.orig/opencl/CMakeLists.txt 2022-04-29 13:47:11 -0600
+++ llvm-sycl-nightly-20220501/opencl/CMakeLists.txt 2022-05-21 15:25:06 -0600
@@ -11,6 +11,11 @@
)
endif()
+# Blender: the code below is determined to use FetchContent_Declare;
+# temporarily allow it (but feed it our downloaded tarball
+# in the OpenCL_HEADERS variable).
+set(FETCHCONTENT_FULLY_DISCONNECTED OFF)
+
# Repo URLs
set(OCL_HEADERS_REPO
@@ -77,5 +82,6 @@
FetchContent_MakeAvailable(ocl-icd)
add_library(OpenCL-ICD ALIAS OpenCL)
+set(FETCHCONTENT_FULLY_DISCONNECTED ON)
add_subdirectory(opencl-aot)
diff -Naur llvm-sycl-nightly-20220208.orig/libdevice/cmake/modules/SYCLLibdevice.cmake llvm-sycl-nightly-20220208/libdevice/cmake/modules/SYCLLibdevice.cmake
--- llvm-sycl-nightly-20220208.orig/libdevice/cmake/modules/SYCLLibdevice.cmake 2022-02-08 09:17:24 -0700
+++ llvm-sycl-nightly-20220208/libdevice/cmake/modules/SYCLLibdevice.cmake 2022-05-24 11:35:51 -0600
@@ -36,7 +36,9 @@
add_custom_target(libsycldevice-obj)
add_custom_target(libsycldevice-spv)
-add_custom_target(libsycldevice DEPENDS
+# Blender: add ALL here, otherwise this target will not be built,
+# causing an error due to missing files during the install phase.
+add_custom_target(libsycldevice ALL DEPENDS
libsycldevice-obj
libsycldevice-spv)

View File

@@ -1,30 +1,37 @@
diff -Naur orig/common/sys/platform.h external_embree/common/sys/platform.h
--- orig/common/sys/platform.h 2020-05-13 23:08:53 -0600
+++ external_embree/common/sys/platform.h 2020-06-13 17:40:26 -0600
@@ -84,8 +84,8 @@
////////////////////////////////////////////////////////////////////////////////
diff -Naur org/kernels/rtcore_config.h.in embree-3.13.4/kernels/rtcore_config.h.in
--- org/kernels/rtcore_config.h.in 2022-06-14 22:13:52 -0600
+++ embree-3.13.4/kernels/rtcore_config.h.in 2022-06-24 15:20:12 -0600
@@ -14,6 +14,7 @@
#cmakedefine01 EMBREE_MIN_WIDTH
#define RTC_MIN_WIDTH EMBREE_MIN_WIDTH
+#cmakedefine EMBREE_STATIC_LIB
#cmakedefine EMBREE_API_NAMESPACE
#if defined(EMBREE_API_NAMESPACE)
diff --git a/kernels/CMakeLists.txt b/kernels/CMakeLists.txt
index 7c2f43d..106b1d5 100644
--- a/kernels/CMakeLists.txt
+++ b/kernels/CMakeLists.txt
@@ -201,6 +201,12 @@ embree_files(EMBREE_LIBRARY_FILES_AVX512 ${AVX512})
#message("AVX2: ${EMBREE_LIBRARY_FILES_AVX2}")
#message("AVX512: ${EMBREE_LIBRARY_FILES_AVX512}")
#ifdef __WIN32__
-#define dll_export __declspec(dllexport)
-#define dll_import __declspec(dllimport)
+#define dll_export
+#define dll_import
#else
#define dll_export __attribute__ ((visibility ("default")))
#define dll_import
diff --git orig/common/tasking/CMakeLists.txt external_embree/common/tasking/CMakeLists.txt
--- orig/common/tasking/CMakeLists.txt
+++ external_embree/common/tasking/CMakeLists.txt
@@ -27,7 +27,11 @@
else()
# If not found try getting older TBB via module (FindTBB.cmake)
unset(TBB_DIR CACHE)
- find_package(TBB 4.1 REQUIRED tbb)
+ if (TBB_STATIC_LIB)
+ find_package(TBB 4.1 REQUIRED tbb_static)
+ else()
+ find_package(TBB 4.1 REQUIRED tbb)
+ endif()
if (TBB_FOUND)
TARGET_LINK_LIBRARIES(tasking PUBLIC TBB)
TARGET_INCLUDE_DIRECTORIES(tasking PUBLIC "${TBB_INCLUDE_DIRS}")
+# Bundle Neon2x into the main static library.
+IF(EMBREE_ISA_NEON2X AND EMBREE_STATIC_LIB)
+ LIST(APPEND EMBREE_LIBRARY_FILES ${EMBREE_LIBRARY_FILES_AVX2})
+ LIST(REMOVE_DUPLICATES EMBREE_LIBRARY_FILES)
+ENDIF()
+
# replaces all .cpp files with a dummy file that includes that .cpp file
# this is to work around an ICC name mangling issue related to lambda functions under windows
MACRO (CreateISADummyFiles list isa)
@@ -277,7 +283,7 @@ IF (EMBREE_ISA_AVX AND EMBREE_LIBRARY_FILES_AVX)
ENDIF()
ENDIF()
-IF (EMBREE_ISA_AVX2 AND EMBREE_LIBRARY_FILES_AVX2)
+IF (EMBREE_ISA_AVX2 AND EMBREE_LIBRARY_FILES_AVX2 AND NOT (EMBREE_ISA_NEON2X AND EMBREE_STATIC_LIB))
DISABLE_STACK_PROTECTOR_FOR_INTERSECTORS(${EMBREE_LIBRARY_FILES_AVX2})
ADD_LIBRARY(embree_avx2 STATIC ${EMBREE_LIBRARY_FILES_AVX2})
TARGET_LINK_LIBRARIES(embree_avx2 PRIVATE tasking)

View File

@@ -0,0 +1,15 @@
diff --git a/configure.ac b/configure.ac
index c6f12d644..3c977a4e3 100644
--- a/configure.ac
+++ b/configure.ac
@@ -25,8 +25,10 @@
# autoconf requirements and initialization
AC_INIT([the fast lexical analyser generator],[2.6.4],[flex-help@lists.sourceforge.net],[flex])
+AC_PREREQ([2.60])
AC_CONFIG_SRCDIR([src/scan.l])
AC_CONFIG_AUX_DIR([build-aux])
+AC_USE_SYSTEM_EXTENSIONS
LT_INIT
AM_INIT_AUTOMAKE([1.15 -Wno-portability foreign std-options dist-lzip parallel-tests subdir-objects])
AC_CONFIG_HEADER([src/config.h])

View File

@@ -0,0 +1,44 @@
diff -Naur external_igc_opencl_clang.orig/CMakeLists.txt external_igc_opencl_clang/CMakeLists.txt
--- external_igc_opencl_clang.orig/CMakeLists.txt 2022-03-16 05:51:10 -0600
+++ external_igc_opencl_clang/CMakeLists.txt 2022-05-23 10:40:09 -0600
@@ -126,22 +126,24 @@
)
endif()
-
- set(SPIRV_BASE_REVISION llvm_release_110)
- set(TARGET_BRANCH "ocl-open-110")
- get_filename_component(LLVM_MONOREPO_DIR ${LLVM_SOURCE_DIR} DIRECTORY)
- set(LLVM_PATCHES_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/patches/llvm
- ${CMAKE_CURRENT_SOURCE_DIR}/patches/clang)
- apply_patches(${LLVM_MONOREPO_DIR}
- "${LLVM_PATCHES_DIRS}"
- ${LLVM_BASE_REVISION}
- ${TARGET_BRANCH}
- ret)
- apply_patches(${SPIRV_SOURCE_DIR}
- ${CMAKE_CURRENT_SOURCE_DIR}/patches/spirv
- ${SPIRV_BASE_REVISION}
- ${TARGET_BRANCH}
- ret)
+ #
+ # Blender: we apply these manually in igc.cmake instead.
+ #
+ #set(SPIRV_BASE_REVISION llvm_release_110)
+ #set(TARGET_BRANCH "ocl-open-110")
+ #get_filename_component(LLVM_MONOREPO_DIR ${LLVM_SOURCE_DIR} DIRECTORY)
+ #set(LLVM_PATCHES_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/patches/llvm
+ # ${CMAKE_CURRENT_SOURCE_DIR}/patches/clang)
+ #apply_patches(${LLVM_MONOREPO_DIR}
+ # "${LLVM_PATCHES_DIRS}"
+ # ${LLVM_BASE_REVISION}
+ # ${TARGET_BRANCH}
+ # ret)
+ #apply_patches(${SPIRV_SOURCE_DIR}
+ # ${CMAKE_CURRENT_SOURCE_DIR}/patches/spirv
+ # ${SPIRV_BASE_REVISION}
+ # ${TARGET_BRANCH}
+ # ret)
endif(NOT USE_PREBUILT_LLVM)
#

View File

@@ -48,10 +48,13 @@ if "%4" == "nobuild" set dobuild=0
REM If Python is available, certain deps may try
REM to use this over the version we build; to prevent that,
REM make sure python is NOT in the path
for %%X in (python.exe) do (set PYTHON=%%~$PATH:X)
if EXIST "%PYTHON%" (
echo PYTHON found at %PYTHON% dependencies cannot be built with python available in the path
REM make sure pythonw is NOT in the path. We look for pythonw.exe
REM since windows apparently ships a python.exe that just opens up
REM the windows store but does not ship any actual python files that
REM could cause issues.
for %%X in (pythonw.exe) do (set PYTHONW=%%~$PATH:X)
if EXIST "%PYTHONW%" (
echo PYTHONW found at %PYTHONW% dependencies cannot be built with python available in the path
goto exit
)

View File

@@ -0,0 +1,56 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2021-2022 Intel Corporation
# - Find Level Zero library
# Find Level Zero headers and libraries needed by oneAPI implementation
# This module defines
# LEVEL_ZERO_LIBRARY, libraries to link against in order to use L0.
# LEVEL_ZERO_INCLUDE_DIR, directories where L0 headers can be found.
# LEVEL_ZERO_ROOT_DIR, The base directory to search for L0 files.
# This can also be an environment variable.
# LEVEL_ZERO_FOUND, If false, then don't try to use L0.
IF(NOT LEVEL_ZERO_ROOT_DIR AND NOT $ENV{LEVEL_ZERO_ROOT_DIR} STREQUAL "")
SET(LEVEL_ZERO_ROOT_DIR $ENV{LEVEL_ZERO_ROOT_DIR})
ENDIF()
SET(_level_zero_search_dirs
${LEVEL_ZERO_ROOT_DIR}
/usr/lib
/usr/local/lib
)
FIND_LIBRARY(_LEVEL_ZERO_LIBRARY
NAMES
ze_loader
HINTS
${_level_zero_search_dirs}
PATH_SUFFIXES
lib64 lib
)
FIND_PATH(_LEVEL_ZERO_INCLUDE_DIR
NAMES
level_zero/ze_api.h
HINTS
${_level_zero_search_dirs}
PATH_SUFFIXES
include
)
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(LevelZero DEFAULT_MSG _LEVEL_ZERO_LIBRARY _LEVEL_ZERO_INCLUDE_DIR)
IF(LevelZero_FOUND)
SET(LEVEL_ZERO_LIBRARY ${_LEVEL_ZERO_LIBRARY})
SET(LEVEL_ZERO_INCLUDE_DIR ${_LEVEL_ZERO_INCLUDE_DIR} ${_LEVEL_ZERO_INCLUDE_PARENT_DIR})
SET(LEVEL_ZERO_FOUND TRUE)
ELSE()
SET(LEVEL_ZERO_FOUND FALSE)
ENDIF()
MARK_AS_ADVANCED(
LEVEL_ZERO_LIBRARY
LEVEL_ZERO_INCLUDE_DIR
)

View File

@@ -175,7 +175,9 @@ FIND_PACKAGE_HANDLE_STANDARD_ARGS(PythonLibsUnix DEFAULT_MSG
IF(PYTHONLIBSUNIX_FOUND)
# Assign cache items
SET(PYTHON_INCLUDE_DIRS ${PYTHON_INCLUDE_DIR} ${PYTHON_INCLUDE_CONFIG_DIR})
SET(PYTHON_LIBRARIES ${PYTHON_LIBRARY})
IF(NOT WITH_PYTHON_MODULE)
SET(PYTHON_LIBRARIES ${PYTHON_LIBRARY})
ENDIF()
FIND_FILE(PYTHON_SITE_PACKAGES
NAMES

View File

@@ -0,0 +1,88 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2021-2022 Intel Corporation
# - Find SYCL library
# Find the native SYCL header and libraries needed by oneAPI implementation
# This module defines
# SYCL_COMPILER, compiler which will be used for compilation of SYCL code
# SYCL_LIBRARY, libraries to link against in order to use SYCL.
# SYCL_INCLUDE_DIR, directories where SYCL headers can be found
# SYCL_ROOT_DIR, The base directory to search for SYCL files.
# This can also be an environment variable.
# SYCL_FOUND, If false, then don't try to use SYCL.
IF(NOT SYCL_ROOT_DIR AND NOT $ENV{SYCL_ROOT_DIR} STREQUAL "")
SET(SYCL_ROOT_DIR $ENV{SYCL_ROOT_DIR})
ENDIF()
SET(_sycl_search_dirs
${SYCL_ROOT_DIR}
/usr/lib
/usr/local/lib
/opt/intel/oneapi/compiler/latest/linux/
C:/Program\ Files\ \(x86\)/Intel/oneAPI/compiler/latest/windows
)
# Find DPC++ compiler.
# Since the compiler name may conflict with the system-wide
# Clang, start by looking for either a dpcpp or clang++ binary in the
# given list of search paths only. If that fails, try to look for a
# system-wide dpcpp binary.
FIND_PROGRAM(SYCL_COMPILER
NAMES
dpcpp
clang++
HINTS
${_sycl_search_dirs}
PATH_SUFFIXES
bin
NO_CMAKE_FIND_ROOT_PATH
NAMES_PER_DIR
)
# NOTE: No clang++ here so that we do not pick up a system-wide CLang
# compiler.
if(NOT SYCL_COMPILER)
FIND_PROGRAM(SYCL_COMPILER
NAMES
dpcpp
HINTS
${_sycl_search_dirs}
PATH_SUFFIXES
bin
)
endif()
FIND_LIBRARY(SYCL_LIBRARY
NAMES
sycl
HINTS
${_sycl_search_dirs}
PATH_SUFFIXES
lib64 lib
)
FIND_PATH(SYCL_INCLUDE_DIR
NAMES
CL/sycl.hpp
HINTS
${_sycl_search_dirs}
PATH_SUFFIXES
include
include/sycl
)
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(SYCL DEFAULT_MSG SYCL_LIBRARY SYCL_INCLUDE_DIR)
IF(SYCL_FOUND)
get_filename_component(_SYCL_INCLUDE_PARENT_DIR ${SYCL_INCLUDE_DIR} DIRECTORY)
SET(SYCL_INCLUDE_DIR ${SYCL_INCLUDE_DIR} ${_SYCL_INCLUDE_PARENT_DIR})
ELSE()
SET(SYCL_SYCL_FOUND FALSE)
ENDIF()
MARK_AS_ADVANCED(
_SYCL_INCLUDE_PARENT_DIR
)

View File

@@ -1,8 +1,6 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# <pep8 compliant>
"""
Example linux usage
python3 ~/blender-git/blender/build_files/cmake/cmake_netbeans_project.py ~/blender-git/cmake

View File

@@ -1,8 +1,6 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# <pep8 compliant>
r"""
Example Linux usage:
python ~/blender-git/blender/build_files/cmake/cmake_qtcreator_project.py --build-dir ~/blender-git/cmake

View File

@@ -1,8 +1,6 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# <pep8 compliant>
import project_source_info
import subprocess
import sys

View File

@@ -1,8 +1,6 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# <pep8 compliant>
import project_source_info
import subprocess
import sys

View File

@@ -1,8 +1,6 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# <pep8 compliant>
CHECKER_IGNORE_PREFIX = [
"extern",
"intern/moto",

View File

@@ -1,8 +1,6 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# <pep8 compliant>
CHECKER_IGNORE_PREFIX = [
"extern",
"intern/moto",

View File

@@ -1,8 +1,6 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# <pep8 compliant>
CHECKER_IGNORE_PREFIX = [
"extern",
"intern/moto",

View File

@@ -37,6 +37,9 @@ set(WITH_IMAGE_TIFF OFF CACHE BOOL "" FORCE)
set(WITH_IMAGE_WEBP OFF CACHE BOOL "" FORCE)
set(WITH_INPUT_NDOF OFF CACHE BOOL "" FORCE)
set(WITH_INTERNATIONAL OFF CACHE BOOL "" FORCE)
set(WITH_IO_STL OFF CACHE BOOL "" FORCE)
set(WITH_IO_WAVEFRONT_OBJ OFF CACHE BOOL "" FORCE)
set(WITH_IO_GPENCIL OFF CACHE BOOL "" FORCE)
set(WITH_JACK OFF CACHE BOOL "" FORCE)
set(WITH_LIBMV OFF CACHE BOOL "" FORCE)
set(WITH_LLVM OFF CACHE BOOL "" FORCE)

View File

@@ -70,7 +70,7 @@ if(NOT WIN32)
set(WITH_JACK ON CACHE BOOL "" FORCE)
endif()
if(WIN32)
set(WITH_WASAPI ON CACHE BOOL "" FORCE)
set(WITH_WASAPI ON CACHE BOOL "" FORCE)
endif()
if(UNIX AND NOT APPLE)
set(WITH_DOC_MANPAGE ON CACHE BOOL "" FORCE)
@@ -78,6 +78,11 @@ if(UNIX AND NOT APPLE)
set(WITH_PULSEAUDIO ON CACHE BOOL "" FORCE)
set(WITH_X11_XINPUT ON CACHE BOOL "" FORCE)
set(WITH_X11_XF86VMODE ON CACHE BOOL "" FORCE)
# Disable oneAPI on Linux for the time being.
# The AoT compilation takes too long to be used officially in the buildbot CI/CD and the JIT
# compilation has ABI compatibility issues when builds made on CentOS are run on Ubuntu.
set(WITH_CYCLES_DEVICE_ONEAPI OFF CACHE BOOL "" FORCE)
endif()
if(NOT APPLE)
set(WITH_XR_OPENXR ON CACHE BOOL "" FORCE)
@@ -86,4 +91,8 @@ if(NOT APPLE)
set(WITH_CYCLES_CUDA_BINARIES ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_CUBIN_COMPILER OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES_HIP_BINARIES ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_DEVICE_ONEAPI ON CACHE BOOL "" FORCE)
# Disable AoT kernel compilation until the buildbot can deliver the kernels in a reasonable time.
set(WITH_CYCLES_ONEAPI_BINARIES OFF CACHE BOOL "" FORCE)
endif()

View File

@@ -38,9 +38,15 @@ if(EXISTS ${LIBDIR})
message(STATUS "Using pre-compiled LIBDIR: ${LIBDIR}")
file(GLOB LIB_SUBDIRS ${LIBDIR}/*)
# Ignore Mesa software OpenGL libraries, they are not intended to be
# linked against but to optionally override at runtime.
list(REMOVE_ITEM LIB_SUBDIRS ${LIBDIR}/mesa)
# Ignore DPC++ as it contains its own copy of LLVM/Clang, which should
# never be discovered when linking Blender.
list(REMOVE_ITEM LIB_SUBDIRS ${LIBDIR}/dpcpp)
# NOTE: Make sure the "proper" compiled zlib comes before the one
# which is a part of OpenCollada. They have different ABIs, and we
# do need to use the official one.
@@ -271,6 +277,18 @@ if(WITH_CYCLES AND WITH_CYCLES_OSL)
endif()
endif()
if(WITH_CYCLES_DEVICE_ONEAPI)
set(CYCLES_LEVEL_ZERO ${LIBDIR}/level-zero CACHE PATH "Path to Level Zero installation")
if(EXISTS ${CYCLES_LEVEL_ZERO} AND NOT LEVEL_ZERO_ROOT_DIR)
set(LEVEL_ZERO_ROOT_DIR ${CYCLES_LEVEL_ZERO})
endif()
set(CYCLES_SYCL ${LIBDIR}/dpcpp CACHE PATH "Path to DPC++ and SYCL installation")
if(EXISTS ${CYCLES_SYCL} AND NOT SYCL_ROOT_DIR)
set(SYCL_ROOT_DIR ${CYCLES_SYCL})
endif()
endif()
if(WITH_OPENVDB)
find_package_wrapper(OpenVDB)
find_package_wrapper(Blosc)
@@ -613,17 +631,42 @@ if(WITH_GHOST_WAYLAND)
pkg_check_modules(wayland-scanner REQUIRED wayland-scanner)
pkg_check_modules(xkbcommon REQUIRED xkbcommon)
pkg_check_modules(wayland-cursor REQUIRED wayland-cursor)
pkg_check_modules(dbus REQUIRED dbus-1)
set(WITH_GL_EGL ON)
if(WITH_GHOST_WAYLAND_DBUS)
pkg_check_modules(dbus REQUIRED dbus-1)
endif()
if(WITH_GHOST_WAYLAND_LIBDECOR)
pkg_check_modules(libdecor REQUIRED libdecor-0>=0.1)
endif()
list(APPEND PLATFORM_LINKLIBS
${wayland-client_LINK_LIBRARIES}
${wayland-egl_LINK_LIBRARIES}
${xkbcommon_LINK_LIBRARIES}
${wayland-cursor_LINK_LIBRARIES}
${dbus_LINK_LIBRARIES}
)
if(NOT WITH_GHOST_WAYLAND_DYNLOAD)
list(APPEND PLATFORM_LINKLIBS
${wayland-client_LINK_LIBRARIES}
${wayland-egl_LINK_LIBRARIES}
${wayland-cursor_LINK_LIBRARIES}
)
endif()
if(WITH_GHOST_WAYLAND_DBUS)
list(APPEND PLATFORM_LINKLIBS
${dbus_LINK_LIBRARIES}
)
add_definitions(-DWITH_GHOST_WAYLAND_DBUS)
endif()
if(WITH_GHOST_WAYLAND_LIBDECOR)
if(NOT WITH_GHOST_WAYLAND_DYNLOAD)
list(APPEND PLATFORM_LINKLIBS
${libdecor_LIBRARIES}
)
endif()
add_definitions(-DWITH_GHOST_WAYLAND_LIBDECOR)
endif()
endif()
if(WITH_GHOST_X11)

View File

@@ -950,3 +950,6 @@ endif()
set(ZSTD_INCLUDE_DIRS ${LIBDIR}/zstd/include)
set(ZSTD_LIBRARIES ${LIBDIR}/zstd/lib/zstd_static.lib)
set(LEVEL_ZERO_ROOT_DIR ${LIBDIR}/level_zero)
set(SYCL_ROOT_DIR ${LIBDIR}/dpcpp)

View File

@@ -1,8 +1,6 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# <pep8 compliant>
"""
Module for accessing project file data for Blender.

View File

@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# <pep8 compliant>
__all__ = (
"build_info",
"SOURCE_DIR",

View File

@@ -54,6 +54,8 @@ buildbot:
version: '10.1.243'
cuda11:
version: '11.4.1'
hip:
version: '5.2.21440'
optix:
version: '7.3.0'
cmake:

View File

@@ -1,8 +1,6 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# <pep8 compliant>
import os
import shutil
import subprocess

View File

@@ -11,8 +11,6 @@ where <path-to-blender> is the path to the Blender executable,
and <output-filename> is where to write the generated man page.
'''
# <pep8 compliant>
import argparse
import os
import subprocess

View File

@@ -1,12 +1,12 @@
sphinx==4.1.1
sphinx==5.0.1
# Sphinx dependencies that are important
Jinja2==3.0.1
Pygments==2.10.0
Jinja2==3.1.2
Pygments==2.12.0
docutils==0.17.1
snowballstemmer==2.1.0
babel==2.9.1
requests==2.26.0
snowballstemmer==2.2.0
babel==2.10.1
requests==2.27.1
# Only needed to match the theme used for the official documentation.
# Without this theme, the default theme will be used.

View File

@@ -40,15 +40,6 @@ As well as pep8 we have additional conventions used for Blender Python scripts:
- pep8 also defines that lines should not exceed 79 characters,
we have decided that this is too restrictive so it is optional per script.
Periodically we run checks for pep8 compliance on Blender scripts,
for scripts to be included in this check add this line as a comment at the top of the script:
``# <pep8 compliant>``
To enable line length checks use this instead:
``# <pep8-80 compliant>``
User Interface Layout
=====================

View File

@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# <pep8 compliant>
# This is a quite stupid script which extracts bmesh api docs from
# 'bmesh_opdefines.c' in order to avoid having to add a lot of introspection
# data access into the api.

View File

@@ -1,61 +1,111 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# <pep8 compliant>
"""
Dump the python API into a text file so we can generate changelogs.
---------------
output from this tool should be added into "doc/python_api/rst/change_log.rst"
Dump the python API into a JSON file, or generate changelogs from those JSON API dumps.
# dump api blender_version.py in CWD
blender --background --python doc/python_api/sphinx_changelog_gen.py -- --dump
Typically, changelog output from this tool should be added into "doc/python_api/rst/change_log.rst"
# create changelog
API dump files are saved together with the generated API doc on the server, with a general index file.
This way the changelog generation simply needs to re-download the previous version's dump for the diffing process.
---------------
# Dump api blender_version.json in CWD:
blender --background --factory-startup --python doc/python_api/sphinx_changelog_gen.py -- \
--indexpath="path/to/api/docs/api_dump_index.json" \
dump --filepath-out="path/to/api/docs/<version>/api_dump.json"
# Create changelog:
blender --background --factory-startup --python doc/python_api/sphinx_changelog_gen.py -- \
--api_from blender_2_63_0.py \
--api_to blender_2_64_0.py \
--api_out changes.rst
--indexpath="path/to/api/docs/api_dump_index.json" \
changelog --filepath-out doc/python_api/rst/change_log.rst
# Api comparison can also run without blender
# API comparison can also run without Blender; by default it will
# generate a changelog between the last two available versions listed in the index,
# unless input files are provided explicitly:
python doc/python_api/sphinx_changelog_gen.py -- \
--api_from blender_api_2_63_0.py \
--api_to blender_api_2_64_0.py \
--api_out changes.rst
--indexpath="path/to/api/docs/api_dump_index.json" \
changelog --filepath-in-from blender_api_2_63_0.json \
--filepath-in-to blender_api_2_64_0.json \
--filepath-out changes.rst
# Save the latest API dump in this folder, renaming it with its revision.
# This way the next person updating it doesn't need to build an old Blender only for that
--------------
API dump index format:
{[version_main, version_sub]: "<version>/api_dump.json", ...
}
API dump format:
[
[version_main, version_sub, version_path],
{"module.name":
{"parent.class":
{"basic_type", "member_name":
["Name", type, range, length, default, descr, f_args, f_arg_types, f_ret_types]}, ...
}, ...
}
]
"""
# format
'''
{"module.name":
{"parent.class":
{"basic_type", "member_name":
("Name", type, range, length, default, descr, f_args, f_arg_types, f_ret_types)}, ...
}, ...
}
'''
import json
import os
api_names = "basic_type" "name", "type", "range", "length", "default", "descr", "f_args", "f_arg_types", "f_ret_types"
API_BASIC_TYPE = 0
API_F_ARGS = 7
def api_dunp_fname():
import bpy
return "blender_api_%s.py" % "_".join([str(i) for i in bpy.app.version])
def api_version():
try:
import bpy
except:
return None, None
version = tuple(bpy.app.version[:2])
version_key = "%d.%d" % (version[0], version[1])
return version, version_key
def api_dump():
dump = {}
dump_module = dump["bpy.types"] = {}
def api_version_previous_in_index(index, version):
print("Searching for previous version to %s in %r" % (version, index))
version_prev = (version[0], version[1])
while True:
version_prev = (version_prev[0], version_prev[1] - 1)
if version_prev[1] < 0:
version_prev = (version_prev[0] - 1, 99)
if version_prev[0] < 0:
return None, None
version_prev_key = "%d.%d" % (version_prev[0], version_prev[1])
if version_prev_key in index:
print("Found previous version %s: %r" % (version_prev, index[version_prev_key]))
return version_prev, version_prev_key
class JSONEncoderAPIDump(json.JSONEncoder):
def default(self, o):
if o is ...:
return "..."
if isinstance(o, set):
return tuple(o)
return json.JSONEncoder.default(self, o)
def api_dump(args):
import rna_info
import inspect
version, version_key = api_version()
if version is None:
raise(ValueError("API dumps can only be generated from within Blender."))
dump = {}
dump_module = dump["bpy.types"] = {}
struct = rna_info.BuildRNAInfo()[0]
for struct_id, struct_info in sorted(struct.items()):
@@ -157,17 +207,25 @@ def api_dump():
)
del funcs
import pprint
filepath_out = args.filepath_out
with open(filepath_out, 'w', encoding='utf-8') as file_handle:
json.dump((version, dump), file_handle, cls=JSONEncoderAPIDump)
filename = api_dunp_fname()
filehandle = open(filename, 'w', encoding='utf-8')
tot = filehandle.write(pprint.pformat(dump, width=1))
filehandle.close()
print("%s, %d bytes written" % (filename, tot))
indexpath = args.indexpath
rootpath = os.path.dirname(indexpath)
if os.path.exists(indexpath):
with open(indexpath, 'r', encoding='utf-8') as file_handle:
index = json.load(file_handle)
else:
index = {}
index[version_key] = os.path.relpath(filepath_out, rootpath)
with open(indexpath, 'w', encoding='utf-8') as file_handle:
json.dump(index, file_handle)
print("API version %s dumped into %r, and index %r has been updated" % (version_key, filepath_out, indexpath))
def compare_props(a, b, fuzz=0.75):
# must be same basic_type, function != property
if a[0] != b[0]:
return False
@@ -182,15 +240,44 @@ def compare_props(a, b, fuzz=0.75):
return ((tot / totlen) >= fuzz)
def api_changelog(api_from, api_to, api_out):
def api_changelog(args):
indexpath = args.indexpath
filepath_in_from = args.filepath_in_from
filepath_in_to = args.filepath_in_to
filepath_out = args.filepath_out
file_handle = open(api_from, 'r', encoding='utf-8')
dict_from = eval(file_handle.read())
file_handle.close()
rootpath = os.path.dirname(indexpath)
file_handle = open(api_to, 'r', encoding='utf-8')
dict_to = eval(file_handle.read())
file_handle.close()
version, version_key = api_version()
if version is None and (filepath_in_from is None or filepath_in_to is None):
raise(ValueError("API dumps files must be given when ran outside of Blender."))
with open(indexpath, 'r', encoding='utf-8') as file_handle:
index = json.load(file_handle)
if filepath_in_to is None:
filepath_in_to = index.get(version_key, None)
if filepath_in_to is None:
raise(ValueError("Cannot find API dump file for Blender version " + str(version) + " in index file."))
print("Found to file: %r" % filepath_in_to)
if filepath_in_from is None:
version_from, version_from_key = api_version_previous_in_index(index, version)
if version_from is None:
raise(ValueError("No previous version of Blender could be found in the index."))
filepath_in_from = index.get(version_from_key, None)
if filepath_in_from is None:
raise(ValueError("Cannot find API dump file for previous Blender version " + str(version_from) + " in index file."))
print("Found from file: %r" % filepath_in_from)
with open(os.path.join(rootpath, filepath_in_from), 'r', encoding='utf-8') as file_handle:
_, dict_from = json.load(file_handle)
with open(os.path.join(rootpath, filepath_in_to), 'r', encoding='utf-8') as file_handle:
dump_version, dict_to = json.load(file_handle)
assert(tuple(dump_version) == version)
api_changes = []
@@ -251,63 +338,66 @@ def api_changelog(api_from, api_to, api_out):
# also document function argument changes
fout = open(api_out, 'w', encoding='utf-8')
fw = fout.write
# print(api_changes)
with open(filepath_out, 'w', encoding='utf-8') as fout:
fw = fout.write
# :class:`bpy_struct.id_data`
# Write header.
fw(""
":tocdepth: 2\n"
"\n"
"Blender API Change Log\n"
"**********************\n"
"\n"
".. note, this document is auto generated by sphinx_changelog_gen.py\n"
"\n"
"\n"
"%s to %s\n"
"============\n"
"\n" % (version_from_key, version_key))
def write_title(title, title_char):
fw("%s\n%s\n\n" % (title, title_char * len(title)))
def write_title(title, title_char):
fw("%s\n%s\n\n" % (title, title_char * len(title)))
for mod_id, class_id, props_moved, props_new, props_old, func_args in api_changes:
class_name = class_id.split(".")[-1]
title = mod_id + "." + class_name
write_title(title, "-")
for mod_id, class_id, props_moved, props_new, props_old, func_args in api_changes:
class_name = class_id.split(".")[-1]
title = mod_id + "." + class_name
write_title(title, "-")
if props_new:
write_title("Added", "^")
for prop_id in props_new:
fw("* :class:`%s.%s.%s`\n" % (mod_id, class_name, prop_id))
fw("\n")
if props_new:
write_title("Added", "^")
for prop_id in props_new:
fw("* :class:`%s.%s.%s`\n" % (mod_id, class_name, prop_id))
fw("\n")
if props_old:
write_title("Removed", "^")
for prop_id in props_old:
fw("* **%s**\n" % prop_id) # can't link to removed docs
fw("\n")
if props_old:
write_title("Removed", "^")
for prop_id in props_old:
fw("* **%s**\n" % prop_id) # can't link to removed docs
fw("\n")
if props_moved:
write_title("Renamed", "^")
for prop_id_old, prop_id in props_moved:
fw("* **%s** -> :class:`%s.%s.%s`\n" % (prop_id_old, mod_id, class_name, prop_id))
fw("\n")
if props_moved:
write_title("Renamed", "^")
for prop_id_old, prop_id in props_moved:
fw("* **%s** -> :class:`%s.%s.%s`\n" % (prop_id_old, mod_id, class_name, prop_id))
fw("\n")
if func_args:
write_title("Function Arguments", "^")
for func_id, args_old, args_new in func_args:
args_new = ", ".join(args_new)
args_old = ", ".join(args_old)
fw("* :class:`%s.%s.%s` (%s), *was (%s)*\n" % (mod_id, class_name, func_id, args_new, args_old))
fw("\n")
if func_args:
write_title("Function Arguments", "^")
for func_id, args_old, args_new in func_args:
args_new = ", ".join(args_new)
args_old = ", ".join(args_old)
fw("* :class:`%s.%s.%s` (%s), *was (%s)*\n" % (mod_id, class_name, func_id, args_new, args_old))
fw("\n")
fout.close()
print("Written: %r" % api_out)
print("Written: %r" % filepath_out)
def main():
def main(argv=None):
import sys
import os
import argparse
try:
import argparse
except ImportError:
print("Old Blender, just dumping")
api_dump()
return
argv = sys.argv
if argv is None:
argv = sys.argv
if "--" not in argv:
argv = [] # as if no args are passed
@@ -318,42 +408,42 @@ def main():
usage_text = "Run blender in background mode with this script: "
"blender --background --factory-startup --python %s -- [options]" % os.path.basename(__file__)
epilog = "Run this before releases"
parser = argparse.ArgumentParser(description=usage_text, epilog=epilog)
parser = argparse.ArgumentParser(description=usage_text,
epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"--dump", dest="dump", action='store_true',
help="When set the api will be dumped into blender_version.py")
"--indexpath", dest="indexpath", metavar='FILE', required=True,
help="Path of the JSON file containing the index of all available API dumps.")
parser.add_argument(
"--api_from", dest="api_from", metavar='FILE',
help="File to compare from (previous version)")
parser.add_argument(
"--api_to", dest="api_to", metavar='FILE',
help="File to compare from (current)")
parser.add_argument(
"--api_out", dest="api_out", metavar='FILE',
help="Output sphinx changelog")
parser_commands = parser.add_subparsers(required=True)
args = parser.parse_args(argv) # In this example we won't use the args
parser_dump = parser_commands.add_parser('dump', help="Dump the current Blender Python API into a JSON file.")
parser_dump.add_argument(
"--filepath-out", dest="filepath_out", metavar='FILE', required=True,
help="Path of the JSON file containing the dump of the API.")
parser_dump.set_defaults(func=api_dump)
if not argv:
print("No args given!")
parser.print_help()
return
parser_changelog = parser_commands.add_parser(
'changelog',
help="Generate the RST changelog page based on two Blender Python API JSON dumps.",
)
if args.dump:
api_dump()
else:
if args.api_from and args.api_to and args.api_out:
api_changelog(args.api_from, args.api_to, args.api_out)
else:
print("Error: --api_from/api_to/api_out args needed")
parser.print_help()
return
parser_changelog.add_argument(
"--filepath-in-from", dest="filepath_in_from", metavar='FILE', default=None,
help="JSON dump file to compare from (typically, previous version). "
"If not given, will be automatically determined from current Blender version and index file.")
parser_changelog.add_argument(
"--filepath-in-to", dest="filepath_in_to", metavar='FILE', default=None,
help="JSON dump file to compare to (typically, current version). "
"If not given, will be automatically determined from current Blender version and index file.")
parser_changelog.add_argument(
"--filepath-out", dest="filepath_out", metavar='FILE', required=True,
help="Output sphinx changelog RST file.")
parser_changelog.set_defaults(func=api_changelog)
print("batch job finished, exiting")
args = parser.parse_args(argv)
args.func(args)
if __name__ == "__main__":

View File

@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# <pep8 compliant>
"""
API dump in RST files
---------------------
@@ -78,6 +76,27 @@ SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
# See: D6261 for reference.
USE_ONLY_BUILTIN_RNA_TYPES = True
# Write a page for each static enum defined in:
# `source/blender/makesrna/RNA_enum_items.h` so the enums can be linked to instead of being expanded everywhere.
USE_SHARED_RNA_ENUM_ITEMS_STATIC = True
if USE_SHARED_RNA_ENUM_ITEMS_STATIC:
from _bpy import rna_enum_items_static
rna_enum_dict = rna_enum_items_static()
for key in ("DummyRNA_DEFAULT_items", "DummyRNA_NULL_items"):
del rna_enum_dict[key]
del key, rna_enum_items_static
# Build enum `{pointer: identifier}` map, so any enum property pointer can
# lookup an identifier using `InfoPropertyRNA.enum_pointer` as the key.
rna_enum_pointer_to_id_map = {
enum_prop.as_pointer(): key
for key, enum_items in rna_enum_dict.items()
# It's possible the first item is a heading (which has no identifier).
# skip these as the `EnumProperty.enum_items` does not expose them.
if (enum_prop := next(iter(enum_prop for enum_prop in enum_items if enum_prop.identifier), None))
}
def handle_args():
"""
@@ -122,6 +141,26 @@ def handle_args():
required=False,
)
parser.add_argument(
"--api-changelog-generate",
dest="changelog",
default=False,
action='store_true',
help="Generate the API changelog RST file "
"(default=False, requires `--api-dump-index-path` parameter)",
required=False,
)
parser.add_argument(
"--api-dump-index-path",
dest="api_dump_index_path",
metavar='FILE',
default=None,
help="Path to the API dump index JSON file "
"(required when `--api-changelog-generate` is True)",
required=False,
)
parser.add_argument(
"-o", "--output",
dest="output_dir",
@@ -495,6 +534,42 @@ if ARGS.sphinx_build_pdf:
sphinx_make_pdf_log = os.path.join(ARGS.output_dir, ".latex_make.log")
SPHINX_MAKE_PDF_STDOUT = open(sphinx_make_pdf_log, "w", encoding="utf-8")
# --------------------------------CHANGELOG GENERATION--------------------------------------
def generate_changelog():
import importlib.util
spec = importlib.util.spec_from_file_location(
"sphinx_changelog_gen",
os.path.abspath(os.path.join(SCRIPT_DIR, "sphinx_changelog_gen.py")),
)
sphinx_changelog_gen = importlib.util.module_from_spec(spec)
spec.loader.exec_module(sphinx_changelog_gen)
API_DUMP_INDEX_FILEPATH = ARGS.api_dump_index_path
API_DUMP_ROOT = os.path.dirname(API_DUMP_INDEX_FILEPATH)
API_DUMP_FILEPATH = os.path.abspath(os.path.join(API_DUMP_ROOT, BLENDER_VERSION_DOTS, "api_dump.json"))
API_CHANGELOG_FILEPATH = os.path.abspath(os.path.join(SPHINX_IN_TMP, "change_log.rst"))
sphinx_changelog_gen.main((
"--",
"--indexpath",
API_DUMP_INDEX_FILEPATH,
"dump",
"--filepath-out",
API_DUMP_FILEPATH,
))
sphinx_changelog_gen.main((
"--",
"--indexpath",
API_DUMP_INDEX_FILEPATH,
"changelog",
"--filepath-out",
API_CHANGELOG_FILEPATH,
))
# --------------------------------API DUMP--------------------------------------
# Lame, python won't give some access.
@@ -531,7 +606,7 @@ def import_value_from_module(module_name, import_name):
def execfile(filepath):
global_namespace = {"__file__": filepath, "__name__": "__main__"}
with open(filepath) as file_handle:
with open(filepath, encoding="utf-8") as file_handle:
exec(compile(file_handle.read(), filepath, 'exec'), global_namespace)
@@ -693,26 +768,6 @@ def write_indented_lines(ident, fn, text, strip=True):
fn(ident + l + "\n")
def pymethod2sphinx(ident, fw, identifier, py_func):
"""
class method to sphinx
"""
arg_str = inspect.formatargspec(*inspect.getargspec(py_func))
if arg_str.startswith("(self, "):
arg_str = "(" + arg_str[7:]
func_type = "method"
elif arg_str.startswith("(cls, "):
arg_str = "(" + arg_str[6:]
func_type = "classmethod"
else:
func_type = "staticmethod"
fw(ident + ".. %s:: %s%s\n\n" % (func_type, identifier, arg_str))
if py_func.__doc__:
write_indented_lines(ident + " ", fw, py_func.__doc__)
fw("\n")
def pyfunc2sphinx(ident, fw, module_name, type_name, identifier, py_func, is_class=True):
"""
function or class method to sphinx
@@ -1220,15 +1275,23 @@ def pycontext2sphinx(basepath):
# No need to check if there are duplicates yet as it's known there won't be.
unique.add(prop.identifier)
enum_descr_override = None
if USE_SHARED_RNA_ENUM_ITEMS_STATIC:
enum_descr_override = pyrna_enum2sphinx_shared_link(prop)
type_descr = prop.get_type_description(
class_fmt=":class:`bpy.types.%s`", collection_id=_BPY_PROP_COLLECTION_ID)
class_fmt=":class:`bpy.types.%s`",
collection_id=_BPY_PROP_COLLECTION_ID,
enum_descr_override=enum_descr_override,
)
fw(".. data:: %s\n\n" % prop.identifier)
if prop.description:
fw(" %s\n\n" % prop.description)
# Special exception, can't use generic code here for enums.
if prop.type == "enum":
enum_text = pyrna_enum2sphinx(prop)
# If the link has been written, no need to inline the enum items.
enum_text = "" if enum_descr_override else pyrna_enum2sphinx(prop)
if enum_text:
write_indented_lines(" ", fw, enum_text)
fw("\n")
@@ -1290,6 +1353,11 @@ def pyrna_enum2sphinx(prop, use_empty_descriptions=False):
Write a bullet point list of enum + descriptions.
"""
# Write a link to the enum if this is part of `rna_enum_pointer_to_id_map`.
if USE_SHARED_RNA_ENUM_ITEMS_STATIC:
if (result := pyrna_enum2sphinx_shared_link(prop)) is not None:
return result
if use_empty_descriptions:
ok = True
else:
@@ -1368,10 +1436,15 @@ def pyrna2sphinx(basepath):
kwargs["collection_id"] = _BPY_PROP_COLLECTION_ID
enum_descr_override = None
if USE_SHARED_RNA_ENUM_ITEMS_STATIC:
enum_descr_override = pyrna_enum2sphinx_shared_link(prop)
kwargs["enum_descr_override"] = enum_descr_override
type_descr = prop.get_type_description(**kwargs)
enum_text = pyrna_enum2sphinx(prop)
# If the link has been written, no need to inline the enum items.
enum_text = "" if enum_descr_override else pyrna_enum2sphinx(prop)
if prop.name or prop.description or enum_text:
fw(ident + ":%s%s:\n\n" % (id_name, identifier))
@@ -1456,7 +1529,8 @@ def pyrna2sphinx(basepath):
else:
fw(".. class:: %s\n\n" % struct_id)
fw(" %s\n\n" % struct.description)
write_indented_lines(" ", fw, struct.description, False)
fw("\n")
# Properties sorted in alphabetical order.
sorted_struct_properties = struct.properties[:]
@@ -1472,7 +1546,15 @@ def pyrna2sphinx(basepath):
if identifier in struct_blacklist:
continue
type_descr = prop.get_type_description(class_fmt=":class:`%s`", collection_id=_BPY_PROP_COLLECTION_ID)
enum_descr_override = None
if USE_SHARED_RNA_ENUM_ITEMS_STATIC:
enum_descr_override = pyrna_enum2sphinx_shared_link(prop)
type_descr = prop.get_type_description(
class_fmt=":class:`%s`",
collection_id=_BPY_PROP_COLLECTION_ID,
enum_descr_override=enum_descr_override,
)
# Read-only properties use "data" directive, variable properties use "attribute" directive.
if "readonly" in type_descr:
fw(" .. data:: %s\n" % identifier)
@@ -1489,7 +1571,8 @@ def pyrna2sphinx(basepath):
# Special exception, can't use generic code here for enums.
if prop.type == "enum":
enum_text = pyrna_enum2sphinx(prop)
# If the link has been written, no need to inline the enum items.
enum_text = "" if enum_descr_override else pyrna_enum2sphinx(prop)
if enum_text:
write_indented_lines(" ", fw, enum_text)
fw("\n")
@@ -1528,8 +1611,16 @@ def pyrna2sphinx(basepath):
for prop in func.return_values:
# TODO: pyrna_enum2sphinx for multiple return values... actually don't
# think we even use this but still!
enum_descr_override = None
if USE_SHARED_RNA_ENUM_ITEMS_STATIC:
enum_descr_override = pyrna_enum2sphinx_shared_link(prop)
type_descr = prop.get_type_description(
as_ret=True, class_fmt=":class:`%s`", collection_id=_BPY_PROP_COLLECTION_ID)
as_ret=True, class_fmt=":class:`%s`",
collection_id=_BPY_PROP_COLLECTION_ID,
enum_descr_override=enum_descr_override,
)
descr = prop.description
if not descr:
descr = prop.name
@@ -1778,11 +1869,16 @@ def write_sphinx_conf_py(basepath):
fw("extensions = ['sphinx.ext.intersphinx']\n\n")
fw("intersphinx_mapping = {'blender_manual': ('https://docs.blender.org/manual/en/dev/', None)}\n\n")
fw("project = 'Blender %s Python API'\n" % BLENDER_VERSION_STRING)
fw("master_doc = 'index'\n")
fw("copyright = u'Blender Foundation'\n")
fw("root_doc = 'index'\n")
fw("copyright = 'Blender Foundation'\n")
fw("version = '%s'\n" % BLENDER_VERSION_DOTS)
fw("release = '%s'\n" % BLENDER_VERSION_DOTS)
# Set this as the default is a super-set of Python3.
fw("highlight_language = 'python3'\n")
# No need to detect encoding.
fw("highlight_options = {'default': {'encoding': 'utf-8'}}\n\n")
# Quiet file not in table-of-contents warnings.
fw("exclude_patterns = [\n")
fw(" 'include__bmesh.rst',\n")
@@ -1978,6 +2074,14 @@ def write_rst_types_index(basepath):
fw(".. toctree::\n")
fw(" :glob:\n\n")
fw(" bpy.types.*\n\n")
# This needs to be included somewhere; while it's hidden, list it to avoid warnings.
if USE_SHARED_RNA_ENUM_ITEMS_STATIC:
fw(".. toctree::\n")
fw(" :hidden:\n")
fw(" :maxdepth: 1\n\n")
fw(" Shared Enum Types <bpy_types_enum_items/index>\n\n")
file.close()
@@ -2048,6 +2152,81 @@ def write_rst_data(basepath):
EXAMPLE_SET_USED.add("bpy.data")
def pyrna_enum2sphinx_shared_link(prop):
"""
Return a reference to the enum used by ``prop`` or None when not found.
"""
if (
(prop.type == "enum") and
(pointer := prop.enum_pointer) and
(identifier := rna_enum_pointer_to_id_map.get(pointer))
):
return ":ref:`%s`" % identifier
return None
def write_rst_enum_items(basepath, key, key_no_prefix, enum_items):
"""
Write a single page for a static enum in RST.
This helps avoid very large lists being in-lined in many places, which is an issue
especially with icons in ``bpy.types.UILayout``. See T87008.
"""
filepath = os.path.join(basepath, "%s.rst" % key_no_prefix)
with open(filepath, "w", encoding="utf-8") as fh:
fw = fh.write
# fw(".. noindex::\n\n")
fw(".. _%s:\n\n" % key)
fw(title_string(key_no_prefix.replace("_", " ").title(), "#"))
# fw(".. rubric:: %s\n\n" % key_no_prefix.replace("_", " ").title())
for item in enum_items:
identifier = item.identifier
name = item.name
description = item.description
if identifier:
fw(":%s: %s\n" % (item.identifier, (escape_rst(name) + ".") if name else ""))
if description:
fw("\n")
write_indented_lines(" ", fw, escape_rst(description) + ".")
else:
fw("\n")
else:
if name:
fw("\n\n**%s**\n\n" % name)
else:
fw("\n\n----\n\n")
if description:
fw(escape_rst(description) + ".")
fw("\n\n")
def write_rst_enum_items_and_index(basepath):
"""
Write shared enum items.
"""
subdir = "bpy_types_enum_items"
basepath_bpy_types_rna_enum = os.path.join(basepath, subdir)
os.makedirs(basepath_bpy_types_rna_enum, exist_ok=True)
with open(os.path.join(basepath_bpy_types_rna_enum, "index.rst"), "w", encoding="utf-8") as fh:
fw = fh.write
fw(title_string("Shared Enum Items", "#"))
fw(".. toctree::\n")
fw("\n")
for key, enum_items in rna_enum_dict.items():
if not key.startswith("rna_enum_"):
raise Exception("Found RNA enum identifier that doesn't use the 'rna_enum_' prefix, found %r!" % key)
key_no_prefix = key.removeprefix("rna_enum_")
fw(" %s\n" % key_no_prefix)
for key, enum_items in rna_enum_dict.items():
key_no_prefix = key.removeprefix("rna_enum_")
write_rst_enum_items(basepath_bpy_types_rna_enum, key, key_no_prefix, enum_items)
fw("\n")
def write_rst_importable_modules(basepath):
"""
Write the RST files of importable modules.
@@ -2212,6 +2391,10 @@ def rna2sphinx(basepath):
write_rst_data(basepath) # bpy.data
write_rst_importable_modules(basepath)
# `bpy_types_enum_items/*` (referenced from `bpy.types`).
if USE_SHARED_RNA_ENUM_ITEMS_STATIC:
write_rst_enum_items_and_index(basepath)
# copy the other rsts
copy_handwritten_rsts(basepath)
@@ -2285,8 +2468,6 @@ def setup_monkey_patch():
# Avoid adding too many changes here.
def setup_blender():
import bpy
# Remove handlers since the functions get included
# in the doc-string and don't have meaningful names.
lists_to_restore = []
@@ -2349,6 +2530,9 @@ def main():
rna2sphinx(SPHINX_IN_TMP)
if ARGS.changelog:
generate_changelog()
if ARGS.full_rebuild:
# Only for full updates.
shutil.rmtree(SPHINX_IN, True)

View File

@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# <pep8 compliant>
bpy_types_Operator_bl_property__doc__ = (
"""
The name of a property to use as this operator's primary property.

View File

@@ -1,10 +1,3 @@
/* T76453: Prevent Long enum lists */
.field-list > dd p {
max-height: 245px;
overflow-y: auto !important;
word-break: break-word;
}
/* Hide home icon in search area */
.wy-side-nav-search > a:hover {background: none; opacity: 0.9}
.wy-side-nav-search > a.icon::before {content: none}

View File

@@ -270,7 +270,7 @@ AUD_API int AUD_readSound(AUD_Sound* sound, float* buffer, int length, int sampl
return length;
}
AUD_API const char* AUD_mixdown(AUD_Sound* sound, unsigned int start, unsigned int length, unsigned int buffersize, const char* filename, AUD_DeviceSpecs specs, AUD_Container format, AUD_Codec codec, unsigned int bitrate, void(*callback)(float, void*), void* data)
AUD_API int AUD_mixdown(AUD_Sound* sound, unsigned int start, unsigned int length, unsigned int buffersize, const char* filename, AUD_DeviceSpecs specs, AUD_Container format, AUD_Codec codec, unsigned int bitrate, void(*callback)(float, void*), void* data, char* error, size_t errorsize)
{
try
{
@@ -282,15 +282,20 @@ AUD_API const char* AUD_mixdown(AUD_Sound* sound, unsigned int start, unsigned i
std::shared_ptr<IWriter> writer = FileWriter::createWriter(filename, convCToDSpec(specs), static_cast<Container>(format), static_cast<Codec>(codec), bitrate);
FileWriter::writeReader(reader, writer, length, buffersize, callback, data);
return nullptr;
return true;
}
catch(Exception& e)
{
return e.getMessage().c_str();
if(error && errorsize)
{
std::strncpy(error, e.getMessage().c_str(), errorsize);
error[errorsize - 1] = '\0';
}
return false;
}
}
AUD_API const char* AUD_mixdown_per_channel(AUD_Sound* sound, unsigned int start, unsigned int length, unsigned int buffersize, const char* filename, AUD_DeviceSpecs specs, AUD_Container format, AUD_Codec codec, unsigned int bitrate, void(*callback)(float, void*), void* data)
AUD_API int AUD_mixdown_per_channel(AUD_Sound* sound, unsigned int start, unsigned int length, unsigned int buffersize, const char* filename, AUD_DeviceSpecs specs, AUD_Container format, AUD_Codec codec, unsigned int bitrate, void(*callback)(float, void*), void* data, char* error, size_t errorsize)
{
try
{
@@ -328,11 +333,16 @@ AUD_API const char* AUD_mixdown_per_channel(AUD_Sound* sound, unsigned int start
reader->seek(start);
FileWriter::writeReader(reader, writers, length, buffersize, callback, data);
return nullptr;
return true;
}
catch(Exception& e)
{
return e.getMessage().c_str();
if(error && errorsize)
{
std::strncpy(error, e.getMessage().c_str(), errorsize);
error[errorsize - 1] = '\0';
}
return false;
}
}

View File

@@ -70,13 +70,15 @@ extern AUD_API int AUD_readSound(AUD_Sound* sound, float* buffer, int length, in
* \param bitrate The bitrate for encoding.
* \param callback A callback function that is called periodically during mixdown, reporting progress if length > 0. Can be NULL.
* \param data Pass through parameter that is passed to the callback.
* \return An error message or NULL in case of success.
* \param error String buffer to copy the error message to in case of failure.
* \param errorsize The size of the error buffer.
* \return Whether or not the operation succeeded.
*/
extern AUD_API const char* AUD_mixdown(AUD_Sound* sound, unsigned int start, unsigned int length,
extern AUD_API int AUD_mixdown(AUD_Sound* sound, unsigned int start, unsigned int length,
unsigned int buffersize, const char* filename,
AUD_DeviceSpecs specs, AUD_Container format,
AUD_Codec codec, unsigned int bitrate,
void(*callback)(float, void*), void* data);
void(*callback)(float, void*), void* data, char* error, size_t errorsize);
/**
* Mixes a sound down into multiple files.
@@ -91,13 +93,15 @@ extern AUD_API const char* AUD_mixdown(AUD_Sound* sound, unsigned int start, uns
* \param bitrate The bitrate for encoding.
* \param callback A callback function that is called periodically during mixdown, reporting progress if length > 0. Can be NULL.
* \param data Pass through parameter that is passed to the callback.
* \return An error message or NULL in case of success.
* \param error String buffer to copy the error message to in case of failure.
* \param errorsize The size of the error buffer.
* \return Whether or not the operation succeeded.
*/
extern AUD_API const char* AUD_mixdown_per_channel(AUD_Sound* sound, unsigned int start, unsigned int length,
extern AUD_API int AUD_mixdown_per_channel(AUD_Sound* sound, unsigned int start, unsigned int length,
unsigned int buffersize, const char* filename,
AUD_DeviceSpecs specs, AUD_Container format,
AUD_Codec codec, unsigned int bitrate,
void(*callback)(float, void*), void* data);
void(*callback)(float, void*), void* data, char* error, size_t errorsize);
/**
* Opens a read device and prepares it for mixdown of the sound scene.
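For reference, a minimal calling sketch for the new convention (the `sound`, `specs` and `length` values and the OGG/Vorbis enumerator names are illustrative assumptions, not part of this diff):
/* Sketch: mix down to a file with the new int return + caller-owned error buffer. */
char error[256] = "";
if (!AUD_mixdown(sound, 0, length, 1024, "/tmp/mixdown.ogg",
                 specs, AUD_CONTAINER_OGG, AUD_CODEC_VORBIS, 192000,
                 NULL, NULL, error, sizeof(error))) {
  /* On failure the exception message was copied (NUL-terminated) into 'error'. */
  fprintf(stderr, "Mixdown failed: %s\n", error);
}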

View File

@@ -41,7 +41,7 @@ double PulseAudioDevice::PulseAudioSynchronizer::getPosition(std::shared_ptr<IHa
void PulseAudioDevice::updateRingBuffer()
{
unsigned int samplesize = AUD_SAMPLE_SIZE(m_specs);
unsigned int samplesize = AUD_DEVICE_SAMPLE_SIZE(m_specs);
std::unique_lock<std::mutex> lock(m_mixingLock);

View File

@@ -1,5 +1,5 @@
Project: Curve-Fit-nD
URL: https://github.com/ideasman42/curve-fit-nd
License: BSD 3-Clause
Upstream version: ddcd5bd (Last Release)
Upstream version: ae32da9de264c3ed399673e2bc1bc09003799416 (Last Release)
Local modifications: None

View File

@@ -39,7 +39,7 @@
* Takes a flat array of points and evaluates that to calculate a bezier spline.
*
* \param points, points_len: The array of points to calculate cubics from.
* \param dims: The number of dimensions for for each element in \a points.
* \param dims: The number of dimensions for each element in \a points.
* \param error_threshold: the error threshold to allow for,
* the curve will be within this distance from \a points.
* \param corners, corners_len: indices for points which will not have aligned tangents (optional).
@@ -47,10 +47,10 @@
* to evaluate a line to detect corner indices.
*
* \param r_cubic_array, r_cubic_array_len: Resulting array of tangents and knots, formatted as follows:
* ``r_cubic_array[r_cubic_array_len][3][dims]``,
* `r_cubic_array[r_cubic_array_len][3][dims]`,
* where each point has 0 and 2 for the tangents and the middle index 1 for the knot.
* The size of the *flat* array will be ``r_cubic_array_len * 3 * dims``.
* \param r_corner_index_array, r_corner_index_len: Corner indices in in \a r_cubic_array (optional).
* The size of the *flat* array will be `r_cubic_array_len * 3 * dims`.
* \param r_corner_index_array, r_corner_index_len: Corner indices in \a r_cubic_array (optional).
* This allows you to access corners on the resulting curve.
*
* \returns zero on success, nonzero is reserved for error values.
@@ -85,7 +85,7 @@ int curve_fit_cubic_to_points_fl(
* Takes a flat array of points and evaluates that to calculate handle lengths.
*
* \param points, points_len: The array of points to calculate cubics from.
* \param dims: The number of dimensions for for each element in \a points.
* \param dims: The number of dimensions for each element in \a points.
* \param points_length_cache: Optional pre-calculated lengths between points.
* \param error_threshold: the error threshold to allow for,
* \param tan_l, tan_r: Normalized tangents the handles will be aligned to.
@@ -166,7 +166,7 @@ int curve_fit_cubic_to_points_refit_fl(
* A helper function that takes a line and outputs its corner indices.
*
* \param points, points_len: Curve to evaluate.
* \param dims: The number of dimensions for for each element in \a points.
* \param dims: The number of dimensions for each element in \a points.
* \param radius_min: Corners on the curve between points below this radius are ignored.
* \param radius_max: Corners on the curve above this radius are ignored.
* \param samples_max: Prevent testing corners beyond this many points
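As a reading aid for the documented layout, a short sketch walking the flat output (assumes a prior successful fit filled `cubic_array` and `cubic_array_len` for `dims` dimensions; the variable names are illustrative):
/* The flat result is [cubic_array_len][3][dims]:
 * middle index 1 is the knot, indices 0 and 2 are its handles. */
for (unsigned int i = 0; i < cubic_array_len; i++) {
  const double *handle_l = &cubic_array[(i * 3 + 0) * dims];
  const double *knot     = &cubic_array[(i * 3 + 1) * dims];
  const double *handle_r = &cubic_array[(i * 3 + 2) * dims];
  /* ... each pointer addresses 'dims' consecutive doubles ... */
}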

View File

@@ -43,20 +43,24 @@
#include "../curve_fit_nd.h"
/* Take curvature into account when calculating the least square solution isn't usable. */
/** Take curvature into account when the least-squares solution isn't usable. */
#define USE_CIRCULAR_FALLBACK
/* Use the maximum distance of any points from the direct line between 2 points
/**
* Use the maximum distance of any points from the direct line between 2 points
* to calculate how long the handles need to be.
* Can do a 'perfect' reversal of subdivision when the curve has symmetrical handles and doesn't change direction
* (as with an 'S' shape). */
* (as with an 'S' shape).
*/
#define USE_OFFSET_FALLBACK
/* avoid re-calculating lengths multiple times */
/** Avoid re-calculating lengths multiple times. */
#define USE_LENGTH_CACHE
/* store the indices in the cubic data so we can return the original indices,
* useful when the caller has data associated with the curve. */
/**
* Store the indices in the cubic data so we can return the original indices,
* useful when the caller has data associated with the curve.
*/
#define USE_ORIG_INDEX_DATA
typedef unsigned int uint;
@@ -95,13 +99,15 @@ typedef unsigned int uint;
* \{ */
typedef struct Cubic {
/* single linked lists */
/** Single linked lists. */
struct Cubic *next;
#ifdef USE_ORIG_INDEX_DATA
uint orig_span;
#endif
/* 0: point_0, 1: handle_0, 2: handle_1, 3: point_1,
* each one is offset by 'dims' */
/**
* 0: point_0, 1: handle_0, 2: handle_1, 3: point_1,
* each one is offset by 'dims'.
*/
double pt_data[0];
} Cubic;
@@ -195,7 +201,7 @@ static double *cubic_list_as_array(
bool use_orig_index = (r_orig_index != NULL);
#endif
/* fill the array backwards */
/* Fill the array backwards. */
const size_t array_chunk = 3 * dims;
double *array_iter = array + array_flat_len;
for (Cubic *citer = clist->items; citer; citer = citer->next) {
@@ -221,15 +227,15 @@ static double *cubic_list_as_array(
}
#endif
/* flip tangent for first and last (we could leave at zero, but set to something useful) */
/* Flip tangent for first and last (we could leave at zero, but set to something useful). */
/* first */
/* First. */
array_iter -= array_chunk;
memcpy(&array_iter[dims], handle_prev, sizeof(double) * 2 * dims);
flip_vn_vnvn(&array_iter[0 * dims], &array_iter[1 * dims], &array_iter[2 * dims], dims);
assert(array == array_iter);
/* last */
/* Last. */
array_iter += array_flat_len - (3 * dims);
flip_vn_vnvn(&array_iter[2 * dims], &array_iter[1 * dims], &array_iter[0 * dims], dims);
@@ -455,7 +461,7 @@ static double points_calc_circumference_factor(
const double dot = dot_vnvn(tan_l, tan_r, dims);
const double len_tangent = dot < 0.0 ? len_vnvn(tan_l, tan_r, dims) : len_negated_vnvn(tan_l, tan_r, dims);
if (len_tangent > DBL_EPSILON) {
/* only clamp to avoid precision error */
/* Only clamp to avoid precision error. */
double angle = acos(max(-fabs(dot), -1.0));
/* Angle may be less than the length when the tangents define >180 degrees of the circle,
* (tangents that point away from each other).
@@ -466,7 +472,7 @@ static double points_calc_circumference_factor(
return factor;
}
else {
/* tangents are exactly aligned (think two opposite sides of a circle). */
/* Tangents are exactly aligned (think two opposite sides of a circle). */
return (M_PI / 2);
}
}
@@ -485,18 +491,18 @@ static double points_calc_circle_tangent_factor(
const double eps = 1e-8;
const double tan_dot = dot_vnvn(tan_l, tan_r, dims);
if (tan_dot > 1.0 - eps) {
/* no angle difference (use fallback, length wont make any difference) */
/* No angle difference (use fallback, length won't make any difference). */
return (1.0 / 3.0) * 0.75;
}
else if (tan_dot < -1.0 + eps) {
/* parallel tangents (half-circle) */
/* Parallel tangents (half-circle). */
return (1.0 / 2.0);
}
else {
/* non-aligned tangents, calculate handle length */
/* Non-aligned tangents, calculate handle length. */
const double angle = acos(tan_dot) / 2.0;
/* could also use 'angle_sin = len_vnvn(tan_l, tan_r, dims) / 2.0' */
/* Could also use `angle_sin = len_vnvn(tan_l, tan_r, dims) / 2.0`. */
const double angle_sin = sin(angle);
const double angle_cos = cos(angle);
return ((1.0 - angle_cos) / (angle_sin * 2.0)) / angle_sin;
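For reference, the returned value is the circle handle-length factor: with \theta = \arccos(t_l \cdot t_r) / 2 the code computes f(\theta) = (1 - \cos\theta) / (2 \sin^2\theta), which agrees with both special cases above: f(\theta) \to 1/4 = (1/3) \cdot 0.75 as \theta \to 0, and f(\pi/2) = 1/2 for parallel tangents (the half-circle).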
@@ -516,15 +522,15 @@ static double points_calc_cubic_scale(
const double len_direct = len_vnvn(v_l, v_r, dims);
const double len_circle_factor = points_calc_circle_tangent_factor(tan_l, tan_r, dims);
/* if this curve is a circle, this value doesn't need modification */
/* If this curve is a circle, this value doesn't need modification. */
const double len_circle_handle = (len_direct * (len_circle_factor / 0.75));
/* scale by the difference from the circumference distance */
/* Scale by the difference from the circumference distance. */
const double len_circle = len_direct * points_calc_circumference_factor(tan_l, tan_r, dims);
double scale_handle = (coords_length / len_circle);
/* Could investigate an accurate calculation here,
* though this gives close results */
* though this gives close results. */
scale_handle = ((scale_handle - 1.0) * 1.75) + 1.0;
return len_circle_handle * scale_handle;
@@ -554,9 +560,8 @@ static void cubic_from_points_fallback(
r_cubic->orig_span = (points_offset_len - 1);
#endif
/* p1 = p0 - (tan_l * alpha);
* p2 = p3 + (tan_r * alpha);
*/
/* `p1 = p0 - (tan_l * alpha);`
* `p2 = p3 + (tan_r * alpha);` */
msub_vn_vnvn_fl(p1, p0, tan_l, alpha, dims);
madd_vn_vnvn_fl(p2, p3, tan_r, alpha, dims);
}
@@ -594,7 +599,7 @@ static void cubic_from_points_offset_fallback(
project_plane_vn_vnvn_normalized(a[0], tan_l, dir_unit, dims);
project_plane_vn_vnvn_normalized(a[1], tan_r, dir_unit, dims);
/* only for better accuracy, not essential */
/* Only for better accuracy, not essential. */
normalize_vn(a[0], dims);
normalize_vn(a[1], dims);
@@ -620,7 +625,7 @@ static void cubic_from_points_offset_fallback(
*
* The 'dists[..] + dir_dirs' limit is just a rough approximation.
* While a more exact value could be calculated,
* in this case the error values approach divide by zero (inf)
* in this case the error values approach divide-by-zero (infinity)
* so there is no need to be too precise when checking if limits have been exceeded. */
double alpha_l = (dists[0] / 0.75) / fabs(dot_vnvn(tan_l, a[0], dims));
@@ -644,9 +649,8 @@ static void cubic_from_points_offset_fallback(
r_cubic->orig_span = (points_offset_len - 1);
#endif
/* p1 = p0 - (tan_l * alpha_l);
* p2 = p3 + (tan_r * alpha_r);
*/
/* `p1 = p0 - (tan_l * alpha_l);`
* `p2 = p3 + (tan_r * alpha_r);` */
msub_vn_vnvn_fl(p1, p0, tan_l, alpha_l, dims);
madd_vn_vnvn_fl(p2, p3, tan_r, alpha_r, dims);
}
@@ -674,7 +678,7 @@ static void cubic_from_points(
const double *p0 = &points_offset[0];
const double *p3 = &points_offset[(points_offset_len - 1) * dims];
/* Point Pairs */
/* Point Pairs. */
double alpha_l, alpha_r;
#ifdef USE_VLA
double a[2][dims];
@@ -696,7 +700,7 @@ static void cubic_from_points(
const double b0_plus_b1 = B0plusB1(u_prime[i]);
const double b2_plus_b3 = B2plusB3(u_prime[i]);
/* inline dot product */
/* Inline dot product. */
for (uint j = 0; j < dims; j++) {
const double tmp = (pt[j] - (p0[j] * b0_plus_b1)) + (p3[j] * b2_plus_b3);
@@ -719,7 +723,7 @@ static void cubic_from_points(
det_C0_C1 = c[0][0] * c[1][1] * 10e-12;
}
/* may still divide-by-zero, check below will catch nan values */
/* May still divide-by-zero, check below will catch NAN values. */
alpha_l = det_X_C1 / det_C0_C1;
alpha_r = det_C_0X / det_C0_C1;
}
@@ -736,7 +740,7 @@ static void cubic_from_points(
bool use_clamp = true;
/* flip check to catch nan values */
/* Flip check to catch NAN values. */
if (!(alpha_l >= 0.0) ||
!(alpha_r >= 0.0))
{
@@ -750,7 +754,7 @@ static void cubic_from_points(
alpha_l = alpha_r = len_vnvn(p0, p3, dims) / 3.0;
#endif
/* skip clamping when we're using default handles */
/* Skip clamping when we're using default handles. */
use_clamp = false;
}
@@ -764,9 +768,8 @@ static void cubic_from_points(
r_cubic->orig_span = (points_offset_len - 1);
#endif
/* p1 = p0 - (tan_l * alpha_l);
* p2 = p3 + (tan_r * alpha_r);
*/
/* `p1 = p0 - (tan_l * alpha_l);`
* `p2 = p3 + (tan_r * alpha_r);` */
msub_vn_vnvn_fl(p1, p0, tan_l, alpha_l, dims);
madd_vn_vnvn_fl(p2, p3, tan_r, alpha_r, dims);
@@ -781,7 +784,7 @@ static void cubic_from_points(
#endif
points_calc_center_weighted(points_offset, points_offset_len, dims, center);
const double clamp_scale = 3.0; /* clamp to 3x */
const double clamp_scale = 3.0; /* Clamp to 3x. */
double dist_sq_max = 0.0;
{
@@ -790,7 +793,7 @@ static void cubic_from_points(
#if 0
double dist_sq_test = sq(len_vnvn(center, pt, dims) * clamp_scale);
#else
/* do inline */
/* Do inline. */
double dist_sq_test = 0.0;
for (uint j = 0; j < dims; j++) {
dist_sq_test += sq((pt[j] - center[j]) * clamp_scale);
@@ -816,10 +819,8 @@ static void cubic_from_points(
alpha_l = alpha_r = len_vnvn(p0, p3, dims) / 3.0;
#endif
/*
* p1 = p0 - (tan_l * alpha_l);
* p2 = p3 + (tan_r * alpha_r);
*/
/* `p1 = p0 - (tan_l * alpha_l);`
* `p2 = p3 + (tan_r * alpha_r);` */
for (uint j = 0; j < dims; j++) {
p1[j] = p0[j] - (tan_l[j] * alpha_l);
p2[j] = p3[j] + (tan_r[j] * alpha_r);
@@ -829,7 +830,7 @@ static void cubic_from_points(
p2_dist_sq = len_squared_vnvn(center, p2, dims);
}
/* clamp within the 3x radius */
/* Clamp within the 3x radius. */
if (p1_dist_sq > dist_sq_max) {
isub_vnvn(p1, center, dims);
imul_vn_fl(p1, sqrt(dist_sq_max) / sqrt(p1_dist_sq), dims);
@@ -841,7 +842,7 @@ static void cubic_from_points(
iadd_vnvn(p2, center, dims);
}
}
/* end clamping */
/* End clamping. */
}
#ifdef USE_LENGTH_CACHE
@@ -917,7 +918,7 @@ static double cubic_find_root(
const uint dims)
{
/* Newton-Raphson Method. */
/* all vectors */
/* All vectors. */
#ifdef USE_VLA
double q0_u[dims];
double q1_u[dims];
@@ -932,8 +933,8 @@ static double cubic_find_root(
cubic_calc_speed(cubic, u, dims, q1_u);
cubic_calc_acceleration(cubic, u, dims, q2_u);
/* may divide-by-zero, caller must check for that case */
/* u - ((q0_u - p) * q1_u) / (q1_u.length_squared() + (q0_u - p) * q2_u) */
/* May divide-by-zero, caller must check for that case. */
/* `u - ((q0_u - p) * q1_u) / (q1_u.length_squared() + (q0_u - p) * q2_u)` */
isub_vnvn(q0_u, p, dims);
return u - dot_vnvn(q0_u, q1_u, dims) /
(len_squared_vn(q1_u, dims) + dot_vnvn(q0_u, q2_u, dims));
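For reference, the step above is Newton-Raphson applied to the derivative of the squared distance f(u) = \|q(u) - p\|^2: with f'(u) = 2 (q(u) - p) \cdot q'(u) and f''(u) = 2 (\|q'(u)\|^2 + (q(u) - p) \cdot q''(u)), the update is u \leftarrow u - ((q(u) - p) \cdot q'(u)) / (\|q'(u)\|^2 + (q(u) - p) \cdot q''(u)), where q0_u = q(u), q1_u = q'(u) and q2_u = q''(u) in the code.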
@@ -1032,7 +1033,7 @@ static bool fit_cubic_to_points(
double error_max_sq;
uint split_index;
/* Parameterize points, and attempt to fit curve */
/* Parameterize points, and attempt to fit curve. */
cubic_from_points(
points_offset, points_offset_len,
#ifdef USE_CIRCULAR_FALLBACK
@@ -1040,7 +1041,7 @@ static bool fit_cubic_to_points(
#endif
u, tan_l, tan_r, dims, r_cubic);
/* Find max deviation of points to fitted curve */
/* Find max deviation of points to fitted curve. */
error_max_sq = cubic_calc_error(
r_cubic, points_offset, points_offset_len, u, dims,
&split_index);
@@ -1062,7 +1063,7 @@ static bool fit_cubic_to_points(
cubic_test, points_offset, points_offset_len, u, dims,
&split_index);
/* intentionally use the newly calculated 'split_index',
/* Intentionally use the newly calculated 'split_index',
* even if the 'error_max_sq_test' is worse. */
if (error_max_sq > error_max_sq_test) {
error_max_sq = error_max_sq_test;
@@ -1071,7 +1072,7 @@ static bool fit_cubic_to_points(
}
#endif
/* Test the offset fallback */
/* Test the offset fallback. */
#ifdef USE_OFFSET_FALLBACK
if (!(error_max_sq < error_threshold_sq)) {
/* Using the offset from the curve to calculate cubic handle length may give better results
@@ -1095,7 +1096,7 @@ static bool fit_cubic_to_points(
if (!(error_max_sq < error_threshold_sq)) {
cubic_copy(cubic_test, r_cubic, dims);
/* If error not too large, try some reparameterization and iteration */
/* If error not too large, try some re-parameterization and iteration. */
double *u_prime = malloc(sizeof(double) * points_offset_len);
for (uint iter = 0; iter < iteration_max; iter++) {
if (!cubic_reparameterize(
@@ -1123,7 +1124,7 @@ static bool fit_cubic_to_points(
}
if (!(error_max_sq < error_threshold_sq)) {
/* continue */
/* Continue. */
}
else {
assert((error_max_sq < error_threshold_sq));
@@ -1156,7 +1157,7 @@ static void fit_cubic_to_points_recursive(
const double error_threshold_sq,
const uint calc_flag,
const uint dims,
/* fill in the list */
/* Fill in the list. */
CubicList *clist)
{
Cubic *cubic = cubic_alloc(dims);
@@ -1180,7 +1181,7 @@ static void fit_cubic_to_points_recursive(
cubic_free(cubic);
/* Fitting failed -- split at max error point and fit recursively */
/* Fitting failed -- split at max error point and fit recursively. */
/* Check splinePoint is not an endpoint?
*
@@ -1212,7 +1213,7 @@ static void fit_cubic_to_points_recursive(
#endif
const double *pt = &points_offset[split_index * dims];
/* tan_center = ((pt_a - pt).normalized() + (pt - pt_b).normalized()).normalized() */
/* `tan_center = ((pt_a - pt).normalized() + (pt - pt_b).normalized()).normalized()`. */
normalize_vn_vnvn(tan_center_a, pt_a, pt, dims);
normalize_vn_vnvn(tan_center_b, pt, pt_b, dims);
add_vn_vnvn(tan_center, tan_center_a, tan_center_b, dims);
@@ -1306,9 +1307,8 @@ int curve_fit_cubic_to_points_db(
const double *pt_l_next = pt_l + dims;
const double *pt_r_prev = pt_r - dims;
/* tan_l = (pt_l - pt_l_next).normalized()
* tan_r = (pt_r_prev - pt_r).normalized()
*/
/* `tan_l = (pt_l - pt_l_next).normalized();`
* `tan_r = (pt_r_prev - pt_r).normalized();` */
normalize_vn_vnvn(tan_l, pt_l, pt_l_next, dims);
normalize_vn_vnvn(tan_r, pt_r_prev, pt_r, dims);
@@ -1362,7 +1362,7 @@ int curve_fit_cubic_to_points_db(
*r_cubic_orig_index = NULL;
#endif
/* allocate a contiguous array and free the linked list */
/* Allocate a contiguous array and free the linked list. */
*r_cubic_array = cubic_list_as_array(
&clist
#ifdef USE_ORIG_INDEX_DATA
@@ -1454,7 +1454,7 @@ int curve_fit_cubic_to_points_single_db(
{
Cubic *cubic = alloca(cubic_alloc_size(dims));
/* in this instance theres no advantage in using length cache,
/* In this instance there is no advantage in using the length cache,
* since we're not recursively calculating values. */
#ifdef USE_LENGTH_CACHE
double *points_length_cache_alloc = NULL;

View File

@@ -1490,3 +1490,4 @@ int curve_fit_cubic_to_points_refit_fl(
return result;
}

View File

@@ -37,7 +37,7 @@
* - #TPOOL_STRUCT: Name for pool struct name.
* - #TPOOL_CHUNK_SIZE: Chunk size (optional), use 64kb when not defined.
*
* \note #TPOOL_ALLOC_TYPE must be at least ``sizeof(void *)``.
* \note #TPOOL_ALLOC_TYPE must be at least `sizeof(void *)`.
*
* Defines the API, uses #TPOOL_IMPL_PREFIX to prefix each function.
*

View File

@@ -305,5 +305,3 @@ void *HEAP_node_ptr(HeapNode *node)
{
return node->ptr;
}
/** \} */

View File

@@ -1,5 +1,5 @@
Project: Draco
URL: https://google.github.io/draco/
License: Apache 2.0
Upstream version: 1.3.6
Local modifications: None
Upstream version: 1.5.2
Local modifications: Apply patches/blender.patch

View File

@@ -38,6 +38,46 @@ void AttributeOctahedronTransform::CopyToAttributeTransformData(
out_data->AppendParameterValue(quantization_bits_);
}
bool AttributeOctahedronTransform::TransformAttribute(
const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
PointAttribute *target_attribute) {
return GeneratePortableAttribute(attribute, point_ids,
target_attribute->size(), target_attribute);
}
bool AttributeOctahedronTransform::InverseTransformAttribute(
const PointAttribute &attribute, PointAttribute *target_attribute) {
if (target_attribute->data_type() != DT_FLOAT32) {
return false;
}
const int num_points = target_attribute->size();
const int num_components = target_attribute->num_components();
if (num_components != 3) {
return false;
}
constexpr int kEntrySize = sizeof(float) * 3;
float att_val[3];
const int32_t *source_attribute_data = reinterpret_cast<const int32_t *>(
attribute.GetAddress(AttributeValueIndex(0)));
uint8_t *target_address =
target_attribute->GetAddress(AttributeValueIndex(0));
OctahedronToolBox octahedron_tool_box;
if (!octahedron_tool_box.SetQuantizationBits(quantization_bits_)) {
return false;
}
for (uint32_t i = 0; i < num_points; ++i) {
const int32_t s = *source_attribute_data++;
const int32_t t = *source_attribute_data++;
octahedron_tool_box.QuantizedOctahedralCoordsToUnitVector(s, t, att_val);
// Store the decoded floating point values into the attribute buffer.
std::memcpy(target_address, att_val, kEntrySize);
target_address += kEntrySize;
}
return true;
}
void AttributeOctahedronTransform::SetParameters(int quantization_bits) {
quantization_bits_ = quantization_bits;
}
@@ -51,38 +91,55 @@ bool AttributeOctahedronTransform::EncodeParameters(
return false;
}
std::unique_ptr<PointAttribute>
AttributeOctahedronTransform::GeneratePortableAttribute(
const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
int num_points) const {
DRACO_DCHECK(is_initialized());
bool AttributeOctahedronTransform::DecodeParameters(
const PointAttribute &attribute, DecoderBuffer *decoder_buffer) {
uint8_t quantization_bits;
if (!decoder_buffer->Decode(&quantization_bits)) {
return false;
}
quantization_bits_ = quantization_bits;
return true;
}
// Allocate portable attribute.
const int num_entries = static_cast<int>(point_ids.size());
std::unique_ptr<PointAttribute> portable_attribute =
InitPortableAttribute(num_entries, 2, num_points, attribute, true);
bool AttributeOctahedronTransform::GeneratePortableAttribute(
const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
int num_points, PointAttribute *target_attribute) const {
DRACO_DCHECK(is_initialized());
// Quantize all values in the order given by point_ids into portable
// attribute.
int32_t *const portable_attribute_data = reinterpret_cast<int32_t *>(
portable_attribute->GetAddress(AttributeValueIndex(0)));
target_attribute->GetAddress(AttributeValueIndex(0)));
float att_val[3];
int32_t dst_index = 0;
OctahedronToolBox converter;
if (!converter.SetQuantizationBits(quantization_bits_)) {
return nullptr;
return false;
}
for (uint32_t i = 0; i < point_ids.size(); ++i) {
const AttributeValueIndex att_val_id = attribute.mapped_index(point_ids[i]);
attribute.GetValue(att_val_id, att_val);
// Encode the vector into s and t octahedral coordinates.
int32_t s, t;
converter.FloatVectorToQuantizedOctahedralCoords(att_val, &s, &t);
portable_attribute_data[dst_index++] = s;
portable_attribute_data[dst_index++] = t;
if (!point_ids.empty()) {
for (uint32_t i = 0; i < point_ids.size(); ++i) {
const AttributeValueIndex att_val_id =
attribute.mapped_index(point_ids[i]);
attribute.GetValue(att_val_id, att_val);
// Encode the vector into s and t octahedral coordinates.
int32_t s, t;
converter.FloatVectorToQuantizedOctahedralCoords(att_val, &s, &t);
portable_attribute_data[dst_index++] = s;
portable_attribute_data[dst_index++] = t;
}
} else {
for (PointIndex i(0); i < num_points; ++i) {
const AttributeValueIndex att_val_id = attribute.mapped_index(i);
attribute.GetValue(att_val_id, att_val);
// Encode the vector into s and t octahedral coordinates.
int32_t s, t;
converter.FloatVectorToQuantizedOctahedralCoords(att_val, &s, &t);
portable_attribute_data[dst_index++] = s;
portable_attribute_data[dst_index++] = t;
}
}
return portable_attribute;
return true;
}
} // namespace draco

View File

@@ -37,19 +37,40 @@ class AttributeOctahedronTransform : public AttributeTransform {
void CopyToAttributeTransformData(
AttributeTransformData *out_data) const override;
bool TransformAttribute(const PointAttribute &attribute,
const std::vector<PointIndex> &point_ids,
PointAttribute *target_attribute) override;
bool InverseTransformAttribute(const PointAttribute &attribute,
PointAttribute *target_attribute) override;
// Set number of quantization bits.
void SetParameters(int quantization_bits);
// Encode relevant parameters into buffer.
bool EncodeParameters(EncoderBuffer *encoder_buffer) const;
bool EncodeParameters(EncoderBuffer *encoder_buffer) const override;
bool DecodeParameters(const PointAttribute &attribute,
DecoderBuffer *decoder_buffer) override;
bool is_initialized() const { return quantization_bits_ != -1; }
int32_t quantization_bits() const { return quantization_bits_; }
// Create portable attribute.
std::unique_ptr<PointAttribute> GeneratePortableAttribute(
const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
int num_points) const;
protected:
DataType GetTransformedDataType(
const PointAttribute &attribute) const override {
return DT_UINT32;
}
int GetTransformedNumComponents(
const PointAttribute &attribute) const override {
return 2;
}
// Perform the actual transformation.
bool GeneratePortableAttribute(const PointAttribute &attribute,
const std::vector<PointIndex> &point_ids,
int num_points,
PointAttribute *target_attribute) const;
private:
int32_t quantization_bits_;
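Taken together with the AttributeTransform base-class changes later in this diff, the encode/decode round trip now goes through a caller-provided attribute instead of a returned unique_ptr. A hedged sketch of the intended call pattern, using only the signatures declared above (src_attribute, dst_attribute, point_ids, and quantization_bits are placeholders; buffer setup and error handling elided):

AttributeOctahedronTransform transform;
transform.SetParameters(quantization_bits);
// Encoder side: quantize normals into a portable integer attribute.
std::unique_ptr<PointAttribute> portable =
    transform.InitTransformedAttribute(*src_attribute, point_ids.size());
if (!transform.TransformAttribute(*src_attribute, point_ids, portable.get())) {
  // Handle failure (e.g. invalid quantization bits).
}
// Decoder side: reconstruct float normals from the portable attribute.
if (!transform.InverseTransformAttribute(*portable, dst_attribute)) {
  // Handle failure (target must be DT_FLOAT32 with 3 components).
}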

View File

@@ -1,4 +1,3 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -51,13 +50,74 @@ void AttributeQuantizationTransform::CopyToAttributeTransformData(
out_data->AppendParameterValue(range_);
}
void AttributeQuantizationTransform::SetParameters(int quantization_bits,
bool AttributeQuantizationTransform::TransformAttribute(
const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
PointAttribute *target_attribute) {
if (point_ids.empty()) {
GeneratePortableAttribute(attribute, target_attribute->size(),
target_attribute);
} else {
GeneratePortableAttribute(attribute, point_ids, target_attribute->size(),
target_attribute);
}
return true;
}
bool AttributeQuantizationTransform::InverseTransformAttribute(
const PointAttribute &attribute, PointAttribute *target_attribute) {
if (target_attribute->data_type() != DT_FLOAT32) {
return false;
}
// Convert all quantized values back to floats.
const int32_t max_quantized_value =
(1u << static_cast<uint32_t>(quantization_bits_)) - 1;
const int num_components = target_attribute->num_components();
const int entry_size = sizeof(float) * num_components;
const std::unique_ptr<float[]> att_val(new float[num_components]);
int quant_val_id = 0;
int out_byte_pos = 0;
Dequantizer dequantizer;
if (!dequantizer.Init(range_, max_quantized_value)) {
return false;
}
const int32_t *const source_attribute_data =
reinterpret_cast<const int32_t *>(
attribute.GetAddress(AttributeValueIndex(0)));
const int num_values = target_attribute->size();
for (uint32_t i = 0; i < num_values; ++i) {
for (int c = 0; c < num_components; ++c) {
float value =
dequantizer.DequantizeFloat(source_attribute_data[quant_val_id++]);
value = value + min_values_[c];
att_val[c] = value;
}
// Store the floating point value into the attribute buffer.
target_attribute->buffer()->Write(out_byte_pos, att_val.get(), entry_size);
out_byte_pos += entry_size;
}
return true;
}
bool AttributeQuantizationTransform::IsQuantizationValid(
int quantization_bits) {
// Currently we allow only up to 30 bit quantization.
return quantization_bits >= 1 && quantization_bits <= 30;
}
bool AttributeQuantizationTransform::SetParameters(int quantization_bits,
const float *min_values,
int num_components,
float range) {
if (!IsQuantizationValid(quantization_bits)) {
return false;
}
quantization_bits_ = quantization_bits;
min_values_.assign(min_values, min_values + num_components);
range_ = range;
return true;
}
bool AttributeQuantizationTransform::ComputeParameters(
@@ -65,6 +125,9 @@ bool AttributeQuantizationTransform::ComputeParameters(
if (quantization_bits_ != -1) {
return false; // already initialized.
}
if (!IsQuantizationValid(quantization_bits)) {
return false;
}
quantization_bits_ = quantization_bits;
const int num_components = attribute.num_components();
@@ -121,20 +184,37 @@ bool AttributeQuantizationTransform::EncodeParameters(
return false;
}
std::unique_ptr<PointAttribute>
AttributeQuantizationTransform::GeneratePortableAttribute(
const PointAttribute &attribute, int num_points) const {
bool AttributeQuantizationTransform::DecodeParameters(
const PointAttribute &attribute, DecoderBuffer *decoder_buffer) {
min_values_.resize(attribute.num_components());
if (!decoder_buffer->Decode(&min_values_[0],
sizeof(float) * min_values_.size())) {
return false;
}
if (!decoder_buffer->Decode(&range_)) {
return false;
}
uint8_t quantization_bits;
if (!decoder_buffer->Decode(&quantization_bits)) {
return false;
}
if (!IsQuantizationValid(quantization_bits)) {
return false;
}
quantization_bits_ = quantization_bits;
return true;
}
void AttributeQuantizationTransform::GeneratePortableAttribute(
const PointAttribute &attribute, int num_points,
PointAttribute *target_attribute) const {
DRACO_DCHECK(is_initialized());
// Allocate portable attribute.
const int num_entries = num_points;
const int num_components = attribute.num_components();
std::unique_ptr<PointAttribute> portable_attribute =
InitPortableAttribute(num_entries, num_components, 0, attribute, true);
// Quantize all values using the order given by point_ids.
int32_t *const portable_attribute_data = reinterpret_cast<int32_t *>(
portable_attribute->GetAddress(AttributeValueIndex(0)));
target_attribute->GetAddress(AttributeValueIndex(0)));
const uint32_t max_quantized_value = (1 << (quantization_bits_)) - 1;
Quantizer quantizer;
quantizer.Init(range(), max_quantized_value);
@@ -149,24 +229,18 @@ AttributeQuantizationTransform::GeneratePortableAttribute(
portable_attribute_data[dst_index++] = q_val;
}
}
return portable_attribute;
}
std::unique_ptr<PointAttribute>
AttributeQuantizationTransform::GeneratePortableAttribute(
void AttributeQuantizationTransform::GeneratePortableAttribute(
const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
int num_points) const {
int num_points, PointAttribute *target_attribute) const {
DRACO_DCHECK(is_initialized());
// Allocate portable attribute.
const int num_entries = static_cast<int>(point_ids.size());
const int num_components = attribute.num_components();
std::unique_ptr<PointAttribute> portable_attribute = InitPortableAttribute(
num_entries, num_components, num_points, attribute, true);
// Quantize all values using the order given by point_ids.
int32_t *const portable_attribute_data = reinterpret_cast<int32_t *>(
portable_attribute->GetAddress(AttributeValueIndex(0)));
target_attribute->GetAddress(AttributeValueIndex(0)));
const uint32_t max_quantized_value = (1 << (quantization_bits_)) - 1;
Quantizer quantizer;
quantizer.Init(range(), max_quantized_value);
@@ -181,7 +255,6 @@ AttributeQuantizationTransform::GeneratePortableAttribute(
portable_attribute_data[dst_index++] = q_val;
}
}
return portable_attribute;
}
} // namespace draco
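In symbols, with b quantization bits, per-component minimum m, and range r, the forward and inverse transforms above implement

q = \operatorname{round}\big((v - m)\,\tfrac{2^b - 1}{r}\big), \qquad v' = m + q\,\tfrac{r}{2^b - 1}

so each reconstructed component lies within half a quantization step, r / (2(2^b - 1)), of the original. (Stated from the Quantizer/Dequantizer usage in this file; the exact rounding mode lives inside those helpers.)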

View File

@@ -37,14 +37,24 @@ class AttributeQuantizationTransform : public AttributeTransform {
void CopyToAttributeTransformData(
AttributeTransformData *out_data) const override;
void SetParameters(int quantization_bits, const float *min_values,
bool TransformAttribute(const PointAttribute &attribute,
const std::vector<PointIndex> &point_ids,
PointAttribute *target_attribute) override;
bool InverseTransformAttribute(const PointAttribute &attribute,
PointAttribute *target_attribute) override;
bool SetParameters(int quantization_bits, const float *min_values,
int num_components, float range);
bool ComputeParameters(const PointAttribute &attribute,
const int quantization_bits);
// Encode relevant parameters into buffer.
bool EncodeParameters(EncoderBuffer *encoder_buffer) const;
bool EncodeParameters(EncoderBuffer *encoder_buffer) const override;
bool DecodeParameters(const PointAttribute &attribute,
DecoderBuffer *decoder_buffer) override;
int32_t quantization_bits() const { return quantization_bits_; }
float min_value(int axis) const { return min_values_[axis]; }
@@ -52,16 +62,30 @@ class AttributeQuantizationTransform : public AttributeTransform {
float range() const { return range_; }
bool is_initialized() const { return quantization_bits_ != -1; }
protected:
// Create portable attribute using 1:1 mapping between points in the input and
// output attribute.
std::unique_ptr<PointAttribute> GeneratePortableAttribute(
const PointAttribute &attribute, int num_points) const;
void GeneratePortableAttribute(const PointAttribute &attribute,
int num_points,
PointAttribute *target_attribute) const;
// Create portable attribute using custom mapping between input and output
// points.
std::unique_ptr<PointAttribute> GeneratePortableAttribute(
const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
int num_points) const;
void GeneratePortableAttribute(const PointAttribute &attribute,
const std::vector<PointIndex> &point_ids,
int num_points,
PointAttribute *target_attribute) const;
DataType GetTransformedDataType(
const PointAttribute &attribute) const override {
return DT_UINT32;
}
int GetTransformedNumComponents(
const PointAttribute &attribute) const override {
return attribute.num_components();
}
static bool IsQuantizationValid(int quantization_bits);
private:
int32_t quantization_bits_;

View File

@@ -24,21 +24,18 @@ bool AttributeTransform::TransferToAttribute(PointAttribute *attribute) const {
return true;
}
std::unique_ptr<PointAttribute> AttributeTransform::InitPortableAttribute(
int num_entries, int num_components, int num_points,
const PointAttribute &attribute, bool is_unsigned) const {
const DataType dt = is_unsigned ? DT_UINT32 : DT_INT32;
GeometryAttribute va;
va.Init(attribute.attribute_type(), nullptr, num_components, dt, false,
std::unique_ptr<PointAttribute> AttributeTransform::InitTransformedAttribute(
const PointAttribute &src_attribute, int num_entries) {
const int num_components = GetTransformedNumComponents(src_attribute);
const DataType dt = GetTransformedDataType(src_attribute);
GeometryAttribute ga;
ga.Init(src_attribute.attribute_type(), nullptr, num_components, dt, false,
num_components * DataTypeLength(dt), 0);
std::unique_ptr<PointAttribute> portable_attribute(new PointAttribute(va));
portable_attribute->Reset(num_entries);
if (num_points) {
portable_attribute->SetExplicitMapping(num_points);
} else {
portable_attribute->SetIdentityMapping();
}
return portable_attribute;
std::unique_ptr<PointAttribute> transformed_attribute(new PointAttribute(ga));
transformed_attribute->Reset(num_entries);
transformed_attribute->SetIdentityMapping();
transformed_attribute->set_unique_id(src_attribute.unique_id());
return transformed_attribute;
}
} // namespace draco

View File

@@ -17,6 +17,8 @@
#include "draco/attributes/attribute_transform_data.h"
#include "draco/attributes/point_attribute.h"
#include "draco/core/decoder_buffer.h"
#include "draco/core/encoder_buffer.h"
namespace draco {
@@ -35,10 +37,38 @@ class AttributeTransform {
AttributeTransformData *out_data) const = 0;
bool TransferToAttribute(PointAttribute *attribute) const;
// Applies the transform to |attribute| and stores the result in
// |target_attribute|. |point_ids| is an optional vector that can be used to
// remap values during the transform.
virtual bool TransformAttribute(const PointAttribute &attribute,
const std::vector<PointIndex> &point_ids,
PointAttribute *target_attribute) = 0;
// Applies an inverse transform to |attribute| and stores the result in
// |target_attribute|. In this case, |attribute| is an attribute that was
// already transformed (e.g. quantized) and |target_attribute| is the
// attribute before the transformation.
virtual bool InverseTransformAttribute(const PointAttribute &attribute,
PointAttribute *target_attribute) = 0;
// Encodes all data needed by the transformation into the |encoder_buffer|.
virtual bool EncodeParameters(EncoderBuffer *encoder_buffer) const = 0;
// Decodes all data needed to transform |attribute| back to the original
// format.
virtual bool DecodeParameters(const PointAttribute &attribute,
DecoderBuffer *decoder_buffer) = 0;
// Initializes a transformed attribute that can be used as target in the
// TransformAttribute() function call.
virtual std::unique_ptr<PointAttribute> InitTransformedAttribute(
const PointAttribute &src_attribute, int num_entries);
protected:
std::unique_ptr<PointAttribute> InitPortableAttribute(
int num_entries, int num_components, int num_points,
const PointAttribute &attribute, bool is_unsigned) const;
virtual DataType GetTransformedDataType(
const PointAttribute &attribute) const = 0;
virtual int GetTransformedNumComponents(
const PointAttribute &attribute) const = 0;
};
} // namespace draco

View File

@@ -43,10 +43,6 @@ void GeometryAttribute::Init(GeometryAttribute::Type attribute_type,
}
bool GeometryAttribute::CopyFrom(const GeometryAttribute &src_att) {
if (buffer_ == nullptr || src_att.buffer_ == nullptr) {
return false;
}
buffer_->Update(src_att.buffer_->data(), src_att.buffer_->data_size());
num_components_ = src_att.num_components_;
data_type_ = src_att.data_type_;
normalized_ = src_att.normalized_;
@@ -55,6 +51,14 @@ bool GeometryAttribute::CopyFrom(const GeometryAttribute &src_att) {
attribute_type_ = src_att.attribute_type_;
buffer_descriptor_ = src_att.buffer_descriptor_;
unique_id_ = src_att.unique_id_;
if (src_att.buffer_ == nullptr) {
buffer_ = nullptr;
} else {
if (buffer_ == nullptr) {
return false;
}
buffer_->Update(src_att.buffer_->data(), src_att.buffer_->data_size());
}
return true;
}

View File

@@ -21,6 +21,7 @@
#include "draco/attributes/geometry_indices.h"
#include "draco/core/data_buffer.h"
#include "draco/core/hash_utils.h"
#include "draco/draco_features.h"
namespace draco {
@@ -51,6 +52,16 @@ class GeometryAttribute {
// predefined use case. Such attributes are often used for a shader specific
// data.
GENERIC,
#ifdef DRACO_TRANSCODER_SUPPORTED
// TODO(ostava): Adding a new attribute would be bit-stream change for GLTF.
// Older decoders wouldn't know what to do with this attribute type. This
// should be open-sourced only when we are ready to increase our bit-stream
// version.
TANGENT,
MATERIAL,
JOINTS,
WEIGHTS,
#endif
// Total number of different attribute types.
// Always keep behind all named attributes.
NAMED_ATTRIBUTES_COUNT,
@@ -111,6 +122,9 @@ class GeometryAttribute {
const int64_t byte_pos = GetBytePos(att_index);
return buffer_->data() + byte_pos;
}
inline bool IsAddressValid(const uint8_t *address) const {
return ((buffer_->data() + buffer_->data_size()) > address);
}
// Fills out_data with the raw value of the requested attribute entry.
// out_data must be at least byte_stride_ long.
@@ -263,7 +277,35 @@ class GeometryAttribute {
// Convert all components available in both the original and output formats.
for (int i = 0; i < std::min(num_components_, out_num_components); ++i) {
if (!IsAddressValid(src_address)) {
return false;
}
const T in_value = *reinterpret_cast<const T *>(src_address);
// Make sure the in_value fits within the range of values that OutT
// is able to represent. Perform the check only for integral types.
if (std::is_integral<T>::value && std::is_integral<OutT>::value) {
#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable:4804)
#endif
#if defined(__GNUC__) && !defined(__clang__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wbool-compare"
#endif
static constexpr OutT kOutMin =
std::is_signed<T>::value ? std::numeric_limits<OutT>::lowest() : 0;
if (in_value < kOutMin || in_value > std::numeric_limits<OutT>::max()) {
return false;
}
#ifdef __GNUC__
# pragma GCC diagnostic pop
#endif
#ifdef _MSC_VER
# pragma warning(pop)
#endif
}
out_value[i] = static_cast<OutT>(in_value);
// When converting integer to floating point, normalize the value if
// necessary.
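A minimal standalone sketch of the same range guard using the C++20 std::cmp_* helpers, which compare mixed signed/unsigned integers correctly and avoid the warning pragmas needed above (illustration only, not Draco code):

#include <limits>
#include <type_traits>
#include <utility>

template <typename OutT, typename T>
constexpr bool FitsIn(T value) {
  static_assert(std::is_integral<T>::value && std::is_integral<OutT>::value,
                "Integral types only.");
  // std::cmp_* performs a mathematically correct comparison even when the
  // operands differ in signedness.
  return std::cmp_greater_equal(value, std::numeric_limits<OutT>::lowest()) &&
         std::cmp_less_equal(value, std::numeric_limits<OutT>::max());
}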

View File

@@ -222,4 +222,47 @@ AttributeValueIndex::ValueType PointAttribute::DeduplicateFormattedValues(
}
#endif
#ifdef DRACO_TRANSCODER_SUPPORTED
void PointAttribute::RemoveUnusedValues() {
if (is_mapping_identity()) {
return; // For identity mapping, all values are always used.
}
// For explicit mapping, we need to check whether any point is mapped to a
// value. If not, the value can be deleted.
IndexTypeVector<AttributeValueIndex, bool> is_value_used(size(), false);
int num_used_values = 0;
for (PointIndex pi(0); pi < indices_map_.size(); ++pi) {
const AttributeValueIndex avi = indices_map_[pi];
if (!is_value_used[avi]) {
is_value_used[avi] = true;
num_used_values++;
}
}
if (num_used_values == size()) {
return; // All values are used.
}
// Remap the values and update the point to value mapping.
IndexTypeVector<AttributeValueIndex, AttributeValueIndex>
old_to_new_value_map(size(), kInvalidAttributeValueIndex);
AttributeValueIndex new_avi(0);
for (AttributeValueIndex avi(0); avi < size(); ++avi) {
if (!is_value_used[avi]) {
continue;
}
if (avi != new_avi) {
SetAttributeValue(new_avi, GetAddress(avi));
}
old_to_new_value_map[avi] = new_avi++;
}
// Remap all points to the new attribute values.
for (PointIndex pi(0); pi < indices_map_.size(); ++pi) {
indices_map_[pi] = old_to_new_value_map[indices_map_[pi]];
}
num_unique_entries_ = num_used_values;
}
#endif
} // namespace draco
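The same compaction scheme on plain vectors, as a self-contained illustration of what RemoveUnusedValues() does (values stand in for the attribute buffer entries, index_map for indices_map_; two linear passes):

#include <vector>

void CompactValues(std::vector<int> &values, std::vector<int> &index_map) {
  // Pass 1: mark which values are referenced by at least one point.
  std::vector<bool> used(values.size(), false);
  for (int avi : index_map) used[avi] = true;
  // Pass 2: shift used values down and record the old-to-new remapping.
  std::vector<int> old_to_new(values.size(), -1);
  int new_avi = 0;
  for (int avi = 0; avi < static_cast<int>(values.size()); ++avi) {
    if (!used[avi]) continue;
    values[new_avi] = values[avi];
    old_to_new[avi] = new_avi++;
  }
  values.resize(new_avi);
  // Remap every point to its value's new index.
  for (int &avi : index_map) avi = old_to_new[avi];
}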

View File

@@ -133,6 +133,12 @@ class PointAttribute : public GeometryAttribute {
return attribute_transform_data_.get();
}
#ifdef DRACO_TRANSCODER_SUPPORTED
// Removes unused values from the attribute. Value is unused when no point
// is mapped to the value. Only applicable when the mapping is not identity.
void RemoveUnusedValues();
#endif
private:
#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
template <typename T>

View File

@@ -43,9 +43,18 @@ bool AttributesDecoder::DecodeAttributesDecoderData(DecoderBuffer *in_buffer) {
return false;
}
}
// Check that decoded number of attributes is valid.
if (num_attributes == 0) {
return false;
}
if (num_attributes > 5 * in_buffer->remaining_size()) {
// The decoded number of attributes is unreasonably high, because at least
// five bytes of attribute descriptor data per attribute are expected.
return false;
}
// Decode attribute descriptor data.
point_attribute_ids_.resize(num_attributes);
PointCloud *pc = point_cloud_;
for (uint32_t i = 0; i < num_attributes; ++i) {
@@ -69,9 +78,14 @@ bool AttributesDecoder::DecodeAttributesDecoderData(DecoderBuffer *in_buffer) {
if (data_type == DT_INVALID || data_type >= DT_TYPES_COUNT) {
return false;
}
const DataType draco_dt = static_cast<DataType>(data_type);
// Add the attribute to the point cloud
// Check decoded attribute descriptor data.
if (num_components == 0) {
return false;
}
// Add the attribute to the point cloud.
const DataType draco_dt = static_cast<DataType>(data_type);
GeometryAttribute ga;
ga.Init(static_cast<GeometryAttribute::Type>(att_type), nullptr,
num_components, draco_dt, normalized > 0,
@@ -90,7 +104,9 @@ bool AttributesDecoder::DecodeAttributesDecoderData(DecoderBuffer *in_buffer) {
} else
#endif
{
DecodeVarint(&unique_id, in_buffer);
if (!DecodeVarint(&unique_id, in_buffer)) {
return false;
}
ga.set_unique_id(unique_id);
}
const int att_id = pc->AddAttribute(

View File

@@ -15,14 +15,16 @@
#include "draco/compression/attributes/attributes_encoder.h"
#include "draco/core/varint_encoding.h"
#include "draco/draco_features.h"
namespace draco {
AttributesEncoder::AttributesEncoder()
: point_cloud_encoder_(nullptr), point_cloud_(nullptr) {}
AttributesEncoder::AttributesEncoder(int att_id) : AttributesEncoder() {
AddAttributeId(att_id);
AttributesEncoder::AttributesEncoder(int point_attrib_id)
: AttributesEncoder() {
AddAttributeId(point_attrib_id);
}
bool AttributesEncoder::Init(PointCloudEncoder *encoder, const PointCloud *pc) {
@@ -37,7 +39,15 @@ bool AttributesEncoder::EncodeAttributesEncoderData(EncoderBuffer *out_buffer) {
for (uint32_t i = 0; i < num_attributes(); ++i) {
const int32_t att_id = point_attribute_ids_[i];
const PointAttribute *const pa = point_cloud_->attribute(att_id);
out_buffer->Encode(static_cast<uint8_t>(pa->attribute_type()));
GeometryAttribute::Type type = pa->attribute_type();
#ifdef DRACO_TRANSCODER_SUPPORTED
// Attribute types TANGENT, MATERIAL, JOINTS, and WEIGHTS are not supported
// in the official bitstream. They will be encoded as GENERIC.
if (type > GeometryAttribute::GENERIC) {
type = GeometryAttribute::GENERIC;
}
#endif
out_buffer->Encode(static_cast<uint8_t>(type));
out_buffer->Encode(static_cast<uint8_t>(pa->data_type()));
out_buffer->Encode(static_cast<uint8_t>(pa->num_components()));
out_buffer->Encode(static_cast<uint8_t>(pa->normalized()));

View File

@@ -72,7 +72,7 @@ class PointAttributeVectorOutputIterator {
Self &operator*() { return *this; }
// Still needed in some cases.
// TODO(hemmer): remove.
// TODO(b/199760123): Remove.
// hardcoded to 3 based on legacy usage.
const Self &operator=(const VectorD<CoeffT, 3> &val) {
DRACO_DCHECK_EQ(attributes_.size(), 1); // Expect only ONE attribute.
@@ -278,8 +278,10 @@ bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms(
return false;
}
AttributeQuantizationTransform transform;
transform.SetParameters(quantization_bits, min_value.data(),
num_components, max_value_dif);
if (!transform.SetParameters(quantization_bits, min_value.data(),
num_components, max_value_dif)) {
return false;
}
const int num_transforms =
static_cast<int>(attribute_quantization_transforms_.size());
if (!transform.TransferToAttribute(
@@ -293,7 +295,9 @@ bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms(
// Decode transform data for signed integer attributes.
for (int i = 0; i < min_signed_values_.size(); ++i) {
int32_t val;
DecodeVarint(&val, in_buffer);
if (!DecodeVarint(&val, in_buffer)) {
return false;
}
min_signed_values_[i] = val;
}
return true;
@@ -353,8 +357,9 @@ bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms(
return false;
}
if (6 < compression_level) {
LOGE("KdTreeAttributesDecoder: compression level %i not supported.\n",
compression_level);
DRACO_LOGE(
"KdTreeAttributesDecoder: compression level %i not supported.\n",
compression_level);
return false;
}
@@ -371,7 +376,7 @@ bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms(
GetDecoder()->point_cloud()->attribute(att_id);
attr->Reset(num_points);
attr->SetIdentityMapping();
};
}
PointAttributeVectorOutputIterator<uint32_t> out_it(atts);

View File

@@ -71,16 +71,21 @@ bool KdTreeAttributesEncoder::TransformAttributesToPortableFormat() {
att->num_components(), range);
} else {
// Compute quantization settings from the attribute values.
attribute_quantization_transform.ComputeParameters(*att,
quantization_bits);
if (!attribute_quantization_transform.ComputeParameters(
*att, quantization_bits)) {
return false;
}
}
attribute_quantization_transforms_.push_back(
attribute_quantization_transform);
// Store the quantized attribute in an array that will be used when we do
// the actual encoding of the data.
quantized_portable_attributes_.push_back(
attribute_quantization_transform.GeneratePortableAttribute(
*att, static_cast<int>(num_points)));
auto portable_att =
attribute_quantization_transform.InitTransformedAttribute(*att,
num_points);
attribute_quantization_transform.TransformAttribute(*att, {},
portable_att.get());
quantized_portable_attributes_.push_back(std::move(portable_att));
} else if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 ||
att->data_type() == DT_INT8) {
// For signed types, find the minimum value for each component. These

View File

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINT_CLOUD_KD_TREE_ATTRIBUTES_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_POINT_CLOUD_KD_TREE_ATTRIBUTES_ENCODER_H_
#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_
#include "draco/attributes/attribute_quantization_transform.h"
#include "draco/compression/attributes/attributes_encoder.h"
@@ -48,4 +48,4 @@ class KdTreeAttributesEncoder : public AttributesEncoder {
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_POINT_CLOUD_KD_TREE_ATTRIBUTES_ENCODER_H_
#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_

View File

@@ -53,6 +53,7 @@ class OctahedronToolBox {
: quantization_bits_(-1),
max_quantized_value_(-1),
max_value_(-1),
dequantization_scale_(1.f),
center_value_(-1) {}
bool SetQuantizationBits(int32_t q) {
@@ -62,6 +63,7 @@ class OctahedronToolBox {
quantization_bits_ = q;
max_quantized_value_ = (1 << quantization_bits_) - 1;
max_value_ = max_quantized_value_ - 1;
dequantization_scale_ = 2.f / max_value_;
center_value_ = max_value_ / 2;
return true;
}
@@ -192,64 +194,11 @@ class OctahedronToolBox {
}
}
// TODO(b/149328891): Change function to not use templates as |T| is only
// float.
template <typename T>
void OctaherdalCoordsToUnitVector(T in_s, T in_t, T *out_vector) const {
DRACO_DCHECK_GE(in_s, 0);
DRACO_DCHECK_GE(in_t, 0);
DRACO_DCHECK_LE(in_s, 1);
DRACO_DCHECK_LE(in_t, 1);
T s = in_s;
T t = in_t;
T spt = s + t;
T smt = s - t;
T x_sign = 1.0;
if (spt >= 0.5 && spt <= 1.5 && smt >= -0.5 && smt <= 0.5) {
// Right hemisphere. Don't do anything.
} else {
// Left hemisphere.
x_sign = -1.0;
if (spt <= 0.5) {
s = 0.5 - in_t;
t = 0.5 - in_s;
} else if (spt >= 1.5) {
s = 1.5 - in_t;
t = 1.5 - in_s;
} else if (smt <= -0.5) {
s = in_t - 0.5;
t = in_s + 0.5;
} else {
s = in_t + 0.5;
t = in_s - 0.5;
}
spt = s + t;
smt = s - t;
}
const T y = 2.0 * s - 1.0;
const T z = 2.0 * t - 1.0;
const T x = std::min(std::min(2.0 * spt - 1.0, 3.0 - 2.0 * spt),
std::min(2.0 * smt + 1.0, 1.0 - 2.0 * smt)) *
x_sign;
// Normalize the computed vector.
const T normSquared = x * x + y * y + z * z;
if (normSquared < 1e-6) {
out_vector[0] = 0;
out_vector[1] = 0;
out_vector[2] = 0;
} else {
const T d = 1.0 / std::sqrt(normSquared);
out_vector[0] = x * d;
out_vector[1] = y * d;
out_vector[2] = z * d;
}
}
template <typename T>
void QuantizedOctaherdalCoordsToUnitVector(int32_t in_s, int32_t in_t,
T *out_vector) const {
T scale = 1.0 / static_cast<T>(max_value_);
OctaherdalCoordsToUnitVector(in_s * scale, in_t * scale, out_vector);
inline void QuantizedOctahedralCoordsToUnitVector(int32_t in_s, int32_t in_t,
float *out_vector) const {
OctahedralCoordsToUnitVector(in_s * dequantization_scale_ - 1.f,
in_t * dequantization_scale_ - 1.f,
out_vector);
}
// |s| and |t| are expected to be signed values.
@@ -333,9 +282,77 @@ class OctahedronToolBox {
int32_t center_value() const { return center_value_; }
private:
inline void OctahedralCoordsToUnitVector(float in_s_scaled, float in_t_scaled,
float *out_vector) const {
// Background about the encoding:
// A normal is encoded in a normalized space <s, t> depicted below. The
// encoding corresponds to an octahedron that is unwrapped to a 2D plane.
// During encoding, a normal is projected to the surface of the octahedron
// and the projection is then unwrapped to the 2D plane. Decoding is the
// reverse of this process.
// All points in the central diamond are located on triangles on the
// right "hemisphere" of the octahedron while all points outside of the
// diamond are on the left hemisphere (basically, they would have to be
// wrapped along the diagonal edges to form the octahedron). The central
// point corresponds to the rightmost vertex of the octahedron and all
// corners of the plane correspond to the leftmost vertex of the
// octahedron.
//
// t
// ^ *-----*-----*
// | |    /|\    |
// | |   / | \   |
// | |  /  |  \  |
// | | /   |   \ |
// | *-----*-----*
// | | \   |   / |
// | |  \  |  /  |
// | |   \ | /   |
// | |    \|/    |
// | *-----*-----*  --> s
// Note that the input |in_s_scaled| and |in_t_scaled| are already scaled to
// <-1, 1> range. This way, the central point is at coordinate (0, 0).
float y = in_s_scaled;
float z = in_t_scaled;
// The remaining coordinate can be computed by projecting the (y, z) values onto
// the surface of the octahedron.
const float x = 1.f - std::abs(y) - std::abs(z);
// |x| is essentially a signed distance from the diagonal edges of the
// diamond shown on the figure above. It is positive for all points in the
// diamond (right hemisphere) and negative for all points outside the
// diamond (left hemisphere). For all points on the left hemisphere we need
// to update their (y, z) coordinates to account for the wrapping along
// the edges of the diamond.
float x_offset = -x;
x_offset = x_offset < 0 ? 0 : x_offset;
// This will do nothing for the points on the right hemisphere but it will
// mirror the (y, z) location along the nearest diagonal edge of the
// diamond.
y += y < 0 ? x_offset : -x_offset;
z += z < 0 ? x_offset : -x_offset;
// Normalize the computed vector.
const float norm_squared = x * x + y * y + z * z;
if (norm_squared < 1e-6) {
out_vector[0] = 0;
out_vector[1] = 0;
out_vector[2] = 0;
} else {
const float d = 1.0f / std::sqrt(norm_squared);
out_vector[0] = x * d;
out_vector[1] = y * d;
out_vector[2] = z * d;
}
}
int32_t quantization_bits_;
int32_t max_quantized_value_;
int32_t max_value_;
float dequantization_scale_;
int32_t center_value_;
};
} // namespace draco
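For completeness, the encoder-side mapping (unit vector to octahedral <s, t>) is the inverse of the decode above: project onto the octahedron by dividing by the L1 norm, then fold the left hemisphere over the diagonal edges of the diamond. A hedged standalone sketch of that idea; the function name is illustrative, and Draco's own encoder path is FloatVectorToQuantizedOctahedralCoords() in this class:

#include <cmath>

// Assumes a nonzero input vector; outputs s, t in the <-1, 1> range that
// OctahedralCoordsToUnitVector() above expects.
static void UnitVectorToOctahedralCoords(const float v[3], float *s, float *t) {
  const float l1 = std::abs(v[0]) + std::abs(v[1]) + std::abs(v[2]);
  float y = v[1] / l1;
  float z = v[2] / l1;
  if (v[0] < 0.f) {
    // Left hemisphere: mirror along the nearest diagonal edge of the diamond.
    const float folded_y = (1.f - std::abs(z)) * (y >= 0.f ? 1.f : -1.f);
    const float folded_z = (1.f - std::abs(y)) * (z >= 0.f ? 1.f : -1.f);
    y = folded_y;
    z = folded_z;
  }
  *s = y;
  *t = z;
}

Quantizing to b bits then maps s and t from <-1, 1> onto <0, max_value_>, i.e. the inverse of the dequantization_scale_ applied in the decode path.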

View File

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_MESH_PREDICTION_SCHEMES_PREDICTION_SCHEME_DATA_H_
#define DRACO_COMPRESSION_ATTRIBUTES_MESH_PREDICTION_SCHEMES_PREDICTION_SCHEME_DATA_H_
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_
#include "draco/mesh/corner_table.h"
#include "draco/mesh/mesh.h"

View File

@@ -69,7 +69,14 @@ class MeshPredictionSchemeGeometricNormalPredictorArea
// Computing cross product.
const VectorD<int64_t, 3> cross = CrossProduct(delta_next, delta_prev);
normal = normal + cross;
// Prevent signed integer overflows by doing math as unsigned.
auto normal_data = reinterpret_cast<uint64_t *>(normal.data());
auto cross_data = reinterpret_cast<const uint64_t *>(cross.data());
normal_data[0] = normal_data[0] + cross_data[0];
normal_data[1] = normal_data[1] + cross_data[1];
normal_data[2] = normal_data[2] + cross_data[2];
cit.Next();
}

View File

@@ -60,8 +60,13 @@ inline bool ComputeParallelogramPrediction(
const int v_next_off = vert_next * num_components;
const int v_prev_off = vert_prev * num_components;
for (int c = 0; c < num_components; ++c) {
out_prediction[c] = (in_data[v_next_off + c] + in_data[v_prev_off + c]) -
in_data[v_opp_off + c];
const int64_t in_data_next_off = in_data[v_next_off + c];
const int64_t in_data_prev_off = in_data[v_prev_off + c];
const int64_t in_data_opp_off = in_data[v_opp_off + c];
const int64_t result =
(in_data_next_off + in_data_prev_off) - in_data_opp_off;
out_prediction[c] = static_cast<DataTypeT>(result);
}
return true;
}
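The int64 widening above guards the parallelogram completion

\hat{v}_c = v_{\text{next},c} + v_{\text{prev},c} - v_{\text{opp},c}

whose intermediate sum can leave the 32-bit range for extreme inputs; with each term bounded in magnitude by 2^{31}, the int64 intermediate stays below 3 \cdot 2^{31} \ll 2^{63} before the final cast back to DataTypeT.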

View File

@@ -156,6 +156,13 @@ bool MeshPredictionSchemeTexCoordsPortablePredictor<
const VectorD<int64_t, 2> x_uv =
n_uv * pn_norm2_squared + (cn_dot_pn * pn_uv);
const int64_t pn_absmax_element =
std::max(std::max(std::abs(pn[0]), std::abs(pn[1])), std::abs(pn[2]));
if (cn_dot_pn > std::numeric_limits<int64_t>::max() / pn_absmax_element) {
// Return false if the squared length calculation would overflow.
return false;
}
// Compute squared length of vector CX in position coordinate system:
const VectorD<int64_t, 3> x_pos =
next_pos + (cn_dot_pn * pn) / pn_norm2_squared;
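The check above is the usual pre-multiplication overflow test: for non-negative a and positive b, a * b exceeds INT64_MAX exactly when a > INT64_MAX / b. A minimal sketch of that predicate (negative operands would need the symmetric lower bounds):

#include <cstdint>
#include <limits>

inline bool MulWouldOverflowI64(int64_t a, int64_t b) {
  // Assumes a >= 0 and b > 0, matching the cn_dot_pn check above.
  return a > std::numeric_limits<int64_t>::max() / b;
}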

View File

@@ -18,34 +18,51 @@ namespace draco {
PredictionSchemeMethod SelectPredictionMethod(
int att_id, const PointCloudEncoder *encoder) {
if (encoder->options()->GetSpeed() >= 10) {
return SelectPredictionMethod(att_id, *encoder->options(), encoder);
}
PredictionSchemeMethod SelectPredictionMethod(
int att_id, const EncoderOptions &options,
const PointCloudEncoder *encoder) {
if (options.GetSpeed() >= 10) {
// Selected fastest, though still doing some compression.
return PREDICTION_DIFFERENCE;
}
if (encoder->GetGeometryType() == TRIANGULAR_MESH) {
// Use speed setting to select the best encoding method.
const PointAttribute *const att = encoder->point_cloud()->attribute(att_id);
if (att->attribute_type() == GeometryAttribute::TEX_COORD) {
if (encoder->options()->GetSpeed() < 4) {
if (att->attribute_type() == GeometryAttribute::TEX_COORD &&
att->num_components() == 2) {
if (options.GetSpeed() < 4) {
// Use texture coordinate prediction for speeds 0, 1, 2, 3.
return MESH_PREDICTION_TEX_COORDS_PORTABLE;
}
}
if (att->attribute_type() == GeometryAttribute::NORMAL) {
#ifdef DRACO_NORMAL_ENCODING_SUPPORTED
if (encoder->options()->GetSpeed() < 4) {
if (options.GetSpeed() < 4) {
// Use geometric normal prediction for speeds 0, 1, 2, 3.
return MESH_PREDICTION_GEOMETRIC_NORMAL;
// For this prediction, the position attribute needs to be either
// integer or quantized as well.
const int pos_att_id = encoder->point_cloud()->GetNamedAttributeId(
GeometryAttribute::POSITION);
const PointAttribute *const pos_att =
encoder->point_cloud()->GetNamedAttribute(
GeometryAttribute::POSITION);
if (pos_att && (IsDataTypeIntegral(pos_att->data_type()) ||
options.GetAttributeInt(pos_att_id, "quantization_bits",
-1) > 0)) {
return MESH_PREDICTION_GEOMETRIC_NORMAL;
}
}
#endif
return PREDICTION_DIFFERENCE; // default
}
// Handle other attribute types.
if (encoder->options()->GetSpeed() >= 8) {
if (options.GetSpeed() >= 8) {
return PREDICTION_DIFFERENCE;
}
if (encoder->options()->GetSpeed() >= 2 ||
encoder->point_cloud()->num_points() < 40) {
if (options.GetSpeed() >= 2 || encoder->point_cloud()->num_points() < 40) {
// Parallelogram prediction is used for speeds 2 - 7 or when the overhead
// of using constrained multi-parallelogram would be too high.
return MESH_PREDICTION_PARALLELOGRAM;

View File

@@ -38,6 +38,10 @@ namespace draco {
PredictionSchemeMethod SelectPredictionMethod(int att_id,
const PointCloudEncoder *encoder);
PredictionSchemeMethod SelectPredictionMethod(int att_id,
const EncoderOptions &options,
const PointCloudEncoder *encoder);
// Factory class for creating mesh prediction schemes.
template <typename DataTypeT>
struct MeshPredictionSchemeEncoderFactory {

View File

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_INTERFACE_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_INTERFACE_H_
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_INTERFACE_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_INTERFACE_H_
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h"
#include "draco/core/encoder_buffer.h"
@@ -52,4 +52,4 @@ class PredictionSchemeTypedEncoderInterface
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_INTERFACE_H_
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_INTERFACE_H_

View File

@@ -36,9 +36,25 @@ class PredictionSchemeWrapDecodingTransform
inline void ComputeOriginalValue(const DataTypeT *predicted_vals,
const CorrTypeT *corr_vals,
DataTypeT *out_original_vals) const {
// For now we assume both |DataTypeT| and |CorrTypeT| are equal.
static_assert(std::is_same<DataTypeT, CorrTypeT>::value,
"Predictions and corrections must have the same type.");
// The only valid implementation right now is for int32_t.
static_assert(std::is_same<DataTypeT, int32_t>::value,
"Only int32_t is supported for predicted values.");
predicted_vals = this->ClampPredictedValue(predicted_vals);
// Perform the wrapping using unsigned coordinates to avoid potential signed
// integer overflows caused by malformed input.
const uint32_t *const uint_predicted_vals =
reinterpret_cast<const uint32_t *>(predicted_vals);
const uint32_t *const uint_corr_vals =
reinterpret_cast<const uint32_t *>(corr_vals);
for (int i = 0; i < this->num_components(); ++i) {
out_original_vals[i] = predicted_vals[i] + corr_vals[i];
out_original_vals[i] =
static_cast<DataTypeT>(uint_predicted_vals[i] + uint_corr_vals[i]);
if (out_original_vals[i] > this->max_value()) {
out_original_vals[i] -= this->max_dif();
} else if (out_original_vals[i] < this->min_value()) {
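A standalone sketch of the wrap idea, assuming max_dif = 1 + max_value - min_value as in the wrap transform base class: the addition happens in uint32_t, where overflow is well-defined modular arithmetic, and the result is folded back into the valid interval (one fold suffices after clamping the prediction).

#include <cstdint>

int32_t WrapAdd(int32_t predicted, int32_t corr, int32_t min_v, int32_t max_v) {
  const int32_t max_dif = 1 + max_v - min_v;
  // Unsigned addition cannot trigger undefined behavior on overflow.
  int32_t out = static_cast<int32_t>(static_cast<uint32_t>(predicted) +
                                     static_cast<uint32_t>(corr));
  if (out > max_v) {
    out -= max_dif;
  } else if (out < min_v) {
    out += max_dif;
  }
  return out;
}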

View File

@@ -73,7 +73,7 @@ class PredictionSchemeWrapTransformBase {
return &clamped_value_[0];
}
// TODO(hemmer): Consider refactoring to avoid this dummy.
// TODO(b/199760123): Consider refactoring to avoid this dummy.
int quantization_bits() const {
DRACO_DCHECK(false);
return -1;

View File

@@ -26,8 +26,8 @@ SequentialAttributeEncodersController::SequentialAttributeEncodersController(
: sequencer_(std::move(sequencer)) {}
SequentialAttributeEncodersController::SequentialAttributeEncodersController(
std::unique_ptr<PointsSequencer> sequencer, int att_id)
: AttributesEncoder(att_id), sequencer_(std::move(sequencer)) {}
std::unique_ptr<PointsSequencer> sequencer, int point_attrib_id)
: AttributesEncoder(point_attrib_id), sequencer_(std::move(sequencer)) {}
bool SequentialAttributeEncodersController::Init(PointCloudEncoder *encoder,
const PointCloud *pc) {

View File

@@ -53,6 +53,11 @@ bool SequentialIntegerAttributeDecoder::DecodeValues(
if (!in_buffer->Decode(&prediction_transform_type)) {
return false;
}
// Check that decoded prediction scheme transform type is valid.
if (prediction_transform_type < PREDICTION_TRANSFORM_NONE ||
prediction_transform_type >= NUM_PREDICTION_SCHEME_TRANSFORM_TYPES) {
return false;
}
prediction_scheme_ = CreateIntPredictionScheme(
static_cast<PredictionSchemeMethod>(prediction_scheme_method),
static_cast<PredictionSchemeTransformType>(prediction_transform_type));
@@ -143,8 +148,9 @@ bool SequentialIntegerAttributeDecoder::DecodeIntegerValues(
return false;
}
for (size_t i = 0; i < num_values; ++i) {
if (!in_buffer->Decode(portable_attribute_data + i, num_bytes))
if (!in_buffer->Decode(portable_attribute_data + i, num_bytes)) {
return false;
}
}
}
}
@@ -223,12 +229,13 @@ void SequentialIntegerAttributeDecoder::StoreTypedValues(uint32_t num_values) {
void SequentialIntegerAttributeDecoder::PreparePortableAttribute(
int num_entries, int num_components) {
GeometryAttribute va;
va.Init(attribute()->attribute_type(), nullptr, num_components, DT_INT32,
GeometryAttribute ga;
ga.Init(attribute()->attribute_type(), nullptr, num_components, DT_INT32,
false, num_components * DataTypeLength(DT_INT32), 0);
std::unique_ptr<PointAttribute> port_att(new PointAttribute(va));
std::unique_ptr<PointAttribute> port_att(new PointAttribute(ga));
port_att->SetIdentityMapping();
port_att->Reset(num_entries);
port_att->set_unique_id(attribute()->unique_id());
SetPortableAttribute(std::move(port_att));
}

View File

@@ -81,6 +81,9 @@ bool SequentialIntegerAttributeEncoder::TransformAttributeToPortableFormat(
value_to_value_map[orig_att->mapped_index(point_ids[i])] =
AttributeValueIndex(i);
}
if (portable_att->is_mapping_identity()) {
portable_att->SetExplicitMapping(encoder()->point_cloud()->num_points());
}
// Go over all points of the original attribute and update the mapping in
// the portable attribute.
for (PointIndex i(0); i < encoder()->point_cloud()->num_points(); ++i) {

View File

@@ -14,18 +14,17 @@
//
#include "draco/compression/attributes/sequential_normal_attribute_decoder.h"
#include "draco/attributes/attribute_octahedron_transform.h"
#include "draco/compression/attributes/normal_compression_utils.h"
namespace draco {
SequentialNormalAttributeDecoder::SequentialNormalAttributeDecoder()
: quantization_bits_(-1) {}
SequentialNormalAttributeDecoder::SequentialNormalAttributeDecoder() {}
bool SequentialNormalAttributeDecoder::Init(PointCloudDecoder *decoder,
int attribute_id) {
if (!SequentialIntegerAttributeDecoder::Init(decoder, attribute_id))
if (!SequentialIntegerAttributeDecoder::Init(decoder, attribute_id)) {
return false;
}
// Currently, this encoder works only for 3-component normal vectors.
if (attribute()->num_components() != 3) {
return false;
@@ -41,11 +40,13 @@ bool SequentialNormalAttributeDecoder::DecodeIntegerValues(
const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer) {
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
if (decoder()->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) {
uint8_t quantization_bits;
if (!in_buffer->Decode(&quantization_bits)) {
// Note: in older bitstreams, we do not have a PortableAttribute() decoded
// at this stage so we cannot pass it down to the DecodeParameters() call.
// It still works fine for octahedral transform because it does not need to
// use any data from the attribute.
if (!octahedral_transform_.DecodeParameters(*attribute(), in_buffer)) {
return false;
}
quantization_bits_ = quantization_bits;
}
#endif
return SequentialIntegerAttributeDecoder::DecodeIntegerValues(point_ids,
@@ -56,39 +57,20 @@ bool SequentialNormalAttributeDecoder::DecodeDataNeededByPortableTransform(
const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer) {
if (decoder()->bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 0)) {
// For newer file version, decode attribute transform data here.
uint8_t quantization_bits;
if (!in_buffer->Decode(&quantization_bits)) {
if (!octahedral_transform_.DecodeParameters(*GetPortableAttribute(),
in_buffer)) {
return false;
}
quantization_bits_ = quantization_bits;
}
// Store the decoded transform data in portable attribute.
AttributeOctahedronTransform octahedral_transform;
octahedral_transform.SetParameters(quantization_bits_);
return octahedral_transform.TransferToAttribute(portable_attribute());
return octahedral_transform_.TransferToAttribute(portable_attribute());
}
bool SequentialNormalAttributeDecoder::StoreValues(uint32_t num_points) {
// Convert all quantized values back to floats.
const int num_components = attribute()->num_components();
const int entry_size = sizeof(float) * num_components;
float att_val[3];
int quant_val_id = 0;
int out_byte_pos = 0;
const int32_t *const portable_attribute_data = GetPortableAttributeData();
OctahedronToolBox octahedron_tool_box;
if (!octahedron_tool_box.SetQuantizationBits(quantization_bits_))
return false;
for (uint32_t i = 0; i < num_points; ++i) {
const int32_t s = portable_attribute_data[quant_val_id++];
const int32_t t = portable_attribute_data[quant_val_id++];
octahedron_tool_box.QuantizedOctaherdalCoordsToUnitVector(s, t, att_val);
// Store the decoded floating point value into the attribute buffer.
attribute()->buffer()->Write(out_byte_pos, att_val, entry_size);
out_byte_pos += entry_size;
}
return true;
return octahedral_transform_.InverseTransformAttribute(
*GetPortableAttribute(), attribute());
}
} // namespace draco

View File

@@ -15,6 +15,7 @@
#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_
#include "draco/attributes/attribute_octahedron_transform.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h"
@@ -42,7 +43,7 @@ class SequentialNormalAttributeDecoder
bool StoreValues(uint32_t num_points) override;
private:
int32_t quantization_bits_;
AttributeOctahedronTransform octahedral_transform_;
std::unique_ptr<PredictionSchemeTypedDecoderInterface<int32_t>>
CreateIntPredictionScheme(

View File

@@ -20,8 +20,9 @@ namespace draco {
bool SequentialNormalAttributeEncoder::Init(PointCloudEncoder *encoder,
int attribute_id) {
if (!SequentialIntegerAttributeEncoder::Init(encoder, attribute_id))
if (!SequentialIntegerAttributeEncoder::Init(encoder, attribute_id)) {
return false;
}
// Currently this encoder works only for 3-component normal vectors.
if (attribute()->num_components() != 3) {
return false;
@@ -44,9 +45,13 @@ bool SequentialNormalAttributeEncoder::EncodeDataNeededByPortableTransform(
bool SequentialNormalAttributeEncoder::PrepareValues(
const std::vector<PointIndex> &point_ids, int num_points) {
SetPortableAttribute(
attribute_octahedron_transform_.GeneratePortableAttribute(
*(attribute()), point_ids, num_points));
auto portable_att = attribute_octahedron_transform_.InitTransformedAttribute(
*(attribute()), point_ids.size());
if (!attribute_octahedron_transform_.TransformAttribute(
*(attribute()), point_ids, portable_att.get())) {
return false;
}
SetPortableAttribute(std::move(portable_att));
return true;
}

View File

@@ -14,13 +14,12 @@
//
#include "draco/compression/attributes/sequential_quantization_attribute_decoder.h"
#include "draco/attributes/attribute_quantization_transform.h"
#include "draco/core/quantization_utils.h"
namespace draco {
SequentialQuantizationAttributeDecoder::SequentialQuantizationAttributeDecoder()
: quantization_bits_(-1), max_value_dif_(0.f) {}
SequentialQuantizationAttributeDecoder::
SequentialQuantizationAttributeDecoder() {}
bool SequentialQuantizationAttributeDecoder::Init(PointCloudDecoder *decoder,
int attribute_id) {
@@ -59,62 +58,31 @@ bool SequentialQuantizationAttributeDecoder::
}
// Store the decoded transform data in portable attribute;
AttributeQuantizationTransform transform;
transform.SetParameters(quantization_bits_, min_value_.get(),
attribute()->num_components(), max_value_dif_);
return transform.TransferToAttribute(portable_attribute());
return quantization_transform_.TransferToAttribute(portable_attribute());
}
bool SequentialQuantizationAttributeDecoder::StoreValues(uint32_t num_values) {
return DequantizeValues(num_values);
bool SequentialQuantizationAttributeDecoder::StoreValues(uint32_t num_points) {
return DequantizeValues(num_points);
}
bool SequentialQuantizationAttributeDecoder::DecodeQuantizedDataInfo() {
const int num_components = attribute()->num_components();
min_value_ = std::unique_ptr<float[]>(new float[num_components]);
if (!decoder()->buffer()->Decode(min_value_.get(),
sizeof(float) * num_components)) {
return false;
// Get attribute used as source for decoding.
auto att = GetPortableAttribute();
if (att == nullptr) {
// This should happen only in the backward compatibility mode. It will still
// work fine for this case because the only thing the quantization transform
// cares about is the number of components, which is the same for both the
// source and target attributes.
att = attribute();
}
if (!decoder()->buffer()->Decode(&max_value_dif_)) {
return false;
}
uint8_t quantization_bits;
if (!decoder()->buffer()->Decode(&quantization_bits) ||
quantization_bits > 31) {
return false;
}
quantization_bits_ = quantization_bits;
return true;
return quantization_transform_.DecodeParameters(*att, decoder()->buffer());
}
bool SequentialQuantizationAttributeDecoder::DequantizeValues(
uint32_t num_values) {
// Convert all quantized values back to floats.
const int32_t max_quantized_value =
(1u << static_cast<uint32_t>(quantization_bits_)) - 1;
const int num_components = attribute()->num_components();
const int entry_size = sizeof(float) * num_components;
const std::unique_ptr<float[]> att_val(new float[num_components]);
int quant_val_id = 0;
int out_byte_pos = 0;
Dequantizer dequantizer;
if (!dequantizer.Init(max_value_dif_, max_quantized_value)) {
return false;
}
const int32_t *const portable_attribute_data = GetPortableAttributeData();
for (uint32_t i = 0; i < num_values; ++i) {
for (int c = 0; c < num_components; ++c) {
float value =
dequantizer.DequantizeFloat(portable_attribute_data[quant_val_id++]);
value = value + min_value_[c];
att_val[c] = value;
}
// Store the floating point value into the attribute buffer.
attribute()->buffer()->Write(out_byte_pos, att_val.get(), entry_size);
out_byte_pos += entry_size;
}
return true;
return quantization_transform_.InverseTransformAttribute(
*GetPortableAttribute(), attribute());
}
} // namespace draco

View File

@@ -15,6 +15,7 @@
#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_DECODER_H_
#include "draco/attributes/attribute_quantization_transform.h"
#include "draco/compression/attributes/sequential_integer_attribute_decoder.h"
#include "draco/draco_features.h"
@@ -43,12 +44,7 @@ class SequentialQuantizationAttributeDecoder
virtual bool DequantizeValues(uint32_t num_values);
private:
// Max number of quantization bits used to encode each component of the
// attribute.
int32_t quantization_bits_;
std::unique_ptr<float[]> min_value_;
float max_value_dif_;
AttributeQuantizationTransform quantization_transform_;
};
} // namespace draco

View File

@@ -50,9 +50,11 @@ bool SequentialQuantizationAttributeEncoder::Init(PointCloudEncoder *encoder,
&quantization_origin[0]);
const float range = encoder->options()->GetAttributeFloat(
attribute_id, "quantization_range", 1.f);
attribute_quantization_transform_.SetParameters(
quantization_bits, quantization_origin.data(),
attribute->num_components(), range);
if (!attribute_quantization_transform_.SetParameters(
quantization_bits, quantization_origin.data(),
attribute->num_components(), range)) {
return false;
}
} else {
// Compute quantization settings from the attribute values.
if (!attribute_quantization_transform_.ComputeParameters(
@@ -70,9 +72,14 @@ bool SequentialQuantizationAttributeEncoder::
bool SequentialQuantizationAttributeEncoder::PrepareValues(
const std::vector<PointIndex> &point_ids, int num_points) {
SetPortableAttribute(
attribute_quantization_transform_.GeneratePortableAttribute(
*(attribute()), point_ids, num_points));
auto portable_attribute =
attribute_quantization_transform_.InitTransformedAttribute(
*attribute(), point_ids.size());
if (!attribute_quantization_transform_.TransformAttribute(
*(attribute()), point_ids, portable_attribute.get())) {
return false;
}
SetPortableAttribute(std::move(portable_attribute));
return true;
}
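Both encoder-side changes in this file follow one pattern: SetParameters() and the former GeneratePortableAttribute() path (now InitTransformedAttribute() plus TransformAttribute()) become fallible, so Init() and PrepareValues() abort on an invalid quantization setup instead of continuing with bad state. A hypothetical illustration of the kind of validation such a setter can report; the accepted range here is an assumption, not taken from the diff:
// Illustration only, not Draco's implementation.
bool SetQuantizationParamsSketch(int quantization_bits, int num_components,
                                 float range) {
  // Assumed bounds: the bit count travels in a single byte on the wire, so
  // only a small window of values can be valid; 1..30 is a plausible window.
  if (quantization_bits < 1 || quantization_bits > 30) return false;
  if (num_components < 1 || range <= 0.f) return false;
  return true;
}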

View File

@@ -42,6 +42,7 @@ enum EncodedGeometryType {
INVALID_GEOMETRY_TYPE = -1,
POINT_CLOUD = 0,
TRIANGULAR_MESH,
NUM_ENCODED_GEOMETRY_TYPES
};
// List of encoding methods for point clouds.
@@ -105,6 +106,8 @@ enum PredictionSchemeTransformType {
// Specialized transform for normal coordinates using canonicalized inverted
// tiles.
PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED = 3,
// The number of valid (non-negative) prediction scheme transform types.
NUM_PREDICTION_SCHEME_TRANSFORM_TYPES
};
// List of all mesh traversal methods supported by Draco framework.

View File

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_SRC_DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_
#define DRACO_SRC_DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_
#ifndef DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_
#define DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_
#include <map>
#include <memory>
@@ -246,4 +246,4 @@ void DracoOptions<AttributeKeyT>::SetAttributeOptions(
} // namespace draco
#endif // DRACO_SRC_DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_
#endif // DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_

View File

@@ -56,7 +56,10 @@ StatusOr<EncodedGeometryType> Decoder::GetEncodedGeometryType(
DecoderBuffer *in_buffer) {
DecoderBuffer temp_buffer(*in_buffer);
DracoHeader header;
DRACO_RETURN_IF_ERROR(PointCloudDecoder::DecodeHeader(&temp_buffer, &header))
DRACO_RETURN_IF_ERROR(PointCloudDecoder::DecodeHeader(&temp_buffer, &header));
if (header.encoder_type >= NUM_ENCODED_GEOMETRY_TYPES) {
return Status(Status::DRACO_ERROR, "Unsupported geometry type.");
}
return static_cast<EncodedGeometryType>(header.encoder_type);
}
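The NUM_ENCODED_GEOMETRY_TYPES sentinel added to compression_shared.h above is what makes this bounds check possible: keeping a count enumerator last gives every decoder a cheap upper bound for the on-wire type byte. The pattern in isolation, mirroring the enum from the diff:
#include <cstdint>
enum EncodedGeometryTypeSketch {  // mirrors the enum change above
  INVALID_GEOMETRY_TYPE = -1,
  POINT_CLOUD = 0,
  TRIANGULAR_MESH,
  NUM_ENCODED_GEOMETRY_TYPES  // sentinel: number of valid values
};
// Reject any raw header byte that does not name a known geometry type.
bool IsKnownGeometryType(uint8_t raw) {
  return raw < NUM_ENCODED_GEOMETRY_TYPES;
}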

View File

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_SRC_DRACO_COMPRESSION_ENCODE_BASE_H_
#define DRACO_SRC_DRACO_COMPRESSION_ENCODE_BASE_H_
#ifndef DRACO_COMPRESSION_ENCODE_BASE_H_
#define DRACO_COMPRESSION_ENCODE_BASE_H_
#include "draco/attributes/geometry_attribute.h"
#include "draco/compression/config/compression_shared.h"
@@ -98,7 +98,7 @@ class EncoderBase {
"Invalid prediction scheme for attribute type.");
}
}
// TODO(hemmer): Try to enable more prediction schemes for normals.
// TODO(b/199760123): Try to enable more prediction schemes for normals.
if (att_type == GeometryAttribute::NORMAL) {
if (!(prediction_scheme == PREDICTION_DIFFERENCE ||
prediction_scheme == MESH_PREDICTION_GEOMETRIC_NORMAL)) {
@@ -128,4 +128,4 @@ void EncoderBase<EncoderOptionsT>::SetTrackEncodedProperties(bool flag) {
} // namespace draco
#endif // DRACO_SRC_DRACO_COMPRESSION_ENCODE_BASE_H_
#endif // DRACO_COMPRESSION_ENCODE_BASE_H_

View File

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_CORE_ANS_H_
#define DRACO_CORE_ANS_H_
#ifndef DRACO_COMPRESSION_ENTROPY_ANS_H_
#define DRACO_COMPRESSION_ENTROPY_ANS_H_
// An implementation of Asymmetric Numeral Systems (rANS).
// See http://arxiv.org/abs/1311.2540v2 for more information on rANS.
// This file is based off libvpx's ans.h.
@@ -391,7 +391,6 @@ class RAnsEncoder {
ans_.buf[ans_.buf_offset++] = ans_.state % DRACO_ANS_IO_BASE;
ans_.state /= DRACO_ANS_IO_BASE;
}
// TODO(ostava): The division and multiplication should be optimized.
ans_.state =
(ans_.state / p) * rans_precision + ans_.state % p + sym->cum_prob;
}
@@ -524,4 +523,4 @@ class RAnsDecoder {
} // namespace draco
#endif // DRACO_CORE_ANS_H_
#endif // DRACO_COMPRESSION_ENTROPY_ANS_H_
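The encoder line retained by this last hunk is the core rANS step: for a symbol with probability p out of rans_precision and cumulative probability cum_prob, the state becomes (state / p) * rans_precision + state % p + cum_prob. A toy demo with arbitrary numbers; Draco's real coder also renormalizes first by flushing bytes in the DRACO_ANS_IO_BASE loop shown above:
#include <cstdint>
#include <iostream>
int main() {
  const uint64_t precision = 256;  // stands in for rans_precision
  const uint64_t p = 64;           // symbol probability, out of precision
  const uint64_t cum_prob = 128;   // cumulative probability below the symbol
  uint64_t state = 1000;
  // 1000 / 64 = 15, 1000 % 64 = 40 -> 15 * 256 + 40 + 128 = 4008.
  state = (state / p) * precision + state % p + cum_prob;
  std::cout << state << "\n";  // prints 4008
  return 0;
}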

Some files were not shown because too many files have changed in this diff.