Compare commits: multi_prev...decklink
84 commits
SHA1
---
9d457c295a
324bee2c3f
d48c71b3b1
a7ff4bb5e5
3fd88e6843
6fdcc8c52d
a088f81295
07c58d5218
5fea6ca52f
a7c8327c8a
31976ca624
55a38402ca
ad2bf24a19
b8435f48f0
4a507a7653
12bb5bdabf
e1963ddc30
7fe43f020c
3c85accf84
8f97a72a41
3d01d6b620
79e274f1f2
40edd86c77
9a750eadcf
18affe39a5
18829ee54a
0ff79c87a4
054d871ac1
803e658d63
f5161c3137
9a31389aed
74757522ba
2f1f59cde1
d7f0c33b3d
e5c461285f
a90ee7828e
47b0327de1
a2bebb0880
ef40f9164c
309b4ad840
5b8f560e46
aefd3e5551
7455298f8c
1be95bb0f4
9d18fd1188
c36f08d573
656733ab2a
c6248cce73
622cbd9d7f
c56384944d
c750ef6cf4
2658c67165
26b730b180
02c618fb94
6c7358a7e9
521edaaf75
b3df9f4407
cccb8f0b14
e936abdb3f
cb21edde3e
91db6eca47
2a41ec616f
c1bfbcbbb5
0b7337ed19
c507e157c4
b64d76d8a2
fbc379b5cf
edd8c874d8
94e1a67383
bd9904615b
80f1ab3b62
50a139cf49
3dabaa5982
889573ba21
598854da76
d36f57c6ea
3f02a17bba
edc169d63e
56283d0af8
f01f394982
ad75477f1c
a2df27ff89
430b754f9a
3b38dc96af
.gitignore (vendored): 6 changed lines
@@ -33,9 +33,3 @@ Desktop.ini
/doc/python_api/sphinx-in-tmp/
/doc/python_api/sphinx-in/
/doc/python_api/sphinx-out/
/doc/python_api/rst/bmesh.ops.rst
/doc/python_api/rst/in_menu.png
/doc/python_api/rst/menu_id.png
/doc/python_api/rst/op_prop.png
/doc/python_api/rst/run_script.png
/doc/python_api/rst/spacebar.png
.gitmodules (vendored): 4 changed lines
@@ -10,7 +10,3 @@
path = release/datafiles/locale
url = ../blender-translations.git
ignore = all
[submodule "source/tools"]
path = source/tools
url = ../blender-dev-tools.git
ignore = all
CMakeLists.txt: 1620 changed lines (file diff suppressed because it is too large)
GNUmakefile: 24 changed lines
@@ -182,20 +182,14 @@ help: .FORCE
@echo " * package_archive - build an archive package"
@echo ""
@echo "Testing Targets (not associated with building blender)"
@echo " * test - run ctest, currently tests import/export,"
@echo " operator execution and that python modules load"
@echo " * test_cmake - runs our own cmake file checker"
@echo " which detects errors in the cmake file list definitions"
@echo " * test_pep8 - checks all python script are pep8"
@echo " which are tagged to use the stricter formatting"
@echo " * test - run ctest, currently tests import/export, operator execution and that python modules load"
@echo " * test_cmake - runs our own cmake file checker which detects errors in the cmake file list definitions"
@echo " * test_pep8 - checks all python script are pep8 which are tagged to use the stricter formatting"
@echo " * test_deprecated - checks for deprecation tags in our code which may need to be removed"
@echo " * test_style_c - checks C/C++ conforms with blenders style guide:"
@echo " http://wiki.blender.org/index.php/Dev:Doc/CodeStyle"
@echo " * test_style_c - checks C/C++ conforms with blenders style guide: http://wiki.blender.org/index.php/Dev:Doc/CodeStyle"
@echo " * test_style_c_qtc - same as test_style but outputs QtCreator tasks format"
@echo " * test_style_osl - checks OpenShadingLanguage conforms with blenders style guide:"
@echo " http://wiki.blender.org/index.php/Dev:Doc/CodeStyle"
@echo " * test_style_osl_qtc - checks OpenShadingLanguage conforms with blenders style guide:"
@echo " http://wiki.blender.org/index.php/Dev:Doc/CodeStyle"
@echo " * test_style_osl - checks OpenShadingLanguage conforms with blenders style guide: http://wiki.blender.org/index.php/Dev:Doc/CodeStyle"
@echo " * test_style_osl_qtc - checks OpenShadingLanguage conforms with blenders style guide: http://wiki.blender.org/index.php/Dev:Doc/CodeStyle"
@echo ""
@echo "Static Source Code Checking (not associated with building blender)"
@echo " * check_cppcheck - run blender source through cppcheck (C & C++)"
@@ -411,8 +405,7 @@ update: .FORCE

# Simple version of ./doc/python_api/sphinx_doc_gen.sh with no PDF generation.
doc_py: .FORCE
"$(BUILD_DIR)/bin/blender" --background -noaudio --factory-startup \
--python doc/python_api/sphinx_doc_gen.py
"$(BUILD_DIR)/bin/blender" --background -noaudio --factory-startup --python doc/python_api/sphinx_doc_gen.py
cd doc/python_api ; sphinx-build -b html sphinx-in sphinx-out
@echo "docs written into: '$(BLENDER_DIR)/doc/python_api/sphinx-out/contents.html'"

@@ -421,8 +414,7 @@ doc_doxy: .FORCE
@echo "docs written into: '$(BLENDER_DIR)/doc/doxygen/html/index.html'"

doc_dna: .FORCE
"$(BUILD_DIR)/bin/blender" --background -noaudio --factory-startup \
--python doc/blender_file_format/BlendFileDnaExporter_25.py
"$(BUILD_DIR)/bin/blender" --background -noaudio --factory-startup --python doc/blender_file_format/BlendFileDnaExporter_25.py
@echo "docs written into: '$(BLENDER_DIR)/doc/blender_file_format/dna.html'"

doc_man: .FORCE
@@ -25,18 +25,17 @@
|
||||
ARGS=$( \
|
||||
getopt \
|
||||
-o s:i:t:h \
|
||||
--long source:,install:,tmp:,info:,threads:,help,show-deps,no-sudo,no-build,no-confirm,use-cxx11,\
|
||||
with-all,with-opencollada,\
|
||||
--long source:,install:,tmp:,info:,threads:,help,show-deps,no-sudo,no-confirm,with-all,with-opencollada,\
|
||||
ver-ocio:,ver-oiio:,ver-llvm:,ver-osl:,ver-osd:,ver-openvdb:,\
|
||||
force-all,force-python,force-numpy,force-boost,\
|
||||
force-ocio,force-openexr,force-oiio,force-llvm,force-osl,force-osd,force-openvdb,\
|
||||
force-ffmpeg,force-opencollada,force-alembic,\
|
||||
force-ffmpeg,force-opencollada,\
|
||||
build-all,build-python,build-numpy,build-boost,\
|
||||
build-ocio,build-openexr,build-oiio,build-llvm,build-osl,build-osd,build-openvdb,\
|
||||
build-ffmpeg,build-opencollada,build-alembic,\
|
||||
build-ffmpeg,build-opencollada,\
|
||||
skip-python,skip-numpy,skip-boost,\
|
||||
skip-ocio,skip-openexr,skip-oiio,skip-llvm,skip-osl,skip-osd,skip-openvdb,\
|
||||
skip-ffmpeg,skip-opencollada,skip-alembic \
|
||||
skip-ffmpeg,skip-opencollada \
|
||||
-- "$@" \
|
||||
)
|
||||
|
||||
@@ -98,17 +97,9 @@ ARGUMENTS_INFO="\"COMMAND LINE ARGUMENTS:
|
||||
--no-sudo
|
||||
Disable use of sudo (this script won't be able to do much though, will just print needed packages...).
|
||||
|
||||
--no-build
|
||||
Do not build (compile) anything, dependencies not installable with the package manager will remain missing.
|
||||
|
||||
--no-confirm
|
||||
Disable any interaction with user (suitable for automated run).
|
||||
|
||||
--use-cxx11
|
||||
Build all libraries in cpp11 'mode' (will be mandatory soon in blender2.8 branch).
|
||||
NOTE: If your compiler is gcc-6.0 or above, you probably *want* to enable this option (since it's default
|
||||
standard starting from this version).
|
||||
|
||||
--with-all
|
||||
By default, a number of optional and not-so-often needed libraries are not installed.
|
||||
This option will try to install them, at the cost of potential conflicts (depending on
|
||||
@@ -173,9 +164,6 @@ ARGUMENTS_INFO="\"COMMAND LINE ARGUMENTS:
|
||||
--build-openvdb
|
||||
Force the build of OpenVDB.
|
||||
|
||||
--build-alembic
|
||||
Force the build of Alembic.
|
||||
|
||||
--build-opencollada
|
||||
Force the build of OpenCOLLADA.
|
||||
|
||||
@@ -225,9 +213,6 @@ ARGUMENTS_INFO="\"COMMAND LINE ARGUMENTS:
|
||||
--force-openvdb
|
||||
Force the rebuild of OpenVDB.
|
||||
|
||||
--force-alembic
|
||||
Force the rebuild of Alembic.
|
||||
|
||||
--force-opencollada
|
||||
Force the rebuild of OpenCOLLADA.
|
||||
|
||||
@@ -270,9 +255,6 @@ ARGUMENTS_INFO="\"COMMAND LINE ARGUMENTS:
|
||||
--skip-openvdb
|
||||
Unconditionally skip OpenVDB installation/building.
|
||||
|
||||
--skip-alembic
|
||||
Unconditionally skip Alembic installation/building.
|
||||
|
||||
--skip-opencollada
|
||||
Unconditionally skip OpenCOLLADA installation/building.
|
||||
|
||||
@@ -285,11 +267,9 @@ DO_SHOW_DEPS=false
|
||||
|
||||
SUDO="sudo"
|
||||
|
||||
NO_BUILD=false
|
||||
NO_CONFIRM=false
|
||||
USE_CXX11=false
|
||||
|
||||
PYTHON_VERSION="3.5.2"
|
||||
PYTHON_VERSION="3.5.1"
|
||||
PYTHON_VERSION_MIN="3.5"
|
||||
PYTHON_FORCE_BUILD=false
|
||||
PYTHON_FORCE_REBUILD=false
|
||||
@@ -322,7 +302,7 @@ OPENEXR_FORCE_REBUILD=false
|
||||
OPENEXR_SKIP=false
|
||||
_with_built_openexr=false
|
||||
|
||||
OIIO_VERSION="1.7.8"
|
||||
OIIO_VERSION="1.6.9"
|
||||
OIIO_VERSION_MIN="1.6.0"
|
||||
OIIO_VERSION_MAX="1.9.0" # UNKNOWN currently # Not supported by current OSL...
|
||||
OIIO_FORCE_BUILD=false
|
||||
@@ -337,14 +317,14 @@ LLVM_FORCE_REBUILD=false
|
||||
LLVM_SKIP=false
|
||||
|
||||
# OSL needs to be compiled for now!
|
||||
OSL_VERSION="1.7.5"
|
||||
OSL_VERSION="1.7.1"
|
||||
OSL_VERSION_MIN=$OSL_VERSION
|
||||
OSL_FORCE_BUILD=false
|
||||
OSL_FORCE_REBUILD=false
|
||||
OSL_SKIP=false
|
||||
|
||||
# OpenSubdiv needs to be compiled for now
|
||||
OSD_VERSION="3.1.0"
|
||||
OSD_VERSION="3.0.2"
|
||||
OSD_VERSION_MIN=$OSD_VERSION
|
||||
OSD_FORCE_BUILD=false
|
||||
OSD_FORCE_REBUILD=false
|
||||
@@ -359,20 +339,13 @@ OPENVDB_FORCE_BUILD=false
|
||||
OPENVDB_FORCE_REBUILD=false
|
||||
OPENVDB_SKIP=false
|
||||
|
||||
# Alembic needs to be compiled for now
|
||||
ALEMBIC_VERSION="1.6.0"
|
||||
ALEMBIC_VERSION_MIN=$ALEMBIC_VERSION
|
||||
ALEMBIC_FORCE_BUILD=false
|
||||
ALEMBIC_FORCE_REBUILD=false
|
||||
ALEMBIC_SKIP=false
|
||||
|
||||
# Version??
|
||||
OPENCOLLADA_VERSION="1.3"
|
||||
OPENCOLLADA_FORCE_BUILD=false
|
||||
OPENCOLLADA_FORCE_REBUILD=false
|
||||
OPENCOLLADA_SKIP=false
|
||||
|
||||
FFMPEG_VERSION="3.2.1"
|
||||
FFMPEG_VERSION="2.8.4"
|
||||
FFMPEG_VERSION_MIN="2.8.4"
|
||||
FFMPEG_FORCE_BUILD=false
|
||||
FFMPEG_FORCE_REBUILD=false
|
||||
@@ -490,18 +463,9 @@ while true; do
|
||||
PRINT ""
|
||||
SUDO=""; shift; continue
|
||||
;;
|
||||
--no-build)
|
||||
PRINT ""
|
||||
WARNING "--no-build enabled, this script will not be able to install all dependencies..."
|
||||
PRINT ""
|
||||
NO_BUILD=true; shift; continue
|
||||
;;
|
||||
--no-confirm)
|
||||
NO_CONFIRM=true; shift; continue
|
||||
;;
|
||||
--use-cxx11)
|
||||
USE_CXX11=true; shift; continue
|
||||
;;
|
||||
--with-all)
|
||||
WITH_ALL=true; shift; continue
|
||||
;;
|
||||
@@ -551,7 +515,6 @@ while true; do
|
||||
OPENVDB_FORCE_BUILD=true
|
||||
OPENCOLLADA_FORCE_BUILD=true
|
||||
FFMPEG_FORCE_BUILD=true
|
||||
ALEMBIC_FORCE_BUILD=true
|
||||
shift; continue
|
||||
;;
|
||||
--build-python)
|
||||
@@ -594,9 +557,6 @@ while true; do
|
||||
--build-ffmpeg)
|
||||
FFMPEG_FORCE_BUILD=true; shift; continue
|
||||
;;
|
||||
--build-alembic)
|
||||
ALEMBIC_FORCE_BUILD=true; shift; continue
|
||||
;;
|
||||
--force-all)
|
||||
PYTHON_FORCE_REBUILD=true
|
||||
NUMPY_FORCE_REBUILD=true
|
||||
@@ -610,7 +570,6 @@ while true; do
|
||||
OPENVDB_FORCE_REBUILD=true
|
||||
OPENCOLLADA_FORCE_REBUILD=true
|
||||
FFMPEG_FORCE_REBUILD=true
|
||||
ALEMBIC_FORCE_REBUILD=true
|
||||
shift; continue
|
||||
;;
|
||||
--force-python)
|
||||
@@ -651,9 +610,6 @@ while true; do
|
||||
--force-ffmpeg)
|
||||
FFMPEG_FORCE_REBUILD=true; shift; continue
|
||||
;;
|
||||
--force-alembic)
|
||||
ALEMBIC_FORCE_REBUILD=true; shift; continue
|
||||
;;
|
||||
--skip-python)
|
||||
PYTHON_SKIP=true; shift; continue
|
||||
;;
|
||||
@@ -690,9 +646,6 @@ while true; do
|
||||
--skip-ffmpeg)
|
||||
FFMPEG_SKIP=true; shift; continue
|
||||
;;
|
||||
--skip-alembic)
|
||||
ALEMBIC_SKIP=true; shift; continue
|
||||
;;
|
||||
--)
|
||||
# no more arguments to parse
|
||||
break
|
||||
@@ -713,21 +666,6 @@ if [ "$WITH_ALL" = true -a "$OPENCOLLADA_SKIP" = false ]; then
|
||||
fi
|
||||
|
||||
|
||||
WARNING "****WARNING****"
|
||||
PRINT "If you are experiencing issues building Blender, _*TRY A FRESH, CLEAN BUILD FIRST*_!"
|
||||
PRINT "The same goes for install_deps itself, if you encounter issues, please first erase everything in $SRC and $INST"
|
||||
PRINT "(provided obviously you did not add anything yourself in those dirs!), and run install_deps.sh again!"
|
||||
PRINT "Often, changes in the libs built by this script, or in your distro package, cannot be handled simply, so..."
|
||||
PRINT ""
|
||||
PRINT "You may also try to use the '--build-foo' options to bypass your distribution's packages"
|
||||
PRINT "for some troublesome/buggy libraries..."
|
||||
PRINT ""
|
||||
PRINT ""
|
||||
PRINT "Ran with:"
|
||||
PRINT " install_deps.sh $COMMANDLINE"
|
||||
PRINT ""
|
||||
PRINT ""
|
||||
|
||||
|
||||
# This has to be done here, because user might force some versions...
|
||||
PYTHON_SOURCE=( "https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz" )
|
||||
@@ -735,7 +673,7 @@ NUMPY_SOURCE=( "http://sourceforge.net/projects/numpy/files/NumPy/$NUMPY_VERSION
|
||||
|
||||
_boost_version_nodots=`echo "$BOOST_VERSION" | sed -r 's/\./_/g'`
|
||||
BOOST_SOURCE=( "http://sourceforge.net/projects/boost/files/boost/$BOOST_VERSION/boost_$_boost_version_nodots.tar.bz2/download" )
|
||||
BOOST_BUILD_MODULES="--with-system --with-filesystem --with-thread --with-regex --with-locale --with-date_time --with-wave --with-iostreams --with-python --with-program_options"
|
||||
BOOST_BUILD_MODULES="--with-system --with-filesystem --with-thread --with-regex --with-locale --with-date_time --with-wave --with-iostreams"
|
||||
|
||||
OCIO_SOURCE=( "https://github.com/imageworks/OpenColorIO/tarball/v$OCIO_VERSION" )
|
||||
|
||||
@@ -764,7 +702,7 @@ OSL_SOURCE_REPO=( "https://github.com/Nazg-Gul/OpenShadingLanguage.git" )
|
||||
OSL_SOURCE_REPO_UID="7d40ff5fe8e47b030042afb92d0e955f5aa96f48"
|
||||
OSL_SOURCE_REPO_BRANCH="blender-fixes"
|
||||
|
||||
OSD_USE_REPO=false
|
||||
OSD_USE_REPO=true
|
||||
# Script foo to make the version string compliant with the archive name:
|
||||
# ${Varname//SearchForThisChar/ReplaceWithThisChar}
|
||||
OSD_SOURCE=( "https://github.com/PixarAnimationStudios/OpenSubdiv/archive/v${OSD_VERSION//./_}.tar.gz" )
|
||||
@@ -779,32 +717,13 @@ OPENVDB_SOURCE=( "https://github.com/dreamworksanimation/openvdb/archive/v${OPEN
|
||||
#~ OPENVDB_SOURCE_REPO_UID="404659fffa659da075d1c9416e4fc939139a84ee"
|
||||
#~ OPENVDB_SOURCE_REPO_BRANCH="dev"
|
||||
|
||||
ALEMBIC_USE_REPO=false
|
||||
ALEMBIC_SOURCE=( "https://github.com/alembic/alembic/archive/${ALEMBIC_VERSION}.tar.gz" )
|
||||
# ALEMBIC_SOURCE_REPO=( "https://github.com/alembic/alembic.git" )
|
||||
# ALEMBIC_SOURCE_REPO_UID="e6c90d4faa32c4550adeaaf3f556dad4b73a92bb"
|
||||
# ALEMBIC_SOURCE_REPO_BRANCH="master"
|
||||
|
||||
OPENCOLLADA_SOURCE=( "https://github.com/KhronosGroup/OpenCOLLADA.git" )
|
||||
OPENCOLLADA_REPO_UID="3335ac164e68b2512a40914b14c74db260e6ff7d"
|
||||
OPENCOLLADA_REPO_BRANCH="master"
|
||||
|
||||
FFMPEG_SOURCE=( "http://ffmpeg.org/releases/ffmpeg-$FFMPEG_VERSION.tar.bz2" )
|
||||
|
||||
CXXFLAGS_BACK=$CXXFLAGS
|
||||
if [ "$USE_CXX11" = true ]; then
|
||||
WARNING "You are trying to use c++11, this *should* go smoothely with any very recent distribution
|
||||
However, if you are experiencing linking errors (also when building Blender itself), please try the following:
|
||||
* Re-run this script with `--build-all --force-all` options.
|
||||
* Ensure your gcc version is at the very least 4.8, if possible you should really rather use gcc-5.1 or above.
|
||||
|
||||
Please note that until the transition to C++11-built libraries if completed in your distribution, situation will
|
||||
remain fuzzy and incompatibilities may happen..."
|
||||
PRINT ""
|
||||
PRINT ""
|
||||
CXXFLAGS="$CXXFLAGS -std=c++11"
|
||||
export CXXFLAGS
|
||||
fi
|
||||
|
||||
#### Show Dependencies ####
|
||||
|
||||
@@ -817,7 +736,7 @@ Those libraries should be available as packages in all recent distributions (opt
|
||||
* libjpeg, libpng, libtiff, [libopenjpeg], [libopenal].
|
||||
* libx11, libxcursor, libxi, libxrandr, libxinerama (and other libx... as needed).
|
||||
* libsqlite3, libbz2, libssl, libfftw3, libxml2, libtinyxml, yasm, libyaml-cpp.
|
||||
* libsdl1.2, libglew, [libglewmx].\""
|
||||
* libsdl1.2, libglew, libglewmx.\""
|
||||
|
||||
DEPS_SPECIFIC_INFO="\"BUILDABLE DEPENDENCIES:
|
||||
|
||||
@@ -838,8 +757,7 @@ You may also want to build them yourself (optional ones are [between brackets]):
|
||||
* [OpenShadingLanguage $OSL_VERSION_MIN] (from $OSL_SOURCE_REPO, branch $OSL_SOURCE_REPO_BRANCH, commit $OSL_SOURCE_REPO_UID).
|
||||
* [OpenSubDiv $OSD_VERSION_MIN] (from $OSD_SOURCE_REPO, branch $OSD_SOURCE_REPO_BRANCH, commit $OSD_SOURCE_REPO_UID).
|
||||
* [OpenVDB $OPENVDB_VERSION_MIN] (from $OPENVDB_SOURCE), [Blosc $OPENVDB_BLOSC_VERSION] (from $OPENVDB_BLOSC_SOURCE).
|
||||
* [OpenCollada] (from $OPENCOLLADA_SOURCE, branch $OPENCOLLADA_REPO_BRANCH, commit $OPENCOLLADA_REPO_UID).
|
||||
* [Alembic $ALEMBIC_VERSION] (from $ALEMBIC_SOURCE).\""
|
||||
* [OpenCollada] (from $OPENCOLLADA_SOURCE, branch $OPENCOLLADA_REPO_BRANCH, commit $OPENCOLLADA_REPO_UID).\""
|
||||
|
||||
if [ "$DO_SHOW_DEPS" = true ]; then
|
||||
PRINT ""
|
||||
@@ -991,7 +909,7 @@ prepare_opt() {
|
||||
|
||||
# Check whether the current package needs to be recompiled, based on a dummy file containing a magic number in its name...
|
||||
magic_compile_check() {
|
||||
if [ -f $INST/.$1-magiccheck-$2-$USE_CXX11 ]; then
|
||||
if [ -f $INST/.$1-magiccheck-$2 ]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
@@ -1000,7 +918,7 @@ magic_compile_check() {
|
||||
|
||||
magic_compile_set() {
|
||||
rm -f $INST/.$1-magiccheck-*
|
||||
touch $INST/.$1-magiccheck-$2-$USE_CXX11
|
||||
touch $INST/.$1-magiccheck-$2
|
||||
}
|
||||
|
||||
# Note: should clean nicely in $INST, but not in $SRC, when we switch to a new version of a lib...
|
||||
@@ -1049,11 +967,6 @@ clean_Python() {
|
||||
}
|
||||
|
||||
compile_Python() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, Python will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
# To be changed each time we make edits that would modify the compiled result!
|
||||
py_magic=1
|
||||
_init_python
|
||||
@@ -1119,11 +1032,6 @@ clean_Numpy() {
|
||||
}
|
||||
|
||||
compile_Numpy() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, Numpy will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
# To be changed each time we make edits that would modify the compiled result!
|
||||
numpy_magic=0
|
||||
_init_numpy
|
||||
@@ -1184,13 +1092,8 @@ clean_Boost() {
|
||||
}
|
||||
|
||||
compile_Boost() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, Boost will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
# To be changed each time we make edits that would modify the compiled result!
|
||||
boost_magic=11
|
||||
boost_magic=10
|
||||
|
||||
_init_boost
|
||||
|
||||
@@ -1262,11 +1165,6 @@ clean_OCIO() {
|
||||
}
|
||||
|
||||
compile_OCIO() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, OpenColorIO will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
# To be changed each time we make edits that would modify the compiled result!
|
||||
ocio_magic=1
|
||||
_init_ocio
|
||||
@@ -1358,11 +1256,6 @@ clean_ILMBASE() {
|
||||
}
|
||||
|
||||
compile_ILMBASE() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, ILMBase will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
# To be changed each time we make edits that would modify the compiled result!
|
||||
ilmbase_magic=10
|
||||
_init_ilmbase
|
||||
@@ -1450,11 +1343,6 @@ clean_OPENEXR() {
|
||||
}
|
||||
|
||||
compile_OPENEXR() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, OpenEXR will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
# To be changed each time we make edits that would modify the compiled result!
|
||||
openexr_magic=14
|
||||
|
||||
@@ -1570,11 +1458,6 @@ clean_OIIO() {
|
||||
}
|
||||
|
||||
compile_OIIO() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, OpenImageIO will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
# To be changed each time we make edits that would modify the compiled result!
|
||||
oiio_magic=16
|
||||
_init_oiio
|
||||
@@ -1660,10 +1543,6 @@ compile_OIIO() {
|
||||
# fi
|
||||
cmake_d="$cmake_d -D USE_OCIO=OFF"
|
||||
|
||||
if [ "$USE_CXX11" = true ]; then
|
||||
cmake_d="$cmake_d -D OIIO_BUILD_CPP11=ON"
|
||||
fi
|
||||
|
||||
if file /bin/cp | grep -q '32-bit'; then
|
||||
cflags="-fPIC -m32 -march=i686"
|
||||
else
|
||||
@@ -1710,11 +1589,6 @@ clean_LLVM() {
|
||||
}
|
||||
|
||||
compile_LLVM() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, LLVM will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
# To be changed each time we make edits that would modify the compiled result!
|
||||
llvm_magic=3
|
||||
_init_llvm
|
||||
@@ -1812,13 +1686,8 @@ clean_OSL() {
|
||||
}
|
||||
|
||||
compile_OSL() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, OpenShadingLanguage will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
# To be changed each time we make edits that would modify the compiled result!
|
||||
osl_magic=21
|
||||
osl_magic=20
|
||||
_init_osl
|
||||
|
||||
# Clean install if needed!
|
||||
@@ -1875,9 +1744,6 @@ compile_OSL() {
|
||||
cmake_d="$cmake_d -D OSL_BUILD_PLUGINS=OFF"
|
||||
cmake_d="$cmake_d -D OSL_BUILD_TESTS=OFF"
|
||||
cmake_d="$cmake_d -D USE_SIMD=sse2"
|
||||
if [ "$USE_CXX11" = true ]; then
|
||||
cmake_d="$cmake_d -D OSL_BUILD_CPP11=1"
|
||||
fi
|
||||
|
||||
#~ cmake_d="$cmake_d -D ILMBASE_VERSION=$ILMBASE_VERSION"
|
||||
|
||||
@@ -1946,13 +1812,8 @@ clean_OSD() {
|
||||
}
|
||||
|
||||
compile_OSD() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, OpenSubdiv will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
# To be changed each time we make edits that would modify the compiled result!
|
||||
osd_magic=2
|
||||
osd_magic=1
|
||||
_init_osd
|
||||
|
||||
# Clean install if needed!
|
||||
@@ -2043,11 +1904,6 @@ clean_BLOSC() {
|
||||
}
|
||||
|
||||
compile_BLOSC() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, Blosc will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
# To be changed each time we make edits that would modify the compiled result!
|
||||
blosc_magic=0
|
||||
_init_blosc
|
||||
@@ -2130,11 +1986,6 @@ clean_OPENVDB() {
|
||||
}
|
||||
|
||||
compile_OPENVDB() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, OpenVDB will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
compile_BLOSC
|
||||
PRINT ""
|
||||
|
||||
@@ -2217,102 +2068,6 @@ compile_OPENVDB() {
|
||||
run_ldconfig "openvdb"
|
||||
}
|
||||
|
||||
#### Build Alembic ####
|
||||
_init_alembic() {
|
||||
_src=$SRC/alembic-$ALEMBIC_VERSION
|
||||
_git=false
|
||||
_inst=$INST/alembic-$ALEMBIC_VERSION
|
||||
_inst_shortcut=$INST/alembic
|
||||
}
|
||||
|
||||
clean_ALEMBIC() {
|
||||
_init_alembic
|
||||
_clean
|
||||
}
|
||||
|
||||
compile_ALEMBIC() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, Alembic will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
compile_HDF5
|
||||
PRINT ""
|
||||
|
||||
# To be changed each time we make edits that would modify the compiled result!
|
||||
alembic_magic=2
|
||||
_init_alembic
|
||||
|
||||
# Clean install if needed!
|
||||
magic_compile_check alembic-$ALEMBIC_VERSION $alembic_magic
|
||||
if [ $? -eq 1 -o "$ALEMBIC_FORCE_REBUILD" = true ]; then
|
||||
clean_ALEMBIC
|
||||
fi
|
||||
|
||||
if [ ! -d $_inst ]; then
|
||||
INFO "Building Alembic-$ALEMBIC_VERSION"
|
||||
|
||||
prepare_opt
|
||||
|
||||
if [ ! -d $_src -o true ]; then
|
||||
mkdir -p $SRC
|
||||
download ALEMBIC_SOURCE[@] "$_src.tar.gz"
|
||||
|
||||
INFO "Unpacking Alembic-$ALEMBIC_VERSION"
|
||||
tar -C $SRC -xf $_src.tar.gz
|
||||
fi
|
||||
|
||||
cd $_src
|
||||
|
||||
cmake_d="-D CMAKE_INSTALL_PREFIX=$_inst"
|
||||
|
||||
if [ -d $INST/boost ]; then
|
||||
cmake_d="$cmake_d -D BOOST_ROOT=$INST/boost"
|
||||
cmake_d="$cmake_d -D USE_STATIC_BOOST=ON"
|
||||
else
|
||||
cmake_d="$cmake_d -D USE_STATIC_BOOST=OFF"
|
||||
fi
|
||||
|
||||
if [ "$_with_built_openexr" = true ]; then
|
||||
cmake_d="$cmake_d -D ILMBASE_ROOT=$INST/openexr"
|
||||
cmake_d="$cmake_d -D USE_ARNOLD=OFF"
|
||||
cmake_d="$cmake_d -D USE_BINARIES=OFF"
|
||||
cmake_d="$cmake_d -D USE_EXAMPLES=OFF"
|
||||
cmake_d="$cmake_d -D USE_HDF5=OFF"
|
||||
cmake_d="$cmake_d -D USE_MAYA=OFF"
|
||||
cmake_d="$cmake_d -D USE_PRMAN=OFF"
|
||||
cmake_d="$cmake_d -D USE_PYALEMBIC=OFF"
|
||||
cmake_d="$cmake_d -D USE_STATIC_HDF5=OFF"
|
||||
cmake_d="$cmake_d -D ALEMBIC_ILMBASE_LINK_STATIC=OFF"
|
||||
cmake_d="$cmake_d -D ALEMBIC_SHARED_LIBS=OFF"
|
||||
cmake_d="$cmake_d -D ALEMBIC_LIB_USES_BOOST=ON"
|
||||
cmake_d="$cmake_d -D ALEMBIC_LIB_USES_TR1=OFF"
|
||||
INFO "ILMBASE_ROOT=$INST/openexr"
|
||||
fi
|
||||
|
||||
cmake $cmake_d ./
|
||||
make -j$THREADS install
|
||||
make clean
|
||||
|
||||
if [ -d $_inst ]; then
|
||||
_create_inst_shortcut
|
||||
else
|
||||
ERROR "Alembic-$ALEMBIC_VERSION failed to compile, exiting"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
magic_compile_set alembic-$ALEMBIC_VERSION $alembic_magic
|
||||
|
||||
cd $CWD
|
||||
INFO "Done compiling Alembic-$ALEMBIC_VERSION!"
|
||||
else
|
||||
INFO "Own Alembic-$ALEMBIC_VERSION is up to date, nothing to do!"
|
||||
INFO "If you want to force rebuild of this lib, use the --force-alembic option."
|
||||
fi
|
||||
|
||||
run_ldconfig "alembic"
|
||||
}
|
||||
|
||||
#### Build OpenCOLLADA ####
|
||||
_init_opencollada() {
|
||||
_src=$SRC/OpenCOLLADA-$OPENCOLLADA_VERSION
|
||||
@@ -2327,11 +2082,6 @@ clean_OpenCOLLADA() {
|
||||
}
|
||||
|
||||
compile_OpenCOLLADA() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, OpenCOLLADA will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
# To be changed each time we make edits that would modify the compiled results!
|
||||
opencollada_magic=9
|
||||
_init_opencollada
|
||||
@@ -2411,11 +2161,6 @@ clean_FFmpeg() {
|
||||
}
|
||||
|
||||
compile_FFmpeg() {
|
||||
if [ "$NO_BUILD" = true ]; then
|
||||
WARNING "--no-build enabled, ffmpeg will not be compiled!"
|
||||
return
|
||||
fi
|
||||
|
||||
# To be changed each time we make edits that would modify the compiled result!
|
||||
ffmpeg_magic=5
|
||||
_init_ffmpeg
|
||||
@@ -2480,7 +2225,7 @@ compile_FFmpeg() {
|
||||
--enable-avfilter --disable-vdpau \
|
||||
--disable-bzlib --disable-libgsm --disable-libspeex \
|
||||
--enable-pthreads --enable-zlib --enable-stripping --enable-runtime-cpudetect \
|
||||
--disable-vaapi --disable-nonfree --enable-gpl \
|
||||
--disable-vaapi --disable-libfaac --disable-nonfree --enable-gpl \
|
||||
--disable-postproc --disable-librtmp --disable-libopencore-amrnb \
|
||||
--disable-libopencore-amrwb --disable-libdc1394 --disable-version3 --disable-outdev=sdl \
|
||||
--disable-libxcb \
|
||||
@@ -2607,9 +2352,8 @@ install_DEB() {
|
||||
git libfreetype6-dev libx11-dev flex bison libtbb-dev libxxf86vm-dev \
|
||||
libxcursor-dev libxi-dev wget libsqlite3-dev libxrandr-dev libxinerama-dev \
|
||||
libbz2-dev libncurses5-dev libssl-dev liblzma-dev libreadline-dev $OPENJPEG_DEV \
|
||||
libopenal-dev libglew-dev yasm $THEORA_DEV $VORBIS_DEV $OGG_DEV \
|
||||
libopenal-dev libglew-dev libglewmx-dev yasm $THEORA_DEV $VORBIS_DEV $OGG_DEV \
|
||||
libsdl1.2-dev libfftw3-dev patch bzip2 libxml2-dev libtinyxml-dev libjemalloc-dev"
|
||||
# libglewmx-dev (broken in deb testing currently...)
|
||||
|
||||
OPENJPEG_USE=true
|
||||
VORBIS_USE=true
|
||||
@@ -2922,17 +2666,6 @@ install_DEB() {
|
||||
fi
|
||||
fi
|
||||
|
||||
PRINT ""
|
||||
if [ "$ALEMBIC_SKIP" = true ]; then
|
||||
WARNING "Skipping Alembic installation, as requested..."
|
||||
elif [ "$ALEMBIC_FORCE_BUILD" = true ]; then
|
||||
INFO "Forced Alembic building, as requested..."
|
||||
compile_ALEMBIC
|
||||
else
|
||||
# No package currently, only HDF5!
|
||||
compile_ALEMBIC
|
||||
fi
|
||||
|
||||
|
||||
if [ "$WITH_OPENCOLLADA" = true ]; then
|
||||
_do_compile_collada=false
|
||||
@@ -3470,17 +3203,6 @@ install_RPM() {
|
||||
compile_OPENVDB
|
||||
fi
|
||||
|
||||
PRINT ""
|
||||
if [ "$ALEMBIC_SKIP" = true ]; then
|
||||
WARNING "Skipping Alembic installation, as requested..."
|
||||
elif [ "$ALEMBIC_FORCE_BUILD" = true ]; then
|
||||
INFO "Forced Alembic building, as requested..."
|
||||
compile_ALEMBIC
|
||||
else
|
||||
# No package currently!
|
||||
compile_ALEMBIC
|
||||
fi
|
||||
|
||||
|
||||
if [ "$WITH_OPENCOLLADA" = true ]; then
|
||||
PRINT ""
|
||||
@@ -3891,16 +3613,6 @@ install_ARCH() {
|
||||
fi
|
||||
fi
|
||||
|
||||
PRINT ""
|
||||
if [ "$ALEMBIC_SKIP" = true ]; then
|
||||
WARNING "Skipping Alembic installation, as requested..."
|
||||
elif [ "$ALEMBIC_FORCE_BUILD" = true ]; then
|
||||
INFO "Forced Alembic building, as requested..."
|
||||
compile_ALEMBIC
|
||||
else
|
||||
compile_ALEMBIC
|
||||
fi
|
||||
|
||||
|
||||
if [ "$WITH_OPENCOLLADA" = true ]; then
|
||||
PRINT ""
|
||||
@@ -4043,6 +3755,9 @@ install_OTHER() {
|
||||
fi
|
||||
|
||||
if [ "$_do_compile_llvm" = true ]; then
|
||||
install_packages_DEB libffi-dev
|
||||
# LLVM can't find the debian ffi header dir
|
||||
_FFI_INCLUDE_DIR=`dpkg -L libffi-dev | grep -e ".*/ffi.h" | sed -r 's/(.*)\/ffi.h/\1/'`
|
||||
PRINT ""
|
||||
compile_LLVM
|
||||
have_llvm=true
|
||||
@@ -4061,6 +3776,7 @@ install_OTHER() {
|
||||
|
||||
if [ "$_do_compile_osl" = true ]; then
|
||||
if [ "$have_llvm" = true ]; then
|
||||
install_packages_DEB flex bison libtbb-dev
|
||||
PRINT ""
|
||||
compile_OSL
|
||||
else
|
||||
@@ -4079,6 +3795,7 @@ install_OTHER() {
|
||||
fi
|
||||
|
||||
if [ "$_do_compile_osd" = true ]; then
|
||||
install_packages_DEB flex bison libtbb-dev
|
||||
PRINT ""
|
||||
compile_OSD
|
||||
fi
|
||||
@@ -4095,6 +3812,10 @@ install_OTHER() {
|
||||
fi
|
||||
|
||||
if [ "$_do_compile_collada" = true ]; then
|
||||
install_packages_DEB libpcre3-dev
|
||||
# Find path to libxml shared lib...
|
||||
_XML2_LIB=`dpkg -L libxml2-dev | grep -e ".*/libxml2.so"`
|
||||
# No package
|
||||
PRINT ""
|
||||
compile_OpenCOLLADA
|
||||
fi
|
||||
@@ -4179,6 +3900,16 @@ print_info_ffmpeglink() {
|
||||
}
|
||||
|
||||
print_info() {
|
||||
PRINT ""
|
||||
PRINT ""
|
||||
WARNING "****WARNING****"
|
||||
PRINT "If you are experiencing issues building Blender, _*TRY A FRESH, CLEAN BUILD FIRST*_!"
|
||||
PRINT "The same goes for install_deps itself, if you encounter issues, please first erase everything in $SRC and $INST"
|
||||
PRINT "(provided obviously you did not add anything yourself in those dirs!), and run install_deps.sh again!"
|
||||
PRINT "Often, changes in the libs built by this script, or in your distro package, cannot be handled simply, so..."
|
||||
PRINT ""
|
||||
PRINT "You may also try to use the '--build-foo' options to bypass your distribution's packages"
|
||||
PRINT "for some troublesome/buggy libraries..."
|
||||
PRINT ""
|
||||
PRINT ""
|
||||
PRINT "Ran with:"
|
||||
@@ -4189,13 +3920,7 @@ print_info() {
|
||||
|
||||
_buildargs="-U *SNDFILE* -U *PYTHON* -U *BOOST* -U *Boost*"
|
||||
_buildargs="$_buildargs -U *OPENCOLORIO* -U *OPENEXR* -U *OPENIMAGEIO* -U *LLVM* -U *CYCLES*"
|
||||
_buildargs="$_buildargs -U *OPENSUBDIV* -U *OPENVDB* -U *COLLADA* -U *FFMPEG* -U *ALEMBIC*"
|
||||
|
||||
if [ "$USE_CXX11" = true ]; then
|
||||
_1="-D WITH_CXX11=ON"
|
||||
PRINT " $_1"
|
||||
_buildargs="$_buildargs $_1"
|
||||
fi
|
||||
_buildargs="$_buildargs -U *OPENSUBDIV* -U *OPENVDB* -U *COLLADA* -U *FFMPEG*"
|
||||
|
||||
_1="-D WITH_CODEC_SNDFILE=ON"
|
||||
PRINT " $_1"
|
||||
@@ -4301,17 +4026,6 @@ print_info() {
|
||||
_buildargs="$_buildargs $_1"
|
||||
fi
|
||||
|
||||
if [ "$ALEMBIC_SKIP" = false ]; then
|
||||
_1="-D WITH_ALEMBIC=ON"
|
||||
PRINT " $_1"
|
||||
_buildargs="$_buildargs $_1"
|
||||
if [ -d $INST/alembic ]; then
|
||||
_1="-D ALEMBIC_ROOT_DIR=$INST/alembic"
|
||||
PRINT " $_1"
|
||||
_buildargs="$_buildargs $_1"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$NO_SYSTEM_GLEW" = true ]; then
|
||||
_1="-D WITH_SYSTEM_GLEW=OFF"
|
||||
PRINT " $_1"
|
||||
@@ -4360,6 +4074,3 @@ PRINT ""
|
||||
# Switch back to user language.
|
||||
LANG=LANG_BACK
|
||||
export LANG
|
||||
|
||||
CXXFLAGS=$CXXFLAGS_BACK
|
||||
export CXXFLAGS
|
||||
|
@@ -94,7 +94,6 @@ all_repositories = {
|
||||
r'git://git.blender.org/blender-translations.git': 'blender-translations',
|
||||
r'git://git.blender.org/blender-addons.git': 'blender-addons',
|
||||
r'git://git.blender.org/blender-addons-contrib.git': 'blender-addons-contrib',
|
||||
r'git://git.blender.org/blender-dev-tools.git': 'blender-dev-tools',
|
||||
r'https://svn.blender.org/svnroot/bf-blender/': 'lib svn',
|
||||
}
|
||||
|
||||
@@ -129,7 +128,6 @@ def schedule_force_build(name):
|
||||
forcesched.CodebaseParameter(hide=True, codebase="blender-translations"),
|
||||
forcesched.CodebaseParameter(hide=True, codebase="blender-addons"),
|
||||
forcesched.CodebaseParameter(hide=True, codebase="blender-addons-contrib"),
|
||||
forcesched.CodebaseParameter(hide=True, codebase="blender-dev-tools"),
|
||||
forcesched.CodebaseParameter(hide=True, codebase="lib svn")],
|
||||
properties=[]))
|
||||
|
||||
@@ -145,7 +143,6 @@ def schedule_build(name, hour, minute=0):
|
||||
"blender-translations": {"repository": "", "branch": "master"},
|
||||
"blender-addons": {"repository": "", "branch": "master"},
|
||||
"blender-addons-contrib": {"repository": "", "branch": "master"},
|
||||
"blender-dev-tools": {"repository": "", "branch": "master"},
|
||||
"lib svn": {"repository": "", "branch": "trunk"}},
|
||||
branch=current_branch,
|
||||
builderNames=[name],
|
||||
@@ -267,8 +264,7 @@ def generic_builder(id, libdir='', branch='', rsync=False):
|
||||
|
||||
for submodule in ('blender-translations',
|
||||
'blender-addons',
|
||||
'blender-addons-contrib',
|
||||
'blender-dev-tools'):
|
||||
'blender-addons-contrib'):
|
||||
f.addStep(git_submodule_step(submodule))
|
||||
|
||||
f.addStep(git_step(branch))
|
||||
@@ -289,7 +285,7 @@ def generic_builder(id, libdir='', branch='', rsync=False):
|
||||
maxsize=150 * 1024 * 1024,
|
||||
workdir='install'))
|
||||
f.addStep(MasterShellCommand(name='unpack',
|
||||
command=['python2.7', unpack_script, filename],
|
||||
command=['python', unpack_script, filename],
|
||||
description='unpacking',
|
||||
descriptionDone='unpacked'))
|
||||
return f
|
||||
@@ -297,14 +293,13 @@ def generic_builder(id, libdir='', branch='', rsync=False):
|
||||
# Builders
|
||||
|
||||
add_builder(c, 'mac_x86_64_10_6_cmake', 'darwin-9.x.universal', generic_builder, hour=5)
|
||||
# add_builder(c, 'linux_glibc211_i686_cmake', '', generic_builder, hour=1)
|
||||
# add_builder(c, 'linux_glibc211_x86_64_cmake', '', generic_builder, hour=2)
|
||||
add_builder(c, 'linux_glibc211_i686_cmake', '', generic_builder, hour=1)
|
||||
add_builder(c, 'linux_glibc211_x86_64_cmake', '', generic_builder, hour=2)
|
||||
add_builder(c, 'linux_glibc219_i686_cmake', '', generic_builder, hour=3)
|
||||
add_builder(c, 'linux_glibc219_x86_64_cmake', '', generic_builder, hour=4)
|
||||
add_builder(c, 'win32_cmake_vc2013', 'windows_vc12', generic_builder, hour=3)
|
||||
add_builder(c, 'win64_cmake_vc2013', 'win64_vc12', generic_builder, hour=4)
|
||||
add_builder(c, 'win32_cmake_vc2015', 'windows_vc14', generic_builder, hour=5)
|
||||
add_builder(c, 'win64_cmake_vc2015', 'win64_vc14', generic_builder, hour=6)
|
||||
add_builder(c, 'win64_cmake_vc2015', 'win64_vc14', generic_builder, hour=5)
|
||||
|
||||
# STATUS TARGETS
|
||||
#
|
||||
|
@@ -72,28 +72,21 @@ if 'cmake' in builder:
|
||||
# Set up OSX architecture
|
||||
if builder.endswith('x86_64_10_6_cmake'):
|
||||
cmake_extra_options.append('-DCMAKE_OSX_ARCHITECTURES:STRING=x86_64')
|
||||
cmake_extra_options.append('-DCUDA_NVCC_EXECUTABLE=/usr/local/cuda8-hack/bin/nvcc')
|
||||
cmake_extra_options.append('-DWITH_CODEC_QUICKTIME=OFF')
|
||||
cmake_extra_options.append('-DCMAKE_OSX_DEPLOYMENT_TARGET=10.6')
|
||||
build_cubins = False
|
||||
|
||||
cmake_extra_options.append('-DCUDA_NVCC_EXECUTABLE=/usr/local/cuda-hack/bin/nvcc')
|
||||
|
||||
elif builder.startswith('win'):
|
||||
if builder.endswith('_vc2015'):
|
||||
if builder.startswith('win64'):
|
||||
cmake_options.extend(['-G', 'Visual Studio 14 2015 Win64'])
|
||||
elif builder.startswith('win32'):
|
||||
bits = 32
|
||||
cmake_options.extend(['-G', 'Visual Studio 14 2015'])
|
||||
cmake_extra_options.append('-DCUDA_NVCC_FLAGS=--cl-version;2013;' +
|
||||
'--compiler-bindir;C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\VC\\bin')
|
||||
else:
|
||||
if builder.startswith('win64'):
|
||||
cmake_options.extend(['-G', 'Visual Studio 12 2013 Win64'])
|
||||
elif builder.startswith('win32'):
|
||||
bits = 32
|
||||
cmake_options.extend(['-G', 'Visual Studio 12 2013'])
|
||||
cmake_extra_options.append('-DCUDA_NVCC_EXECUTABLE:FILEPATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v8.0/bin/nvcc.exe')
|
||||
if builder.endswith('_vc2015'):
|
||||
if builder.startswith('win64'):
|
||||
cmake_options.extend(['-G', 'Visual Studio 14 2015 Win64', '-DWITH_CYCLES_CUDA_BINARIES=0'])
|
||||
elif builder.startswith('win32'):
|
||||
bits = 32
|
||||
cmake_options.extend(['-G', 'Visual Studio 14 2015', '-DWITH_CYCLES_CUDA_BINARIES=0'])
|
||||
else:
|
||||
if builder.startswith('win64'):
|
||||
cmake_options.extend(['-G', 'Visual Studio 12 2013 Win64'])
|
||||
elif builder.startswith('win32'):
|
||||
bits = 32
|
||||
cmake_options.extend(['-G', 'Visual Studio 12 2013'])
|
||||
|
||||
elif builder.startswith('linux'):
|
||||
tokens = builder.split("_")
|
||||
@@ -113,13 +106,10 @@ if 'cmake' in builder:
|
||||
cuda_chroot_name = 'buildbot_' + deb_name + '_x86_64'
|
||||
targets = ['player', 'blender', 'cuda']
|
||||
|
||||
cmake_extra_options.append('-DCUDA_NVCC_EXECUTABLE=/usr/local/cuda-8.0/bin/nvcc')
|
||||
|
||||
cmake_options.append("-C" + os.path.join(blender_dir, cmake_config_file))
|
||||
|
||||
# Prepare CMake options needed to configure cuda binaries compilation.
|
||||
cuda_cmake_options.append("-DWITH_CYCLES_CUDA_BINARIES=%s" % ('ON' if build_cubins else 'OFF'))
|
||||
cuda_cmake_options.append("-DCYCLES_CUDA_BINARIES_ARCH=sm_20;sm_21;sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61")
|
||||
if build_cubins or 'cuda' in targets:
|
||||
if bits == 32:
|
||||
cuda_cmake_options.append("-DCUDA_64_BIT_DEVICE_CODE=OFF")
|
||||
@@ -183,8 +173,10 @@ if 'cmake' in builder:
|
||||
print('Condifuration FAILED!')
|
||||
sys.exit(retcode)
|
||||
|
||||
if 'win32' in builder or 'win64' in builder:
|
||||
command = ['cmake', '--build', '.', '--target', target_name, '--config', 'Release']
|
||||
if 'win32' in builder:
|
||||
command = ['msbuild', 'INSTALL.vcxproj', '/Property:PlatformToolset=v120_xp', '/p:Configuration=Release']
|
||||
elif 'win64' in builder:
|
||||
command = ['msbuild', 'INSTALL.vcxproj', '/p:Configuration=Release']
|
||||
else:
|
||||
command = target_chroot_prefix + ['make', '-s', '-j2', target_name]
|
||||
|
||||
|
@@ -108,8 +108,6 @@ if builder.find('cmake') != -1:
|
||||
platform += 'i386'
|
||||
elif builder.endswith('ppc_10_6_cmake'):
|
||||
platform += 'ppc'
|
||||
if builder.endswith('vc2015'):
|
||||
platform += "-vc14"
|
||||
builderified_name = 'blender-{}-{}-{}'.format(blender_full_version, git_hash, platform)
|
||||
if branch != '':
|
||||
builderified_name = branch + "-" + builderified_name
|
||||
|
@@ -1,70 +0,0 @@
# - Find Alembic library
# Find the native Alembic includes and libraries
# This module defines
# ALEMBIC_INCLUDE_DIRS, where to find Alembic headers, Set when
# ALEMBIC_INCLUDE_DIR is found.
# ALEMBIC_LIBRARIES, libraries to link against to use Alembic.
# ALEMBIC_ROOT_DIR, The base directory to search for Alembic.
# This can also be an environment variable.
# ALEMBIC_FOUND, If false, do not try to use Alembic.
#

#=============================================================================
# Copyright 2016 Blender Foundation.
#
# Distributed under the OSI-approved BSD License (the "License");
# see accompanying file Copyright.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the License for more information.
#=============================================================================

# If ALEMBIC_ROOT_DIR was defined in the environment, use it.
IF(NOT ALEMBIC_ROOT_DIR AND NOT $ENV{ALEMBIC_ROOT_DIR} STREQUAL "")
SET(ALEMBIC_ROOT_DIR $ENV{ALEMBIC_ROOT_DIR})
ENDIF()

SET(_alembic_SEARCH_DIRS
${ALEMBIC_ROOT_DIR}
/usr/local
/sw # Fink
/opt/local # DarwinPorts
/opt/csw # Blastwave
/opt/lib/alembic
)

FIND_PATH(ALEMBIC_INCLUDE_DIR
NAMES
Alembic/Abc/All.h
HINTS
${_alembic_SEARCH_DIRS}
PATH_SUFFIXES
include
)

FIND_LIBRARY(ALEMBIC_LIBRARY
NAMES
Alembic
HINTS
${_alembic_SEARCH_DIRS}
PATH_SUFFIXES
lib64 lib lib/static
)

# handle the QUIETLY and REQUIRED arguments and set ALEMBIC_FOUND to TRUE if
# all listed variables are TRUE
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(ALEMBIC DEFAULT_MSG ALEMBIC_LIBRARY ALEMBIC_INCLUDE_DIR)

IF(ALEMBIC_FOUND)
SET(ALEMBIC_LIBRARIES ${ALEMBIC_LIBRARY})
SET(ALEMBIC_INCLUDE_DIRS ${ALEMBIC_INCLUDE_DIR})
ENDIF(ALEMBIC_FOUND)

MARK_AS_ADVANCED(
ALEMBIC_INCLUDE_DIR
ALEMBIC_LIBRARY
)

UNSET(_alembic_SEARCH_DIRS)
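For context, a minimal sketch of how a CMakeLists.txt could consume the variables this find-module documents (ALEMBIC_FOUND, ALEMBIC_INCLUDE_DIRS, ALEMBIC_LIBRARIES); the module path and the my_app target below are illustrative assumptions, not part of this diff:

# Make the directory containing the find-module visible to find_package() (path is an assumption).
list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/build_files/cmake/Modules")
find_package(Alembic)  # sets ALEMBIC_FOUND, ALEMBIC_INCLUDE_DIRS, ALEMBIC_LIBRARIES as documented above
if(ALEMBIC_FOUND)
  include_directories(${ALEMBIC_INCLUDE_DIRS})
  target_link_libraries(my_app ${ALEMBIC_LIBRARIES})  # my_app is a placeholder target
endif()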
@@ -1,69 +0,0 @@
# - Find HDF5 library
# Find the native HDF5 includes and libraries
# This module defines
# HDF5_INCLUDE_DIRS, where to find hdf5.h, Set when HDF5_INCLUDE_DIR is found.
# HDF5_LIBRARIES, libraries to link against to use HDF5.
# HDF5_ROOT_DIR, The base directory to search for HDF5.
# This can also be an environment variable.
# HDF5_FOUND, If false, do not try to use HDF5.
#

#=============================================================================
# Copyright 2016 Blender Foundation.
#
# Distributed under the OSI-approved BSD License (the "License");
# see accompanying file Copyright.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the License for more information.
#=============================================================================

# If HDF5_ROOT_DIR was defined in the environment, use it.
IF(NOT HDF5_ROOT_DIR AND NOT $ENV{HDF5_ROOT_DIR} STREQUAL "")
SET(HDF5_ROOT_DIR $ENV{HDF5_ROOT_DIR})
ENDIF()

SET(_hdf5_SEARCH_DIRS
${HDF5_ROOT_DIR}
/usr/local
/sw # Fink
/opt/local # DarwinPorts
/opt/csw # Blastwave
/opt/lib/hdf5
)

FIND_LIBRARY(HDF5_LIBRARY
NAMES
hdf5
HINTS
${_hdf5_SEARCH_DIRS}
PATH_SUFFIXES
lib64 lib
)

FIND_PATH(HDF5_INCLUDE_DIR
NAMES
hdf5.h
HINTS
${_hdf5_SEARCH_DIRS}
PATH_SUFFIXES
include
)

# handle the QUIETLY and REQUIRED arguments and set HDF5_FOUND to TRUE if
# all listed variables are TRUE
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(HDF5 DEFAULT_MSG HDF5_LIBRARY HDF5_INCLUDE_DIR)

IF(HDF5_FOUND)
SET(HDF5_LIBRARIES ${HDF5_LIBRARY})
SET(HDF5_INCLUDE_DIRS ${HDF5_INCLUDE_DIR})
ENDIF(HDF5_FOUND)

MARK_AS_ADVANCED(
HDF5_INCLUDE_DIR
HDF5_LIBRARY
)

UNSET(_hdf5_SEARCH_DIRS)
@@ -1,15 +1,15 @@
# - Find JACK library
# Find the native JACK includes and library
# - Find Jack library
# Find the native Jack includes and library
# This module defines
# JACK_INCLUDE_DIRS, where to find jack.h, Set when
# JACK_INCLUDE_DIR is found.
# JACK_LIBRARIES, libraries to link against to use JACK.
# JACK_ROOT_DIR, The base directory to search for JACK.
# JACK_LIBRARIES, libraries to link against to use Jack.
# JACK_ROOT_DIR, The base directory to search for Jack.
# This can also be an environment variable.
# JACK_FOUND, If false, do not try to use JACK.
# JACK_FOUND, If false, do not try to use Jack.
#
# also defined, but not for general use are
# JACK_LIBRARY, where to find the JACK library.
# JACK_LIBRARY, where to find the Jack library.

#=============================================================================
# Copyright 2011 Blender Foundation.
@@ -23,7 +23,6 @@ macro(BLENDER_SRC_GTEST_EX NAME SRC EXTRA_LIBS DO_ADD_TEST)
|
||||
${CMAKE_SOURCE_DIR}/extern/glog/src
|
||||
${CMAKE_SOURCE_DIR}/extern/gflags/src
|
||||
${CMAKE_SOURCE_DIR}/extern/gtest/include
|
||||
${CMAKE_SOURCE_DIR}/extern/gmock/include
|
||||
)
|
||||
unset(_current_include_directories)
|
||||
|
||||
@@ -34,7 +33,6 @@ macro(BLENDER_SRC_GTEST_EX NAME SRC EXTRA_LIBS DO_ADD_TEST)
|
||||
bf_testing_main
|
||||
bf_intern_guardedalloc
|
||||
extern_gtest
|
||||
extern_gmock
|
||||
# needed for glog
|
||||
${PTHREADS_LIBRARIES}
|
||||
extern_glog
|
||||
|
@@ -27,12 +27,13 @@ if(EXISTS ${SOURCE_DIR}/.git)
|
||||
OUTPUT_VARIABLE MY_WC_HASH
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
|
||||
execute_process(COMMAND git branch --list master blender-v* --contains ${MY_WC_HASH}
|
||||
execute_process(COMMAND git branch --list master --contains ${MY_WC_HASH}
|
||||
WORKING_DIRECTORY ${SOURCE_DIR}
|
||||
OUTPUT_VARIABLE _git_contains_check
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
|
||||
if(NOT _git_contains_check STREQUAL "")
|
||||
STRING(REGEX REPLACE "^[ \t]+" "" _git_contains_check "${_git_contains_check}")
|
||||
if(_git_contains_check STREQUAL "master")
|
||||
set(MY_WC_BRANCH "master")
|
||||
else()
|
||||
execute_process(COMMAND git show-ref --tags -d
|
||||
@@ -47,22 +48,6 @@ if(EXISTS ${SOURCE_DIR}/.git)
|
||||
|
||||
if(_git_tag_hashes MATCHES "${_git_head_hash}")
|
||||
set(MY_WC_BRANCH "master")
|
||||
else()
|
||||
execute_process(COMMAND git branch --contains ${MY_WC_HASH}
|
||||
WORKING_DIRECTORY ${SOURCE_DIR}
|
||||
OUTPUT_VARIABLE _git_contains_branches
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
string(REGEX REPLACE "^\\*[ \t]+" "" _git_contains_branches "${_git_contains_branches}")
|
||||
string(REGEX REPLACE "[\r\n]+" ";" _git_contains_branches "${_git_contains_branches}")
|
||||
string(REGEX REPLACE ";[ \t]+" ";" _git_contains_branches "${_git_contains_branches}")
|
||||
foreach(_branch ${_git_contains_branches})
|
||||
if (NOT "${_branch}" MATCHES "\\(HEAD.*")
|
||||
set(MY_WC_BRANCH "${_branch}")
|
||||
break()
|
||||
endif()
|
||||
endforeach()
|
||||
unset(_branch)
|
||||
unset(_git_contains_branches)
|
||||
endif()
|
||||
|
||||
unset(_git_tag_hashes)
|
||||
|
@@ -239,8 +239,8 @@ def file_check_arg_sizes(tu):
|
||||
if 0:
|
||||
print("---",
|
||||
" <~> ".join(
|
||||
[" ".join([t.spelling for t in C.get_tokens()])
|
||||
for C in node.get_children()]
|
||||
[" ".join([t.spelling for t in C.get_tokens()])
|
||||
for C in node.get_children()]
|
||||
))
|
||||
# print(node.location)
|
||||
|
||||
|
@@ -29,11 +29,11 @@ if not sys.version.startswith("3"):
|
||||
sys.exit(1)
|
||||
|
||||
from cmake_consistency_check_config import (
|
||||
IGNORE,
|
||||
UTF8_CHECK,
|
||||
SOURCE_DIR,
|
||||
BUILD_DIR,
|
||||
)
|
||||
IGNORE,
|
||||
UTF8_CHECK,
|
||||
SOURCE_DIR,
|
||||
BUILD_DIR,
|
||||
)
|
||||
|
||||
|
||||
import os
|
||||
|
@@ -31,7 +31,7 @@ IGNORE = (
|
||||
"extern/carve/patches/files/random.h",
|
||||
"intern/audaspace/SRC/AUD_SRCResampleFactory.h",
|
||||
"intern/audaspace/SRC/AUD_SRCResampleReader.h",
|
||||
)
|
||||
)
|
||||
|
||||
UTF8_CHECK = True
|
||||
|
||||
|
@@ -37,19 +37,19 @@ if not project_info.init(sys.argv[-1]):
|
||||
sys.exit(1)
|
||||
|
||||
from project_info import (
|
||||
SIMPLE_PROJECTFILE,
|
||||
SOURCE_DIR,
|
||||
CMAKE_DIR,
|
||||
PROJECT_DIR,
|
||||
source_list,
|
||||
is_project_file,
|
||||
is_c_header,
|
||||
# is_py,
|
||||
cmake_advanced_info,
|
||||
cmake_compiler_defines,
|
||||
cmake_cache_var,
|
||||
project_name_get,
|
||||
)
|
||||
SIMPLE_PROJECTFILE,
|
||||
SOURCE_DIR,
|
||||
CMAKE_DIR,
|
||||
PROJECT_DIR,
|
||||
source_list,
|
||||
is_project_file,
|
||||
is_c_header,
|
||||
# is_py,
|
||||
cmake_advanced_info,
|
||||
cmake_compiler_defines,
|
||||
cmake_cache_var,
|
||||
project_name_get,
|
||||
)
|
||||
|
||||
|
||||
import os
|
||||
|
@@ -43,17 +43,17 @@ def quote_define(define):
|
||||
|
||||
def create_qtc_project_main(name):
|
||||
from project_info import (
|
||||
SIMPLE_PROJECTFILE,
|
||||
SOURCE_DIR,
|
||||
# CMAKE_DIR,
|
||||
PROJECT_DIR,
|
||||
source_list,
|
||||
is_project_file,
|
||||
is_c_header,
|
||||
cmake_advanced_info,
|
||||
cmake_compiler_defines,
|
||||
project_name_get,
|
||||
)
|
||||
SIMPLE_PROJECTFILE,
|
||||
SOURCE_DIR,
|
||||
# CMAKE_DIR,
|
||||
PROJECT_DIR,
|
||||
source_list,
|
||||
is_project_file,
|
||||
is_c_header,
|
||||
cmake_advanced_info,
|
||||
cmake_compiler_defines,
|
||||
project_name_get,
|
||||
)
|
||||
|
||||
files = list(source_list(SOURCE_DIR, filename_check=is_project_file))
|
||||
files_rel = [os.path.relpath(f, start=PROJECT_DIR) for f in files]
|
||||
@@ -69,7 +69,7 @@ def create_qtc_project_main(name):
|
||||
|
||||
with open(os.path.join(PROJECT_DIR, "%s.includes" % FILE_NAME), 'w') as f:
|
||||
f.write("\n".join(sorted(list(set(os.path.dirname(f)
|
||||
for f in files_rel if is_c_header(f))))))
|
||||
for f in files_rel if is_c_header(f))))))
|
||||
|
||||
qtc_prj = os.path.join(PROJECT_DIR, "%s.creator" % FILE_NAME)
|
||||
with open(qtc_prj, 'w') as f:
|
||||
@@ -87,7 +87,7 @@ def create_qtc_project_main(name):
|
||||
|
||||
# for some reason it doesnt give all internal includes
|
||||
includes = list(set(includes) | set(os.path.dirname(f)
|
||||
for f in files_rel if is_c_header(f)))
|
||||
for f in files_rel if is_c_header(f)))
|
||||
includes.sort()
|
||||
|
||||
# be tricky, get the project name from CMake if we can!
|
||||
@@ -125,13 +125,13 @@ def create_qtc_project_main(name):
|
||||
|
||||
def create_qtc_project_python(name):
|
||||
from project_info import (
|
||||
SOURCE_DIR,
|
||||
# CMAKE_DIR,
|
||||
PROJECT_DIR,
|
||||
source_list,
|
||||
is_py,
|
||||
project_name_get,
|
||||
)
|
||||
SOURCE_DIR,
|
||||
# CMAKE_DIR,
|
||||
PROJECT_DIR,
|
||||
source_list,
|
||||
is_py,
|
||||
project_name_get,
|
||||
)
|
||||
|
||||
files = list(source_list(SOURCE_DIR, filename_check=is_py))
|
||||
files_rel = [os.path.relpath(f, start=PROJECT_DIR) for f in files]
|
||||
@@ -161,24 +161,24 @@ def argparse_create():
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="This script generates Qt Creator project files for Blender",
|
||||
)
|
||||
description="This script generates Qt Creator project files for Blender",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"-n", "--name",
|
||||
dest="name",
|
||||
metavar='NAME', type=str,
|
||||
help="Override default project name (\"Blender\")",
|
||||
required=False,
|
||||
)
|
||||
"-n", "--name",
|
||||
dest="name",
|
||||
metavar='NAME', type=str,
|
||||
help="Override default project name (\"Blender\")",
|
||||
required=False,
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"-b", "--build-dir",
|
||||
dest="build_dir",
|
||||
metavar='BUILD_DIR', type=str,
|
||||
help="Specify the build path (or fallback to the $PWD)",
|
||||
required=False,
|
||||
)
|
||||
"-b", "--build-dir",
|
||||
dest="build_dir",
|
||||
metavar='BUILD_DIR', type=str,
|
||||
help="Specify the build path (or fallback to the $PWD)",
|
||||
required=False,
|
||||
)
|
||||
|
||||
return parser
|
||||
|
||||
|
@@ -32,7 +32,7 @@ USE_QUIET = (os.environ.get("QUIET", None) is not None)
|
||||
CHECKER_IGNORE_PREFIX = [
|
||||
"extern",
|
||||
"intern/moto",
|
||||
]
|
||||
]
|
||||
|
||||
CHECKER_BIN = "python2"
|
||||
|
||||
@@ -42,7 +42,7 @@ CHECKER_ARGS = [
|
||||
"-I" + os.path.join(project_source_info.SOURCE_DIR, "extern", "glew", "include"),
|
||||
# stupid but needed
|
||||
"-Dbool=char"
|
||||
]
|
||||
]
|
||||
|
||||
|
||||
def main():
|
||||
|
@@ -32,7 +32,7 @@ USE_QUIET = (os.environ.get("QUIET", None) is not None)
|
||||
CHECKER_IGNORE_PREFIX = [
|
||||
"extern",
|
||||
"intern/moto",
|
||||
]
|
||||
]
|
||||
|
||||
CHECKER_BIN = "cppcheck"
|
||||
|
||||
@@ -43,7 +43,7 @@ CHECKER_ARGS = [
|
||||
"--max-configs=1", # speeds up execution
|
||||
# "--check-config", # when includes are missing
|
||||
"--enable=all", # if you want sixty hundred pedantic suggestions
|
||||
]
|
||||
]
|
||||
|
||||
if USE_QUIET:
|
||||
CHECKER_ARGS.append("--quiet")
|
||||
|
@@ -25,13 +25,13 @@
|
||||
CHECKER_IGNORE_PREFIX = [
|
||||
"extern",
|
||||
"intern/moto",
|
||||
]
|
||||
]
|
||||
|
||||
CHECKER_BIN = "smatch"
|
||||
CHECKER_ARGS = [
|
||||
"--full-path",
|
||||
"--two-passes",
|
||||
]
|
||||
]
|
||||
|
||||
import project_source_info
|
||||
import subprocess
|
||||
|
@@ -25,11 +25,11 @@
|
||||
CHECKER_IGNORE_PREFIX = [
|
||||
"extern",
|
||||
"intern/moto",
|
||||
]
|
||||
]
|
||||
|
||||
CHECKER_BIN = "sparse"
|
||||
CHECKER_ARGS = [
|
||||
]
|
||||
]
|
||||
|
||||
import project_source_info
|
||||
import subprocess
|
||||
|
@@ -25,7 +25,7 @@
|
||||
CHECKER_IGNORE_PREFIX = [
|
||||
"extern",
|
||||
"intern/moto",
|
||||
]
|
||||
]
|
||||
|
||||
CHECKER_BIN = "splint"
|
||||
|
||||
@@ -61,7 +61,7 @@ CHECKER_ARGS = [
|
||||
# dummy, witjout this splint complains with:
|
||||
# /usr/include/bits/confname.h:31:27: *** Internal Bug at cscannerHelp.c:2428: Unexpanded macro not function or constant: int _PC_MAX_CANON
|
||||
"-D_PC_MAX_CANON=0",
|
||||
]
|
||||
]
|
||||
|
||||
|
||||
import project_source_info
|
||||
|
@@ -4,7 +4,6 @@
|
||||
# cmake -C../blender/build_files/cmake/config/blender_full.cmake ../blender
|
||||
#
|
||||
|
||||
set(WITH_ALEMBIC ON CACHE BOOL "" FORCE)
|
||||
set(WITH_BUILDINFO ON CACHE BOOL "" FORCE)
|
||||
set(WITH_BULLET ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CODEC_AVI ON CACHE BOOL "" FORCE)
|
||||
@@ -12,7 +11,6 @@ set(WITH_CODEC_FFMPEG ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CODEC_SNDFILE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CYCLES ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CYCLES_OSL ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CYCLES_OPENSUBDIV ON CACHE BOOL "" FORCE)
|
||||
set(WITH_FFTW3 ON CACHE BOOL "" FORCE)
|
||||
set(WITH_LIBMV ON CACHE BOOL "" FORCE)
|
||||
set(WITH_LIBMV_SCHUR_SPECIALIZATIONS ON CACHE BOOL "" FORCE)
|
||||
|
@@ -8,7 +8,6 @@
|
||||
set(WITH_INSTALL_PORTABLE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_SYSTEM_GLEW ON CACHE BOOL "" FORCE)
|
||||
|
||||
set(WITH_ALEMBIC OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_BUILDINFO OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_BULLET OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_CODEC_AVI OFF CACHE BOOL "" FORCE)
|
||||
@@ -16,7 +15,6 @@ set(WITH_CODEC_FFMPEG OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_CODEC_SNDFILE OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_CYCLES OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_CYCLES_OSL OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_CYCLES_OPENSUBDIV OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_FFTW3 OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_LIBMV OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_LLVM OFF CACHE BOOL "" FORCE)
|
||||
@@ -49,7 +47,6 @@ set(WITH_OPENCOLLADA OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENCOLORIO OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENIMAGEIO OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENMP OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENSUBDIV OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_RAYOPTIMIZATION OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_SDL OFF CACHE BOOL "" FORCE)
|
||||
|
@@ -1,79 +0,0 @@
|
||||
# Turn everything ON that's expected for an official release build.
|
||||
#
|
||||
# Example usage:
|
||||
# cmake -C../blender/build_files/cmake/config/blender_release.cmake ../blender
|
||||
#
|
||||
|
||||
set(WITH_ALEMBIC ON CACHE BOOL "" FORCE)
|
||||
set(WITH_BUILDINFO ON CACHE BOOL "" FORCE)
|
||||
set(WITH_BULLET ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CODEC_AVI ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CODEC_FFMPEG ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CODEC_SNDFILE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CYCLES ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CYCLES_OSL ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CYCLES_OPENSUBDIV ON CACHE BOOL "" FORCE)
|
||||
set(WITH_FFTW3 ON CACHE BOOL "" FORCE)
|
||||
set(WITH_LIBMV ON CACHE BOOL "" FORCE)
|
||||
set(WITH_LIBMV_SCHUR_SPECIALIZATIONS ON CACHE BOOL "" FORCE)
|
||||
set(WITH_GAMEENGINE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_COMPOSITOR ON CACHE BOOL "" FORCE)
|
||||
set(WITH_FREESTYLE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_GHOST_XDND ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IK_SOLVER ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IK_ITASC ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IMAGE_CINEON ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IMAGE_DDS ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IMAGE_FRAMESERVER ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IMAGE_HDR ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IMAGE_OPENEXR ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IMAGE_OPENJPEG ON CACHE BOOL "" FORCE)
|
||||
set(WITH_IMAGE_TIFF ON CACHE BOOL "" FORCE)
|
||||
set(WITH_INPUT_NDOF ON CACHE BOOL "" FORCE)
|
||||
set(WITH_INTERNATIONAL ON CACHE BOOL "" FORCE)
|
||||
set(WITH_JACK ON CACHE BOOL "" FORCE)
|
||||
set(WITH_LZMA ON CACHE BOOL "" FORCE)
|
||||
set(WITH_LZO ON CACHE BOOL "" FORCE)
|
||||
set(WITH_MOD_BOOLEAN ON CACHE BOOL "" FORCE)
|
||||
set(WITH_MOD_FLUID ON CACHE BOOL "" FORCE)
|
||||
set(WITH_MOD_REMESH ON CACHE BOOL "" FORCE)
|
||||
set(WITH_MOD_SMOKE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_MOD_OCEANSIM ON CACHE BOOL "" FORCE)
|
||||
set(WITH_AUDASPACE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENAL ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENCOLLADA ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENCOLORIO ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENMP ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB_BLOSC ON CACHE BOOL "" FORCE)
|
||||
set(WITH_PYTHON_INSTALL ON CACHE BOOL "" FORCE)
|
||||
set(WITH_RAYOPTIMIZATION ON CACHE BOOL "" FORCE)
|
||||
set(WITH_SDL ON CACHE BOOL "" FORCE)
|
||||
set(WITH_X11_XINPUT ON CACHE BOOL "" FORCE)
|
||||
set(WITH_X11_XF86VMODE ON CACHE BOOL "" FORCE)
|
||||
|
||||
set(WITH_PLAYER ON CACHE BOOL "" FORCE)
|
||||
set(WITH_MEM_JEMALLOC ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CYCLES_CUDA_BINARIES ON CACHE BOOL "" FORCE)
|
||||
set(CYCLES_CUDA_BINARIES_ARCH sm_20;sm_21;sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61 CACHE STRING "" FORCE)
|
||||
|
||||
# platform dependent options
|
||||
if(UNIX AND NOT APPLE)
|
||||
set(WITH_JACK ON CACHE BOOL "" FORCE)
|
||||
set(WITH_DOC_MANPAGE ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENSUBDIV ON CACHE BOOL "" FORCE)
|
||||
elseif(WIN32)
|
||||
set(WITH_JACK OFF CACHE BOOL "" FORCE)
|
||||
if(NOT CMAKE_COMPILER_IS_GNUCC)
|
||||
set(WITH_OPENSUBDIV ON CACHE BOOL "" FORCE)
|
||||
else()
|
||||
# MinGW exceptions
|
||||
set(WITH_OPENSUBDIV OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_CODEC_SNDFILE OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_CYCLES_OSL OFF CACHE BOOL "" FORCE)
|
||||
endif()
|
||||
elseif(APPLE)
|
||||
set(WITH_JACK ON CACHE BOOL "" FORCE)
|
||||
set(WITH_CODEC_QUICKTIME ON CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENSUBDIV OFF CACHE BOOL "" FORCE)
|
||||
endif()
|
@@ -32,4 +32,3 @@ set(WITH_OPENCOLLADA OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_INTERNATIONAL OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_BULLET OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_OPENVDB OFF CACHE BOOL "" FORCE)
|
||||
set(WITH_ALEMBIC OFF CACHE BOOL "" FORCE)
|
||||
|
@@ -74,7 +74,7 @@ def main():
|
||||
"rebuild_cache",
|
||||
"depend",
|
||||
"cmake_check_build_system",
|
||||
])
|
||||
])
|
||||
|
||||
targets -= set(bad)
|
||||
|
||||
|
@@ -196,33 +196,8 @@ function(blender_source_group
|
||||
endfunction()
|
||||
|
||||
|
||||
# Support per-target CMake flags
|
||||
# Read from: CMAKE_C_FLAGS_**** (made upper case) when set.
|
||||
#
|
||||
# 'name' should always match the target name,
|
||||
# use this macro before add_library or add_executable.
|
||||
#
|
||||
# Optionally takes an arg passed to set(), eg PARENT_SCOPE.
|
||||
macro(add_cc_flags_custom_test
|
||||
name
|
||||
)
|
||||
|
||||
string(TOUPPER ${name} _name_upper)
|
||||
if(DEFINED CMAKE_C_FLAGS_${_name_upper})
|
||||
message(STATUS "Using custom CFLAGS: CMAKE_C_FLAGS_${_name_upper} in \"${CMAKE_CURRENT_SOURCE_DIR}\"")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${_name_upper}}" ${ARGV1})
|
||||
endif()
|
||||
if(DEFINED CMAKE_CXX_FLAGS_${_name_upper})
|
||||
message(STATUS "Using custom CXXFLAGS: CMAKE_CXX_FLAGS_${_name_upper} in \"${CMAKE_CURRENT_SOURCE_DIR}\"")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${_name_upper}}" ${ARGV1})
|
||||
endif()
|
||||
unset(_name_upper)
|
||||
|
||||
endmacro()
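
A minimal usage sketch (the target name and flag value below are hypothetical, not taken from this diff): the custom flags are passed on the configure command line and the macro appends them right before the target is declared.

# e.g. configure with:  cmake -DCMAKE_C_FLAGS_BF_EXAMPLE="-O0 -g3" ..
# then, in the CMakeLists.txt that declares the target:
add_cc_flags_custom_test(bf_example)   # appends CMAKE_C_FLAGS_BF_EXAMPLE if it is defined
add_library(bf_example example.c)      # compiled with the extra flags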
|
||||
|
||||
|
||||
# only MSVC uses SOURCE_GROUP
|
||||
function(blender_add_lib__impl
|
||||
function(blender_add_lib_nolist
|
||||
name
|
||||
sources
|
||||
includes
|
||||
@@ -250,18 +225,6 @@ function(blender_add_lib__impl
|
||||
endfunction()
|
||||
|
||||
|
||||
function(blender_add_lib_nolist
|
||||
name
|
||||
sources
|
||||
includes
|
||||
includes_sys
|
||||
)
|
||||
|
||||
add_cc_flags_custom_test(${name} PARENT_SCOPE)
|
||||
|
||||
blender_add_lib__impl(${name} "${sources}" "${includes}" "${includes_sys}")
|
||||
endfunction()
|
||||
|
||||
function(blender_add_lib
|
||||
name
|
||||
sources
|
||||
@@ -269,9 +232,7 @@ function(blender_add_lib
|
||||
includes_sys
|
||||
)
|
||||
|
||||
add_cc_flags_custom_test(${name} PARENT_SCOPE)
|
||||
|
||||
blender_add_lib__impl(${name} "${sources}" "${includes}" "${includes_sys}")
|
||||
blender_add_lib_nolist(${name} "${sources}" "${includes}" "${includes_sys}")
|
||||
|
||||
set_property(GLOBAL APPEND PROPERTY BLENDER_LINK_LIBS ${name})
|
||||
endfunction()
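
For illustration, a sketch of how a module would typically call this wrapper (the target name and file lists are hypothetical): blender_add_lib both declares the library and appends it to the global BLENDER_LINK_LIBS property used later for link ordering.

set(INC .)
set(INC_SYS "")
set(SRC example_module.c example_module.h)
blender_add_lib(bf_example_module "${SRC}" "${INC}" "${INC_SYS}")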
|
||||
@@ -333,11 +294,6 @@ function(SETUP_LIBDIRS)
|
||||
link_directories(${LLVM_LIBPATH})
|
||||
endif()
|
||||
|
||||
if(WITH_ALEMBIC)
|
||||
link_directories(${ALEMBIC_LIBPATH})
|
||||
link_directories(${HDF5_LIBPATH})
|
||||
endif()
|
||||
|
||||
if(WIN32 AND NOT UNIX)
|
||||
link_directories(${PTHREADS_LIBPATH})
|
||||
endif()
|
||||
@@ -359,6 +315,7 @@ function(setup_liblinks
|
||||
target_link_libraries(
|
||||
${target}
|
||||
${PNG_LIBRARIES}
|
||||
${ZLIB_LIBRARIES}
|
||||
${FREETYPE_LIBRARY}
|
||||
)
|
||||
|
||||
@@ -415,7 +372,7 @@ function(setup_liblinks
|
||||
if(WITH_OPENCOLORIO)
|
||||
target_link_libraries(${target} ${OPENCOLORIO_LIBRARIES})
|
||||
endif()
|
||||
if(WITH_OPENSUBDIV OR WITH_CYCLES_OPENSUBDIV)
|
||||
if(WITH_OPENSUBDIV)
|
||||
if(WIN32 AND NOT UNIX)
|
||||
file_list_suffix(OPENSUBDIV_LIBRARIES_DEBUG "${OPENSUBDIV_LIBRARIES}" "_d")
|
||||
target_link_libraries_debug(${target} "${OPENSUBDIV_LIBRARIES_DEBUG}")
|
||||
@@ -438,9 +395,6 @@ function(setup_liblinks
|
||||
endif()
|
||||
endif()
|
||||
target_link_libraries(${target} ${JPEG_LIBRARIES})
|
||||
if(WITH_ALEMBIC)
|
||||
target_link_libraries(${target} ${ALEMBIC_LIBRARIES} ${HDF5_LIBRARIES})
|
||||
endif()
|
||||
if(WITH_IMAGE_OPENEXR)
|
||||
target_link_libraries(${target} ${OPENEXR_LIBRARIES})
|
||||
endif()
|
||||
@@ -509,17 +463,11 @@ function(setup_liblinks
|
||||
endif()
|
||||
endif()
|
||||
|
||||
target_link_libraries(
|
||||
${target}
|
||||
${ZLIB_LIBRARIES}
|
||||
)
|
||||
|
||||
#system libraries with no dependencies such as platform link libs or opengl should go last
|
||||
target_link_libraries(${target}
|
||||
${BLENDER_GL_LIBRARIES})
|
||||
|
||||
#target_link_libraries(${target} ${PLATFORM_LINKLIBS} ${CMAKE_DL_LIBS})
|
||||
target_link_libraries(${target} ${PLATFORM_LINKLIBS})
|
||||
target_link_libraries(${target} ${PLATFORM_LINKLIBS} ${CMAKE_DL_LIBS})
|
||||
endfunction()
|
||||
|
||||
|
||||
@@ -620,7 +568,6 @@ function(SETUP_BLENDER_SORTED_LIBS)
|
||||
bf_imbuf_openimageio
|
||||
bf_imbuf_dds
|
||||
bf_collada
|
||||
bf_alembic
|
||||
bf_intern_elbeem
|
||||
bf_intern_memutil
|
||||
bf_intern_guardedalloc
|
||||
@@ -738,7 +685,7 @@ function(SETUP_BLENDER_SORTED_LIBS)
|
||||
list_insert_after(BLENDER_SORTED_LIBS "ge_logic_ngnetwork" "extern_bullet")
|
||||
endif()
|
||||
|
||||
if(WITH_GAMEENGINE_DECKLINK)
|
||||
if(WITH_DECKLINK)
|
||||
list(APPEND BLENDER_SORTED_LIBS bf_intern_decklink)
|
||||
endif()
|
||||
|
||||
@@ -746,7 +693,7 @@ function(SETUP_BLENDER_SORTED_LIBS)
|
||||
list(APPEND BLENDER_SORTED_LIBS bf_intern_gpudirect)
|
||||
endif()
|
||||
|
||||
if(WITH_OPENSUBDIV OR WITH_CYCLES_OPENSUBDIV)
|
||||
if(WITH_OPENSUBDIV)
|
||||
list(APPEND BLENDER_SORTED_LIBS bf_intern_opensubdiv)
|
||||
endif()
|
||||
|
||||
@@ -1561,44 +1508,3 @@ function(print_all_vars)
|
||||
message("${_var}=${${_var}}")
|
||||
endforeach()
|
||||
endfunction()
|
||||
|
||||
macro(openmp_delayload
|
||||
projectname
|
||||
)
|
||||
if(MSVC)
|
||||
if(WITH_OPENMP)
|
||||
if(MSVC_VERSION EQUAL 1800)
|
||||
set(OPENMP_DLL_NAME "vcomp120")
|
||||
else()
|
||||
set(OPENMP_DLL_NAME "vcomp140")
|
||||
endif()
|
||||
SET_TARGET_PROPERTIES(${projectname} PROPERTIES LINK_FLAGS_RELEASE "/DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
|
||||
SET_TARGET_PROPERTIES(${projectname} PROPERTIES LINK_FLAGS_DEBUG "/DELAYLOAD:${OPENMP_DLL_NAME}d.dll delayimp.lib")
|
||||
SET_TARGET_PROPERTIES(${projectname} PROPERTIES LINK_FLAGS_RELWITHDEBINFO "/DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
|
||||
SET_TARGET_PROPERTIES(${projectname} PROPERTIES LINK_FLAGS_MINSIZEREL "/DELAYLOAD:${OPENMP_DLL_NAME}.dll delayimp.lib")
|
||||
endif(WITH_OPENMP)
|
||||
endif(MSVC)
|
||||
endmacro()
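
A short usage sketch, assuming an executable target named blender (the call site is not part of this diff): the macro is a no-op outside MSVC or when WITH_OPENMP is off; otherwise it delay-loads the matching vcomp runtime DLL.

add_executable(blender creator.c)
openmp_delayload(blender)   # adds /DELAYLOAD:vcomp140.dll (vcomp120.dll on MSVC 2013) to the link flags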
|
||||
|
||||
MACRO(WINDOWS_SIGN_TARGET target)
|
||||
if (WITH_WINDOWS_CODESIGN)
|
||||
if (!SIGNTOOL_EXE)
|
||||
message(FATAL_ERROR "Codesigning is enabled, but signtool is not found")
|
||||
else()
|
||||
if (WINDOWS_CODESIGN_PFX_PASSWORD)
|
||||
set(CODESIGNPASSWORD /p ${WINDOWS_CODESIGN_PFX_PASSWORD})
|
||||
else()
|
||||
if ($ENV{PFXPASSWORD})
|
||||
set(CODESIGNPASSWORD /p $ENV{PFXPASSWORD})
|
||||
else()
|
||||
message( FATAL_ERROR "WITH_WINDOWS_CODESIGN is on but WINDOWS_CODESIGN_PFX_PASSWORD not set, and environment variable PFXPASSWORD not found, unable to sign code.")
|
||||
endif()
|
||||
endif()
|
||||
add_custom_command(TARGET ${target}
|
||||
POST_BUILD
|
||||
COMMAND ${SIGNTOOL_EXE} sign /f ${WINDOWS_CODESIGN_PFX} ${CODESIGNPASSWORD} $<TARGET_FILE:${target}>
|
||||
VERBATIM
|
||||
)
|
||||
endif()
|
||||
endif()
|
||||
ENDMACRO()
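
A sketch of the intended configure and usage flow; the paths and target below are placeholders, not values from this diff.

# cmake -DWITH_WINDOWS_CODESIGN=ON -DWINDOWS_CODESIGN_PFX=C:/keys/example.pfx ..
# with the certificate password given via WINDOWS_CODESIGN_PFX_PASSWORD or the
# PFXPASSWORD environment variable, then per target:
WINDOWS_SIGN_TARGET(blender)   # adds a post-build signtool step to the target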
|
||||
|
@@ -38,21 +38,7 @@ unset(MY_WC_HASH)
|
||||
# Force Package Name
|
||||
execute_process(COMMAND date "+%Y%m%d" OUTPUT_VARIABLE CPACK_DATE OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
string(TOLOWER ${PROJECT_NAME} PROJECT_NAME_LOWER)
|
||||
if (MSVC)
|
||||
if ("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
|
||||
set(PACKAGE_ARCH windows64)
|
||||
else()
|
||||
set(PACKAGE_ARCH windows32)
|
||||
endif()
|
||||
else(MSVC)
|
||||
set(PACKAGE_ARCH ${CMAKE_SYSTEM_PROCESSOR})
|
||||
endif()
|
||||
|
||||
if (CPACK_OVERRIDE_PACKAGENAME)
|
||||
set(CPACK_PACKAGE_FILE_NAME ${CPACK_OVERRIDE_PACKAGENAME}-${PACKAGE_ARCH})
|
||||
else()
|
||||
set(CPACK_PACKAGE_FILE_NAME ${PROJECT_NAME_LOWER}-${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION}-git${CPACK_DATE}.${BUILD_REV}-${PACKAGE_ARCH})
|
||||
endif()
|
||||
set(CPACK_PACKAGE_FILE_NAME ${PROJECT_NAME_LOWER}-${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION}-git${CPACK_DATE}.${BUILD_REV}-${CMAKE_SYSTEM_PROCESSOR})
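
For concreteness, a sketch of how this pattern expands; every version, date and hash below is a made-up placeholder.

# PROJECT_NAME "Blender", version 2.79.0, CPACK_DATE 20170101, BUILD_REV abc1234,
# CMAKE_SYSTEM_PROCESSOR x86_64  ->  blender-2.79.0-git20170101.abc1234-x86_64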
|
||||
|
||||
if(CMAKE_SYSTEM_NAME MATCHES "Linux")
|
||||
# RPM packages
|
||||
@@ -97,8 +83,6 @@ if(WIN32)
|
||||
endif()
|
||||
|
||||
set(CPACK_PACKAGE_EXECUTABLES "blender" "blender")
|
||||
set(CPACK_CREATE_DESKTOP_LINKS "blender" "blender")
|
||||
|
||||
include(CPack)
|
||||
|
||||
# Target for build_archive.py script, to automatically pass along
|
||||
|
@@ -1,468 +0,0 @@
|
||||
# ***** BEGIN GPL LICENSE BLOCK *****
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# The Original Code is Copyright (C) 2016, Blender Foundation
|
||||
# All rights reserved.
|
||||
#
|
||||
# Contributor(s): Sergey Sharybin.
|
||||
#
|
||||
# ***** END GPL LICENSE BLOCK *****
|
||||
|
||||
# Libraries configuration for Apple.
|
||||
|
||||
if(NOT DEFINED LIBDIR)
|
||||
if(WITH_CXX11)
|
||||
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/darwin)
|
||||
else()
|
||||
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/darwin-9.x.universal)
|
||||
endif()
|
||||
else()
|
||||
message(STATUS "Using pre-compiled LIBDIR: ${LIBDIR}")
|
||||
endif()
|
||||
if(NOT EXISTS "${LIBDIR}/")
|
||||
message(FATAL_ERROR "Mac OSX requires pre-compiled libs at: '${LIBDIR}'")
|
||||
endif()
|
||||
|
||||
if(WITH_OPENAL)
|
||||
find_package(OpenAL)
|
||||
if(OPENAL_FOUND)
|
||||
set(WITH_OPENAL ON)
|
||||
set(OPENAL_INCLUDE_DIR "${LIBDIR}/openal/include")
|
||||
else()
|
||||
set(WITH_OPENAL OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_ALEMBIC)
|
||||
set(ALEMBIC ${LIBDIR}/alembic)
|
||||
set(ALEMBIC_INCLUDE_DIR ${ALEMBIC}/include)
|
||||
set(ALEMBIC_INCLUDE_DIRS ${ALEMBIC_INCLUDE_DIR})
|
||||
set(ALEMBIC_LIBPATH ${ALEMBIC}/lib)
|
||||
set(ALEMBIC_LIBRARIES Alembic)
|
||||
endif()
|
||||
|
||||
if(WITH_OPENSUBDIV OR WITH_CYCLES_OPENSUBDIV)
|
||||
set(OPENSUBDIV ${LIBDIR}/opensubdiv)
|
||||
set(OPENSUBDIV_LIBPATH ${OPENSUBDIV}/lib)
|
||||
find_library(OSD_LIB_CPU NAMES osdCPU PATHS ${OPENSUBDIV_LIBPATH})
|
||||
find_library(OSD_LIB_GPU NAMES osdGPU PATHS ${OPENSUBDIV_LIBPATH})
|
||||
set(OPENSUBDIV_INCLUDE_DIR ${OPENSUBDIV}/include)
|
||||
set(OPENSUBDIV_INCLUDE_DIRS ${OPENSUBDIV_INCLUDE_DIR})
|
||||
list(APPEND OPENSUBDIV_LIBRARIES ${OSD_LIB_CPU} ${OSD_LIB_GPU})
|
||||
endif()
|
||||
|
||||
if(WITH_JACK)
|
||||
find_library(JACK_FRAMEWORK
|
||||
NAMES jackmp
|
||||
)
|
||||
set(JACK_INCLUDE_DIRS ${JACK_FRAMEWORK}/headers)
|
||||
if(NOT JACK_FRAMEWORK)
|
||||
set(WITH_JACK OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_CODEC_SNDFILE)
|
||||
set(SNDFILE ${LIBDIR}/sndfile)
|
||||
set(SNDFILE_INCLUDE_DIRS ${SNDFILE}/include)
|
||||
set(SNDFILE_LIBRARIES sndfile FLAC ogg vorbis vorbisenc)
|
||||
set(SNDFILE_LIBPATH ${SNDFILE}/lib ${LIBDIR}/ffmpeg/lib) # TODO, deprecate
|
||||
endif()
|
||||
|
||||
if(WITH_PYTHON)
|
||||
# we use precompiled libraries for py 3.5 and up by default
|
||||
set(PYTHON_VERSION 3.5)
|
||||
if(NOT WITH_PYTHON_MODULE AND NOT WITH_PYTHON_FRAMEWORK)
|
||||
# normally cached but not since we include them with blender
|
||||
set(PYTHON_INCLUDE_DIR "${LIBDIR}/python/include/python${PYTHON_VERSION}m")
|
||||
set(PYTHON_EXECUTABLE "${LIBDIR}/python/bin/python${PYTHON_VERSION}m")
|
||||
set(PYTHON_LIBRARY python${PYTHON_VERSION}m)
|
||||
set(PYTHON_LIBPATH "${LIBDIR}/python/lib/python${PYTHON_VERSION}")
|
||||
# set(PYTHON_LINKFLAGS "-u _PyMac_Error") # won't build with this enabled
|
||||
else()
|
||||
# module must be compiled against Python framework
|
||||
set(_py_framework "/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}")
|
||||
|
||||
set(PYTHON_INCLUDE_DIR "${_py_framework}/include/python${PYTHON_VERSION}m")
|
||||
set(PYTHON_EXECUTABLE "${_py_framework}/bin/python${PYTHON_VERSION}m")
|
||||
set(PYTHON_LIBPATH "${_py_framework}/lib/python${PYTHON_VERSION}/config-${PYTHON_VERSION}m")
|
||||
#set(PYTHON_LIBRARY python${PYTHON_VERSION})
|
||||
#set(PYTHON_LINKFLAGS "-u _PyMac_Error -framework Python") # won't build with this enabled
|
||||
|
||||
unset(_py_framework)
|
||||
endif()
|
||||
|
||||
# uncached vars
|
||||
set(PYTHON_INCLUDE_DIRS "${PYTHON_INCLUDE_DIR}")
|
||||
set(PYTHON_LIBRARIES "${PYTHON_LIBRARY}")
|
||||
|
||||
if(NOT EXISTS "${PYTHON_EXECUTABLE}")
|
||||
message(FATAL_ERROR "Python executable missing: ${PYTHON_EXECUTABLE}")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_FFTW3)
|
||||
set(FFTW3 ${LIBDIR}/fftw3)
|
||||
set(FFTW3_INCLUDE_DIRS ${FFTW3}/include)
|
||||
set(FFTW3_LIBRARIES fftw3)
|
||||
set(FFTW3_LIBPATH ${FFTW3}/lib)
|
||||
endif()
|
||||
|
||||
set(PNG_LIBRARIES png)
|
||||
set(JPEG_LIBRARIES jpeg)
|
||||
|
||||
set(ZLIB /usr)
|
||||
set(ZLIB_INCLUDE_DIRS "${ZLIB}/include")
|
||||
set(ZLIB_LIBRARIES z bz2)
|
||||
|
||||
set(FREETYPE ${LIBDIR}/freetype)
|
||||
set(FREETYPE_INCLUDE_DIRS ${FREETYPE}/include ${FREETYPE}/include/freetype2)
|
||||
set(FREETYPE_LIBPATH ${FREETYPE}/lib)
|
||||
set(FREETYPE_LIBRARY freetype)
|
||||
|
||||
if(WITH_IMAGE_OPENEXR)
|
||||
set(OPENEXR ${LIBDIR}/openexr)
|
||||
set(OPENEXR_INCLUDE_DIR ${OPENEXR}/include)
|
||||
set(OPENEXR_INCLUDE_DIRS ${OPENEXR_INCLUDE_DIR} ${OPENEXR}/include/OpenEXR)
|
||||
if(WITH_CXX11)
|
||||
set(OPENEXR_POSTFIX -2_2)
|
||||
else()
|
||||
set(OPENEXR_POSTFIX)
|
||||
endif()
|
||||
set(OPENEXR_LIBRARIES
|
||||
Iex${OPENEXR_POSTFIX}
|
||||
Half
|
||||
IlmImf${OPENEXR_POSTFIX}
|
||||
Imath${OPENEXR_POSTFIX}
|
||||
IlmThread${OPENEXR_POSTFIX})
|
||||
set(OPENEXR_LIBPATH ${OPENEXR}/lib)
|
||||
endif()
|
||||
|
||||
if(WITH_CODEC_FFMPEG)
|
||||
set(FFMPEG ${LIBDIR}/ffmpeg)
|
||||
set(FFMPEG_INCLUDE_DIRS ${FFMPEG}/include)
|
||||
set(FFMPEG_LIBRARIES
|
||||
avcodec avdevice avformat avutil
|
||||
mp3lame swscale x264 xvidcore theora theoradec theoraenc vorbis vorbisenc vorbisfile ogg
|
||||
)
|
||||
if(WITH_CXX11)
|
||||
set(FFMPEG_LIBRARIES ${FFMPEG_LIBRARIES} schroedinger orc vpx webp swresample)
|
||||
endif()
|
||||
set(FFMPEG_LIBPATH ${FFMPEG}/lib)
|
||||
endif()
|
||||
|
||||
if(WITH_OPENJPEG OR WITH_CODEC_FFMPEG)
|
||||
# use openjpeg from libdir that is linked into ffmpeg
|
||||
if(WITH_CXX11)
|
||||
set(OPENJPEG ${LIBDIR}/openjpeg)
|
||||
set(WITH_SYSTEM_OPENJPEG ON)
|
||||
set(OPENJPEG_INCLUDE_DIRS ${OPENJPEG}/include)
|
||||
set(OPENJPEG_LIBRARIES ${OPENJPEG}/lib/libopenjpeg.a)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
find_library(SYSTEMSTUBS_LIBRARY
|
||||
NAMES
|
||||
SystemStubs
|
||||
PATHS
|
||||
)
|
||||
mark_as_advanced(SYSTEMSTUBS_LIBRARY)
|
||||
if(SYSTEMSTUBS_LIBRARY)
|
||||
list(APPEND PLATFORM_LINKLIBS SystemStubs)
|
||||
endif()
|
||||
|
||||
set(PLATFORM_CFLAGS "-pipe -funsigned-char")
|
||||
set(PLATFORM_LINKFLAGS
|
||||
"-fexceptions -framework CoreServices -framework Foundation -framework IOKit -framework AppKit -framework Cocoa -framework Carbon -framework AudioUnit -framework AudioToolbox -framework CoreAudio"
|
||||
)
|
||||
if(WITH_CODEC_QUICKTIME)
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -framework QTKit")
|
||||
if(CMAKE_OSX_ARCHITECTURES MATCHES i386)
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -framework QuickTime")
|
||||
# libSDL still needs 32bit carbon quicktime
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_CXX11)
|
||||
list(APPEND PLATFORM_LINKLIBS c++)
|
||||
else()
|
||||
list(APPEND PLATFORM_LINKLIBS stdc++)
|
||||
endif()
|
||||
|
||||
if(WITH_JACK)
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -F/Library/Frameworks -weak_framework jackmp")
|
||||
endif()
|
||||
|
||||
if(WITH_PYTHON_MODULE OR WITH_PYTHON_FRAMEWORK)
|
||||
# force cmake to link right framework
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} /Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}/Python")
|
||||
endif()
|
||||
|
||||
if(WITH_OPENCOLLADA)
|
||||
set(OPENCOLLADA ${LIBDIR}/opencollada)
|
||||
|
||||
set(OPENCOLLADA_INCLUDE_DIRS
|
||||
${LIBDIR}/opencollada/include/COLLADAStreamWriter
|
||||
${LIBDIR}/opencollada/include/COLLADABaseUtils
|
||||
${LIBDIR}/opencollada/include/COLLADAFramework
|
||||
${LIBDIR}/opencollada/include/COLLADASaxFrameworkLoader
|
||||
${LIBDIR}/opencollada/include/GeneratedSaxParser
|
||||
)
|
||||
|
||||
set(OPENCOLLADA_LIBPATH ${OPENCOLLADA}/lib)
|
||||
set(OPENCOLLADA_LIBRARIES
|
||||
OpenCOLLADASaxFrameworkLoader
|
||||
-lOpenCOLLADAFramework
|
||||
-lOpenCOLLADABaseUtils
|
||||
-lOpenCOLLADAStreamWriter
|
||||
-lMathMLSolver
|
||||
-lGeneratedSaxParser
|
||||
-lxml2 -lbuffer -lftoa
|
||||
)
|
||||
# Use UTF functions from collada if LLVM is not enabled
|
||||
if(NOT WITH_LLVM)
|
||||
list(APPEND OPENCOLLADA_LIBRARIES -lUTF)
|
||||
endif()
|
||||
# pcre is bundled with openCollada
|
||||
#set(PCRE ${LIBDIR}/pcre)
|
||||
#set(PCRE_LIBPATH ${PCRE}/lib)
|
||||
set(PCRE_LIBRARIES pcre)
|
||||
#libxml2 is used
|
||||
#set(EXPAT ${LIBDIR}/expat)
|
||||
#set(EXPAT_LIBPATH ${EXPAT}/lib)
|
||||
set(EXPAT_LIB)
|
||||
endif()
|
||||
|
||||
if(WITH_SDL)
|
||||
set(SDL ${LIBDIR}/sdl)
|
||||
set(SDL_INCLUDE_DIR ${SDL}/include)
|
||||
set(SDL_LIBRARY SDL2)
|
||||
set(SDL_LIBPATH ${SDL}/lib)
|
||||
if(WITH_CXX11)
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -framework ForceFeedback")
|
||||
else()
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -lazy_framework ForceFeedback")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
set(PNG "${LIBDIR}/png")
|
||||
set(PNG_INCLUDE_DIRS "${PNG}/include")
|
||||
set(PNG_LIBPATH ${PNG}/lib)
|
||||
|
||||
set(JPEG "${LIBDIR}/jpeg")
|
||||
set(JPEG_INCLUDE_DIR "${JPEG}/include")
|
||||
set(JPEG_LIBPATH ${JPEG}/lib)
|
||||
|
||||
if(WITH_IMAGE_TIFF)
|
||||
set(TIFF ${LIBDIR}/tiff)
|
||||
set(TIFF_INCLUDE_DIR ${TIFF}/include)
|
||||
set(TIFF_LIBRARY tiff)
|
||||
set(TIFF_LIBPATH ${TIFF}/lib)
|
||||
endif()
|
||||
|
||||
if(WITH_BOOST)
|
||||
set(BOOST ${LIBDIR}/boost)
|
||||
set(BOOST_INCLUDE_DIR ${BOOST}/include)
|
||||
if(WITH_CXX11)
|
||||
set(BOOST_POSTFIX)
|
||||
else()
|
||||
set(BOOST_POSTFIX -mt)
|
||||
endif()
|
||||
set(BOOST_LIBRARIES
|
||||
boost_date_time${BOOST_POSTFIX}
|
||||
boost_filesystem${BOOST_POSTFIX}
|
||||
boost_regex${BOOST_POSTFIX}
|
||||
boost_system${BOOST_POSTFIX}
|
||||
boost_thread${BOOST_POSTFIX}
|
||||
boost_wave${BOOST_POSTFIX}
|
||||
)
|
||||
if(WITH_INTERNATIONAL)
|
||||
list(APPEND BOOST_LIBRARIES boost_locale${BOOST_POSTFIX})
|
||||
endif()
|
||||
if(WITH_CYCLES_NETWORK)
|
||||
list(APPEND BOOST_LIBRARIES boost_serialization${BOOST_POSTFIX})
|
||||
endif()
|
||||
if(WITH_OPENVDB)
|
||||
list(APPEND BOOST_LIBRARIES boost_iostreams${BOOST_POSTFIX})
|
||||
endif()
|
||||
set(BOOST_LIBPATH ${BOOST}/lib)
|
||||
set(BOOST_DEFINITIONS)
|
||||
endif()
|
||||
|
||||
if(WITH_INTERNATIONAL OR WITH_CODEC_FFMPEG)
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -liconv") # boost_locale and ffmpeg need it!
|
||||
endif()
|
||||
|
||||
if(WITH_OPENIMAGEIO)
|
||||
set(OPENIMAGEIO ${LIBDIR}/openimageio)
|
||||
set(OPENIMAGEIO_INCLUDE_DIRS ${OPENIMAGEIO}/include)
|
||||
set(OPENIMAGEIO_LIBRARIES
|
||||
${OPENIMAGEIO}/lib/libOpenImageIO.a
|
||||
${PNG_LIBRARIES}
|
||||
${JPEG_LIBRARIES}
|
||||
${TIFF_LIBRARY}
|
||||
${OPENEXR_LIBRARIES}
|
||||
${ZLIB_LIBRARIES}
|
||||
)
|
||||
if(WITH_CXX11)
|
||||
set(OPENIMAGEIO_LIBRARIES ${OPENIMAGEIO_LIBRARIES} ${LIBDIR}/ffmpeg/lib/libwebp.a)
|
||||
endif()
|
||||
set(OPENIMAGEIO_LIBPATH
|
||||
${OPENIMAGEIO}/lib
|
||||
${JPEG_LIBPATH}
|
||||
${PNG_LIBPATH}
|
||||
${TIFF_LIBPATH}
|
||||
${OPENEXR_LIBPATH}
|
||||
${ZLIB_LIBPATH}
|
||||
)
|
||||
set(OPENIMAGEIO_DEFINITIONS "-DOIIO_STATIC_BUILD")
|
||||
set(OPENIMAGEIO_IDIFF "${LIBDIR}/openimageio/bin/idiff")
|
||||
endif()
|
||||
|
||||
if(WITH_OPENCOLORIO)
|
||||
set(OPENCOLORIO ${LIBDIR}/opencolorio)
|
||||
set(OPENCOLORIO_INCLUDE_DIRS ${OPENCOLORIO}/include)
|
||||
set(OPENCOLORIO_LIBRARIES OpenColorIO tinyxml yaml-cpp)
|
||||
set(OPENCOLORIO_LIBPATH ${OPENCOLORIO}/lib)
|
||||
endif()
|
||||
|
||||
if(WITH_OPENVDB)
|
||||
set(OPENVDB ${LIBDIR}/openvdb)
|
||||
set(OPENVDB_INCLUDE_DIRS ${OPENVDB}/include)
|
||||
set(TBB_INCLUDE_DIRS ${LIBDIR}/tbb/include)
|
||||
set(TBB_LIBRARIES ${LIBDIR}/tbb/lib/libtbb.a)
|
||||
set(OPENVDB_LIBRARIES openvdb blosc ${TBB_LIBRARIES})
|
||||
set(OPENVDB_LIBPATH ${LIBDIR}/openvdb/lib)
|
||||
set(OPENVDB_DEFINITIONS)
|
||||
endif()
|
||||
|
||||
if(WITH_LLVM)
|
||||
set(LLVM_ROOT_DIR ${LIBDIR}/llvm CACHE PATH "Path to the LLVM installation")
|
||||
set(LLVM_VERSION "3.4" CACHE STRING "Version of LLVM to use")
|
||||
if(EXISTS "${LLVM_ROOT_DIR}/bin/llvm-config")
|
||||
set(LLVM_CONFIG "${LLVM_ROOT_DIR}/bin/llvm-config")
|
||||
else()
|
||||
set(LLVM_CONFIG llvm-config)
|
||||
endif()
|
||||
execute_process(COMMAND ${LLVM_CONFIG} --version
|
||||
OUTPUT_VARIABLE LLVM_VERSION
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
execute_process(COMMAND ${LLVM_CONFIG} --prefix
|
||||
OUTPUT_VARIABLE LLVM_ROOT_DIR
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
execute_process(COMMAND ${LLVM_CONFIG} --libdir
|
||||
OUTPUT_VARIABLE LLVM_LIBPATH
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
find_library(LLVM_LIBRARY
|
||||
NAMES LLVMAnalysis # first of a whole bunch of libs to get
|
||||
PATHS ${LLVM_LIBPATH})
|
||||
|
||||
if(LLVM_LIBRARY AND LLVM_ROOT_DIR AND LLVM_LIBPATH)
|
||||
if(LLVM_STATIC)
|
||||
# if static LLVM libraries were requested, use llvm-config to generate
|
||||
# the list of what libraries we need, and substitute that in the right
|
||||
# way for LLVM_LIBRARY.
|
||||
execute_process(COMMAND ${LLVM_CONFIG} --libfiles
|
||||
OUTPUT_VARIABLE LLVM_LIBRARY
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
string(REPLACE " " ";" LLVM_LIBRARY ${LLVM_LIBRARY})
|
||||
else()
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -lLLVM-3.4")
|
||||
endif()
|
||||
else()
|
||||
message(FATAL_ERROR "LLVM not found.")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_CYCLES_OSL)
|
||||
set(CYCLES_OSL ${LIBDIR}/osl CACHE PATH "Path to OpenShadingLanguage installation")
|
||||
|
||||
find_library(OSL_LIB_EXEC NAMES oslexec PATHS ${CYCLES_OSL}/lib)
|
||||
find_library(OSL_LIB_COMP NAMES oslcomp PATHS ${CYCLES_OSL}/lib)
|
||||
find_library(OSL_LIB_QUERY NAMES oslquery PATHS ${CYCLES_OSL}/lib)
|
||||
# WARNING! depends on correct order of OSL libs linking
|
||||
list(APPEND OSL_LIBRARIES ${OSL_LIB_COMP} -force_load ${OSL_LIB_EXEC} ${OSL_LIB_QUERY})
|
||||
find_path(OSL_INCLUDE_DIR OSL/oslclosure.h PATHS ${CYCLES_OSL}/include)
|
||||
find_program(OSL_COMPILER NAMES oslc PATHS ${CYCLES_OSL}/bin)
|
||||
|
||||
if(OSL_INCLUDE_DIR AND OSL_LIBRARIES AND OSL_COMPILER)
|
||||
set(OSL_FOUND TRUE)
|
||||
else()
|
||||
message(STATUS "OSL not found")
|
||||
set(WITH_CYCLES_OSL OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_OPENMP)
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} --version OUTPUT_VARIABLE COMPILER_VENDOR)
|
||||
string(SUBSTRING "${COMPILER_VENDOR}" 0 5 VENDOR_NAME) # truncate output
|
||||
if(${VENDOR_NAME} MATCHES "Apple") # Apple does not support OpenMP reliably with gcc, nor with clang
|
||||
set(WITH_OPENMP OFF)
|
||||
else() # vanilla gcc or clang_omp support OpenMP
|
||||
message(STATUS "Using special OpenMP enabled compiler !") # letting find_package(OpenMP) module work for gcc
|
||||
if(CMAKE_C_COMPILER_ID MATCHES "Clang") # clang-omp in darwin libs
|
||||
set(OPENMP_FOUND ON)
|
||||
set(OpenMP_C_FLAGS "-fopenmp" CACHE STRING "C compiler flags for OpenMP parallelization" FORCE)
|
||||
set(OpenMP_CXX_FLAGS "-fopenmp" CACHE STRING "C++ compiler flags for OpenMP parallelization" FORCE)
|
||||
include_directories(${LIBDIR}/openmp/include)
|
||||
link_directories(${LIBDIR}/openmp/lib)
|
||||
# This is a workaround for our helper binaries (datatoc, msgfmt, ...),
|
||||
# They are also linked to the omp lib, so we need it in the build dir for runtime execution,
|
||||
# TODO: remove all unneeded dependencies from these
|
||||
|
||||
# for intermediate binaries, in respect to lib ID
|
||||
execute_process(
|
||||
COMMAND ditto -arch ${CMAKE_OSX_ARCHITECTURES}
|
||||
${LIBDIR}/openmp/lib/libiomp5.dylib
|
||||
${CMAKE_BINARY_DIR}/Resources/lib/libiomp5.dylib)
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
set(EXETYPE MACOSX_BUNDLE)
|
||||
|
||||
set(CMAKE_C_FLAGS_DEBUG "-fno-strict-aliasing -g")
|
||||
set(CMAKE_CXX_FLAGS_DEBUG "-fno-strict-aliasing -g")
|
||||
if(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64" OR CMAKE_OSX_ARCHITECTURES MATCHES "i386")
|
||||
set(CMAKE_CXX_FLAGS_RELEASE "-O2 -mdynamic-no-pic -msse -msse2 -msse3 -mssse3")
|
||||
set(CMAKE_C_FLAGS_RELEASE "-O2 -mdynamic-no-pic -msse -msse2 -msse3 -mssse3")
|
||||
if(NOT CMAKE_C_COMPILER_ID MATCHES "Clang")
|
||||
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -ftree-vectorize -fvariable-expansion-in-unroller")
|
||||
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -ftree-vectorize -fvariable-expansion-in-unroller")
|
||||
endif()
|
||||
else()
|
||||
set(CMAKE_C_FLAGS_RELEASE "-mdynamic-no-pic -fno-strict-aliasing")
|
||||
set(CMAKE_CXX_FLAGS_RELEASE "-mdynamic-no-pic -fno-strict-aliasing")
|
||||
endif()
|
||||
|
||||
if(${XCODE_VERSION} VERSION_EQUAL 5 OR ${XCODE_VERSION} VERSION_GREATER 5)
|
||||
# Xcode 5 always uses Clang, whose default template depth of 128 is too low for libmv
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ftemplate-depth=1024")
|
||||
endif()
|
||||
# Avoid possible symbol clashes; we explicitly mark some exported symbols as local
|
||||
set(PLATFORM_LINKFLAGS
|
||||
"${PLATFORM_LINKFLAGS} -Xlinker -unexported_symbols_list -Xlinker ${CMAKE_SOURCE_DIR}/source/creator/osx_locals.map"
|
||||
)
|
||||
|
||||
if(WITH_CXX11)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -stdlib=libc++")
|
||||
endif()
|
||||
|
||||
# Suppress ranlib "has no symbols" warnings (workaround for T48250)
|
||||
set(CMAKE_C_ARCHIVE_CREATE "<CMAKE_AR> Scr <TARGET> <LINK_FLAGS> <OBJECTS>")
|
||||
set(CMAKE_CXX_ARCHIVE_CREATE "<CMAKE_AR> Scr <TARGET> <LINK_FLAGS> <OBJECTS>")
|
||||
set(CMAKE_C_ARCHIVE_FINISH "<CMAKE_RANLIB> -no_warning_for_no_symbols -c <TARGET>")
|
||||
set(CMAKE_CXX_ARCHIVE_FINISH "<CMAKE_RANLIB> -no_warning_for_no_symbols -c <TARGET>")
|
@@ -1,428 +0,0 @@
|
||||
# ***** BEGIN GPL LICENSE BLOCK *****
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# The Original Code is Copyright (C) 2016, Blender Foundation
|
||||
# All rights reserved.
|
||||
#
|
||||
# Contributor(s): Sergey Sharybin.
|
||||
#
|
||||
# ***** END GPL LICENSE BLOCK *****
|
||||
|
||||
# Libraries configuration for any *nix system including Linux and Unix.
|
||||
|
||||
macro(find_package_wrapper)
|
||||
if(WITH_STATIC_LIBS)
|
||||
find_package_static(${ARGV})
|
||||
else()
|
||||
find_package(${ARGV})
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
find_package_wrapper(JPEG REQUIRED)
|
||||
find_package_wrapper(PNG REQUIRED)
|
||||
find_package_wrapper(ZLIB REQUIRED)
|
||||
find_package_wrapper(Freetype REQUIRED)
|
||||
|
||||
if(WITH_LZO AND WITH_SYSTEM_LZO)
|
||||
find_package_wrapper(LZO)
|
||||
if(NOT LZO_FOUND)
|
||||
message(FATAL_ERROR "Failed finding system LZO version!")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_SYSTEM_EIGEN3)
|
||||
find_package_wrapper(Eigen3)
|
||||
if(NOT EIGEN3_FOUND)
|
||||
message(FATAL_ERROR "Failed finding system Eigen3 version!")
|
||||
endif()
|
||||
endif()
|
||||
# else values are set below for all platforms
|
||||
|
||||
if(WITH_PYTHON)
|
||||
# No way to set py35, remove for now.
|
||||
# find_package(PythonLibs)
|
||||
|
||||
# Use our own instead; building without Python is such a rare case that we
|
||||
# require this package
|
||||
# XXX Linking errors with debian static python :/
|
||||
# find_package_wrapper(PythonLibsUnix REQUIRED)
|
||||
find_package(PythonLibsUnix REQUIRED)
|
||||
endif()
|
||||
|
||||
if(WITH_IMAGE_OPENEXR)
|
||||
find_package_wrapper(OpenEXR) # our own module
|
||||
if(NOT OPENEXR_FOUND)
|
||||
set(WITH_IMAGE_OPENEXR OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_IMAGE_OPENJPEG)
|
||||
find_package_wrapper(OpenJPEG)
|
||||
if(NOT OPENJPEG_FOUND)
|
||||
set(WITH_IMAGE_OPENJPEG OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_IMAGE_TIFF)
|
||||
# XXX Linking errors with debian static tiff :/
|
||||
# find_package_wrapper(TIFF)
|
||||
find_package(TIFF)
|
||||
if(NOT TIFF_FOUND)
|
||||
set(WITH_IMAGE_TIFF OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Audio IO
|
||||
if(WITH_SYSTEM_AUDASPACE)
|
||||
find_package_wrapper(Audaspace)
|
||||
if(NOT AUDASPACE_FOUND OR NOT AUDASPACE_C_FOUND)
|
||||
message(FATAL_ERROR "Audaspace external library not found!")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_OPENAL)
|
||||
find_package_wrapper(OpenAL)
|
||||
if(NOT OPENAL_FOUND)
|
||||
set(WITH_OPENAL OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_SDL)
|
||||
if(WITH_SDL_DYNLOAD)
|
||||
set(SDL_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/extern/sdlew/include/SDL2")
|
||||
set(SDL_LIBRARY)
|
||||
else()
|
||||
find_package_wrapper(SDL2)
|
||||
if(SDL2_FOUND)
|
||||
# Use same names for both versions of SDL until we move to 2.x.
|
||||
set(SDL_INCLUDE_DIR "${SDL2_INCLUDE_DIR}")
|
||||
set(SDL_LIBRARY "${SDL2_LIBRARY}")
|
||||
set(SDL_FOUND "${SDL2_FOUND}")
|
||||
else()
|
||||
find_package_wrapper(SDL)
|
||||
endif()
|
||||
mark_as_advanced(
|
||||
SDL_INCLUDE_DIR
|
||||
SDL_LIBRARY
|
||||
)
|
||||
# unset(SDLMAIN_LIBRARY CACHE)
|
||||
if(NOT SDL_FOUND)
|
||||
set(WITH_SDL OFF)
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_JACK)
|
||||
find_package_wrapper(Jack)
|
||||
if(NOT JACK_FOUND)
|
||||
set(WITH_JACK OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Codecs
|
||||
if(WITH_CODEC_SNDFILE)
|
||||
find_package_wrapper(SndFile)
|
||||
if(NOT SNDFILE_FOUND)
|
||||
set(WITH_CODEC_SNDFILE OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_CODEC_FFMPEG)
|
||||
set(FFMPEG /usr CACHE PATH "FFMPEG Directory")
|
||||
set(FFMPEG_LIBRARIES avformat avcodec avutil avdevice swscale CACHE STRING "FFMPEG Libraries")
|
||||
|
||||
mark_as_advanced(FFMPEG)
|
||||
|
||||
# lame, but needed until we have a proper find module for ffmpeg
|
||||
set(FFMPEG_INCLUDE_DIRS ${FFMPEG}/include)
|
||||
if(EXISTS "${FFMPEG}/include/ffmpeg/")
|
||||
list(APPEND FFMPEG_INCLUDE_DIRS "${FFMPEG}/include/ffmpeg")
|
||||
endif()
|
||||
# end lameness
|
||||
|
||||
mark_as_advanced(FFMPEG_LIBRARIES)
|
||||
set(FFMPEG_LIBPATH ${FFMPEG}/lib)
|
||||
endif()
|
||||
|
||||
if(WITH_FFTW3)
|
||||
find_package_wrapper(Fftw3)
|
||||
if(NOT FFTW3_FOUND)
|
||||
set(WITH_FFTW3 OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_OPENCOLLADA)
|
||||
find_package_wrapper(OpenCOLLADA)
|
||||
if(OPENCOLLADA_FOUND)
|
||||
find_package_wrapper(XML2)
|
||||
find_package_wrapper(PCRE)
|
||||
else()
|
||||
set(WITH_OPENCOLLADA OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_MEM_JEMALLOC)
|
||||
find_package_wrapper(JeMalloc)
|
||||
if(NOT JEMALLOC_FOUND)
|
||||
set(WITH_MEM_JEMALLOC OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_INPUT_NDOF)
|
||||
find_package_wrapper(Spacenav)
|
||||
if(SPACENAV_FOUND)
|
||||
# use generic names within Blender's build system.
|
||||
set(NDOF_INCLUDE_DIRS ${SPACENAV_INCLUDE_DIRS})
|
||||
set(NDOF_LIBRARIES ${SPACENAV_LIBRARIES})
|
||||
else()
|
||||
set(WITH_INPUT_NDOF OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_CYCLES_OSL)
|
||||
set(CYCLES_OSL ${LIBDIR}/osl CACHE PATH "Path to OpenShadingLanguage installation")
|
||||
if(NOT OSL_ROOT)
|
||||
set(OSL_ROOT ${CYCLES_OSL})
|
||||
endif()
|
||||
find_package_wrapper(OpenShadingLanguage)
|
||||
if(OSL_FOUND)
|
||||
if(${OSL_LIBRARY_VERSION_MAJOR} EQUAL "1" AND ${OSL_LIBRARY_VERSION_MINOR} LESS "6")
|
||||
# Note: --whole-archive is needed to force loading of all symbols in liboslexec,
|
||||
# otherwise LLVM is missing the osl_allocate_closure_component function
|
||||
set(OSL_LIBRARIES
|
||||
${OSL_OSLCOMP_LIBRARY}
|
||||
-Wl,--whole-archive ${OSL_OSLEXEC_LIBRARY}
|
||||
-Wl,--no-whole-archive ${OSL_OSLQUERY_LIBRARY}
|
||||
)
|
||||
endif()
|
||||
else()
|
||||
message(STATUS "OSL not found, disabling it from Cycles")
|
||||
set(WITH_CYCLES_OSL OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_OPENVDB)
|
||||
find_package_wrapper(OpenVDB)
|
||||
find_package_wrapper(TBB)
|
||||
if(NOT OPENVDB_FOUND OR NOT TBB_FOUND)
|
||||
set(WITH_OPENVDB OFF)
|
||||
set(WITH_OPENVDB_BLOSC OFF)
|
||||
message(STATUS "OpenVDB not found, disabling it")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_ALEMBIC)
|
||||
find_package_wrapper(Alembic)
|
||||
|
||||
if(WITH_ALEMBIC_HDF5)
|
||||
set(HDF5_ROOT_DIR ${LIBDIR}/hdf5)
|
||||
find_package_wrapper(HDF5)
|
||||
endif()
|
||||
|
||||
if(NOT ALEMBIC_FOUND OR (WITH_ALEMBIC_HDF5 AND NOT HDF5_FOUND))
|
||||
set(WITH_ALEMBIC OFF)
|
||||
set(WITH_ALEMBIC_HDF5 OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_BOOST)
|
||||
# used in build instructions to override include and library variables
|
||||
if(NOT BOOST_CUSTOM)
|
||||
if(WITH_STATIC_LIBS)
|
||||
set(Boost_USE_STATIC_LIBS ON)
|
||||
endif()
|
||||
set(Boost_USE_MULTITHREADED ON)
|
||||
set(__boost_packages filesystem regex thread date_time)
|
||||
if(WITH_CYCLES_OSL)
|
||||
if(NOT (${OSL_LIBRARY_VERSION_MAJOR} EQUAL "1" AND ${OSL_LIBRARY_VERSION_MINOR} LESS "6"))
|
||||
list(APPEND __boost_packages wave)
|
||||
else()
|
||||
endif()
|
||||
endif()
|
||||
if(WITH_INTERNATIONAL)
|
||||
list(APPEND __boost_packages locale)
|
||||
endif()
|
||||
if(WITH_CYCLES_NETWORK)
|
||||
list(APPEND __boost_packages serialization)
|
||||
endif()
|
||||
if(WITH_OPENVDB)
|
||||
list(APPEND __boost_packages iostreams)
|
||||
endif()
|
||||
list(APPEND __boost_packages system)
|
||||
find_package(Boost 1.48 COMPONENTS ${__boost_packages})
|
||||
if(NOT Boost_FOUND)
|
||||
# try to find non-multithreaded if -mt not found, this flag
|
||||
# doesn't matter for us, it has nothing to do with thread
|
||||
# safety, but keep it to not disturb build setups
|
||||
set(Boost_USE_MULTITHREADED OFF)
|
||||
find_package(Boost 1.48 COMPONENTS ${__boost_packages})
|
||||
endif()
|
||||
unset(__boost_packages)
|
||||
if(Boost_USE_STATIC_LIBS AND WITH_BOOST_ICU)
|
||||
find_package(IcuLinux)
|
||||
endif()
|
||||
mark_as_advanced(Boost_DIR) # why doesn't boost do this?
|
||||
endif()
|
||||
|
||||
set(BOOST_INCLUDE_DIR ${Boost_INCLUDE_DIRS})
|
||||
set(BOOST_LIBRARIES ${Boost_LIBRARIES})
|
||||
set(BOOST_LIBPATH ${Boost_LIBRARY_DIRS})
|
||||
set(BOOST_DEFINITIONS "-DBOOST_ALL_NO_LIB")
|
||||
endif()
|
||||
|
||||
if(WITH_OPENIMAGEIO)
|
||||
find_package_wrapper(OpenImageIO)
|
||||
if(NOT OPENIMAGEIO_PUGIXML_FOUND AND WITH_CYCLES_STANDALONE)
|
||||
find_package_wrapper(PugiXML)
|
||||
else()
|
||||
set(PUGIXML_INCLUDE_DIR "${OPENIMAGEIO_INCLUDE_DIR}/OpenImageIO")
|
||||
set(PUGIXML_LIBRARIES "")
|
||||
endif()
|
||||
|
||||
set(OPENIMAGEIO_LIBRARIES
|
||||
${OPENIMAGEIO_LIBRARIES}
|
||||
${PNG_LIBRARIES}
|
||||
${JPEG_LIBRARIES}
|
||||
${ZLIB_LIBRARIES}
|
||||
${BOOST_LIBRARIES}
|
||||
)
|
||||
set(OPENIMAGEIO_LIBPATH) # TODO, remove and reference the absolute path everywhere
|
||||
set(OPENIMAGEIO_DEFINITIONS "")
|
||||
|
||||
if(WITH_IMAGE_TIFF)
|
||||
list(APPEND OPENIMAGEIO_LIBRARIES "${TIFF_LIBRARY}")
|
||||
endif()
|
||||
if(WITH_IMAGE_OPENEXR)
|
||||
list(APPEND OPENIMAGEIO_LIBRARIES "${OPENEXR_LIBRARIES}")
|
||||
endif()
|
||||
|
||||
if(NOT OPENIMAGEIO_FOUND)
|
||||
set(WITH_OPENIMAGEIO OFF)
|
||||
message(STATUS "OpenImageIO not found, disabling WITH_CYCLES")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_OPENCOLORIO)
|
||||
find_package_wrapper(OpenColorIO)
|
||||
|
||||
set(OPENCOLORIO_LIBRARIES ${OPENCOLORIO_LIBRARIES})
|
||||
set(OPENCOLORIO_LIBPATH) # TODO, remove and reference the absolute path everywhere
|
||||
set(OPENCOLORIO_DEFINITIONS)
|
||||
|
||||
if(NOT OPENCOLORIO_FOUND)
|
||||
set(WITH_OPENCOLORIO OFF)
|
||||
message(STATUS "OpenColorIO not found")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_LLVM)
|
||||
find_package_wrapper(LLVM)
|
||||
|
||||
if(NOT LLVM_FOUND)
|
||||
set(WITH_LLVM OFF)
|
||||
message(STATUS "LLVM not found")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_LLVM OR WITH_SDL_DYNLOAD)
|
||||
# Fix for conflict with Mesa llvmpipe
|
||||
set(PLATFORM_LINKFLAGS
|
||||
"${PLATFORM_LINKFLAGS} -Wl,--version-script='${CMAKE_SOURCE_DIR}/source/creator/blender.map'"
|
||||
)
|
||||
endif()
|
||||
|
||||
if(WITH_OPENSUBDIV OR WITH_CYCLES_OPENSUBDIV)
|
||||
find_package_wrapper(OpenSubdiv)
|
||||
|
||||
set(OPENSUBDIV_LIBRARIES ${OPENSUBDIV_LIBRARIES})
|
||||
set(OPENSUBDIV_LIBPATH) # TODO, remove and reference the absolute path everywhere
|
||||
|
||||
if(NOT OPENSUBDIV_FOUND)
|
||||
set(WITH_OPENSUBDIV OFF)
|
||||
set(WITH_CYCLES_OPENSUBDIV OFF)
|
||||
message(STATUS "OpenSubdiv not found")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# OpenSUSE needs lutil, ArchLinux does not; keep it for now, could be avoided by using --as-needed
|
||||
list(APPEND PLATFORM_LINKLIBS -lutil -lc -lm)
|
||||
|
||||
find_package(Threads REQUIRED)
|
||||
list(APPEND PLATFORM_LINKLIBS ${CMAKE_THREAD_LIBS_INIT})
|
||||
# used by other platforms
|
||||
set(PTHREADS_LIBRARIES ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
||||
if(CMAKE_DL_LIBS)
|
||||
list(APPEND PLATFORM_LINKLIBS ${CMAKE_DL_LIBS})
|
||||
endif()
|
||||
|
||||
if(CMAKE_SYSTEM_NAME MATCHES "Linux")
|
||||
if(NOT WITH_PYTHON_MODULE)
|
||||
# binreloc is linux only
|
||||
set(BINRELOC_INCLUDE_DIRS ${CMAKE_SOURCE_DIR}/extern/binreloc/include)
|
||||
set(WITH_BINRELOC ON)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# lfs on glibc, all compilers should use
|
||||
add_definitions(-D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE)
|
||||
|
||||
# GNU Compiler
|
||||
if(CMAKE_COMPILER_IS_GNUCC)
|
||||
set(PLATFORM_CFLAGS "-pipe -fPIC -funsigned-char -fno-strict-aliasing")
|
||||
|
||||
if(WITH_LINKER_GOLD)
|
||||
execute_process(
|
||||
COMMAND ${CMAKE_C_COMPILER} -fuse-ld=gold -Wl,--version
|
||||
ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
|
||||
if("${LD_VERSION}" MATCHES "GNU gold")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fuse-ld=gold")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fuse-ld=gold")
|
||||
else()
|
||||
message(STATUS "GNU gold linker isn't available, using the default system linker.")
|
||||
endif()
|
||||
unset(LD_VERSION)
|
||||
endif()
|
||||
|
||||
# Clang is the same as GCC for now.
|
||||
elseif(CMAKE_C_COMPILER_ID MATCHES "Clang")
|
||||
set(PLATFORM_CFLAGS "-pipe -fPIC -funsigned-char -fno-strict-aliasing")
|
||||
# Solaris CC
|
||||
elseif(CMAKE_C_COMPILER_ID MATCHES "SunPro")
|
||||
set(PLATFORM_CFLAGS "-pipe -features=extensions -fPIC -D__FUNCTION__=__func__")
|
||||
|
||||
# Intel C++ Compiler
|
||||
elseif(CMAKE_C_COMPILER_ID MATCHES "Intel")
|
||||
# think these next two are broken
|
||||
find_program(XIAR xiar)
|
||||
if(XIAR)
|
||||
set(CMAKE_AR "${XIAR}")
|
||||
endif()
|
||||
mark_as_advanced(XIAR)
|
||||
|
||||
find_program(XILD xild)
|
||||
if(XILD)
|
||||
set(CMAKE_LINKER "${XILD}")
|
||||
endif()
|
||||
mark_as_advanced(XILD)
|
||||
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fp-model precise -prec_div -parallel")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fp-model precise -prec_div -parallel")
|
||||
|
||||
# set(PLATFORM_CFLAGS "${PLATFORM_CFLAGS} -diag-enable sc3")
|
||||
set(PLATFORM_CFLAGS "-pipe -fPIC -funsigned-char -fno-strict-aliasing")
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -static-intel")
|
||||
endif()
|
@@ -1,87 +0,0 @@
|
||||
# ***** BEGIN GPL LICENSE BLOCK *****
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# The Original Code is Copyright (C) 2016, Blender Foundation
|
||||
# All rights reserved.
|
||||
#
|
||||
# Contributor(s): Sergey Sharybin.
|
||||
#
|
||||
# ***** END GPL LICENSE BLOCK *****
|
||||
|
||||
# Libraries configuration for Windows.
|
||||
|
||||
add_definitions(-DWIN32)
|
||||
|
||||
if(MSVC)
|
||||
include(platform_win32_msvc)
|
||||
elseif(CMAKE_COMPILER_IS_GNUCC)
|
||||
include(platform_win32_mingw)
|
||||
endif()
|
||||
|
||||
# Things common to both mingw and MSVC should go here
|
||||
|
||||
set(WINTAB_INC ${LIBDIR}/wintab/include)
|
||||
|
||||
if(WITH_OPENAL)
|
||||
set(OPENAL ${LIBDIR}/openal)
|
||||
set(OPENALDIR ${LIBDIR}/openal)
|
||||
set(OPENAL_INCLUDE_DIR ${OPENAL}/include)
|
||||
if(MSVC)
|
||||
set(OPENAL_LIBRARY openal32)
|
||||
else()
|
||||
set(OPENAL_LIBRARY wrap_oal)
|
||||
endif()
|
||||
set(OPENAL_LIBPATH ${OPENAL}/lib)
|
||||
endif()
|
||||
|
||||
if(WITH_CODEC_SNDFILE)
|
||||
set(SNDFILE ${LIBDIR}/sndfile)
|
||||
set(SNDFILE_INCLUDE_DIRS ${SNDFILE}/include)
|
||||
set(SNDFILE_LIBRARIES libsndfile-1)
|
||||
set(SNDFILE_LIBPATH ${SNDFILE}/lib) # TODO, deprecate
|
||||
endif()
|
||||
|
||||
if(WITH_RAYOPTIMIZATION AND SUPPORT_SSE_BUILD)
|
||||
add_definitions(-D__SSE__ -D__MMX__)
|
||||
endif()
|
||||
|
||||
if(WITH_CYCLES_OSL)
|
||||
set(CYCLES_OSL ${LIBDIR}/osl CACHE PATH "Path to OpenShadingLanguage installation")
|
||||
|
||||
find_library(OSL_LIB_EXEC NAMES oslexec PATHS ${CYCLES_OSL}/lib)
|
||||
find_library(OSL_LIB_COMP NAMES oslcomp PATHS ${CYCLES_OSL}/lib)
|
||||
find_library(OSL_LIB_QUERY NAMES oslquery PATHS ${CYCLES_OSL}/lib)
|
||||
find_library(OSL_LIB_EXEC_DEBUG NAMES oslexec_d PATHS ${CYCLES_OSL}/lib)
|
||||
find_library(OSL_LIB_COMP_DEBUG NAMES oslcomp_d PATHS ${CYCLES_OSL}/lib)
|
||||
find_library(OSL_LIB_QUERY_DEBUG NAMES oslquery_d PATHS ${CYCLES_OSL}/lib)
|
||||
list(APPEND OSL_LIBRARIES
|
||||
optimized ${OSL_LIB_COMP}
|
||||
optimized ${OSL_LIB_EXEC}
|
||||
optimized ${OSL_LIB_QUERY}
|
||||
debug ${OSL_LIB_EXEC_DEBUG}
|
||||
debug ${OSL_LIB_COMP_DEBUG}
|
||||
debug ${OSL_LIB_QUERY_DEBUG}
|
||||
)
|
||||
find_path(OSL_INCLUDE_DIR OSL/oslclosure.h PATHS ${CYCLES_OSL}/include)
|
||||
find_program(OSL_COMPILER NAMES oslc PATHS ${CYCLES_OSL}/bin)
|
||||
|
||||
if(OSL_INCLUDE_DIR AND OSL_LIBRARIES AND OSL_COMPILER)
|
||||
set(OSL_FOUND TRUE)
|
||||
else()
|
||||
message(STATUS "OSL not found")
|
||||
set(WITH_CYCLES_OSL OFF)
|
||||
endif()
|
||||
endif()
|
@@ -1,302 +0,0 @@
|
||||
# ***** BEGIN GPL LICENSE BLOCK *****
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# The Original Code is Copyright (C) 2016, Blender Foundation
|
||||
# All rights reserved.
|
||||
#
|
||||
# Contributor(s): Sergey Sharybin.
|
||||
#
|
||||
# ***** END GPL LICENSE BLOCK *****
|
||||
|
||||
# Libraries configuration for Windows when compiling with MinGW.
|
||||
|
||||
# keep GCC specific stuff here
|
||||
include(CheckCSourceCompiles)
|
||||
# Setup 64bit and 32bit windows systems
|
||||
CHECK_C_SOURCE_COMPILES("
|
||||
#ifndef __MINGW64__
|
||||
#error
|
||||
#endif
|
||||
int main(void) { return 0; }
|
||||
"
|
||||
WITH_MINGW64
|
||||
)
|
||||
|
||||
if(NOT DEFINED LIBDIR)
|
||||
if(WITH_MINGW64)
|
||||
message(STATUS "Compiling for 64 bit with MinGW-w64.")
|
||||
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/mingw64)
|
||||
else()
|
||||
message(STATUS "Compiling for 32 bit with MinGW-w32.")
|
||||
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/mingw32)
|
||||
|
||||
if(WITH_RAYOPTIMIZATION)
|
||||
message(WARNING "MinGW-w32 is known to be unstable with 'WITH_RAYOPTIMIZATION' option enabled.")
|
||||
endif()
|
||||
endif()
|
||||
else()
|
||||
message(STATUS "Using pre-compiled LIBDIR: ${LIBDIR}")
|
||||
endif()
|
||||
if(NOT EXISTS "${LIBDIR}/")
|
||||
message(FATAL_ERROR "Windows requires pre-compiled libs at: '${LIBDIR}'")
|
||||
endif()
|
||||
|
||||
list(APPEND PLATFORM_LINKLIBS
|
||||
-lshell32 -lshfolder -lgdi32 -lmsvcrt -lwinmm -lmingw32 -lm -lws2_32
|
||||
-lz -lstdc++ -lole32 -luuid -lwsock32 -lpsapi -ldbghelp
|
||||
)
|
||||
|
||||
if(WITH_INPUT_IME)
|
||||
list(APPEND PLATFORM_LINKLIBS -limm32)
|
||||
endif()
|
||||
|
||||
set(PLATFORM_CFLAGS "-pipe -funsigned-char -fno-strict-aliasing")
|
||||
|
||||
if(WITH_MINGW64)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fpermissive")
|
||||
list(APPEND PLATFORM_LINKLIBS -lpthread)
|
||||
|
||||
add_definitions(-DFREE_WINDOWS64 -DMS_WIN64)
|
||||
endif()
|
||||
|
||||
add_definitions(-D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE)
|
||||
|
||||
add_definitions(-DFREE_WINDOWS)
|
||||
|
||||
set(PNG "${LIBDIR}/png")
|
||||
set(PNG_INCLUDE_DIRS "${PNG}/include")
|
||||
set(PNG_LIBPATH ${PNG}/lib) # not cmake defined
|
||||
|
||||
if(WITH_MINGW64)
|
||||
set(JPEG_LIBRARIES jpeg)
|
||||
else()
|
||||
set(JPEG_LIBRARIES libjpeg)
|
||||
endif()
|
||||
set(PNG_LIBRARIES png)
|
||||
|
||||
set(ZLIB ${LIBDIR}/zlib)
|
||||
set(ZLIB_INCLUDE_DIRS ${ZLIB}/include)
|
||||
set(ZLIB_LIBPATH ${ZLIB}/lib)
|
||||
set(ZLIB_LIBRARIES z)
|
||||
|
||||
set(JPEG "${LIBDIR}/jpeg")
|
||||
set(JPEG_INCLUDE_DIR "${JPEG}/include")
|
||||
set(JPEG_LIBPATH ${JPEG}/lib) # not cmake defined
|
||||
|
||||
# comes with own pthread library
|
||||
if(NOT WITH_MINGW64)
|
||||
set(PTHREADS ${LIBDIR}/pthreads)
|
||||
#set(PTHREADS_INCLUDE_DIRS ${PTHREADS}/include)
|
||||
set(PTHREADS_LIBPATH ${PTHREADS}/lib)
|
||||
set(PTHREADS_LIBRARIES pthreadGC2)
|
||||
endif()
|
||||
|
||||
set(FREETYPE ${LIBDIR}/freetype)
|
||||
set(FREETYPE_INCLUDE_DIRS ${FREETYPE}/include ${FREETYPE}/include/freetype2)
|
||||
set(FREETYPE_LIBPATH ${FREETYPE}/lib)
|
||||
set(FREETYPE_LIBRARY freetype)
|
||||
|
||||
if(WITH_FFTW3)
|
||||
set(FFTW3 ${LIBDIR}/fftw3)
|
||||
set(FFTW3_LIBRARIES fftw3)
|
||||
set(FFTW3_INCLUDE_DIRS ${FFTW3}/include)
|
||||
set(FFTW3_LIBPATH ${FFTW3}/lib)
|
||||
endif()
|
||||
|
||||
if(WITH_OPENCOLLADA)
|
||||
set(OPENCOLLADA ${LIBDIR}/opencollada)
|
||||
set(OPENCOLLADA_INCLUDE_DIRS
|
||||
${OPENCOLLADA}/include/opencollada/COLLADAStreamWriter
|
||||
${OPENCOLLADA}/include/opencollada/COLLADABaseUtils
|
||||
${OPENCOLLADA}/include/opencollada/COLLADAFramework
|
||||
${OPENCOLLADA}/include/opencollada/COLLADASaxFrameworkLoader
|
||||
${OPENCOLLADA}/include/opencollada/GeneratedSaxParser
|
||||
)
|
||||
set(OPENCOLLADA_LIBPATH ${OPENCOLLADA}/lib/opencollada)
|
||||
set(OPENCOLLADA_LIBRARIES
|
||||
OpenCOLLADAStreamWriter
|
||||
OpenCOLLADASaxFrameworkLoader
|
||||
OpenCOLLADAFramework
|
||||
OpenCOLLADABaseUtils
|
||||
GeneratedSaxParser
|
||||
UTF MathMLSolver buffer ftoa xml
|
||||
)
|
||||
set(PCRE_LIBRARIES pcre)
|
||||
endif()
|
||||
|
||||
if(WITH_CODEC_FFMPEG)
|
||||
set(FFMPEG ${LIBDIR}/ffmpeg)
|
||||
set(FFMPEG_INCLUDE_DIRS ${FFMPEG}/include)
|
||||
if(WITH_MINGW64)
|
||||
set(FFMPEG_LIBRARIES avcodec.dll avformat.dll avdevice.dll avutil.dll swscale.dll swresample.dll)
|
||||
else()
|
||||
set(FFMPEG_LIBRARIES avcodec-55 avformat-55 avdevice-55 avutil-52 swscale-2)
|
||||
endif()
|
||||
set(FFMPEG_LIBPATH ${FFMPEG}/lib)
|
||||
endif()
|
||||
|
||||
if(WITH_IMAGE_OPENEXR)
|
||||
set(OPENEXR ${LIBDIR}/openexr)
|
||||
set(OPENEXR_INCLUDE_DIR ${OPENEXR}/include)
|
||||
set(OPENEXR_INCLUDE_DIRS ${OPENEXR}/include/OpenEXR)
|
||||
set(OPENEXR_LIBRARIES Half IlmImf Imath IlmThread Iex)
|
||||
set(OPENEXR_LIBPATH ${OPENEXR}/lib)
|
||||
endif()
|
||||
|
||||
if(WITH_IMAGE_TIFF)
|
||||
set(TIFF ${LIBDIR}/tiff)
|
||||
set(TIFF_LIBRARY tiff)
|
||||
set(TIFF_INCLUDE_DIR ${TIFF}/include)
|
||||
set(TIFF_LIBPATH ${TIFF}/lib)
|
||||
endif()
|
||||
|
||||
if(WITH_JACK)
|
||||
set(JACK ${LIBDIR}/jack)
|
||||
set(JACK_INCLUDE_DIRS ${JACK}/include/jack ${JACK}/include)
|
||||
set(JACK_LIBRARIES jack)
|
||||
set(JACK_LIBPATH ${JACK}/lib)
|
||||
|
||||
# TODO, gives linking errors, force off
|
||||
set(WITH_JACK OFF)
|
||||
endif()
|
||||
|
||||
if(WITH_PYTHON)
|
||||
# normally cached but not since we include them with blender
|
||||
set(PYTHON_VERSION 3.5) # CACHE STRING)
|
||||
string(REPLACE "." "" _PYTHON_VERSION_NO_DOTS ${PYTHON_VERSION})
|
||||
set(PYTHON_INCLUDE_DIR "${LIBDIR}/python/include/python${PYTHON_VERSION}") # CACHE PATH)
|
||||
set(PYTHON_LIBRARY "${LIBDIR}/python/lib/python${_PYTHON_VERSION_NO_DOTS}mw.lib") # CACHE FILEPATH)
|
||||
unset(_PYTHON_VERSION_NO_DOTS)
|
||||
|
||||
# uncached vars
|
||||
set(PYTHON_INCLUDE_DIRS "${PYTHON_INCLUDE_DIR}")
|
||||
set(PYTHON_LIBRARIES "${PYTHON_LIBRARY}")
|
||||
endif()
|
||||
|
||||
if(WITH_BOOST)
|
||||
set(BOOST ${LIBDIR}/boost)
|
||||
set(BOOST_INCLUDE_DIR ${BOOST}/include)
|
||||
if(WITH_MINGW64)
|
||||
set(BOOST_POSTFIX "mgw47-mt-s-1_49")
|
||||
set(BOOST_DEBUG_POSTFIX "mgw47-mt-sd-1_49")
|
||||
else()
|
||||
set(BOOST_POSTFIX "mgw46-mt-s-1_49")
|
||||
set(BOOST_DEBUG_POSTFIX "mgw46-mt-sd-1_49")
|
||||
endif()
|
||||
set(BOOST_LIBRARIES
|
||||
optimized boost_date_time-${BOOST_POSTFIX} boost_filesystem-${BOOST_POSTFIX}
|
||||
boost_regex-${BOOST_POSTFIX}
|
||||
boost_system-${BOOST_POSTFIX} boost_thread-${BOOST_POSTFIX}
|
||||
debug boost_date_time-${BOOST_DEBUG_POSTFIX} boost_filesystem-${BOOST_DEBUG_POSTFIX}
|
||||
boost_regex-${BOOST_DEBUG_POSTFIX}
|
||||
boost_system-${BOOST_DEBUG_POSTFIX} boost_thread-${BOOST_DEBUG_POSTFIX})
|
||||
if(WITH_INTERNATIONAL)
|
||||
set(BOOST_LIBRARIES ${BOOST_LIBRARIES}
|
||||
optimized boost_locale-${BOOST_POSTFIX}
|
||||
debug boost_locale-${BOOST_DEBUG_POSTFIX}
|
||||
)
|
||||
endif()
|
||||
if(WITH_CYCLES_OSL)
|
||||
set(BOOST_LIBRARIES ${BOOST_LIBRARIES}
|
||||
optimized boost_wave-${BOOST_POSTFIX}
|
||||
debug boost_wave-${BOOST_DEBUG_POSTFIX}
|
||||
)
|
||||
endif()
|
||||
set(BOOST_LIBPATH ${BOOST}/lib)
|
||||
set(BOOST_DEFINITIONS "-DBOOST_ALL_NO_LIB -DBOOST_THREAD_USE_LIB ")
|
||||
endif()
|
||||
|
||||
if(WITH_OPENIMAGEIO)
|
||||
set(OPENIMAGEIO ${LIBDIR}/openimageio)
|
||||
set(OPENIMAGEIO_INCLUDE_DIRS ${OPENIMAGEIO}/include)
|
||||
set(OPENIMAGEIO_LIBRARIES OpenImageIO)
|
||||
set(OPENIMAGEIO_LIBPATH ${OPENIMAGEIO}/lib)
|
||||
set(OPENIMAGEIO_DEFINITIONS "")
|
||||
set(OPENIMAGEIO_IDIFF "${OPENIMAGEIO}/bin/idiff.exe")
|
||||
endif()
|
||||
|
||||
if(WITH_LLVM)
|
||||
set(LLVM_ROOT_DIR ${LIBDIR}/llvm CACHE PATH "Path to the LLVM installation")
|
||||
set(LLVM_LIBPATH ${LLVM_ROOT_DIR}/lib)
|
||||
# Explicitly set llvm lib order.
|
||||
#---- WARNING ON GCC ORDER OF LIBS IS IMPORTANT, DO NOT CHANGE! ---------
|
||||
set(LLVM_LIBRARY LLVMSelectionDAG LLVMCodeGen LLVMScalarOpts LLVMAnalysis LLVMArchive
|
||||
LLVMAsmParser LLVMAsmPrinter
|
||||
LLVMBitReader LLVMBitWriter
|
||||
LLVMDebugInfo LLVMExecutionEngine
|
||||
LLVMInstCombine LLVMInstrumentation
|
||||
LLVMInterpreter LLVMJIT
|
||||
LLVMLinker LLVMMC
|
||||
LLVMMCDisassembler LLVMMCJIT
|
||||
LLVMMCParser LLVMObject
|
||||
LLVMRuntimeDyld
|
||||
LLVMSupport
|
||||
LLVMTableGen LLVMTarget
|
||||
LLVMTransformUtils LLVMVectorize
|
||||
LLVMX86AsmParser LLVMX86AsmPrinter
|
||||
LLVMX86CodeGen LLVMX86Desc
|
||||
LLVMX86Disassembler LLVMX86Info
|
||||
LLVMX86Utils LLVMipa
|
||||
LLVMipo LLVMCore)
|
||||
# imagehelp is needed by LLVM 3.1 on MinGW, check lib\Support\Windows\Signals.inc
|
||||
list(APPEND PLATFORM_LINKLIBS -limagehlp)
|
||||
endif()
|
||||
|
||||
if(WITH_OPENCOLORIO)
|
||||
set(OPENCOLORIO ${LIBDIR}/opencolorio)
|
||||
set(OPENCOLORIO_INCLUDE_DIRS ${OPENCOLORIO}/include)
|
||||
set(OPENCOLORIO_LIBRARIES OpenColorIO)
|
||||
set(OPENCOLORIO_LIBPATH ${OPENCOLORIO}/lib)
|
||||
set(OPENCOLORIO_DEFINITIONS)
|
||||
endif()
|
||||
|
||||
if(WITH_SDL)
|
||||
set(SDL ${LIBDIR}/sdl)
|
||||
set(SDL_INCLUDE_DIR ${SDL}/include)
|
||||
set(SDL_LIBRARY SDL)
|
||||
set(SDL_LIBPATH ${SDL}/lib)
|
||||
endif()
|
||||
|
||||
if(WITH_OPENVDB)
|
||||
set(OPENVDB ${LIBDIR}/openvdb)
|
||||
set(OPENVDB_INCLUDE_DIRS ${OPENVDB}/include)
|
||||
set(OPENVDB_LIBRARIES openvdb ${TBB_LIBRARIES})
|
||||
set(OPENVDB_LIBPATH ${LIBDIR}/openvdb/lib)
|
||||
set(OPENVDB_DEFINITIONS)
|
||||
endif()
|
||||
|
||||
if(WITH_ALEMBIC)
|
||||
# TODO(sergey): Until someone drops by and compiles libraries for
|
||||
# MinGW we allow users to compile their own Alembic library and use
|
||||
# that via find_package(),
|
||||
#
|
||||
# Once precompiled libraries are there we'll use hardcoded locations.
|
||||
find_package_wrapper(Alembic)
|
||||
if(WITH_ALEMBIC_HDF5)
|
||||
set(HDF5_ROOT_DIR ${LIBDIR}/hdf5)
|
||||
find_package_wrapper(HDF5)
|
||||
endif()
|
||||
if(NOT ALEMBIC_FOUND OR (WITH_ALEMBIC_HDF5 AND NOT HDF5_FOUND))
|
||||
set(WITH_ALEMBIC OFF)
|
||||
set(WITH_ALEMBIC_HDF5 OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
set(PLATFORM_LINKFLAGS "-Xlinker --stack=2097152")
|
||||
|
||||
## DISABLE - causes linking errors
|
||||
## for re-distribution, so users don't need MinGW installed
|
||||
# set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -static-libgcc -static-libstdc++")
|
@@ -1,490 +0,0 @@
|
||||
# ***** BEGIN GPL LICENSE BLOCK *****
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# The Original Code is Copyright (C) 2016, Blender Foundation
|
||||
# All rights reserved.
|
||||
#
|
||||
# Contributor(s): Sergey Sharybin.
|
||||
#
|
||||
# ***** END GPL LICENSE BLOCK *****
|
||||
|
||||
# Libraries configuration for Windows when compiling with MSVC.
|
||||
|
||||
macro(warn_hardcoded_paths package_name
|
||||
)
|
||||
if(WITH_WINDOWS_FIND_MODULES)
|
||||
message(WARNING "Using HARDCODED ${package_name} locations")
|
||||
endif(WITH_WINDOWS_FIND_MODULES)
|
||||
endmacro()
|
||||
|
||||
macro(windows_find_package package_name
|
||||
)
|
||||
if(WITH_WINDOWS_FIND_MODULES)
|
||||
find_package( ${package_name})
|
||||
endif(WITH_WINDOWS_FIND_MODULES)
|
||||
endmacro()
|
||||
|
||||
add_definitions(-DWIN32)
|
||||
# Minimum MSVC Version
|
||||
if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
|
||||
if(MSVC_VERSION EQUAL 1800)
|
||||
set(_min_ver "18.0.31101")
|
||||
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${_min_ver})
|
||||
message(FATAL_ERROR
|
||||
"Visual Studio 2013 (Update 4, ${_min_ver}) required, "
|
||||
"found (${CMAKE_CXX_COMPILER_VERSION})")
|
||||
endif()
|
||||
endif()
|
||||
if(MSVC_VERSION EQUAL 1900)
|
||||
set(_min_ver "19.0.24210")
|
||||
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${_min_ver})
|
||||
message(FATAL_ERROR
|
||||
"Visual Studio 2015 (Update 3, ${_min_ver}) required, "
|
||||
"found (${CMAKE_CXX_COMPILER_VERSION})")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
unset(_min_ver)
|
||||
|
||||
# needed for some MSVC installations
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /SAFESEH:NO")
|
||||
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /SAFESEH:NO")
|
||||
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /SAFESEH:NO")
|
||||
|
||||
list(APPEND PLATFORM_LINKLIBS
|
||||
ws2_32 vfw32 winmm kernel32 user32 gdi32 comdlg32
|
||||
advapi32 shfolder shell32 ole32 oleaut32 uuid psapi Dbghelp
|
||||
)
|
||||
|
||||
if(WITH_INPUT_IME)
|
||||
list(APPEND PLATFORM_LINKLIBS imm32)
|
||||
endif()
|
||||
|
||||
add_definitions(
|
||||
-D_CRT_NONSTDC_NO_DEPRECATE
|
||||
-D_CRT_SECURE_NO_DEPRECATE
|
||||
-D_SCL_SECURE_NO_DEPRECATE
|
||||
-D_CONSOLE
|
||||
-D_LIB
|
||||
)
|
||||
|
||||
# MSVC11 needs _ALLOW_KEYWORD_MACROS to build
|
||||
add_definitions(-D_ALLOW_KEYWORD_MACROS)
|
||||
|
||||
# We want to support Vista level ABI
|
||||
add_definitions(-D_WIN32_WINNT=0x600)
|
||||
|
||||
# Make cmake find the msvc redistributables
|
||||
set(CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP TRUE)
|
||||
include(InstallRequiredSystemLibraries)
|
||||
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /nologo /J /Gd /MP /EHsc")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /nologo /J /Gd /MP")
|
||||
|
||||
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")
|
||||
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /MTd")
|
||||
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT")
|
||||
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /MT")
|
||||
set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /MT")
|
||||
set(CMAKE_C_FLAGS_MINSIZEREL "${CMAKE_C_FLAGS_MINSIZEREL} /MT")
|
||||
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /MT")
|
||||
set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} /MT")
|
||||
|
||||
set(PLATFORM_LINKFLAGS "/SUBSYSTEM:CONSOLE /STACK:2097152 /INCREMENTAL:NO ")
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} /NODEFAULTLIB:msvcrt.lib /NODEFAULTLIB:msvcmrt.lib /NODEFAULTLIB:msvcurt.lib /NODEFAULTLIB:msvcrtd.lib ")
|
||||
|
||||
# Ignore meaningless for us linker warnings.
|
||||
set(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} /ignore:4049 /ignore:4217 /ignore:4221")
|
||||
set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} /ignore:4221")
|
||||
|
||||
# MSVC only, MinGW doesn't need this
|
||||
if(CMAKE_CL_64)
|
||||
set(PLATFORM_LINKFLAGS "/MACHINE:X64 ${PLATFORM_LINKFLAGS}")
|
||||
else()
|
||||
set(PLATFORM_LINKFLAGS "/MACHINE:IX86 /LARGEADDRESSAWARE ${PLATFORM_LINKFLAGS}")
|
||||
endif()
|
||||
|
||||
set(PLATFORM_LINKFLAGS_DEBUG "/IGNORE:4099 /NODEFAULTLIB:libcmt.lib /NODEFAULTLIB:libc.lib")
|
||||
|
||||
if(NOT DEFINED LIBDIR)
|
||||
|
||||
# Setup 32 bit and 64 bit windows systems
|
||||
if(CMAKE_CL_64)
|
||||
message(STATUS "64 bit compiler detected.")
|
||||
set(LIBDIR_BASE "win64")
|
||||
else()
|
||||
message(STATUS "32 bit compiler detected.")
|
||||
set(LIBDIR_BASE "windows")
|
||||
endif()
|
||||
if(MSVC_VERSION EQUAL 1910)
|
||||
message(STATUS "Visual Studio 2017 detected.")
|
||||
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_BASE}_vc14)
|
||||
elseif(MSVC_VERSION EQUAL 1900)
|
||||
message(STATUS "Visual Studio 2015 detected.")
|
||||
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_BASE}_vc14)
|
||||
else()
|
||||
message(STATUS "Visual Studio 2013 detected.")
|
||||
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_BASE}_vc12)
|
||||
endif()
|
||||
else()
|
||||
message(STATUS "Using pre-compiled LIBDIR: ${LIBDIR}")
|
||||
endif()
|
||||
if(NOT EXISTS "${LIBDIR}/")
|
||||
message(FATAL_ERROR "Windows requires pre-compiled libs at: '${LIBDIR}'")
|
||||
endif()
|
||||
|
||||
# Add each of our libraries to our cmake_prefix_path so find_package() could work
|
||||
file(GLOB children RELATIVE ${LIBDIR} ${LIBDIR}/*)
|
||||
foreach(child ${children})
|
||||
if(IS_DIRECTORY ${LIBDIR}/${child})
|
||||
list(APPEND CMAKE_PREFIX_PATH ${LIBDIR}/${child})
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
set(ZLIB_INCLUDE_DIRS ${LIBDIR}/zlib/include)
|
||||
set(ZLIB_LIBRARIES ${LIBDIR}/zlib/lib/libz_st.lib)
|
||||
set(ZLIB_INCLUDE_DIR ${LIBDIR}/zlib/include)
|
||||
set(ZLIB_LIBRARY ${LIBDIR}/zlib/lib/libz_st.lib)
|
||||
set(ZLIB_DIR ${LIBDIR}/zlib)
|
||||
|
||||
windows_find_package(zlib) # we want to find before finding things that depend on it like png
|
||||
windows_find_package(png)
|
||||
|
||||
if(NOT PNG_FOUND)
|
||||
warn_hardcoded_paths(libpng)
|
||||
set(PNG_PNG_INCLUDE_DIR ${LIBDIR}/png/include)
|
||||
set(PNG_LIBRARIES libpng)
|
||||
set(PNG "${LIBDIR}/png")
|
||||
set(PNG_INCLUDE_DIRS "${PNG}/include")
|
||||
set(PNG_LIBPATH ${PNG}/lib) # not cmake defined
|
||||
endif()
|
||||
|
||||
set(JPEG_NAMES ${JPEG_NAMES} libjpeg)
|
||||
windows_find_package(jpeg REQUIRED)
|
||||
if(NOT JPEG_FOUND)
|
||||
warn_hardcoded_paths(jpeg)
|
||||
set(JPEG_INCLUDE_DIR ${LIBDIR}/jpeg/include)
|
||||
set(JPEG_LIBRARIES ${LIBDIR}/jpeg/lib/libjpeg.lib)
|
||||
endif()
|
||||
|
||||
set(PTHREADS_INCLUDE_DIRS ${LIBDIR}/pthreads/include)
|
||||
set(PTHREADS_LIBRARIES ${LIBDIR}/pthreads/lib/pthreadVC2.lib)
|
||||
|
||||
set(FREETYPE ${LIBDIR}/freetype)
|
||||
set(FREETYPE_INCLUDE_DIRS
|
||||
${LIBDIR}/freetype/include
|
||||
${LIBDIR}/freetype/include/freetype2
|
||||
)
|
||||
set(FREETYPE_LIBRARY ${LIBDIR}/freetype/lib/freetype2ST.lib)
|
||||
windows_find_package(freetype REQUIRED)
|
||||
|
||||
if(WITH_FFTW3)
|
||||
set(FFTW3 ${LIBDIR}/fftw3)
|
||||
set(FFTW3_LIBRARIES libfftw)
|
||||
set(FFTW3_INCLUDE_DIRS ${FFTW3}/include)
|
||||
set(FFTW3_LIBPATH ${FFTW3}/lib)
|
||||
endif()
|
||||
|
||||
if(WITH_OPENCOLLADA)
|
||||
set(OPENCOLLADA ${LIBDIR}/opencollada)
|
||||
|
||||
set(OPENCOLLADA_INCLUDE_DIRS
|
||||
${OPENCOLLADA}/include/opencollada/COLLADAStreamWriter
|
||||
${OPENCOLLADA}/include/opencollada/COLLADABaseUtils
|
||||
${OPENCOLLADA}/include/opencollada/COLLADAFramework
|
||||
${OPENCOLLADA}/include/opencollada/COLLADASaxFrameworkLoader
|
||||
${OPENCOLLADA}/include/opencollada/GeneratedSaxParser
|
||||
)
|
||||
|
||||
set(OPENCOLLADA_LIBRARIES
|
||||
${OPENCOLLADA}/lib/opencollada/OpenCOLLADASaxFrameworkLoader.lib
|
||||
${OPENCOLLADA}/lib/opencollada/OpenCOLLADAFramework.lib
|
||||
${OPENCOLLADA}/lib/opencollada/OpenCOLLADABaseUtils.lib
|
||||
${OPENCOLLADA}/lib/opencollada/OpenCOLLADAStreamWriter.lib
|
||||
${OPENCOLLADA}/lib/opencollada/MathMLSolver.lib
|
||||
${OPENCOLLADA}/lib/opencollada/GeneratedSaxParser.lib
|
||||
${OPENCOLLADA}/lib/opencollada/xml.lib
|
||||
${OPENCOLLADA}/lib/opencollada/buffer.lib
|
||||
${OPENCOLLADA}/lib/opencollada/ftoa.lib
|
||||
)
|
||||
|
||||
if(NOT WITH_LLVM)
|
||||
list(APPEND OPENCOLLADA_LIBRARIES ${OPENCOLLADA}/lib/opencollada/UTF.lib)
|
||||
endif()
|
||||
|
||||
set(PCRE_LIBRARIES
|
||||
${OPENCOLLADA}/lib/opencollada/pcre.lib
|
||||
)
|
||||
endif()
|
||||
|
||||
if(WITH_CODEC_FFMPEG)
|
||||
set(FFMPEG_INCLUDE_DIRS
|
||||
${LIBDIR}/ffmpeg/include
|
||||
${LIBDIR}/ffmpeg/include/msvc
|
||||
)
|
||||
windows_find_package(FFMPEG)
|
||||
if(NOT FFMPEG_FOUND)
|
||||
warn_hardcoded_paths(ffmpeg)
|
||||
set(FFMPEG_LIBRARY_VERSION 57)
|
||||
set(FFMPEG_LIBRARY_VERSION_AVU 55)
|
||||
set(FFMPEG_LIBRARIES
|
||||
${LIBDIR}/ffmpeg/lib/avcodec.lib
|
||||
${LIBDIR}/ffmpeg/lib/avformat.lib
|
||||
${LIBDIR}/ffmpeg/lib/avdevice.lib
|
||||
${LIBDIR}/ffmpeg/lib/avutil.lib
|
||||
${LIBDIR}/ffmpeg/lib/swscale.lib
|
||||
)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_IMAGE_OPENEXR)
|
||||
set(OPENEXR_ROOT_DIR ${LIBDIR}/openexr)
|
||||
set(OPENEXR_VERSION "2.1")
|
||||
windows_find_package(OPENEXR REQUIRED)
|
||||
if(NOT OPENEXR_FOUND)
|
||||
warn_hardcoded_paths(OpenEXR)
|
||||
set(OPENEXR ${LIBDIR}/openexr)
|
||||
set(OPENEXR_INCLUDE_DIR ${OPENEXR}/include)
|
||||
set(OPENEXR_INCLUDE_DIRS ${OPENEXR_INCLUDE_DIR} ${OPENEXR}/include/OpenEXR)
|
||||
set(OPENEXR_LIBPATH ${OPENEXR}/lib)
|
||||
set(OPENEXR_LIBRARIES
|
||||
optimized ${OPENEXR_LIBPATH}/Iex-2_2.lib
|
||||
optimized ${OPENEXR_LIBPATH}/Half.lib
|
||||
optimized ${OPENEXR_LIBPATH}/IlmImf-2_2.lib
|
||||
optimized ${OPENEXR_LIBPATH}/Imath-2_2.lib
|
||||
optimized ${OPENEXR_LIBPATH}/IlmThread-2_2.lib
|
||||
debug ${OPENEXR_LIBPATH}/Iex-2_2_d.lib
|
||||
debug ${OPENEXR_LIBPATH}/Half_d.lib
|
||||
debug ${OPENEXR_LIBPATH}/IlmImf-2_2_d.lib
|
||||
debug ${OPENEXR_LIBPATH}/Imath-2_2_d.lib
|
||||
debug ${OPENEXR_LIBPATH}/IlmThread-2_2_d.lib
|
||||
)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_IMAGE_TIFF)
|
||||
# Try to find tiff first then complain and set static and maybe wrong paths
|
||||
windows_find_package(TIFF)
|
||||
if(NOT TIFF_FOUND)
|
||||
warn_hardcoded_paths(libtiff)
|
||||
set(TIFF_LIBRARY ${LIBDIR}/tiff/lib/libtiff.lib)
|
||||
set(TIFF_INCLUDE_DIR ${LIBDIR}/tiff/include)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(WITH_JACK)
|
||||
set(JACK_INCLUDE_DIRS
|
||||
${LIBDIR}/jack/include/jack
|
||||
${LIBDIR}/jack/include
|
||||
)
|
||||
set(JACK_LIBRARIES optimized ${LIBDIR}/jack/lib/libjack.lib debug ${LIBDIR}/jack/lib/libjack_d.lib)
|
||||
endif()
|
||||
|
||||
if(WITH_PYTHON)
|
||||
set(PYTHON_VERSION 3.5) # CACHE STRING)
|
||||
|
||||
string(REPLACE "." "" _PYTHON_VERSION_NO_DOTS ${PYTHON_VERSION})
|
||||
# Use shared libs for vc2008 and vc2010 until we actually have vc2010 libs
|
||||
set(PYTHON_LIBRARY ${LIBDIR}/python/lib/python${_PYTHON_VERSION_NO_DOTS}.lib)
|
||||
unset(_PYTHON_VERSION_NO_DOTS)
|
||||
|
||||
# Shared includes for both vc2008 and vc2010
|
||||
set(PYTHON_INCLUDE_DIR ${LIBDIR}/python/include/python${PYTHON_VERSION})
|
||||
|
||||
# uncached vars
|
||||
set(PYTHON_INCLUDE_DIRS "${PYTHON_INCLUDE_DIR}")
|
||||
set(PYTHON_LIBRARIES "${PYTHON_LIBRARY}")
|
||||
endif()
|
||||
|
||||
if(WITH_BOOST)
|
||||
if(WITH_CYCLES_OSL)
|
||||
set(boost_extra_libs wave)
|
||||
endif()
|
||||
if(WITH_INTERNATIONAL)
|
||||
list(APPEND boost_extra_libs locale)
|
||||
endif()
|
||||
if(WITH_OPENVDB)
|
||||
list(APPEND boost_extra_libs iostreams)
|
||||
endif()
|
||||
set(Boost_USE_STATIC_RUNTIME ON) # prefix lib
|
||||
set(Boost_USE_MULTITHREADED ON) # suffix -mt
|
||||
set(Boost_USE_STATIC_LIBS ON) # suffix -s
|
||||
if (WITH_WINDOWS_FIND_MODULES)
|
||||
find_package(Boost COMPONENTS date_time filesystem thread regex system ${boost_extra_libs})
|
||||
endif (WITH_WINDOWS_FIND_MODULES)
|
||||
if(NOT Boost_FOUND)
|
||||
warn_hardcoded_paths(BOOST)
|
||||
set(BOOST ${LIBDIR}/boost)
|
||||
set(BOOST_INCLUDE_DIR ${BOOST}/include)
|
||||
if(MSVC12)
|
||||
set(BOOST_LIBPATH ${BOOST}/lib)
|
||||
set(BOOST_POSTFIX "vc120-mt-s-1_60.lib")
|
||||
set(BOOST_DEBUG_POSTFIX "vc120-mt-sgd-1_60.lib")
|
||||
else()
|
||||
set(BOOST_LIBPATH ${BOOST}/lib)
|
||||
set(BOOST_POSTFIX "vc140-mt-s-1_60.lib")
|
||||
set(BOOST_DEBUG_POSTFIX "vc140-mt-sgd-1_60.lib")
|
||||
endif()
|
||||
set(BOOST_LIBRARIES
|
||||
optimized libboost_date_time-${BOOST_POSTFIX}
|
||||
optimized libboost_filesystem-${BOOST_POSTFIX}
|
||||
optimized libboost_regex-${BOOST_POSTFIX}
|
||||
optimized libboost_system-${BOOST_POSTFIX}
|
||||
optimized libboost_thread-${BOOST_POSTFIX}
|
||||
debug libboost_date_time-${BOOST_DEBUG_POSTFIX}
|
||||
debug libboost_filesystem-${BOOST_DEBUG_POSTFIX}
|
||||
debug libboost_regex-${BOOST_DEBUG_POSTFIX}
|
||||
debug libboost_system-${BOOST_DEBUG_POSTFIX}
|
||||
debug libboost_thread-${BOOST_DEBUG_POSTFIX}
|
||||
)
|
||||
if(WITH_CYCLES_OSL)
|
||||
set(BOOST_LIBRARIES ${BOOST_LIBRARIES}
|
||||
optimized libboost_wave-${BOOST_POSTFIX}
|
||||
debug libboost_wave-${BOOST_DEBUG_POSTFIX})
|
||||
endif()
|
||||
if(WITH_INTERNATIONAL)
|
||||
set(BOOST_LIBRARIES ${BOOST_LIBRARIES}
|
||||
optimized libboost_locale-${BOOST_POSTFIX}
|
||||
debug libboost_locale-${BOOST_DEBUG_POSTFIX})
|
||||
endif()
|
||||
else() # we found boost using find_package
|
||||
set(BOOST_INCLUDE_DIR ${Boost_INCLUDE_DIRS})
|
||||
set(BOOST_LIBRARIES ${Boost_LIBRARIES})
|
||||
set(BOOST_LIBPATH ${Boost_LIBRARY_DIRS})
|
||||
endif()
|
||||
set(BOOST_DEFINITIONS "-DBOOST_ALL_NO_LIB")
|
||||
endif()
|
||||
|
||||
if(WITH_OPENIMAGEIO)
|
||||
windows_find_package(OpenImageIO)
|
||||
set(OPENIMAGEIO ${LIBDIR}/openimageio)
|
||||
set(OPENIMAGEIO_INCLUDE_DIRS ${OPENIMAGEIO}/include)
|
||||
set(OIIO_OPTIMIZED optimized OpenImageIO optimized OpenImageIO_Util)
|
||||
set(OIIO_DEBUG debug OpenImageIO_d debug OpenImageIO_Util_d)
|
||||
set(OPENIMAGEIO_LIBRARIES ${OIIO_OPTIMIZED} ${OIIO_DEBUG})
|
||||
set(OPENIMAGEIO_LIBPATH ${OPENIMAGEIO}/lib)
|
||||
set(OPENIMAGEIO_DEFINITIONS "-DUSE_TBB=0")
|
||||
set(OPENCOLORIO_DEFINITIONS "-DOCIO_STATIC_BUILD")
|
||||
set(OPENIMAGEIO_IDIFF "${OPENIMAGEIO}/bin/idiff.exe")
|
||||
add_definitions(-DOIIO_STATIC_BUILD)
|
||||
add_definitions(-DOIIO_NO_SSE=1)
|
||||
endif()
|
||||
|
||||
if(WITH_LLVM)
|
||||
set(LLVM_ROOT_DIR ${LIBDIR}/llvm CACHE PATH "Path to the LLVM installation")
|
||||
file(GLOB LLVM_LIBRARY_OPTIMIZED ${LLVM_ROOT_DIR}/lib/*.lib)
|
||||
|
||||
if(EXISTS ${LLVM_ROOT_DIR}/debug/lib)
|
||||
foreach(LLVM_OPTIMIZED_LIB ${LLVM_LIBRARY_OPTIMIZED})
|
||||
get_filename_component(LIBNAME ${LLVM_OPTIMIZED_LIB} ABSOLUTE)
|
||||
list(APPEND LLVM_LIBS optimized ${LIBNAME})
|
||||
endforeach(LLVM_OPTIMIZED_LIB)
|
||||
|
||||
file(GLOB LLVM_LIBRARY_DEBUG ${LLVM_ROOT_DIR}/debug/lib/*.lib)
|
||||
|
||||
foreach(LLVM_DEBUG_LIB ${LLVM_LIBRARY_DEBUG})
|
||||
get_filename_component(LIBNAME ${LLVM_DEBUG_LIB} ABSOLUTE)
|
||||
list(APPEND LLVM_LIBS debug ${LIBNAME})
|
||||
endforeach(LLVM_DEBUG_LIB)
|
||||
|
||||
set(LLVM_LIBRARY ${LLVM_LIBS})
|
||||
else()
|
||||
message(WARNING "LLVM debug libs not present on this system. Using release libs for debug builds.")
|
||||
set(LLVM_LIBRARY ${LLVM_LIBRARY_OPTIMIZED})
|
||||
endif()
|
||||
|
||||
endif()
|
||||
|
||||
if(WITH_OPENCOLORIO)
|
||||
set(OPENCOLORIO ${LIBDIR}/opencolorio)
|
||||
set(OPENCOLORIO_INCLUDE_DIRS ${OPENCOLORIO}/include)
|
||||
set(OPENCOLORIO_LIBRARIES OpenColorIO)
|
||||
set(OPENCOLORIO_LIBPATH ${LIBDIR}/opencolorio/lib)
|
||||
set(OPENCOLORIO_DEFINITIONS)
|
||||
endif()
|
||||
|
||||
if(WITH_OPENVDB)
|
||||
set(BLOSC_LIBRARIES optimized ${LIBDIR}/blosc/lib/libblosc.lib debug ${LIBDIR}/blosc/lib/libblosc_d.lib)
|
||||
set(TBB_LIBRARIES optimized ${LIBDIR}/tbb/lib/tbb.lib debug ${LIBDIR}/tbb/lib/tbb_debug.lib)
|
||||
set(TBB_INCLUDE_DIR ${LIBDIR}/tbb/include)
|
||||
set(OPENVDB ${LIBDIR}/openvdb)
|
||||
set(OPENVDB_INCLUDE_DIRS ${OPENVDB}/include ${TBB_INCLUDE_DIR})
|
||||
set(OPENVDB_LIBRARIES optimized openvdb debug openvdb_d ${TBB_LIBRARIES} ${BLOSC_LIBRARIES})
|
||||
set(OPENVDB_LIBPATH ${LIBDIR}/openvdb/lib)
|
||||
endif()
|
||||
|
||||
if(WITH_ALEMBIC)
|
||||
set(ALEMBIC ${LIBDIR}/alembic)
|
||||
set(ALEMBIC_INCLUDE_DIR ${ALEMBIC}/include)
|
||||
set(ALEMBIC_INCLUDE_DIRS ${ALEMBIC_INCLUDE_DIR})
|
||||
set(ALEMBIC_LIBPATH ${ALEMBIC}/lib)
|
||||
set(ALEMBIC_LIBRARIES optimized alembic debug alembic_d)
|
||||
endif()
|
||||
|
||||
if(WITH_MOD_CLOTH_ELTOPO)
|
||||
set(LAPACK ${LIBDIR}/lapack)
|
||||
# set(LAPACK_INCLUDE_DIR ${LAPACK}/include)
|
||||
set(LAPACK_LIBPATH ${LAPACK}/lib)
|
||||
set(LAPACK_LIBRARIES
|
||||
${LIBDIR}/lapack/lib/libf2c.lib
|
||||
${LIBDIR}/lapack/lib/clapack_nowrap.lib
|
||||
${LIBDIR}/lapack/lib/BLAS_nowrap.lib
|
||||
)
|
||||
endif()
|
||||
|
||||
if(WITH_OPENSUBDIV OR WITH_CYCLES_OPENSUBDIV)
|
||||
set(OPENSUBDIV_INCLUDE_DIR ${LIBDIR}/opensubdiv/include)
|
||||
set(OPENSUBDIV_LIBPATH ${LIBDIR}/opensubdiv/lib)
|
||||
set(OPENSUBDIV_LIBRARIES ${OPENSUBDIV_LIBPATH}/osdCPU.lib ${OPENSUBDIV_LIBPATH}/osdGPU.lib)
|
||||
find_package(OpenSubdiv)
|
||||
endif()
|
||||
|
||||
if(WITH_SDL)
|
||||
set(SDL ${LIBDIR}/sdl)
|
||||
set(SDL_INCLUDE_DIR ${SDL}/include)
|
||||
set(SDL_LIBPATH ${SDL}/lib)
|
||||
# MinGW TODO: Update MinGW to SDL2
|
||||
if(NOT CMAKE_COMPILER_IS_GNUCC)
|
||||
set(SDL_LIBRARY SDL2)
|
||||
else()
|
||||
set(SDL_LIBRARY SDL)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Audio IO
|
||||
if(WITH_SYSTEM_AUDASPACE)
|
||||
set(AUDASPACE_INCLUDE_DIRS ${LIBDIR}/audaspace/include/audaspace)
|
||||
set(AUDASPACE_LIBRARIES ${LIBDIR}/audaspace/lib/audaspace.lib)
|
||||
set(AUDASPACE_C_INCLUDE_DIRS ${LIBDIR}/audaspace/include/audaspace)
|
||||
set(AUDASPACE_C_LIBRARIES ${LIBDIR}/audaspace/lib/audaspace-c.lib)
|
||||
set(AUDASPACE_PY_INCLUDE_DIRS ${LIBDIR}/audaspace/include/audaspace)
|
||||
set(AUDASPACE_PY_LIBRARIES ${LIBDIR}/audaspace/lib/audaspace-py.lib)
|
||||
endif()
|
||||
|
||||
# used in many places so include globally, like OpenGL
|
||||
blender_include_dirs_sys("${PTHREADS_INCLUDE_DIRS}")
|
||||
|
||||
#find signtool
|
||||
SET(ProgramFilesX86_NAME "ProgramFiles(x86)") #env dislikes the ( )
|
||||
find_program(SIGNTOOL_EXE signtool
|
||||
HINTS
|
||||
"$ENV{${ProgramFilesX86_NAME}}/Windows Kits/10/bin/x86/"
|
||||
"$ENV{ProgramFiles}/Windows Kits/10/bin/x86/"
|
||||
"$ENV{${ProgramFilesX86_NAME}}/Windows Kits/8.1/bin/x86/"
|
||||
"$ENV{ProgramFiles}/Windows Kits/8.1/bin/x86/"
|
||||
"$ENV{${ProgramFilesX86_NAME}}/Windows Kits/8.0/bin/x86/"
|
||||
"$ENV{ProgramFiles}/Windows Kits/8.0/bin/x86/"
|
||||
)
|
@@ -39,7 +39,7 @@ __all__ = (
|
||||
"is_py",
|
||||
"cmake_advanced_info",
|
||||
"cmake_compiler_defines",
|
||||
"project_name_get",
|
||||
"project_name_get"
|
||||
"init",
|
||||
)
|
||||
|
||||
@@ -214,12 +214,7 @@ def cmake_advanced_info():
|
||||
|
||||
def cmake_cache_var(var):
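# Parse CMakeCache.txt: keep non-empty lines that are not comments ("//", "#"), then search them for var.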
|
||||
cache_file = open(join(CMAKE_DIR, "CMakeCache.txt"), encoding='utf-8')
|
||||
lines = [
|
||||
l_strip for l in cache_file
|
||||
for l_strip in (l.strip(),)
|
||||
if l_strip
|
||||
if not l_strip.startswith(("//", "#"))
|
||||
]
|
||||
lines = [l_strip for l in cache_file for l_strip in (l.strip(),) if l_strip if not l_strip.startswith("//") if not l_strip.startswith("#")]
|
||||
cache_file.close()
|
||||
|
||||
for l in lines:
|
||||
|
@@ -23,7 +23,7 @@
|
||||
__all__ = (
|
||||
"build_info",
|
||||
"SOURCE_DIR",
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
import sys
|
||||
|
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python3
|
||||
#!/usr/bin/env python
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
|
@@ -2,9 +2,11 @@
|
||||
|
||||
# custom blender vars
|
||||
blender_srcdir=$(dirname $startdir)"/../.."
|
||||
blender_version=$(grep "BLENDER_VERSION\s" $blender_srcdir/source/blender/blenkernel/BKE_blender_version.h | awk '{print $3}')
|
||||
# value may be formatted: 35042:35051M
|
||||
blender_revision=$(svnversion $blender_srcdir | cut -d: -f2 | awk '{print $3}')
|
||||
blender_version=$(grep "BLENDER_VERSION\s" $blender_srcdir/source/blender/blenkernel/BKE_blender.h | awk '{print $3}')
|
||||
blender_version=$(expr $blender_version / 100).$(expr $blender_version % 100) # 256 -> 2.56
|
||||
blender_version_char=$(sed -ne 's/.*BLENDER_VERSION_CHAR.*\([a-z]\)$/\1/p' $blender_srcdir/source/blender/blenkernel/BKE_blender_version.h)
|
||||
blender_version_char=$(sed -ne 's/.*BLENDER_VERSION_CHAR.*\([a-z]\)$/\1/p' $blender_srcdir/source/blender/blenkernel/BKE_blender.h)
|
||||
# blender_subversion=$(grep BLENDER_SUBVERSION $blender_srcdir/source/blender/blenkernel/BKE_blender.h | awk '{print $3}')
|
||||
|
||||
# map the version a -> 1
|
||||
@@ -25,9 +27,7 @@ arch=('i686' 'x86_64')
|
||||
url="www.blender.org"
|
||||
license=('GPL')
|
||||
groups=()
|
||||
depends=('libjpeg' 'libpng' 'openjpeg' 'libtiff' 'openexr' 'python>=3.5'
|
||||
'gettext' 'libxi' 'libxmu' 'mesa' 'freetype2' 'openal' 'sdl'
|
||||
'libsndfile' 'ffmpeg')
|
||||
depends=('libjpeg' 'libpng' 'openjpeg' 'libtiff' 'openexr' 'python>=3.4' 'gettext' 'libxi' 'libxmu' 'mesa' 'freetype2' 'openal' 'sdl' 'libsndfile' 'ffmpeg')
|
||||
makedepends=('cmake' 'git')
|
||||
optdepends=()
|
||||
provides=()
|
||||
|
@@ -6,10 +6,10 @@
|
||||
BASE_DIR="$PWD"
|
||||
|
||||
blender_srcdir=$(dirname -- $0)/../..
|
||||
blender_version=$(grep "BLENDER_VERSION\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender_version.h" | awk '{print $3}')
|
||||
blender_version_char=$(grep "BLENDER_VERSION_CHAR\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender_version.h" | awk '{print $3}')
|
||||
blender_version_cycle=$(grep "BLENDER_VERSION_CYCLE\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender_version.h" | awk '{print $3}')
|
||||
blender_subversion=$(grep "BLENDER_SUBVERSION\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender_version.h" | awk '{print $3}')
|
||||
blender_version=$(grep "BLENDER_VERSION\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender.h" | awk '{print $3}')
|
||||
blender_version_char=$(grep "BLENDER_VERSION_CHAR\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender.h" | awk '{print $3}')
|
||||
blender_version_cycle=$(grep "BLENDER_VERSION_CYCLE\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender.h" | awk '{print $3}')
|
||||
blender_subversion=$(grep "BLENDER_SUBVERSION\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender.h" | awk '{print $3}')
|
||||
|
||||
if [ "$blender_version_cycle" = "release" ] ; then
|
||||
VERSION=$(expr $blender_version / 100).$(expr $blender_version % 100)$blender_version_char
|
||||
|
@@ -187,7 +187,7 @@ The next table describes the information in the file-header.
|
||||
</table>
|
||||
|
||||
<p>
|
||||
<a href="https://en.wikipedia.org/wiki/Endianness">Endianness</a> addresses the way values are ordered in a sequence of bytes(see the <a href="#example-endianess">example</a> below):
|
||||
<a href="http://en.wikipedia.org/wiki/Endianness">Endianness</a> addresses the way values are ordered in a sequence of bytes(see the <a href="#example-endianess">example</a> below):
|
||||
</p>
|
||||
|
||||
<ul>
|
||||
|
@@ -38,7 +38,7 @@ PROJECT_NAME = Blender
|
||||
# could be handy for archiving the generated documentation or if some version
|
||||
# control system is used.
|
||||
|
||||
PROJECT_NUMBER = "V2.8x"
|
||||
PROJECT_NUMBER = "V2.7x"
|
||||
|
||||
# Using the PROJECT_BRIEF tag one can provide an optional one line description
|
||||
# for a project that appears at the top of each page and should give viewer a
|
||||
@@ -699,7 +699,7 @@ LAYOUT_FILE =
|
||||
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
|
||||
# the reference definitions. This must be a list of .bib files. The .bib
|
||||
# extension is automatically appended if omitted. This requires the bibtex tool
|
||||
# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
|
||||
# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
|
||||
# For LaTeX the style of the bibliography can be controlled using
|
||||
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
|
||||
# search path. See also \cite for info how to create references.
|
||||
@@ -1145,7 +1145,7 @@ HTML_EXTRA_FILES =
|
||||
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
|
||||
# will adjust the colors in the style sheet and background images according to
|
||||
# this color. Hue is specified as an angle on a colorwheel, see
|
||||
# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
|
||||
# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
|
||||
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
|
||||
# purple, and 360 is red again.
|
||||
# Minimum value: 0, maximum value: 359, default value: 220.
|
||||
@@ -1752,7 +1752,7 @@ LATEX_SOURCE_CODE = NO
|
||||
|
||||
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
|
||||
# bibliography, e.g. plainnat, or ieeetr. See
|
||||
# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
|
||||
# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
|
||||
# The default value is: plain.
|
||||
# This tag requires that the tag GENERATE_LATEX is set to YES.
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python3
|
||||
#!/usr/bin/env python
|
||||
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
|
132
doc/python_api/epy/IDProp.py
Normal file
@@ -0,0 +1,132 @@
|
||||
class IDGroup:
|
||||
"""
|
||||
The IDGroup Type
|
||||
================
|
||||
This type supports both iteration and the []
|
||||
operator to get child ID properties.
|
||||
|
||||
You can also add new properties using the [] operator.
|
||||
For example::
|
||||
|
||||
group['a float!'] = 0.0
|
||||
group['an int!'] = 0
|
||||
group['a string!'] = "hi!"
|
||||
group['an array!'] = [0, 0, 1.0, 0]
|
||||
|
||||
group['a subgroup!'] = {"float": 0.0, "an int": 1.0, "an array": [1, 2],
|
||||
"another subgroup": {"a": 0.0, "str": "bleh"}}
|
||||
|
||||
Note that for arrays, the array type defaults to int unless a float is found
|
||||
while scanning the template list; if any floats are found, then the whole
|
||||
array is float. Note that double-precision floating point numbers are used for
|
||||
python-created float ID properties and arrays (though the internal C api does
|
||||
support single-precision floats, and the python code will read them).
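For example (an illustrative sketch of the promotion rule described above)::

    group['int array'] = [1, 2, 3]      # all ints, stored as an int array
    group['float array'] = [1, 2, 3.0]  # one float promotes the whole array to float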
|
||||
|
||||
You can also delete properties with the del operator. For example::
|
||||
|
||||
del group['property']
|
||||
|
||||
To get the type of a property, use the type() operator, for example::
|
||||
|
||||
if type(group['bleh']) == str: pass
|
||||
|
||||
To tell if the property is a group or array type, import the Blender.Types module and test
|
||||
against IDGroupType and IDArrayType, like so::
|
||||
|
||||
from Blender.Types import IDGroupType, IDArrayType
|
||||
|
||||
if type(group['bleghr']) == IDGroupType:
|
||||
(do something)
|
||||
|
||||
@ivar name: The name of the property
|
||||
@type name: string
|
||||
"""
|
||||
|
||||
def pop(item):
|
||||
"""
|
||||
Pop an item from the group property.
|
||||
@type item: string
|
||||
@param item: The item name.
|
||||
@rtype: can be dict, list, int, float or string.
|
||||
@return: The removed property.
|
||||
"""
|
||||
|
||||
def update(updatedict):
|
||||
"""
|
||||
Updates items in the dict, similar to normal python
|
||||
dictionary method .update().
|
||||
@type updatedict: dict
|
||||
@param updatedict: A dict of simple types to derive updated/new IDProperties from.
|
||||
@rtype: None
|
||||
@return: None
|
||||
"""
|
||||
|
||||
def keys():
|
||||
"""
|
||||
Returns a list of the keys in this property group.
|
||||
@rtype: list of strings.
|
||||
@return: a list of the keys in this property group.
|
||||
"""
|
||||
|
||||
def values():
|
||||
"""
|
||||
Returns a list of the values in this property group.
|
||||
|
||||
Note that unless a value is itself a property group or an array, you
|
||||
cannot change it by changing the values in this list, you must change them
|
||||
in the parent property group.
|
||||
|
||||
For example,
|
||||
|
||||
group['some_property'] = new_value
|
||||
|
||||
. . .is correct, while,
|
||||
|
||||
values = group.values()
|
||||
values[0] = new_value
|
||||
|
||||
. . .is wrong.
|
||||
|
||||
@rtype: list of strings.
|
||||
@return: a list of the values in this property group.
|
||||
"""
|
||||
|
||||
def iteritems():
|
||||
"""
|
||||
Implements the python dictionary iteritems method.
|
||||
|
||||
For example::
|
||||
|
||||
for k, v in group.iteritems():
|
||||
print "Property name: " + k
|
||||
print "Property value: " + str(v)
|
||||
|
||||
@rtype: an iterator that spits out items of the form [key, value]
|
||||
@return: an iterator.
|
||||
"""
|
||||
|
||||
def convert_to_pyobject():
|
||||
"""
|
||||
Converts the entire property group to a purely python form.
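For example (a small illustrative sketch, using the ``group`` from above)::

    as_dict = group.convert_to_pyobject()
    print(as_dict['a float!'])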
|
||||
|
||||
@rtype: dict
|
||||
@return: A python dictionary representing the property group
|
||||
"""
|
||||
|
||||
class IDArray:
|
||||
"""
|
||||
The IDArray Type
|
||||
================
|
||||
|
||||
@ivar type: returns the type of the array, can be either IDP_Int or IDP_Float
|
||||
"""
|
||||
|
||||
def __getitem__(index):
|
||||
pass
|
||||
|
||||
def __setitem__(index, value):
|
||||
pass
|
||||
|
||||
def __len__():
|
||||
pass
|
||||
|
@@ -197,15 +197,17 @@ fg_shaders = {
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
#
|
||||
# Helper function to attach a pixel shader to the material that receives the video frame.
|
||||
#
|
||||
|
||||
def config_video(obj, format, pixel, is3D=False, mat=0, card=0):
|
||||
if pixel not in fg_shaders:
|
||||
if not pixel in fg_shaders:
|
||||
raise Exception('Unsupported shader')
|
||||
shader = obj.meshes[0].materials[mat].getShader()
|
||||
if shader is not None and not shader.isValid():
|
||||
if shader != None and not shader.isValid():
|
||||
shader.setSource(VertexShader, fg_shaders[pixel], True)
|
||||
shader.setSampler('tex', 0)
|
||||
shader.setUniformEyef("eye")
|
||||
@@ -221,9 +223,9 @@ def config_video(obj, format, pixel, is3D=False, mat=0, card=0):
|
||||
# and call it once to initialize the object
|
||||
#
|
||||
def init(cont):
|
||||
# config_video(cont.owner, 'HD720p5994', '8BitBGRA')
|
||||
# config_video(cont.owner, 'HD720p5994', '8BitYUV')
|
||||
# config_video(cont.owner, 'pal ', '10BitYUV')
|
||||
#config_video(cont.owner, 'HD720p5994', '8BitBGRA')
|
||||
#config_video(cont.owner, 'HD720p5994', '8BitYUV')
|
||||
#config_video(cont.owner, 'pal ', '10BitYUV')
|
||||
config_video(cont.owner, 'pal ', '8BitYUV')
|
||||
|
||||
|
||||
@@ -232,6 +234,6 @@ def init(cont):
|
||||
#
|
||||
def play(cont):
|
||||
obj = cont.owner
|
||||
video = obj.get("video")
|
||||
if video is not None:
|
||||
video.refresh(True)
|
||||
if hasattr(obj, "video"):
|
||||
obj["video"].refresh(True)
|
||||
|
||||
|
@@ -4,7 +4,7 @@ Persistent Handler Example
|
||||
|
||||
By default handlers are freed when loading new files, in some cases you may
|
||||
want the handler to stay running across multiple files (when the handler is
|
||||
part of an add-on for example).
|
||||
part of an addon for example).
|
||||
|
||||
For this the :data:`bpy.app.handlers.persistent` decorator needs to be used.
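For example (a minimal sketch; ``load_post`` is just one of the available handler lists)::

    import bpy
    from bpy.app.handlers import persistent

    @persistent
    def load_handler(dummy):
        print("Load Handler:", bpy.data.filepath)

    bpy.app.handlers.load_post.append(load_handler)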
|
||||
"""
|
||||
|
@@ -5,7 +5,7 @@ Intro
|
||||
.. warning::
|
||||
|
||||
Most of this object should only be useful if you actually manipulate i18n stuff from Python.
|
||||
If you are a regular add-on, you should only bother about :const:`contexts` member,
|
||||
If you are a regular addon, you should only bother about :const:`contexts` member,
|
||||
and the :func:`register`/:func:`unregister` functions! The :func:`pgettext` family of functions
|
||||
should only be used in rare, specific cases (like e.g. complex "composited" UI strings...).
|
||||
|
||||
@@ -21,11 +21,11 @@ Intro
|
||||
Then, call ``bpy.app.translations.register(__name__, your_dict)`` in your ``register()`` function, and
|
||||
``bpy.app.translations.unregister(__name__)`` in your ``unregister()`` one.
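A minimal sketch of that pattern (the dictionary content here is only an illustration)::

    import bpy

    translations_dict = {
        "fr_FR": {
            ("*", "Hello World"): "Bonjour Monde",
        },
    }

    def register():
        bpy.app.translations.register(__name__, translations_dict)

    def unregister():
        bpy.app.translations.unregister(__name__)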
|
||||
|
||||
The ``Manage UI translations`` add-on has several functions to help you collect strings to translate, and
|
||||
The ``Manage UI translations`` addon has several functions to help you collect strings to translate, and
|
||||
generate the needed python code (the translation dictionary), as well as optional intermediary po files
|
||||
if you want some... See
|
||||
`How to Translate Blender <https://wiki.blender.org/index.php/Dev:Doc/Process/Translate_Blender>`_ and
|
||||
`Using i18n in Blender Code <https://wiki.blender.org/index.php/Dev:Source/Interface/Internationalization>`_
|
||||
`How to Translate Blender <http://wiki.blender.org/index.php/Dev:Doc/Process/Translate_Blender>`_ and
|
||||
`Using i18n in Blender Code <http://wiki.blender.org/index.php/Dev:Source/Interface/Internationalization>`_
|
||||
for more info.
|
||||
|
||||
Module References
|
||||
|
@@ -1,10 +1,10 @@
|
||||
bl_info = {
|
||||
"name": "Example Add-on Preferences",
|
||||
"name": "Example Addon Preferences",
|
||||
"author": "Your Name Here",
|
||||
"version": (1, 0),
|
||||
"blender": (2, 65, 0),
|
||||
"location": "SpaceBar Search -> Add-on Preferences Example",
|
||||
"description": "Example Add-on",
|
||||
"location": "SpaceBar Search -> Addon Preferences Example",
|
||||
"description": "Example Addon",
|
||||
"warning": "",
|
||||
"wiki_url": "",
|
||||
"tracker_url": "",
|
||||
@@ -18,7 +18,7 @@ from bpy.props import StringProperty, IntProperty, BoolProperty
|
||||
|
||||
|
||||
class ExampleAddonPreferences(AddonPreferences):
|
||||
# this must match the add-on name, use '__package__'
|
||||
# this must match the addon name, use '__package__'
|
||||
# when defining this in a submodule of a python package.
|
||||
bl_idname = __name__
|
||||
|
||||
@@ -37,7 +37,7 @@ class ExampleAddonPreferences(AddonPreferences):
|
||||
|
||||
def draw(self, context):
|
||||
layout = self.layout
|
||||
layout.label(text="This is a preferences view for our add-on")
|
||||
layout.label(text="This is a preferences view for our addon")
|
||||
layout.prop(self, "filepath")
|
||||
layout.prop(self, "number")
|
||||
layout.prop(self, "boolean")
|
||||
@@ -46,7 +46,7 @@ class ExampleAddonPreferences(AddonPreferences):
|
||||
class OBJECT_OT_addon_prefs_example(Operator):
|
||||
"""Display example preferences"""
|
||||
bl_idname = "object.addon_prefs_example"
|
||||
bl_label = "Add-on Preferences Example"
|
||||
bl_label = "Addon Preferences Example"
|
||||
bl_options = {'REGISTER', 'UNDO'}
|
||||
|
||||
def execute(self, context):
|
||||
|
@@ -2,9 +2,9 @@
|
||||
Extending Menus
|
||||
+++++++++++++++
|
||||
|
||||
When creating menus for add-ons you can't reference menus
|
||||
in Blender's default scripts.
|
||||
Instead, the add-on can add menu items to existing menus.
|
||||
When creating menus for addons you can't reference menus in Blender's default
|
||||
scripts.
|
||||
Instead, the addon can add menu items to existing menus.
|
||||
|
||||
The function menu_draw acts like :class:`Menu.draw`.
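For example (a sketch; ``INFO_MT_file`` and the operator are only illustrative choices)::

    import bpy

    def menu_draw(self, context):
        self.layout.operator("wm.save_as_mainfile")

    bpy.types.INFO_MT_file.append(menu_draw)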
|
||||
"""
|
||||
|
@@ -13,7 +13,7 @@ be animated, accessed from the user interface and from python.
|
||||
definitions are not, this means whenever you load blender the class needs
|
||||
to be registered too.
|
||||
|
||||
This is best done by creating an add-on which loads on startup and registers
|
||||
This is best done by creating an addon which loads on startup and registers
|
||||
your properties.
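A minimal sketch of such an add-on's registration (the property name and type are illustrative)::

    import bpy

    def register():
        bpy.types.Scene.my_custom_int = bpy.props.IntProperty(name="My Int", default=0)

    def unregister():
        del bpy.types.Scene.my_custom_int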
|
||||
|
||||
.. note::
|
||||
|
@@ -49,7 +49,7 @@ vec2d[:] = vec3d[:2]
|
||||
|
||||
|
||||
# Vectors support 'swizzle' operations
|
||||
# See https://en.wikipedia.org/wiki/Swizzling_(computer_graphics)
|
||||
# See http://en.wikipedia.org/wiki/Swizzling_(computer_graphics)
|
||||
vec.xyz = vec.zyx
|
||||
vec.xy = vec4d.zw
|
||||
vec.xyz = vec4d.wzz
|
||||
|
@@ -8,11 +8,11 @@ Physics Constraints (bge.constraints)
|
||||
Examples
|
||||
--------
|
||||
|
||||
.. include:: __/examples/bge.constraints.py
|
||||
.. include:: ../examples/bge.constraints.py
|
||||
:start-line: 1
|
||||
:end-line: 4
|
||||
|
||||
.. literalinclude:: __/examples/bge.constraints.py
|
||||
.. literalinclude:: ../examples/bge.constraints.py
|
||||
:lines: 6-
|
||||
|
||||
|
||||
|
@@ -12,53 +12,53 @@ This module holds key constants for the SCA_KeyboardSensor.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# Set a connected keyboard sensor to accept F1
|
||||
import bge
|
||||
# Set a connected keyboard sensor to accept F1
|
||||
import bge
|
||||
|
||||
co = bge.logic.getCurrentController()
|
||||
# 'Keyboard' is a keyboard sensor
|
||||
sensor = co.sensors["Keyboard"]
|
||||
sensor.key = bge.events.F1KEY
|
||||
co = bge.logic.getCurrentController()
|
||||
# 'Keyboard' is a keyboard sensor
|
||||
sensor = co.sensors["Keyboard"]
|
||||
sensor.key = bge.events.F1KEY
|
||||
|
||||
code-block:: python
|
||||
.. code-block:: python
|
||||
|
||||
# Do the all keys thing
|
||||
import bge
|
||||
# Do the all keys thing
|
||||
import bge
|
||||
|
||||
co = bge.logic.getCurrentController()
|
||||
# 'Keyboard' is a keyboard sensor
|
||||
sensor = co.sensors["Keyboard"]
|
||||
co = bge.logic.getCurrentController()
|
||||
# 'Keyboard' is a keyboard sensor
|
||||
sensor = co.sensors["Keyboard"]
|
||||
|
||||
for key,status in sensor.events:
|
||||
# key[0] == bge.events.keycode, key[1] = status
|
||||
if status == bge.logic.KX_INPUT_JUST_ACTIVATED:
|
||||
if key == bge.events.WKEY:
|
||||
# Activate Forward!
|
||||
if key == bge.events.SKEY:
|
||||
# Activate Backward!
|
||||
if key == bge.events.AKEY:
|
||||
# Activate Left!
|
||||
if key == bge.events.DKEY:
|
||||
# Activate Right!
|
||||
for key,status in sensor.events:
|
||||
# key[0] == bge.events.keycode, key[1] = status
|
||||
if status == bge.logic.KX_INPUT_JUST_ACTIVATED:
|
||||
if key == bge.events.WKEY:
|
||||
# Activate Forward!
|
||||
if key == bge.events.SKEY:
|
||||
# Activate Backward!
|
||||
if key == bge.events.AKEY:
|
||||
# Activate Left!
|
||||
if key == bge.events.DKEY:
|
||||
# Activate Right!
|
||||
|
||||
code-block:: python
|
||||
.. code-block:: python
|
||||
|
||||
# The all keys thing without a keyboard sensor (but you will
|
||||
# need an always sensor with pulse mode on)
|
||||
import bge
|
||||
# The all keys thing without a keyboard sensor (but you will
|
||||
# need an always sensor with pulse mode on)
|
||||
import bge
|
||||
|
||||
# Just shortening names here
|
||||
keyboard = bge.logic.keyboard
|
||||
JUST_ACTIVATED = bge.logic.KX_INPUT_JUST_ACTIVATED
|
||||
# Just shortening names here
|
||||
keyboard = bge.logic.keyboard
|
||||
JUST_ACTIVATED = bge.logic.KX_INPUT_JUST_ACTIVATED
|
||||
|
||||
if keyboard.events[bge.events.WKEY] == JUST_ACTIVATED:
|
||||
print("Activate Forward!")
|
||||
if keyboard.events[bge.events.SKEY] == JUST_ACTIVATED:
|
||||
print("Activate Backward!")
|
||||
if keyboard.events[bge.events.AKEY] == JUST_ACTIVATED:
|
||||
print("Activate Left!")
|
||||
if keyboard.events[bge.events.DKEY] == JUST_ACTIVATED:
|
||||
print("Activate Right!")
|
||||
if keyboard.events[bge.events.WKEY] == JUST_ACTIVATED:
|
||||
print("Activate Forward!")
|
||||
if keyboard.events[bge.events.SKEY] == JUST_ACTIVATED:
|
||||
print("Activate Backward!")
|
||||
if keyboard.events[bge.events.AKEY] == JUST_ACTIVATED:
|
||||
print("Activate Left!")
|
||||
if keyboard.events[bge.events.DKEY] == JUST_ACTIVATED:
|
||||
print("Activate Right!")
|
||||
|
||||
|
||||
*********
|
||||
|
@@ -385,10 +385,9 @@ General functions
|
||||
If False, the render is skipped and another logic frame starts immediately.
|
||||
|
||||
.. note::
|
||||
|
||||
GPU VSync no longer limits the number of frames per second when render is off,
|
||||
but the *Use Frame Rate* option still regulates the fps. To run as many frames
|
||||
as possible, untick this option (Render Properties, System panel).
|
||||
but the 'Use Frame Rate' option still regulates the fps. To run as many frames
|
||||
as possible, untick this option (Render Properties, System panel)
|
||||
|
||||
:arg render: the render flag
|
||||
:type render: bool
|
||||
|
@@ -98,7 +98,6 @@ Constants
|
||||
|
||||
The pixel buffer for offscreen render is a Texture. Argument to :func:`offScreenCreate`
|
||||
|
||||
|
||||
*****
|
||||
Types
|
||||
*****
|
||||
@@ -108,8 +107,8 @@ Types
|
||||
An off-screen render buffer object.
|
||||
|
||||
Use :func:`offScreenCreate` to create it.
|
||||
Currently it can only be used in the :class:`bge.texture.ImageRender`
|
||||
constructor to render on a FBO rather than the default viewport.
|
||||
Currently it can only be used in the :class:`bge.texture.ImageRender` constructor to render on a FBO rather than the
|
||||
default viewport.
|
||||
|
||||
.. attribute:: width
|
||||
|
||||
@@ -125,14 +124,10 @@ Types
|
||||
|
||||
.. attribute:: color
|
||||
|
||||
The underlying OpenGL bind code of the texture object that holds
|
||||
the rendered image, 0 if the FBO is using RenderBuffer.
|
||||
The choice between RenderBuffer and Texture is determined
|
||||
by the target argument of :func:`offScreenCreate`.
|
||||
The underlying OpenGL bind code of the texture object that holds the rendered image, 0 if the FBO is using RenderBuffer. The choice between RenderBuffer and Texture is determined by the target argument of :func:`offScreenCreate`.
|
||||
|
||||
:type: integer
|
||||
|
||||
|
||||
*********
|
||||
Functions
|
||||
*********
|
||||
@@ -415,11 +410,7 @@ Functions
|
||||
:type height: integer
|
||||
:arg samples: the number of multisample for anti-aliasing (MSAA), 0 to disable MSAA
|
||||
:type samples: integer
|
||||
:arg target: the pixel storage: :data:`RAS_OFS_RENDER_BUFFER` to render on RenderBuffers (the default),
|
||||
:data:`RAS_OFS_RENDER_TEXTURE` to render on texture.
|
||||
The latter is interesting if you want to access the texture directly (see :attr:`RASOffScreen.color`).
|
||||
Otherwise the default is preferable as it's more widely supported by GPUs and more efficient.
|
||||
If the GPU does not support MSAA+Texture (e.g. Intel HD GPU), MSAA will be disabled.
|
||||
:arg target: the pixel storage: :data:`RAS_OFS_RENDER_BUFFER` to render on RenderBuffers (the default), :data:`RAS_OFS_RENDER_TEXTURE` to render on texture. The later is interesting if you want to access the texture directly (see :attr:`RASOffScreen.color`). Otherwise the default is preferable as it's more widely supported by GPUs and more efficient. If the GPU does not support MSAA+Texture (e.g. Intel HD GPU), MSAA will be disabled.
|
||||
:type target: integer
|
||||
:rtype: :class:`RASOffScreen`
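A hypothetical call based on the arguments documented above, rendering to a texture so :attr:`RASOffScreen.color` can be read back::

    import bge
    ofs = bge.render.offScreenCreate(512, 512, 0, bge.render.RAS_OFS_RENDER_TEXTURE)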
|
||||
|
||||
|
@@ -8,16 +8,13 @@ Introduction
|
||||
|
||||
The bge.texture module allows you to manipulate textures during the game.
|
||||
|
||||
Several sources for texture are possible: video files, image files, video capture, memory buffer,
|
||||
camera render or a mix of that.
|
||||
Several sources for texture are possible: video files, image files, video capture, memory buffer, camera render or a mix of that.
|
||||
|
||||
The video and image files can be loaded from the internet using an URL instead of a file name.
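For example (an illustrative sketch; the URL is a placeholder)::

    import bge
    video_source = bge.texture.VideoFFmpeg("http://example.com/clip.ogg")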
|
||||
|
||||
In addition, you can apply filters on the images before sending them to the GPU, allowing video effect:
|
||||
blue screen, color band, gray, normal map.
|
||||
In addition, you can apply filters on the images before sending them to the GPU, allowing video effect: blue screen, color band, gray, normal map.
|
||||
|
||||
bge.texture uses FFmpeg to load images and videos.
|
||||
All the formats and codecs that FFmpeg supports are supported by this module, including but not limited to:
|
||||
bge.texture uses FFmpeg to load images and videos. All the formats and codecs that FFmpeg supports are supported by this module, including but not limited to:
|
||||
|
||||
* AVI
|
||||
* Ogg
|
||||
@@ -39,29 +36,28 @@ When the texture object is deleted, the new texture is deleted and the old textu
|
||||
|
||||
.. module:: bge.texture
|
||||
|
||||
.. include:: __/examples/bge.texture.py
|
||||
.. include:: ../examples/bge.texture.py
|
||||
:start-line: 1
|
||||
:end-line: 5
|
||||
|
||||
.. literalinclude:: __/examples/bge.texture.py
|
||||
.. literalinclude:: ../examples/bge.texture.py
|
||||
:lines: 7-
|
||||
|
||||
.. include:: __/examples/bge.texture.1.py
|
||||
.. include:: ../examples/bge.texture.1.py
|
||||
:start-line: 1
|
||||
:end-line: 6
|
||||
|
||||
.. literalinclude:: __/examples/bge.texture.1.py
|
||||
.. literalinclude:: ../examples/bge.texture.1.py
|
||||
:lines: 8-
|
||||
|
||||
|
||||
.. include:: __/examples/bge.texture.2.py
|
||||
.. include:: ../examples/bge.texture.2.py
|
||||
:start-line: 1
|
||||
:end-line: 6
|
||||
|
||||
.. literalinclude:: __/examples/bge.texture.2.py
|
||||
.. literalinclude:: ../examples/bge.texture.2.py
|
||||
:lines: 8-
|
||||
|
||||
|
||||
*************
|
||||
Video classes
|
||||
*************
|
||||
@@ -184,23 +180,19 @@ Video classes
|
||||
:return: Whether the video was playing.
|
||||
:rtype: bool
|
||||
|
||||
.. method:: refresh(buffer=None, format="RGBA", timestamp=-1.0)
|
||||
.. method:: refresh(buffer=None, format="RGBA", ts=-1.0)
|
||||
|
||||
Refresh video - get its status and optionally copy the frame to an external buffer.
|
||||
|
||||
:arg buffer: An optional object that implements the buffer protocol.
|
||||
If specified, the image is copied to the buffer, which must be big enough or an exception is thrown.
|
||||
:arg buffer: An optional object that implements the buffer protocol. If specified, the image is copied to the buffer, which must be big enough or an exception is thrown.
|
||||
:type buffer: any buffer type
|
||||
:arg format: An optional image format specifier for the image that will be copied to the buffer.
|
||||
Only valid values are "RGBA" or "BGRA"
|
||||
:arg format: An optional image format specifier for the image that will be copied to the buffer. Only valid values are "RGBA" or "BGRA"
|
||||
:type format: str
|
||||
:arg timestamp: An optional timestamp (in seconds from the start of the movie)
|
||||
of the frame to be copied to the buffer.
|
||||
:type timestamp: float
|
||||
:arg ts: An optional timestamp (in seconds from the start of the movie) of the frame to be copied to the buffer.
|
||||
:type ts: float
|
||||
:return: see `FFmpeg Video and Image Status`_.
|
||||
:rtype: int
|
||||
|
||||
|
||||
*************
|
||||
Image classes
|
||||
*************
|
||||
@@ -268,11 +260,9 @@ Image classes
|
||||
|
||||
Refresh image, get its status and optionally copy the frame to an external buffer.
|
||||
|
||||
:arg buffer: An optional object that implements the buffer protocol.
|
||||
If specified, the image is copied to the buffer, which must be big enough or an exception is thrown.
|
||||
:arg buffer: An optional object that implements the buffer protocol. If specified, the image is copied to the buffer, which must be big enough or an exception is thrown.
|
||||
:type buffer: any buffer type
|
||||
:arg format: An optional image format specifier for the image that will be copied to the buffer.
|
||||
Only valid values are "RGBA" or "BGRA"
|
||||
:arg format: An optional image format specifier for the image that will be copied to the buffer. Only valid values are "RGBA" or "BGRA"
|
||||
:type format: str
|
||||
:return: see `FFmpeg Video and Image Status`_.
|
||||
:rtype: int
|
||||
@@ -341,8 +331,7 @@ Image classes
|
||||
Update image buffer.
|
||||
|
||||
:arg imageBuffer: Buffer to load the new data from.
|
||||
:type imageBuffer: :class:`~bgl.Buffer`, :class:`ImageBuff`
|
||||
or Python object implementing the buffer protocol (f.ex. bytes)
|
||||
:type imageBuffer: :class:`~bgl.Buffer`, :class:`ImageBuff` or Python object implementing the buffer protocol (f.ex. bytes)
|
||||
:arg width: Width of the data to load.
|
||||
:type width: int
|
||||
:arg height: Height of the data to load.
|
||||
@@ -379,8 +368,7 @@ Image classes
|
||||
|
||||
:arg scene: Scene in which the image has to be taken.
|
||||
:type scene: :class:`~bge.types.KX_Scene`
|
||||
:arg observer: Reference object for the mirror
|
||||
(the object from which the mirror has to be looked at, for example a camera).
|
||||
:arg observer: Reference object for the mirror (the object from which the mirror has to be looked at, for example a camera).
|
||||
:type observer: :class:`~bge.types.KX_GameObject`
|
||||
:arg mirror: Object holding the mirror.
|
||||
:type mirror: :class:`~bge.types.KX_GameObject`
|
||||
@@ -440,15 +428,11 @@ Image classes
|
||||
|
||||
.. method:: refresh(buffer=None, format="RGBA")
|
||||
|
||||
Refresh image - render and copy the image to an external buffer (optional)
|
||||
then invalidate its current content.
|
||||
Refresh image - render and copy the image to an external buffer (optional) then invalidate its current content.
|
||||
|
||||
:arg buffer: An optional object that implements the buffer protocol.
|
||||
If specified, the image is rendered and copied to the buffer,
|
||||
which must be big enough or an exception is thrown.
|
||||
:arg buffer: An optional object that implements the buffer protocol. If specified, the image is rendered and copied to the buffer, which must be big enough or an exception is thrown.
|
||||
:type buffer: any buffer type
|
||||
:arg format: An optional image format specifier for the image that will be copied to the buffer.
|
||||
Only valid values are "RGBA" or "BGRA"
|
||||
:arg format: An optional image format specifier for the image that will be copied to the buffer. Only valid values are "RGBA" or "BGRA"
|
||||
:type format: str
|
||||
|
||||
.. attribute:: scale
|
||||
@@ -536,15 +520,11 @@ Image classes
|
||||
|
||||
.. method:: refresh(buffer=None, format="RGBA")
|
||||
|
||||
Refresh image - calculate and copy the image to an external buffer (optional)
|
||||
then invalidate its current content.
|
||||
Refresh image - calculate and copy the image to an external buffer (optional) then invalidate its current content.
|
||||
|
||||
:arg buffer: An optional object that implements the buffer protocol.
|
||||
If specified, the image is calculated and copied to the buffer,
|
||||
which must be big enough or an exception is thrown.
|
||||
:arg buffer: An optional object that implements the buffer protocol. If specified, the image is calculated and copied to the buffer, which must be big enough or an exception is thrown.
|
||||
:type buffer: any buffer type
|
||||
:arg format: An optional image format specifier for the image that will be copied to the buffer.
|
||||
Only valid values are "RGBA" or "BGRA"
|
||||
:arg format: An optional image format specifier for the image that will be copied to the buffer. Only valid values are "RGBA" or "BGRA"
|
||||
:type format: str
|
||||
|
||||
.. attribute:: scale
|
||||
@@ -590,11 +570,10 @@ Image classes
|
||||
|
||||
:type: bool
|
||||
|
||||
.. class:: ImageRender(scene, camera)
|
||||
.. class:: ImageRender(scene, camera, fbo=None)
|
||||
|
||||
Image source from render.
|
||||
The render is done on a custom framebuffer object if fbo is specified,
|
||||
otherwise on the default framebuffer.
|
||||
Image source from render. The render is done on a custom framebuffer object if fbo is specified, otherwise on
|
||||
the default framebuffer.
|
||||
|
||||
:arg scene: Scene in which the image has to be taken.
|
||||
:type scene: :class:`~bge.types.KX_Scene`
|
||||
@@ -687,14 +666,7 @@ Image classes
|
||||
|
||||
.. method:: render()
|
||||
|
||||
Render the scene but do not extract the pixels yet.
|
||||
The function returns as soon as the render commands have been sent to the GPU.
|
||||
The render will proceed asynchronously in the GPU while the host can perform other tasks.
|
||||
To complete the render, you can either call :func:`refresh`
|
||||
directly or refresh the texture of which this object is the source.
|
||||
This method is useful to implement asynchronous render for optimal performance: call render()
|
||||
on frame n and refresh() on frame n+1 to give as much time as possible to the GPU
|
||||
to render the frame while the game engine can perform other tasks.
|
||||
Render the scene but do not extract the pixels yet. The function returns as soon as the render commands have been send to the GPU. The render will proceed asynchronously in the GPU while the host can perform other tasks. To complete the render, you can either call :func:`refresh` directly of refresh the texture of which this object is the source. This method is useful to implement asynchronous render for optimal performance: call render() on frame n and refresh() on frame n+1 to give as much as time as possible to the GPU to render the frame while the game engine can perform other tasks.
|
||||
|
||||
:return: True if the render was initiated, False if the render cannot be performed (e.g. the camera is active)
|
||||
:rtype: bool
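A sketch of the two-frame pattern described above (``img_render`` stands for an :class:`ImageRender` instance and ``buf`` for a pre-allocated buffer)::

    # frame n: queue the render and return immediately
    img_render.render()
    # frame n+1: wait for completion and pull the pixels
    img_render.refresh(buf, "RGBA")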
|
||||
@@ -702,21 +674,11 @@ Image classes
|
||||
.. method:: refresh()
|
||||
.. method:: refresh(buffer, format="RGBA")
|
||||
|
||||
Refresh video - render and optionally copy the image to an external buffer then invalidate its current content.
|
||||
The render may have been started earlier with the :func:`render` method,
|
||||
in which case this function simply waits for the render operations to complete.
|
||||
When called without argument, the pixels are not extracted but the render is guaranteed
|
||||
to be completed when the function returns.
|
||||
This only makes sense with offscreen render on texture target (see :func:`~bge.render.offScreenCreate`).
|
||||
Refresh video - render and optionally copy the image to an external buffer then invalidate its current content. The render may have been started earlier with the :func:`render` method, in which case this function simply waits for the render operations to complete. When called without argument, the pixels are not extracted but the render is guaranteed to be completed when the function returns. This only makes sense with offscreen render on texture target (see :func:`~bge.render.offScreenCreate`).
|
||||
|
||||
:arg buffer: An object that implements the buffer protocol.
|
||||
If specified, the image is copied to the buffer, which must be big enough or an exception is thrown.
|
||||
The transfer to the buffer is optimal if no processing of the image is needed.
|
||||
This is the case if ``flip=False, alpha=True, scale=False, whole=True, depth=False, zbuff=False``
|
||||
and no filter is set.
|
||||
:arg buffer: An object that implements the buffer protocol. If specified, the image is copied to the buffer, which must be big enough or an exception is thrown. The transfer to the buffer is optimal if no processing of the image is needed. This is the case if flip=False, alpha=True, scale=False, whole=True, depth=False, zbuff=False and no filter is set.
|
||||
:type buffer: any buffer type of sufficient size
|
||||
:arg format: An optional image format specifier for the image that will be copied to the buffer.
|
||||
Only valid values are "RGBA" or "BGRA"
|
||||
:arg format: An optional image format specifier for the image that will be copied to the buffer. Only valid values are "RGBA" or "BGRA"
|
||||
:type format: str
|
||||
:return: True if the render is complete, False if the render cannot be performed (e.g. the camera is active)
|
||||
:rtype: bool
|
||||
@@ -774,14 +736,9 @@ Image classes
|
||||
|
||||
Refresh video - copy the viewport to an external buffer (optional) then invalidate its current content.
|
||||
|
||||
:arg buffer: An optional object that implements the buffer protocol.
|
||||
If specified, the image is copied to the buffer, which must be big enough or an exception is thrown.
|
||||
The transfer to the buffer is optimal if no processing of the image is needed.
|
||||
This is the case if ``flip=False, alpha=True, scale=False, whole=True, depth=False, zbuff=False``
|
||||
and no filter is set.
|
||||
:arg buffer: An optional object that implements the buffer protocol. If specified, the image is copied to the buffer, which must be big enough or an exception is thrown. The transfer to the buffer is optimal if no processing of the image is needed. This is the case if flip=False, alpha=True, scale=False, whole=True, depth=False, zbuff=False and no filter is set.
|
||||
:type buffer: any buffer type
|
||||
:arg format: An optional image format specifier for the image that will be copied to the buffer.
|
||||
Only valid values are "RGBA" or "BGRA"
|
||||
:arg format: An optional image format specifier for the image that will be copied to the buffer. Only valid values are "RGBA" or "BGRA"
|
||||
:type format: str
|
||||
|
||||
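For illustration, a minimal sketch of the optimal (no-processing) transfer into a pre-allocated buffer; the buffer size assumes 4 bytes per RGBA pixel:

.. code-block:: python

   import bgl
   from bge import render, texture

   img = texture.ImageViewport()
   img.whole = True     # keep the defaults so the copy stays a straight transfer

   width, height = render.getWindowWidth(), render.getWindowHeight()
   buf = bgl.Buffer(bgl.GL_BYTE, [width * height * 4])
   img.refresh(buf, "RGBA")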
.. attribute:: scale
|
||||
@@ -811,13 +768,13 @@ Image classes
|
||||
.. attribute:: depth
|
||||
|
||||
Use depth component of viewport as array of float - not suitable for texture source,
|
||||
should only be used with ``bge.texture.imageToArray(mode='F')``.
|
||||
should only be used with bge.texture.imageToArray(mode='F').
|
||||
|
||||
:type: bool
|
||||
|
||||
.. attribute:: zbuff
|
||||
|
||||
Use depth component of viewport as grey scale color - suitable for texture source.
|
||||
Use depth component of viewport as grey scale color - suitable for texture source.
|
||||
|
||||
:type: bool
|
||||
|
||||
@@ -838,88 +795,78 @@ Image classes
|
||||
:arg capture: Card number from which the input video must be captured.
|
||||
:type capture: int
|
||||
|
||||
The format argument must be written as ``<displayMode>/<pixelFormat>[/3D][:<cacheSize>]`` where ``<displayMode>``
|
||||
The format argument must be written as “<displayMode>/<pixelFormat>[/3D][:<cacheSize>]” where <displayMode>
|
||||
describes the frame size and rate and <pixelFormat> the encoding of the pixels.
|
||||
The optional ``/3D`` suffix is to be used if the video stream is stereo with a left and right eye feed.
|
||||
The optional ``:<cacheSize>`` suffix determines the number of the video frames kept in cache, by default 8.
|
||||
Some DeckLink cards won't work below a certain cache size.
|
||||
The default value 8 should be sufficient for all cards.
|
||||
The optional /3D suffix is to be used if the video stream is stereo with a left and right eye feed.
|
||||
The optional :<cacheSize> suffix determines the number of the video frames kept in cache, by default 8.
|
||||
Some DeckLink cards won't work below a certain cache size. The default value 8 should be sufficient for all cards.
|
||||
You may try to reduce the cache size to reduce the memory footprint. For example the 4K Extreme is known
|
||||
to work with 3 frames only, the Extreme 2 needs 4 frames and the Intensity Shuttle needs 6 frames, etc.
|
||||
Reducing the cache size may be useful when Decklink is used in conjunction with GPUDirect:
|
||||
all frames must be locked in memory in that case and that puts a lot of pressure on memory.
|
||||
If you reduce the cache size too much,
|
||||
Reducing the cache size may be useful when Decklink is used in conjunction with GPUDirect: all frames must be locked
|
||||
in memory in that case and that puts a lot of pressure on memory. If you reduce the cache size too much,
|
||||
you'll get no error but no video feed either.
|
||||
|
||||
The valid ``<displayMode>`` values are copied from the ``BMDDisplayMode`` enum in the DeckLink API
|
||||
The valid <displayMode> values are copied from the 'BMDDisplayMode' enum in the DeckLink API
|
||||
without the 'bmdMode' prefix. In case a mode that is not in this list is added in a later version
|
||||
of the SDK, it is also possible to specify the 4 letters of the internal code for that mode.
|
||||
You will find the internal code in the ``DeckLinkAPIModes.h`` file that is part of the SDK.
|
||||
You will find the internal code in the DeckLinkAPIModes.h file that is part of the SDK.
|
||||
Here is for reference the full list of supported display modes with their equivalent internal codes:
|
||||
|
||||
Internal Codes
|
||||
- NTSC 'ntsc'
|
||||
- NTSC2398 'nt23'
|
||||
- PAL 'pal '
|
||||
- NTSCp 'ntsp'
|
||||
- PALp 'palp'
|
||||
HD 1080 Modes
|
||||
- HD1080p2398 '23ps'
|
||||
- HD1080p24 '24ps'
|
||||
- HD1080p25 'Hp25'
|
||||
- HD1080p2997 'Hp29'
|
||||
- HD1080p30 'Hp30'
|
||||
- HD1080i50 'Hi50'
|
||||
- HD1080i5994 'Hi59'
|
||||
- HD1080i6000 'Hi60'
|
||||
- HD1080p50 'Hp50'
|
||||
- HD1080p5994 'Hp59'
|
||||
- HD1080p6000 'Hp60'
|
||||
HD 720 Modes
|
||||
- HD720p50 'hp50'
|
||||
- HD720p5994 'hp59'
|
||||
- HD720p60 'hp60'
|
||||
* NTSC 'ntsc'
|
||||
* NTSC2398 'nt23'
|
||||
* PAL 'pal '
|
||||
* NTSCp 'ntsp'
|
||||
* PALp 'palp'
|
||||
HD 1080 Modes:
|
||||
* HD1080p2398 '23ps'
|
||||
* HD1080p24 '24ps'
|
||||
* HD1080p25 'Hp25'
|
||||
* HD1080p2997 'Hp29'
|
||||
* HD1080p30 'Hp30'
|
||||
* HD1080i50 'Hi50'
|
||||
* HD1080i5994 'Hi59'
|
||||
* HD1080i6000 'Hi60'
|
||||
* HD1080p50 'Hp50'
|
||||
* HD1080p5994 'Hp59'
|
||||
* HD1080p6000 'Hp60'
|
||||
HD 720 Modes:
|
||||
* HD720p50 'hp50'
|
||||
* HD720p5994 'hp59'
|
||||
* HD720p60 'hp60'
|
||||
2k Modes
|
||||
- 2k2398 '2k23'
|
||||
- 2k24 '2k24'
|
||||
- 2k25 '2k25'
|
||||
* 2k2398 '2k23'
|
||||
* 2k24 '2k24'
|
||||
* 2k25 '2k25'
|
||||
4k Modes
|
||||
- 4K2160p2398 '4k23'
|
||||
- 4K2160p24 '4k24'
|
||||
- 4K2160p25 '4k25'
|
||||
- 4K2160p2997 '4k29'
|
||||
- 4K2160p30 '4k30'
|
||||
- 4K2160p50 '4k50'
|
||||
- 4K2160p5994 '4k59'
|
||||
- 4K2160p60 '4k60'
|
||||
|
||||
* 4K2160p2398 '4k23'
|
||||
* 4K2160p24 '4k24'
|
||||
* 4K2160p25 '4k25'
|
||||
* 4K2160p2997 '4k29'
|
||||
* 4K2160p30 '4k30'
|
||||
* 4K2160p50 '4k50'
|
||||
* 4K2160p5994 '4k59'
|
||||
* 4K2160p60 '4k60'
|
||||
Most of the names are self-explanatory. If necessary, refer to the DeckLink API documentation for more information.
|
||||
|
||||
Similarly, <pixelFormat> is copied from the BMDPixelFormat enum.
|
||||
|
||||
Here is for reference the full list of supported pixel formats and their equivalent internal codes:
|
||||
|
||||
Pixel Formats
|
||||
- 8BitYUV '2vuy'
|
||||
- 10BitYUV 'v210'
|
||||
- 8BitARGB * no equivalent code *
|
||||
- 8BitBGRA 'BGRA'
|
||||
- 10BitRGB 'r210'
|
||||
- 12BitRGB 'R12B'
|
||||
- 12BitRGBLE 'R12L'
|
||||
- 10BitRGBXLE 'R10l'
|
||||
- 10BitRGBX 'R10b'
|
||||
|
||||
* 8BitYUV '2vuy'
|
||||
* 10BitYUV 'v210'
|
||||
* 8BitARGB * no equivalent code *
|
||||
* 8BitBGRA 'BGRA'
|
||||
* 10BitRGB 'r210'
|
||||
* 12BitRGB 'R12B'
|
||||
* 12BitRGBLE 'R12L'
|
||||
* 10BitRGBXLE 'R10l'
|
||||
* 10BitRGBX 'R10b'
|
||||
Refer to the DeckLink SDK documentation for a full description of these pixel formats.
|
||||
It is important to understand them as the decoding of the pixels is NOT done in VideoTexture
|
||||
for performance reasons. Instead a specific shader must be used to decode the pixels on the GPU.
|
||||
Only the '8BitARGB', '8BitBGRA' and '10BitRGBXLE' pixel formats are mapped directly to OpenGL RGB float textures.
|
||||
The '8BitYUV' and '10BitYUV' pixel formats are mapped to OpenGL RGB float textures but require a shader to decode.
|
||||
The other pixel formats are sent as a ``GL_RED_INTEGER`` texture (i.e. a texture with only the
|
||||
The other pixel formats are sent as a 'GL_RED_INTEGER' texture (i.e. a texture with only the
|
||||
red channel coded as an unsigned 32 bit integer) and are not recommended for use.
|
||||
|
||||
Example: ``HD1080p24/10BitYUV/3D:4`` is equivalent to ``24ps/v210/3D:4``
|
||||
and represents a full HD stereo feed at 24 frames per second and 4 frames cache size.
|
||||
Example: “HD1080p24/10BitYUV/3D:4” is equivalent to “24ps/v210/3D:4” and represents a full HD stereo feed at 24 frames per second and 4 frames cache size.
|
||||
|
||||
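As a sketch only (the card number, material image name and the exact constructor argument order are assumptions based on the argument descriptions above):

.. code-block:: python

   from bge import logic, texture

   obj = logic.getCurrentController().owner
   mat_id = texture.materialID(obj, "IMvideo.png")   # assumed image name

   obj["tex"] = texture.Texture(obj, mat_id)
   # Full HD stereo feed at 24 frames per second, 10 bit YUV, 4 frames of cache,
   # captured from the first DeckLink card:
   obj["tex"].source = texture.VideoDeckLink("HD1080p24/10BitYUV/3D:4", 0)
   obj["tex"].source.play()   # assumed capture control, see the capture methods below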
Although video format auto detection is possible with certain DeckLink devices, the corresponding
|
||||
API is NOT implemented in the BGE. Therefore it is important to specify the format string that
|
||||
@@ -999,7 +946,6 @@ Image classes
|
||||
:return: True if the capture could be stopped, False otherwise.
|
||||
:rtype: bool
|
||||
|
||||
|
||||
***************
|
||||
Texture classes
|
||||
***************
|
||||
@@ -1033,16 +979,16 @@ Texture classes
|
||||
|
||||
:type: bool
|
||||
|
||||
.. method:: refresh(refresh_source, timestamp=-1.0)
|
||||
.. method:: refresh(refresh_source=True, ts=-1.0)
|
||||
|
||||
Refresh texture from source.
|
||||
|
||||
:arg refresh_source: Whether to also refresh the image source of the texture.
|
||||
:type refresh_source: bool
|
||||
:arg timestamp: If the texture controls a VideoFFmpeg object:
|
||||
timestamp (in seconds from the start of the movie) of the frame to be loaded; this can be
|
||||
used for video-sound synchronization by passing :attr:`~bge.types.KX_SoundActuator.time` to it. (optional)
|
||||
:type timestamp: float
|
||||
:arg ts: If the texture controls a VideoFFmpeg object:
|
||||
timestamp (in seconds from the start of the movie) of the frame to be loaded; this can be
|
||||
used for video-sound synchronization by passing :attr:`~bge.types.KX_SoundActuator.time` to it. (optional)
|
||||
:type ts: float
|
||||
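A short sketch of that synchronization idea (the object, actuator and file names are assumptions):

.. code-block:: python

   from bge import logic, texture

   cont = logic.getCurrentController()
   obj = cont.owner

   if "video" not in obj:
       mat_id = texture.materialID(obj, "IMvideo.png")   # assumed image name
       obj["video"] = texture.Texture(obj, mat_id)
       obj["video"].source = texture.VideoFFmpeg(logic.expandPath("//video.mp4"))
       obj["video"].source.play()

   # refresh every frame, keeping the video in sync with the sound actuator
   obj["video"].refresh(True, cont.actuators["sound"].time)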
|
||||
.. attribute:: source
|
||||
|
||||
@@ -1069,9 +1015,7 @@ Texture classes
|
||||
on the output interfaces. Keying is supported: it allows compositing the frame with an
|
||||
input video feed that transits through the DeckLink card.
|
||||
|
||||
:arg cardIdx: Number of the card to be used for output (0=first card).
|
||||
It should be noted that DeckLink devices are usually half duplex:
|
||||
they can either be used for capture or playback but not both at the same time.
|
||||
:arg cardIdx: Number of the card to be used for output (0=first card). It should be noted that DeckLink devices are usually half duplex: they can either be used for capture or playback but not both at the same time.
|
||||
:type cardIdx: int
|
||||
:arg format: String representing the display mode of the output feed.
|
||||
:type format: str
|
||||
@@ -1081,11 +1025,11 @@ Texture classes
|
||||
specified. If keying is the goal (see keying attributes), the format must match exactly the
|
||||
input video feed, otherwise it can be any format supported by the device (there will be a
|
||||
runtime error if not).
|
||||
The format of the string is ``<displayMode>[/3D]``.
|
||||
The format of the string is “<displayMode>[/3D]”.
|
||||
|
||||
Refer to :class:`VideoDeckLink` to get the list of acceptable ``<displayMode>``.
|
||||
The optional ``/3D`` suffix is used to create a stereo 3D feed.
|
||||
In that case the 'right' attribute must also be set to specify the image source for the right eye.
|
||||
Refer to :class:`VideoDeckLink` to get the list of acceptable <displayMode>.
|
||||
The optional “/3D” suffix is used to create a stereo 3D feed. In that case the 'right' attribute
|
||||
must also be set to specify the image source for the right eye.
|
||||
|
||||
Note: The pixel format is not specified here because it is always BGRA. The alpha channel is
|
||||
used in keying to mix the source with the input video feed, otherwise it is not used.
|
||||
@@ -1104,14 +1048,14 @@ Texture classes
|
||||
copy inside VideoTexture).
|
||||
|
||||
:type: one of...
|
||||
- :class:`VideoFFmpeg`
|
||||
- :class:`VideoDeckLink`
|
||||
- :class:`ImageFFmpeg`
|
||||
- :class:`ImageBuff`
|
||||
- :class:`ImageMirror`
|
||||
- :class:`ImageMix`
|
||||
- :class:`ImageRender`
|
||||
- :class:`ImageViewport`
|
||||
* :class:`VideoFFmpeg`
|
||||
* :class:`VideoDeckLink`
|
||||
* :class:`ImageFFmpeg`
|
||||
* :class:`ImageBuff`
|
||||
* :class:`ImageMirror`
|
||||
* :class:`ImageMix`
|
||||
* :class:`ImageRender`
|
||||
* :class:`ImageViewport`
|
||||
|
||||
.. attribute:: right
|
||||
|
||||
@@ -1121,14 +1065,14 @@ Texture classes
|
||||
render buffer that is just the size of the video frame.
|
||||
|
||||
:type: one of...
|
||||
- :class:`VideoFFmpeg`
|
||||
- :class:`VideoDeckLink`
|
||||
- :class:`ImageFFmpeg`
|
||||
- :class:`ImageBuff`
|
||||
- :class:`ImageMirror`
|
||||
- :class:`ImageMix`
|
||||
- :class:`ImageRender`
|
||||
- :class:`ImageViewport`
|
||||
* :class:`VideoFFmpeg`
|
||||
* :class:`VideoDeckLink`
|
||||
* :class:`ImageFFmpeg`
|
||||
* :class:`ImageBuff`
|
||||
* :class:`ImageMirror`
|
||||
* :class:`ImageMix`
|
||||
* :class:`ImageRender`
|
||||
* :class:`ImageViewport`
|
||||
|
||||
.. attribute:: keying
|
||||
|
||||
@@ -1170,36 +1114,35 @@ Texture classes
|
||||
This method must be called frequently to update the output frame in the DeckLink device.
|
||||
|
||||
:arg refresh_source: True if the source object's image buffer should be invalidated after being
|
||||
used to compute the output frame. This triggers the recomputing of the
|
||||
source image on next refresh, which is normally the desired effect.
|
||||
False if the image source buffer should stay valid and reused on next refresh.
|
||||
Note that the DeckLink device stores the output frame and replays until a
|
||||
new frame is sent from the host. Thus, it is not necessary to refresh the
|
||||
DeckLink object if it is known that the image source has not changed.
|
||||
used to compute the output frame. This triggers the recomputing of the
|
||||
source image on next refresh, which is normally the desired effect.
|
||||
False if the image source buffer should stay valid and reused on next refresh.
|
||||
Note that the DeckLink device stores the output frame and replays until a
|
||||
new frame is sent from the host. Thus, it is not necessary to refresh the
|
||||
DeckLink object if it is known that the image source has not changed.
|
||||
:type refresh_source: bool
|
||||
:arg ts: The timestamp value passed to the image source object to compute the image.
|
||||
If unspecified, the BGE clock is used.
|
||||
:arg ts: The timestamp value passed to the image source object to compute the image. If unspecified, the BGE clock is used.
|
||||
:type ts: float
|
||||
|
||||
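Putting the pieces together, a hypothetical output loop could look like the following; the card number, display mode and source are assumptions, and the constructor argument order simply follows the argument list above:

.. code-block:: python

   from bge import texture

   # created once, e.g. stored in a game object property
   decklink = texture.DeckLink(0, "HD720p60")   # assumed card index and display mode
   decklink.source = texture.ImageViewport()

   # then, every frame:
   decklink.refresh(True)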
|
||||
**************
|
||||
Filter classes
|
||||
**************
|
||||
|
||||
|
||||
.. class:: FilterBGR24
|
||||
|
||||
Source filter BGR24.
|
||||
|
||||
.. class:: FilterBlueScreen
|
||||
|
||||
Filter for Blue Screen.
|
||||
The RGB channels of the color are left unchanged, while the output alpha is obtained as follows:
|
||||
Filter for Blue Screen. The RGB channels of the color are left unchanged, while the output alpha is obtained as follows:
|
||||
|
||||
- if the square of the Euclidean distance between the RGB color
|
||||
and the filter's reference color is smaller than the filter's lower limit,
|
||||
* if the square of the Euclidean distance between the RGB color and the filter's reference color is smaller than the filter's lower limit,
|
||||
the output alpha is set to 0;
|
||||
- if that square is bigger than the filter's upper limit, the output alpha is set to 255;
|
||||
- otherwise the output alpha is linearly extrapolated between 0 and 255 in the interval of the limits.
|
||||
|
||||
* if that square is bigger than the filter's upper limit, the output alpha is set to 255;
|
||||
|
||||
* otherwise the output alpha is linearly extrapolated between 0 and 255 in the interval of the limits (see the sketch below).
|
||||
|
||||
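For instance, keying out a roughly pure-blue background might look like this; the limit values are illustrative only, and the reference ``color`` and squared-distance ``limits`` attributes are assumed from the filter's attribute list:

.. code-block:: python

   from bge import texture

   filt = texture.FilterBlueScreen()
   filt.color = (0, 0, 255)    # reference color to key out
   filt.limits = (64, 200)     # squared-distance lower/upper limits (illustrative values)

   video = texture.VideoFFmpeg("video.mp4")   # assumed source
   video.filter = filt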
.. attribute:: color
|
||||
|
||||
@@ -1232,8 +1175,7 @@ Filter classes
|
||||
|
||||
.. class:: FilterColor
|
||||
|
||||
Filter for color calculations.
|
||||
The output color is obtained by multiplying the reduced 4x4 matrix with the input color
|
||||
Filter for color calculations. The output color is obtained by multiplying the reduced 4x4 matrix with the input color
|
||||
and adding the remaining column to the result.
|
||||
|
||||
.. attribute:: matrix
|
||||
@@ -1260,8 +1202,7 @@ Filter classes
|
||||
|
||||
.. class:: FilterGray
|
||||
|
||||
Filter for gray scale effect.
|
||||
Proportions of R, G and B contributions in the output gray scale are 28:151:77.
|
||||
Filter for gray scale effect. Proportions of R, G and B contributions in the output gray scale are 28:151:77.
|
||||
|
||||
.. attribute:: previous
|
||||
|
||||
@@ -1380,26 +1321,22 @@ Functions
|
||||
|
||||
:arg mode: Optional argument representing the pixel format.
|
||||
|
||||
- You can use the characters R, G, B for the 3 color channels, A for the alpha channel,
|
||||
* You can use the characters R, G, B for the 3 color channels, A for the alpha channel,
|
||||
0 to force a fixed 0 color channel and 1 to force a fixed 255 color channel.
|
||||
|
||||
Examples:
|
||||
* "BGR" will return 3 bytes per pixel with the Blue, Green and Red channels in that order.
|
||||
* "RGB1" will return 4 bytes per pixel with the Red, Green, Blue channels in that order and the alpha channel forced to 255.
|
||||
|
||||
- "BGR" will return 3 bytes per pixel with the
|
||||
Blue, Green and Red channels in that order.
|
||||
- "RGB1" will return 4 bytes per pixel with the
|
||||
Red, Green, Blue channels in that order and the alpha channel forced to 255.
|
||||
|
||||
- A special mode "F" allows to return the image as an array of float.
|
||||
This mode should only be used to retrieve the depth buffer of the
|
||||
:class:`ImageViewport` and :class:`ImageRender` objects.
|
||||
* A special mode "F" allows to return the image as an array of float. This mode should only be used to retrieve
|
||||
the depth buffer of the :class:`ImageViewport` and :class:`ImageRender` objects.
|
||||
The default mode is "RGBA".
|
||||
|
||||
:type mode: str
|
||||
|
||||
:return: An object representing the image as one dimensional array of bytes of size (pixel_size*width*height),
|
||||
line by line starting from the bottom of the image. The pixel size and format is determined by the mode
|
||||
parameter. For mode 'F', the array is a one dimensional array of float of size (width*height).
|
||||
line by line starting from the bottom of the image. The pixel size and format is determined by the mode
|
||||
parameter. For mode 'F', the array is a one dimensional array of float of size (width*height).
|
||||
:rtype: :class:`~bgl.Buffer`
|
||||
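A small sketch of the two kinds of output (the viewport source is just an example):

.. code-block:: python

   from bge import texture

   img = texture.ImageViewport()

   # 3 bytes per pixel, in Blue, Green, Red order
   bgr = texture.imageToArray(img, 'BGR')

   # depth buffer as an array of floats; enable the depth attribute first
   img.depth = True
   depth = texture.imageToArray(img, 'F')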
|
||||
.. function:: materialID(object, name)
|
||||
@@ -1414,8 +1351,7 @@ Functions
|
||||
the texture by material. In that case the material must have a texture channel in first
|
||||
position.
|
||||
|
||||
If the object has no material that matches name, it generates a runtime error.
|
||||
Use try/except to catch the exception.
|
||||
If the object has no material that matches name, it generates a runtime error. Use try/except to catch the exception.
|
||||
|
||||
Ex: ``bge.texture.materialID(obj, 'IMvideo.png')``
|
||||
|
||||
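A hedged sketch of that try/except pattern (the image name is an assumption):

.. code-block:: python

   from bge import logic, texture

   obj = logic.getCurrentController().owner
   try:
       mat_id = texture.materialID(obj, 'IMvideo.png')
   except RuntimeError:
       mat_id = 0   # fall back to the first material
   obj["tex"] = texture.Texture(obj, mat_id)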
|
@@ -27,10 +27,8 @@ base class --- :class:`PyObjectPlus`
|
||||
|
||||
:type: list of ints.
|
||||
|
||||
Each specifying the value of an axis between -1.0 and 1.0
|
||||
depending on how far the axis is pushed, 0 for nothing.
|
||||
The first 2 values are used by most joysticks and gamepads for directional control.
|
||||
3rd and 4th values are only on some joysticks and can be used for arbitrary controls.
|
||||
Each specifying the value of an axis between -1.0 and 1.0 depending on how far the axis is pushed, 0 for nothing.
|
||||
The first 2 values are used by most joysticks and gamepads for directional control. 3rd and 4th values are only on some joysticks and can be used for arbitrary controls (see the sketch after this list).
|
||||
|
||||
* left:[-1.0, 0.0, ...]
|
||||
* right:[1.0, 0.0, ...]
|
||||
|
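A sketch of reading these values from a joystick sensor on the current controller (the sensor name is an assumption):

.. code-block:: python

   import bge

   cont = bge.logic.getCurrentController()
   joy = cont.sensors["Joystick"]   # assumed sensor name

   x_axis, y_axis = joy.axisValues[0], joy.axisValues[1]
   # e.g. move the owner with the first two axes
   cont.owner.applyMovement((x_axis * 0.1, -y_axis * 0.1, 0.0), True)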
File diff suppressed because it is too large
@@ -1,8 +1,7 @@
|
||||
..
|
||||
This document is appended to the auto generated bmesh api doc to avoid clogging up the C files with details.
|
||||
to test this run:
|
||||
./blender.bin -b -noaudio -P doc/python_api/sphinx_doc_gen.py -- \
|
||||
--partial bmesh* ; cd doc/python_api ; sphinx-build sphinx-in sphinx-out ; cd ../../
|
||||
./blender.bin -b -noaudio -P doc/python_api/sphinx_doc_gen.py -- --partial bmesh* ; cd doc/python_api ; sphinx-build sphinx-in sphinx-out ; cd ../../
|
||||
|
||||
|
||||
Submodules:
|
||||
@@ -23,7 +22,7 @@ The features exposed closely follow the C API,
|
||||
giving python access to the functions used by blenders own mesh editing tools.
|
||||
|
||||
For an overview of BMesh data types and how they reference each other see:
|
||||
`BMesh Design Document <https://wiki.blender.org/index.php/Dev:Source/Modeling/BMesh/Design>`_ .
|
||||
`BMesh Design Document <http://wiki.blender.org/index.php/Dev:2.6/Source/Modeling/BMesh/Design>`_ .
|
||||
|
||||
|
||||
.. note::
|
||||
@@ -31,16 +30,17 @@ For an overview of BMesh data types and how they reference each other see:
|
||||
**Disk** and **Radial** data is not exposed by the python api since this is for internal use only.
|
||||
|
||||
|
||||
.. warning:: TODO items are...
|
||||
.. warning::
|
||||
|
||||
TODO items are...
|
||||
|
||||
* add access to BMesh **walkers**
|
||||
* add custom-data manipulation functions add/remove/rename.
|
||||
|
||||
|
||||
Example Script
|
||||
--------------
|
||||
|
||||
.. literalinclude:: __/__/__/release/scripts/templates_py/bmesh_simple.py
|
||||
.. literalinclude:: ../../../release/scripts/templates_py/bmesh_simple.py
|
||||
|
||||
|
||||
Stand-Alone Module
|
||||
@@ -59,9 +59,9 @@ There are 2 ways to access BMesh data, you can create a new BMesh by converting
|
||||
:class:`bpy.types.BlendData.meshes` or by accessing the current edit mode mesh.
|
||||
see: :class:`bmesh.types.BMesh.from_mesh` and :mod:`bmesh.from_edit_mesh` respectively.
|
||||
|
||||
When explicitly converting from mesh data python **owns** the data, that is to say -
|
||||
that the mesh only exists while python holds a reference to it,
|
||||
and the script is responsible for putting it back into a mesh data-block when the edits are done.
|
||||
When explicitly converting from mesh data python **owns** the data, that is to say - that the mesh only exists while
|
||||
python holds a reference to it, and the script is responsible for putting it back into a mesh data-block when the edits
|
||||
are done.
|
||||
|
||||
Note that unlike :mod:`bpy`, a BMesh does not necessarily correspond to data in the currently open blend file,
|
||||
a BMesh can be created, edited and freed without the user ever seeing or having access to it.
|
||||
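A minimal sketch of that ownership pattern (the mesh name is an assumption):

.. code-block:: python

   import bpy
   import bmesh

   me = bpy.data.meshes["Mesh"]   # assumed mesh name

   bm = bmesh.new()               # python owns this BMesh
   bm.from_mesh(me)

   # ... edit bm here ...

   bm.to_mesh(me)                 # write the edits back into the mesh data-block
   bm.free()                      # explicitly release it when done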
|
@@ -18,7 +18,7 @@ amongst our own scripts and make it easier to use python scripts from other proj
|
||||
|
||||
Using our style guide for your own scripts makes it easier if you eventually want to contribute them to blender.
|
||||
|
||||
This style guide is known as pep8 and can be found `here <https://www.python.org/dev/peps/pep-0008/>`_
|
||||
This style guide is known as pep8 and can be found `here <http://www.python.org/dev/peps/pep-0008>`_
|
||||
|
||||
A brief listing of pep8 criteria.
|
||||
|
||||
@@ -316,7 +316,7 @@ use to join a list of strings (the list may be temporary). In the following exam
|
||||
|
||||
|
||||
Join is fastest on many strings,
|
||||
`string formatting <https://wiki.blender.org/index.php/Dev:Source/Modeling/BMesh/Design>`__
|
||||
`string formatting <http://docs.python.org/py3k/library/string.html#string-formatting>`__
|
||||
is quite fast too (better for converting data types). String arithmetic is slowest.
|
||||
|
||||
|
||||
|
@@ -1,4 +1,3 @@
|
||||
|
||||
*******
|
||||
Gotchas
|
||||
*******
|
||||
@@ -39,6 +38,7 @@ but some operators are more picky about when they run.
|
||||
In most cases you can figure out what context an operator needs
|
||||
simply by seeing how it's used in Blender and thinking about what it does.
|
||||
|
||||
|
||||
Unfortunately if you're still stuck - the only way to **really** know
|
||||
what's going on is to read the source code for the poll function and see what it's checking.
|
||||
|
||||
@@ -82,6 +82,7 @@ it should be reported to the bug tracker.
|
||||
Stale Data
|
||||
==========
|
||||
|
||||
|
||||
No updates after setting values
|
||||
-------------------------------
|
||||
|
||||
@@ -173,8 +174,8 @@ In this situation you can...
|
||||
|
||||
.. _info_gotcha_mesh_faces:
|
||||
|
||||
N-Gons and Tessellation Faces
|
||||
=============================
|
||||
NGons and Tessellation Faces
|
||||
============================
|
||||
|
||||
Since 2.63 NGons are supported, this adds some complexity
|
||||
since in some cases you still need to access triangles/quads (some exporters, for example).
|
||||
@@ -508,7 +509,7 @@ Unicode Problems
|
||||
Python supports many different encodings so there is nothing stopping you from
|
||||
writing a script in ``latin1`` or ``iso-8859-15``.
|
||||
|
||||
See `pep-0263 <https://www.python.org/dev/peps/pep-0263/>`_
|
||||
See `pep-0263 <http://www.python.org/dev/peps/pep-0263/>`_
|
||||
|
||||
However this complicates matters for Blender's Python API because ``.blend`` files don't have an explicit encoding.
|
||||
|
||||
@@ -656,7 +657,7 @@ Here are some general hints to avoid running into these problems.
|
||||
.. note::
|
||||
|
||||
To find the line of your script that crashes you can use the ``faulthandler`` module.
|
||||
See the `faulthandler docs <https://docs.python.org/dev/library/faulthandler.html>`_.
|
||||
See `faulthandler docs <http://docs.python.org/dev/library/faulthandler.html>`_.
|
||||
|
||||
While the crash may be in Blenders C/C++ code,
|
||||
this can help a lot to track down the area of the script that causes the crash.
|
||||
|
@@ -43,7 +43,8 @@ scene manipulation, automation, defining your own toolset and customization.
|
||||
|
||||
On startup Blender scans the ``scripts/startup/`` directory for Python modules and imports them.
|
||||
The exact location of this directory depends on your installation.
|
||||
See the :ref:`directory layout docs <blender_manual:getting-started_installing-config-directories>`.
|
||||
`See the directory layout docs
|
||||
<https://www.blender.org/manual/getting_started/installing_blender/directorylayout.html>`__
|
||||
|
||||
|
||||
Script Loading
|
||||
@@ -76,22 +77,22 @@ To run as modules:
|
||||
- The obvious way, ``import some_module`` command from the text window or interactive console.
|
||||
- Open as a text block and tick "Register" option, this will load with the blend file.
|
||||
- copy into one of the directories ``scripts/startup``, where they will be automatically imported on startup.
|
||||
- define as an add-on, enabling the add-on will load it as a Python module.
|
||||
- define as an addon, enabling the addon will load it as a Python module.
|
||||
|
||||
|
||||
Add-ons
|
||||
Addons
|
||||
------
|
||||
|
||||
Some of Blenders functionality is best kept optional,
|
||||
alongside scripts loaded at startup we have add-ons which are kept in their own directory ``scripts/addons``,
|
||||
alongside scripts loaded at startup we have addons which are kept in their own directory ``scripts/addons``,
|
||||
and only load on startup if selected from the user preferences.
|
||||
|
||||
The only difference between add-ons and built-in Python modules is that add-ons must contain a ``bl_info``
|
||||
The only difference between addons and built-in Python modules is that addons must contain a ``bl_info``
|
||||
variable which Blender uses to read metadata such as name, author, category and URL.
|
||||
|
||||
The User Preferences add-on listing uses **bl_info** to display information about each add-on.
|
||||
The user preferences addon listing uses **bl_info** to display information about each addon.
|
||||
|
||||
`See Add-ons <https://wiki.blender.org/index.php/Dev:Py/Scripts/Guidelines/Addons>`__
|
||||
`See Addons <http://wiki.blender.org/index.php/Dev:2.5/Py/Scripts/Guidelines/Addons>`__
|
||||
for details on the ``bl_info`` dictionary.
|
||||
|
||||
|
||||
@@ -222,7 +223,7 @@ These functions usually appear at the bottom of the script containing class regi
|
||||
You can also use them for internal purposes setting up data for your own tools but take care
|
||||
since register won't re-run when a new blend file is loaded.
|
||||
|
||||
The register/unregister calls are used so it's possible to toggle add-ons and reload scripts while Blender runs.
|
||||
The register/unregister calls are used so it's possible to toggle addons and reload scripts while Blender runs.
|
||||
If the register calls were placed in the body of the script, registration would be called on import,
|
||||
meaning there would be no distinction between importing a module or loading its classes into Blender.
|
||||
|
||||
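A minimal sketch of how these functions are typically written (the operator class here is a placeholder):

.. code-block:: python

   import bpy

   class SimpleOperator(bpy.types.Operator):
       bl_idname = "object.simple_operator"
       bl_label = "Simple Operator"

       def execute(self, context):
           return {'FINISHED'}

   def register():
       bpy.utils.register_class(SimpleOperator)

   def unregister():
       bpy.utils.unregister_class(SimpleOperator)

   # running in the text editor calls register() directly;
   # when enabled as an add-on, Blender calls register()/unregister() for you
   if __name__ == "__main__":
       register()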
|
@@ -51,7 +51,8 @@ A quick list of helpful things to know before starting:
|
||||
| ``scripts/startup/bl_operators`` for operators.
|
||||
|
||||
Exact location depends on platform, see:
|
||||
:ref:`Configuration and Data Paths <blender_manual:getting-started_installing-config-directories>`.
|
||||
`Configuration and Data Paths
|
||||
<https://www.blender.org/manual/getting_started/installing_blender/directorylayout.html>`__.
|
||||
|
||||
|
||||
Running Scripts
|
||||
@@ -150,7 +151,7 @@ Data Creation/Removal
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Those of you familiar with other Python API's may be surprised that
|
||||
new data-blocks in the bpy API can't be created by calling the class:
|
||||
new datablocks in the bpy API can't be created by calling the class:
|
||||
|
||||
>>> bpy.types.Mesh()
|
||||
Traceback (most recent call last):
|
||||
@@ -304,7 +305,7 @@ In Python, this is done by defining a class, which is a subclass of an existing
|
||||
Example Operator
|
||||
----------------
|
||||
|
||||
.. literalinclude:: __/__/__/release/scripts/templates_py/operator_simple.py
|
||||
.. literalinclude:: ../../../release/scripts/templates_py/operator_simple.py
|
||||
|
||||
Once this script runs, ``SimpleOperator`` is registered with Blender
|
||||
and can be called from the operator search popup or added to the toolbar.
|
||||
@@ -335,7 +336,7 @@ Example Panel
|
||||
Panels register themselves as a class, like an operator.
|
||||
Notice the extra ``bl_`` variables used to set the context they display in.
|
||||
|
||||
.. literalinclude:: __/__/__/release/scripts/templates_py/ui_panel_simple.py
|
||||
.. literalinclude:: ../../../release/scripts/templates_py/ui_panel_simple.py
|
||||
|
||||
To run the script:
|
||||
|
||||
@@ -392,11 +393,11 @@ so these are accessed as normal Python types.
|
||||
Internal Types
|
||||
--------------
|
||||
|
||||
Used for Blender data-blocks and collections: :class:`bpy.types.bpy_struct`
|
||||
Used for Blender datablocks and collections: :class:`bpy.types.bpy_struct`
|
||||
|
||||
For data that contains its own attributes groups/meshes/bones/scenes... etc.
|
||||
|
||||
There are 2 main types that wrap Blenders data, one for data-blocks
|
||||
There are 2 main types that wrap Blenders data, one for datablocks
|
||||
(known internally as ``bpy_struct``), another for properties.
|
||||
|
||||
>>> bpy.context.object
|
||||
|
@@ -27,7 +27,7 @@ There are 3 main uses for the terminal, these are:
|
||||
|
||||
.. note::
|
||||
|
||||
For Linux and macOS users this means starting the terminal first, then running Blender from within it.
|
||||
For Linux and OSX users this means starting the terminal first, then running Blender from within it.
|
||||
On Windows the terminal can be enabled from the help menu.
|
||||
|
||||
|
||||
@@ -306,7 +306,7 @@ Advantages include:
|
||||
This is marked advanced because to run Blender as a Python module requires a special build option.
|
||||
|
||||
For instructions on building see
|
||||
`Building Blender as a Python module <https://wiki.blender.org/index.php/User:Ideasman42/BlenderAsPyModule>`_
|
||||
`Building Blender as a Python module <http://wiki.blender.org/index.php/User:Ideasman42/BlenderAsPyModule>`_
|
||||
|
||||
|
||||
Python Safety (Build Option)
|
||||
|
@@ -1,6 +1,6 @@
|
||||
|
||||
Add-on Tutorial
|
||||
###############
|
||||
Addon Tutorial
|
||||
##############
|
||||
|
||||
************
|
||||
Introduction
|
||||
@@ -36,7 +36,6 @@ Suggested reading before starting this tutorial.
|
||||
To best troubleshoot any error message Python prints while writing scripts you run blender with from a terminal,
|
||||
see :ref:`Use The Terminal <use_the_terminal>`.
|
||||
|
||||
|
||||
Documentation Links
|
||||
===================
|
||||
|
||||
@@ -47,48 +46,51 @@ While going through the tutorial you may want to look into our reference documen
|
||||
- :mod:`bpy.context` api reference. -
|
||||
*Handy to have a list of available items your script may operate on.*
|
||||
- :class:`bpy.types.Operator`. -
|
||||
*The following add-ons define operators, these docs give details and more examples of operators.*
|
||||
*The following addons define operators, these docs give details and more examples of operators.*
|
||||
|
||||
|
||||
*******
|
||||
Add-ons
|
||||
*******
|
||||
******
|
||||
Addons
|
||||
******
|
||||
|
||||
What is an Add-on?
|
||||
==================
|
||||
|
||||
An add-on is simply a Python module with some additional requirements so Blender can display it in a list with useful
|
||||
What is an Addon?
|
||||
=================
|
||||
|
||||
An addon is simply a Python module with some additional requirements so Blender can display it in a list with useful
|
||||
information.
|
||||
|
||||
To give an example, here is the simplest possible add-on.
|
||||
To give an example, here is the simplest possible addon.
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
bl_info = {"name": "My Test Add-on", "category": "Object"}
|
||||
bl_info = {"name": "My Test Addon", "category": "Object"}
|
||||
def register():
|
||||
print("Hello World")
|
||||
def unregister():
|
||||
print("Goodbye World")
|
||||
|
||||
|
||||
- ``bl_info`` is a dictionary containing add-on metadata such as the title,
|
||||
version and author to be displayed in the user preferences add-on list.
|
||||
- ``register`` is a function which only runs when enabling the add-on,
|
||||
this means the module can be loaded without activating the add-on.
|
||||
- ``unregister`` is a function to unload anything setup by ``register``, this is called when the add-on is disabled.
|
||||
- ``bl_info`` is a dictionary containing addon meta-data such as the title, version and author to be displayed in the
|
||||
user preferences addon list.
|
||||
- ``register`` is a function which only runs when enabling the addon, this means the module can be loaded without
|
||||
activating the addon.
|
||||
- ``unregister`` is a function to unload anything setup by ``register``, this is called when the addon is disabled.
|
||||
|
||||
|
||||
Notice this add-on does not do anything related to Blender, (the :mod:`bpy` module is not imported for example).
|
||||
|
||||
This is a contrived example of an add-on that serves to illustrate the point
|
||||
that the base requirements of an add-on are simple.
|
||||
Notice this addon does not do anything related to Blender, (the :mod:`bpy` module is not imported for example).
|
||||
|
||||
An add-on will typically register operators, panels, menu items etc, but it's worth noting that _any_ script can do this,
|
||||
This is a contrived example of an addon that serves to illustrate the point
|
||||
that the base requirements of an addon are simple.
|
||||
|
||||
An addon will typically register operators, panels, menu items etc, but it's worth noting that _any_ script can do this,
|
||||
when executed from the text editor or even the interactive console - there is nothing inherently different about an
|
||||
add-on that allows it to integrate with Blender, such functionality is just provided by the :mod:`bpy` module for any
|
||||
addon that allows it to integrate with Blender, such functionality is just provided by the :mod:`bpy` module for any
|
||||
script to access.
|
||||
|
||||
So an add-on is just a way to encapsulate a Python module in a way a user can easily utilize.
|
||||
So an addon is just a way to encapsulate a Python module in a way a user can easily utilize.
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -97,14 +99,14 @@ So an add-on is just a way to encapsulate a Python module in a way a user can ea
|
||||
Messages will be printed when enabling and disabling.
|
||||
|
||||
|
||||
Your First Add-on
|
||||
=================
|
||||
Your First Addon
|
||||
================
|
||||
|
||||
The simplest possible add-on above is useful as an example but not much else.
|
||||
This next add-on is simple but shows how to integrate a script into Blender using an ``Operator``
|
||||
The simplest possible addon above was useful as an example but not much else.
|
||||
This next addon is simple but shows how to integrate a script into Blender using an ``Operator``
|
||||
which is the typical way to define a tool accessed from menus, buttons and keyboard shortcuts.
|
||||
|
||||
For the first example we will make a script that simply moves all objects in a scene.
|
||||
For the first example we'll make a script that simply moves all objects in a scene.
|
||||
|
||||
|
||||
Write The Script
|
||||
@@ -121,14 +123,20 @@ Add the following script to the text editor in Blender.
|
||||
obj.location.x += 1.0
|
||||
|
||||
|
||||
Click the :ref:`Run Script button <blender_manual:editors-text-run-script>`,
|
||||
all objects in the active scene are moved by 1.0 Blender unit.
|
||||
.. image:: run_script.png
|
||||
:width: 924px
|
||||
:align: center
|
||||
:height: 574px
|
||||
:alt: Run Script button
|
||||
|
||||
Click the Run Script button, all objects in the active scene are moved by 1.0 Blender unit.
|
||||
Next we'll make this script into an addon.
|
||||
|
||||
|
||||
Write the Add-on (Simple)
|
||||
-------------------------
|
||||
Write the Addon (Simple)
|
||||
------------------------
|
||||
|
||||
This add-on takes the body of the script above, and adds them to an operator's ``execute()`` function.
|
||||
This addon takes the body of the script above, and adds them to an operator's ``execute()`` function.
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
@@ -165,7 +173,7 @@ This add-on takes the body of the script above, and adds them to an operator's `
|
||||
|
||||
|
||||
# This allows you to run the script directly from blenders text editor
|
||||
# to test the add-on without having to install it.
|
||||
# to test the addon without having to install it.
|
||||
if __name__ == "__main__":
|
||||
register()
|
||||
|
||||
@@ -198,33 +206,33 @@ Do this by pressing :kbd:`Spacebar` to bring up the operator search dialog and t
|
||||
|
||||
The objects should move as before.
|
||||
|
||||
*Keep this add-on open in Blender for the next step - Installing.*
|
||||
*Keep this addon open in Blender for the next step - Installing.*
|
||||
|
||||
|
||||
Install The Add-on
|
||||
------------------
|
||||
Install The Addon
|
||||
-----------------
|
||||
|
||||
Once you have your add-on within Blender's text editor,
|
||||
Once you have your addon within Blender's text editor,
|
||||
you will want to be able to install it so it can be enabled in the user preferences to load on startup.
|
||||
|
||||
Even though the add-on above is a test, let's go through the steps anyway so you know how to do it for later.
|
||||
Even though the addon above is a test, let's go through the steps anyway so you know how to do it for later.
|
||||
|
||||
To install the Blender text as an add-on you will first have to save it to disk, take care to obey the naming
|
||||
To install the Blender text as an addon you will first have to save it to disk, take care to obey the naming
|
||||
restrictions that apply to Python modules and end with a ``.py`` extension.
|
||||
|
||||
Once the file is on disk, you can install it as you would for an add-on downloaded online.
|
||||
Once the file is on disk, you can install it as you would for an addon downloaded online.
|
||||
|
||||
Open the user :menuselection:`File --> User Preferences`,
|
||||
Select the *Add-on* section, press *Install Add-on...* and select the file.
|
||||
Open the user :menuselection:`File -> User Preferences`,
|
||||
Select the *Addon* section, press *Install Addon...* and select the file.
|
||||
|
||||
Now the add-on will be listed and you can enable it by pressing the check-box,
|
||||
Now the addon will be listed and you can enable it by pressing the check-box,
|
||||
if you want it to be enabled on restart, press *Save as Default*.
|
||||
|
||||
.. note::
|
||||
|
||||
The destination of the add-on depends on your Blender configuration.
|
||||
When installing an add-on the source and destination path are printed in the console.
|
||||
You can also find add-on path locations by running this in the Python console.
|
||||
The destination of the addon depends on your Blender configuration.
|
||||
When installing an addon the source and destination path are printed in the console.
|
||||
You can also find addon path locations by running this in the Python console.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@@ -232,20 +240,20 @@ if you want it to be enabled on restart, press *Save as Default*.
|
||||
print(addon_utils.paths())
|
||||
|
||||
More is written on this topic here:
|
||||
:ref:`Directory Layout <blender_manual:getting-started_installing-config-directories>`.
|
||||
`Directory Layout <https://www.blender.org/manual/getting_started/installing_blender/directorylayout.html>`_
|
||||
|
||||
|
||||
Your Second Add-on
|
||||
==================
|
||||
Your Second Addon
|
||||
=================
|
||||
|
||||
For our second add-on, we will focus on object instancing - that is, to make linked copies of an object in a
|
||||
For our second addon, we will focus on object instancing - that is, to make linked copies of an object in a
|
||||
similar way to what you may have seen with the array modifier.
|
||||
|
||||
|
||||
Write The Script
|
||||
----------------
|
||||
|
||||
As before, first we will start with a script, develop it, then convert into an add-on.
|
||||
As before, first we will start with a script, develop it, then convert into an addon.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@@ -316,17 +324,17 @@ allows vectors to be multiplied by numbers and matrices.
|
||||
|
||||
If you are interested in this area, read into :class:`mathutils.Vector` - there are many handy utility functions
|
||||
such as getting the angle between vectors, cross product, dot products
|
||||
as well as more advanced functions in :mod:`mathutils.geometry` such as Bézier Spline interpolation and
|
||||
as well as more advanced functions in :mod:`mathutils.geometry` such as bezier spline interpolation and
|
||||
ray-triangle intersection.
|
||||
|
||||
For now we will focus on making this script an add-on, but it's good to know that this 3D math module is available and
|
||||
For now we'll focus on making this script an addon, but it's good to know that this 3D math module is available and
|
||||
can help you with more advanced functionality later on.
|
||||
|
||||
|
||||
Write the Add-on
|
||||
----------------
|
||||
Write the Addon
|
||||
---------------
|
||||
|
||||
The first step is to convert the script as-is into an add-on.
|
||||
The first step is to convert the script as-is into an addon.
|
||||
|
||||
|
||||
.. code-block:: python
|
||||
@@ -373,7 +381,7 @@ The first step is to convert the script as-is into an add-on.
|
||||
register()
|
||||
|
||||
|
||||
Everything here has been covered in the previous steps, you may want to try running the add-on still
|
||||
Everything here has been covered in the previous steps, you may want to try running the addon still
|
||||
and consider what could be done to make it more useful.
|
||||
|
||||
|
||||
@@ -426,7 +434,7 @@ however the link above includes examples of more advanced property usage.
|
||||
Menu Item
|
||||
^^^^^^^^^
|
||||
|
||||
Add-ons can add to the user interface of existing panels, headers and menus defined in Python.
|
||||
Addons can add to the user interface of existing panels, headers and menus defined in Python.
|
||||
|
||||
For this example we'll add to an existing menu.
|
||||
|
||||
@@ -456,7 +464,7 @@ For docs on extending menus see: :doc:`bpy.types.Menu`.
|
||||
Keymap
|
||||
^^^^^^
|
||||
|
||||
In Blender, add-ons have their own keymaps so as not to interfere with Blender's built-in key-maps.
|
||||
In Blender addons have their own key-maps so as not to interfere with Blender's built-in key-maps.
|
||||
|
||||
In the example below, a new object-mode :class:`bpy.types.KeyMap` is added,
|
||||
then a :class:`bpy.types.KeyMapItem` is added to the key-map which references our newly added operator,
|
||||
@@ -494,7 +502,7 @@ this allows you to have multiple keys accessing the same operator with different
|
||||
|
||||
.. note::
|
||||
|
||||
While :kbd:`Ctrl-Shift-Space` isn't a default Blender key shortcut, it's hard to make sure add-ons won't
|
||||
While :kbd:`Ctrl-Shift-Space` isn't a default Blender key shortcut, it's hard to make sure addons won't
|
||||
overwrite each other's keymaps. At least take care when assigning keys that they don't
|
||||
conflict with important functionality within Blender.
|
||||
|
||||
@@ -598,14 +606,14 @@ After selecting it from the menu, you can choose how many instance of the cube y
|
||||
.. note::
|
||||
|
||||
Directly executing the script multiple times will add the menu each time too.
|
||||
While not useful behavior, there's nothing to worry about since add-ons won't register themselves multiple
|
||||
While not useful behavior, there's nothing to worry about since addons won't register themselves multiple
|
||||
times when enabled through the user preferences.
|
||||
|
||||
|
||||
Conclusions
|
||||
===========
|
||||
|
||||
Add-ons can encapsulate certain functionality neatly for writing tools to improve your work-flow or for writing utilities
|
||||
Addons can encapsulate certain functionality neatly for writing tools to improve your work-flow or for writing utilities
|
||||
for others to use.
|
||||
|
||||
While there are limits to what Python can do within Blender, there is certainly a lot that can be achieved without
|
||||
@@ -628,8 +636,8 @@ Here are some sites you might like to check on after completing this tutorial.
|
||||
*For more background details on Blender/Python integration.*
|
||||
- `How to Think Like a Computer Scientist <http://interactivepython.org/courselib/static/thinkcspy/index.html>`_ -
|
||||
*Great info for those who are still learning Python.*
|
||||
- `Blender Development (Wiki) <https://wiki.blender.org/index.php/Dev:Contents>`_ -
|
||||
- `Blender Development (Wiki) <http://wiki.blender.org/index.php/Dev:Contents>`_ -
|
||||
*Blender Development, general information and helpful links.*
|
||||
- `Blender Artists (Coding Section) <https://blenderartists.org/forum/forumdisplay.php?47-Coding>`_ -
|
||||
- `Blender Artists (Coding Section) <http://blenderartists.org/forum/forumdisplay.php?47-Coding>`_ -
|
||||
*forum where people ask Python development questions*
|
||||
|
||||
|
@@ -57,7 +57,7 @@ Operator Example
|
||||
++++++++++++++++
|
||||
This script shows how operators can be used to model a link of a chain.
|
||||
|
||||
.. literalinclude:: __/examples/bmesh.ops.1.py
|
||||
.. literalinclude:: ../examples/bmesh.ops.1.py
|
||||
"""
|
||||
|
||||
|
||||
|
@@ -27,7 +27,7 @@ output from this tool should be added into "doc/python_api/rst/change_log.rst"
|
||||
blender --background --python doc/python_api/sphinx_changelog_gen.py -- --dump
|
||||
|
||||
# create changelog
|
||||
blender --background --factory-startup --python doc/python_api/sphinx_changelog_gen.py -- \
|
||||
blender --background --python doc/python_api/sphinx_changelog_gen.py -- \
|
||||
--api_from blender_2_63_0.py \
|
||||
--api_to blender_2_64_0.py \
|
||||
--api_out changes.rst
|
||||
@@ -48,8 +48,7 @@ python doc/python_api/sphinx_changelog_gen.py -- \
|
||||
'''
|
||||
{"module.name":
|
||||
{"parent.class":
|
||||
{"basic_type", "member_name":
|
||||
("Name", type, range, length, default, descr, f_args, f_arg_types, f_ret_types)}, ...
|
||||
{"basic_type", "member_name": ("Name", type, range, length, default, descr, f_args, f_arg_types, f_ret_types)}, ...
|
||||
}, ...
|
||||
}
|
||||
'''
|
||||
@@ -100,34 +99,34 @@ def api_dump():
|
||||
prop_range = None
|
||||
|
||||
dump_class[prop_id] = (
|
||||
"prop_rna", # basic_type
|
||||
prop.name, # name
|
||||
prop_type, # type
|
||||
prop_range, # range
|
||||
prop_length, # length
|
||||
prop.default, # default
|
||||
prop.description, # descr
|
||||
Ellipsis, # f_args
|
||||
Ellipsis, # f_arg_types
|
||||
Ellipsis, # f_ret_types
|
||||
)
|
||||
"prop_rna", # basic_type
|
||||
prop.name, # name
|
||||
prop_type, # type
|
||||
prop_range, # range
|
||||
prop_length, # length
|
||||
prop.default, # default
|
||||
prop.description, # descr
|
||||
Ellipsis, # f_args
|
||||
Ellipsis, # f_arg_types
|
||||
Ellipsis, # f_ret_types
|
||||
)
|
||||
del props
|
||||
|
||||
# python props, tricky since we don't know much about them.
|
||||
for prop_id, attr in struct_info.get_py_properties():
|
||||
|
||||
dump_class[prop_id] = (
|
||||
"prop_py", # basic_type
|
||||
Ellipsis, # name
|
||||
Ellipsis, # type
|
||||
Ellipsis, # range
|
||||
Ellipsis, # length
|
||||
Ellipsis, # default
|
||||
attr.__doc__, # descr
|
||||
Ellipsis, # f_args
|
||||
Ellipsis, # f_arg_types
|
||||
Ellipsis, # f_ret_types
|
||||
)
|
||||
"prop_py", # basic_type
|
||||
Ellipsis, # name
|
||||
Ellipsis, # type
|
||||
Ellipsis, # range
|
||||
Ellipsis, # length
|
||||
Ellipsis, # default
|
||||
attr.__doc__, # descr
|
||||
Ellipsis, # f_args
|
||||
Ellipsis, # f_arg_types
|
||||
Ellipsis, # f_ret_types
|
||||
)
|
||||
|
||||
# kludge func -> props
|
||||
funcs = [(func.identifier, func) for func in struct_info.functions]
|
||||
@@ -138,17 +137,17 @@ def api_dump():
|
||||
func_args_type = tuple([prop.type for prop in func.args])
|
||||
|
||||
dump_class[func_id] = (
|
||||
"func_rna", # basic_type
|
||||
Ellipsis, # name
|
||||
Ellipsis, # type
|
||||
Ellipsis, # range
|
||||
Ellipsis, # length
|
||||
Ellipsis, # default
|
||||
func.description, # descr
|
||||
func_args_ids, # f_args
|
||||
func_args_type, # f_arg_types
|
||||
func_ret_types, # f_ret_types
|
||||
)
|
||||
"func_rna", # basic_type
|
||||
Ellipsis, # name
|
||||
Ellipsis, # type
|
||||
Ellipsis, # range
|
||||
Ellipsis, # length
|
||||
Ellipsis, # default
|
||||
func.description, # descr
|
||||
func_args_ids, # f_args
|
||||
func_args_type, # f_arg_types
|
||||
func_ret_types, # f_ret_types
|
||||
)
|
||||
del funcs
|
||||
|
||||
# kludge func -> props
|
||||
@@ -159,17 +158,17 @@ def api_dump():
|
||||
func_args_ids = tuple(inspect.getargspec(attr).args)
|
||||
|
||||
dump_class[func_id] = (
|
||||
"func_py", # basic_type
|
||||
Ellipsis, # name
|
||||
Ellipsis, # type
|
||||
Ellipsis, # range
|
||||
Ellipsis, # length
|
||||
Ellipsis, # default
|
||||
attr.__doc__, # descr
|
||||
func_args_ids, # f_args
|
||||
Ellipsis, # f_arg_types
|
||||
Ellipsis, # f_ret_types
|
||||
)
|
||||
"func_py", # basic_type
|
||||
Ellipsis, # name
|
||||
Ellipsis, # type
|
||||
Ellipsis, # range
|
||||
Ellipsis, # length
|
||||
Ellipsis, # default
|
||||
attr.__doc__, # descr
|
||||
func_args_ids, # f_args
|
||||
Ellipsis, # f_arg_types
|
||||
Ellipsis, # f_ret_types
|
||||
)
|
||||
del funcs
|
||||
|
||||
import pprint
|
||||
@@ -331,25 +330,21 @@ def main():
|
||||
|
||||
# When --help or no args are given, print this help
|
||||
usage_text = "Run blender in background mode with this script: "
|
||||
"blender --background --factory-startup --python %s -- [options]" % os.path.basename(__file__)
|
||||
"blender --background --python %s -- [options]" % os.path.basename(__file__)
|
||||
|
||||
epilog = "Run this before releases"
|
||||
|
||||
parser = argparse.ArgumentParser(description=usage_text, epilog=epilog)
|
||||
|
||||
parser.add_argument(
|
||||
"--dump", dest="dump", action='store_true',
|
||||
help="When set the api will be dumped into blender_version.py")
|
||||
parser.add_argument("--dump", dest="dump", action='store_true',
|
||||
help="When set the api will be dumped into blender_version.py")
|
||||
|
||||
parser.add_argument(
|
||||
"--api_from", dest="api_from", metavar='FILE',
|
||||
help="File to compare from (previous version)")
|
||||
parser.add_argument(
|
||||
"--api_to", dest="api_to", metavar='FILE',
|
||||
help="File to compare from (current)")
|
||||
parser.add_argument(
|
||||
"--api_out", dest="api_out", metavar='FILE',
|
||||
help="Output sphinx changelog")
|
||||
parser.add_argument("--api_from", dest="api_from", metavar='FILE',
|
||||
help="File to compare from (previous version)")
|
||||
parser.add_argument("--api_to", dest="api_to", metavar='FILE',
|
||||
help="File to compare from (current)")
|
||||
parser.add_argument("--api_out", dest="api_out", metavar='FILE',
|
||||
help="Output sphinx changelog")
|
||||
|
||||
args = parser.parse_args(argv) # In this example we wont use the args
|
||||
|
||||
|
@@ -24,18 +24,18 @@ SCRIPT_HELP_MSG = """
|
||||
|
||||
API dump in RST files
|
||||
---------------------
|
||||
Run this script from Blender's root path once you have compiled Blender
|
||||
Run this script from blenders root path once you have compiled blender
|
||||
|
||||
blender --background --factory-startup -noaudio --python doc/python_api/sphinx_doc_gen.py
|
||||
./blender.bin --background -noaudio --python doc/python_api/sphinx_doc_gen.py
|
||||
|
||||
This will generate python files in doc/python_api/sphinx-in/
|
||||
providing ./blender is or links to the blender executable
|
||||
providing ./blender.bin is or links to the blender executable
|
||||
|
||||
To choose sphinx-in directory:
|
||||
blender --background --factory-startup --python doc/python_api/sphinx_doc_gen.py -- --output ../python_api
|
||||
./blender.bin --background --python doc/python_api/sphinx_doc_gen.py -- --output ../python_api
|
||||
|
||||
For quick builds:
|
||||
blender --background --factory-startup --python doc/python_api/sphinx_doc_gen.py -- --partial bmesh.*
|
||||
./blender.bin --background --python doc/python_api/sphinx_doc_gen.py -- --partial bmesh.*
|
||||
|
||||
|
||||
Sphinx: HTML generation
|
||||
@@ -46,6 +46,8 @@ Sphinx: HTML generation
|
||||
cd doc/python_api
|
||||
sphinx-build sphinx-in sphinx-out
|
||||
|
||||
This requires sphinx 1.0.7 to be installed.
|
||||
|
||||
|
||||
Sphinx: PDF generation
|
||||
----------------------
|
||||
@@ -59,14 +61,14 @@ Sphinx: PDF generation
|
||||
"""
|
||||
|
||||
try:
|
||||
import bpy # Blender module
|
||||
import bpy # blender module
|
||||
except ImportError:
|
||||
print("\nERROR: this script must run from inside Blender")
|
||||
print(SCRIPT_HELP_MSG)
|
||||
import sys
|
||||
sys.exit()
|
||||
|
||||
import rna_info # Blender module
|
||||
import rna_info # blender module
|
||||
|
||||
|
||||
def rna_info_BuildRNAInfo_cache():
|
||||
@@ -84,7 +86,7 @@ import shutil
|
||||
import logging
|
||||
|
||||
from platform import platform
|
||||
PLATFORM = platform().split('-')[0].lower() # 'linux', 'darwin', 'windows'
|
||||
PLATFORM = platform().split('-')[0].lower() # 'linux', 'darwin', 'windows'
|
||||
|
||||
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
|
||||
|
||||
@@ -179,13 +181,15 @@ def handle_args():
dest="log",
default=False,
action='store_true',
help="Log the output of the api dump and sphinx|latex "
"warnings and errors (default=False).\n"
"If given, save logs in:\n"
"* OUTPUT_DIR/.bpy.log\n"
"* OUTPUT_DIR/.sphinx-build.log\n"
"* OUTPUT_DIR/.sphinx-build_pdf.log\n"
"* OUTPUT_DIR/.latex_make.log",
help=(
"Log the output of the api dump and sphinx|latex "
"warnings and errors (default=False).\n"
"If given, save logs in:\n"
"* OUTPUT_DIR/.bpy.log\n"
"* OUTPUT_DIR/.sphinx-build.log\n"
"* OUTPUT_DIR/.sphinx-build_pdf.log\n"
"* OUTPUT_DIR/.latex_make.log",
),
required=False)

# parse only the args passed after '--'
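The comment above refers to the usual Blender convention of forwarding only the arguments after a literal "--" to the script's own parser; a short sketch of that pattern (the option shown is trimmed for illustration):

```python
# Parse only the args passed after '--'; everything before it belongs to Blender.
import argparse
import sys

argv = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else []

parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", dest="output_dir", default="")  # illustrative option
args = parser.parse_args(argv)
print(args.output_dir)
```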
@@ -206,12 +210,12 @@ BPY_LOGGER.setLevel(logging.DEBUG)
"""
# for quick rebuilds
rm -rf /b/doc/python_api/sphinx-* && \
./blender -b -noaudio --factory-startup -P doc/python_api/sphinx_doc_gen.py && \
./blender.bin -b -noaudio --factory-startup -P doc/python_api/sphinx_doc_gen.py && \
sphinx-build doc/python_api/sphinx-in doc/python_api/sphinx-out

or

./blender -b -noaudio --factory-startup -P doc/python_api/sphinx_doc_gen.py -- -f -B
./blender.bin -b -noaudio --factory-startup -P doc/python_api/sphinx_doc_gen.py -- -f -B
"""

# Switch for quick testing so doc-builds don't take so long
@@ -220,12 +224,12 @@ if not ARGS.partial:
|
||||
FILTER_BPY_OPS = None
|
||||
FILTER_BPY_TYPES = None
|
||||
EXCLUDE_INFO_DOCS = False
|
||||
EXCLUDE_MODULES = []
|
||||
EXCLUDE_MODULES = ()
|
||||
|
||||
else:
|
||||
# can manually edit this too:
|
||||
# FILTER_BPY_OPS = ("import.scene", ) # allow
|
||||
# FILTER_BPY_TYPES = ("bpy_struct", "Operator", "ID") # allow
|
||||
#FILTER_BPY_OPS = ("import.scene", ) # allow
|
||||
#FILTER_BPY_TYPES = ("bpy_struct", "Operator", "ID") # allow
|
||||
EXCLUDE_INFO_DOCS = True
|
||||
EXCLUDE_MODULES = [
|
||||
"aud",
|
||||
@@ -258,7 +262,6 @@ else:
|
||||
"bpy_extras",
|
||||
"gpu",
|
||||
"gpu.offscreen",
|
||||
"idprop.types",
|
||||
"mathutils",
|
||||
"mathutils.bvhtree",
|
||||
"mathutils.geometry",
|
||||
@@ -272,7 +275,7 @@ else:
|
||||
"freestyle.shaders",
|
||||
"freestyle.types",
|
||||
"freestyle.utils",
|
||||
]
|
||||
]
|
||||
|
||||
# ------
|
||||
# Filter
|
||||
@@ -298,9 +301,7 @@ else:
|
||||
del m
|
||||
del fnmatch
|
||||
|
||||
BPY_LOGGER.debug(
|
||||
"Partial Doc Build, Skipping: %s\n" %
|
||||
"\n ".join(sorted(EXCLUDE_MODULES)))
|
||||
BPY_LOGGER.debug("Partial Doc Build, Skipping: %s\n" % "\n ".join(sorted(EXCLUDE_MODULES)))
|
||||
|
||||
#
|
||||
# done filtering
|
||||
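The partial-build branch above ends by logging which modules were excluded; the filtering itself is driven by fnmatch patterns like the "--partial bmesh.*" example earlier. The helper below is a hedged sketch of that idea, not the script's exact code, and the module list is illustrative.

```python
# Sketch of fnmatch-based filtering for a partial doc build (illustrative module list).
import fnmatch

ALL_MODULES = ["bpy.ops", "bpy.types", "bmesh", "bmesh.types", "bmesh.utils", "mathutils"]


def excluded_modules(partial_patterns):
    """Modules to skip so that only modules matching the patterns get documented."""
    patterns = partial_patterns.split()
    keep = {m for m in ALL_MODULES if any(fnmatch.fnmatchcase(m, p) for p in patterns)}
    return sorted(set(ALL_MODULES) - keep)


print(excluded_modules("bmesh.*"))
# -> ['bmesh', 'bpy.ops', 'bpy.types', 'mathutils']
```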
@@ -310,39 +311,19 @@ try:
|
||||
__import__("aud")
|
||||
except ImportError:
|
||||
BPY_LOGGER.debug("Warning: Built without 'aud' module, docs incomplete...")
|
||||
EXCLUDE_MODULES.append("aud")
|
||||
EXCLUDE_MODULES = list(EXCLUDE_MODULES) + ["aud"]
|
||||
|
||||
try:
|
||||
__import__("freestyle")
|
||||
except ImportError:
|
||||
BPY_LOGGER.debug("Warning: Built without 'freestyle' module, docs incomplete...")
|
||||
EXCLUDE_MODULES.extend([
|
||||
"freestyle",
|
||||
"freestyle.chainingiterators",
|
||||
"freestyle.functions",
|
||||
"freestyle.predicates",
|
||||
"freestyle.shaders",
|
||||
"freestyle.types",
|
||||
"freestyle.utils",
|
||||
])
|
||||
|
||||
# Source files we use, and need to copy to the OUTPUT_DIR
# to have working out-of-source builds.
# Note that ".." is replaced by "__" in the RST files,
# to avoid having to match Blender's source tree.
EXTRA_SOURCE_FILES = (
"../../../release/scripts/templates_py/bmesh_simple.py",
"../../../release/scripts/templates_py/operator_simple.py",
"../../../release/scripts/templates_py/ui_panel_simple.py",
"../../../release/scripts/templates_py/ui_previews_custom_icon.py",
"../examples/bge.constraints.py",
"../examples/bge.texture.1.py",
"../examples/bge.texture.2.py",
"../examples/bge.texture.py",
"../examples/bmesh.ops.1.py",
"../examples/bpy.app.translations.py",
)
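The ".." to "__" rename mentioned in the comment above is the same replace that copy_handwritten_extra() performs further down; applied to one of these entries it looks like this:

```python
# The ".." -> "__" rename from the comment above, applied to one EXTRA_SOURCE_FILES entry.
f_src = "../../../release/scripts/templates_py/bmesh_simple.py"
f_dst = f_src.replace("..", "__")
print(f_dst)  # -> __/__/__/release/scripts/templates_py/bmesh_simple.py
```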
EXCLUDE_MODULES = list(EXCLUDE_MODULES) + ["freestyle",
|
||||
"freestyle.chainingiterators",
|
||||
"freestyle.functions",
|
||||
"freestyle.predicates",
|
||||
"freestyle.shaders",
|
||||
"freestyle.types",
|
||||
"freestyle.utils"]
|
||||
|
||||
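The two variants above are tied to the earlier EXCLUDE_MODULES defaults: with a tuple default there is no .append()/.extend(), hence the list(EXCLUDE_MODULES) + [...] form; with a list default a plain append or extend works. In two lines:

```python
EXCLUDE_MODULES = ()                                 # tuple default (full build)
EXCLUDE_MODULES = list(EXCLUDE_MODULES) + ["aud"]    # works whether it started as tuple or list
print(EXCLUDE_MODULES)  # -> ['aud']
```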
# examples
|
||||
EXAMPLES_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, "examples"))
|
||||
@@ -358,59 +339,52 @@ RST_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, "rst"))
|
||||
# extra info, not api reference docs
|
||||
# stored in ./rst/info_*
|
||||
INFO_DOCS = (
|
||||
("info_quickstart.rst",
|
||||
"Blender/Python Quickstart: new to Blender/scripting and want to get your feet wet?"),
|
||||
("info_overview.rst",
|
||||
"Blender/Python API Overview: a more complete explanation of Python integration"),
|
||||
("info_tutorial_addon.rst",
|
||||
"Blender/Python Add-on Tutorial: a step by step guide on how to write an add-on from scratch"),
|
||||
("info_api_reference.rst",
|
||||
"Blender/Python API Reference Usage: examples of how to use the API reference docs"),
|
||||
("info_best_practice.rst",
|
||||
"Best Practice: Conventions to follow for writing good scripts"),
|
||||
("info_tips_and_tricks.rst",
|
||||
"Tips and Tricks: Hints to help you while writing scripts for Blender"),
|
||||
("info_gotcha.rst",
|
||||
"Gotcha's: some of the problems you may come up against when writing scripts"),
|
||||
)
|
||||
("info_quickstart.rst", "Blender/Python Quickstart: new to blender/scripting and want to get your feet wet?"),
|
||||
("info_overview.rst", "Blender/Python API Overview: a more complete explanation of python integration"),
|
||||
("info_tutorial_addon.rst", "Blender/Python Addon Tutorial: a step by step guide on how to write an addon from scratch"),
|
||||
("info_api_reference.rst", "Blender/Python API Reference Usage: examples of how to use the API reference docs"),
|
||||
("info_best_practice.rst", "Best Practice: Conventions to follow for writing good scripts"),
|
||||
("info_tips_and_tricks.rst", "Tips and Tricks: Hints to help you while writing scripts for blender"),
|
||||
("info_gotcha.rst", "Gotcha's: some of the problems you may come up against when writing scripts"),
|
||||
)
|
||||
|
||||
# only support for properties atm.
|
||||
RNA_BLACKLIST = {
|
||||
# XXX messes up PDF!, really a bug but for now just workaround.
|
||||
"UserPreferencesSystem": {"language", }
|
||||
}
|
||||
}
|
||||
|
||||
MODULE_GROUPING = {
|
||||
"bmesh.types": (
|
||||
("Base Mesh Type", '-'),
|
||||
"BMesh",
|
||||
("Mesh Elements", '-'),
|
||||
"BMVert",
|
||||
"BMEdge",
|
||||
"BMFace",
|
||||
"BMLoop",
|
||||
("Sequence Accessors", '-'),
|
||||
"BMElemSeq",
|
||||
"BMVertSeq",
|
||||
"BMEdgeSeq",
|
||||
"BMFaceSeq",
|
||||
"BMLoopSeq",
|
||||
"BMIter",
|
||||
("Selection History", '-'),
|
||||
"BMEditSelSeq",
|
||||
"BMEditSelIter",
|
||||
("Custom-Data Layer Access", '-'),
|
||||
"BMLayerAccessVert",
|
||||
"BMLayerAccessEdge",
|
||||
"BMLayerAccessFace",
|
||||
"BMLayerAccessLoop",
|
||||
"BMLayerCollection",
|
||||
"BMLayerItem",
|
||||
("Custom-Data Layer Types", '-'),
|
||||
"BMLoopUV",
|
||||
"BMDeformVert"
|
||||
)
|
||||
}
|
||||
("Base Mesh Type", '-'),
|
||||
"BMesh",
|
||||
("Mesh Elements", '-'),
|
||||
"BMVert",
|
||||
"BMEdge",
|
||||
"BMFace",
|
||||
"BMLoop",
|
||||
("Sequence Accessors", '-'),
|
||||
"BMElemSeq",
|
||||
"BMVertSeq",
|
||||
"BMEdgeSeq",
|
||||
"BMFaceSeq",
|
||||
"BMLoopSeq",
|
||||
"BMIter",
|
||||
("Selection History", '-'),
|
||||
"BMEditSelSeq",
|
||||
"BMEditSelIter",
|
||||
("Custom-Data Layer Access", '-'),
|
||||
"BMLayerAccessVert",
|
||||
"BMLayerAccessEdge",
|
||||
"BMLayerAccessFace",
|
||||
"BMLayerAccessLoop",
|
||||
"BMLayerCollection",
|
||||
"BMLayerItem",
|
||||
("Custom-Data Layer Types", '-'),
|
||||
"BMLoopUV",
|
||||
"BMDeformVert"
|
||||
)
|
||||
}
|
||||
|
||||
# --------------------configure compile time options----------------------------
|
||||
|
||||
@@ -418,7 +392,7 @@ MODULE_GROUPING = {
|
||||
|
||||
blender_version_strings = [str(v) for v in bpy.app.version]
|
||||
|
||||
# converting bytes to strings, due to T30154
|
||||
# converting bytes to strings, due to #30154
|
||||
BLENDER_REVISION = str(bpy.app.build_hash, 'utf_8')
|
||||
BLENDER_DATE = str(bpy.app.build_date, 'utf_8')
|
||||
|
||||
@@ -486,10 +460,10 @@ MethodDescriptorType = type(dict.get)
|
||||
GetSetDescriptorType = type(int.real)
|
||||
StaticMethodType = type(staticmethod(lambda: None))
|
||||
from types import (
|
||||
MemberDescriptorType,
|
||||
MethodType,
|
||||
FunctionType,
|
||||
)
|
||||
MemberDescriptorType,
|
||||
MethodType,
|
||||
FunctionType,
|
||||
)
|
||||
|
||||
_BPY_STRUCT_FAKE = "bpy_struct"
|
||||
_BPY_PROP_COLLECTION_FAKE = "bpy_prop_collection"
|
||||
@@ -509,7 +483,7 @@ escape_rst.trans = str.maketrans({
|
||||
"|": "\\|",
|
||||
"*": "\\*",
|
||||
"\\": "\\\\",
|
||||
})
|
||||
})
|
||||
|
||||
|
||||
def is_struct_seq(value):
|
||||
@@ -754,7 +728,7 @@ def py_c_func2sphinx(ident, fw, module_name, type_name, identifier, py_func, is_
|
||||
|
||||
def pyprop2sphinx(ident, fw, identifier, py_prop):
|
||||
'''
|
||||
Python property to sphinx
|
||||
python property to sphinx
|
||||
'''
|
||||
# readonly properties use "data" directive, variables use "attribute" directive
|
||||
if py_prop.fset is None:
|
||||
@@ -854,8 +828,7 @@ def pymodule2sphinx(basepath, module_name, module, title):
|
||||
# naughty, we also add getset's into PyStructs, this is not typical py but also not incorrect.
|
||||
|
||||
# type_name is only used for examples and messages
|
||||
# "<class 'bpy.app.handlers'>" --> bpy.app.handlers
|
||||
type_name = str(type(module)).strip("<>").split(" ", 1)[-1][1:-1]
|
||||
type_name = str(type(module)).strip("<>").split(" ", 1)[-1][1:-1] # "<class 'bpy.app.handlers'>" --> bpy.app.handlers
|
||||
if type(descr) == types.GetSetDescriptorType:
|
||||
py_descr2sphinx("", fw, descr, module_name, type_name, key)
|
||||
attribute_set.add(key)
|
||||
@@ -916,8 +889,7 @@ def pymodule2sphinx(basepath, module_name, module, title):
|
||||
for attribute, value, value_type in module_dir_value_type:
|
||||
if value_type == FunctionType:
|
||||
pyfunc2sphinx("", fw, module_name, None, attribute, value, is_class=False)
|
||||
# both the same at the moment but to be future proof
|
||||
elif value_type in {types.BuiltinMethodType, types.BuiltinFunctionType}:
|
||||
elif value_type in {types.BuiltinMethodType, types.BuiltinFunctionType}: # both the same at the moment but to be future proof
|
||||
# note: can't get args from these, so dump the string as is
|
||||
# this means any module used like this must have fully formatted docstrings.
|
||||
py_c_func2sphinx("", fw, module_name, None, attribute, value, is_class=False)
|
||||
@@ -983,7 +955,7 @@ def pymodule2sphinx(basepath, module_name, module, title):
|
||||
if type(descr) == ClassMethodDescriptorType:
|
||||
py_descr2sphinx(" ", fw, descr, module_name, type_name, key)
|
||||
|
||||
# needed for pure Python classes
|
||||
# needed for pure python classes
|
||||
for key, descr in descr_items:
|
||||
if type(descr) == FunctionType:
|
||||
pyfunc2sphinx(" ", fw, module_name, type_name, key, descr, is_class=True)
|
||||
@@ -1006,15 +978,12 @@ def pymodule2sphinx(basepath, module_name, module, title):
|
||||
|
||||
file.close()
|
||||
|
||||
# Changes in Blender will force errors here
|
||||
# Changes in blender will force errors here
|
||||
context_type_map = {
|
||||
"active_base": ("ObjectBase", False),
|
||||
"active_bone": ("EditBone", False),
|
||||
"active_gpencil_frame": ("GreasePencilLayer", True),
|
||||
"active_gpencil_layer": ("GPencilLayer", True),
|
||||
"active_gpencil_brush": ("GPencilSculptBrush", False),
|
||||
"active_gpencil_palette": ("GPencilPalette", True),
|
||||
"active_gpencil_palettecolor": ("GPencilPaletteColor", True),
|
||||
"active_node": ("Node", False),
|
||||
"active_object": ("Object", False),
|
||||
"active_operator": ("Operator", False),
|
||||
@@ -1097,10 +1066,9 @@ def pycontext2sphinx(basepath):
|
||||
fw(title_string("Context Access (bpy.context)", "="))
|
||||
fw(".. module:: bpy.context\n")
|
||||
fw("\n")
|
||||
fw("The context members available depend on the area of Blender which is currently being accessed.\n")
|
||||
fw("The context members available depend on the area of blender which is currently being accessed.\n")
|
||||
fw("\n")
|
||||
fw("Note that all context values are readonly,\n")
|
||||
fw("but may be modified through the data api or by running operators\n\n")
|
||||
fw("Note that all context values are readonly, but may be modified through the data api or by running operators\n\n")
|
||||
|
||||
def write_contex_cls():
|
||||
|
||||
@@ -1123,8 +1091,7 @@ def pycontext2sphinx(basepath):
|
||||
if prop.identifier in struct_blacklist:
|
||||
continue
|
||||
|
||||
type_descr = prop.get_type_description(
|
||||
class_fmt=":class:`bpy.types.%s`", collection_id=_BPY_PROP_COLLECTION_ID)
|
||||
type_descr = prop.get_type_description(class_fmt=":class:`bpy.types.%s`", collection_id=_BPY_PROP_COLLECTION_ID)
|
||||
fw(".. data:: %s\n\n" % prop.identifier)
|
||||
if prop.description:
|
||||
fw(" %s\n\n" % prop.description)
|
||||
@@ -1144,7 +1111,7 @@ def pycontext2sphinx(basepath):
|
||||
del write_contex_cls
|
||||
# end
|
||||
|
||||
# nasty, get strings directly from Blender because there is no other way to get it
|
||||
# nasty, get strings directly from blender because there is no other way to get it
|
||||
import ctypes
|
||||
|
||||
context_strings = (
|
||||
@@ -1180,9 +1147,7 @@ def pycontext2sphinx(basepath):
|
||||
# for member in sorted(unique):
|
||||
# print(' "%s": ("", False),' % member)
|
||||
if len(context_type_map) > len(unique):
|
||||
raise Exception(
|
||||
"Some types are not used: %s" %
|
||||
str([member for member in context_type_map if member not in unique]))
|
||||
raise Exception("Some types are not used: %s" % str([member for member in context_type_map if member not in unique]))
|
||||
else:
|
||||
pass # will have raised an error above
|
||||
|
||||
@@ -1261,11 +1226,11 @@ def pyrna2sphinx(basepath):
|
||||
fw(ident + ":%s%s: %s\n" % (id_type, identifier, type_descr))
|
||||
|
||||
def write_struct(struct):
|
||||
# if not struct.identifier.startswith("Sc") and not struct.identifier.startswith("I"):
|
||||
# return
|
||||
#if not struct.identifier.startswith("Sc") and not struct.identifier.startswith("I"):
|
||||
# return
|
||||
|
||||
# if not struct.identifier == "Object":
|
||||
# return
|
||||
#if not struct.identifier == "Object":
|
||||
# return
|
||||
|
||||
filepath = os.path.join(basepath, "bpy.types.%s.rst" % struct.identifier)
|
||||
file = open(filepath, "w", encoding="utf-8")
|
||||
@@ -1306,11 +1271,7 @@ def pyrna2sphinx(basepath):
|
||||
fw(", ".join((":class:`%s`" % base_id) for base_id in base_ids))
|
||||
fw("\n\n")
|
||||
|
||||
subclass_ids = [
|
||||
s.identifier for s in structs.values()
|
||||
if s.base is struct
|
||||
if not rna_info.rna_id_ignore(s.identifier)
|
||||
]
|
||||
subclass_ids = [s.identifier for s in structs.values() if s.base is struct if not rna_info.rna_id_ignore(s.identifier)]
|
||||
subclass_ids.sort()
|
||||
if subclass_ids:
|
||||
fw("subclasses --- \n" + ", ".join((":class:`%s`" % s) for s in subclass_ids) + "\n\n")
|
||||
@@ -1361,7 +1322,7 @@ def pyrna2sphinx(basepath):
|
||||
|
||||
fw(" :type: %s\n\n" % type_descr)
|
||||
|
||||
# Python attributes
|
||||
# python attributes
|
||||
py_properties = struct.get_py_properties()
|
||||
py_prop = None
|
||||
for identifier, py_prop in py_properties:
|
||||
@@ -1371,8 +1332,7 @@ def pyrna2sphinx(basepath):
|
||||
for func in struct.functions:
|
||||
args_str = ", ".join(prop.get_arg_default(force=False) for prop in func.args)
|
||||
|
||||
fw(" .. %s:: %s(%s)\n\n" %
|
||||
("classmethod" if func.is_classmethod else "method", func.identifier, args_str))
|
||||
fw(" .. %s:: %s(%s)\n\n" % ("classmethod" if func.is_classmethod else "method", func.identifier, args_str))
|
||||
fw(" %s\n\n" % func.description)
|
||||
|
||||
for prop in func.args:
|
||||
@@ -1383,10 +1343,8 @@ def pyrna2sphinx(basepath):
|
||||
elif func.return_values: # multiple return values
|
||||
fw(" :return (%s):\n" % ", ".join(prop.identifier for prop in func.return_values))
|
||||
for prop in func.return_values:
|
||||
# TODO, pyrna_enum2sphinx for multiple return values... actually dont
|
||||
# think we even use this but still!!!
|
||||
type_descr = prop.get_type_description(
|
||||
as_ret=True, class_fmt=":class:`%s`", collection_id=_BPY_PROP_COLLECTION_ID)
|
||||
# TODO, pyrna_enum2sphinx for multiple return values... actually dont think we even use this but still!!!
|
||||
type_descr = prop.get_type_description(as_ret=True, class_fmt=":class:`%s`", collection_id=_BPY_PROP_COLLECTION_ID)
|
||||
descr = prop.description
|
||||
if not descr:
|
||||
descr = prop.name
|
||||
@@ -1399,7 +1357,7 @@ def pyrna2sphinx(basepath):
|
||||
|
||||
fw("\n")
|
||||
|
||||
# Python methods
|
||||
# python methods
|
||||
py_funcs = struct.get_py_functions()
|
||||
py_func = None
|
||||
|
||||
@@ -1422,10 +1380,7 @@ def pyrna2sphinx(basepath):
|
||||
del lines[:]
|
||||
|
||||
if _BPY_STRUCT_FAKE:
|
||||
descr_items = [
|
||||
(key, descr) for key, descr in sorted(bpy.types.Struct.__bases__[0].__dict__.items())
|
||||
if not key.startswith("__")
|
||||
]
|
||||
descr_items = [(key, descr) for key, descr in sorted(bpy.types.Struct.__bases__[0].__dict__.items()) if not key.startswith("__")]
|
||||
|
||||
if _BPY_STRUCT_FAKE:
|
||||
for key, descr in descr_items:
|
||||
@@ -1521,28 +1476,19 @@ def pyrna2sphinx(basepath):
|
||||
fw("\n")
|
||||
|
||||
if use_subclasses:
|
||||
subclass_ids = [
|
||||
s.identifier for s in structs.values()
|
||||
if s.base is None
|
||||
if not rna_info.rna_id_ignore(s.identifier)
|
||||
]
|
||||
subclass_ids = [s.identifier for s in structs.values() if s.base is None if not rna_info.rna_id_ignore(s.identifier)]
|
||||
if subclass_ids:
|
||||
fw("subclasses --- \n" + ", ".join((":class:`%s`" % s) for s in sorted(subclass_ids)) + "\n\n")
|
||||
|
||||
fw(".. class:: %s\n\n" % class_name)
|
||||
fw(" %s\n\n" % descr_str)
|
||||
fw(" .. note::\n\n")
|
||||
fw(" Note that bpy.types.%s is not actually available from within Blender,\n"
|
||||
" it only exists for the purpose of documentation.\n\n" % class_name)
|
||||
fw(" Note that bpy.types.%s is not actually available from within blender, it only exists for the purpose of documentation.\n\n" % class_name)
|
||||
|
||||
descr_items = [
|
||||
(key, descr) for key, descr in sorted(class_value.__dict__.items())
|
||||
if not key.startswith("__")
|
||||
]
|
||||
descr_items = [(key, descr) for key, descr in sorted(class_value.__dict__.items()) if not key.startswith("__")]
|
||||
|
||||
for key, descr in descr_items:
|
||||
# GetSetDescriptorType, GetSetDescriptorType's are not documented yet
|
||||
if type(descr) == MethodDescriptorType:
|
||||
if type(descr) == MethodDescriptorType: # GetSetDescriptorType, GetSetDescriptorType's are not documented yet
|
||||
py_descr2sphinx(" ", fw, descr, "bpy.types", class_name, key)
|
||||
|
||||
for key, descr in descr_items:
|
||||
@@ -1553,21 +1499,17 @@ def pyrna2sphinx(basepath):
|
||||
# write fake classes
|
||||
if _BPY_STRUCT_FAKE:
|
||||
class_value = bpy.types.Struct.__bases__[0]
|
||||
fake_bpy_type(
|
||||
class_value, _BPY_STRUCT_FAKE,
|
||||
"built-in base class for all classes in bpy.types.", use_subclasses=True)
|
||||
fake_bpy_type(class_value, _BPY_STRUCT_FAKE, "built-in base class for all classes in bpy.types.", use_subclasses=True)
|
||||
|
||||
if _BPY_PROP_COLLECTION_FAKE:
|
||||
class_value = bpy.data.objects.__class__
|
||||
fake_bpy_type(
|
||||
class_value, _BPY_PROP_COLLECTION_FAKE,
|
||||
"built-in class used for all collections.", use_subclasses=False)
|
||||
fake_bpy_type(class_value, _BPY_PROP_COLLECTION_FAKE, "built-in class used for all collections.", use_subclasses=False)
|
||||
|
||||
# operators
def write_ops():
API_BASEURL = "https://developer.blender.org/diffusion/B/browse/master/release/scripts/ "
API_BASEURL_ADDON = "https://developer.blender.org/diffusion/BA/"
API_BASEURL_ADDON_CONTRIB = "https://developer.blender.org/diffusion/BAC/"
API_BASEURL = "http://svn.blender.org/svnroot/bf-blender/trunk/blender/release/scripts"
API_BASEURL_ADDON = "http://svn.blender.org/svnroot/bf-extensions/trunk/py/scripts"
API_BASEURL_ADDON_CONTRIB = "http://svn.blender.org/svnroot/bf-extensions/contrib/py/scripts"

op_modules = {}
for op in ops.values():
@@ -1632,13 +1574,6 @@ def write_sphinx_conf_py(basepath):
|
||||
file = open(filepath, "w", encoding="utf-8")
|
||||
fw = file.write
|
||||
|
||||
fw("import sys, os\n")
|
||||
fw("\n")
|
||||
fw("extensions = ['sphinx.ext.intersphinx']\n")
|
||||
fw("\n")
|
||||
fw("intersphinx_mapping = {'blender_manual': ('https://www.blender.org/manual/', None)}\n")
|
||||
fw("\n")
|
||||
|
||||
fw("project = 'Blender'\n")
|
||||
# fw("master_doc = 'index'\n")
|
||||
fw("copyright = u'Blender Foundation'\n")
|
||||
@@ -1650,12 +1585,11 @@ def write_sphinx_conf_py(basepath):
|
||||
|
||||
if ARGS.sphinx_theme == "blender-org":
|
||||
fw("html_theme_path = ['../']\n")
|
||||
# copied with the theme, exclude else we get an error [T28873]
|
||||
# copied with the theme, exclude else we get an error [#28873]
|
||||
fw("html_favicon = 'favicon.ico'\n") # in <theme>/static/
|
||||
|
||||
# not helpful since the source is generated, adds to upload size.
|
||||
fw("html_copy_source = False\n")
|
||||
fw("html_split_index = True\n")
|
||||
fw("\n")
|
||||
|
||||
# needed for latex, pdf gen
|
||||
@@ -1681,13 +1615,11 @@ def write_rst_contents(basepath):
|
||||
|
||||
fw(title_string("Blender Documentation Contents", "%", double=True))
|
||||
fw("\n")
|
||||
fw("Welcome, this document is an API reference for Blender %s, built %s.\n" %
|
||||
(BLENDER_VERSION_DOTS, BLENDER_DATE))
|
||||
fw("Welcome, this document is an API reference for Blender %s, built %s.\n" % (BLENDER_VERSION_DOTS, BLENDER_DATE))
|
||||
fw("\n")
|
||||
|
||||
# fw("`A PDF version of this document is also available <%s>`_\n" % BLENDER_PDF_FILENAME)
|
||||
fw("This site can be downloaded for offline use `Download the full Documentation (zipped HTML files) <%s>`_\n" %
|
||||
BLENDER_ZIP_FILENAME)
|
||||
fw("This site can be downloaded for offline use `Download the full Documentation (zipped HTML files) <%s>`_\n" % BLENDER_ZIP_FILENAME)
|
||||
|
||||
fw("\n")
|
||||
|
||||
@@ -1720,7 +1652,7 @@ def write_rst_contents(basepath):
|
||||
|
||||
# C modules
|
||||
"bpy.props",
|
||||
)
|
||||
)
|
||||
|
||||
for mod in app_modules:
|
||||
if mod not in EXCLUDE_MODULES:
|
||||
@@ -1741,10 +1673,9 @@ def write_rst_contents(basepath):
|
||||
"freestyle", "bgl", "blf",
|
||||
"gpu", "gpu.offscreen",
|
||||
"aud", "bpy_extras",
|
||||
"idprop.types",
|
||||
# bmesh, submodules are in own page
|
||||
"bmesh",
|
||||
)
|
||||
)
|
||||
|
||||
for mod in standalone_modules:
|
||||
if mod not in EXCLUDE_MODULES:
|
||||
@@ -1782,8 +1713,7 @@ def write_rst_contents(basepath):
|
||||
fw(" * mesh creation and editing functions\n")
|
||||
fw(" \n")
|
||||
fw(" These parts of the API are relatively stable and are unlikely to change significantly\n")
|
||||
fw(" * data API, access to attributes of Blender data such as mesh verts, material color,\n")
|
||||
fw(" timeline frames and scene objects\n")
|
||||
fw(" * data API, access to attributes of blender data such as mesh verts, material color, timeline frames and scene objects\n")
|
||||
fw(" * user interface functions for defining buttons, creation of menus, headers, panels\n")
|
||||
fw(" * render engine integration\n")
|
||||
fw(" * modules: bgl, mathutils & game engine.\n")
|
||||
@@ -1855,11 +1785,11 @@ def write_rst_data(basepath):
|
||||
fw(title_string("Data Access (bpy.data)", "="))
|
||||
fw(".. module:: bpy\n")
|
||||
fw("\n")
|
||||
fw("This module is used for all Blender/Python access.\n")
|
||||
fw("This module is used for all blender/python access.\n")
|
||||
fw("\n")
|
||||
fw(".. data:: data\n")
|
||||
fw("\n")
|
||||
fw(" Access to Blender's internal data\n")
|
||||
fw(" Access to blenders internal data\n")
|
||||
fw("\n")
|
||||
fw(" :type: :class:`bpy.types.BlendData`\n")
|
||||
fw("\n")
|
||||
@@ -1874,38 +1804,37 @@ def write_rst_importable_modules(basepath):
|
||||
Write the rst files of importable modules
|
||||
'''
|
||||
importable_modules = {
|
||||
# Python_modules
|
||||
"bpy.path": "Path Utilities",
|
||||
"bpy.utils": "Utilities",
|
||||
"bpy_extras": "Extra Utilities",
|
||||
# python_modules
|
||||
"bpy.path" : "Path Utilities",
|
||||
"bpy.utils" : "Utilities",
|
||||
"bpy_extras" : "Extra Utilities",
|
||||
|
||||
# C_modules
|
||||
"aud": "Audio System",
|
||||
"blf": "Font Drawing",
|
||||
"gpu.offscreen": "GPU Off-Screen Buffer",
|
||||
"bmesh": "BMesh Module",
|
||||
"bmesh.types": "BMesh Types",
|
||||
"bmesh.utils": "BMesh Utilities",
|
||||
"bmesh.geometry": "BMesh Geometry Utilities",
|
||||
"bpy.app": "Application Data",
|
||||
"bpy.app.handlers": "Application Handlers",
|
||||
"bpy.app.translations": "Application Translations",
|
||||
"bpy.props": "Property Definitions",
|
||||
"idprop.types": "ID Property Access",
|
||||
"mathutils": "Math Types & Utilities",
|
||||
"mathutils.geometry": "Geometry Utilities",
|
||||
"mathutils.bvhtree": "BVHTree Utilities",
|
||||
"mathutils.kdtree": "KDTree Utilities",
|
||||
"aud" : "Audio System",
|
||||
"blf" : "Font Drawing",
|
||||
"gpu.offscreen" : "GPU Off-Screen Buffer",
|
||||
"bmesh" : "BMesh Module",
|
||||
"bmesh.types" : "BMesh Types",
|
||||
"bmesh.utils" : "BMesh Utilities",
|
||||
"bmesh.geometry" : "BMesh Geometry Utilities",
|
||||
"bpy.app" : "Application Data",
|
||||
"bpy.app.handlers" : "Application Handlers",
|
||||
"bpy.app.translations" : "Application Translations",
|
||||
"bpy.props" : "Property Definitions",
|
||||
"mathutils" : "Math Types & Utilities",
|
||||
"mathutils.geometry" : "Geometry Utilities",
|
||||
"mathutils.bvhtree" : "BVHTree Utilities",
|
||||
"mathutils.kdtree" : "KDTree Utilities",
|
||||
"mathutils.interpolate": "Interpolation Utilities",
|
||||
"mathutils.noise": "Noise Utilities",
|
||||
"freestyle": "Freestyle Module",
|
||||
"freestyle.types": "Freestyle Types",
|
||||
"freestyle.predicates": "Freestyle Predicates",
|
||||
"freestyle.functions": "Freestyle Functions",
|
||||
"freestyle.chainingiterators": "Freestyle Chaining Iterators",
|
||||
"freestyle.shaders": "Freestyle Shaders",
|
||||
"freestyle.utils": "Freestyle Utilities",
|
||||
}
|
||||
"mathutils.noise" : "Noise Utilities",
|
||||
"freestyle" : "Freestyle Module",
|
||||
"freestyle.types" : "Freestyle Types",
|
||||
"freestyle.predicates" : "Freestyle Predicates",
|
||||
"freestyle.functions" : "Freestyle Functions",
|
||||
"freestyle.chainingiterators" : "Freestyle Chaining Iterators",
|
||||
"freestyle.shaders" : "Freestyle Shaders",
|
||||
"freestyle.utils" : "Freestyle Utilities",
|
||||
}
|
||||
for mod_name, mod_descr in importable_modules.items():
|
||||
if mod_name not in EXCLUDE_MODULES:
|
||||
module = __import__(mod_name,
|
||||
@@ -1920,7 +1849,7 @@ def copy_handwritten_rsts(basepath):
|
||||
for info, info_desc in INFO_DOCS:
|
||||
shutil.copy2(os.path.join(RST_DIR, info), basepath)
|
||||
|
||||
# TODO put this docs in Blender's code and use import as per modules above
|
||||
# TODO put this docs in blender's code and use import as per modules above
|
||||
handwritten_modules = [
|
||||
"bge.logic",
|
||||
"bge.render",
|
||||
@@ -1961,21 +1890,6 @@ def copy_handwritten_rsts(basepath):
|
||||
shutil.copy2(os.path.join(RST_DIR, f), basepath)
|
||||
|
||||
|
||||
def copy_handwritten_extra(basepath):
|
||||
for f_src in EXTRA_SOURCE_FILES:
|
||||
if os.sep != "/":
|
||||
f_src = os.sep.join(f_src.split("/"))
|
||||
|
||||
f_dst = f_src.replace("..", "__")
|
||||
|
||||
f_src = os.path.join(RST_DIR, f_src)
|
||||
f_dst = os.path.join(basepath, f_dst)
|
||||
|
||||
os.makedirs(os.path.dirname(f_dst), exist_ok=True)
|
||||
|
||||
shutil.copy2(f_src, f_dst)
|
||||
|
||||
|
||||
def rna2sphinx(basepath):
|
||||
|
||||
try:
|
||||
@@ -2007,48 +1921,35 @@ def rna2sphinx(basepath):
|
||||
# copy the other rsts
|
||||
copy_handwritten_rsts(basepath)
|
||||
|
||||
# copy source files referenced
|
||||
copy_handwritten_extra(basepath)
|
||||
|
||||
|
||||
def align_sphinx_in_to_sphinx_in_tmp(dir_src, dir_dst):
|
||||
def align_sphinx_in_to_sphinx_in_tmp():
|
||||
'''
|
||||
Move changed files from SPHINX_IN_TMP to SPHINX_IN
|
||||
'''
|
||||
import filecmp
|
||||
|
||||
# possible the dir doesn't exist when running recursively
|
||||
os.makedirs(dir_dst, exist_ok=True)
|
||||
|
||||
sphinx_dst_files = set(os.listdir(dir_dst))
|
||||
sphinx_src_files = set(os.listdir(dir_src))
|
||||
sphinx_in_files = set(os.listdir(SPHINX_IN))
|
||||
sphinx_in_tmp_files = set(os.listdir(SPHINX_IN_TMP))
|
||||
|
||||
# remove deprecated files that have been removed
|
||||
for f in sorted(sphinx_dst_files):
|
||||
if f not in sphinx_src_files:
|
||||
for f in sorted(sphinx_in_files):
|
||||
if f not in sphinx_in_tmp_files:
|
||||
BPY_LOGGER.debug("\tdeprecated: %s" % f)
|
||||
f_dst = os.path.join(dir_dst, f)
|
||||
if os.path.isdir(f_dst):
|
||||
shutil.rmtree(f_dst, True)
|
||||
else:
|
||||
os.remove(f_dst)
|
||||
os.remove(os.path.join(SPHINX_IN, f))
|
||||
|
||||
# freshen with new files.
|
||||
for f in sorted(sphinx_src_files):
|
||||
f_src = os.path.join(dir_src, f)
|
||||
f_dst = os.path.join(dir_dst, f)
|
||||
for f in sorted(sphinx_in_tmp_files):
|
||||
f_from = os.path.join(SPHINX_IN_TMP, f)
|
||||
f_to = os.path.join(SPHINX_IN, f)
|
||||
|
||||
if os.path.isdir(f_src):
|
||||
align_sphinx_in_to_sphinx_in_tmp(f_src, f_dst)
|
||||
else:
|
||||
do_copy = True
|
||||
if f in sphinx_dst_files:
|
||||
if filecmp.cmp(f_src, f_dst):
|
||||
do_copy = False
|
||||
do_copy = True
|
||||
if f in sphinx_in_files:
|
||||
if filecmp.cmp(f_from, f_to):
|
||||
do_copy = False
|
||||
|
||||
if do_copy:
|
||||
BPY_LOGGER.debug("\tupdating: %s" % f)
|
||||
shutil.copy(f_src, f_dst)
|
||||
if do_copy:
|
||||
BPY_LOGGER.debug("\tupdating: %s" % f)
|
||||
shutil.copy(f_from, f_to)
|
||||
|
||||
|
||||
def refactor_sphinx_log(sphinx_logfile):
|
||||
@@ -2135,7 +2036,7 @@ def main():
|
||||
shutil.rmtree(SPHINX_OUT_PDF, True)
|
||||
else:
|
||||
# move changed files in SPHINX_IN
|
||||
align_sphinx_in_to_sphinx_in_tmp(SPHINX_IN_TMP, SPHINX_IN)
|
||||
align_sphinx_in_to_sphinx_in_tmp()
|
||||
|
||||
# report which example files weren't used
|
||||
EXAMPLE_SET_UNUSED = EXAMPLE_SET - EXAMPLE_SET_USED
|
||||
|
@@ -3,6 +3,11 @@
# bash doc/python_api/sphinx_doc_gen.sh
# ssh upload means you need an account on the server

if [ "$1" == "" ] ; then
echo "Expected a single argument for the username on blender.org, aborting"
exit 1
fi

# ----------------------------------------------------------------------------
# Upload vars
@@ -17,15 +22,9 @@ if [ -z $BLENDER_BIN ] ; then
BLENDER_BIN="./blender.bin"
fi

if [ "$1" == "" ] ; then
echo "Expected a single argument for the username on blender.org, skipping upload step!"
DO_UPLOAD=false
else
SSH_USER=$1
SSH_HOST=$SSH_USER"@blender.org"
SSH_UPLOAD="/data/www/vhosts/www.blender.org/api" # blender_python_api_VERSION, added after
fi

SSH_USER=$1
SSH_HOST=$SSH_USER"@blender.org"
SSH_UPLOAD="/data/www/vhosts/www.blender.org/api" # blender_python_api_VERSION, added after

# ----------------------------------------------------------------------------
# Blender Version & Info
@@ -34,12 +33,10 @@ fi
# "_".join(str(v) for v in bpy.app.version)
# custom blender vars
blender_srcdir=$(dirname -- $0)/../..
blender_version_header="$blender_srcdir/source/blender/blenkernel/BKE_blender_version.h"
blender_version=$(grep "BLENDER_VERSION\s" "$blender_version_header" | awk '{print $3}')
blender_version_char=$(grep "BLENDER_VERSION_CHAR\s" "$blender_version_header" | awk '{print $3}')
blender_version_cycle=$(grep "BLENDER_VERSION_CYCLE\s" "$blender_version_header" | awk '{print $3}')
blender_subversion=$(grep "BLENDER_SUBVERSION\s" "$blender_version_header" | awk '{print $3}')
unset blender_version_header
blender_version=$(grep "BLENDER_VERSION\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender_version.h" | awk '{print $3}')
blender_version_char=$(grep "BLENDER_VERSION_CHAR\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender_version.h" | awk '{print $3}')
blender_version_cycle=$(grep "BLENDER_VERSION_CYCLE\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender_version.h" | awk '{print $3}')
blender_subversion=$(grep "BLENDER_SUBVERSION\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender_version.h" | awk '{print $3}')

if [ "$blender_version_cycle" = "release" ] ; then
BLENDER_VERSION=$(expr $blender_version / 100)_$(expr $blender_version % 100)$blender_version_char"_release"
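The expr calls above turn the integer BLENDER_VERSION from BKE_blender_version.h into the "major_minor" form used in the upload paths; the same arithmetic in Python, with example input values:

```python
# Same arithmetic as the `expr` calls above; 278 and "a" are example values only.
blender_version = 278
blender_version_char = "a"

major = blender_version // 100   # expr $blender_version / 100  -> 2
minor = blender_version % 100    # expr $blender_version % 100  -> 78
print("%d_%d%s_release" % (major, minor, blender_version_char))  # -> 2_78a_release
```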
@@ -51,8 +48,6 @@ SSH_UPLOAD_FULL=$SSH_UPLOAD/"blender_python_api_"$BLENDER_VERSION
|
||||
|
||||
SPHINXBASE=doc/python_api
|
||||
|
||||
SPHINX_WORKDIR="$(mktemp --directory --suffix=.sphinx)"
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Generate reStructuredText (blender/python only)
|
||||
@@ -64,10 +59,7 @@ if $DO_EXE_BLENDER ; then
|
||||
-noaudio \
|
||||
--factory-startup \
|
||||
--python-exit-code 1 \
|
||||
--python $SPHINXBASE/sphinx_doc_gen.py \
|
||||
-- \
|
||||
--output=$SPHINX_WORKDIR
|
||||
|
||||
--python $SPHINXBASE/sphinx_doc_gen.py
|
||||
|
||||
if (($? != 0)) ; then
|
||||
echo "Generating documentation failed, aborting"
|
||||
@@ -75,14 +67,15 @@ if $DO_EXE_BLENDER ; then
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Generate HTML (sphinx)
|
||||
|
||||
if $DO_OUT_HTML ; then
|
||||
# sphinx-build -n -b html $SPHINX_WORKDIR/sphinx-in $SPHINX_WORKDIR/sphinx-out
|
||||
# sphinx-build -n -b html $SPHINXBASE/sphinx-in $SPHINXBASE/sphinx-out
|
||||
|
||||
# annoying bug in sphinx makes it very slow unless we do this. should report.
|
||||
cd $SPHINX_WORKDIR
|
||||
cd $SPHINXBASE
|
||||
sphinx-build -b html sphinx-in sphinx-out
|
||||
|
||||
# XXX, saves space on upload and zip, should move HTML outside
|
||||
@@ -110,21 +103,20 @@ fi
|
||||
# Generate PDF (sphinx/laytex)
|
||||
|
||||
if $DO_OUT_PDF ; then
|
||||
cd $SPHINX_WORKDIR
|
||||
sphinx-build -n -b latex $SPHINX_WORKDIR/sphinx-in $SPHINX_WORKDIR/sphinx-out
|
||||
make -C $SPHINX_WORKDIR/sphinx-out
|
||||
mv $SPHINX_WORKDIR/sphinx-out/contents.pdf \
|
||||
$SPHINX_WORKDIR/sphinx-out/blender_python_reference_$BLENDER_VERSION.pdf
|
||||
sphinx-build -n -b latex $SPHINXBASE/sphinx-in $SPHINXBASE/sphinx-out
|
||||
make -C $SPHINXBASE/sphinx-out
|
||||
mv $SPHINXBASE/sphinx-out/contents.pdf $SPHINXBASE/sphinx-out/blender_python_reference_$BLENDER_VERSION.pdf
|
||||
fi
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Upload to blender servers, comment this section for testing
|
||||
|
||||
if $DO_UPLOAD ; then
|
||||
|
||||
cp $SPHINX_WORKDIR/sphinx-out/contents.html $SPHINX_WORKDIR/sphinx-out/index.html
|
||||
cp $SPHINXBASE/sphinx-out/contents.html $SPHINXBASE/sphinx-out/index.html
|
||||
ssh $SSH_USER@blender.org 'rm -rf '$SSH_UPLOAD_FULL'/*'
|
||||
rsync --progress -ave "ssh -p 22" $SPHINX_WORKDIR/sphinx-out/* $SSH_HOST:$SSH_UPLOAD_FULL/
|
||||
rsync --progress -ave "ssh -p 22" $SPHINXBASE/sphinx-out/* $SSH_HOST:$SSH_UPLOAD_FULL/
|
||||
|
||||
## symlink the dir to a static URL
|
||||
#ssh $SSH_USER@blender.org 'rm '$SSH_UPLOAD'/250PythonDoc && ln -s '$SSH_UPLOAD_FULL' '$SSH_UPLOAD'/250PythonDoc'
|
||||
@@ -142,15 +134,11 @@ if $DO_UPLOAD ; then
|
||||
|
||||
if $DO_OUT_PDF ; then
|
||||
# rename so local PDF has matching name.
|
||||
rsync --progress -ave "ssh -p 22" \
|
||||
$SPHINX_WORKDIR/sphinx-out/blender_python_reference_$BLENDER_VERSION.pdf \
|
||||
$SSH_HOST:$SSH_UPLOAD_FULL/blender_python_reference_$BLENDER_VERSION.pdf
|
||||
rsync --progress -ave "ssh -p 22" $SPHINXBASE/sphinx-out/blender_python_reference_$BLENDER_VERSION.pdf $SSH_HOST:$SSH_UPLOAD_FULL/blender_python_reference_$BLENDER_VERSION.pdf
|
||||
fi
|
||||
|
||||
if $DO_OUT_HTML_ZIP ; then
|
||||
rsync --progress -ave "ssh -p 22" \
|
||||
$SPHINX_WORKDIR/blender_python_reference_$BLENDER_VERSION.zip \
|
||||
$SSH_HOST:$SSH_UPLOAD_FULL/blender_python_reference_$BLENDER_VERSION.zip
|
||||
rsync --progress -ave "ssh -p 22" $SPHINXBASE/blender_python_reference_$BLENDER_VERSION.zip $SSH_HOST:$SSH_UPLOAD_FULL/blender_python_reference_$BLENDER_VERSION.zip
|
||||
fi
|
||||
|
||||
fi
|
||||
@@ -161,5 +149,5 @@ fi
|
||||
|
||||
echo ""
|
||||
echo "Finished! view the docs from: "
|
||||
if $DO_OUT_HTML ; then echo " html:" $SPHINX_WORKDIR/sphinx-out/contents.html ; fi
|
||||
if $DO_OUT_PDF ; then echo " pdf:" $SPHINX_WORKDIR/sphinx-out/blender_python_reference_$BLENDER_VERSION.pdf ; fi
|
||||
if $DO_OUT_HTML ; then echo " html:" $SPHINXBASE/sphinx-out/contents.html ; fi
|
||||
if $DO_OUT_PDF ; then echo " pdf:" $SPHINXBASE/sphinx-out/blender_python_reference_$BLENDER_VERSION.pdf ; fi
|
||||
|
@@ -1,186 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# Contributor(s): Bastien Montagne
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
"""
|
||||
This is a helper script to generate Blender Python API documentation (using Sphinx), and update server data using rsync.
|
||||
|
||||
You'll need to specify your user login and password, obviously.
|
||||
|
||||
Example usage:
|
||||
|
||||
./sphinx_doc_update.py --mirror ../../../docs/remote_api_backup/ --source ../.. --blender ../../../build_cmake/bin/blender --user foobar --password barfoo
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import zipfile
|
||||
|
||||
|
||||
DEFAULT_RSYNC_SERVER = "www.blender.org"
|
||||
DEFAULT_RSYNC_ROOT = "/api/"
|
||||
DEFAULT_SYMLINK_ROOT = "/data/www/vhosts/www.blender.org/api"
|
||||
|
||||
|
||||
def argparse_create():
|
||||
import argparse
|
||||
global __doc__
|
||||
|
||||
# When --help or no args are given, print this help
|
||||
usage_text = __doc__
|
||||
|
||||
parser = argparse.ArgumentParser(description=usage_text,
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter)
|
||||
|
||||
parser.add_argument(
|
||||
"--mirror", dest="mirror_dir",
|
||||
metavar='PATH', required=True,
|
||||
help="Path to local rsync mirror of api doc server")
|
||||
parser.add_argument(
|
||||
"--source", dest="source_dir",
|
||||
metavar='PATH', required=True,
|
||||
help="Path to Blender git repository")
|
||||
parser.add_argument(
|
||||
"--blender", dest="blender",
|
||||
metavar='PATH', required=True,
|
||||
help="Path to Blender executable")
|
||||
parser.add_argument(
|
||||
"--rsync-server", dest="rsync_server", default=DEFAULT_RSYNC_SERVER,
|
||||
metavar='RSYNCSERVER', type=str, required=False,
|
||||
help=("rsync server address"))
|
||||
parser.add_argument(
|
||||
"--rsync-root", dest="rsync_root", default=DEFAULT_RSYNC_ROOT,
|
||||
metavar='RSYNCROOT', type=str, required=False,
|
||||
help=("Root path of API doc on rsync server"))
|
||||
parser.add_argument(
|
||||
"--user", dest="user",
|
||||
metavar='USER', type=str, required=True,
|
||||
help=("User to login on rsync server"))
|
||||
parser.add_argument(
|
||||
"--password", dest="password",
|
||||
metavar='PASSWORD', type=str, required=True,
|
||||
help=("Password to login on rsync server"))
|
||||
|
||||
return parser
|
||||
|
||||
|
||||
def main():
|
||||
# ----------
|
||||
# Parse Args
|
||||
|
||||
args = argparse_create().parse_args()
|
||||
|
||||
rsync_base = "rsync://%s@%s:%s" % (args.user, args.rsync_server, args.rsync_root)
|
||||
|
||||
# I) Update local mirror using rsync.
|
||||
rsync_mirror_cmd = ("rsync", "--delete-after", "-avzz", rsync_base, args.mirror_dir)
|
||||
subprocess.run(rsync_mirror_cmd, env=dict(os.environ, RSYNC_PASSWORD=args.password))
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
# II) Generate doc source in temp dir.
|
||||
doc_gen_cmd = (args.blender, "--background", "-noaudio", "--factory-startup", "--python-exit-code", "1",
|
||||
"--python", "%s/doc/python_api/sphinx_doc_gen.py" % args.source_dir, "--",
|
||||
"--output", tmp_dir)
|
||||
subprocess.run(doc_gen_cmd)
|
||||
|
||||
# III) Get Blender version info.
|
||||
blenver = blenver_zip = ""
|
||||
getver_file = os.path.join(tmp_dir, "blendver.txt")
|
||||
getver_script = (""
|
||||
"import sys, bpy\n"
|
||||
"with open(sys.argv[-1], 'w') as f:\n"
|
||||
" f.write('%d_%d%s_release\\n' % (bpy.app.version[0], bpy.app.version[1], bpy.app.version_char)\n"
|
||||
" if bpy.app.version_cycle in {'rc', 'release'} else '%d_%d_%d\\n' % bpy.app.version)\n"
|
||||
" f.write('%d_%d_%d' % bpy.app.version)\n")
|
||||
get_ver_cmd = (args.blender, "--background", "-noaudio", "--factory-startup", "--python-exit-code", "1",
|
||||
"--python-expr", getver_script, "--", getver_file)
|
||||
subprocess.run(get_ver_cmd)
|
||||
with open(getver_file) as f:
|
||||
blenver, blenver_zip = f.read().split("\n")
|
||||
os.remove(getver_file)
|
||||
|
||||
# IV) Build doc.
|
||||
curr_dir = os.getcwd()
|
||||
os.chdir(tmp_dir)
|
||||
sphinx_cmd = ("sphinx-build", "-b", "html", "sphinx-in", "sphinx-out")
|
||||
subprocess.run(sphinx_cmd)
|
||||
shutil.rmtree(os.path.join("sphinx-out", ".doctrees"))
|
||||
os.chdir(curr_dir)
|
||||
|
||||
# V) Cleanup existing matching dir in server mirror (if any), and copy new doc.
|
||||
api_name = "blender_python_api_%s" % blenver
|
||||
api_dir = os.path.join(args.mirror_dir, api_name)
|
||||
if os.path.exists(api_dir):
|
||||
shutil.rmtree(api_dir)
|
||||
os.rename(os.path.join(tmp_dir, "sphinx-out"), api_dir)
|
||||
|
||||
# VI) Create zip archive.
|
||||
zip_name = "blender_python_reference_%s" % blenver_zip # We can't use 'release' postfix here...
|
||||
zip_path = os.path.join(args.mirror_dir, zip_name)
|
||||
with zipfile.ZipFile(zip_path, 'w') as zf:
|
||||
for dirname, _, filenames in os.walk(api_dir):
|
||||
for filename in filenames:
|
||||
filepath = os.path.join(dirname, filename)
|
||||
zip_filepath = os.path.join(zip_name, os.path.relpath(filepath, api_dir))
|
||||
zf.write(filepath, arcname=zip_filepath)
|
||||
os.rename(zip_path, os.path.join(api_dir, "%s.zip" % zip_name))
|
||||
|
||||
# VII) Create symlinks and html redirects.
|
||||
#~ os.symlink(os.path.join(DEFAULT_SYMLINK_ROOT, api_name, "contents.html"), os.path.join(api_dir, "index.html"))
|
||||
os.symlink("./contents.html", os.path.join(api_dir, "index.html"))
|
||||
if blenver.endswith("release"):
|
||||
symlink = os.path.join(args.mirror_dir, "blender_python_api_current")
|
||||
os.remove(symlink)
|
||||
os.symlink("./%s" % api_name, symlink)
|
||||
with open(os.path.join(args.mirror_dir, "250PythonDoc/index.html"), 'w') as f:
|
||||
f.write("<html><head><title>Redirecting...</title><meta http-equiv=\"REFRESH\""
|
||||
"content=\"0;url=../%s/\"></head><body>Redirecting...</body></html>" % api_name)
|
||||
else:
|
||||
symlink = os.path.join(args.mirror_dir, "blender_python_api_master")
|
||||
os.remove(symlink)
|
||||
os.symlink("./%s" % api_name, symlink)
|
||||
with open(os.path.join(args.mirror_dir, "blender_python_api/index.html"), 'w') as f:
|
||||
f.write("<html><head><title>Redirecting...</title><meta http-equiv=\"REFRESH\""
|
||||
"content=\"0;url=../%s/\"></head><body>Redirecting...</body></html>" % api_name)
|
||||
|
||||
# VIII) Upload (first do a dry-run so user can ensure everything is OK).
|
||||
print("Doc generated in local mirror %s, please check it before uploading "
|
||||
"(hit [Enter] to continue, [Ctrl-C] to exit):" % api_dir)
|
||||
sys.stdin.read(1)
|
||||
|
||||
rsync_mirror_cmd = ("rsync", "--dry-run", "--delete-after", "-avzz", args.mirror_dir, rsync_base)
|
||||
subprocess.run(rsync_mirror_cmd, env=dict(os.environ, RSYNC_PASSWORD=args.password))
|
||||
|
||||
print("Rsync upload simulated, please check every thing is OK (hit [Enter] to continue, [Ctrl-C] to exit):")
|
||||
sys.stdin.read(1)
|
||||
|
||||
rsync_mirror_cmd = ("rsync", "--delete-after", "-avzz", args.mirror_dir, rsync_base)
|
||||
subprocess.run(rsync_mirror_cmd, env=dict(os.environ, RSYNC_PASSWORD=args.password))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
9 extern/CMakeLists.txt (vendored)
@@ -29,14 +29,6 @@ add_subdirectory(curve_fit_nd)
|
||||
# Otherwise we get warnings here that we cant fix in external projects
|
||||
remove_strict_flags()
|
||||
|
||||
# Not a strict flag, but noisy for code we don't maintain
|
||||
if(CMAKE_COMPILER_IS_GNUCC)
|
||||
remove_cc_flag(
|
||||
"-Wmisleading-indentation"
|
||||
)
|
||||
endif()
|
||||
|
||||
|
||||
add_subdirectory(rangetree)
|
||||
add_subdirectory(wcwidth)
|
||||
|
||||
@@ -105,7 +97,6 @@ endif()
|
||||
|
||||
if(WITH_GTESTS)
|
||||
add_subdirectory(gtest)
|
||||
add_subdirectory(gmock)
|
||||
endif()
|
||||
|
||||
if(WITH_SDL AND WITH_SDL_DYNLOAD)
|
||||
|
@@ -77,7 +77,7 @@ namespace std {
|
||||
void resize(size_type new_size)
|
||||
{ resize(new_size, T()); }
|
||||
|
||||
#if defined(_VECTOR_) && (_MSC_VER<1910)
|
||||
#if defined(_VECTOR_)
|
||||
// workaround MSVC std::vector implementation
|
||||
void resize(size_type new_size, const value_type& x)
|
||||
{
|
||||
@@ -110,7 +110,7 @@ namespace std {
|
||||
vector_base::insert(vector_base::end(), new_size - vector_base::size(), x);
|
||||
}
|
||||
#else
|
||||
// either GCC 4.1, MSVC2017 or non-GCC
|
||||
// either GCC 4.1 or non-GCC
|
||||
// default implementation which should always work.
|
||||
void resize(size_type new_size, const value_type& x)
|
||||
{
|
||||
|
@@ -151,8 +151,8 @@ static btScalar EdgeSeparation(const btBox2dShape* poly1, const btTransform& xf1
|
||||
int index = 0;
|
||||
btScalar minDot = BT_LARGE_FLOAT;
|
||||
|
||||
if( count2 > 0 )
|
||||
index = (int) normal1.minDot( vertices2, count2, minDot);
|
||||
if( count2 > 0 )
|
||||
index = (int) normal1.minDot( vertices2, count2, minDot);
|
||||
|
||||
btVector3 v1 = b2Mul(xf1, vertices1[edge1]);
|
||||
btVector3 v2 = b2Mul(xf2, vertices2[index]);
|
||||
@@ -174,9 +174,9 @@ static btScalar FindMaxSeparation(int* edgeIndex,
|
||||
|
||||
// Find edge normal on poly1 that has the largest projection onto d.
|
||||
int edge = 0;
|
||||
btScalar maxDot;
|
||||
if( count1 > 0 )
|
||||
edge = (int) dLocal1.maxDot( normals1, count1, maxDot);
|
||||
btScalar maxDot;
|
||||
if( count1 > 0 )
|
||||
edge = (int) dLocal1.maxDot( normals1, count1, maxDot);
|
||||
|
||||
// Get the separation for the edge normal.
|
||||
btScalar s = EdgeSeparation(poly1, xf1, edge, poly2, xf2);
|
||||
|
@@ -232,8 +232,8 @@ void btCompoundCollisionAlgorithm::processCollision (const btCollisionObjectWrap
|
||||
m_compoundShapeRevision = compoundShape->getUpdateRevision();
|
||||
}
|
||||
|
||||
if (m_childCollisionAlgorithms.size()==0)
|
||||
return;
|
||||
if (m_childCollisionAlgorithms.size()==0)
|
||||
return;
|
||||
|
||||
const btDbvt* tree = compoundShape->getDynamicAabbTree();
|
||||
//use a dynamic aabb tree to cull potential child-overlaps
|
||||
|
5 extern/ceres/CMakeLists.txt (vendored)
@@ -73,12 +73,10 @@ set(SRC
|
||||
internal/ceres/file.cc
|
||||
internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
|
||||
internal/ceres/generated/schur_eliminator_d_d_d.cc
|
||||
internal/ceres/gradient_checker.cc
|
||||
internal/ceres/gradient_checking_cost_function.cc
|
||||
internal/ceres/gradient_problem.cc
|
||||
internal/ceres/gradient_problem_solver.cc
|
||||
internal/ceres/implicit_schur_complement.cc
|
||||
internal/ceres/is_close.cc
|
||||
internal/ceres/iterative_schur_complement_solver.cc
|
||||
internal/ceres/lapack.cc
|
||||
internal/ceres/levenberg_marquardt_strategy.cc
|
||||
@@ -118,7 +116,6 @@ set(SRC
|
||||
internal/ceres/triplet_sparse_matrix.cc
|
||||
internal/ceres/trust_region_minimizer.cc
|
||||
internal/ceres/trust_region_preprocessor.cc
|
||||
internal/ceres/trust_region_step_evaluator.cc
|
||||
internal/ceres/trust_region_strategy.cc
|
||||
internal/ceres/types.cc
|
||||
internal/ceres/wall_time.cc
|
||||
@@ -207,7 +204,6 @@ set(SRC
|
||||
internal/ceres/householder_vector.h
|
||||
internal/ceres/implicit_schur_complement.h
|
||||
internal/ceres/integral_types.h
|
||||
internal/ceres/is_close.h
|
||||
internal/ceres/iterative_schur_complement_solver.h
|
||||
internal/ceres/lapack.h
|
||||
internal/ceres/levenberg_marquardt_strategy.h
|
||||
@@ -252,7 +248,6 @@ set(SRC
|
||||
internal/ceres/triplet_sparse_matrix.h
|
||||
internal/ceres/trust_region_minimizer.h
|
||||
internal/ceres/trust_region_preprocessor.h
|
||||
internal/ceres/trust_region_step_evaluator.h
|
||||
internal/ceres/trust_region_strategy.h
|
||||
internal/ceres/visibility_based_preconditioner.h
|
||||
internal/ceres/wall_time.h
|
||||
|
1073 extern/ceres/ChangeLog (vendored)
File diff suppressed because it is too large
21 extern/ceres/bundle.sh (vendored)
@@ -173,5 +173,26 @@ if(WITH_OPENMP)
|
||||
)
|
||||
endif()
|
||||
|
||||
TEST_UNORDERED_MAP_SUPPORT()
|
||||
if(HAVE_STD_UNORDERED_MAP_HEADER)
|
||||
if(HAVE_UNORDERED_MAP_IN_STD_NAMESPACE)
|
||||
add_definitions(-DCERES_STD_UNORDERED_MAP)
|
||||
else()
|
||||
if(HAVE_UNORDERED_MAP_IN_TR1_NAMESPACE)
|
||||
add_definitions(-DCERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE)
|
||||
else()
|
||||
add_definitions(-DCERES_NO_UNORDERED_MAP)
|
||||
message(STATUS "Replacing unordered_map/set with map/set (warning: slower!)")
|
||||
endif()
|
||||
endif()
|
||||
else()
|
||||
if(HAVE_UNORDERED_MAP_IN_TR1_NAMESPACE)
|
||||
add_definitions(-DCERES_TR1_UNORDERED_MAP)
|
||||
else()
|
||||
add_definitions(-DCERES_NO_UNORDERED_MAP)
|
||||
message(STATUS "Replacing unordered_map/set with map/set (warning: slower!)")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
blender_add_lib(extern_ceres "\${SRC}" "\${INC}" "\${INC_SYS}")
|
||||
EOF
|
||||
|
5 extern/ceres/files.txt (vendored)
@@ -149,7 +149,6 @@ internal/ceres/generated/schur_eliminator_4_4_d.cc
|
||||
internal/ceres/generated/schur_eliminator_d_d_d.cc
|
||||
internal/ceres/generate_eliminator_specialization.py
|
||||
internal/ceres/generate_partitioned_matrix_view_specializations.py
|
||||
internal/ceres/gradient_checker.cc
|
||||
internal/ceres/gradient_checking_cost_function.cc
|
||||
internal/ceres/gradient_checking_cost_function.h
|
||||
internal/ceres/gradient_problem.cc
|
||||
@@ -161,8 +160,6 @@ internal/ceres/householder_vector.h
|
||||
internal/ceres/implicit_schur_complement.cc
|
||||
internal/ceres/implicit_schur_complement.h
|
||||
internal/ceres/integral_types.h
|
||||
internal/ceres/is_close.cc
|
||||
internal/ceres/is_close.h
|
||||
internal/ceres/iterative_schur_complement_solver.cc
|
||||
internal/ceres/iterative_schur_complement_solver.h
|
||||
internal/ceres/lapack.cc
|
||||
@@ -246,8 +243,6 @@ internal/ceres/trust_region_minimizer.cc
|
||||
internal/ceres/trust_region_minimizer.h
|
||||
internal/ceres/trust_region_preprocessor.cc
|
||||
internal/ceres/trust_region_preprocessor.h
|
||||
internal/ceres/trust_region_step_evaluator.cc
|
||||
internal/ceres/trust_region_step_evaluator.h
|
||||
internal/ceres/trust_region_strategy.cc
|
||||
internal/ceres/trust_region_strategy.h
|
||||
internal/ceres/types.cc
|
||||
|
@@ -130,8 +130,7 @@ class CostFunctionToFunctor {
|
||||
const int num_parameter_blocks =
|
||||
(N0 > 0) + (N1 > 0) + (N2 > 0) + (N3 > 0) + (N4 > 0) +
|
||||
(N5 > 0) + (N6 > 0) + (N7 > 0) + (N8 > 0) + (N9 > 0);
|
||||
CHECK_EQ(static_cast<int>(parameter_block_sizes.size()),
|
||||
num_parameter_blocks);
|
||||
CHECK_EQ(parameter_block_sizes.size(), num_parameter_blocks);
|
||||
|
||||
CHECK_EQ(N0, parameter_block_sizes[0]);
|
||||
if (parameter_block_sizes.size() > 1) CHECK_EQ(N1, parameter_block_sizes[1]); // NOLINT
|
||||
|
56 extern/ceres/include/ceres/covariance.h (vendored)
@@ -357,28 +357,6 @@ class CERES_EXPORT Covariance {
                            const double*> >& covariance_blocks,
               Problem* problem);

  // Compute a part of the covariance matrix.
  //
  // The vector parameter_blocks contains the parameter blocks that
  // are used for computing the covariance matrix. From this vector
  // all covariance pairs are generated. This allows the covariance
  // estimation algorithm to only compute and store these blocks.
  //
  // parameter_blocks cannot contain duplicates. Bad things will
  // happen if they do.
  //
  // Note that the list of covariance_blocks is only used to determine
  // what parts of the covariance matrix are computed. The full
  // Jacobian is used to do the computation, i.e. they do not have an
  // impact on what part of the Jacobian is used for computation.
  //
  // The return value indicates the success or failure of the
  // covariance computation. Please see the documentation for
  // Covariance::Options for more on the conditions under which this
  // function returns false.
  bool Compute(const std::vector<const double*>& parameter_blocks,
               Problem* problem);

  // Return the block of the cross-covariance matrix corresponding to
  // parameter_block1 and parameter_block2.
  //
@@ -416,40 +394,6 @@ class CERES_EXPORT Covariance {
                          const double* parameter_block2,
                          double* covariance_block) const;

  // Return the covariance matrix corresponding to all parameter_blocks.
  //
  // Compute must be called before calling GetCovarianceMatrix and all
  // parameter_blocks must have been present in the vector
  // parameter_blocks when Compute was called. Otherwise
  // GetCovarianceMatrix returns false.
  //
  // covariance_matrix must point to a memory location that can store
  // the size of the covariance matrix. The covariance matrix will be
  // a square matrix whose row and column count is equal to the sum of
  // the sizes of the individual parameter blocks. The covariance
  // matrix will be a row-major matrix.
  bool GetCovarianceMatrix(const std::vector<const double *> &parameter_blocks,
                           double *covariance_matrix);

  // Return the covariance matrix corresponding to parameter_blocks
  // in the tangent space if a local parameterization is associated
  // with one of the parameter blocks, else returns the covariance
  // matrix in the ambient space.
  //
  // Compute must be called before calling GetCovarianceMatrix and all
  // parameter_blocks must have been present in the vector
  // parameter_blocks when Compute was called. Otherwise
  // GetCovarianceMatrix returns false.
  //
  // covariance_matrix must point to a memory location that can store
  // the size of the covariance matrix. The covariance matrix will be
  // a square matrix whose row and column count is equal to the sum of
  // the sizes of the tangent spaces of the individual parameter
  // blocks. The covariance matrix will be a row-major matrix.
  bool GetCovarianceMatrixInTangentSpace(
      const std::vector<const double*>& parameter_blocks,
      double* covariance_matrix);

 private:
  internal::scoped_ptr<internal::CovarianceImpl> impl_;
};
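For orientation (not part of the diff), a minimal sketch of how the Covariance interface documented above is typically driven; the problem, x and y names and the 3x3 block size are illustrative assumptions:

    #include "ceres/covariance.h"

    ceres::Covariance::Options options;
    ceres::Covariance covariance(options);
    std::vector<std::pair<const double*, const double*> > covariance_blocks;
    covariance_blocks.push_back(std::make_pair(x, x));   // x, y: parameter blocks already added to problem
    covariance_blocks.push_back(std::make_pair(x, y));
    CHECK(covariance.Compute(covariance_blocks, &problem));
    double covariance_xx[3 * 3];                          // assuming x is a 3-dimensional block
    covariance.GetCovarianceBlock(x, x, covariance_xx);   // row-major 3x3 block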
@@ -85,6 +85,22 @@ class DynamicNumericDiffCostFunction : public CostFunction {
|
||||
options_(options) {
|
||||
}
|
||||
|
||||
// Deprecated. New users should avoid using this constructor. Instead, use the
|
||||
// constructor with NumericDiffOptions.
|
||||
DynamicNumericDiffCostFunction(
|
||||
const CostFunctor* functor,
|
||||
Ownership ownership,
|
||||
double relative_step_size)
|
||||
: functor_(functor),
|
||||
ownership_(ownership),
|
||||
options_() {
|
||||
LOG(WARNING) << "This constructor is deprecated and will be removed in "
|
||||
"a future version. Please use the NumericDiffOptions "
|
||||
"constructor instead.";
|
||||
|
||||
options_.relative_step_size = relative_step_size;
|
||||
}
|
||||
|
||||
virtual ~DynamicNumericDiffCostFunction() {
|
||||
if (ownership_ != TAKE_OWNERSHIP) {
|
||||
functor_.release();
|
||||
@@ -122,19 +138,19 @@ class DynamicNumericDiffCostFunction : public CostFunction {
|
||||
std::vector<double> parameters_copy(parameters_size);
|
||||
std::vector<double*> parameters_references_copy(block_sizes.size());
|
||||
parameters_references_copy[0] = ¶meters_copy[0];
|
||||
for (size_t block = 1; block < block_sizes.size(); ++block) {
|
||||
for (int block = 1; block < block_sizes.size(); ++block) {
|
||||
parameters_references_copy[block] = parameters_references_copy[block - 1]
|
||||
+ block_sizes[block - 1];
|
||||
}
|
||||
|
||||
// Copy the parameters into the local temp space.
|
||||
for (size_t block = 0; block < block_sizes.size(); ++block) {
|
||||
for (int block = 0; block < block_sizes.size(); ++block) {
|
||||
memcpy(parameters_references_copy[block],
|
||||
parameters[block],
|
||||
block_sizes[block] * sizeof(*parameters[block]));
|
||||
}
|
||||
|
||||
for (size_t block = 0; block < block_sizes.size(); ++block) {
|
||||
for (int block = 0; block < block_sizes.size(); ++block) {
|
||||
if (jacobians[block] != NULL &&
|
||||
!NumericDiff<CostFunctor, method, DYNAMIC,
|
||||
DYNAMIC, DYNAMIC, DYNAMIC, DYNAMIC, DYNAMIC,
|
||||
|
237 changes: extern/ceres/include/ceres/gradient_checker.h (vendored)
@@ -27,121 +27,194 @@
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
// Copyright 2007 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Authors: wjr@google.com (William Rucklidge),
|
||||
// keir@google.com (Keir Mierle),
|
||||
// dgossow@google.com (David Gossow)
|
||||
// Author: wjr@google.com (William Rucklidge)
|
||||
//
|
||||
// This file contains a class that exercises a cost function, to make sure
|
||||
// that it is computing reasonable derivatives. It compares the Jacobians
|
||||
// computed by the cost function with those obtained by finite
|
||||
// differences.
|
||||
|
||||
#ifndef CERES_PUBLIC_GRADIENT_CHECKER_H_
|
||||
#define CERES_PUBLIC_GRADIENT_CHECKER_H_
|
||||
|
||||
#include <cstddef>
|
||||
#include <algorithm>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
|
||||
#include "ceres/cost_function.h"
|
||||
#include "ceres/dynamic_numeric_diff_cost_function.h"
|
||||
#include "ceres/internal/eigen.h"
|
||||
#include "ceres/internal/fixed_array.h"
|
||||
#include "ceres/internal/macros.h"
|
||||
#include "ceres/internal/scoped_ptr.h"
|
||||
#include "ceres/local_parameterization.h"
|
||||
#include "ceres/numeric_diff_cost_function.h"
|
||||
#include "glog/logging.h"
|
||||
|
||||
namespace ceres {
|
||||
|
||||
// GradientChecker compares the Jacobians returned by a cost function against
|
||||
// derivatives estimated using finite differencing.
|
||||
// An object that exercises a cost function, to compare the answers that it
|
||||
// gives with derivatives estimated using finite differencing.
|
||||
//
|
||||
// The condition enforced is that
|
||||
//
|
||||
// (J_actual(i, j) - J_numeric(i, j))
|
||||
// ------------------------------------ < relative_precision
|
||||
// max(J_actual(i, j), J_numeric(i, j))
|
||||
//
|
||||
// where J_actual(i, j) is the jacobian as computed by the supplied cost
|
||||
// function (by the user) multiplied by the local parameterization Jacobian
|
||||
// and J_numeric is the jacobian as computed by finite differences, multiplied
|
||||
// by the local parameterization Jacobian as well.
|
||||
// The only likely usage of this is for testing.
|
||||
//
|
||||
// How to use: Fill in an array of pointers to parameter blocks for your
|
||||
// CostFunction, and then call Probe(). Check that the return value is 'true'.
|
||||
// CostFunction, and then call Probe(). Check that the return value is
|
||||
// 'true'. See prober_test.cc for an example.
|
||||
//
|
||||
// This is templated similarly to NumericDiffCostFunction, as it internally
|
||||
// uses that.
|
||||
template <typename CostFunctionToProbe,
|
||||
int M = 0, int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0>
|
||||
class GradientChecker {
|
||||
public:
|
||||
// This will not take ownership of the cost function or local
|
||||
// parameterizations.
|
||||
//
|
||||
// function: The cost function to probe.
|
||||
// local_parameterization: A vector of local parameterizations for each
|
||||
// parameter. May be NULL or contain NULL pointers to indicate that the
|
||||
// respective parameter does not have a local parameterization.
|
||||
// options: Options to use for numerical differentiation.
|
||||
GradientChecker(
|
||||
const CostFunction* function,
|
||||
const std::vector<const LocalParameterization*>* local_parameterizations,
|
||||
const NumericDiffOptions& options);
|
||||
// Here we stash some results from the probe, for later
|
||||
// inspection.
|
||||
struct GradientCheckResults {
|
||||
// Computed cost.
|
||||
Vector cost;
|
||||
|
||||
// Contains results from a call to Probe for later inspection.
|
||||
struct ProbeResults {
|
||||
// The return value of the cost function.
|
||||
bool return_value;
|
||||
|
||||
// Computed residual vector.
|
||||
Vector residuals;
|
||||
|
||||
// The sizes of the Jacobians below are dictated by the cost function's
|
||||
// parameter block size and residual block sizes. If a parameter block
|
||||
// has a local parameterization associated with it, the size of the "local"
|
||||
// Jacobian will be determined by the local parameterization dimension and
|
||||
// residual block size, otherwise it will be identical to the regular
|
||||
// Jacobian.
|
||||
// The sizes of these matrices are dictated by the cost function's
|
||||
// parameter and residual block sizes. Each vector's length will be
|
||||
// term->parameter_block_sizes().size(), and each matrix is the
|
||||
// Jacobian of the residual with respect to the corresponding parameter
|
||||
// block.
|
||||
|
||||
// Derivatives as computed by the cost function.
|
||||
std::vector<Matrix> jacobians;
|
||||
std::vector<Matrix> term_jacobians;
|
||||
|
||||
// Derivatives as computed by the cost function in local space.
|
||||
std::vector<Matrix> local_jacobians;
|
||||
// Derivatives as computed by finite differencing.
|
||||
std::vector<Matrix> finite_difference_jacobians;
|
||||
|
||||
// Derivatives as computed by numerical differentiation in local space.
|
||||
std::vector<Matrix> numeric_jacobians;
|
||||
|
||||
// Derivatives as computed by numerical differentiation in local space.
|
||||
std::vector<Matrix> local_numeric_jacobians;
|
||||
|
||||
// Contains the maximum relative error found in the local Jacobians.
|
||||
double maximum_relative_error;
|
||||
|
||||
// If an error was detected, this will contain a detailed description of
|
||||
// that error.
|
||||
std::string error_log;
|
||||
// Infinity-norm of term_jacobians - finite_difference_jacobians.
|
||||
double error_jacobians;
|
||||
};
|
||||
|
||||
// Call the cost function, compute alternative Jacobians using finite
|
||||
// differencing and compare results. If local parameterizations are given,
|
||||
// the Jacobians will be multiplied by the local parameterization Jacobians
|
||||
// before performing the check, which effectively means that all errors along
|
||||
// the null space of the local parameterization will be ignored.
|
||||
// Returns false if the Jacobians don't match, the cost function returns false,
|
||||
// or if the cost function returns different residuals when called with a
|
||||
// Jacobian output argument vs. calling it without. Otherwise returns true.
|
||||
// Checks the Jacobian computed by a cost function.
|
||||
//
|
||||
// parameters: The parameter values at which to probe.
|
||||
// relative_precision: A threshold for the relative difference between the
|
||||
// Jacobians. If the Jacobians differ by more than this amount, then the
|
||||
// probe fails.
|
||||
// results: On return, the Jacobians (and other information) will be stored
|
||||
// here. May be NULL.
|
||||
// probe_point: The parameter values at which to probe.
|
||||
// error_tolerance: A threshold for the infinity-norm difference
|
||||
// between the Jacobians. If the Jacobians differ by more than
|
||||
// this amount, then the probe fails.
|
||||
//
|
||||
// term: The cost function to test. Not retained after this call returns.
|
||||
//
|
||||
// results: On return, the two Jacobians (and other information)
|
||||
// will be stored here. May be NULL.
|
||||
//
|
||||
// Returns true if no problems are detected and the difference between the
|
||||
// Jacobians is less than error_tolerance.
|
||||
bool Probe(double const* const* parameters,
|
||||
double relative_precision,
|
||||
ProbeResults* results) const;
|
||||
static bool Probe(double const* const* probe_point,
|
||||
double error_tolerance,
|
||||
CostFunctionToProbe *term,
|
||||
GradientCheckResults* results) {
|
||||
CHECK_NOTNULL(probe_point);
|
||||
CHECK_NOTNULL(term);
|
||||
LOG(INFO) << "-------------------- Starting Probe() --------------------";
|
||||
|
||||
// We need a GradientCheckResults struct, whether or not the caller supplied one.
|
||||
internal::scoped_ptr<GradientCheckResults> owned_results;
|
||||
if (results == NULL) {
|
||||
owned_results.reset(new GradientCheckResults);
|
||||
results = owned_results.get();
|
||||
}
|
||||
|
||||
// Do a consistency check between the term and the template parameters.
|
||||
CHECK_EQ(M, term->num_residuals());
|
||||
const int num_residuals = M;
|
||||
const std::vector<int32>& block_sizes = term->parameter_block_sizes();
|
||||
const int num_blocks = block_sizes.size();
|
||||
|
||||
CHECK_LE(num_blocks, 5) << "Unable to test functions that take more "
|
||||
<< "than 5 parameter blocks";
|
||||
if (N0) {
|
||||
CHECK_EQ(N0, block_sizes[0]);
|
||||
CHECK_GE(num_blocks, 1);
|
||||
} else {
|
||||
CHECK_LT(num_blocks, 1);
|
||||
}
|
||||
if (N1) {
|
||||
CHECK_EQ(N1, block_sizes[1]);
|
||||
CHECK_GE(num_blocks, 2);
|
||||
} else {
|
||||
CHECK_LT(num_blocks, 2);
|
||||
}
|
||||
if (N2) {
|
||||
CHECK_EQ(N2, block_sizes[2]);
|
||||
CHECK_GE(num_blocks, 3);
|
||||
} else {
|
||||
CHECK_LT(num_blocks, 3);
|
||||
}
|
||||
if (N3) {
|
||||
CHECK_EQ(N3, block_sizes[3]);
|
||||
CHECK_GE(num_blocks, 4);
|
||||
} else {
|
||||
CHECK_LT(num_blocks, 4);
|
||||
}
|
||||
if (N4) {
|
||||
CHECK_EQ(N4, block_sizes[4]);
|
||||
CHECK_GE(num_blocks, 5);
|
||||
} else {
|
||||
CHECK_LT(num_blocks, 5);
|
||||
}
|
||||
|
||||
results->term_jacobians.clear();
|
||||
results->term_jacobians.resize(num_blocks);
|
||||
results->finite_difference_jacobians.clear();
|
||||
results->finite_difference_jacobians.resize(num_blocks);
|
||||
|
||||
internal::FixedArray<double*> term_jacobian_pointers(num_blocks);
|
||||
internal::FixedArray<double*>
|
||||
finite_difference_jacobian_pointers(num_blocks);
|
||||
for (int i = 0; i < num_blocks; i++) {
|
||||
results->term_jacobians[i].resize(num_residuals, block_sizes[i]);
|
||||
term_jacobian_pointers[i] = results->term_jacobians[i].data();
|
||||
results->finite_difference_jacobians[i].resize(
|
||||
num_residuals, block_sizes[i]);
|
||||
finite_difference_jacobian_pointers[i] =
|
||||
results->finite_difference_jacobians[i].data();
|
||||
}
|
||||
results->cost.resize(num_residuals, 1);
|
||||
|
||||
CHECK(term->Evaluate(probe_point, results->cost.data(),
|
||||
term_jacobian_pointers.get()));
|
||||
NumericDiffCostFunction<CostFunctionToProbe, CENTRAL, M, N0, N1, N2, N3, N4>
|
||||
numeric_term(term, DO_NOT_TAKE_OWNERSHIP);
|
||||
CHECK(numeric_term.Evaluate(probe_point, results->cost.data(),
|
||||
finite_difference_jacobian_pointers.get()));
|
||||
|
||||
results->error_jacobians = 0;
|
||||
for (int i = 0; i < num_blocks; i++) {
|
||||
Matrix jacobian_difference = results->term_jacobians[i] -
|
||||
results->finite_difference_jacobians[i];
|
||||
results->error_jacobians =
|
||||
std::max(results->error_jacobians,
|
||||
jacobian_difference.lpNorm<Eigen::Infinity>());
|
||||
}
|
||||
|
||||
LOG(INFO) << "========== term-computed derivatives ==========";
|
||||
for (int i = 0; i < num_blocks; i++) {
|
||||
LOG(INFO) << "term_computed block " << i;
|
||||
LOG(INFO) << "\n" << results->term_jacobians[i];
|
||||
}
|
||||
|
||||
LOG(INFO) << "========== finite-difference derivatives ==========";
|
||||
for (int i = 0; i < num_blocks; i++) {
|
||||
LOG(INFO) << "finite_difference block " << i;
|
||||
LOG(INFO) << "\n" << results->finite_difference_jacobians[i];
|
||||
}
|
||||
|
||||
LOG(INFO) << "========== difference ==========";
|
||||
for (int i = 0; i < num_blocks; i++) {
|
||||
LOG(INFO) << "difference block " << i;
|
||||
LOG(INFO) << (results->term_jacobians[i] -
|
||||
results->finite_difference_jacobians[i]);
|
||||
}
|
||||
|
||||
LOG(INFO) << "||difference|| = " << results->error_jacobians;
|
||||
|
||||
return results->error_jacobians < error_tolerance;
|
||||
}
|
||||
|
||||
private:
|
||||
CERES_DISALLOW_IMPLICIT_CONSTRUCTORS(GradientChecker);
|
||||
|
||||
std::vector<const LocalParameterization*> local_parameterizations_;
|
||||
const CostFunction* function_;
|
||||
internal::scoped_ptr<CostFunction> finite_diff_cost_function_;
|
||||
};
|
||||
|
||||
} // namespace ceres
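As a usage illustration (not part of the diff), the templated checker above is driven by filling an array of parameter-block pointers and calling the static Probe(); MyCostFunction, its sizes (2 residuals, one 3-dimensional block) and the 1e-6 tolerance are illustrative assumptions:

    #include "ceres/gradient_checker.h"

    typedef ceres::GradientChecker<MyCostFunction, 2, 3> Checker;  // MyCostFunction: hypothetical CostFunction
    MyCostFunction term;
    double block0[3] = {1.0, 2.0, 3.0};          // parameter values at which to probe
    double* parameters[] = { block0 };
    Checker::GradientCheckResults results;
    bool jacobians_match = Checker::Probe(parameters, 1e-6, &term, &results);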
|
||||
|
22 changes: extern/ceres/include/ceres/internal/port.h (vendored)
@@ -33,8 +33,9 @@
|
||||
|
||||
// This file needs to compile as c code.
|
||||
#ifdef __cplusplus
|
||||
#include <cstddef>
|
||||
|
||||
#include "ceres/internal/config.h"
|
||||
|
||||
#if defined(CERES_TR1_MEMORY_HEADER)
|
||||
#include <tr1/memory>
|
||||
#else
|
||||
@@ -49,25 +50,6 @@ using std::tr1::shared_ptr;
|
||||
using std::shared_ptr;
|
||||
#endif
|
||||
|
||||
// We allocate some Eigen objects on the stack and other places they
|
||||
// might not be aligned to 16-byte boundaries. If we have C++11, we
|
||||
// can specify their alignment anyway, and thus can safely enable
|
||||
// vectorization on those matrices; in C++99, we are out of luck. Figure out
|
||||
// what case we're in and write macros that do the right thing.
|
||||
#ifdef CERES_USE_CXX11
|
||||
namespace port_constants {
|
||||
static constexpr size_t kMaxAlignBytes =
|
||||
// Work around a GCC 4.8 bug
|
||||
// (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56019) where
|
||||
// std::max_align_t is misplaced.
|
||||
#if defined (__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 8
|
||||
alignof(::max_align_t);
|
||||
#else
|
||||
alignof(std::max_align_t);
|
||||
#endif
|
||||
} // namespace port_constants
|
||||
#endif
|
||||
|
||||
} // namespace ceres
|
||||
|
||||
#endif // __cplusplus
|
||||
|
@@ -69,7 +69,7 @@ struct CERES_EXPORT IterationSummary {
|
||||
// Step was numerically valid, i.e., all values are finite and the
|
||||
// step reduces the value of the linearized model.
|
||||
//
|
||||
// Note: step_is_valid is always true when iteration = 0.
|
||||
// Note: step_is_valid is false when iteration = 0.
|
||||
bool step_is_valid;
|
||||
|
||||
// Step did not reduce the value of the objective function
|
||||
@@ -77,7 +77,7 @@ struct CERES_EXPORT IterationSummary {
|
||||
// acceptance criterion used by the non-monotonic trust region
|
||||
// algorithm.
|
||||
//
|
||||
// Note: step_is_nonmonotonic is always false when iteration = 0;
|
||||
// Note: step_is_nonmonotonic is false when iteration = 0;
|
||||
bool step_is_nonmonotonic;
|
||||
|
||||
// Whether or not the minimizer accepted this step. If the
|
||||
@@ -89,7 +89,7 @@ struct CERES_EXPORT IterationSummary {
|
||||
// relative decrease is not sufficient, the algorithm may accept the
|
||||
// step and the step is declared successful.
|
||||
//
|
||||
// Note: step_is_successful is always true when iteration = 0.
|
||||
// Note: step_is_successful is false when iteration = 0.
|
||||
bool step_is_successful;
|
||||
|
||||
// Value of the objective function.
|
||||
|
106
extern/ceres/include/ceres/jet.h
vendored
106
extern/ceres/include/ceres/jet.h
vendored
@@ -164,7 +164,6 @@
|
||||
|
||||
#include "Eigen/Core"
|
||||
#include "ceres/fpclassify.h"
|
||||
#include "ceres/internal/port.h"
|
||||
|
||||
namespace ceres {
|
||||
|
||||
@@ -228,23 +227,21 @@ struct Jet {
|
||||
T a;
|
||||
|
||||
// The infinitesimal part.
|
||||
|
||||
// We allocate Jets on the stack and other places they
|
||||
// might not be aligned to 16-byte boundaries. If we have C++11, we
|
||||
// can specify their alignment anyway, and thus can safely enable
|
||||
// vectorization on those matrices; in C++99, we are out of luck. Figure out
|
||||
// what case we're in and do the right thing.
|
||||
#ifndef CERES_USE_CXX11
|
||||
// fall back to safe version:
|
||||
//
|
||||
// Note the Eigen::DontAlign bit is needed here because this object
|
||||
// gets allocated on the stack and as part of other arrays and
|
||||
// structs. Forcing the right alignment there is the source of much
|
||||
// pain and suffering. Even if that works, passing Jets around to
|
||||
// functions by value has problems because the C++ ABI does not
|
||||
// guarantee alignment for function arguments.
|
||||
//
|
||||
// Setting the DontAlign bit prevents Eigen from using SSE for the
|
||||
// various operations on Jets. This is a small performance penalty
|
||||
// since the AutoDiff code will still expose much of the code as
|
||||
// statically sized loops to the compiler. But given the subtle
|
||||
// issues that arise due to alignment, especially when dealing with
|
||||
// multiple platforms, it seems to be a trade off worth making.
|
||||
Eigen::Matrix<T, N, 1, Eigen::DontAlign> v;
|
||||
#else
|
||||
static constexpr bool kShouldAlignMatrix =
|
||||
16 <= ::ceres::port_constants::kMaxAlignBytes;
|
||||
static constexpr int kAlignHint = kShouldAlignMatrix ?
|
||||
Eigen::AutoAlign : Eigen::DontAlign;
|
||||
static constexpr size_t kAlignment = kShouldAlignMatrix ? 16 : 1;
|
||||
alignas(kAlignment) Eigen::Matrix<T, N, 1, kAlignHint> v;
|
||||
#endif
|
||||
};
|
||||
|
||||
// Unary +
|
||||
@@ -391,8 +388,6 @@ inline double atan (double x) { return std::atan(x); }
|
||||
inline double sinh (double x) { return std::sinh(x); }
|
||||
inline double cosh (double x) { return std::cosh(x); }
|
||||
inline double tanh (double x) { return std::tanh(x); }
|
||||
inline double floor (double x) { return std::floor(x); }
|
||||
inline double ceil (double x) { return std::ceil(x); }
|
||||
inline double pow (double x, double y) { return std::pow(x, y); }
|
||||
inline double atan2(double y, double x) { return std::atan2(y, x); }
|
||||
|
||||
@@ -487,51 +482,10 @@ Jet<T, N> tanh(const Jet<T, N>& f) {
|
||||
return Jet<T, N>(tanh_a, tmp * f.v);
|
||||
}
|
||||
|
||||
// The floor function should be used with extreme care as this operation will
|
||||
// result in a zero derivative which provides no information to the solver.
|
||||
//
|
||||
// floor(a + h) ~= floor(a) + 0
|
||||
template <typename T, int N> inline
|
||||
Jet<T, N> floor(const Jet<T, N>& f) {
|
||||
return Jet<T, N>(floor(f.a));
|
||||
}
|
||||
|
||||
// The ceil function should be used with extreme care as this operation will
|
||||
// result in a zero derivative which provides no information to the solver.
|
||||
//
|
||||
// ceil(a + h) ~= ceil(a) + 0
|
||||
template <typename T, int N> inline
|
||||
Jet<T, N> ceil(const Jet<T, N>& f) {
|
||||
return Jet<T, N>(ceil(f.a));
|
||||
}
|
||||
|
||||
// Bessel functions of the first kind with integer order equal to 0, 1, n.
|
||||
//
|
||||
// Microsoft has deprecated the j[0,1,n]() POSIX Bessel functions in favour of
|
||||
// _j[0,1,n](). Where available on MSVC, use _j[0,1,n]() to avoid deprecated
|
||||
// function errors in client code (the specific warning is suppressed when
|
||||
// Ceres itself is built).
|
||||
inline double BesselJ0(double x) {
|
||||
#if defined(_MSC_VER) && defined(_j0)
|
||||
return _j0(x);
|
||||
#else
|
||||
return j0(x);
|
||||
#endif
|
||||
}
|
||||
inline double BesselJ1(double x) {
|
||||
#if defined(_MSC_VER) && defined(_j1)
|
||||
return _j1(x);
|
||||
#else
|
||||
return j1(x);
|
||||
#endif
|
||||
}
|
||||
inline double BesselJn(int n, double x) {
|
||||
#if defined(_MSC_VER) && defined(_jn)
|
||||
return _jn(n, x);
|
||||
#else
|
||||
return jn(n, x);
|
||||
#endif
|
||||
}
|
||||
inline double BesselJ0(double x) { return j0(x); }
|
||||
inline double BesselJ1(double x) { return j1(x); }
|
||||
inline double BesselJn(int n, double x) { return jn(n, x); }
|
||||
|
||||
// For the formulae of the derivatives of the Bessel functions see the book:
|
||||
// Olver, Lozier, Boisvert, Clark, NIST Handbook of Mathematical Functions,
|
||||
@@ -789,15 +743,7 @@ template<typename T, int N> inline Jet<T, N> ei_pow (const Jet<T, N>& x,
|
||||
// strange compile errors.
|
||||
template <typename T, int N>
|
||||
inline std::ostream &operator<<(std::ostream &s, const Jet<T, N>& z) {
|
||||
s << "[" << z.a << " ; ";
|
||||
for (int i = 0; i < N; ++i) {
|
||||
s << z.v[i];
|
||||
if (i != N - 1) {
|
||||
s << ", ";
|
||||
}
|
||||
}
|
||||
s << "]";
|
||||
return s;
|
||||
return s << "[" << z.a << " ; " << z.v.transpose() << "]";
|
||||
}
|
||||
|
||||
} // namespace ceres
|
||||
@@ -811,7 +757,6 @@ struct NumTraits<ceres::Jet<T, N> > {
|
||||
typedef ceres::Jet<T, N> Real;
|
||||
typedef ceres::Jet<T, N> NonInteger;
|
||||
typedef ceres::Jet<T, N> Nested;
|
||||
typedef ceres::Jet<T, N> Literal;
|
||||
|
||||
static typename ceres::Jet<T, N> dummy_precision() {
|
||||
return ceres::Jet<T, N>(1e-12);
|
||||
@@ -832,21 +777,6 @@ struct NumTraits<ceres::Jet<T, N> > {
|
||||
HasFloatingPoint = 1,
|
||||
RequireInitialization = 1
|
||||
};
|
||||
|
||||
template<bool Vectorized>
|
||||
struct Div {
|
||||
enum {
|
||||
#if defined(EIGEN_VECTORIZE_AVX)
|
||||
AVX = true,
|
||||
#else
|
||||
AVX = false,
|
||||
#endif
|
||||
|
||||
// Assuming that for Jets, division is as expensive as
|
||||
// multiplication.
|
||||
Cost = 3
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
} // namespace Eigen
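To make the printed form above concrete (not part of the diff), a small sketch: a Jet carries a value and its partial derivatives, and the operator<< shown in this hunk renders them as "[a ; v]"; the variable names are illustrative:

    #include <iostream>
    #include "ceres/jet.h"

    ceres::Jet<double, 2> x(3.0, 0);               // value 3, derivative tracked in slot 0
    ceres::Jet<double, 2> y(2.0, 1);               // value 2, derivative tracked in slot 1
    ceres::Jet<double, 2> f = x * x * y + ceres::sinh(y);
    std::cout << f << "\n";                        // prints the value and (df/dx, df/dy)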
|
||||
|
@@ -211,28 +211,6 @@ class CERES_EXPORT QuaternionParameterization : public LocalParameterization {
|
||||
virtual int LocalSize() const { return 3; }
|
||||
};
|
||||
|
||||
// Implements the quaternion local parameterization for Eigen's representation
|
||||
// of the quaternion. Eigen uses a different internal memory layout for the
|
||||
// elements of the quaternion than what is commonly used. Specifically, Eigen
|
||||
// stores the elements in memory as [x, y, z, w] where the real part is last
|
||||
// whereas it is typically stored first. Note, when creating an Eigen quaternion
|
||||
// through the constructor the elements are accepted in w, x, y, z order. Since
|
||||
// Ceres operates on parameter blocks which are raw double pointers this
|
||||
// difference is important and requires a different parameterization.
|
||||
//
|
||||
// Plus(x, delta) = [sin(|delta|) delta / |delta|, cos(|delta|)] * x
|
||||
// with * being the quaternion multiplication operator.
|
||||
class EigenQuaternionParameterization : public ceres::LocalParameterization {
|
||||
public:
|
||||
virtual ~EigenQuaternionParameterization() {}
|
||||
virtual bool Plus(const double* x,
|
||||
const double* delta,
|
||||
double* x_plus_delta) const;
|
||||
virtual bool ComputeJacobian(const double* x,
|
||||
double* jacobian) const;
|
||||
virtual int GlobalSize() const { return 4; }
|
||||
virtual int LocalSize() const { return 3; }
|
||||
};
|
||||
|
||||
// This provides a parameterization for homogeneous vectors which are commonly
|
||||
// used in Structure for Motion problems. One example where they are used is
|
||||
|
@@ -206,6 +206,29 @@ class NumericDiffCostFunction
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated. New users should avoid using this constructor. Instead, use the
|
||||
// constructor with NumericDiffOptions.
|
||||
NumericDiffCostFunction(CostFunctor* functor,
|
||||
Ownership ownership,
|
||||
int num_residuals,
|
||||
const double relative_step_size)
|
||||
:functor_(functor),
|
||||
ownership_(ownership),
|
||||
options_() {
|
||||
LOG(WARNING) << "This constructor is deprecated and will be removed in "
|
||||
"a future version. Please use the NumericDiffOptions "
|
||||
"constructor instead.";
|
||||
|
||||
if (kNumResiduals == DYNAMIC) {
|
||||
SizedCostFunction<kNumResiduals,
|
||||
N0, N1, N2, N3, N4,
|
||||
N5, N6, N7, N8, N9>
|
||||
::set_num_residuals(num_residuals);
|
||||
}
|
||||
|
||||
options_.relative_step_size = relative_step_size;
|
||||
}
|
||||
|
||||
~NumericDiffCostFunction() {
|
||||
if (ownership_ != TAKE_OWNERSHIP) {
|
||||
functor_.release();
|
||||
|
7 changes: extern/ceres/include/ceres/problem.h (vendored)
@@ -309,9 +309,6 @@ class CERES_EXPORT Problem {
|
||||
// Allow the indicated parameter block to vary during optimization.
|
||||
void SetParameterBlockVariable(double* values);
|
||||
|
||||
// Returns true if a parameter block is set constant, and false otherwise.
|
||||
bool IsParameterBlockConstant(double* values) const;
|
||||
|
||||
// Set the local parameterization for one of the parameter blocks.
|
||||
// The local_parameterization is owned by the Problem by default. It
|
||||
// is acceptable to set the same parameterization for multiple
|
||||
@@ -464,10 +461,6 @@ class CERES_EXPORT Problem {
|
||||
// parameter block has a local parameterization, then it contributes
|
||||
// "LocalSize" entries to the gradient vector (and the number of
|
||||
// columns in the jacobian).
|
||||
//
|
||||
// Note 3: This function cannot be called while the problem is being
|
||||
// solved, for example it cannot be called from an IterationCallback
|
||||
// at the end of an iteration during a solve.
|
||||
bool Evaluate(const EvaluateOptions& options,
|
||||
double* cost,
|
||||
std::vector<double>* residuals,
|
||||
|
3 changes: extern/ceres/include/ceres/rotation.h (vendored)
@@ -48,6 +48,7 @@
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
#include <limits>
|
||||
#include "glog/logging.h"
|
||||
|
||||
namespace ceres {
|
||||
|
||||
@@ -417,6 +418,7 @@ template <typename T>
|
||||
inline void EulerAnglesToRotationMatrix(const T* euler,
|
||||
const int row_stride_parameter,
|
||||
T* R) {
|
||||
CHECK_EQ(row_stride_parameter, 3);
|
||||
EulerAnglesToRotationMatrix(euler, RowMajorAdapter3x3(R));
|
||||
}
|
||||
|
||||
@@ -494,6 +496,7 @@ void QuaternionToRotation(const T q[4],
|
||||
QuaternionToScaledRotation(q, R);
|
||||
|
||||
T normalizer = q[0]*q[0] + q[1]*q[1] + q[2]*q[2] + q[3]*q[3];
|
||||
CHECK_NE(normalizer, T(0));
|
||||
normalizer = T(1) / normalizer;
|
||||
|
||||
for (int i = 0; i < 3; ++i) {
|
||||
|
31 changes: extern/ceres/include/ceres/solver.h (vendored)
@@ -134,7 +134,7 @@ class CERES_EXPORT Solver {
|
||||
trust_region_problem_dump_format_type = TEXTFILE;
|
||||
check_gradients = false;
|
||||
gradient_check_relative_precision = 1e-8;
|
||||
gradient_check_numeric_derivative_relative_step_size = 1e-6;
|
||||
numeric_derivative_relative_step_size = 1e-6;
|
||||
update_state_every_iteration = false;
|
||||
}
|
||||
|
||||
@@ -701,22 +701,12 @@ class CERES_EXPORT Solver {
|
||||
// this number, then the jacobian for that cost term is dumped.
|
||||
double gradient_check_relative_precision;
|
||||
|
||||
// WARNING: This option only applies to the numeric
|
||||
// differentiation used for checking the user provided derivatives
|
||||
// when Solver::Options::check_gradients is true. If you are
|
||||
// using NumericDiffCostFunction and are interested in changing
|
||||
// the step size for numeric differentiation in your cost
|
||||
// function, please have a look at
|
||||
// include/ceres/numeric_diff_options.h.
|
||||
// Relative shift used for taking numeric derivatives. For finite
|
||||
// differencing, each dimension is evaluated at slightly shifted
|
||||
// values; for the case of central difference, this is what gets
|
||||
// evaluated:
|
||||
//
|
||||
// Relative shift used for taking numeric derivatives when
|
||||
// Solver::Options::check_gradients is true.
|
||||
//
|
||||
// For finite differencing, each dimension is evaluated at
|
||||
// slightly shifted values; for the case of central difference,
|
||||
// this is what gets evaluated:
|
||||
//
|
||||
// delta = gradient_check_numeric_derivative_relative_step_size;
|
||||
// delta = numeric_derivative_relative_step_size;
|
||||
// f_initial = f(x)
|
||||
// f_forward = f((1 + delta) * x)
|
||||
// f_backward = f((1 - delta) * x)
|
||||
@@ -733,7 +723,7 @@ class CERES_EXPORT Solver {
|
||||
// theory a good choice is sqrt(eps) * x, which for doubles means
|
||||
// about 1e-8 * x. However, I have found this number too
|
||||
// optimistic. This number should be exposed for users to change.
|
||||
double gradient_check_numeric_derivative_relative_step_size;
|
||||
double numeric_derivative_relative_step_size;
|
||||
|
||||
// If true, the user's parameter blocks are updated at the end of
|
||||
// every Minimizer iteration, otherwise they are updated when the
|
||||
@@ -811,13 +801,6 @@ class CERES_EXPORT Solver {
|
||||
// Number of times inner iterations were performed.
|
||||
int num_inner_iteration_steps;
|
||||
|
||||
// Total number of iterations inside the line search algorithm
|
||||
// across all invocations. We call these iterations "steps" to
|
||||
// distinguish them from the outer iterations of the line search
|
||||
// and trust region minimizer algorithms which call the line
|
||||
// search algorithm as a subroutine.
|
||||
int num_line_search_steps;
|
||||
|
||||
// All times reported below are wall times.
|
||||
|
||||
// When the user calls Solve, before the actual optimization
|
||||
|
2 changes: extern/ceres/include/ceres/version.h (vendored)
@@ -32,7 +32,7 @@
|
||||
#define CERES_PUBLIC_VERSION_H_
|
||||
|
||||
#define CERES_VERSION_MAJOR 1
|
||||
#define CERES_VERSION_MINOR 12
|
||||
#define CERES_VERSION_MINOR 11
|
||||
#define CERES_VERSION_REVISION 0
|
||||
|
||||
// Classic CPP stringification; the extra level of indirection allows the
|
||||
|
@@ -46,7 +46,6 @@ namespace internal {
|
||||
using std::make_pair;
|
||||
using std::pair;
|
||||
using std::vector;
|
||||
using std::adjacent_find;
|
||||
|
||||
void CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
|
||||
const Program* program, CompressedRowSparseMatrix* jacobian) {
|
||||
@@ -141,21 +140,12 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
|
||||
|
||||
// Sort the parameters by their position in the state vector.
|
||||
sort(parameter_indices.begin(), parameter_indices.end());
|
||||
if (adjacent_find(parameter_indices.begin(), parameter_indices.end()) !=
|
||||
parameter_indices.end()) {
|
||||
std::string parameter_block_description;
|
||||
for (int j = 0; j < num_parameter_blocks; ++j) {
|
||||
ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
|
||||
parameter_block_description +=
|
||||
parameter_block->ToString() + "\n";
|
||||
}
|
||||
LOG(FATAL) << "Ceres internal error: "
|
||||
<< "Duplicate parameter blocks detected in a cost function. "
|
||||
<< "This should never happen. Please report this to "
|
||||
<< "the Ceres developers.\n"
|
||||
<< "Residual Block: " << residual_block->ToString() << "\n"
|
||||
<< "Parameter Blocks: " << parameter_block_description;
|
||||
}
|
||||
CHECK(unique(parameter_indices.begin(), parameter_indices.end()) ==
|
||||
parameter_indices.end())
|
||||
<< "Ceres internal error: "
|
||||
<< "Duplicate parameter blocks detected in a cost function. "
|
||||
<< "This should never happen. Please report this to "
|
||||
<< "the Ceres developers.";
|
||||
|
||||
// Update the row indices.
|
||||
const int num_residuals = residual_block->NumResiduals();
|
||||
|
23 changes: extern/ceres/internal/ceres/covariance.cc (vendored)
@@ -38,7 +38,6 @@
|
||||
|
||||
namespace ceres {
|
||||
|
||||
using std::make_pair;
|
||||
using std::pair;
|
||||
using std::vector;
|
||||
|
||||
@@ -55,12 +54,6 @@ bool Covariance::Compute(
|
||||
return impl_->Compute(covariance_blocks, problem->problem_impl_.get());
|
||||
}
|
||||
|
||||
bool Covariance::Compute(
|
||||
const vector<const double*>& parameter_blocks,
|
||||
Problem* problem) {
|
||||
return impl_->Compute(parameter_blocks, problem->problem_impl_.get());
|
||||
}
|
||||
|
||||
bool Covariance::GetCovarianceBlock(const double* parameter_block1,
|
||||
const double* parameter_block2,
|
||||
double* covariance_block) const {
|
||||
@@ -80,20 +73,4 @@ bool Covariance::GetCovarianceBlockInTangentSpace(
|
||||
covariance_block);
|
||||
}
|
||||
|
||||
bool Covariance::GetCovarianceMatrix(
|
||||
const vector<const double*>& parameter_blocks,
|
||||
double* covariance_matrix) {
|
||||
return impl_->GetCovarianceMatrixInTangentOrAmbientSpace(parameter_blocks,
|
||||
true, // ambient
|
||||
covariance_matrix);
|
||||
}
|
||||
|
||||
bool Covariance::GetCovarianceMatrixInTangentSpace(
|
||||
const std::vector<const double *>& parameter_blocks,
|
||||
double *covariance_matrix) {
|
||||
return impl_->GetCovarianceMatrixInTangentOrAmbientSpace(parameter_blocks,
|
||||
false, // tangent
|
||||
covariance_matrix);
|
||||
}
|
||||
|
||||
} // namespace ceres
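For context (not part of the diff), the parameter_blocks overload of Compute shown in this hunk is a convenience wrapper: it is equivalent to enumerating every upper-triangular pair of blocks yourself and calling the pair-based overload; blocks, covariance and problem are illustrative names:

    std::vector<std::pair<const double*, const double*> > covariance_blocks;
    for (size_t i = 0; i < blocks.size(); ++i) {
      for (size_t j = i; j < blocks.size(); ++j) {
        covariance_blocks.push_back(std::make_pair(blocks[i], blocks[j]));
      }
    }
    covariance.Compute(covariance_blocks, &problem);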
|
||||
|
172 changes: extern/ceres/internal/ceres/covariance_impl.cc (vendored)
@@ -36,8 +36,6 @@
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstdlib>
|
||||
#include <numeric>
|
||||
#include <sstream>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
@@ -45,7 +43,6 @@
|
||||
#include "Eigen/SparseQR"
|
||||
#include "Eigen/SVD"
|
||||
|
||||
#include "ceres/collections_port.h"
|
||||
#include "ceres/compressed_col_sparse_matrix_utils.h"
|
||||
#include "ceres/compressed_row_sparse_matrix.h"
|
||||
#include "ceres/covariance.h"
|
||||
@@ -54,7 +51,6 @@
|
||||
#include "ceres/map_util.h"
|
||||
#include "ceres/parameter_block.h"
|
||||
#include "ceres/problem_impl.h"
|
||||
#include "ceres/residual_block.h"
|
||||
#include "ceres/suitesparse.h"
|
||||
#include "ceres/wall_time.h"
|
||||
#include "glog/logging.h"
|
||||
@@ -65,7 +61,6 @@ namespace internal {
|
||||
using std::make_pair;
|
||||
using std::map;
|
||||
using std::pair;
|
||||
using std::sort;
|
||||
using std::swap;
|
||||
using std::vector;
|
||||
|
||||
@@ -91,38 +86,8 @@ CovarianceImpl::CovarianceImpl(const Covariance::Options& options)
|
||||
CovarianceImpl::~CovarianceImpl() {
|
||||
}
|
||||
|
||||
template <typename T> void CheckForDuplicates(vector<T> blocks) {
|
||||
sort(blocks.begin(), blocks.end());
|
||||
typename vector<T>::iterator it =
|
||||
std::adjacent_find(blocks.begin(), blocks.end());
|
||||
if (it != blocks.end()) {
|
||||
// In case there are duplicates, we search for their location.
|
||||
map<T, vector<int> > blocks_map;
|
||||
for (int i = 0; i < blocks.size(); ++i) {
|
||||
blocks_map[blocks[i]].push_back(i);
|
||||
}
|
||||
|
||||
std::ostringstream duplicates;
|
||||
while (it != blocks.end()) {
|
||||
duplicates << "(";
|
||||
for (int i = 0; i < blocks_map[*it].size() - 1; ++i) {
|
||||
duplicates << blocks_map[*it][i] << ", ";
|
||||
}
|
||||
duplicates << blocks_map[*it].back() << ")";
|
||||
it = std::adjacent_find(it + 1, blocks.end());
|
||||
if (it < blocks.end()) {
|
||||
duplicates << " and ";
|
||||
}
|
||||
}
|
||||
|
||||
LOG(FATAL) << "Covariance::Compute called with duplicate blocks at "
|
||||
<< "indices " << duplicates.str();
|
||||
}
|
||||
}
|
||||
|
||||
bool CovarianceImpl::Compute(const CovarianceBlocks& covariance_blocks,
|
||||
ProblemImpl* problem) {
|
||||
CheckForDuplicates<pair<const double*, const double*> >(covariance_blocks);
|
||||
problem_ = problem;
|
||||
parameter_block_to_row_index_.clear();
|
||||
covariance_matrix_.reset(NULL);
|
||||
@@ -132,20 +97,6 @@ bool CovarianceImpl::Compute(const CovarianceBlocks& covariance_blocks,
|
||||
return is_valid_;
|
||||
}
|
||||
|
||||
bool CovarianceImpl::Compute(const vector<const double*>& parameter_blocks,
|
||||
ProblemImpl* problem) {
|
||||
CheckForDuplicates<const double*>(parameter_blocks);
|
||||
CovarianceBlocks covariance_blocks;
|
||||
for (int i = 0; i < parameter_blocks.size(); ++i) {
|
||||
for (int j = i; j < parameter_blocks.size(); ++j) {
|
||||
covariance_blocks.push_back(make_pair(parameter_blocks[i],
|
||||
parameter_blocks[j]));
|
||||
}
|
||||
}
|
||||
|
||||
return Compute(covariance_blocks, problem);
|
||||
}
|
||||
|
||||
bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
|
||||
const double* original_parameter_block1,
|
||||
const double* original_parameter_block2,
|
||||
@@ -169,17 +120,9 @@ bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
|
||||
ParameterBlock* block2 =
|
||||
FindOrDie(parameter_map,
|
||||
const_cast<double*>(original_parameter_block2));
|
||||
|
||||
const int block1_size = block1->Size();
|
||||
const int block2_size = block2->Size();
|
||||
const int block1_local_size = block1->LocalSize();
|
||||
const int block2_local_size = block2->LocalSize();
|
||||
if (!lift_covariance_to_ambient_space) {
|
||||
MatrixRef(covariance_block, block1_local_size, block2_local_size)
|
||||
.setZero();
|
||||
} else {
|
||||
MatrixRef(covariance_block, block1_size, block2_size).setZero();
|
||||
}
|
||||
MatrixRef(covariance_block, block1_size, block2_size).setZero();
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -297,94 +240,6 @@ bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CovarianceImpl::GetCovarianceMatrixInTangentOrAmbientSpace(
|
||||
const vector<const double*>& parameters,
|
||||
bool lift_covariance_to_ambient_space,
|
||||
double* covariance_matrix) const {
|
||||
CHECK(is_computed_)
|
||||
<< "Covariance::GetCovarianceMatrix called before Covariance::Compute";
|
||||
CHECK(is_valid_)
|
||||
<< "Covariance::GetCovarianceMatrix called when Covariance::Compute "
|
||||
<< "returned false.";
|
||||
|
||||
const ProblemImpl::ParameterMap& parameter_map = problem_->parameter_map();
|
||||
// For OpenMP compatibility we need to define these vectors in advance
|
||||
const int num_parameters = parameters.size();
|
||||
vector<int> parameter_sizes;
|
||||
vector<int> cum_parameter_size;
|
||||
parameter_sizes.reserve(num_parameters);
|
||||
cum_parameter_size.resize(num_parameters + 1);
|
||||
cum_parameter_size[0] = 0;
|
||||
for (int i = 0; i < num_parameters; ++i) {
|
||||
ParameterBlock* block =
|
||||
FindOrDie(parameter_map, const_cast<double*>(parameters[i]));
|
||||
if (lift_covariance_to_ambient_space) {
|
||||
parameter_sizes.push_back(block->Size());
|
||||
} else {
|
||||
parameter_sizes.push_back(block->LocalSize());
|
||||
}
|
||||
}
|
||||
std::partial_sum(parameter_sizes.begin(), parameter_sizes.end(),
|
||||
cum_parameter_size.begin() + 1);
|
||||
const int max_covariance_block_size =
|
||||
*std::max_element(parameter_sizes.begin(), parameter_sizes.end());
|
||||
const int covariance_size = cum_parameter_size.back();
|
||||
|
||||
// Assemble the blocks in the covariance matrix.
|
||||
MatrixRef covariance(covariance_matrix, covariance_size, covariance_size);
|
||||
const int num_threads = options_.num_threads;
|
||||
scoped_array<double> workspace(
|
||||
new double[num_threads * max_covariance_block_size *
|
||||
max_covariance_block_size]);
|
||||
|
||||
bool success = true;
|
||||
|
||||
// The collapse() directive is only supported in OpenMP 3.0 and higher. OpenMP
|
||||
// 3.0 was released in May 2008 (hence the version number).
|
||||
#if _OPENMP >= 200805
|
||||
# pragma omp parallel for num_threads(num_threads) schedule(dynamic) collapse(2)
|
||||
#else
|
||||
# pragma omp parallel for num_threads(num_threads) schedule(dynamic)
|
||||
#endif
|
||||
for (int i = 0; i < num_parameters; ++i) {
|
||||
for (int j = 0; j < num_parameters; ++j) {
|
||||
// The second loop can't start from j = i for compatibility with OpenMP
|
||||
// collapse command. The conditional serves as a workaround
|
||||
if (j >= i) {
|
||||
int covariance_row_idx = cum_parameter_size[i];
|
||||
int covariance_col_idx = cum_parameter_size[j];
|
||||
int size_i = parameter_sizes[i];
|
||||
int size_j = parameter_sizes[j];
|
||||
#ifdef CERES_USE_OPENMP
|
||||
int thread_id = omp_get_thread_num();
|
||||
#else
|
||||
int thread_id = 0;
|
||||
#endif
|
||||
double* covariance_block =
|
||||
workspace.get() +
|
||||
thread_id * max_covariance_block_size * max_covariance_block_size;
|
||||
if (!GetCovarianceBlockInTangentOrAmbientSpace(
|
||||
parameters[i], parameters[j], lift_covariance_to_ambient_space,
|
||||
covariance_block)) {
|
||||
success = false;
|
||||
}
|
||||
|
||||
covariance.block(covariance_row_idx, covariance_col_idx,
|
||||
size_i, size_j) =
|
||||
MatrixRef(covariance_block, size_i, size_j);
|
||||
|
||||
if (i != j) {
|
||||
covariance.block(covariance_col_idx, covariance_row_idx,
|
||||
size_j, size_i) =
|
||||
MatrixRef(covariance_block, size_i, size_j).transpose();
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return success;
|
||||
}
|
||||
|
||||
// Determine the sparsity pattern of the covariance matrix based on
|
||||
// the block pairs requested by the user.
|
||||
bool CovarianceImpl::ComputeCovarianceSparsity(
|
||||
@@ -397,28 +252,18 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
|
||||
vector<double*> all_parameter_blocks;
|
||||
problem->GetParameterBlocks(&all_parameter_blocks);
|
||||
const ProblemImpl::ParameterMap& parameter_map = problem->parameter_map();
|
||||
HashSet<ParameterBlock*> parameter_blocks_in_use;
|
||||
vector<ResidualBlock*> residual_blocks;
|
||||
problem->GetResidualBlocks(&residual_blocks);
|
||||
|
||||
for (int i = 0; i < residual_blocks.size(); ++i) {
|
||||
ResidualBlock* residual_block = residual_blocks[i];
|
||||
parameter_blocks_in_use.insert(residual_block->parameter_blocks(),
|
||||
residual_block->parameter_blocks() +
|
||||
residual_block->NumParameterBlocks());
|
||||
}
|
||||
|
||||
constant_parameter_blocks_.clear();
|
||||
vector<double*>& active_parameter_blocks =
|
||||
evaluate_options_.parameter_blocks;
|
||||
active_parameter_blocks.clear();
|
||||
for (int i = 0; i < all_parameter_blocks.size(); ++i) {
|
||||
double* parameter_block = all_parameter_blocks[i];
|
||||
|
||||
ParameterBlock* block = FindOrDie(parameter_map, parameter_block);
|
||||
if (!block->IsConstant() && (parameter_blocks_in_use.count(block) > 0)) {
|
||||
active_parameter_blocks.push_back(parameter_block);
|
||||
} else {
|
||||
if (block->IsConstant()) {
|
||||
constant_parameter_blocks_.insert(parameter_block);
|
||||
} else {
|
||||
active_parameter_blocks.push_back(parameter_block);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -541,8 +386,8 @@ bool CovarianceImpl::ComputeCovarianceValues() {
|
||||
switch (options_.algorithm_type) {
|
||||
case DENSE_SVD:
|
||||
return ComputeCovarianceValuesUsingDenseSVD();
|
||||
case SUITE_SPARSE_QR:
|
||||
#ifndef CERES_NO_SUITESPARSE
|
||||
case SUITE_SPARSE_QR:
|
||||
return ComputeCovarianceValuesUsingSuiteSparseQR();
|
||||
#else
|
||||
LOG(ERROR) << "SuiteSparse is required to use the "
|
||||
@@ -779,10 +624,7 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD() {
|
||||
if (automatic_truncation) {
|
||||
break;
|
||||
} else {
|
||||
LOG(ERROR) << "Error: Covariance matrix is near rank deficient "
|
||||
<< "and the user did not specify a non-zero"
|
||||
<< "Covariance::Options::null_space_rank "
|
||||
<< "to enable the computation of a Pseudo-Inverse. "
|
||||
LOG(ERROR) << "Cholesky factorization of J'J is not reliable. "
|
||||
<< "Reciprocal condition number: "
|
||||
<< singular_value_ratio * singular_value_ratio << " "
|
||||
<< "min_reciprocal_condition_number: "
|
||||
|
@@ -55,21 +55,12 @@ class CovarianceImpl {
|
||||
const double*> >& covariance_blocks,
|
||||
ProblemImpl* problem);
|
||||
|
||||
bool Compute(
|
||||
const std::vector<const double*>& parameter_blocks,
|
||||
ProblemImpl* problem);
|
||||
|
||||
bool GetCovarianceBlockInTangentOrAmbientSpace(
|
||||
const double* parameter_block1,
|
||||
const double* parameter_block2,
|
||||
bool lift_covariance_to_ambient_space,
|
||||
double* covariance_block) const;
|
||||
|
||||
bool GetCovarianceMatrixInTangentOrAmbientSpace(
|
||||
const std::vector<const double*>& parameters,
|
||||
bool lift_covariance_to_ambient_space,
|
||||
double *covariance_matrix) const;
|
||||
|
||||
bool ComputeCovarianceSparsity(
|
||||
const std::vector<std::pair<const double*,
|
||||
const double*> >& covariance_blocks,
|
||||
|
276 changes: extern/ceres/internal/ceres/gradient_checker.cc (vendored)
@@ -1,276 +0,0 @@
|
||||
// Ceres Solver - A fast non-linear least squares minimizer
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// http://ceres-solver.org/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
// POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Authors: wjr@google.com (William Rucklidge),
|
||||
// keir@google.com (Keir Mierle),
|
||||
// dgossow@google.com (David Gossow)
|
||||
|
||||
#include "ceres/gradient_checker.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
#include <numeric>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/is_close.h"
|
||||
#include "ceres/stringprintf.h"
|
||||
#include "ceres/types.h"
|
||||
|
||||
namespace ceres {
|
||||
|
||||
using internal::IsClose;
|
||||
using internal::StringAppendF;
|
||||
using internal::StringPrintf;
|
||||
using std::string;
|
||||
using std::vector;
|
||||
|
||||
namespace {
|
||||
// Evaluate the cost function and transform the returned Jacobians to
|
||||
// the local space of the respective local parameterizations.
|
||||
bool EvaluateCostFunction(
|
||||
const ceres::CostFunction* function,
|
||||
double const* const * parameters,
|
||||
const std::vector<const ceres::LocalParameterization*>&
|
||||
local_parameterizations,
|
||||
Vector* residuals,
|
||||
std::vector<Matrix>* jacobians,
|
||||
std::vector<Matrix>* local_jacobians) {
|
||||
CHECK_NOTNULL(residuals);
|
||||
CHECK_NOTNULL(jacobians);
|
||||
CHECK_NOTNULL(local_jacobians);
|
||||
|
||||
const vector<int32>& block_sizes = function->parameter_block_sizes();
|
||||
const int num_parameter_blocks = block_sizes.size();
|
||||
|
||||
// Allocate Jacobian matrices in local space.
|
||||
local_jacobians->resize(num_parameter_blocks);
|
||||
vector<double*> local_jacobian_data(num_parameter_blocks);
|
||||
for (int i = 0; i < num_parameter_blocks; ++i) {
|
||||
int block_size = block_sizes.at(i);
|
||||
if (local_parameterizations.at(i) != NULL) {
|
||||
block_size = local_parameterizations.at(i)->LocalSize();
|
||||
}
|
||||
local_jacobians->at(i).resize(function->num_residuals(), block_size);
|
||||
local_jacobians->at(i).setZero();
|
||||
local_jacobian_data.at(i) = local_jacobians->at(i).data();
|
||||
}
|
||||
|
||||
// Allocate Jacobian matrices in global space.
|
||||
jacobians->resize(num_parameter_blocks);
|
||||
vector<double*> jacobian_data(num_parameter_blocks);
|
||||
for (int i = 0; i < num_parameter_blocks; ++i) {
|
||||
jacobians->at(i).resize(function->num_residuals(), block_sizes.at(i));
|
||||
jacobians->at(i).setZero();
|
||||
jacobian_data.at(i) = jacobians->at(i).data();
|
||||
}
|
||||
|
||||
// Compute residuals & jacobians.
|
||||
CHECK_NE(0, function->num_residuals());
|
||||
residuals->resize(function->num_residuals());
|
||||
residuals->setZero();
|
||||
if (!function->Evaluate(parameters, residuals->data(),
|
||||
jacobian_data.data())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Convert Jacobians from global to local space.
|
||||
for (size_t i = 0; i < local_jacobians->size(); ++i) {
|
||||
if (local_parameterizations.at(i) == NULL) {
|
||||
local_jacobians->at(i) = jacobians->at(i);
|
||||
} else {
|
||||
int global_size = local_parameterizations.at(i)->GlobalSize();
|
||||
int local_size = local_parameterizations.at(i)->LocalSize();
|
||||
CHECK_EQ(jacobians->at(i).cols(), global_size);
|
||||
Matrix global_J_local(global_size, local_size);
|
||||
local_parameterizations.at(i)->ComputeJacobian(
|
||||
parameters[i], global_J_local.data());
|
||||
local_jacobians->at(i) = jacobians->at(i) * global_J_local;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
} // namespace
|
||||
|
||||
GradientChecker::GradientChecker(
|
||||
const CostFunction* function,
|
||||
const vector<const LocalParameterization*>* local_parameterizations,
|
||||
const NumericDiffOptions& options) :
|
||||
function_(function) {
|
||||
CHECK_NOTNULL(function);
|
||||
if (local_parameterizations != NULL) {
|
||||
local_parameterizations_ = *local_parameterizations;
|
||||
} else {
|
||||
local_parameterizations_.resize(function->parameter_block_sizes().size(),
|
||||
NULL);
|
||||
}
|
||||
DynamicNumericDiffCostFunction<CostFunction, CENTRAL>*
|
||||
finite_diff_cost_function =
|
||||
new DynamicNumericDiffCostFunction<CostFunction, CENTRAL>(
|
||||
function, DO_NOT_TAKE_OWNERSHIP, options);
|
||||
finite_diff_cost_function_.reset(finite_diff_cost_function);
|
||||
|
||||
const vector<int32>& parameter_block_sizes =
|
||||
function->parameter_block_sizes();
|
||||
const int num_parameter_blocks = parameter_block_sizes.size();
|
||||
for (int i = 0; i < num_parameter_blocks; ++i) {
|
||||
finite_diff_cost_function->AddParameterBlock(parameter_block_sizes[i]);
|
||||
}
|
||||
finite_diff_cost_function->SetNumResiduals(function->num_residuals());
|
||||
}
|
||||
|
||||
bool GradientChecker::Probe(double const* const * parameters,
|
||||
double relative_precision,
|
||||
ProbeResults* results_param) const {
|
||||
int num_residuals = function_->num_residuals();
|
||||
|
||||
// Make sure that we have a place to store results, no matter if the user has
|
||||
// provided an output argument.
|
||||
ProbeResults* results;
|
||||
ProbeResults results_local;
|
||||
if (results_param != NULL) {
|
||||
results = results_param;
|
||||
results->residuals.resize(0);
|
||||
results->jacobians.clear();
|
||||
results->numeric_jacobians.clear();
|
||||
results->local_jacobians.clear();
|
||||
results->local_numeric_jacobians.clear();
|
||||
results->error_log.clear();
|
||||
} else {
|
||||
results = &results_local;
|
||||
}
|
||||
results->maximum_relative_error = 0.0;
|
||||
results->return_value = true;
|
||||
|
||||
// Evaluate the derivative using the user supplied code.
vector<Matrix>& jacobians = results->jacobians;
vector<Matrix>& local_jacobians = results->local_jacobians;
if (!EvaluateCostFunction(function_, parameters, local_parameterizations_,
&results->residuals, &jacobians, &local_jacobians)) {
results->error_log = "Function evaluation with Jacobians failed.";
results->return_value = false;
}

// Evaluate the derivative using numeric derivatives.
vector<Matrix>& numeric_jacobians = results->numeric_jacobians;
vector<Matrix>& local_numeric_jacobians = results->local_numeric_jacobians;
Vector finite_diff_residuals;
if (!EvaluateCostFunction(finite_diff_cost_function_.get(), parameters,
local_parameterizations_, &finite_diff_residuals,
&numeric_jacobians, &local_numeric_jacobians)) {
results->error_log += "\nFunction evaluation with numerical "
"differentiation failed.";
results->return_value = false;
}

if (!results->return_value) {
return false;
}

for (int i = 0; i < num_residuals; ++i) {
if (!IsClose(
results->residuals[i],
finite_diff_residuals[i],
relative_precision,
NULL,
NULL)) {
results->error_log = "Function evaluation with and without Jacobians "
"resulted in different residuals.";
LOG(INFO) << results->residuals.transpose();
LOG(INFO) << finite_diff_residuals.transpose();
return false;
}
}

// See if any elements have relative error larger than the threshold.
int num_bad_jacobian_components = 0;
double& worst_relative_error = results->maximum_relative_error;
worst_relative_error = 0;

// Accumulate the error message for all the jacobians, since it won't get
// output if there are no bad jacobian components.
string error_log;
for (int k = 0; k < function_->parameter_block_sizes().size(); k++) {
StringAppendF(&error_log,
"========== "
"Jacobian for " "block %d: (%ld by %ld)) "
"==========\n",
k,
static_cast<long>(local_jacobians[k].rows()),
static_cast<long>(local_jacobians[k].cols()));
// The funny spacing creates appropriately aligned column headers.
error_log +=
" block row col user dx/dy num diff dx/dy "
"abs error relative error parameter residual\n";

for (int i = 0; i < local_jacobians[k].rows(); i++) {
for (int j = 0; j < local_jacobians[k].cols(); j++) {
double term_jacobian = local_jacobians[k](i, j);
double finite_jacobian = local_numeric_jacobians[k](i, j);
double relative_error, absolute_error;
bool bad_jacobian_entry =
!IsClose(term_jacobian,
finite_jacobian,
relative_precision,
&relative_error,
&absolute_error);
worst_relative_error = std::max(worst_relative_error, relative_error);

StringAppendF(&error_log,
"%6d %4d %4d %17g %17g %17g %17g %17g %17g",
k, i, j,
term_jacobian, finite_jacobian,
absolute_error, relative_error,
parameters[k][j],
results->residuals[i]);

if (bad_jacobian_entry) {
num_bad_jacobian_components++;
StringAppendF(
&error_log,
" ------ (%d,%d,%d) Relative error worse than %g",
k, i, j, relative_precision);
}
error_log += "\n";
}
}
}

// Since there were some bad errors, dump comprehensive debug info.
if (num_bad_jacobian_components) {
string header = StringPrintf("\nDetected %d bad Jacobian component(s). "
"Worst relative error was %g.\n",
num_bad_jacobian_components,
worst_relative_error);
results->error_log = header + "\n" + error_log;
return false;
}
return true;
}

} // namespace ceres
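The Probe() implementation above compares the user-supplied Jacobians against numeric-diff Jacobians and collects a per-entry report in error_log. Below is a minimal sketch of how the public ceres::GradientChecker API exercised by this series might be driven from a test; ExampleFunctor, the parameter value, and the 1e-9 tolerance are illustrative placeholders, not taken from this diff.

#include <vector>
#include "ceres/ceres.h"
#include "ceres/gradient_checker.h"

// Illustrative residual: r = x - 3 (one residual, one parameter block).
struct ExampleFunctor {
  template <typename T>
  bool operator()(const T* const x, T* residual) const {
    residual[0] = x[0] - T(3.0);
    return true;
  }
};

int main() {
  ceres::CostFunction* cost_function =
      new ceres::AutoDiffCostFunction<ExampleFunctor, 1, 1>(new ExampleFunctor);

  // One entry per parameter block; NULL is assumed here to mean "no local
  // parameterization" for that block.
  std::vector<const ceres::LocalParameterization*> local_parameterizations(1, NULL);

  ceres::NumericDiffOptions numeric_diff_options;
  ceres::GradientChecker checker(cost_function, &local_parameterizations,
                                 numeric_diff_options);

  double x[1] = {2.0};
  double* parameter_blocks[1] = {x};
  ceres::GradientChecker::ProbeResults results;
  if (!checker.Probe(parameter_blocks, 1e-9, &results)) {
    // results.error_log holds the per-entry comparison table built by Probe().
  }
  delete cost_function;
  return 0;
}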
@@ -26,8 +26,7 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Authors: keir@google.com (Keir Mierle),
// dgossow@google.com (David Gossow)
// Author: keir@google.com (Keir Mierle)

#include "ceres/gradient_checking_cost_function.h"

@@ -37,7 +36,7 @@
#include <string>
#include <vector>

#include "ceres/gradient_checker.h"
#include "ceres/cost_function.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/parameter_block.h"
@@ -60,25 +59,55 @@ using std::vector;

namespace {

// True if x and y have an absolute relative difference less than
// relative_precision and false otherwise. Stores the relative and absolute
// difference in relative/absolute_error if non-NULL.
bool IsClose(double x, double y, double relative_precision,
double *relative_error,
double *absolute_error) {
double local_absolute_error;
double local_relative_error;
if (!absolute_error) {
absolute_error = &local_absolute_error;
}
if (!relative_error) {
relative_error = &local_relative_error;
}
*absolute_error = abs(x - y);
*relative_error = *absolute_error / max(abs(x), abs(y));
if (x == 0 || y == 0) {
// If x or y is exactly zero, then relative difference doesn't have any
// meaning. Take the absolute difference instead.
*relative_error = *absolute_error;
}
return abs(*relative_error) < abs(relative_precision);
}

class GradientCheckingCostFunction : public CostFunction {
public:
GradientCheckingCostFunction(
const CostFunction* function,
const std::vector<const LocalParameterization*>* local_parameterizations,
const NumericDiffOptions& options,
double relative_precision,
const string& extra_info,
GradientCheckingIterationCallback* callback)
GradientCheckingCostFunction(const CostFunction* function,
const NumericDiffOptions& options,
double relative_precision,
const string& extra_info)
: function_(function),
gradient_checker_(function, local_parameterizations, options),
relative_precision_(relative_precision),
extra_info_(extra_info),
callback_(callback) {
CHECK_NOTNULL(callback_);
extra_info_(extra_info) {
DynamicNumericDiffCostFunction<CostFunction, CENTRAL>*
finite_diff_cost_function =
new DynamicNumericDiffCostFunction<CostFunction, CENTRAL>(
function,
DO_NOT_TAKE_OWNERSHIP,
options);

const vector<int32>& parameter_block_sizes =
function->parameter_block_sizes();
for (int i = 0; i < parameter_block_sizes.size(); ++i) {
finite_diff_cost_function->AddParameterBlock(parameter_block_sizes[i]);
}
*mutable_parameter_block_sizes() = parameter_block_sizes;
set_num_residuals(function->num_residuals());
finite_diff_cost_function->SetNumResiduals(num_residuals());
finite_diff_cost_function_.reset(finite_diff_cost_function);
}

virtual ~GradientCheckingCostFunction() { }
@@ -91,92 +120,133 @@ class GradientCheckingCostFunction : public CostFunction {
return function_->Evaluate(parameters, residuals, NULL);
}

GradientChecker::ProbeResults results;
bool okay = gradient_checker_.Probe(parameters,
relative_precision_,
&results);
int num_residuals = function_->num_residuals();

// If the cost function returned false, there's nothing we can say about
// the gradients.
if (results.return_value == false) {
// Make space for the jacobians of the two methods.
const vector<int32>& block_sizes = function_->parameter_block_sizes();
vector<Matrix> term_jacobians(block_sizes.size());
vector<Matrix> finite_difference_jacobians(block_sizes.size());
vector<double*> term_jacobian_pointers(block_sizes.size());
vector<double*> finite_difference_jacobian_pointers(block_sizes.size());
for (int i = 0; i < block_sizes.size(); i++) {
term_jacobians[i].resize(num_residuals, block_sizes[i]);
term_jacobian_pointers[i] = term_jacobians[i].data();
finite_difference_jacobians[i].resize(num_residuals, block_sizes[i]);
finite_difference_jacobian_pointers[i] =
finite_difference_jacobians[i].data();
}

// Evaluate the derivative using the user supplied code.
if (!function_->Evaluate(parameters,
residuals,
&term_jacobian_pointers[0])) {
LOG(WARNING) << "Function evaluation failed.";
return false;
}

// Copy the residuals.
const int num_residuals = function_->num_residuals();
MatrixRef(residuals, num_residuals, 1) = results.residuals;
// Evaluate the derivative using numeric derivatives.
finite_diff_cost_function_->Evaluate(
parameters,
residuals,
&finite_difference_jacobian_pointers[0]);

// Copy the original jacobian blocks into the jacobians array.
const vector<int32>& block_sizes = function_->parameter_block_sizes();
// See if any elements have relative error larger than the threshold.
int num_bad_jacobian_components = 0;
double worst_relative_error = 0;

// Accumulate the error message for all the jacobians, since it won't get
// output if there are no bad jacobian components.
string m;
for (int k = 0; k < block_sizes.size(); k++) {
// Copy the original jacobian blocks into the jacobians array.
if (jacobians[k] != NULL) {
MatrixRef(jacobians[k],
results.jacobians[k].rows(),
results.jacobians[k].cols()) = results.jacobians[k];
term_jacobians[k].rows(),
term_jacobians[k].cols()) = term_jacobians[k];
}

StringAppendF(&m,
"========== "
"Jacobian for " "block %d: (%ld by %ld)) "
"==========\n",
k,
static_cast<long>(term_jacobians[k].rows()),
static_cast<long>(term_jacobians[k].cols()));
// The funny spacing creates appropriately aligned column headers.
m += " block row col user dx/dy num diff dx/dy "
"abs error relative error parameter residual\n";

for (int i = 0; i < term_jacobians[k].rows(); i++) {
for (int j = 0; j < term_jacobians[k].cols(); j++) {
double term_jacobian = term_jacobians[k](i, j);
double finite_jacobian = finite_difference_jacobians[k](i, j);
double relative_error, absolute_error;
bool bad_jacobian_entry =
!IsClose(term_jacobian,
finite_jacobian,
relative_precision_,
&relative_error,
&absolute_error);
worst_relative_error = max(worst_relative_error, relative_error);

StringAppendF(&m, "%6d %4d %4d %17g %17g %17g %17g %17g %17g",
k, i, j,
term_jacobian, finite_jacobian,
absolute_error, relative_error,
parameters[k][j],
residuals[i]);

if (bad_jacobian_entry) {
num_bad_jacobian_components++;
StringAppendF(
&m, " ------ (%d,%d,%d) Relative error worse than %g",
k, i, j, relative_precision_);
}
m += "\n";
}
}
}

if (!okay) {
std::string error_log = "Gradient Error detected!\nExtra info for "
"this residual: " + extra_info_ + "\n" + results.error_log;
callback_->SetGradientErrorDetected(error_log);
// Since there were some bad errors, dump comprehensive debug info.
if (num_bad_jacobian_components) {
string header = StringPrintf("Detected %d bad jacobian component(s). "
"Worst relative error was %g.\n",
num_bad_jacobian_components,
worst_relative_error);
if (!extra_info_.empty()) {
header += "Extra info for this residual: " + extra_info_ + "\n";
}
LOG(WARNING) << "\n" << header << m;
}
return true;
}

private:
const CostFunction* function_;
GradientChecker gradient_checker_;
internal::scoped_ptr<CostFunction> finite_diff_cost_function_;
double relative_precision_;
string extra_info_;
GradientCheckingIterationCallback* callback_;
};

} // namespace

GradientCheckingIterationCallback::GradientCheckingIterationCallback()
: gradient_error_detected_(false) {
}

CallbackReturnType GradientCheckingIterationCallback::operator()(
const IterationSummary& summary) {
if (gradient_error_detected_) {
LOG(ERROR)<< "Gradient error detected. Terminating solver.";
return SOLVER_ABORT;
}
return SOLVER_CONTINUE;
}
void GradientCheckingIterationCallback::SetGradientErrorDetected(
std::string& error_log) {
mutex_.Lock();
gradient_error_detected_ = true;
error_log_ += "\n" + error_log;
mutex_.Unlock();
}

CostFunction* CreateGradientCheckingCostFunction(
const CostFunction* cost_function,
const std::vector<const LocalParameterization*>* local_parameterizations,
CostFunction *CreateGradientCheckingCostFunction(
const CostFunction *cost_function,
double relative_step_size,
double relative_precision,
const std::string& extra_info,
GradientCheckingIterationCallback* callback) {
const string& extra_info) {
NumericDiffOptions numeric_diff_options;
numeric_diff_options.relative_step_size = relative_step_size;

return new GradientCheckingCostFunction(cost_function,
local_parameterizations,
numeric_diff_options,
relative_precision, extra_info,
callback);
relative_precision,
extra_info);
}

ProblemImpl* CreateGradientCheckingProblemImpl(
ProblemImpl* problem_impl,
double relative_step_size,
double relative_precision,
GradientCheckingIterationCallback* callback) {
CHECK_NOTNULL(callback);
ProblemImpl* CreateGradientCheckingProblemImpl(ProblemImpl* problem_impl,
double relative_step_size,
double relative_precision) {
// We create new CostFunctions by wrapping the original CostFunction
// in a gradient checking CostFunction. So its okay for the
// ProblemImpl to take ownership of it and destroy it. The
@@ -190,9 +260,6 @@ ProblemImpl* CreateGradientCheckingProblemImpl(
gradient_checking_problem_options.local_parameterization_ownership =
DO_NOT_TAKE_OWNERSHIP;

NumericDiffOptions numeric_diff_options;
numeric_diff_options.relative_step_size = relative_step_size;

ProblemImpl* gradient_checking_problem_impl = new ProblemImpl(
gradient_checking_problem_options);

@@ -227,26 +294,19 @@ ProblemImpl* CreateGradientCheckingProblemImpl(
string extra_info = StringPrintf(
"Residual block id %d; depends on parameters [", i);
vector<double*> parameter_blocks;
vector<const LocalParameterization*> local_parameterizations;
parameter_blocks.reserve(residual_block->NumParameterBlocks());
local_parameterizations.reserve(residual_block->NumParameterBlocks());
for (int j = 0; j < residual_block->NumParameterBlocks(); ++j) {
ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
parameter_blocks.push_back(parameter_block->mutable_user_state());
StringAppendF(&extra_info, "%p", parameter_block->mutable_user_state());
extra_info += (j < residual_block->NumParameterBlocks() - 1) ? ", " : "]";
local_parameterizations.push_back(problem_impl->GetParameterization(
parameter_block->mutable_user_state()));
}

// Wrap the original CostFunction in a GradientCheckingCostFunction.
CostFunction* gradient_checking_cost_function =
new GradientCheckingCostFunction(residual_block->cost_function(),
&local_parameterizations,
numeric_diff_options,
relative_precision,
extra_info,
callback);
CreateGradientCheckingCostFunction(residual_block->cost_function(),
relative_step_size,
relative_precision,
extra_info);

// The const_cast is necessary because
// ProblemImpl::AddResidualBlock can potentially take ownership of
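The GradientCheckingIterationCallback above plugs into Ceres' generic IterationCallback mechanism, where returning SOLVER_ABORT from operator() ends the solve. The following is a hedged sketch of that mechanism in user code; the AbortOnErrorCallback class and the commented setup are invented for illustration and are not part of this diff.

#include "ceres/ceres.h"

// Illustrative callback that aborts the solve once a flag has been set,
// mirroring how GradientCheckingIterationCallback stops the solver.
class AbortOnErrorCallback : public ceres::IterationCallback {
 public:
  AbortOnErrorCallback() : error_detected_(false) {}
  virtual ~AbortOnErrorCallback() {}
  virtual ceres::CallbackReturnType operator()(
      const ceres::IterationSummary& summary) {
    return error_detected_ ? ceres::SOLVER_ABORT : ceres::SOLVER_CONTINUE;
  }
  void SetErrorDetected() { error_detected_ = true; }

 private:
  bool error_detected_;
};

// Registering it (problem setup omitted):
//   AbortOnErrorCallback callback;
//   ceres::Solver::Options options;
//   options.callbacks.push_back(&callback);
//   ceres::Solve(options, &problem, &summary);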
@@ -26,8 +26,7 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Authors: keir@google.com (Keir Mierle),
// dgossow@google.com (David Gossow)
// Author: keir@google.com (Keir Mierle)

#ifndef CERES_INTERNAL_GRADIENT_CHECKING_COST_FUNCTION_H_
#define CERES_INTERNAL_GRADIENT_CHECKING_COST_FUNCTION_H_
@@ -35,76 +34,50 @@
#include <string>

#include "ceres/cost_function.h"
#include "ceres/iteration_callback.h"
#include "ceres/local_parameterization.h"
#include "ceres/mutex.h"

namespace ceres {
namespace internal {

class ProblemImpl;

// Callback that collects information about gradient checking errors, and
// will abort the solve as soon as an error occurs.
class GradientCheckingIterationCallback : public IterationCallback {
public:
GradientCheckingIterationCallback();

// Will return SOLVER_CONTINUE until a gradient error has been detected,
// then return SOLVER_ABORT.
virtual CallbackReturnType operator()(const IterationSummary& summary);

// Notify this that a gradient error has occurred (thread safe).
void SetGradientErrorDetected(std::string& error_log);

// Retrieve error status (not thread safe).
bool gradient_error_detected() const { return gradient_error_detected_; }
const std::string& error_log() const { return error_log_; }
private:
bool gradient_error_detected_;
std::string error_log_;
// Mutex protecting member variables.
ceres::internal::Mutex mutex_;
};

// Creates a CostFunction that checks the Jacobians that cost_function computes
// with finite differences. This API is only intended for unit tests that intend
// to check the functionality of the GradientCheckingCostFunction
// implementation directly.
CostFunction* CreateGradientCheckingCostFunction(
const CostFunction* cost_function,
const std::vector<const LocalParameterization*>* local_parameterizations,
double relative_step_size,
double relative_precision,
const std::string& extra_info,
GradientCheckingIterationCallback* callback);

// Create a new ProblemImpl object from the input problem_impl, where all
// cost functions are wrapped so that each time their Evaluate method is called,
// an additional check is performed that compares the Jacobians computed by
// the original cost function with alternative Jacobians computed using
// numerical differentiation. If local parameterizations are given for any
// parameters, the Jacobians will be compared in the local space instead of the
// ambient space. For details on the gradient checking procedure, see the
// documentation of the GradientChecker class. If an error is detected in any
// iteration, the respective cost function will notify the
// GradientCheckingIterationCallback.
// Creates a CostFunction that checks the jacobians that cost_function computes
// with finite differences. Bad results are logged; required precision is
// controlled by relative_precision and the numeric differentiation step size is
// controlled with relative_step_size. See solver.h for a better explanation of
// relative_step_size. Caller owns result.
//
// The caller owns the returned ProblemImpl object.
// The condition enforced is that
//
// (J_actual(i, j) - J_numeric(i, j))
// ------------------------------------ < relative_precision
// max(J_actual(i, j), J_numeric(i, j))
//
// where J_actual(i, j) is the jacobian as computed by the supplied cost
// function (by the user) and J_numeric is the jacobian as computed by finite
// differences.
//
// Note: This is quite inefficient and is intended only for debugging.
CostFunction* CreateGradientCheckingCostFunction(
const CostFunction* cost_function,
double relative_step_size,
double relative_precision,
const std::string& extra_info);

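To make the condition above concrete (numbers invented for illustration): an analytic Jacobian entry of 2.00 checked against a finite-difference estimate of 2.02 has an absolute error of 0.02 and a relative error of 0.02 / 2.02, roughly 0.0099, so it passes relative_precision = 1e-2 but fails 1e-3. A standalone sketch of that arithmetic:

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  const double j_actual = 2.00;   // Jacobian entry from the user's code
  const double j_numeric = 2.02;  // finite-difference estimate
  const double abs_err = std::fabs(j_actual - j_numeric);
  const double rel_err =
      abs_err / std::max(std::fabs(j_actual), std::fabs(j_numeric));
  // Prints roughly 0.02 and 0.0099: within a 1e-2 tolerance, outside 1e-3.
  std::printf("absolute error %g, relative error %g\n", abs_err, rel_err);
  return 0;
}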
// Create a new ProblemImpl object from the input problem_impl, where
// each CostFunctions in problem_impl are wrapped inside a
// GradientCheckingCostFunctions. This gives us a ProblemImpl object
// which checks its derivatives against estimates from numeric
// differentiation everytime a ResidualBlock is evaluated.
//
// relative_step_size and relative_precision are parameters to control
// the numeric differentiation and the relative tolerance between the
// jacobian computed by the CostFunctions in problem_impl and
// jacobians obtained by numerically differentiating them. See the
// documentation of 'numeric_derivative_relative_step_size' in solver.h for a
// better explanation.
ProblemImpl* CreateGradientCheckingProblemImpl(
ProblemImpl* problem_impl,
double relative_step_size,
double relative_precision,
GradientCheckingIterationCallback* callback);
// jacobians obtained by numerically differentiating them. For more
// details see the documentation for
// CreateGradientCheckingCostFunction above.
ProblemImpl* CreateGradientCheckingProblemImpl(ProblemImpl* problem_impl,
double relative_step_size,
double relative_precision);

} // namespace internal
} // namespace ceres
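In user code the wrapping declared above is normally enabled through Solver::Options rather than by calling these internal functions directly. A minimal sketch, assuming a ceres::Problem that has already been populated with residual blocks (the function name is a placeholder):

#include "ceres/ceres.h"

void SolveWithGradientChecking(ceres::Problem* problem) {
  ceres::Solver::Options options;
  options.check_gradients = true;                    // wrap each cost function
  options.gradient_check_relative_precision = 1e-8;  // the relative_precision above
  ceres::Solver::Summary summary;
  ceres::Solve(options, problem, &summary);
  // In the callback-based code path a detected gradient error aborts the
  // solve; in the older path it is only logged as a warning.
}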
@@ -84,12 +84,6 @@ Solver::Options GradientProblemSolverOptionsToSolverOptions(

} // namespace

bool GradientProblemSolver::Options::IsValid(std::string* error) const {
const Solver::Options solver_options =
GradientProblemSolverOptionsToSolverOptions(*this);
return solver_options.IsValid(error);
}

GradientProblemSolver::~GradientProblemSolver() {
}

@@ -105,6 +99,8 @@ void GradientProblemSolver::Solve(const GradientProblemSolver::Options& options,
using internal::SetSummaryFinalCost;

double start_time = WallTimeInSeconds();
Solver::Options solver_options =
GradientProblemSolverOptionsToSolverOptions(options);

*CHECK_NOTNULL(summary) = Summary();
summary->num_parameters = problem.NumParameters();
@@ -116,16 +112,14 @@ void GradientProblemSolver::Solve(const GradientProblemSolver::Options& options,
summary->nonlinear_conjugate_gradient_type = options.nonlinear_conjugate_gradient_type; // NOLINT

// Check validity
if (!options.IsValid(&summary->message)) {
if (!solver_options.IsValid(&summary->message)) {
LOG(ERROR) << "Terminating: " << summary->message;
return;
}

// TODO(sameeragarwal): This is a bit convoluted, we should be able
// to convert to minimizer options directly, but this will do for
// now.
Minimizer::Options minimizer_options =
Minimizer::Options(GradientProblemSolverOptionsToSolverOptions(options));
// Assuming that the parameter blocks in the program have been
Minimizer::Options minimizer_options;
minimizer_options = Minimizer::Options(solver_options);
minimizer_options.evaluator.reset(new GradientProblemEvaluator(problem));

scoped_ptr<IterationCallback> logging_callback;
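For context, GradientProblemSolver::Solve modified above is the entry point for minimizing a GradientProblem. A hedged usage sketch follows; the quadratic objective is invented for illustration and is not part of this diff.

#include <iostream>
#include "ceres/ceres.h"

// Minimize f(x) = 0.5 * x^2; the gradient is x.
class QuadraticFunction : public ceres::FirstOrderFunction {
 public:
  virtual ~QuadraticFunction() {}
  virtual bool Evaluate(const double* parameters,
                        double* cost,
                        double* gradient) const {
    const double x = parameters[0];
    cost[0] = 0.5 * x * x;
    if (gradient != NULL) {
      gradient[0] = x;
    }
    return true;
  }
  virtual int NumParameters() const { return 1; }
};

int main() {
  double parameters[1] = {2.0};
  ceres::GradientProblem problem(new QuadraticFunction);  // takes ownership
  ceres::GradientProblemSolver::Options options;
  ceres::GradientProblemSolver::Summary summary;
  ceres::Solve(options, problem, parameters, &summary);
  std::cout << summary.BriefReport() << "\n";
  return 0;
}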
59
extern/ceres/internal/ceres/is_close.cc
vendored
59
extern/ceres/internal/ceres/is_close.cc
vendored
@@ -1,59 +0,0 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2016 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Authors: keir@google.com (Keir Mierle), dgossow@google.com (David Gossow)

#include "ceres/is_close.h"

#include <algorithm>
#include <cmath>

namespace ceres {
namespace internal {
bool IsClose(double x, double y, double relative_precision,
double *relative_error,
double *absolute_error) {
double local_absolute_error;
double local_relative_error;
if (!absolute_error) {
absolute_error = &local_absolute_error;
}
if (!relative_error) {
relative_error = &local_relative_error;
}
*absolute_error = std::fabs(x - y);
*relative_error = *absolute_error / std::max(std::fabs(x), std::fabs(y));
if (x == 0 || y == 0) {
// If x or y is exactly zero, then relative difference doesn't have any
// meaning. Take the absolute difference instead.
*relative_error = *absolute_error;
}
return *relative_error < std::fabs(relative_precision);
}
} // namespace internal
} // namespace ceres
Some files were not shown because too many files have changed in this diff