
Compare commits


1 Commit

Author SHA1 Message Date
2038250c62 WIP changes to compiling interface directory as C++ code 2021-04-28 23:32:55 -05:00
969 changed files with 19379 additions and 39244 deletions

View File

@@ -255,7 +255,6 @@ ForEachMacros:
- SCULPT_VERTEX_DUPLICATES_AND_NEIGHBORS_ITER_BEGIN
- SCULPT_VERTEX_NEIGHBORS_ITER_BEGIN
- SEQ_ALL_BEGIN
- SEQ_ITERATOR_FOREACH
- SURFACE_QUAD_ITER_BEGIN
- foreach
- ED_screen_areas_iter

View File

@@ -29,7 +29,7 @@ set(BLOSC_EXTRA_ARGS
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
)
# Prevent blosc from including its own local copy of zlib in the object file
# Prevent blosc from including it's own local copy of zlib in the object file
# and cause linker errors with everybody else.
set(BLOSC_EXTRA_ARGS ${BLOSC_EXTRA_ARGS}
-DPREFER_EXTERNAL_ZLIB=ON

View File

@@ -43,7 +43,7 @@ set(JPEG_FILE libjpeg-turbo-${JPEG_VERSION}.tar.gz)
set(BOOST_VERSION 1.73.0)
set(BOOST_VERSION_NODOTS 1_73_0)
set(BOOST_VERSION_NODOTS_SHORT 1_73)
set(BOOST_URI https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_NODOTS}.tar.gz)
set(BOOST_URI https://dl.bintray.com/boostorg/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_NODOTS}.tar.gz)
set(BOOST_HASH 4036cd27ef7548b8d29c30ea10956196)
set(BOOST_HASH_TYPE MD5)
set(BOOST_FILE boost_${BOOST_VERSION_NODOTS}.tar.gz)
@@ -297,10 +297,10 @@ set(OPENJPEG_HASH 63f5a4713ecafc86de51bfad89cc07bb788e9bba24ebbf0c4ca637621aadb6
set(OPENJPEG_HASH_TYPE SHA256)
set(OPENJPEG_FILE openjpeg-v${OPENJPEG_VERSION}.tar.gz)
set(FFMPEG_VERSION 4.4)
set(FFMPEG_VERSION 4.2.3)
set(FFMPEG_URI http://ffmpeg.org/releases/ffmpeg-${FFMPEG_VERSION}.tar.bz2)
set(FFMPEG_HASH 42093549751b582cf0f338a21a3664f52e0a9fbe0d238d3c992005e493607d0e)
set(FFMPEG_HASH_TYPE SHA256)
set(FFMPEG_HASH 695fad11f3baf27784e24cb0e977b65a)
set(FFMPEG_HASH_TYPE MD5)
set(FFMPEG_FILE ffmpeg-${FFMPEG_VERSION}.tar.bz2)
set(FFTW_VERSION 3.3.8)

View File

@@ -37,7 +37,7 @@ if [ $USE_DEBUG_TRAP -ne 0 ]; then
trap 'err_report $LINENO' ERR
fi
# Noisy, show every line that runs with its line number.
# Noisy, show every line that runs with it's line number.
if [ $USE_DEBUG_LOG -ne 0 ]; then
PS4='\e[0;33m$(printf %4d ${LINENO}):\e\033[0m '
set -x
@@ -563,9 +563,9 @@ OIDN_SKIP=false
ISPC_VERSION="1.14.1"
FFMPEG_VERSION="4.4"
FFMPEG_VERSION_SHORT="4.4"
FFMPEG_VERSION_MIN="4.4"
FFMPEG_VERSION="4.2.3"
FFMPEG_VERSION_SHORT="4.2"
FFMPEG_VERSION_MIN="3.0"
FFMPEG_VERSION_MAX="5.0"
FFMPEG_FORCE_BUILD=false
FFMPEG_FORCE_REBUILD=false

View File

@@ -20,7 +20,7 @@
# ILMBASE_LIBRARIES - list of libraries to link against when using IlmBase.
# ILMBASE_FOUND - True if IlmBase was found.
# Other standard issue macros
# Other standarnd issue macros
include(FindPackageHandleStandardArgs)
include(FindPackageMessage)
include(SelectLibraryConfigurations)

View File

@@ -22,7 +22,7 @@
# These are defined by the FindIlmBase module.
# OPENEXR_FOUND - True if OpenEXR was found.
# Other standard issue macros
# Other standarnd issue macros
include(SelectLibraryConfigurations)
include(FindPackageHandleStandardArgs)
include(FindPackageMessage)

View File

@@ -1,4 +1,70 @@
Buildbot Configuration
=====================
Blender Buildbot
================
Files used by Buildbot's `compile-code` step.
Code signing
------------
Code signing is done as part of the INSTALL target, which makes it possible to sign
files which are aimed at a bundle and come from a non-signed source (such as
the libraries SVN).
This is achieved by specifying `worker_codesign.cmake` as a post-install script
run by CMake. This CMake script simply invokes a utility script written in
Python which takes care of the actual signing.
### Configuration
Client configuration doesn't need anything special, other than the variable
`SHARED_STORAGE_DIR` pointing to a location which is watched by the server.
This is done in the `config_builder.py` file and is stored in Git (which makes it
possible to have almost zero-configuration buildbot machines).
Server configuration requires copying `config_server_template.py` under the
name of `config_server.py` and tweaking the values, which are platform-specific.
#### Windows configuration
There are two things needed on Windows in order for code signing to work
(see the sketch after this list):
- `TIMESTAMP_AUTHORITY_URL`, which is most likely set to http://timestamp.digicert.com
- `CERTIFICATE_FILEPATH`, which is a full file path to a PKCS #12 key (.pfx).
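For illustration, a minimal `config_server.py` for Windows might look like the
sketch below. The variable names follow `config_server_template.py`; all values
are placeholders rather than real credentials:
```
from pathlib import Path

# Directory shared between the buildbot worker and the signing server.
SHARED_STORAGE_DIR = Path('Z:\\codesign')

# URL of the timestamping authority.
WIN_TIMESTAMP_AUTHORITY_URL = 'http://timestamp.digicert.com'

# Full path to the PKCS #12 certificate used for signing.
WIN_CERTIFICATE_FILEPATH = Path('C:\\Secret\\Blender.pfx')
```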
## Tips
### Self-signed certificate on Windows
It is easiest to test the configuration using a self-signed certificate.
The certificate manipulation utilities come with the Windows SDK.
Unfortunately, they are not added to PATH by default. Here is an example of how
to make sure they are easily available:
```
set PATH=C:\Program Files (x86)\Windows Kits\10\App Certification Kit;%PATH%
set PATH=C:\Program Files (x86)\Windows Kits\10\bin\10.0.18362.0\x64;%PATH%
```
Generate a CA:
```
makecert -r -pe -n "CN=Blender Test CA" -ss CA -sr CurrentUser -a sha256 ^
-cy authority -sky signature -sv BlenderTestCA.pvk BlenderTestCA.cer
```
Import the generated CA:
```
certutil -user -addstore Root BlenderTestCA.cer
```
Create a self-signed certificate and pack it into PKCS #12:
```
makecert -pe -n "CN=Blender Test SPC" -a sha256 -cy end ^
-sky signature ^
-ic BlenderTestCA.cer -iv BlenderTestCA.pvk ^
-sv BlenderTestSPC.pvk BlenderTestSPC.cer
pvk2pfx -pvk BlenderTestSPC.pvk -spc BlenderTestSPC.cer -pfx BlenderTestSPC.pfx
```

View File

@@ -0,0 +1,127 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import argparse
import os
import re
import subprocess
import sys
def is_tool(name):
"""Check whether `name` is on PATH and marked as executable."""
# from whichcraft import which
from shutil import which
return which(name) is not None
class Builder:
def __init__(self, name, branch, codesign):
self.name = name
self.branch = branch
self.is_release_branch = re.match("^blender-v(.*)-release$", branch) is not None
self.codesign = codesign
# Buildbot runs from build/ directory
self.blender_dir = os.path.abspath(os.path.join('..', 'blender.git'))
self.build_dir = os.path.abspath(os.path.join('..', 'build'))
self.install_dir = os.path.abspath(os.path.join('..', 'install'))
self.upload_dir = os.path.abspath(os.path.join('..', 'install'))
# Detect platform
if name.startswith('mac'):
self.platform = 'mac'
self.command_prefix = []
elif name.startswith('linux'):
self.platform = 'linux'
if is_tool('scl'):
self.command_prefix = ['scl', 'enable', 'devtoolset-9', '--']
else:
self.command_prefix = []
elif name.startswith('win'):
self.platform = 'win'
self.command_prefix = []
else:
raise ValueError('Unknown platform for builder ' + name)
# Always 64 bit now
self.bits = 64
def create_builder_from_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('builder_name')
parser.add_argument('branch', default='master', nargs='?')
parser.add_argument("--codesign", action="store_true")
args = parser.parse_args()
return Builder(args.builder_name, args.branch, args.codesign)
class VersionInfo:
def __init__(self, builder):
# Get version information
buildinfo_h = os.path.join(builder.build_dir, "source", "creator", "buildinfo.h")
blender_h = os.path.join(builder.blender_dir, "source", "blender", "blenkernel", "BKE_blender_version.h")
version_number = int(self._parse_header_file(blender_h, 'BLENDER_VERSION'))
version_number_patch = int(self._parse_header_file(blender_h, 'BLENDER_VERSION_PATCH'))
version_numbers = (version_number // 100, version_number % 100, version_number_patch)
self.short_version = "%d.%d" % (version_numbers[0], version_numbers[1])
self.version = "%d.%d.%d" % version_numbers
self.version_cycle = self._parse_header_file(blender_h, 'BLENDER_VERSION_CYCLE')
self.hash = self._parse_header_file(buildinfo_h, 'BUILD_HASH')[1:-1]
if self.version_cycle == "release":
# Final release
self.full_version = self.version
self.is_development_build = False
elif self.version_cycle == "rc":
# Release candidate
self.full_version = self.version + self.version_cycle
self.is_development_build = False
else:
# Development build
self.full_version = self.version + '-' + self.hash
self.is_development_build = True
def _parse_header_file(self, filename, define):
import re
regex = re.compile(r"^#\s*define\s+%s\s+(.*)" % define)
with open(filename, "r") as file:
for l in file:
match = regex.match(l)
if match:
return match.group(1)
return None
def call(cmd, env=None, exit_on_error=True):
print(' '.join(cmd))
# Flush to ensure correct order output on Windows.
sys.stdout.flush()
sys.stderr.flush()
retcode = subprocess.call(cmd, env=env)
if exit_on_error and retcode != 0:
sys.exit(retcode)
return retcode
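As a usage illustration, a worker-side script might combine these helpers
roughly as below; this is a sketch, and the `cmake --version` command is an
arbitrary stand-in for the actual build steps:
```
# Hypothetical worker-side usage of the helpers above.
builder = create_builder_from_arguments()
print('Building branch %s on %s' % (builder.branch, builder.platform))

# command_prefix transparently wraps commands (e.g. scl on Linux workers).
call(builder.command_prefix + ['cmake', '--version'])

# After a build has produced buildinfo.h, version info can be inspected.
info = VersionInfo(builder)
print('Blender %s (%s)' % (info.full_version, info.version_cycle))
```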

View File

@@ -0,0 +1,81 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from dataclasses import dataclass
from pathlib import Path
from typing import List
@dataclass
class AbsoluteAndRelativeFileName:
"""
Helper class which keeps track of absolute file path for a direct access and
corresponding relative path against given base.
The relative part is used to construct a file name within an archive which
contains files which are to be signed or which has been signed already
(depending on whether the archive is addressed to signing server or back
to the buildbot worker).
"""
# Base directory which is where relative_filepath is relative to.
base_dir: Path
# Full absolute path of the corresponding file.
absolute_filepath: Path
# Derived from full file path, contains part of the path which is relative
# to a desired base path.
relative_filepath: Path
def __init__(self, base_dir: Path, filepath: Path):
self.base_dir = base_dir
self.absolute_filepath = filepath.resolve()
self.relative_filepath = self.absolute_filepath.relative_to(
self.base_dir)
@classmethod
def from_path(cls, path: Path) -> 'AbsoluteAndRelativeFileName':
assert path.is_absolute()
assert path.is_file()
base_dir = path.parent
return AbsoluteAndRelativeFileName(base_dir, path)
@classmethod
def recursively_from_directory(cls, base_dir: Path) \
-> List['AbsoluteAndRelativeFileName']:
"""
Create list of AbsoluteAndRelativeFileName for all the files in the
given directory.
NOTE: Results will be pointing to resolved paths.
"""
assert base_dir.is_absolute()
assert base_dir.is_dir()
base_dir = base_dir.resolve()
result = []
for filename in base_dir.glob('**/*'):
if not filename.is_file():
continue
result.append(AbsoluteAndRelativeFileName(base_dir, filename))
return result
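A small usage sketch, assuming a hypothetical install directory:
```
from pathlib import Path

# Collect descriptors for every file under the install directory.
install_dir = Path('/tmp/blender-install').resolve()  # hypothetical path
files = AbsoluteAndRelativeFileName.recursively_from_directory(install_dir)
for file in files:
    # relative_filepath is what becomes the member name inside the archive.
    print(file.relative_filepath)
```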

View File

@@ -0,0 +1,245 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import dataclasses
import json
import os
from pathlib import Path
from typing import Optional
import codesign.util as util
class ArchiveStateError(Exception):
message: str
def __init__(self, message):
self.message = message
super().__init__(self.message)
@dataclasses.dataclass
class ArchiveState:
"""
Additional information (state) of the archive.
Includes information such as the expected file size of the archive file, in the
case the archive file was successfully created.
If the archive could not be created, this state will contain an error message
indicating details of the error.
"""
# Size in bytes of the corresponding archive.
file_size: Optional[int] = None
# Non-empty value indicates that an error has happened.
error_message: str = ''
def has_error(self) -> bool:
"""
Check whether the archive is in an error state.
"""
return bool(self.error_message)
def serialize_to_string(self) -> str:
payload = dataclasses.asdict(self)
return json.dumps(payload, sort_keys=True, indent=4)
def serialize_to_file(self, filepath: Path) -> None:
string = self.serialize_to_string()
filepath.write_text(string)
@classmethod
def deserialize_from_string(cls, string: str) -> 'ArchiveState':
try:
object_as_dict = json.loads(string)
except json.decoder.JSONDecodeError:
raise ArchiveStateError('Error parsing JSON')
return cls(**object_as_dict)
@classmethod
def deserialize_from_file(cls, filepath: Path):
string = filepath.read_text()
return cls.deserialize_from_string(string)
class ArchiveWithIndicator:
"""
The idea of this class is to wrap the logic which takes care of keeping
track of the name of an archive and the synchronization routines between the
buildbot worker and the signing server.
The synchronization is done by creating a special file after the
archive file is known to be ready for access.
"""
# Base directory where the archive is stored (basically, the dirname of
# the absolute archive file name).
#
# For example, 'X:\\TEMP\\'.
base_dir: Path
# Absolute file name of the archive.
#
# For example, 'X:\\TEMP\\FOO.ZIP'.
archive_filepath: Path
# Absolute name of a file which acts as an indication of the fact that the
# archive is ready and is available for access.
#
# This is how synchronization between buildbot worker and signing server is
# done:
# - First, the archive is created under archive_filepath name.
# - Second, the indication file is created under ready_indicator_filepath
# name.
# - Third, the colleague of whoever created the indicator watches for
# the indication file to appear, and once it's there it accesses the
# archive.
ready_indicator_filepath: Path
def __init__(
self, base_dir: Path, archive_name: str, ready_indicator_name: str):
"""
Construct the object from given base directory and name of the archive
file:
ArchiveWithIndicator(Path('X:\\TEMP'), 'FOO.ZIP', 'INPUT_READY')
"""
self.base_dir = base_dir
self.archive_filepath = self.base_dir / archive_name
self.ready_indicator_filepath = self.base_dir / ready_indicator_name
def is_ready_unsafe(self) -> bool:
"""
Check whether the archive is ready for access.
No guarding against possible network failures is done here.
"""
if not self.ready_indicator_filepath.exists():
return False
try:
archive_state = ArchiveState.deserialize_from_file(
self.ready_indicator_filepath)
except ArchiveStateError as error:
print(f'Error deserializing archive state: {error.message}')
return False
if archive_state.has_error():
# If an error happened during the codesign procedure, there will be no
# corresponding archive file.
# The caller code will deal with the error check further.
return True
# Sometimes on macOS indicator file appears prior to the actual archive
# despite the order of creation and os.sync() used in tag_ready().
# So consider archive not ready if there is an indicator without an
# actual archive.
if not self.archive_filepath.exists():
print('Found indicator without actual archive, waiting for archive '
f'({self.archive_filepath}) to appear.')
return False
# Wait until the archive is fully stored.
actual_archive_size = self.archive_filepath.stat().st_size
if actual_archive_size != archive_state.file_size:
print('Partial/invalid archive size (expected '
f'{archive_state.file_size} got {actual_archive_size})')
return False
return True
def is_ready(self) -> bool:
"""
Check whether the archive is ready for access.
Will tolerate possible network failures: if there is a network failure,
or if there is still no proper permission on the file, False is returned.
"""
# There is an intermittent problem happening at random which
# translates to "OSError : [WinError 59] An unexpected network error occurred".
# Some reports suggest it might be due to lack of permissions on the file,
# which might be applicable in our case since it's possible that the file is
# initially created with non-accessible permissions and gets chmod-ed
# after initial creation.
try:
return self.is_ready_unsafe()
except OSError as e:
print(f'Exception checking archive: {e}')
return False
def tag_ready(self, error_message='') -> None:
"""
Tag the archive as ready by creating the corresponding indication file.
NOTE: It is expected that the archive was never tagged as ready before
and that there are no subsequent tags of the same archive.
If this is violated, an assert will fail.
"""
assert not self.is_ready()
# Try our best to make sure everything is synced to the file system,
# to avoid any possibility of the stamp appearing on a network share prior
# to the actual file.
if util.get_current_platform() != util.Platform.WINDOWS:
os.sync()
archive_size = -1
if self.archive_filepath.exists():
archive_size = self.archive_filepath.stat().st_size
archive_info = ArchiveState(
file_size=archive_size, error_message=error_message)
self.ready_indicator_filepath.write_text(
archive_info.serialize_to_string())
def get_state(self) -> ArchiveState:
"""
Get state object for this archive
The state is read from the corresponding state file.
"""
try:
return ArchiveState.deserialize_from_file(self.ready_indicator_filepath)
except ArchiveStateError as error:
return ArchiveState(error_message=f'Error in information format: {error}')
def clean(self) -> None:
"""
Remove both archive and the ready indication file.
"""
util.ensure_file_does_not_exist_or_die(self.ready_indicator_filepath)
util.ensure_file_does_not_exist_or_die(self.archive_filepath)
def is_fully_absent(self) -> bool:
"""
Check whether both archive and its ready indicator are absent.
Used as a sanity check during the code signing process by both the
buildbot worker and the signing server.
"""
return (not self.archive_filepath.exists() and
not self.ready_indicator_filepath.exists())
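The handshake the two sides perform with this class might look roughly like the
following sketch (hypothetical shared directory and archive name; the tar file
itself would be written by `pack_files()` from `base_code_signer.py`):
```
import time
from pathlib import Path

shared_dir = Path('/data/codesign/unsigned')  # hypothetical location
archive = ArchiveWithIndicator(shared_dir, 'REQUEST.tar', 'REQUEST.ready')

# Producer side: the archive is fully written first, the indicator second,
# so a consumer never acts on a READY stamp without a complete archive.
# ... write REQUEST.tar under shared_dir here ...
archive.tag_ready()

# Consumer side: poll until the indicator and full-size archive are there.
while not archive.is_ready():
    time.sleep(1)
state = archive.get_state()
if state.has_error():
    print('Signing failed:', state.error_message)
```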

View File

@@ -0,0 +1,501 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Signing process overview.
#
# From the buildbot worker side:
# - Files which need to be signed are collected from either a directory (to
# sign all signable files in there) or from the filename of a single file to sign.
# - Those files get packed into an archive and stored in a location
# which is watched by the signing server.
# - A marker READY file is created which indicates the archive is ready for
# access.
# - Wait for the server to provide an archive with signed files.
# This is done by watching for the READY file which corresponds to an archive
# coming from the signing server.
# - Unpack the signed files from the archive and replace the original ones.
#
# From the code sign server side:
# - Watch a special location for a READY file which indicates that there is an
# archive with files which are to be signed.
# - Unpack the archive to a temporary location.
# - Run the codesign tool and make sure all the files are signed.
# - Pack the signed files and store them in a location which is watched by
# the buildbot worker.
# - Create a READY file which indicates that the archive with signed files is
# ready.
import abc
import logging
import shutil
import subprocess
import time
import tarfile
import uuid
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Iterable, List
import codesign.util as util
from codesign.absolute_and_relative_filename import AbsoluteAndRelativeFileName
from codesign.archive_with_indicator import ArchiveWithIndicator
from codesign.exception import CodeSignException
logger = logging.getLogger(__name__)
logger_builder = logger.getChild('builder')
logger_server = logger.getChild('server')
def pack_files(files: Iterable[AbsoluteAndRelativeFileName],
archive_filepath: Path) -> None:
"""
Create a tar archive from the given files for the signing pipeline.
Used by the buildbot worker to create an archive of files which are to be
signed, and by the signing server to send signed files back to the worker.
"""
with tarfile.TarFile.open(archive_filepath, 'w') as tar_file_handle:
for file_info in files:
tar_file_handle.add(file_info.absolute_filepath,
arcname=file_info.relative_filepath)
def extract_files(archive_filepath: Path,
extraction_dir: Path) -> None:
"""
Extract all files from the given archive into the given directory.
"""
# TODO(sergey): Verify files in the archive have relative path.
with tarfile.TarFile.open(archive_filepath, mode='r') as tar_file_handle:
tar_file_handle.extractall(path=extraction_dir)
class BaseCodeSigner(metaclass=abc.ABCMeta):
"""
Base class for a platform-specific signer of binaries.
Contains all the logic shared across platform-specific implementations, such
as synchronization and notification logic.
Platform specific bits (such as actual command for signing the binary) are
to be implemented as a subclass.
Provides utilities for code signing as a whole, including functionality needed
by a signing server and a buildbot worker.
The signer and builder may run on separate machines; the only requirement is
that they have access to a directory which is shared between them. For
security reasons this is to be done as a separate machine (or as a Shared
Folder configuration in VirtualBox). This directory might be
mounted under different base paths, but its underlying storage is to be
the same.
The code signer is short-lived on the buildbot worker side, and lives
forever on the code signing server side.
"""
# TODO(sergey): Find a neat way to have config annotated.
# config: Config
# Storage directory where builder puts files which are requested to be
# signed.
# Consider this an input of the code signing server.
unsigned_storage_dir: Path
# Storage where signed files are stored.
# Consider this an output of the code signer server.
signed_storage_dir: Path
# Platform the code is currently executing on.
platform: util.Platform
def __init__(self, config):
self.config = config
absolute_shared_storage_dir = config.SHARED_STORAGE_DIR.resolve()
# Unsigned (signing server input) configuration.
self.unsigned_storage_dir = absolute_shared_storage_dir / 'unsigned'
# Signed (signing server output) configuration.
self.signed_storage_dir = absolute_shared_storage_dir / 'signed'
self.platform = util.get_current_platform()
def cleanup_environment_for_builder(self) -> None:
# TODO(sergey): Revisit need of cleaning up the existing files.
# In practice it wasn't so helpful, and with multiple clients
# talking to the same server it becomes even more tricky.
pass
def cleanup_environment_for_signing_server(self) -> None:
# TODO(sergey): Revisit need of cleaning up the existing files.
# In practice it wasn't so helpful, and with multiple clients
# talking to the same server it becomes even more tricky.
pass
def generate_request_id(self) -> str:
"""
Generate a unique identifier for a code signing request.
"""
return str(uuid.uuid4())
def archive_info_for_request_id(
self, path: Path, request_id: str) -> ArchiveWithIndicator:
return ArchiveWithIndicator(
path, f'{request_id}.tar', f'{request_id}.ready')
def signed_archive_info_for_request_id(
self, request_id: str) -> ArchiveWithIndicator:
return self.archive_info_for_request_id(
self.signed_storage_dir, request_id)
def unsigned_archive_info_for_request_id(
self, request_id: str) -> ArchiveWithIndicator:
return self.archive_info_for_request_id(
self.unsigned_storage_dir, request_id)
############################################################################
# Buildbot worker side helpers.
@abc.abstractmethod
def check_file_is_to_be_signed(
self, file: AbsoluteAndRelativeFileName) -> bool:
"""
Check whether file is to be signed.
Is used by both single file signing pipeline and recursive directory
signing pipeline.
This is where the code signer is to check whether the file is to be signed
or not. This check might be based on a simple extension test or on an
actual test of whether the file already has a digital signature.
"""
def collect_files_to_sign(self, path: Path) \
-> List[AbsoluteAndRelativeFileName]:
"""
Get all files which need to be signed from the given path.
NOTE: The path might either be a file or directory.
This function is run from the buildbot worker side.
"""
# If a single file is provided, trust the buildbot worker that it
# is eligible for signing.
if path.is_file():
file = AbsoluteAndRelativeFileName.from_path(path)
if not self.check_file_is_to_be_signed(file):
return []
return [file]
all_files = AbsoluteAndRelativeFileName.recursively_from_directory(
path)
files_to_be_signed = [file for file in all_files
if self.check_file_is_to_be_signed(file)]
return files_to_be_signed
def wait_for_signed_archive_or_die(self, request_id) -> None:
"""
Wait until archive with signed files is available.
Will only return if the archive with signed files is available. If there
was an error during the code sign procedure, a SystemExit exception is
raised, with the message set to the error reported by the codesign
server.
Will only wait for the configured time. If that time is exceeded and there
is still no response from the signing server, the application will exit
with a non-zero exit code.
"""
signed_archive_info = self.signed_archive_info_for_request_id(
request_id)
unsigned_archive_info = self.unsigned_archive_info_for_request_id(
request_id)
timeout_in_seconds = self.config.TIMEOUT_IN_SECONDS
time_start = time.monotonic()
while not signed_archive_info.is_ready():
time.sleep(1)
time_slept_in_seconds = time.monotonic() - time_start
if time_slept_in_seconds > timeout_in_seconds:
signed_archive_info.clean()
unsigned_archive_info.clean()
raise SystemExit("Signing server didn't finish signing in "
f'{timeout_in_seconds} seconds, dying :(')
archive_state = signed_archive_info.get_state()
if archive_state.has_error():
signed_archive_info.clean()
unsigned_archive_info.clean()
raise SystemExit(
f'Error happened during codesign procedure: {archive_state.error_message}')
def copy_signed_files_to_directory(
self, signed_dir: Path, destination_dir: Path) -> None:
"""
Copy all files from signed_dir to destination_dir.
This function will overwrite any existing file. Permissions are copied
from the source files, but other metadata, such as timestamps, are not.
"""
for signed_filepath in signed_dir.glob('**/*'):
if not signed_filepath.is_file():
continue
relative_filepath = signed_filepath.relative_to(signed_dir)
destination_filepath = destination_dir / relative_filepath
destination_filepath.parent.mkdir(parents=True, exist_ok=True)
shutil.copy(signed_filepath, destination_filepath)
def run_buildbot_path_sign_pipeline(self, path: Path) -> None:
"""
Run all steps needed to make given path signed.
Path points to an unsigned file or a directory which contains unsigned
files.
If the path points to a single file then this file will be signed.
This is used to sign a final bundle such as .msi on Windows or .dmg on
macOS.
NOTE: The code signer implementation might actually reject signing the
file, in which case the file will be left unsigned. This isn't to be
considered a failure situation; it just might happen when the buildbot
worker cannot detect whether signing is really required in a specific
case or not.
If the path points to a directory then code signer will sign all
signable files from it (finding them recursively).
"""
self.cleanup_environment_for_builder()
# Make sure storage directory exists.
self.unsigned_storage_dir.mkdir(parents=True, exist_ok=True)
# Collect all files which need to be signed and pack them into a single
# archive which will be sent to the signing server.
logger_builder.info('Collecting files which are to be signed...')
files = self.collect_files_to_sign(path)
if not files:
logger_builder.info('No files to be signed, ignoring.')
return
logger_builder.info('Found %d files to sign.', len(files))
request_id = self.generate_request_id()
signed_archive_info = self.signed_archive_info_for_request_id(
request_id)
unsigned_archive_info = self.unsigned_archive_info_for_request_id(
request_id)
pack_files(files=files,
archive_filepath=unsigned_archive_info.archive_filepath)
unsigned_archive_info.tag_ready()
# Wait for the signing server to finish signing.
logger_builder.info('Waiting for the signing server to sign the files...')
self.wait_for_signed_archive_or_die(request_id)
# Extract signed files from archive and move files to final location.
with TemporaryDirectory(prefix='blender-buildbot-') as temp_dir_str:
unpacked_signed_files_dir = Path(temp_dir_str)
logger_builder.info('Extracting signed files from archive...')
extract_files(
archive_filepath=signed_archive_info.archive_filepath,
extraction_dir=unpacked_signed_files_dir)
destination_dir = path
if destination_dir.is_file():
destination_dir = destination_dir.parent
self.copy_signed_files_to_directory(
unpacked_signed_files_dir, destination_dir)
logger_builder.info('Removing archive with signed files...')
signed_archive_info.clean()
############################################################################
# Signing server side helpers.
def wait_for_sign_request(self) -> str:
"""
Wait for the buildbot to request signing of an archive.
Returns an identifier of signing request.
"""
# TODO(sergey): Support graceful shutdown on Ctrl-C.
logger_server.info(
f'Waiting for a request directory {self.unsigned_storage_dir} to appear.')
while not self.unsigned_storage_dir.exists():
time.sleep(1)
logger_server.info(
'Waiting for a READY indicator of any signing request.')
request_id = None
while request_id is None:
for file in self.unsigned_storage_dir.iterdir():
if file.suffix != '.ready':
continue
request_id = file.stem
logger_server.info(f'Found READY for request ID {request_id}.')
if request_id is None:
time.sleep(1)
unsigned_archive_info = self.unsigned_archive_info_for_request_id(
request_id)
while not unsigned_archive_info.is_ready():
time.sleep(1)
return request_id
@abc.abstractmethod
def sign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
"""
Sign all files in the given directory.
NOTE: Signing should happen in-place.
"""
def run_signing_pipeline(self, request_id: str):
"""
Run the full signing pipeline starting from the point when the buildbot
worker has requested signing.
"""
# Make sure storage directory exists.
self.signed_storage_dir.mkdir(parents=True, exist_ok=True)
with TemporaryDirectory(prefix='blender-codesign-') as temp_dir_str:
temp_dir = Path(temp_dir_str)
signed_archive_info = self.signed_archive_info_for_request_id(
request_id)
unsigned_archive_info = self.unsigned_archive_info_for_request_id(
request_id)
logger_server.info('Extracting unsigned files from archive...')
extract_files(
archive_filepath=unsigned_archive_info.archive_filepath,
extraction_dir=temp_dir)
logger_server.info('Collecting all files which need signing...')
files = AbsoluteAndRelativeFileName.recursively_from_directory(
temp_dir)
logger_server.info('Signing all requested files...')
try:
self.sign_all_files(files)
except CodeSignException as error:
signed_archive_info.tag_ready(error_message=error.message)
unsigned_archive_info.clean()
logger_server.info('Signing is complete with errors.')
return
logger_server.info('Packing signed files...')
pack_files(files=files,
archive_filepath=signed_archive_info.archive_filepath)
signed_archive_info.tag_ready()
logger_server.info('Removing signing request...')
unsigned_archive_info.clean()
logger_server.info('Signing is complete.')
def run_signing_server(self):
logger_server.info('Starting new code signing server...')
self.cleanup_environment_for_signing_server()
logger_server.info('Code signing server is ready')
while True:
logger_server.info('Waiting for the signing request in %s...',
self.unsigned_storage_dir)
request_id = self.wait_for_sign_request()
logger_server.info(
f'Begin signing procedure for request ID {request_id}.')
self.run_signing_pipeline(request_id)
############################################################################
# Command executing.
#
# Abstracted to a degree that allows running commands for a foreign
# platform.
# The goal with this is to allow performing dry-run tests of code signer
# server from other platforms (for example, to test that macOS code signer
# does what it is supposed to after doing a refactor on Linux).
# TODO(sergey): What is the type annotation for the command?
def run_command_or_mock(self, command, platform: util.Platform) -> None:
"""
Run the given command if the current platform matches the given one.
If the platform is different, the command will only be printed, allowing
one to verify the logic of the code signing process.
"""
if platform != self.platform:
logger_server.info(
f'Will run command for {platform}: {command}')
return
logger_server.info(f'Running command: {command}')
subprocess.run(command)
# TODO(sergey): What is the type annotation for the command?
def check_output_or_mock(self, command,
platform: util.Platform,
allow_nonzero_exit_code=False) -> str:
"""
Run the given command if the current platform matches the given one.
If the platform is different, the command will only be printed, allowing
one to verify the logic of the code signing process.
If allow_nonzero_exit_code is true, the output will be returned
even if the application quits with a non-zero exit code.
Otherwise a subprocess.CalledProcessError exception will be raised
in such a case.
"""
if platform != self.platform:
logger_server.info(
f'Will run command for {platform}: {command}')
return ''
if allow_nonzero_exit_code:
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = process.communicate()[0]
return output.decode()
logger_server.info(f'Running command: {command}')
return subprocess.check_output(
command, stderr=subprocess.STDOUT).decode()
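A minimal hypothetical subclass, illustrating the two abstract hooks a platform
implementation must provide (the extension policy here is just an example):
```
class NoopCodeSigner(BaseCodeSigner):
    """Example signer which selects shared libraries but only logs them."""

    def check_file_is_to_be_signed(
            self, file: AbsoluteAndRelativeFileName) -> bool:
        # Example policy based on a simple extension test.
        return file.relative_filepath.suffix == '.so'

    def sign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
        # Signing is expected to happen in-place; this stub only logs.
        for file in files:
            logger.info('Would sign %s', file.relative_filepath)
```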

View File

@@ -0,0 +1,62 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Configuration of a code signer which is specific to the code running from
# buildbot's worker.
import sys
from pathlib import Path
import codesign.util as util
from codesign.config_common import *
platform = util.get_current_platform()
if platform == util.Platform.LINUX:
SHARED_STORAGE_DIR = Path('/data/codesign')
elif platform == util.Platform.WINDOWS:
SHARED_STORAGE_DIR = Path('Z:\\codesign')
elif platform == util.Platform.MACOS:
SHARED_STORAGE_DIR = Path('/Volumes/codesign_macos/codesign')
# https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
LOGGING = {
'version': 1,
'formatters': {
'default': {'format': '%(asctime)-15s %(levelname)8s %(name)s %(message)s'}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
'stream': 'ext://sys.stderr',
}
},
'loggers': {
'codesign': {'level': 'INFO'},
},
'root': {
'level': 'WARNING',
'handlers': [
'console',
],
}
}
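This dictionary is consumed with the standard `logging.config.dictConfig()`,
as `simple_code_signer.py` below also does; for example:
```
import logging
import logging.config

from codesign.config_builder import LOGGING

logging.config.dictConfig(LOGGING)
logging.getLogger('codesign').info('codesign logging configured')
```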

View File

@@ -0,0 +1,36 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from pathlib import Path
# Timeout in seconds for the signing process.
#
# This is how long the buildbot packing step will wait for the signing
# server to perform signing.
#
# NOTE: Notarization could take a long time, hence the rather high value
# here. Might consider using different timeout for different platforms.
TIMEOUT_IN_SECONDS = 45 * 60 * 60
# Directory which is shared across buildbot worker and signing server.
#
# This is where worker puts files requested for signing as well as where
# server puts signed files.
SHARED_STORAGE_DIR: Path

View File

@@ -0,0 +1,101 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Configuration of a code signer which is specific to the code signing server.
#
# NOTE: DO NOT put any sensitive information here, put it in an actual
# configuration on the signing machine.
from pathlib import Path
from codesign.config_common import *
CODESIGN_DIRECTORY = Path(__file__).absolute().parent
BLENDER_GIT_ROOT_DIRECTORY = CODESIGN_DIRECTORY.parent.parent.parent
################################################################################
# Common configuration.
# Directory where folders for codesign requests and signed result are stored.
# For example, /data/codesign
SHARED_STORAGE_DIR: Path
################################################################################
# macOS-specific configuration.
MACOS_ENTITLEMENTS_FILE = \
BLENDER_GIT_ROOT_DIRECTORY / 'release' / 'darwin' / 'entitlements.plist'
# Identity of the Developer ID Application certificate which is to be used for
# codesign tool.
# Use `security find-identity -v -p codesigning` to find the identity.
#
# NOTE: This identity is just an example from release/darwin/README.txt.
MACOS_CODESIGN_IDENTITY = 'AE825E26F12D08B692F360133210AF46F4CF7B97'
# User name (Apple ID) which will be used to request notarization.
MACOS_XCRUN_USERNAME = 'me@example.com'
# One-time application password which will be used to request notarization.
MACOS_XCRUN_PASSWORD = '@keychain:altool-password'
# Timeout in seconds within which the notarial office is supposed to reply.
MACOS_NOTARIZE_TIMEOUT_IN_SECONDS = 60 * 60
################################################################################
# Windows-specific configuration.
# URL to the timestamping authority.
WIN_TIMESTAMP_AUTHORITY_URL = 'http://timestamp.digicert.com'
# Full path to the certificate used for signing.
#
# The path and expected file format might vary depending on a platform.
#
# On Windows it is usually a PKCS #12 key (.pfx), so the path will look
# like Path('C:\\Secret\\Blender.pfx').
WIN_CERTIFICATE_FILEPATH: Path
################################################################################
# Logging configuration, common for all platforms.
# https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
LOGGING = {
'version': 1,
'formatters': {
'default': {'format': '%(asctime)-15s %(levelname)8s %(name)s %(message)s'}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
'stream': 'ext://sys.stderr',
}
},
'loggers': {
'codesign': {'level': 'INFO'},
},
'root': {
'level': 'WARNING',
'handlers': [
'console',
],
}
}

View File

@@ -0,0 +1,26 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
class CodeSignException(Exception):
message: str
def __init__(self, message):
self.message = message
super().__init__(self.message)

View File

@@ -0,0 +1,72 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# NOTE: This is a no-op signer (since there isn't really a procedure to sign
# Linux binaries yet). Used to debug and verify the code signing routines on
# a Linux environment.
import logging
from pathlib import Path
from typing import List
from codesign.absolute_and_relative_filename import AbsoluteAndRelativeFileName
from codesign.base_code_signer import BaseCodeSigner
logger = logging.getLogger(__name__)
logger_server = logger.getChild('server')
class LinuxCodeSigner(BaseCodeSigner):
def is_active(self) -> bool:
"""
Check whether this signer is active.
If it is inactive, no files will be signed.
Used to make it possible to debug the code signing pipeline on Linux, where
there is no code signing happening in the actual buildbot and release
environment.
"""
return False
def check_file_is_to_be_signed(
self, file: AbsoluteAndRelativeFileName) -> bool:
if file.relative_filepath == Path('blender'):
return True
if (file.relative_filepath.parts[-3:-1] == ('python', 'bin') and
file.relative_filepath.name.startswith('python')):
return True
if file.relative_filepath.suffix == '.so':
return True
return False
def collect_files_to_sign(self, path: Path) \
-> List[AbsoluteAndRelativeFileName]:
if not self.is_active():
return []
return super().collect_files_to_sign(path)
def sign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
num_files = len(files)
for file_index, file in enumerate(files):
logger.info('Server: Signed file [%d/%d] %s',
file_index + 1, num_files, file.relative_filepath)
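Since `is_active()` is hard-coded to return False, one way to trace the full
pipeline locally is a tiny debugging subclass (a sketch, not part of the actual
setup):
```
class DebugLinuxCodeSigner(LinuxCodeSigner):
    def is_active(self) -> bool:
        # Enable the no-op signer so the pipeline can be exercised on Linux.
        return True
```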

View File

@@ -0,0 +1,456 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import logging
import re
import stat
import subprocess
import time
from pathlib import Path
from typing import List
import codesign.util as util
from buildbot_utils import Builder
from codesign.absolute_and_relative_filename import AbsoluteAndRelativeFileName
from codesign.base_code_signer import BaseCodeSigner
from codesign.exception import CodeSignException
logger = logging.getLogger(__name__)
logger_server = logger.getChild('server')
# NOTE: Check is done as filename.endswith(), so keep the dot
EXTENSIONS_TO_BE_SIGNED = {'.dylib', '.so', '.dmg'}
# Prefixes of a file (not directory) name which are to be signed.
# Used to sign extra executable files in Contents/Resources.
NAME_PREFIXES_TO_BE_SIGNED = {'python'}
class NotarizationException(CodeSignException):
pass
def is_file_from_bundle(file: AbsoluteAndRelativeFileName) -> bool:
"""
Check whether file is coming from an .app bundle
"""
parts = file.relative_filepath.parts
if not parts:
return False
if not parts[0].endswith('.app'):
return False
return True
def get_bundle_from_file(
file: AbsoluteAndRelativeFileName) -> AbsoluteAndRelativeFileName:
"""
Get AbsoluteAndRelativeFileName descriptor of bundle
"""
assert(is_file_from_bundle(file))
parts = file.relative_filepath.parts
bundle_name = parts[0]
base_dir = file.base_dir
bundle_filepath = file.base_dir / bundle_name
return AbsoluteAndRelativeFileName(base_dir, bundle_filepath)
def is_bundle_executable_file(file: AbsoluteAndRelativeFileName) -> bool:
"""
Check whether given file is an executable within an app bundle
"""
if not is_file_from_bundle(file):
return False
parts = file.relative_filepath.parts
num_parts = len(parts)
if num_parts < 3:
return False
if parts[1:3] != ('Contents', 'MacOS'):
return False
return True
def xcrun_field_value_from_output(field: str, output: str) -> str:
"""
Get value of a given field from xcrun output.
If the field is not found, an empty string is returned.
"""
field_prefix = field + ': '
for line in output.splitlines():
line = line.strip()
if line.startswith(field_prefix):
return line[len(field_prefix):]
return ''
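# For illustration, given hypothetical xcrun output containing lines like:
#     Status: success
#     Status Message: Package Approved
# xcrun_field_value_from_output('Status', output) returns 'success'.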
class MacOSCodeSigner(BaseCodeSigner):
def check_file_is_to_be_signed(
self, file: AbsoluteAndRelativeFileName) -> bool:
if file.relative_filepath.name.startswith('.'):
return False
if is_bundle_executable_file(file):
return True
base_name = file.relative_filepath.name
if any(base_name.startswith(prefix)
for prefix in NAME_PREFIXES_TO_BE_SIGNED):
return True
mode = file.absolute_filepath.lstat().st_mode
if mode & stat.S_IXUSR != 0:
file_output = subprocess.check_output(
("file", file.absolute_filepath)).decode()
if "64-bit executable" in file_output:
return True
return file.relative_filepath.suffix in EXTENSIONS_TO_BE_SIGNED
def collect_files_to_sign(self, path: Path) \
-> List[AbsoluteAndRelativeFileName]:
# Include all files when signing an app or dmg bundle: all the files are
# needed to make a valid signature of the bundle.
if path.name.endswith('.app'):
return AbsoluteAndRelativeFileName.recursively_from_directory(path)
if path.is_dir():
files = []
for child in path.iterdir():
if child.name.endswith('.app'):
current_files = AbsoluteAndRelativeFileName.recursively_from_directory(
child)
else:
current_files = super().collect_files_to_sign(child)
for current_file in current_files:
files.append(AbsoluteAndRelativeFileName(
path, current_file.absolute_filepath))
return files
return super().collect_files_to_sign(path)
############################################################################
# Codesign.
def codesign_remove_signature(
self, file: AbsoluteAndRelativeFileName) -> None:
"""
Make sure given file does not have codesign signature
This is needed because codesigning is not possible for file which has
signature already.
"""
logger_server.info(
'Removing codesign signature from %s...', file.relative_filepath)
command = ['codesign', '--remove-signature', file.absolute_filepath]
self.run_command_or_mock(command, util.Platform.MACOS)
def codesign_file(
self, file: AbsoluteAndRelativeFileName) -> None:
"""
Sign given file
NOTE: File must not have any signatures.
"""
logger_server.info(
'Codesigning %s...', file.relative_filepath)
entitlements_file = self.config.MACOS_ENTITLEMENTS_FILE
command = ['codesign',
'--timestamp',
'--options', 'runtime',
f'--entitlements={entitlements_file}',
'--sign', self.config.MACOS_CODESIGN_IDENTITY,
file.absolute_filepath]
self.run_command_or_mock(command, util.Platform.MACOS)
def codesign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
"""
Run codesign tool on all eligible files in the given list.
Will ignore all files which are not to be signed. For the rest will
remove possible existing signature and add a new signature.
"""
num_files = len(files)
have_ignored_files = False
signed_files = []
for file_index, file in enumerate(files):
# Ignore file if it is not to be signed.
# Allows one to manually construct a ZIP of a bundle and get it signed.
if not self.check_file_is_to_be_signed(file):
logger_server.info(
'Ignoring file [%d/%d] %s',
file_index + 1, num_files, file.relative_filepath)
have_ignored_files = True
continue
logger_server.info(
'Running codesigning routines for file [%d/%d] %s...',
file_index + 1, num_files, file.relative_filepath)
self.codesign_remove_signature(file)
self.codesign_file(file)
signed_files.append(file)
if have_ignored_files:
logger_server.info('Signed %d files:', len(signed_files))
num_signed_files = len(signed_files)
for file_index, signed_file in enumerate(signed_files):
logger_server.info(
'- [%d/%d] %s',
file_index + 1, num_signed_files,
signed_file.relative_filepath)
def codesign_bundles(
self, files: List[AbsoluteAndRelativeFileName]) -> None:
"""
Codesign all .app bundles in the given list of files.
The bundle is deduced from the paths of the files, and every bundle is only
signed once.
"""
signed_bundles = set()
extra_files = []
for file in files:
if not is_file_from_bundle(file):
continue
bundle = get_bundle_from_file(file)
bundle_name = bundle.relative_filepath
if bundle_name in signed_bundles:
continue
logger_server.info('Running codesign routines on bundle %s',
bundle_name)
# It is not possible to remove signature from DMG.
if bundle.relative_filepath.name.endswith('.app'):
self.codesign_remove_signature(bundle)
self.codesign_file(bundle)
signed_bundles.add(bundle_name)
# Codesign on a bundle adds an extra folder with information.
# It needs to be copied to the source.
code_signature_directory = \
bundle.absolute_filepath / 'Contents' / '_CodeSignature'
code_signature_files = \
AbsoluteAndRelativeFileName.recursively_from_directory(
code_signature_directory)
for code_signature_file in code_signature_files:
bundle_relative_file = AbsoluteAndRelativeFileName(
bundle.base_dir,
code_signature_directory /
code_signature_file.relative_filepath)
extra_files.append(bundle_relative_file)
files.extend(extra_files)
############################################################################
# Notarization.
def notarize_get_bundle_id(self, file: AbsoluteAndRelativeFileName) -> str:
"""
Get bundle ID which will be used to notarize DMG
"""
name = file.relative_filepath.name
app_name = name.split('-', 2)[0].lower()
app_name_words = app_name.split()
if len(app_name_words) > 1:
app_name_id = ''.join(word.capitalize() for word in app_name_words)
else:
app_name_id = app_name_words[0]
# TODO(sergey): Consider using "alpha" for buildbot builds.
return f'org.blenderfoundation.{app_name_id}.release'
def notarize_request(self, file) -> str:
"""
Request notarization of the given file.
Returns the UUID of the notarization request. If an error occurred, None
is returned instead of the UUID.
"""
bundle_id = self.notarize_get_bundle_id(file)
logger_server.info('Bundle ID: %s', bundle_id)
logger_server.info('Submitting file to the notarial office.')
command = [
'xcrun', 'altool', '--notarize-app', '--verbose',
'-f', file.absolute_filepath,
'--primary-bundle-id', bundle_id,
'--username', self.config.MACOS_XCRUN_USERNAME,
'--password', self.config.MACOS_XCRUN_PASSWORD]
output = self.check_output_or_mock(
command, util.Platform.MACOS, allow_nonzero_exit_code=True)
for line in output.splitlines():
line = line.strip()
if line.startswith('RequestUUID = '):
request_uuid = line[14:]
return request_uuid
# Check whether the package has been already submitted.
if 'The software asset has already been uploaded.' in line:
request_uuid = re.sub(
r'.*The upload ID is ([A-Fa-f0-9\-]+).*', '\\1', line)
logger_server.warning(
f'The package has been already submitted under UUID {request_uuid}')
return request_uuid
logger_server.error(output)
logger_server.error('xcrun command did not report RequestUUID')
return None
def notarize_review_status(self, xcrun_output: str) -> bool:
"""
Review status returned by xcrun's notarization info
Returns True if the notarization process has finished.
If there are errors during notarization, a NotarizationException()
exception is raised with the status message from the notarial office.
"""
# Parse status and message
status = xcrun_field_value_from_output('Status', xcrun_output)
status_message = xcrun_field_value_from_output(
'Status Message', xcrun_output)
if status == 'success':
logger_server.info(
'Package successfully notarized: %s', status_message)
return True
if status == 'invalid':
logger_server.error(xcrun_output)
logger_server.error(
'Package notarization has failed: %s', status_message)
raise NotarizationException(status_message)
if status == 'in progress':
return False
logger_server.info(
'Unknown notarization status %s (%s)', status, status_message)
return False
def notarize_wait_result(self, request_uuid: str) -> bool:
"""
Wait until the notarial office has a reply
Returns True on success, False if the configured timeout is exceeded.
"""
logger_server.info(
'Waiting for a result from the notarization office.')
command = ['xcrun', 'altool',
'--notarization-info', request_uuid,
'--username', self.config.MACOS_XCRUN_USERNAME,
'--password', self.config.MACOS_XCRUN_PASSWORD]
time_start = time.monotonic()
timeout_in_seconds = self.config.MACOS_NOTARIZE_TIMEOUT_IN_SECONDS
while True:
xcrun_output = self.check_output_or_mock(
command, util.Platform.MACOS, allow_nonzero_exit_code=True)
if self.notarize_review_status(xcrun_output):
return True
logger_server.info('Keep waiting for notarization office.')
time.sleep(30)
time_slept_in_seconds = time.monotonic() - time_start
if time_slept_in_seconds > timeout_in_seconds:
logger_server.error(
"Notarial office didn't reply in %f seconds.",
timeout_in_seconds)
return False
def notarize_staple(self, file: AbsoluteAndRelativeFileName) -> None:
"""
Staple notarial label on the file
"""
logger_server.info('Stapling notarial stamp.')
command = ['xcrun', 'stapler', 'staple', '-v', file.absolute_filepath]
self.check_output_or_mock(command, util.Platform.MACOS)
def notarize_dmg(self, file: AbsoluteAndRelativeFileName) -> bool:
"""
Run entire pipeline to get DMG notarized.
"""
logger_server.info('Begin notarization routines on %s',
file.relative_filepath)
# Submit file for notarization.
request_uuid = self.notarize_request(file)
if not request_uuid:
return False
logger_server.info('Received Request UUID: %s', request_uuid)
# Wait for the status from the notarization office.
if not self.notarize_wait_result(request_uuid):
return False
# Staple.
self.notarize_staple(file)
return True
def notarize_all_dmg(
self, files: List[AbsoluteAndRelativeFileName]) -> None:
"""
Notarize all DMG images from the input.
Images are supposed to be codesigned already.
"""
for file in files:
if not file.relative_filepath.name.endswith('.dmg'):
continue
if not self.check_file_is_to_be_signed(file):
continue
self.notarize_dmg(file)
############################################################################
# Entry point.
def sign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
# TODO(sergey): Handle errors somehow.
self.codesign_all_files(files)
self.codesign_bundles(files)
self.notarize_all_dmg(files)
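The helper xcrun_field_value_from_output() used by notarize_review_status() above is presumably defined elsewhere in this module and is not shown in this hunk. As a rough illustration of the `Field: value` lines altool prints (a minimal sketch under that assumption, not the actual implementation), such a parser could look like:

def xcrun_field_value_from_output(field: str, output: str) -> str:
    # Hypothetical sketch: find the first "<field>: <value>" line in the
    # altool output and return the value part; empty string if absent.
    prefix = field + ': '
    for line in output.splitlines():
        line = line.strip()
        if line.startswith(prefix):
            return line[len(prefix):]
    return ''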

View File

@@ -0,0 +1,52 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import logging.config
import sys
from pathlib import Path
from typing import Optional
import codesign.config_builder
import codesign.util as util
from codesign.base_code_signer import BaseCodeSigner
class SimpleCodeSigner:
code_signer: Optional[BaseCodeSigner]
def __init__(self):
platform = util.get_current_platform()
if platform == util.Platform.LINUX:
from codesign.linux_code_signer import LinuxCodeSigner
self.code_signer = LinuxCodeSigner(codesign.config_builder)
elif platform == util.Platform.MACOS:
from codesign.macos_code_signer import MacOSCodeSigner
self.code_signer = MacOSCodeSigner(codesign.config_builder)
elif platform == util.Platform.WINDOWS:
from codesign.windows_code_signer import WindowsCodeSigner
self.code_signer = WindowsCodeSigner(codesign.config_builder)
else:
self.code_signer = None
def sign_file_or_directory(self, path: Path) -> None:
logging.config.dictConfig(codesign.config_builder.LOGGING)
if self.code_signer is None:
# No code signer for this platform, nothing to do.
return
self.code_signer.run_buildbot_path_sign_pipeline(path)
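For reference, driving this dispatcher only takes a couple of lines; this mirrors what worker_codesign.py further below does (the path here is a made-up example):

from pathlib import Path
from codesign.simple_code_signer import SimpleCodeSigner

code_signer = SimpleCodeSigner()
code_signer.sign_file_or_directory(Path('/tmp/blender-install'))  # example path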

View File

@@ -0,0 +1,54 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import sys
from enum import Enum
from pathlib import Path
class Platform(Enum):
LINUX = 1
MACOS = 2
WINDOWS = 3
def get_current_platform() -> Platform:
if sys.platform == 'linux':
return Platform.LINUX
elif sys.platform == 'darwin':
return Platform.MACOS
elif sys.platform == 'win32':
return Platform.WINDOWS
raise Exception(f'Unknown platform {sys.platform}')
def ensure_file_does_not_exist_or_die(filepath: Path) -> None:
"""
If the file exists, unlink it.
If the file path exists and is not a file, a SystemExit is raised.
If the file path does not exist, nothing happens.
"""
if not filepath.exists():
return
if not filepath.is_file():
# TODO(sergey): Provide information about what the filepath actually is.
raise SystemExit(f'{filepath} is expected to be a file, but is not')
filepath.unlink()
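A small usage sketch of the helper above (the path is a made-up example):

from pathlib import Path

# Remove a stale artifact before regenerating it; dies if the path turns
# out to be a directory rather than a file.
ensure_file_does_not_exist_or_die(Path('/tmp/buildbot_upload.zip'))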

View File

@@ -0,0 +1,117 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import logging
import subprocess
from pathlib import Path
from typing import List
import codesign.util as util
from buildbot_utils import Builder
from codesign.absolute_and_relative_filename import AbsoluteAndRelativeFileName
from codesign.base_code_signer import BaseCodeSigner
from codesign.exception import CodeSignException
logger = logging.getLogger(__name__)
logger_server = logger.getChild('server')
# NOTE: Check is done as filename.endswith(), so keep the dot
EXTENSIONS_TO_BE_SIGNED = {'.exe', '.dll', '.pyd', '.msi'}
BLACKLIST_FILE_PREFIXES = (
'api-ms-', 'concrt', 'msvcp', 'ucrtbase', 'vcomp', 'vcruntime')
class SigntoolException(CodeSignException):
pass
class WindowsCodeSigner(BaseCodeSigner):
def check_file_is_to_be_signed(
self, file: AbsoluteAndRelativeFileName) -> bool:
base_name = file.relative_filepath.name
if any(base_name.startswith(prefix)
for prefix in BLACKLIST_FILE_PREFIXES):
return False
return file.relative_filepath.suffix in EXTENSIONS_TO_BE_SIGNED
def get_sign_command_prefix(self) -> List[str]:
return [
'signtool', 'sign', '/v',
'/f', self.config.WIN_CERTIFICATE_FILEPATH,
'/tr', self.config.WIN_TIMESTAMP_AUTHORITY_URL]
def run_codesign_tool(self, filepath: Path) -> None:
command = self.get_sign_command_prefix() + [filepath]
try:
codesign_output = self.check_output_or_mock(command, util.Platform.WINDOWS)
except subprocess.CalledProcessError as e:
raise SigntoolException(f'Error running signtool: {e}')
logger_server.info(f'signtool output:\n{codesign_output}')
got_number_of_success = False
for line in codesign_output.split('\n'):
line_clean = line.strip()
line_clean_lower = line_clean.lower()
if line_clean_lower.startswith('number of warnings') or \
line_clean_lower.startswith('number of errors'):
number = int(line_clean_lower.split(':')[1])
if number != 0:
raise SigntoolException('Non-clean success of signtool')
if line_clean_lower.startswith('number of files successfully signed'):
got_number_of_success = True
number = int(line_clean_lower.split(':')[1])
if number != 1:
raise SigntoolException('Signtool did not consider codesign a success')
if not got_number_of_success:
raise SigntoolException('Signtool did not report number of files signed')
def sign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
# NOTE: Sign files one by one to avoid possible command line length
# overflow (which could happen if we ever decide to sign every binary
# in the install folder, for example).
#
# TODO(sergey): Consider doing batched signing of a handful of files in
# one go (but only if this is actually known to be much faster).
num_files = len(files)
for file_index, file in enumerate(files):
# Ignore file if it is not to be signed.
# Allows one to manually construct a ZIP of the package and get it signed.
if not self.check_file_is_to_be_signed(file):
logger_server.info(
'Ignoring file [%d/%d] %s',
file_index + 1, num_files, file.relative_filepath)
continue
logger_server.info(
'Running signtool command for file [%d/%d] %s...',
file_index + 1, num_files, file.relative_filepath)
self.run_codesign_tool(file.absolute_filepath)
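The success check in run_codesign_tool() above keys off signtool's summary lines. A hand-written approximation of such a line (not captured from a real signtool run) shows the parsing reduces to a split on the colon:

# Hypothetical signtool summary line; real output may differ in details.
sample = 'Number of files successfully signed: 1'
line_clean_lower = sample.strip().lower()
number = int(line_clean_lower.split(':')[1])
assert number == 1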

View File

@@ -0,0 +1,37 @@
#!/usr/bin/env python3
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# NOTE: This is a no-op signer (since there isn't really a procedure to sign
# Linux binaries yet). Used to debug and verify the code signing routines
# in a Linux environment.
import logging.config
from pathlib import Path
from typing import List
from codesign.linux_code_signer import LinuxCodeSigner
import codesign.config_server
if __name__ == "__main__":
logging.config.dictConfig(codesign.config_server.LOGGING)
code_signer = LinuxCodeSigner(codesign.config_server)
code_signer.run_signing_server()

View File

@@ -0,0 +1,41 @@
#!/usr/bin/env python3
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import logging.config
from pathlib import Path
from typing import List
from codesign.macos_code_signer import MacOSCodeSigner
import codesign.config_server
if __name__ == "__main__":
entitlements_file = codesign.config_server.MACOS_ENTITLEMENTS_FILE
if not entitlements_file.exists():
raise SystemExit(
f'Entitlements file {entitlements_file} does not exist.')
if not entitlements_file.is_file():
raise SystemExit(
f'Entitlements file {entitlements_file} is not a file.')
logging.config.dictConfig(codesign.config_server.LOGGING)
code_signer = MacOSCodeSigner(codesign.config_server)
code_signer.run_signing_server()

View File

@@ -0,0 +1,11 @@
@echo off
rem This is an entry point of the codesign server for Windows.
rem It makes sure that signtool.exe is within the current PATH and can be
rem used by the Python script.
SETLOCAL
set PATH=C:\Program Files (x86)\Windows Kits\10\App Certification Kit;%PATH%
codesign_server_windows.py

View File

@@ -0,0 +1,54 @@
#!/usr/bin/env python3
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Implementation of codesign server for Windows.
#
# NOTE: If signtool.exe is not in the PATH use codesign_server_windows.bat
import logging.config
import shutil
from pathlib import Path
from typing import List
import codesign.util as util
from codesign.windows_code_signer import WindowsCodeSigner
import codesign.config_server
if __name__ == "__main__":
logging.config.dictConfig(codesign.config_server.LOGGING)
logger = logging.getLogger(__name__)
logger_server = logger.getChild('server')
# TODO(sergey): Consider moving such sanity checks into
# CodeSigner.check_environment_or_die().
if not shutil.which('signtool.exe'):
if util.get_current_platform() == util.Platform.WINDOWS:
raise SystemExit("signtool.exe is not found in %PATH%")
logger_server.info(
'signtool.exe not found, '
'but will not be used on this foreign platform')
code_signer = WindowsCodeSigner(codesign.config_server)
code_signer.run_signing_server()

View File

@@ -0,0 +1,551 @@
#!/usr/bin/env python3
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import argparse
import re
import shutil
import subprocess
import sys
import time
from pathlib import Path
from tempfile import TemporaryDirectory, NamedTemporaryFile
from typing import List
BUILDBOT_DIRECTORY = Path(__file__).absolute().parent
CODESIGN_SCRIPT = BUILDBOT_DIRECTORY / 'worker_codesign.py'
BLENDER_GIT_ROOT_DIRECTORY = BUILDBOT_DIRECTORY.parent.parent
DARWIN_DIRECTORY = BLENDER_GIT_ROOT_DIRECTORY / 'release' / 'darwin'
# Extra size which is added on top of the actual file size when estimating
# the size of the destination DMG.
EXTRA_DMG_SIZE_IN_BYTES = 800 * 1024 * 1024
################################################################################
# Common utilities
def get_directory_size(root_directory: Path) -> int:
"""
Get size of directory on disk
"""
total_size = 0
for file in root_directory.glob('**/*'):
total_size += file.lstat().st_size
return total_size
################################################################################
# DMG bundling specific logic
def create_argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'source_dir',
type=Path,
help='Source directory which points to either an existing .app bundle '
'or to a directory with .app bundles.')
parser.add_argument(
'--background-image',
type=Path,
help="Optional background picture which will be set on the DMG."
"If not provided default Blender's one is used.")
parser.add_argument(
'--volume-name',
type=str,
help='Optional name of a volume which will be used for DMG.')
parser.add_argument(
'--dmg',
type=Path,
help='Optional argument which points to a final DMG file name.')
parser.add_argument(
'--applescript',
type=Path,
help="Optional path to applescript to set up folder looks of DMG."
"If not provided default Blender's one is used.")
parser.add_argument(
'--codesign',
action="store_true",
help="Code sign and notarize DMG contents.")
return parser
def collect_app_bundles(source_dir: Path) -> List[Path]:
"""
Collect all app bundles which are to be put into the DMG
If the source directory points to FOO.app it will be the only app bundle
packed.
Otherwise all .app bundles from the given directory are placed into a
single DMG.
"""
if source_dir.name.endswith('.app'):
return [source_dir]
app_bundles = []
for filename in source_dir.glob('*'):
if not filename.is_dir():
continue
if not filename.name.endswith('.app'):
continue
app_bundles.append(filename)
return app_bundles
def collect_and_log_app_bundles(source_dir: Path) -> List[Path]:
app_bundles = collect_app_bundles(source_dir)
if not app_bundles:
print('No app bundles found for packing')
return []
print(f'Found {len(app_bundles)} to pack:')
for app_bundle in app_bundles:
print(f'- {app_bundle}')
return app_bundles
def estimate_dmg_size(app_bundles: List[Path]) -> int:
"""
Estimate the size of a DMG to hold the requested app bundles
The size is based on the actual size of all files in all bundles plus some
space to compensate for different size-on-disk plus some space to hold
codesign signatures.
It is better to err on the high side since the empty space is compressed,
but lack of space might cause silent failures later on.
"""
app_bundles_size = 0
for app_bundle in app_bundles:
app_bundles_size += get_directory_size(app_bundle)
return app_bundles_size + EXTRA_DMG_SIZE_IN_BYTES
def copy_app_bundles_to_directory(app_bundles: List[Path],
directory: Path) -> None:
"""
Copy all bundles to a given directory
This directory is what the DMG will be created from.
"""
for app_bundle in app_bundles:
print(f'Copying {app_bundle.name}...')
shutil.copytree(app_bundle, directory / app_bundle.name)
def get_main_app_bundle(app_bundles: List[Path]) -> Path:
"""
Get the main application bundle for the installation
"""
return app_bundles[0]
def create_dmg_image(app_bundles: List[Path],
dmg_filepath: Path,
volume_name: str) -> None:
"""
Create DMG disk image and put app bundles in it
No DMG configuration or codesigning is happening here.
"""
if dmg_filepath.exists():
print(f'Removing existing writable DMG {dmg_filepath}...')
dmg_filepath.unlink()
print('Preparing directory with app bundles for the DMG...')
with TemporaryDirectory(prefix='blender-dmg-content-') as content_dir_str:
# Copy all bundles to a clean directory.
content_dir = Path(content_dir_str)
copy_app_bundles_to_directory(app_bundles, content_dir)
# Estimate size of the DMG.
dmg_size = estimate_dmg_size(app_bundles)
print(f'Estimated DMG size: {dmg_size:,} bytes.')
# Create the DMG.
print(f'Creating writable DMG {dmg_filepath}')
command = ('hdiutil',
'create',
'-size', str(dmg_size),
'-fs', 'HFS+',
'-srcfolder', content_dir,
'-volname', volume_name,
'-format', 'UDRW',
dmg_filepath)
subprocess.run(command)
def get_writable_dmg_filepath(dmg_filepath: Path):
"""
Get file path for writable DMG image
"""
parent = dmg_filepath.parent
return parent / (dmg_filepath.stem + '-temp.dmg')
def mount_readwrite_dmg(dmg_filepath: Path) -> None:
"""
Mount writable DMG
The mount point will be /Volumes/<volume name>
"""
print(f'Mounting read-write DMG {dmg_filepath}')
command = ('hdiutil',
'attach', '-readwrite',
'-noverify',
'-noautoopen',
dmg_filepath)
subprocess.run(command)
def get_mount_directory_for_volume_name(volume_name: str) -> Path:
"""
Get directory under which the volume will be mounted
"""
return Path('/Volumes') / volume_name
def eject_volume(volume_name: str) -> None:
"""
Eject given volume, if mounted
"""
mount_directory = get_mount_directory_for_volume_name(volume_name)
if not mount_directory.exists():
return
mount_directory_str = str(mount_directory)
print(f'Ejecting volume {volume_name}')
# Figure out which device to eject.
mount_output = subprocess.check_output(['mount']).decode()
device = ''
for line in mount_output.splitlines():
if f'on {mount_directory_str} (' not in line:
continue
tokens = line.split(' ', 3)
if len(tokens) < 3:
continue
if tokens[1] != 'on':
continue
if device:
raise Exception(
f'Multiple devices found for mounting point {mount_directory}')
device = tokens[0]
if not device:
raise Exception(
f'No device found for mounting point {mount_directory}')
print(f'{mount_directory} is mounted as device {device}, ejecting...')
subprocess.run(['diskutil', 'eject', device])
def copy_background_if_needed(background_image_filepath: Path,
mount_directory: Path) -> None:
"""
Copy background to the DMG
If the background image is not specified it will not be copied.
"""
if not background_image_filepath:
print('No background image provided.')
return
print(f'Copying background image {background_image_filepath}')
destination_dir = mount_directory / '.background'
destination_dir.mkdir(exist_ok=True)
destination_filepath = destination_dir / background_image_filepath.name
shutil.copy(background_image_filepath, destination_filepath)
def create_applications_link(mount_directory: Path) -> None:
"""
Create link to /Applications in the given location
"""
print('Creating link to /Applications')
command = ('ln', '-s', '/Applications', mount_directory / ' ')
subprocess.run(command)
def run_applescript(applescript: Path,
volume_name: str,
app_bundles: List[Path],
background_image_filepath: Path) -> None:
"""
Run given applescript to adjust look and feel of the DMG
"""
main_app_bundle = get_main_app_bundle(app_bundles)
with NamedTemporaryFile(
mode='w', suffix='.applescript') as temp_applescript:
print('Adjusting applescript for volume name...')
# Adjust script to the specific volume name.
with open(applescript, mode='r') as applescript_file:
for line in applescript_file.readlines():
stripped_line = line.strip()
if stripped_line.startswith('tell disk'):
line = re.sub('tell disk ".*"',
f'tell disk "{volume_name}"',
line)
elif stripped_line.startswith('set background picture'):
if not background_image_filepath:
continue
else:
background_image_short = \
'.background:' + background_image_filepath.name
line = re.sub('to file ".*"',
f'to file "{background_image_short}"',
line)
line = line.replace('blender.app', main_app_bundle.name)
temp_applescript.write(line)
temp_applescript.flush()
print('Running applescript...')
command = ('osascript', temp_applescript.name)
subprocess.run(command)
print('Waiting for applescript...')
# NOTE: This is copied from bundle.sh. The exact reason for the sleep
# remains a mystery.
time.sleep(5)
def codesign(subject: Path):
"""
Codesign file or directory
NOTE: For DMG it will also notarize.
"""
command = (CODESIGN_SCRIPT, subject)
subprocess.run(command)
def codesign_app_bundles_in_dmg(mount_directory: str) -> None:
"""
Code sign all binaries and bundles in the mounted directory
"""
print(f'Codesigning all app bundles in {mount_directory}')
codesign(mount_directory)
def codesign_and_notarize_dmg(dmg_filepath: Path) -> None:
"""
Run codesign and notarization pipeline on the DMG
"""
print(f'Codesigning and notarizing DMG {dmg_filepath}')
codesign(dmg_filepath)
def compress_dmg(writable_dmg_filepath: Path,
final_dmg_filepath: Path) -> None:
"""
Compress temporary read-write DMG
"""
command = ('hdiutil', 'convert',
writable_dmg_filepath,
'-format', 'UDZO',
'-o', final_dmg_filepath)
if final_dmg_filepath.exists():
print(f'Removing old compressed DMG {final_dmg_filepath}')
final_dmg_filepath.unlink()
print('Compressing disk image...')
subprocess.run(command)
def create_final_dmg(app_bundles: List[Path],
dmg_filepath: Path,
background_image_filepath: Path,
volume_name: str,
applescript: Path,
codesign: bool) -> None:
"""
Create DMG with all app bundles
Will take care of configuring the background, signing all binaries and app
bundles, and notarizing the DMG.
"""
print('Running all routines to create final DMG')
writable_dmg_filepath = get_writable_dmg_filepath(dmg_filepath)
mount_directory = get_mount_directory_for_volume_name(volume_name)
# Make sure volume is not mounted.
# If it is mounted it will prevent removing old DMG files and could make
# it so app bundles are copied to the wrong place.
eject_volume(volume_name)
create_dmg_image(app_bundles, writable_dmg_filepath, volume_name)
mount_readwrite_dmg(writable_dmg_filepath)
# Run codesign first, prior to copying anything else.
#
# This allows recursing into the content of bundles without worrying about
# possible interference of the Applications symlink.
if codesign:
codesign_app_bundles_in_dmg(mount_directory)
copy_background_if_needed(background_image_filepath, mount_directory)
create_applications_link(mount_directory)
run_applescript(applescript, volume_name, app_bundles,
background_image_filepath)
print('Ejecting read-write DMG image...')
eject_volume(volume_name)
compress_dmg(writable_dmg_filepath, dmg_filepath)
writable_dmg_filepath.unlink()
if codesign:
codesign_and_notarize_dmg(dmg_filepath)
def ensure_dmg_extension(filepath: Path) -> Path:
"""
Make sure the given file has a .dmg extension
"""
if filepath.suffix != '.dmg':
return filepath.with_suffix(f'{filepath.suffix}.dmg')
return filepath
def get_dmg_filepath(requested_name: Path, app_bundles: List[Path]) -> Path:
"""
Get full file path for the final DMG image
Will use the provided one when possible, otherwise will deduce it from
the app bundles.
If the name is deduced, the DMG is stored in the current directory.
"""
if requested_name:
return ensure_dmg_extension(requested_name.absolute())
# TODO(sergey): This is not necessarily the main one.
main_bundle = app_bundles[0]
# Strip .app from the name
return Path(main_bundle.name[:-4] + '.dmg').absolute()
def get_background_image(requested_background_image: Path) -> Path:
"""
Get effective filepath for the background image
"""
if requested_background_image:
return requested_background_image.absolute()
return DARWIN_DIRECTORY / 'background.tif'
def get_applescript(requested_applescript: Path) -> Path:
"""
Get effective filepath for the applescript
"""
if requested_applescript:
return requested_applescript.absolute()
return DARWIN_DIRECTORY / 'blender.applescript'
def get_volume_name_from_dmg_filepath(dmg_filepath: Path) -> str:
"""
Deduce the volume name from the DMG path
Will use the first part of the DMG file name prior to the dash.
"""
tokens = dmg_filepath.stem.split('-')
words = tokens[0].split()
return ' '.join(word.capitalize() for word in words)
def get_volume_name(requested_volume_name: str,
dmg_filepath: Path) -> str:
"""
Get effective name for DMG volume
"""
if requested_volume_name:
return requested_volume_name
return get_volume_name_from_dmg_filepath(dmg_filepath)
def main():
parser = create_argument_parser()
args = parser.parse_args()
# Get normalized input parameters.
source_dir = args.source_dir.absolute()
background_image_filepath = get_background_image(args.background_image)
applescript = get_applescript(args.applescript)
codesign = args.codesign
app_bundles = collect_and_log_app_bundles(source_dir)
if not app_bundles:
return
dmg_filepath = get_dmg_filepath(args.dmg, app_bundles)
volume_name = get_volume_name(args.volume_name, dmg_filepath)
print(f'Will produce DMG "{dmg_filepath.name}" (without quotes)')
create_final_dmg(app_bundles,
dmg_filepath,
background_image_filepath,
volume_name,
applescript,
codesign)
if __name__ == "__main__":
main()
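For a concrete trace of get_volume_name_from_dmg_filepath() above (the DMG name is an illustrative example, not a real build artifact):

from pathlib import Path

dmg = Path('blender-2.93.0-macOS.dmg')   # example file name
tokens = dmg.stem.split('-')             # ['blender', '2.93.0', 'macOS']
volume_name = ' '.join(word.capitalize() for word in tokens[0].split())
assert volume_name == 'Blender'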

View File

@@ -0,0 +1,44 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# This is a script which is used as POST-INSTALL one for regular CMake's
# INSTALL target.
# It is used by buildbot workers to sign every binary which is going into
# the final bundle.
# On Windows, Python 3 only provides python.exe, there is no python3.exe.
#
# On other platforms it is possible to have python2 and python3, and a
# symbolic link to python to either of them. So on those platforms use
# an explicit Python version.
if(WIN32)
set(PYTHON_EXECUTABLE python)
else()
set(PYTHON_EXECUTABLE python3)
endif()
execute_process(
COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_LIST_DIR}/worker_codesign.py"
"${CMAKE_INSTALL_PREFIX}"
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
RESULT_VARIABLE exit_code
)
if(NOT exit_code EQUAL "0")
message(FATAL_ERROR "Non-zero exit code of codesign tool")
endif()

View File

@@ -0,0 +1,74 @@
#!/usr/bin/env python3
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Helper script which takes care of signing provided location.
#
# The location can either be a directory (in which case all eligible binaries
# will be signed) or a single file (in which case a single file will be signed).
#
# This script takes care of all the complexity of communicating between process
# which requests file to be signed and the code signing server.
#
# NOTE: Signing happens in-place.
import argparse
import sys
from pathlib import Path
from codesign.simple_code_signer import SimpleCodeSigner
def create_argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument('path_to_sign', type=Path)
return parser
def main():
parser = create_argument_parser()
args = parser.parse_args()
path_to_sign = args.path_to_sign.absolute()
if sys.platform == 'win32':
# When WIX packaging is used to generate the .msi on Windows, CPack will
# install two different projects and install them to different
# installation prefixes:
#
# - C:\b\build\_CPack_Packages\WIX\Blender
# - C:\b\build\_CPack_Packages\WIX\Unspecified
#
# The annoying part is: CMake's post-install script will only be run
# once, with the install prefix which corresponds to the project which
# was installed last. But we want to sign binaries from all projects.
# So in order to do so we detect that we are running for a CPack
# project used for WIX and force the parent directory (which includes both
# projects) to be signed.
#
# Here we force both projects to be signed.
if path_to_sign.name == 'Unspecified' and 'WIX' in str(path_to_sign):
path_to_sign = path_to_sign.parent
code_signer = SimpleCodeSigner()
code_signer.sign_file_or_directory(path_to_sign)
if __name__ == "__main__":
main()
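The WIX special case above boils down to a simple path test; a worked example with a hypothetical CPack prefix:

from pathlib import Path

# Hypothetical CPack install prefix, mirroring the comment above.
path_to_sign = Path('C:/b/build/_CPack_Packages/WIX/Unspecified')
if path_to_sign.name == 'Unspecified' and 'WIX' in str(path_to_sign):
    # Sign the parent so binaries from both WIX projects are covered.
    path_to_sign = path_to_sign.parent
assert path_to_sign.name == 'WIX'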

View File

@@ -0,0 +1,135 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import os
import shutil
import buildbot_utils
def get_cmake_options(builder):
codesign_script = os.path.join(
builder.blender_dir, 'build_files', 'buildbot', 'worker_codesign.cmake')
config_file = "build_files/cmake/config/blender_release.cmake"
options = ['-DCMAKE_BUILD_TYPE:STRING=Release',
'-DWITH_GTESTS=ON']
if builder.platform == 'mac':
options.append('-DCMAKE_OSX_ARCHITECTURES:STRING=x86_64')
options.append('-DCMAKE_OSX_DEPLOYMENT_TARGET=10.9')
elif builder.platform == 'win':
options.extend(['-G', 'Visual Studio 16 2019', '-A', 'x64'])
if builder.codesign:
options.extend(['-DPOSTINSTALL_SCRIPT:PATH=' + codesign_script])
elif builder.platform == 'linux':
config_file = "build_files/buildbot/config/blender_linux.cmake"
optix_sdk_dir = os.path.join(builder.blender_dir, '..', '..', 'NVIDIA-Optix-SDK-7.1')
options.append('-DOPTIX_ROOT_DIR:PATH=' + optix_sdk_dir)
# Workaround to build sm_30 kernels with CUDA 10, since CUDA 11 no longer supports that architecture
if builder.platform == 'win':
options.append('-DCUDA10_TOOLKIT_ROOT_DIR:PATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.1')
options.append('-DCUDA10_NVCC_EXECUTABLE:FILEPATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.1/bin/nvcc.exe')
options.append('-DCUDA11_TOOLKIT_ROOT_DIR:PATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.1')
options.append('-DCUDA11_NVCC_EXECUTABLE:FILEPATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.1/bin/nvcc.exe')
elif builder.platform == 'linux':
options.append('-DCUDA10_TOOLKIT_ROOT_DIR:PATH=/usr/local/cuda-10.1')
options.append('-DCUDA10_NVCC_EXECUTABLE:FILEPATH=/usr/local/cuda-10.1/bin/nvcc')
options.append('-DCUDA11_TOOLKIT_ROOT_DIR:PATH=/usr/local/cuda-11.1')
options.append('-DCUDA11_NVCC_EXECUTABLE:FILEPATH=/usr/local/cuda-11.1/bin/nvcc')
options.append("-C" + os.path.join(builder.blender_dir, config_file))
options.append("-DCMAKE_INSTALL_PREFIX=%s" % (builder.install_dir))
return options
def update_git(builder):
# Do extra git fetch because not all platform/git/buildbot combinations
# update the origin remote, causing buildinfo to detect local changes.
os.chdir(builder.blender_dir)
print("Fetching remotes")
command = ['git', 'fetch', '--all']
buildbot_utils.call(builder.command_prefix + command)
def clean_directories(builder):
# Make sure no garbage remains from the previous run
if os.path.isdir(builder.install_dir):
shutil.rmtree(builder.install_dir)
# Make sure build directory exists and enter it
os.makedirs(builder.build_dir, exist_ok=True)
# Remove buildinfo files to force buildbot to re-generate them.
for buildinfo in ('buildinfo.h', 'buildinfo.h.txt', ):
full_path = os.path.join(builder.build_dir, 'source', 'creator', buildinfo)
if os.path.exists(full_path):
print("Removing {}" . format(buildinfo))
os.remove(full_path)
def cmake_configure(builder):
# CMake configuration
os.chdir(builder.build_dir)
cmake_cache = os.path.join(builder.build_dir, 'CMakeCache.txt')
if os.path.exists(cmake_cache):
print("Removing CMake cache")
os.remove(cmake_cache)
print("CMake configure:")
cmake_options = get_cmake_options(builder)
command = ['cmake', builder.blender_dir] + cmake_options
buildbot_utils.call(builder.command_prefix + command)
def cmake_build(builder):
# CMake build
os.chdir(builder.build_dir)
# NOTE: CPack will build an INSTALL target, which would mean that code
# signing will happen twice when using `make install` and CPack.
# The tricky bit here is that it is not possible to know whether the INSTALL
# target is used by CPack or by buildbot itself. An extra level on top of
# this is that on Windows it is required to build the INSTALL target in order
# to have unit test binaries to run.
# So on the one hand we do an extra unneeded code sign on Windows, but on
# the positive side we don't add complexity and don't make the build process
# more fragile trying to avoid this. The signing process is way faster than
# just a clean build of buildbot, especially with regression tests enabled.
if builder.platform == 'win':
command = ['cmake', '--build', '.', '--target', 'install', '--config', 'Release']
else:
command = ['make', '-s', '-j16', 'install']
print("CMake build:")
buildbot_utils.call(builder.command_prefix + command)
if __name__ == "__main__":
builder = buildbot_utils.create_builder_from_arguments()
update_git(builder)
clean_directories(builder)
cmake_configure(builder)
cmake_build(builder)

View File

@@ -0,0 +1,208 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Runs on buildbot worker, creating a release package using the build
# system and zipping it into buildbot_upload.zip. This is then uploaded
# to the master in the next buildbot step.
import os
import sys
from pathlib import Path
import buildbot_utils
def get_package_name(builder, platform=None):
info = buildbot_utils.VersionInfo(builder)
package_name = 'blender-' + info.full_version
if platform:
package_name += '-' + platform
if not (builder.branch == 'master' or builder.is_release_branch):
if info.is_development_build:
package_name = builder.branch + "-" + package_name
return package_name
def sign_file_or_directory(path):
from codesign.simple_code_signer import SimpleCodeSigner
code_signer = SimpleCodeSigner()
code_signer.sign_file_or_directory(Path(path))
def create_buildbot_upload_zip(builder, package_files):
import zipfile
buildbot_upload_zip = os.path.join(builder.upload_dir, "buildbot_upload.zip")
if os.path.exists(buildbot_upload_zip):
os.remove(buildbot_upload_zip)
try:
z = zipfile.ZipFile(buildbot_upload_zip, "w", compression=zipfile.ZIP_STORED)
for filepath, filename in package_files:
print("Packaged", filename)
z.write(filepath, arcname=filename)
z.close()
except Exception as ex:
sys.stderr.write('Create buildbot_upload.zip failed: ' + str(ex) + '\n')
sys.exit(1)
def create_tar_xz(src, dest, package_name):
# One extra character, to remove the leading os.sep when computing package_root
ln = len(src) + 1
flist = list()
# Create list of tuples containing file and archive name
for root, dirs, files in os.walk(src):
package_root = os.path.join(package_name, root[ln:])
flist.extend([(os.path.join(root, file), os.path.join(package_root, file)) for file in files])
import tarfile
# Set UID/GID of archived files to 0, otherwise they'd be owned by whatever
# user compiled the package. If root then unpacks it to /usr/local/, you get
# a security issue.
def _fakeroot(tarinfo):
tarinfo.gid = 0
tarinfo.gname = "root"
tarinfo.uid = 0
tarinfo.uname = "root"
return tarinfo
package = tarfile.open(dest, 'w:xz', preset=9)
for entry in flist:
package.add(entry[0], entry[1], recursive=False, filter=_fakeroot)
package.close()
def cleanup_files(dirpath, extension):
for f in os.listdir(dirpath):
filepath = os.path.join(dirpath, f)
if os.path.isfile(filepath) and f.endswith(extension):
os.remove(filepath)
def pack_mac(builder):
info = buildbot_utils.VersionInfo(builder)
os.chdir(builder.build_dir)
cleanup_files(builder.build_dir, '.dmg')
package_name = get_package_name(builder, 'macOS')
package_filename = package_name + '.dmg'
package_filepath = os.path.join(builder.build_dir, package_filename)
release_dir = os.path.join(builder.blender_dir, 'release', 'darwin')
buildbot_dir = os.path.join(builder.blender_dir, 'build_files', 'buildbot')
bundle_script = os.path.join(buildbot_dir, 'worker_bundle_dmg.py')
command = [bundle_script]
command += ['--dmg', package_filepath]
if info.is_development_build:
background_image = os.path.join(release_dir, 'buildbot', 'background.tif')
command += ['--background-image', background_image]
if builder.codesign:
command += ['--codesign']
command += [builder.install_dir]
buildbot_utils.call(command)
create_buildbot_upload_zip(builder, [(package_filepath, package_filename)])
def pack_win(builder):
info = buildbot_utils.VersionInfo(builder)
os.chdir(builder.build_dir)
cleanup_files(builder.build_dir, '.zip')
# CPack will add the platform name
cpack_name = get_package_name(builder, None)
package_name = get_package_name(builder, 'windows' + str(builder.bits))
command = ['cmake', '-DCPACK_OVERRIDE_PACKAGENAME:STRING=' + cpack_name, '.']
buildbot_utils.call(builder.command_prefix + command)
command = ['cpack', '-G', 'ZIP']
buildbot_utils.call(builder.command_prefix + command)
package_filename = package_name + '.zip'
package_filepath = os.path.join(builder.build_dir, package_filename)
package_files = [(package_filepath, package_filename)]
if info.version_cycle == 'release':
# Installer only for final release builds, otherwise will get
# 'this product is already installed' messages.
command = ['cpack', '-G', 'WIX']
buildbot_utils.call(builder.command_prefix + command)
package_filename = package_name + '.msi'
package_filepath = os.path.join(builder.build_dir, package_filename)
if builder.codesign:
sign_file_or_directory(package_filepath)
package_files += [(package_filepath, package_filename)]
create_buildbot_upload_zip(builder, package_files)
def pack_linux(builder):
blender_executable = os.path.join(builder.install_dir, 'blender')
info = buildbot_utils.VersionInfo(builder)
# Strip all unused symbols from the binaries
print("Stripping binaries...")
buildbot_utils.call(builder.command_prefix + ['strip', '--strip-all', blender_executable])
print("Stripping python...")
py_target = os.path.join(builder.install_dir, info.short_version)
buildbot_utils.call(
builder.command_prefix + [
'find', py_target, '-iname', '*.so', '-exec', 'strip', '-s', '{}', ';',
],
)
# Construct package name
platform_name = 'linux64'
package_name = get_package_name(builder, platform_name)
package_filename = package_name + ".tar.xz"
print("Creating .tar.xz archive")
package_filepath = builder.install_dir + '.tar.xz'
create_tar_xz(builder.install_dir, package_filepath, package_name)
# Create buildbot_upload.zip
create_buildbot_upload_zip(builder, [(package_filepath, package_filename)])
if __name__ == "__main__":
builder = buildbot_utils.create_builder_from_arguments()
# Make sure install directory always exists
os.makedirs(builder.install_dir, exist_ok=True)
if builder.platform == 'mac':
pack_mac(builder)
elif builder.platform == 'win':
pack_win(builder)
elif builder.platform == 'linux':
pack_linux(builder)
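As a simplified worked example of the package naming in get_package_name() above (all values are illustrative; VersionInfo normally supplies the version, and the release-branch check is omitted):

full_version = '2.93.0'        # assumed VersionInfo.full_version
platform = 'linux64'
branch = 'temp-feature'        # assumed non-release development branch
package_name = 'blender-' + full_version + '-' + platform
if branch != 'master':         # development build on a feature branch
    package_name = branch + '-' + package_name
print(package_name)            # -> temp-feature-blender-2.93.0-linux64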

View File

@@ -0,0 +1,42 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import buildbot_utils
import os
import sys
def get_ctest_arguments(builder):
args = ['--output-on-failure']
if builder.platform == 'win':
args += ['-C', 'Release']
return args
def test(builder):
os.chdir(builder.build_dir)
command = builder.command_prefix + ['ctest'] + get_ctest_arguments(builder)
buildbot_utils.call(command)
if __name__ == "__main__":
builder = buildbot_utils.create_builder_from_arguments()
test(builder)

View File

@@ -0,0 +1,31 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import buildbot_utils
import os
import sys
if __name__ == "__main__":
builder = buildbot_utils.create_builder_from_arguments()
os.chdir(builder.blender_dir)
# Run make update which handles all libraries and submodules.
make_update = os.path.join(builder.blender_dir, "build_files", "utils", "make_update.py")
buildbot_utils.call([sys.executable, make_update, '--no-blender', "--use-tests", "--use-centos-libraries"])

View File

@@ -79,7 +79,7 @@ if(EXISTS ${SOURCE_DIR}/.git)
ERROR_QUIET)
if(NOT _git_below_check STREQUAL "")
# If there're commits between HEAD and upstream this means
# that we're reset-ed to older revision. Use its hash then.
# that we're reset-ed to older revision. Use it's hash then.
execute_process(COMMAND git rev-parse --short=12 HEAD
WORKING_DIRECTORY ${SOURCE_DIR}
OUTPUT_VARIABLE MY_WC_HASH

View File

@@ -56,6 +56,10 @@ set(WITH_TBB ON CACHE BOOL "" FORCE)
set(WITH_USD ON CACHE BOOL "" FORCE)
set(WITH_MEM_JEMALLOC ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_CUDA_BINARIES ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_CUBIN_COMPILER OFF CACHE BOOL "" FORCE)
set(CYCLES_CUDA_BINARIES_ARCH sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61;sm_70;sm_75;sm_86;compute_75 CACHE STRING "" FORCE)
set(WITH_CYCLES_DEVICE_OPTIX ON CACHE BOOL "" FORCE)
# platform dependent options
if(APPLE)
@@ -76,8 +80,4 @@ if(UNIX AND NOT APPLE)
endif()
if(NOT APPLE)
set(WITH_XR_OPENXR ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_DEVICE_OPTIX ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_CUDA_BINARIES ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_CUBIN_COMPILER OFF CACHE BOOL "" FORCE)
endif()

View File

@@ -104,8 +104,8 @@ if(WIN32)
set(CPACK_WIX_LIGHT_EXTRA_FLAGS -dcl:medium)
endif()
set(CPACK_PACKAGE_EXECUTABLES "blender-launcher" "blender")
set(CPACK_CREATE_DESKTOP_LINKS "blender-launcher" "blender")
set(CPACK_PACKAGE_EXECUTABLES "blender" "blender")
set(CPACK_CREATE_DESKTOP_LINKS "blender" "blender")
include(CPack)

View File

@@ -20,6 +20,12 @@
# Libraries configuration for Apple.
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
set(MACOSX_DEPLOYMENT_TARGET 11.00)
else()
set(MACOSX_DEPLOYMENT_TARGET 10.13)
endif()
macro(find_package_wrapper)
# do nothing, just satisfy the macro
endmacro()

View File

@@ -168,15 +168,21 @@ endif()
unset(OSX_SDKROOT)
# 10.13 is our min. target, if you use higher sdk, weak linking happens
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
# M1 chips run Big Sur onwards.
set(OSX_MIN_DEPLOYMENT_TARGET 11.00)
else()
# 10.13 is our min. target, if you use higher sdk, weak linking happens
set(OSX_MIN_DEPLOYMENT_TARGET 10.13)
endif()
set(CMAKE_OSX_DEPLOYMENT_TARGET "${OSX_MIN_DEPLOYMENT_TARGET}" CACHE STRING "" FORCE)
if(CMAKE_OSX_DEPLOYMENT_TARGET)
if(${CMAKE_OSX_DEPLOYMENT_TARGET} VERSION_LESS ${OSX_MIN_DEPLOYMENT_TARGET})
message(STATUS "Setting deployment target to ${OSX_MIN_DEPLOYMENT_TARGET}, lower versions are not supported")
set(CMAKE_OSX_DEPLOYMENT_TARGET "${OSX_MIN_DEPLOYMENT_TARGET}" CACHE STRING "" FORCE)
endif()
else()
set(CMAKE_OSX_DEPLOYMENT_TARGET "${OSX_MIN_DEPLOYMENT_TARGET}" CACHE STRING "" FORCE)
endif()
if(NOT ${CMAKE_GENERATOR} MATCHES "Xcode")
# Force CMAKE_OSX_DEPLOYMENT_TARGET for makefiles, will not work else (CMake bug?)

View File

@@ -119,7 +119,7 @@ string(APPEND CMAKE_MODULE_LINKER_FLAGS " /SAFESEH:NO /ignore:4099")
list(APPEND PLATFORM_LINKLIBS
ws2_32 vfw32 winmm kernel32 user32 gdi32 comdlg32 Comctl32 version
advapi32 shfolder shell32 ole32 oleaut32 uuid psapi Dbghelp Shlwapi
pathcch Shcore
pathcch
)
if(WITH_INPUT_IME)
@@ -144,8 +144,8 @@ add_definitions(-D_ALLOW_KEYWORD_MACROS)
# that both /GR and /GR- are specified.
remove_cc_flag("/GR")
# Make the Windows 8.1 API available for use.
add_definitions(-D_WIN32_WINNT=0x603)
# We want to support Windows 7 level ABI
add_definitions(-D_WIN32_WINNT=0x601)
include(build_files/cmake/platform/platform_win32_bundle_crt.cmake)
remove_cc_flag("/MDd" "/MD" "/Zi")

View File

@@ -1,8 +0,0 @@
Pipeline Config
===============
This configuration file is used by buildbot new pipeline for the `update-code` step.
It will soon be used by the ../utils/make_update.py script.
Both buildbot and developers will eventually use the same configuration file.

View File

@@ -1,87 +0,0 @@
{
"update-code":
{
"git" :
{
"submodules":
[
{ "path": "release/scripts/addons", "branch": "master", "commit_id": "HEAD" },
{ "path": "release/scripts/addons_contrib", "branch": "master", "commit_id": "HEAD" },
{ "path": "release/datafiles/locale", "branch": "master", "commit_id": "HEAD" },
{ "path": "source/tools", "branch": "master", "commit_id": "HEAD" }
]
},
"svn":
{
"tests": { "path": "lib/tests", "branch": "trunk", "commit_id": "HEAD" },
"libraries":
{
"darwin-x86_64": { "path": "lib/darwin", "branch": "trunk", "commit_id": "HEAD" },
"darwin-arm64": { "path": "lib/darwin_arm64", "branch": "trunk", "commit_id": "HEAD" },
"linux-x86_64": { "path": "lib/linux_centos7_x86_64", "branch": "trunk", "commit_id": "HEAD" },
"windows-amd64": { "path": "lib/win64_vc15", "branch": "trunk", "commit_id": "HEAD" }
}
}
},
"buildbot":
{
"gcc":
{
"version": "9.0"
},
"sdks":
{
"optix":
{
"version": "7.1.0"
},
"cuda10":
{
"version": "10.1"
},
"cuda11":
{
"version": "11.3"
}
},
"cmake":
{
"default":
{
"version": "any",
"overrides":
{
}
},
"darwin-x86_64":
{
"overrides":
{
}
},
"darwin-arm64":
{
"overrides":
{
}
},
"linux-x86_64":
{
"overrides":
{
}
},
"windows-amd64":
{
"overrides":
{
}
}
}
}
}

View File

@@ -1,5 +0,0 @@
Make Utility Scripts
====================
Scripts used only by developers for now

View File

@@ -85,7 +85,7 @@ def openBlendFile(filename):
'''
handle = open(filename, 'rb')
magic = ReadString(handle, 7)
if magic in {"BLENDER", "BULLETf"}:
if magic in ("BLENDER", "BULLETf"):
log.debug("normal blendfile detected")
handle.seek(0, os.SEEK_SET)
return handle
@@ -137,7 +137,7 @@ class BlendFile:
fileblock = BlendFileBlock(handle, self)
found_dna_block = False
while not found_dna_block:
if fileblock.Header.Code in {"DNA1", "SDNA"}:
if fileblock.Header.Code in ("DNA1", "SDNA"):
self.Catalog = DNACatalog(self.Header, handle)
found_dna_block = True
else:

View File

@@ -4,9 +4,7 @@ Simple Render Engine
"""
import bpy
import array
import gpu
from gpu_extras.presets import draw_texture_2d
import bgl
class CustomRenderEngine(bpy.types.RenderEngine):
@@ -102,7 +100,8 @@ class CustomRenderEngine(bpy.types.RenderEngine):
dimensions = region.width, region.height
# Bind shader that converts from scene linear to display space,
gpu.state.blend_set('ALPHA_PREMULT')
bgl.glEnable(bgl.GL_BLEND)
bgl.glBlendFunc(bgl.GL_ONE, bgl.GL_ONE_MINUS_SRC_ALPHA)
self.bind_display_space_shader(scene)
if not self.draw_data or self.draw_data.dimensions != dimensions:
@@ -111,7 +110,7 @@ class CustomRenderEngine(bpy.types.RenderEngine):
self.draw_data.draw()
self.unbind_display_space_shader()
gpu.state.blend_set('NONE')
bgl.glDisable(bgl.GL_BLEND)
class CustomDrawData:
@@ -120,21 +119,68 @@ class CustomDrawData:
self.dimensions = dimensions
width, height = dimensions
pixels = width * height * array.array('f', [0.1, 0.2, 0.1, 1.0])
pixels = gpu.types.Buffer('FLOAT', width * height * 4, pixels)
pixels = [0.1, 0.2, 0.1, 1.0] * width * height
pixels = bgl.Buffer(bgl.GL_FLOAT, width * height * 4, pixels)
# Generate texture
self.texture = gpu.types.GPUTexture((width, height), format='RGBA16F', data=pixels)
self.texture = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGenTextures(1, self.texture)
bgl.glActiveTexture(bgl.GL_TEXTURE0)
bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture[0])
bgl.glTexImage2D(bgl.GL_TEXTURE_2D, 0, bgl.GL_RGBA16F, width, height, 0, bgl.GL_RGBA, bgl.GL_FLOAT, pixels)
bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_LINEAR)
bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_LINEAR)
bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0)
# Note: This is just a didactic example.
# In this case it would be more convenient to fill the texture with:
# self.texture.clear('FLOAT', value=[0.1, 0.2, 0.1, 1.0])
# Bind shader that converts from scene linear to display space,
# use the scene's color management settings.
shader_program = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGetIntegerv(bgl.GL_CURRENT_PROGRAM, shader_program)
# Generate vertex array
self.vertex_array = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGenVertexArrays(1, self.vertex_array)
bgl.glBindVertexArray(self.vertex_array[0])
texturecoord_location = bgl.glGetAttribLocation(shader_program[0], "texCoord")
position_location = bgl.glGetAttribLocation(shader_program[0], "pos")
bgl.glEnableVertexAttribArray(texturecoord_location)
bgl.glEnableVertexAttribArray(position_location)
# Generate geometry buffers for drawing textured quad
position = [0.0, 0.0, width, 0.0, width, height, 0.0, height]
position = bgl.Buffer(bgl.GL_FLOAT, len(position), position)
texcoord = [0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]
texcoord = bgl.Buffer(bgl.GL_FLOAT, len(texcoord), texcoord)
self.vertex_buffer = bgl.Buffer(bgl.GL_INT, 2)
bgl.glGenBuffers(2, self.vertex_buffer)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vertex_buffer[0])
bgl.glBufferData(bgl.GL_ARRAY_BUFFER, 32, position, bgl.GL_STATIC_DRAW)
bgl.glVertexAttribPointer(position_location, 2, bgl.GL_FLOAT, bgl.GL_FALSE, 0, None)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vertex_buffer[1])
bgl.glBufferData(bgl.GL_ARRAY_BUFFER, 32, texcoord, bgl.GL_STATIC_DRAW)
bgl.glVertexAttribPointer(texturecoord_location, 2, bgl.GL_FLOAT, bgl.GL_FALSE, 0, None)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, 0)
bgl.glBindVertexArray(0)
def __del__(self):
del self.texture
bgl.glDeleteBuffers(2, self.vertex_buffer)
bgl.glDeleteVertexArrays(1, self.vertex_array)
bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0)
bgl.glDeleteTextures(1, self.texture)
def draw(self):
draw_texture_2d(self.texture, (0, 0), self.texture.width, self.texture.height)
bgl.glActiveTexture(bgl.GL_TEXTURE0)
bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture[0])
bgl.glBindVertexArray(self.vertex_array[0])
bgl.glDrawArrays(bgl.GL_TRIANGLE_FAN, 0, 4)
bgl.glBindVertexArray(0)
bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0)
# RenderEngines also need to tell UI Panels that they are compatible with.

View File

@@ -4,6 +4,7 @@ Mesh with Random Vertex Colors
"""
import bpy
import gpu
import bgl
import numpy as np
from random import random
from gpu_extras.batch import batch_for_shader
@@ -30,10 +31,9 @@ batch = batch_for_shader(
def draw():
gpu.state.depth_test_set('LESS_EQUAL')
gpu.state.depth_mask_set(True)
bgl.glEnable(bgl.GL_DEPTH_TEST)
batch.draw(shader)
gpu.state.depth_mask_set(False)
bgl.glDisable(bgl.GL_DEPTH_TEST)
bpy.types.SpaceView3D.draw_handler_add(draw, (), 'WINDOW', 'POST_VIEW')

View File

@@ -6,11 +6,11 @@ To use this example you have to provide an image that should be displayed.
"""
import bpy
import gpu
import bgl
from gpu_extras.batch import batch_for_shader
IMAGE_NAME = "Untitled"
image = bpy.data.images[IMAGE_NAME]
texture = gpu.texture.from_image(image)
shader = gpu.shader.from_builtin('2D_IMAGE')
batch = batch_for_shader(
@@ -21,9 +21,16 @@ batch = batch_for_shader(
},
)
if image.gl_load():
raise Exception()
def draw():
bgl.glActiveTexture(bgl.GL_TEXTURE0)
bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode)
shader.bind()
shader.uniform_sampler("image", texture)
shader.uniform_int("image", 0)
batch.draw(shader)

View File

@@ -9,6 +9,7 @@ Generate a texture using Offscreen Rendering
"""
import bpy
import gpu
import bgl
from mathutils import Matrix
from gpu_extras.batch import batch_for_shader
from gpu_extras.presets import draw_circle_2d
@@ -19,8 +20,8 @@ from gpu_extras.presets import draw_circle_2d
offscreen = gpu.types.GPUOffScreen(512, 512)
with offscreen.bind():
fb = gpu.state.active_framebuffer_get()
fb.clear(color=(0.0, 0.0, 0.0, 0.0))
bgl.glClearColor(0.0, 0.0, 0.0, 0.0)
bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)
with gpu.matrix.push_pop():
# reset matrices -> use normalized device coordinates [-1, 1]
gpu.matrix.load_matrix(Matrix.Identity(4))
@@ -74,10 +75,13 @@ batch = batch_for_shader(
def draw():
bgl.glActiveTexture(bgl.GL_TEXTURE0)
bgl.glBindTexture(bgl.GL_TEXTURE_2D, offscreen.color_texture)
shader.bind()
shader.uniform_float("modelMatrix", Matrix.Translation((1, 2, 3)) @ Matrix.Scale(3, 4))
shader.uniform_float("viewProjectionMatrix", bpy.context.region_data.perspective_matrix)
shader.uniform_sampler("image", offscreen.texture_color)
shader.uniform_float("image", 0)
batch.draw(shader)

View File

@@ -7,10 +7,11 @@ If it already exists, it will override the existing one.
Currently almost all of the execution time is spent in the last line.
In the future this will hopefully be solved by implementing the Python buffer protocol
for :class:`gpu.types.Buffer` and :class:`bpy.types.Image.pixels` (aka ``bpy_prop_array``).
for :class:`bgl.Buffer` and :class:`bpy.types.Image.pixels` (aka ``bpy_prop_array``).
"""
import bpy
import gpu
import bgl
import random
from mathutils import Matrix
from gpu_extras.presets import draw_circle_2d
@@ -24,8 +25,8 @@ RING_AMOUNT = 10
offscreen = gpu.types.GPUOffScreen(WIDTH, HEIGHT)
with offscreen.bind():
fb = gpu.state.active_framebuffer_get()
fb.clear(color=(0.0, 0.0, 0.0, 0.0))
bgl.glClearColor(0.0, 0.0, 0.0, 0.0)
bgl.glClear(bgl.GL_COLOR_BUFFER_BIT)
with gpu.matrix.push_pop():
# reset matrices -> use normalized device coordinates [-1, 1]
gpu.matrix.load_matrix(Matrix.Identity(4))
@@ -36,7 +37,9 @@ with offscreen.bind():
(random.uniform(-1, 1), random.uniform(-1, 1)),
(1, 1, 1, 1), random.uniform(0.1, 1), 20)
buffer = fb.read_color(0, 0, WIDTH, HEIGHT, 4, 0, 'UBYTE')
buffer = bgl.Buffer(bgl.GL_BYTE, WIDTH * HEIGHT * 4)
bgl.glReadBuffer(bgl.GL_BACK)
bgl.glReadPixels(0, 0, WIDTH, HEIGHT, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)
offscreen.free()
@@ -45,6 +48,4 @@ if not IMAGE_NAME in bpy.data.images:
bpy.data.images.new(IMAGE_NAME, WIDTH, HEIGHT)
image = bpy.data.images[IMAGE_NAME]
image.scale(WIDTH, HEIGHT)
buffer.dimensions = WIDTH * HEIGHT * 4
image.pixels = [v / 255 for v in buffer]
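On the slow last line: bpy.types.Image.pixels supports foreach_set, which is usually far faster than assigning a Python list. A hedged sketch, assuming iterating the buffer yields 0..255 integers exactly as the line above does:
import numpy as np
rgba = np.array(list(buffer), dtype=np.float32) / 255.0  # normalize to 0..1
image.pixels.foreach_set(rgba)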

View File

@@ -7,6 +7,7 @@ You could also make this independent of a specific camera,
but Blender does not expose good functions to create view and projection matrices yet.
"""
import bpy
import bgl
import gpu
from gpu_extras.presets import draw_texture_2d
@@ -33,8 +34,8 @@ def draw():
view_matrix,
projection_matrix)
gpu.state.depth_mask_set(False)
draw_texture_2d(offscreen.texture_color, (10, 10), WIDTH, HEIGHT)
bgl.glDisable(bgl.GL_DEPTH_TEST)
draw_texture_2d(offscreen.color_texture, (10, 10), WIDTH, HEIGHT)
bpy.types.SpaceView3D.draw_handler_add(draw, (), 'WINDOW', 'POST_PIXEL')

View File

@@ -1,5 +0,0 @@
# Compute local object transformation matrix:
if obj.rotation_mode == 'QUATERNION':
matrix = mathutils.Matrix.LocRotScale(obj.location, obj.rotation_quaternion, obj.scale)
else:
matrix = mathutils.Matrix.LocRotScale(obj.location, obj.rotation_euler, obj.scale)

View File

@@ -14,14 +14,10 @@ mat_rot = mathutils.Matrix.Rotation(math.radians(45.0), 4, 'X')
mat_out = mat_loc @ mat_rot @ mat_sca
print(mat_out)
# extract components back out of the matrix as two vectors and a quaternion
# extract components back out of the matrix
loc, rot, sca = mat_out.decompose()
print(loc, rot, sca)
# recombine extracted components
mat_out2 = mathutils.Matrix.LocRotScale(loc, rot, sca)
print(mat_out2)
# it can also be useful to access components of a matrix directly
mat = mathutils.Matrix()
mat[0][0], mat[1][0], mat[2][0] = 0.0, 1.0, 2.0

View File

@@ -545,13 +545,6 @@ def range_str(val):
def example_extract_docstring(filepath):
'''
Return (text, line_no, line_no_has_content) where:
- ``text`` is the doc-string text.
- ``line_no`` is the line the doc-string text ends.
- ``line_no_has_content`` when False, this file only contains a doc-string.
There is no need to include the remainder.
'''
file = open(filepath, "r", encoding="utf-8")
line = file.readline()
line_no = 0
@@ -560,7 +553,7 @@ def example_extract_docstring(filepath):
line_no += 1
else:
file.close()
return "", 0, True
return "", 0, False
for line in file:
line_no += 1
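For reference, a self-contained sketch of the behaviour the removed doc-string describes; this is illustrative only, not the upstream function verbatim:
def extract_docstring(filepath):
    # Return (text, line_no, line_no_has_content): the doc-string body,
    # the line where it ends, and whether real content follows it.
    with open(filepath, "r", encoding="utf-8") as fh:
        line = fh.readline()
        if not line.startswith('"""'):
            return "", 0, True  # no doc-string, the whole file is content
        text = []
        line_no = 1
        for line in fh:
            line_no += 1
            if line.rstrip().endswith('"""'):
                break
            text.append(line)
        has_content = any(l.strip() for l in fh)  # anything after the doc-string?
    return "".join(text), line_no, has_content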
@@ -954,7 +947,7 @@ def pymodule2sphinx(basepath, module_name, module, title, module_all_extra):
# constant, not much fun we can do here except to list it.
# TODO, figure out some way to document these!
fw(".. data:: %s\n\n" % attribute)
write_indented_lines(" ", fw, "Constant value %s" % repr(value), False)
write_indented_lines(" ", fw, "constant value %s" % repr(value), False)
fw("\n")
else:
BPY_LOGGER.debug("\tnot documenting %s.%s of %r type" % (module_name, attribute, value_type.__name__))
@@ -1036,6 +1029,7 @@ def pymodule2sphinx(basepath, module_name, module, title, module_all_extra):
context_type_map = {
# context_member: (RNA type, is_collection)
"active_annotation_layer": ("GPencilLayer", False),
"active_base": ("ObjectBase", False),
"active_bone": ("EditBone", False),
"active_gpencil_frame": ("GreasePencilLayer", True),
"active_gpencil_layer": ("GPencilLayer", True),
@@ -1246,7 +1240,7 @@ def pyrna_enum2sphinx(prop, use_empty_descriptions=False):
"%s.\n" % (
identifier,
# Account for multi-line enum descriptions, allowing this to be a block of text.
indent(" -- ".join(escape_rst(val) for val in (name, description) if val) or "Undocumented", " "),
indent(", ".join(escape_rst(val) for val in (name, description) if val) or "Undocumented", " "),
)
for identifier, name, description in prop.enum_items
])
@@ -1555,8 +1549,8 @@ def pyrna2sphinx(basepath):
fw(".. hlist::\n")
fw(" :columns: 2\n\n")
# Context does its own thing.
# "active_object": ("Object", False),
# context does its own thing
# "active_base": ("ObjectBase", False),
for ref_attr, (ref_type, ref_is_seq) in sorted(context_type_map.items()):
if ref_type == struct_id:
fw(" * :mod:`bpy.context.%s`\n" % ref_attr)

View File

@@ -1,8 +1,7 @@
/* T76453: Prevent long enum lists */
.field-list > dd p {
.field-list li {
max-height: 245px;
overflow-y: auto !important;
word-break: break-word;
}
/* Hide home icon in search area */
@@ -12,15 +11,3 @@
.wy-nav-content {
max-width: 1000px !important;
}
/* Fix long titles on mobile */
h1, h2, h3, h4, h5, h6 {word-break: break-all}
/* Temp fix for https://github.com/readthedocs/sphinx_rtd_theme/pull/1109 */
.hlist tr {
display: -ms-flexbox;
display: flex;
flex-flow: row wrap;
}
.hlist td {margin-right: auto}

View File

@@ -75,7 +75,6 @@ void FFMPEGWriter::encode()
m_frame->nb_samples = m_input_samples;
m_frame->format = m_codecCtx->sample_fmt;
m_frame->channel_layout = m_codecCtx->channel_layout;
m_frame->channels = m_specs.channels;
if(avcodec_fill_audio_frame(m_frame, m_specs.channels, m_codecCtx->sample_fmt, reinterpret_cast<data_t*>(data), m_input_buffer.getSize(), 0) < 0)
AUD_THROW(FileException, "File couldn't be written, filling the audio frame failed with ffmpeg.");

View File

@@ -29,10 +29,10 @@
#if OPENVDB == 1
# include "openvdb/openvdb.h"
# include "openvdb/points/PointConversion.h"
# include "openvdb/points/PointCount.h"
# include "openvdb/tools/Clip.h"
# include "openvdb/tools/Dense.h"
# include <openvdb/points/PointConversion.h>
# include <openvdb/points/PointCount.h>
# include <openvdb/tools/Clip.h>
# include <openvdb/tools/Dense.h>
#endif
#define POSITION_NAME "P"
@@ -519,7 +519,7 @@ int writeObjectsVDB(const string &filename,
}
}
// Write only if there is at least one grid, optionally write with compression.
if (gridsVDB.size()) {
int vdb_flags = openvdb::io::COMPRESS_ACTIVE_MASK;
switch (compression) {
@@ -534,8 +534,7 @@ int writeObjectsVDB(const string &filename,
}
case COMPRESSION_BLOSC: {
# if OPENVDB_BLOSC == 1
// Cannot use |= here, causes segfault with blosc 1.5.0 (== recommended version)
vdb_flags = openvdb::io::COMPRESS_BLOSC;
vdb_flags |= openvdb::io::COMPRESS_BLOSC;
# else
debMsg("OpenVDB was built without Blosc support, using Zip compression instead", 1);
vdb_flags |= openvdb::io::COMPRESS_ZIP;

View File

@@ -384,7 +384,6 @@ class FluidSolver : public PbClass {
GridStorage<Real> mGrids4dReal;
GridStorage<Vec3> mGrids4dVec;
GridStorage<Vec4> mGrids4dVec4;
public:
PbArgs _args;
}

View File

@@ -42,7 +42,7 @@ inline void updateQtGui(bool full, int frame, float time, const std::string &cur
# ifdef _DEBUG
# define DEBUG 1
# endif // _DEBUG
#endif // DEBUG
// Standard exception
class Error : public std::exception {

View File

@@ -1,3 +1,3 @@
#define MANTA_GIT_VERSION "commit 9c505cd22e289b98c9aa717efba8ef3201c7e458"
#define MANTA_GIT_VERSION "commit 39b7a415721ecbf6643612a24e8eadd221aeb934"

View File

@@ -389,7 +389,6 @@ class GridBase : public PbClass {
Real mDx;
bool m3D; // precomputed Z shift: to ensure 2D compatibility, always use this instead of sx*sy !
IndexInt mStrideZ;
public:
PbArgs _args;
}

View File

@@ -326,7 +326,6 @@ class Grid4dBase : public PbClass {
// precomputed Z,T shift: to ensure 2D compatibility, always use this instead of sx*sy !
IndexInt mStrideZ;
IndexInt mStrideT;
public:
PbArgs _args;
}
@@ -951,7 +950,6 @@ template<class T> class Grid4d : public Grid4dBase {
protected:
T *mData;
public:
PbArgs _args;
}

View File

@@ -266,7 +266,6 @@ class LevelsetGrid : public Grid<Real> {
}
static Real invalidTimeValue();
public:
PbArgs _args;
}

View File

@@ -796,7 +796,6 @@ class Mesh : public PbClass {
std::vector<MeshDataImpl<int> *>
mMdataInt; //! indicate that mdata of this mesh is copied, and needs to be freed
bool mFreeMdata;
public:
PbArgs _args;
}
@@ -882,7 +881,6 @@ class MeshDataBase : public PbClass {
protected:
Mesh *mMesh;
public:
PbArgs _args;
}
@@ -1647,7 +1645,6 @@ template<class T> class MeshDataImpl : public MeshDataBase {
//! optionally , we might have an associated grid from which to grab new data
Grid<T> *mpGridSource; //! unfortunately , we need to distinguish mac vs regular vec3
bool mGridSourceMAC;
public:
PbArgs _args;
}

View File

@@ -154,7 +154,6 @@ class MovingObstacle : public PbClass {
int mEmptyType;
int mID;
static int sIDcnt;
public:
PbArgs _args;
}

View File

@@ -236,7 +236,6 @@ class WaveletNoiseField : public PbClass {
static int randomSeed;
// global reference count for noise tile
static std::atomic<int> mNoiseReferenceCount;
public:
PbArgs _args;
}

View File

@@ -205,7 +205,6 @@ class ParticleBase : public PbClass {
//! custom seed for particle systems, used by plugins
int mSeed; //! fix global random seed storage, used mainly by functions in this class
static int globalSeed;
public:
PbArgs _args;
}
@@ -629,7 +628,6 @@ template<class S> class ParticleSystem : public ParticleBase {
std::vector<S> mData;
//! reduce storage , called by doCompress
virtual void compress();
public:
PbArgs _args;
}
@@ -920,7 +918,6 @@ class ParticleIndexSystem : public ParticleSystem<ParticleIndexData> {
return -1;
}
};
public:
PbArgs _args;
}
@@ -985,7 +982,6 @@ template<class DATA, class CON> class ConnectedParticleSystem : public ParticleS
protected:
std::vector<CON> mSegments;
virtual void compress();
public:
PbArgs _args;
}
@@ -1075,7 +1071,6 @@ class ParticleDataBase : public PbClass {
protected:
ParticleBase *mpParticleSys;
public:
PbArgs _args;
}
@@ -1848,7 +1843,6 @@ template<class T> class ParticleDataImpl : public ParticleDataBase {
//! optionally , we might have an associated grid from which to grab new data
Grid<T> *mpGridSource; //! unfortunately , we need to distinguish mac vs regular vec3
bool mGridSourceMAC;
public:
PbArgs _args;
}

View File

@@ -234,10 +234,10 @@ void subdivideMesh(
normalize(ne2);
// Real thisArea = sqrMag(cross(-e2,e0));
// small angle approximation says sin(x) = arcsin(x) = x,
// arccos(x) = pi/2 - arcsin(x),
// cos(x) = dot(A,B),
// so angle is approximately 1 - dot(A,B).
Real angle[3];
angle[0] = 1.0 - dot(ne0, -ne2);
angle[1] = 1.0 - dot(ne1, -ne0);
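Spelling out the approximation chain from the comment, for unit vectors $A$ and $B$ with angle $x$ between them:
\[ \cos x = A \cdot B, \qquad \arccos y = \tfrac{\pi}{2} - \arcsin y, \qquad \sin x \approx \arcsin x \approx x \text{ for small } x, \]
so $x = \arccos(A \cdot B) \approx \tfrac{\pi}{2} - A \cdot B$. The code uses $1 - A \cdot B$ instead; as a note, that is a monotone proxy for the true angle rather than the angle itself, which is all the relative weighting below requires.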

View File

@@ -2287,10 +2287,9 @@ struct knFlipComputePotentialTrappedAir : public KernelBase {
const Vec3 &vj = scaleFromManta * v.getCentered(x, y, z);
const Vec3 xij = xi - xj;
const Vec3 vij = vi - vj;
Real h = !pot.is3D() ? 1.414 * radius :
                       1.732 * radius;  // estimate sqrt(2)*radius resp. sqrt(3)*radius
                                        // for h, due to squared resp. cubic neighbor area
vdiff += norm(vij) * (1 - dot(getNormalized(vij), getNormalized(xij))) *
(1 - norm(xij) / h);
}
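Restating the trapped-air accumulation from this kernel as a formula, with the same names as the code:
\[ v_{\mathrm{diff}} \mathrel{+}= \lVert v_{ij} \rVert \, \bigl(1 - \hat{v}_{ij} \cdot \hat{x}_{ij}\bigr) \, \Bigl(1 - \frac{\lVert x_{ij} \rVert}{h}\Bigr), \qquad h \approx \begin{cases} \sqrt{2}\, r & \text{in 2D} \\ \sqrt{3}\, r & \text{in 3D,} \end{cases} \]
where $\hat{v}_{ij}$ and $\hat{x}_{ij}$ are the normalized relative velocity and position; the constants 1.414 and 1.732 approximate $\sqrt{2}$ and $\sqrt{3}$.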

View File

@@ -269,7 +269,6 @@ class Shape : public PbClass {
protected:
GridType mType;
public:
PbArgs _args;
}
@@ -320,7 +319,6 @@ class NullShape : public Shape {
{
gridSetConst<Real>(phi, 1000.0f);
}
public:
PbArgs _args;
}
@@ -396,7 +394,6 @@ class Box : public Shape {
protected:
Vec3 mP0, mP1;
public:
PbArgs _args;
}
@@ -458,7 +455,6 @@ class Sphere : public Shape {
protected:
Vec3 mCenter, mScale;
Real mRadius;
public:
PbArgs _args;
}
@@ -583,7 +579,6 @@ class Cylinder : public Shape {
protected:
Vec3 mCenter, mZDir;
Real mRadius, mZ;
public:
PbArgs _args;
}
@@ -660,7 +655,6 @@ class Slope : public Shape {
Real mAnglexy, mAngleyz;
Real mOrigin;
Vec3 mGs;
public:
PbArgs _args;
}

View File

@@ -199,7 +199,6 @@ class TurbulenceParticleSystem : public ParticleSystem<TurbulenceParticleData> {
private:
WaveletNoiseField &noise;
public:
PbArgs _args;
}

View File

@@ -127,7 +127,6 @@ class VortexParticleSystem : public ParticleSystem<VortexParticleData> {
}
virtual ParticleBase *clone();
public:
PbArgs _args;
}

View File

@@ -240,7 +240,6 @@ class VortexSheetMesh : public Mesh {
VorticityChannel mVorticity;
TexCoord3Channel mTex1, mTex2;
TurbulenceChannel mTurb;
public:
PbArgs _args;
}

View File

@@ -118,7 +118,6 @@ typedef struct CLG_LogType {
typedef struct CLG_LogRef {
const char *identifier;
CLG_LogType *type;
struct CLG_LogRef *next;
} CLG_LogRef;
void CLG_log_str(CLG_LogType *lg,

View File

@@ -81,8 +81,6 @@ typedef struct CLG_IDFilter {
typedef struct CLogContext {
/** Single linked list of types. */
CLG_LogType *types;
/** Single linked list of references. */
CLG_LogRef *refs;
#ifdef WITH_CLOG_PTHREADS
pthread_mutex_t types_lock;
#endif
@@ -322,9 +320,7 @@ static bool clg_ctx_filter_check(CLogContext *ctx, const char *identifier)
if (flt->match[0] == '*' && flt->match[len - 1] == '*') {
char *match = MEM_callocN(sizeof(char) * len - 1, __func__);
memcpy(match, flt->match + 1, len - 2);
const bool success = (strstr(identifier, match) != NULL);
MEM_freeN(match);
if (success) {
if (strstr(identifier, match) != NULL) {
return (bool)i;
}
}
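The C code above special-cases patterns of the form `*sub*` as a plain substring test before freeing the temporary copy. A sketch of the same matching rule in Python (the helper name is made up):
import fnmatch

def ident_matches(identifier, pattern):
    # Fast path: "*sub*" means "identifier contains sub".
    if len(pattern) > 2 and pattern.startswith("*") and pattern.endswith("*"):
        return pattern[1:-1] in identifier
    # General case: shell-style wildcard match.
    return fnmatch.fnmatchcase(identifier, pattern)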
@@ -677,12 +673,6 @@ static void CLG_ctx_free(CLogContext *ctx)
MEM_freeN(item);
}
while (ctx->refs != NULL) {
CLG_LogRef *item = ctx->refs;
ctx->refs = item->next;
item->type = NULL;
}
for (uint i = 0; i < 2; i++) {
while (ctx->filters[i] != NULL) {
CLG_IDFilter *item = ctx->filters[i];
@@ -779,10 +769,6 @@ void CLG_logref_init(CLG_LogRef *clg_ref)
pthread_mutex_lock(&g_ctx->types_lock);
#endif
if (clg_ref->type == NULL) {
/* Add to the refs list so we can NULL the pointers to 'type' when CLG_exit() is called. */
clg_ref->next = g_ctx->refs;
g_ctx->refs = clg_ref;
CLG_LogType *clg_ty = clg_ctx_type_find_by_name(g_ctx, clg_ref->identifier);
if (clg_ty == NULL) {
clg_ty = clg_ctx_type_register(g_ctx, clg_ref->identifier);

View File

@@ -71,16 +71,6 @@ if(WITH_CYCLES_STANDALONE)
target_link_libraries(cycles ${LIBRARIES})
cycles_target_link_libraries(cycles)
if(APPLE)
if(WITH_OPENCOLORIO)
set_property(TARGET cycles APPEND_STRING PROPERTY LINK_FLAGS " -framework IOKit")
endif()
if(WITH_OPENIMAGEDENOISE AND "${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
# OpenImageDenoise uses BNNS from the Accelerate framework.
set_property(TARGET cycles APPEND_STRING PROPERTY LINK_FLAGS " -framework Accelerate")
endif()
endif()
if(UNIX AND NOT APPLE)
set_target_properties(cycles PROPERTIES INSTALL_RPATH $ORIGIN/lib)
endif()

View File

@@ -19,16 +19,16 @@ from __future__ import annotations
def _is_using_buggy_driver():
import gpu
import bgl
# We need to be conservative here because in multi-GPU systems the display
# card might be quite old while the others are perfectly fine.
#
# So we shouldn't disable possibly good dedicated cards just because the
# display card seems weak; instead we only blacklist configurations which
# are proven to cause problems.
if gpu.platform.vendor_get() == "ATI Technologies Inc.":
if bgl.glGetString(bgl.GL_VENDOR) == "ATI Technologies Inc.":
import re
version = gpu.platform.version_get()
version = bgl.glGetString(bgl.GL_VERSION)
if version.endswith("Compatibility Profile Context"):
# Old HD 4xxx and 5xxx series drivers did not have driver version
# in the version string, but those cards do not quite work and

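The hunk is cut off mid-comment; presumably the truncated branch simply blacklists these drivers, along the lines of:
    return True  # ancient driver without a version string, treat as buggy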
View File

@@ -552,9 +552,7 @@ class CYCLES_RENDER_PT_light_paths_fast_gi(CyclesButtonsPanel, Panel):
if world:
light = world.light_settings
col = layout.column(align=True)
col.prop(light, "ao_factor", text="AO Factor")
col.prop(light, "distance", text="AO Distance")
layout.prop(light, "distance", text="AO Distance")
class CYCLES_RENDER_PT_motion_blur(CyclesButtonsPanel, Panel):
@@ -725,7 +723,7 @@ class CYCLES_RENDER_PT_performance_tiles(CyclesButtonsPanel, Panel):
col.prop(cscene, "tile_order", text="Order")
sub = col.column()
sub.active = not rd.use_save_buffers and not cscene.use_adaptive_sampling
sub.active = not rd.use_save_buffers
sub.prop(cscene, "use_progressive_refine")

View File

@@ -83,8 +83,6 @@ struct BlenderCamera {
BoundBox2D pano_viewplane;
BoundBox2D viewport_camera_border;
float passepartout_alpha;
Transform matrix;
float offscreen_dicing_scale;
@@ -127,7 +125,6 @@ static void blender_camera_init(BlenderCamera *bcam, BL::RenderSettings &b_rende
bcam->pano_viewplane.top = 1.0f;
bcam->viewport_camera_border.right = 1.0f;
bcam->viewport_camera_border.top = 1.0f;
bcam->passepartout_alpha = 0.5f;
bcam->offscreen_dicing_scale = 1.0f;
bcam->matrix = transform_identity();
@@ -215,8 +212,6 @@ static void blender_camera_from_object(BlenderCamera *bcam,
bcam->lens = b_camera.lens();
bcam->passepartout_alpha = b_camera.show_passepartout() ? b_camera.passepartout_alpha() : 0.0f;
if (b_camera.dof().use_dof()) {
/* allow f/stop number to change aperture_size but still
* give manual control over aperture radius */
@@ -839,19 +834,15 @@ static void blender_camera_border(BlenderCamera *bcam,
full_border,
&bcam->viewport_camera_border);
if (b_render.use_border()) {
bcam->border.left = b_render.border_min_x();
bcam->border.right = b_render.border_max_x();
bcam->border.bottom = b_render.border_min_y();
bcam->border.top = b_render.border_max_y();
}
else if (bcam->passepartout_alpha == 1.0f) {
bcam->border = full_border;
}
else {
if (!b_render.use_border()) {
return;
}
bcam->border.left = b_render.border_min_x();
bcam->border.right = b_render.border_max_x();
bcam->border.bottom = b_render.border_min_y();
bcam->border.top = b_render.border_max_y();
/* Determine viewport subset matching camera border. */
blender_camera_border_subset(b_engine,
b_render,
@@ -894,7 +885,8 @@ void BlenderSync::sync_view(BL::SpaceView3D &b_v3d,
}
}
BufferParams BlenderSync::get_buffer_params(BL::SpaceView3D &b_v3d,
BufferParams BlenderSync::get_buffer_params(BL::RenderSettings &b_render,
BL::SpaceView3D &b_v3d,
BL::RegionView3D &b_rv3d,
Camera *cam,
int width,
@@ -910,8 +902,7 @@ BufferParams BlenderSync::get_buffer_params(BL::SpaceView3D &b_v3d,
if (b_v3d && b_rv3d && b_rv3d.view_perspective() != BL::RegionView3D::view_perspective_CAMERA)
use_border = b_v3d.use_render_border();
else
/* the camera can always have a passepartout */
use_border = true;
use_border = b_render.use_border();
if (use_border) {
/* border render */

View File

@@ -96,49 +96,7 @@ bool BlenderSync::object_is_light(BL::Object &b_ob)
return (b_ob_data && b_ob_data.is_a(&RNA_Light));
}
void BlenderSync::sync_object_motion_init(BL::Object &b_parent, BL::Object &b_ob, Object *object)
{
/* Initialize motion blur for object, detecting if it's enabled and creating motion
* steps array if so. */
array<Transform> motion;
object->set_motion(motion);
Scene::MotionType need_motion = scene->need_motion();
if (need_motion == Scene::MOTION_NONE || !object->get_geometry()) {
return;
}
Geometry *geom = object->get_geometry();
geom->set_use_motion_blur(false);
geom->set_motion_steps(0);
uint motion_steps;
if (need_motion == Scene::MOTION_BLUR) {
motion_steps = object_motion_steps(b_parent, b_ob, Object::MAX_MOTION_STEPS);
geom->set_motion_steps(motion_steps);
if (motion_steps && object_use_deform_motion(b_parent, b_ob)) {
geom->set_use_motion_blur(true);
}
}
else {
motion_steps = 3;
geom->set_motion_steps(motion_steps);
}
motion.resize(motion_steps, transform_empty());
if (motion_steps) {
motion[motion_steps / 2] = object->get_tfm();
/* update motion socket before trying to access object->motion_time */
object->set_motion(motion);
for (size_t step = 0; step < motion_steps; step++) {
motion_times.insert(object->motion_time(step));
}
}
}
/* Object */
Object *BlenderSync::sync_object(BL::Depsgraph &b_depsgraph,
BL::ViewLayer &b_view_layer,
@@ -261,8 +219,10 @@ Object *BlenderSync::sync_object(BL::Depsgraph &b_depsgraph,
}
/* test if we need to sync */
bool object_updated = object_map.add_or_update(&object, b_ob, b_parent, key) ||
(tfm != object->get_tfm());
bool object_updated = false;
if (object_map.add_or_update(&object, b_ob, b_parent, key))
object_updated = true;
/* mesh sync */
/* b_ob is owned by the iterator and will go out of scope at the end of the block.
@@ -311,11 +271,49 @@ Object *BlenderSync::sync_object(BL::Depsgraph &b_depsgraph,
* transform comparison should not be needed, but duplis don't work perfectly
* in the depsgraph and may not signal changes, so this is a workaround */
if (object->is_modified() || object_updated ||
(object->get_geometry() && object->get_geometry()->is_modified())) {
(object->get_geometry() && object->get_geometry()->is_modified()) ||
tfm != object->get_tfm()) {
object->name = b_ob.name().c_str();
object->set_pass_id(b_ob.pass_index());
object->set_color(get_float3(b_ob.color()));
object->set_tfm(tfm);
array<Transform> motion;
object->set_motion(motion);
/* motion blur */
Scene::MotionType need_motion = scene->need_motion();
if (need_motion != Scene::MOTION_NONE && object->get_geometry()) {
Geometry *geom = object->get_geometry();
geom->set_use_motion_blur(false);
geom->set_motion_steps(0);
uint motion_steps;
if (need_motion == Scene::MOTION_BLUR) {
motion_steps = object_motion_steps(b_parent, b_ob, Object::MAX_MOTION_STEPS);
geom->set_motion_steps(motion_steps);
if (motion_steps && object_use_deform_motion(b_parent, b_ob)) {
geom->set_use_motion_blur(true);
}
}
else {
motion_steps = 3;
geom->set_motion_steps(motion_steps);
}
motion.resize(motion_steps, transform_empty());
if (motion_steps) {
motion[motion_steps / 2] = tfm;
/* update motion socket before trying to access object->motion_time */
object->set_motion(motion);
for (size_t step = 0; step < motion_steps; step++) {
motion_times.insert(object->motion_time(step));
}
}
}
/* dupli texture coordinates and random_id */
if (is_instance) {
@@ -333,8 +331,6 @@ Object *BlenderSync::sync_object(BL::Depsgraph &b_depsgraph,
object->tag_update(scene);
}
sync_object_motion_init(b_parent, b_ob, object);
if (is_instance) {
/* Sync possible particle data. */
sync_dupli_particle(b_parent, b_instance, object);
@@ -564,12 +560,10 @@ void BlenderSync::sync_objects(BL::Depsgraph &b_depsgraph,
if (!cancel && !motion) {
sync_background_light(b_v3d, use_portal);
/* Handle removed data and modified pointers, as this may free memory, delete Nodes in the
* right order to ensure that dependent data is freed after their users. Objects should be
* freed before particle systems and geometries. */
/* handle removed data and modified pointers */
light_map.post_sync();
object_map.post_sync();
geometry_map.post_sync();
object_map.post_sync();
particle_system_map.post_sync();
}
@@ -617,7 +611,7 @@ void BlenderSync::sync_motion(BL::RenderSettings &b_render,
if (b_cam) {
sync_camera_motion(b_render, b_cam, width, height, 0.0f);
}
sync_objects(b_depsgraph, b_v3d);
sync_objects(b_depsgraph, b_v3d, 0.0f);
}
/* Insert motion times from camera. Motion times from other objects

View File

@@ -35,7 +35,6 @@
#include "util/util_path.h"
#include "util/util_string.h"
#include "util/util_task.h"
#include "util/util_tbb.h"
#include "util/util_types.h"
#ifdef WITH_OSL
@@ -289,11 +288,9 @@ static PyObject *render_func(PyObject * /*self*/, PyObject *args)
RNA_pointer_create(NULL, &RNA_Depsgraph, (ID *)PyLong_AsVoidPtr(pydepsgraph), &depsgraphptr);
BL::Depsgraph b_depsgraph(depsgraphptr);
/* Allow Blender to execute other Python scripts, and isolate TBB tasks so we
* don't get deadlocks with Blender threads accessing shared data like images. */
python_thread_state_save(&session->python_thread_state);
tbb::this_task_arena::isolate([&] { session->render(b_depsgraph); });
session->render(b_depsgraph);
python_thread_state_restore(&session->python_thread_state);
@@ -330,8 +327,7 @@ static PyObject *bake_func(PyObject * /*self*/, PyObject *args)
python_thread_state_save(&session->python_thread_state);
tbb::this_task_arena::isolate(
[&] { session->bake(b_depsgraph, b_object, pass_type, pass_filter, width, height); });
session->bake(b_depsgraph, b_object, pass_type, pass_filter, width, height);
python_thread_state_restore(&session->python_thread_state);
@@ -377,7 +373,7 @@ static PyObject *reset_func(PyObject * /*self*/, PyObject *args)
python_thread_state_save(&session->python_thread_state);
tbb::this_task_arena::isolate([&] { session->reset_session(b_data, b_depsgraph); });
session->reset_session(b_data, b_depsgraph);
python_thread_state_restore(&session->python_thread_state);
@@ -399,7 +395,7 @@ static PyObject *sync_func(PyObject * /*self*/, PyObject *args)
python_thread_state_save(&session->python_thread_state);
tbb::this_task_arena::isolate([&] { session->synchronize(b_depsgraph); });
session->synchronize(b_depsgraph);
python_thread_state_restore(&session->python_thread_state);

View File

@@ -155,7 +155,7 @@ void BlenderSession::create_session()
/* set buffer parameters */
BufferParams buffer_params = BlenderSync::get_buffer_params(
b_v3d, b_rv3d, scene->camera, width, height, session_params.denoising.use);
b_render, b_v3d, b_rv3d, scene->camera, width, height, session_params.denoising.use);
session->reset(buffer_params, session_params.samples);
b_engine.use_highlight_tiles(session_params.progressive_refine == false);
@@ -237,12 +237,10 @@ void BlenderSession::reset_session(BL::BlendData &b_data, BL::Depsgraph &b_depsg
sync->sync_recalc(b_depsgraph, b_v3d);
}
BL::Object b_camera_override(b_engine.camera_override());
sync->sync_camera(b_render, b_camera_override, width, height, "");
BL::SpaceView3D b_null_space_view3d(PointerRNA_NULL);
BL::RegionView3D b_null_region_view3d(PointerRNA_NULL);
BufferParams buffer_params = BlenderSync::get_buffer_params(b_null_space_view3d,
BufferParams buffer_params = BlenderSync::get_buffer_params(b_render,
b_null_space_view3d,
b_null_region_view3d,
scene->camera,
width,
@@ -485,7 +483,7 @@ void BlenderSession::render(BL::Depsgraph &b_depsgraph_)
SessionParams session_params = BlenderSync::get_session_params(
b_engine, b_userpref, b_scene, background, b_view_layer);
BufferParams buffer_params = BlenderSync::get_buffer_params(
b_v3d, b_rv3d, scene->camera, width, height, session_params.denoising.use);
b_render, b_v3d, b_rv3d, scene->camera, width, height, session_params.denoising.use);
/* temporary render result to find needed passes and views */
BL::RenderResult b_rr = begin_render_result(
@@ -809,7 +807,7 @@ void BlenderSession::synchronize(BL::Depsgraph &b_depsgraph_)
/* get buffer parameters */
BufferParams buffer_params = BlenderSync::get_buffer_params(
b_v3d, b_rv3d, scene->camera, width, height, session_params.denoising.use);
b_render, b_v3d, b_rv3d, scene->camera, width, height, session_params.denoising.use);
if (!buffer_params.denoising_data_pass) {
session_params.denoising.use = false;
@@ -888,7 +886,7 @@ bool BlenderSession::draw(int w, int h)
SessionParams session_params = BlenderSync::get_session_params(
b_engine, b_userpref, b_scene, background);
BufferParams buffer_params = BlenderSync::get_buffer_params(
b_v3d, b_rv3d, scene->camera, width, height, session_params.denoising.use);
b_render, b_v3d, b_rv3d, scene->camera, width, height, session_params.denoising.use);
bool session_pause = BlenderSync::get_session_pause(b_scene, background);
if (session_pause == false) {
@@ -906,7 +904,7 @@ bool BlenderSession::draw(int w, int h)
/* draw */
BufferParams buffer_params = BlenderSync::get_buffer_params(
b_v3d, b_rv3d, scene->camera, width, height, session->params.denoising.use);
b_render, b_v3d, b_rv3d, scene->camera, width, height, session->params.denoising.use);
DeviceDrawParams draw_params;
if (session->params.display_buffer_linear) {

View File

@@ -1373,7 +1373,7 @@ void BlenderSync::sync_world(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d,
BlenderViewportParameters new_viewport_parameters(b_v3d);
if (world_recalc || update_all || b_world.ptr.data != world_map ||
viewport_parameters.shader_modified(new_viewport_parameters)) {
viewport_parameters.modified(new_viewport_parameters)) {
Shader *shader = scene->default_background;
ShaderGraph *graph = new ShaderGraph();
@@ -1501,8 +1501,8 @@ void BlenderSync::sync_world(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d,
background->set_transparent_roughness_threshold(0.0f);
}
background->set_use_shader(view_layer.use_background_shader ||
viewport_parameters.use_custom_shader());
background->set_use_shader(view_layer.use_background_shader |
viewport_parameters.custom_viewport_parameters());
background->set_use_ao(background->get_use_ao() && view_layer.use_background_ao);
background->tag_update(scene);
@@ -1553,9 +1553,13 @@ void BlenderSync::sync_lights(BL::Depsgraph &b_depsgraph, bool update_all)
void BlenderSync::sync_shaders(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d)
{
/* for auto refresh images */
ImageManager *image_manager = scene->image_manager;
const int frame = b_scene.frame_current();
const bool auto_refresh_update = image_manager->set_animation_frame_update(frame);
bool auto_refresh_update = false;
if (preview) {
ImageManager *image_manager = scene->image_manager;
int frame = b_scene.frame_current();
auto_refresh_update = image_manager->set_animation_frame_update(frame);
}
shader_map.pre_sync();

View File

@@ -69,8 +69,7 @@ BlenderSync::BlenderSync(BL::RenderEngine &b_engine,
experimental(false),
dicing_rate(1.0f),
max_subdivisions(12),
progress(progress),
has_updates_(true)
progress(progress)
{
PointerRNA cscene = RNA_pointer_get(&b_scene.ptr, "cycles");
dicing_rate = preview ? RNA_float_get(&cscene, "preview_dicing_rate") :
@@ -85,9 +84,7 @@ BlenderSync::~BlenderSync()
void BlenderSync::reset(BL::BlendData &b_data, BL::Scene &b_scene)
{
/* Update data and scene pointers in case they change in session reset,
* for example after undo.
* Note that we do not modify the `has_updates_` flag here because the sync
* reset is also used during viewport navigation. */
* for example after undo. */
this->b_data = b_data;
this->b_scene = b_scene;
}
@@ -120,8 +117,6 @@ void BlenderSync::sync_recalc(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d
}
if (dicing_prop_changed) {
has_updates_ = true;
for (const pair<const GeometryKey, Geometry *> &iter : geometry_map.key_to_scene_data()) {
Geometry *geom = iter.second;
if (geom->is_mesh()) {
@@ -138,12 +133,6 @@ void BlenderSync::sync_recalc(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d
/* Iterate over all IDs in this depsgraph. */
for (BL::DepsgraphUpdate &b_update : b_depsgraph.updates) {
/* TODO(sergey): Can do more selective filter here. For example, ignore changes made to
* screen datablock. Note that sync_data() needs to be called after object deletion, and
* currently this is ensured by the scene ID tagged for update, which sets the `has_updates_`
* flag. */
has_updates_ = true;
BL::ID b_id(b_update.id());
/* Material */
@@ -224,13 +213,9 @@ void BlenderSync::sync_recalc(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d
if (b_v3d) {
BlenderViewportParameters new_viewport_parameters(b_v3d);
if (viewport_parameters.shader_modified(new_viewport_parameters)) {
if (viewport_parameters.modified(new_viewport_parameters)) {
world_recalc = true;
has_updates_ = true;
}
has_updates_ |= viewport_parameters.modified(new_viewport_parameters);
}
}
@@ -242,15 +227,11 @@ void BlenderSync::sync_data(BL::RenderSettings &b_render,
int height,
void **python_thread_state)
{
if (!has_updates_) {
return;
}
scoped_timer timer;
BL::ViewLayer b_view_layer = b_depsgraph.view_layer_eval();
sync_view_layer(b_view_layer);
sync_view_layer(b_v3d, b_view_layer);
sync_integrator();
sync_film(b_v3d);
sync_shaders(b_depsgraph, b_v3d);
@@ -273,8 +254,6 @@ void BlenderSync::sync_data(BL::RenderSettings &b_render,
free_data_after_sync(b_depsgraph);
VLOG(1) << "Total time spent synchronizing data: " << timer.get_time();
has_updates_ = false;
}
/* Integrator */
@@ -445,7 +424,7 @@ void BlenderSync::sync_film(BL::SpaceView3D &b_v3d)
/* Render Layer */
void BlenderSync::sync_view_layer(BL::ViewLayer &b_view_layer)
void BlenderSync::sync_view_layer(BL::SpaceView3D & /*b_v3d*/, BL::ViewLayer &b_view_layer)
{
view_layer.name = b_view_layer.name();
@@ -760,18 +739,12 @@ void BlenderSync::free_data_after_sync(BL::Depsgraph &b_depsgraph)
* caches to be released from blender side in order to reduce peak memory
* footprint during synchronization process.
*/
const bool is_interface_locked = b_engine.render() && b_engine.render().use_lock_interface();
const bool is_persistent_data = b_engine.render() && b_engine.render().use_persistent_data();
const bool can_free_caches =
(BlenderSession::headless || is_interface_locked) &&
/* Baking re-uses the depsgraph multiple times, clearing crashes
* reading un-evaluated mesh data which isn't aligned with the
* geometry we're baking, see T71012. */
!scene->bake_manager->get_baking() &&
/* Persistent data must maintain caches for performance and correctness. */
!is_persistent_data;
const bool can_free_caches = (BlenderSession::headless || is_interface_locked) &&
/* Baking re-uses the depsgraph multiple times, clearing crashes
* reading un-evaluated mesh data which isn't aligned with the
* geometry we're baking, see T71012. */
!scene->bake_manager->get_baking();
if (!can_free_caches) {
return;
}
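Flattening the can_free_caches predicate above into its three conditions (names mirror the code; this follows the newer variant with the persistent-data check):
def can_free_caches(headless, interface_locked, baking, persistent_data):
    return ((headless or interface_locked)  # UI will not touch evaluated data
            and not baking                  # T71012: baking re-reads the depsgraph
            and not persistent_data)        # persistent data must keep its caches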
@@ -896,9 +869,6 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine &b_engine,
/* Clamp samples. */
params.samples = min(params.samples, Integrator::MAX_SAMPLES);
/* Adaptive sampling. */
params.adaptive_sampling = RNA_boolean_get(&cscene, "use_adaptive_sampling");
/* tiles */
const bool is_cpu = (params.device.type == DEVICE_CPU);
if (!is_cpu && !background) {
@@ -951,7 +921,7 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine &b_engine,
BL::RenderSettings b_r = b_scene.render();
params.progressive_refine = b_engine.is_preview() ||
get_boolean(cscene, "use_progressive_refine");
if (b_r.use_save_buffers() || params.adaptive_sampling)
if (b_r.use_save_buffers())
params.progressive_refine = false;
if (background) {
@@ -987,6 +957,8 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine &b_engine,
params.use_profiling = params.device.has_profiling && !b_engine.is_preview() && background &&
BlenderSession::print_render_stats;
params.adaptive_sampling = RNA_boolean_get(&cscene, "use_adaptive_sampling");
return params;
}

View File

@@ -73,7 +73,7 @@ class BlenderSync {
int width,
int height,
void **python_thread_state);
void sync_view_layer(BL::ViewLayer &b_view_layer);
void sync_view_layer(BL::SpaceView3D &b_v3d, BL::ViewLayer &b_view_layer);
vector<Pass> sync_render_passes(BL::Scene &b_scene,
BL::RenderLayer &b_render_layer,
BL::ViewLayer &b_view_layer,
@@ -104,7 +104,8 @@ class BlenderSync {
bool background,
BL::ViewLayer b_view_layer = BL::ViewLayer(PointerRNA_NULL));
static bool get_session_pause(BL::Scene &b_scene, bool background);
static BufferParams get_buffer_params(BL::SpaceView3D &b_v3d,
static BufferParams get_buffer_params(BL::RenderSettings &b_render,
BL::SpaceView3D &b_v3d,
BL::RegionView3D &b_rv3d,
Camera *cam,
int width,
@@ -149,7 +150,6 @@ class BlenderSync {
BlenderObjectCulling &culling,
bool *use_portal,
TaskPool *geom_task_pool);
void sync_object_motion_init(BL::Object &b_parent, BL::Object &b_ob, Object *object);
bool sync_object_attributes(BL::DepsgraphObjectInstance &b_instance, Object *object);
@@ -263,12 +263,6 @@ class BlenderSync {
} view_layer;
Progress &progress;
protected:
/* Indicates that `sync_recalc()` detected changes in the scene.
* If this flag is false then the data is considered to be up-to-date and will not be
* synchronized at all. */
bool has_updates_ = true;
};
CCL_NAMESPACE_END

View File

@@ -13,7 +13,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "blender_viewport.h"
#include "blender_util.h"
@@ -26,39 +25,29 @@ BlenderViewportParameters::BlenderViewportParameters()
studiolight_rotate_z(0.0f),
studiolight_intensity(1.0f),
studiolight_background_alpha(1.0f),
display_pass(PASS_COMBINED)
studiolight_path(ustring())
{
}
BlenderViewportParameters::BlenderViewportParameters(BL::SpaceView3D &b_v3d)
: BlenderViewportParameters()
{
if (!b_v3d) {
return;
}
BL::View3DShading shading = b_v3d.shading();
PointerRNA cshading = RNA_pointer_get(&shading.ptr, "cycles");
/* We only copy the shading parameters if we are in look dev mode. Otherwise
/* We only copy the parameters if we are in look dev mode. Otherwise
* defaults are being used. These defaults mimic normal render settings */
if (shading.type() == BL::View3DShading::type_RENDERED) {
use_scene_world = shading.use_scene_world_render();
use_scene_lights = shading.use_scene_lights_render();
if (b_v3d && b_v3d.shading().type() == BL::View3DShading::type_RENDERED) {
use_scene_world = b_v3d.shading().use_scene_world_render();
use_scene_lights = b_v3d.shading().use_scene_lights_render();
if (!use_scene_world) {
studiolight_rotate_z = shading.studiolight_rotate_z();
studiolight_intensity = shading.studiolight_intensity();
studiolight_background_alpha = shading.studiolight_background_alpha();
studiolight_path = shading.selected_studio_light().path();
studiolight_rotate_z = b_v3d.shading().studiolight_rotate_z();
studiolight_intensity = b_v3d.shading().studiolight_intensity();
studiolight_background_alpha = b_v3d.shading().studiolight_background_alpha();
studiolight_path = b_v3d.shading().selected_studio_light().path();
}
}
/* Film. */
display_pass = (PassType)get_enum(cshading, "render_pass", -1, -1);
}
bool BlenderViewportParameters::shader_modified(const BlenderViewportParameters &other) const
/* Check if two instances are different. */
const bool BlenderViewportParameters::modified(const BlenderViewportParameters &other) const
{
return use_scene_world != other.use_scene_world || use_scene_lights != other.use_scene_lights ||
studiolight_rotate_z != other.studiolight_rotate_z ||
@@ -67,26 +56,26 @@ bool BlenderViewportParameters::shader_modified(const BlenderViewportParameters
studiolight_path != other.studiolight_path;
}
bool BlenderViewportParameters::film_modified(const BlenderViewportParameters &other) const
{
return display_pass != other.display_pass;
}
bool BlenderViewportParameters::modified(const BlenderViewportParameters &other) const
{
return shader_modified(other) || film_modified(other);
}
bool BlenderViewportParameters::use_custom_shader() const
const bool BlenderViewportParameters::custom_viewport_parameters() const
{
return !(use_scene_world && use_scene_lights);
}
PassType BlenderViewportParameters::get_viewport_display_render_pass(BL::SpaceView3D &b_v3d)
{
PassType display_pass = PASS_NONE;
if (b_v3d) {
BL::View3DShading b_view3dshading = b_v3d.shading();
PointerRNA cshading = RNA_pointer_get(&b_view3dshading.ptr, "cycles");
display_pass = (PassType)get_enum(cshading, "render_pass", -1, -1);
}
return display_pass;
}
PassType update_viewport_display_passes(BL::SpaceView3D &b_v3d, vector<Pass> &passes)
{
if (b_v3d) {
const BlenderViewportParameters viewport_parameters(b_v3d);
const PassType display_pass = viewport_parameters.display_pass;
PassType display_pass = BlenderViewportParameters::get_viewport_display_render_pass(b_v3d);
passes.clear();
Pass::add(display_pass, passes);

View File

@@ -18,18 +18,17 @@
#define __BLENDER_VIEWPORT_H__
#include "MEM_guardedalloc.h"
#include "RNA_access.h"
#include "RNA_blender_cpp.h"
#include "RNA_types.h"
#include "render/film.h"
#include "util/util_param.h"
CCL_NAMESPACE_BEGIN
class BlenderViewportParameters {
public:
/* Shader. */
private:
bool use_scene_world;
bool use_scene_lights;
float studiolight_rotate_z;
@@ -37,24 +36,17 @@ class BlenderViewportParameters {
float studiolight_background_alpha;
ustring studiolight_path;
/* Film. */
PassType display_pass;
BlenderViewportParameters();
explicit BlenderViewportParameters(BL::SpaceView3D &b_v3d);
BlenderViewportParameters(BL::SpaceView3D &b_v3d);
/* Check whether any of shading related settings are different from the given parameters. */
bool shader_modified(const BlenderViewportParameters &other) const;
const bool modified(const BlenderViewportParameters &other) const;
const bool custom_viewport_parameters() const;
friend class BlenderSync;
/* Check whether any of film related settings are different from the given parameters. */
bool film_modified(const BlenderViewportParameters &other) const;
/* Check whether any of settings are different from the given parameters. */
bool modified(const BlenderViewportParameters &other) const;
/* Returns true when a custom shader defined by the viewport is to be used instead of the
* regular background shader or scene light. */
bool use_custom_shader() const;
public:
/* Retrieve the render pass that needs to be displayed on the given `SpaceView3D`.
* When the `b_v3d` parameter is not given `PASS_NONE` will be returned. */
static PassType get_viewport_display_render_pass(BL::SpaceView3D &b_v3d);
};
PassType update_viewport_display_passes(BL::SpaceView3D &b_v3d, vector<Pass> &passes);

View File

@@ -17,8 +17,6 @@
#ifdef WITH_OPTIX
# include "device/device.h"
# include "bvh/bvh_optix.h"
CCL_NAMESPACE_BEGIN
@@ -28,7 +26,6 @@ BVHOptiX::BVHOptiX(const BVHParams &params_,
const vector<Object *> &objects_,
Device *device)
: BVH(params_, geometry_, objects_),
device(device),
traversable_handle(0),
as_data(device, params_.top_level ? "optix tlas" : "optix blas", false),
motion_transform_data(device, "optix motion transform", false)
@@ -37,9 +34,7 @@ BVHOptiX::BVHOptiX(const BVHParams &params_,
BVHOptiX::~BVHOptiX()
{
// Acceleration structure memory is freed on the device with a delay, since deleting the
// BVH may happen while it is still being used for rendering.
device->release_optix_bvh(this);
// Acceleration structure memory is freed via the 'as_data' destructor
}
CCL_NAMESPACE_END

View File

@@ -28,7 +28,6 @@ CCL_NAMESPACE_BEGIN
class BVHOptiX : public BVH {
public:
Device *device;
uint64_t traversable_handle;
device_only_memory<char> as_data;
device_only_memory<char> motion_transform_data;

View File

@@ -61,6 +61,7 @@ enum DeviceTypeMask {
};
enum DeviceKernelStatus {
DEVICE_KERNEL_WAITING_FOR_FEATURE_KERNEL = 0,
DEVICE_KERNEL_FEATURE_KERNEL_AVAILABLE,
DEVICE_KERNEL_USING_FEATURE_KERNEL,
DEVICE_KERNEL_FEATURE_KERNEL_INVALID,
@@ -426,9 +427,6 @@ class Device {
/* acceleration structure building */
virtual void build_bvh(BVH *bvh, Progress &progress, bool refit);
/* OptiX specific destructor. */
virtual void release_optix_bvh(BVH * /*bvh*/){};
#ifdef WITH_NETWORK
/* networking */
void server_run();

View File

@@ -35,54 +35,10 @@ device_memory::device_memory(Device *device, const char *name, MemoryType type)
device_pointer(0),
host_pointer(0),
shared_pointer(0),
shared_counter(0),
original_device_ptr(0),
original_device_size(0),
original_device(0),
need_realloc_(false),
modified(false)
shared_counter(0)
{
}
device_memory::device_memory(device_memory &&other) noexcept
: data_type(other.data_type),
data_elements(other.data_elements),
data_size(other.data_size),
device_size(other.device_size),
data_width(other.data_width),
data_height(other.data_height),
data_depth(other.data_depth),
type(other.type),
name(other.name),
device(other.device),
device_pointer(other.device_pointer),
host_pointer(other.host_pointer),
shared_pointer(other.shared_pointer),
shared_counter(other.shared_counter),
original_device_ptr(other.original_device_ptr),
original_device_size(other.original_device_size),
original_device(other.original_device),
need_realloc_(other.need_realloc_),
modified(other.modified)
{
other.data_elements = 0;
other.data_size = 0;
other.device_size = 0;
other.data_width = 0;
other.data_height = 0;
other.data_depth = 0;
other.device = 0;
other.device_pointer = 0;
other.host_pointer = 0;
other.shared_pointer = 0;
other.shared_counter = 0;
other.original_device_ptr = 0;
other.original_device_size = 0;
other.original_device = 0;
other.need_realloc_ = false;
other.modified = false;
}
device_memory::~device_memory()
{
assert(shared_pointer == 0);

View File

@@ -238,7 +238,6 @@ class device_memory {
/* Only create through subclasses. */
device_memory(Device *device, const char *name, MemoryType type);
device_memory(device_memory &&other) noexcept;
/* No copying allowed. */
device_memory(const device_memory &) = delete;
@@ -278,10 +277,6 @@ template<typename T> class device_only_memory : public device_memory {
data_elements = max(device_type_traits<T>::num_elements, 1);
}
device_only_memory(device_only_memory &&other) noexcept : device_memory(std::move(other))
{
}
virtual ~device_only_memory()
{
free();

View File

@@ -232,6 +232,10 @@ class MultiDevice : public Device {
foreach (SubDevice &sub, devices) {
DeviceKernelStatus subresult = sub.device->get_active_kernel_switch_state();
switch (subresult) {
case DEVICE_KERNEL_WAITING_FOR_FEATURE_KERNEL:
result = subresult;
break;
case DEVICE_KERNEL_FEATURE_KERNEL_INVALID:
case DEVICE_KERNEL_FEATURE_KERNEL_AVAILABLE:
return subresult;

View File

@@ -193,9 +193,6 @@ class OptiXDevice : public CUDADevice {
device_only_memory<unsigned char> denoiser_state;
int denoiser_input_passes = 0;
vector<device_only_memory<char>> delayed_free_bvh_memory;
thread_mutex delayed_free_bvh_mutex;
public:
OptiXDevice(DeviceInfo &info_, Stats &stats_, Profiler &profiler_, bool background_)
: CUDADevice(info_, stats_, profiler_, background_),
@@ -261,8 +258,6 @@ class OptiXDevice : public CUDADevice {
// Make CUDA context current
const CUDAContextScope scope(cuContext);
free_bvh_memory_delayed();
sbt_data.free();
texture_info.free();
launch_params.free();
@@ -726,11 +721,7 @@ class OptiXDevice : public CUDADevice {
}
}
else if (task.type == DeviceTask::SHADER) {
// CUDA kernels are used when doing baking
if (optix_module == NULL)
CUDADevice::shader(task);
else
launch_shader_eval(task, thread_index);
launch_shader_eval(task, thread_index);
}
else if (task.type == DeviceTask::DENOISE_BUFFER) {
// Set up a single tile that covers the whole task and denoise it
@@ -1306,8 +1297,6 @@ class OptiXDevice : public CUDADevice {
return;
}
free_bvh_memory_delayed();
BVHOptiX *const bvh_optix = static_cast<BVHOptiX *>(bvh);
progress.set_substatus("Building OptiX acceleration structure");
@@ -1778,24 +1767,6 @@ class OptiXDevice : public CUDADevice {
}
}
void release_optix_bvh(BVH *bvh) override
{
thread_scoped_lock lock(delayed_free_bvh_mutex);
/* Do delayed free of BVH memory, since geometry holding BVH might be deleted
* while GPU is still rendering. */
BVHOptiX *const bvh_optix = static_cast<BVHOptiX *>(bvh);
delayed_free_bvh_memory.emplace_back(std::move(bvh_optix->as_data));
delayed_free_bvh_memory.emplace_back(std::move(bvh_optix->motion_transform_data));
bvh_optix->traversable_handle = 0;
}
void free_bvh_memory_delayed()
{
thread_scoped_lock lock(delayed_free_bvh_mutex);
delayed_free_bvh_memory.free_memory();
}
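The release_optix_bvh / free_bvh_memory_delayed pair implements a delayed free: buffers are parked under a mutex while the GPU may still be reading them, and actually released at a known-safe point. A Python sketch of the pattern (illustrative only):
import threading

class DelayedFree:
    def __init__(self):
        self._lock = threading.Lock()
        self._parked = []

    def release(self, *buffers):
        # Called while the device may still be rendering: just park the memory.
        with self._lock:
            self._parked.extend(buffers)

    def flush(self):
        # Called at a safe point, e.g. before the next BVH build or in the destructor.
        with self._lock:
            self._parked.clear()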
void const_copy_to(const char *name, void *host, size_t size) override
{
// Set constant memory for CUDA module

View File

@@ -269,6 +269,7 @@ class OpenCLDevice : public Device {
cl_device_id cdDevice;
cl_int ciErr;
int device_num;
bool use_preview_kernels;
class OpenCLProgram {
public:
@@ -368,7 +369,8 @@ class OpenCLDevice : public Device {
/* Load the kernels and put the created kernels in the given
* `programs` parameter. */
void load_kernels(vector<OpenCLProgram *> &programs,
const DeviceRequestedFeatures &requested_features);
const DeviceRequestedFeatures &requested_features,
bool is_preview = false);
};
DeviceSplitKernel *split_kernel;
@@ -380,6 +382,7 @@ class OpenCLDevice : public Device {
OpenCLProgram denoising_program;
OpenCLSplitPrograms kernel_programs;
OpenCLSplitPrograms preview_programs;
typedef map<string, device_vector<uchar> *> ConstMemMap;
typedef map<string, device_ptr> MemMap;
@@ -409,6 +412,7 @@ class OpenCLDevice : public Device {
string device_md5_hash(string kernel_custom_build_options = "");
bool load_kernels(const DeviceRequestedFeatures &requested_features);
void load_required_kernels(const DeviceRequestedFeatures &requested_features);
void load_preview_kernels();
bool wait_for_availability(const DeviceRequestedFeatures &requested_features);
DeviceKernelStatus get_active_kernel_switch_state();
@@ -418,7 +422,8 @@ class OpenCLDevice : public Device {
/* Get the program file name to compile (*.cl) for the given kernel */
const string get_opencl_program_filename(const string &kernel_name);
string get_build_options(const DeviceRequestedFeatures &requested_features,
const string &opencl_program_name);
const string &opencl_program_name,
bool preview_kernel = false);
/* Enable the default features to reduce recompilation events */
void enable_default_features(DeviceRequestedFeatures &features);

View File

@@ -107,7 +107,8 @@ void OpenCLDevice::enable_default_features(DeviceRequestedFeatures &features)
}
string OpenCLDevice::get_build_options(const DeviceRequestedFeatures &requested_features,
const string &opencl_program_name)
const string &opencl_program_name,
bool preview_kernel)
{
/* first check for non-split kernel programs */
if (opencl_program_name == "base" || opencl_program_name == "denoising") {
@@ -184,7 +185,13 @@ string OpenCLDevice::get_build_options(const DeviceRequestedFeatures &requested_
enable_default_features(nofeatures);
/* Add program specific optimized compile directives */
if (opencl_program_name == "split_do_volume" && !requested_features.use_volume) {
if (preview_kernel) {
DeviceRequestedFeatures preview_features;
preview_features.use_hair = true;
build_options += "-D__KERNEL_AO_PREVIEW__ ";
build_options += preview_features.get_build_options();
}
else if (opencl_program_name == "split_do_volume" && !requested_features.use_volume) {
build_options += nofeatures.get_build_options();
}
else {
@@ -231,7 +238,9 @@ OpenCLDevice::OpenCLSplitPrograms::~OpenCLSplitPrograms()
}
void OpenCLDevice::OpenCLSplitPrograms::load_kernels(
vector<OpenCLProgram *> &programs, const DeviceRequestedFeatures &requested_features)
vector<OpenCLProgram *> &programs,
const DeviceRequestedFeatures &requested_features,
bool is_preview)
{
if (!requested_features.use_baking) {
# define ADD_SPLIT_KERNEL_BUNDLE_PROGRAM(kernel_name) \
@@ -242,7 +251,7 @@ void OpenCLDevice::OpenCLSplitPrograms::load_kernels(
device, \
program_name_##kernel_name, \
"kernel_" #kernel_name ".cl", \
device->get_build_options(requested_features, program_name_##kernel_name)); \
device->get_build_options(requested_features, program_name_##kernel_name, is_preview)); \
program_##kernel_name.add_kernel(ustring("path_trace_" #kernel_name)); \
programs.push_back(&program_##kernel_name);
@@ -250,7 +259,7 @@ void OpenCLDevice::OpenCLSplitPrograms::load_kernels(
ADD_SPLIT_KERNEL_PROGRAM(subsurface_scatter);
ADD_SPLIT_KERNEL_PROGRAM(direct_lighting);
ADD_SPLIT_KERNEL_PROGRAM(indirect_background);
if (requested_features.use_volume) {
if (requested_features.use_volume || is_preview) {
ADD_SPLIT_KERNEL_PROGRAM(do_volume);
}
ADD_SPLIT_KERNEL_PROGRAM(shader_eval);
@@ -265,7 +274,7 @@ void OpenCLDevice::OpenCLSplitPrograms::load_kernels(
device,
"split_bundle",
"kernel_split_bundle.cl",
device->get_build_options(requested_features, "split_bundle"));
device->get_build_options(requested_features, "split_bundle", is_preview));
ADD_SPLIT_KERNEL_BUNDLE_PROGRAM(data_init);
ADD_SPLIT_KERNEL_BUNDLE_PROGRAM(state_buffer_size);
@@ -394,7 +403,7 @@ class OpenCLSplitKernel : public DeviceSplitKernel {
device,
program_name,
device->get_opencl_program_filename(kernel_name),
device->get_build_options(requested_features, program_name));
device->get_build_options(requested_features, program_name, device->use_preview_kernels));
kernel->program.add_kernel(ustring("path_trace_" + kernel_name));
kernel->program.load();
@@ -560,11 +569,6 @@ class OpenCLSplitKernel : public DeviceSplitKernel {
size_t num_elements = max_elements_for_max_buffer_size(kg, data, max_buffer_size);
int2 global_size = make_int2(max(round_down((int)sqrt(num_elements), 64), 64),
(int)sqrt(num_elements));
if (device->info.description.find("Intel") != string::npos) {
global_size = make_int2(min(512, global_size.x), min(512, global_size.y));
}
VLOG(1) << "Global size: " << global_size << ".";
return global_size;
}
@@ -608,6 +612,7 @@ OpenCLDevice::OpenCLDevice(DeviceInfo &info, Stats &stats, Profiler &profiler, b
: Device(info, stats, profiler, background),
load_kernel_num_compiling(0),
kernel_programs(this),
preview_programs(this),
memory_manager(this),
texture_info(this, "__texture_info", MEM_GLOBAL)
{
@@ -617,6 +622,7 @@ OpenCLDevice::OpenCLDevice(DeviceInfo &info, Stats &stats, Profiler &profiler, b
cqCommandQueue = NULL;
device_initialized = false;
textures_need_update = true;
use_preview_kernels = !background;
vector<OpenCLPlatformDevice> usable_devices;
OpenCLInfo::get_usable_devices(&usable_devices);
@@ -672,6 +678,9 @@ OpenCLDevice::OpenCLDevice(DeviceInfo &info, Stats &stats, Profiler &profiler, b
device_initialized = true;
split_kernel = new OpenCLSplitKernel(this);
if (use_preview_kernels) {
load_preview_kernels();
}
}
OpenCLDevice::~OpenCLDevice()
@@ -762,7 +771,7 @@ bool OpenCLDevice::load_kernels(const DeviceRequestedFeatures &requested_feature
load_required_kernels(requested_features);
vector<OpenCLProgram *> programs;
kernel_programs.load_kernels(programs, requested_features);
kernel_programs.load_kernels(programs, requested_features, false);
if (!requested_features.use_baking && requested_features.use_denoising) {
denoising_program = OpenCLProgram(
@@ -840,6 +849,19 @@ void OpenCLDevice::load_required_kernels(const DeviceRequestedFeatures &requeste
}
}
void OpenCLDevice::load_preview_kernels()
{
DeviceRequestedFeatures no_features;
vector<OpenCLProgram *> programs;
preview_programs.load_kernels(programs, no_features, true);
foreach (OpenCLProgram *program, programs) {
if (!program->load()) {
load_required_kernel_task_pool.push(function_bind(&OpenCLProgram::compile, program));
}
}
}
bool OpenCLDevice::wait_for_availability(const DeviceRequestedFeatures &requested_features)
{
if (requested_features.use_baking) {
@@ -847,18 +869,59 @@ bool OpenCLDevice::wait_for_availability(const DeviceRequestedFeatures &requeste
return true;
}
load_kernel_task_pool.wait_work();
if (background) {
load_kernel_task_pool.wait_work();
use_preview_kernels = false;
}
else {
/* We use a device setting to determine whether to load the preview kernels.
* Better to check at the device level than per kernel, as mixing preview and
* non-preview kernels does not work due to different data types */
if (use_preview_kernels) {
use_preview_kernels = load_kernel_num_compiling.load() > 0;
}
}
return split_kernel->load_kernels(requested_features);
}
OpenCLDevice::OpenCLSplitPrograms *OpenCLDevice::get_split_programs()
{
return &kernel_programs;
return use_preview_kernels ? &preview_programs : &kernel_programs;
}
DeviceKernelStatus OpenCLDevice::get_active_kernel_switch_state()
{
return DEVICE_KERNEL_USING_FEATURE_KERNEL;
/* Do not switch kernels for background renders.
* When doing foreground rendering with the preview kernels,
* check whether the optimized kernels have become available.
*
* This also works the other way around, where we are using
* optimized kernels but new ones are being compiled because
* other features are needed */
if (background) {
/* The if-statements below would find the same result,
* but as the `finished` method uses a mutex we added
* this as an early exit */
return DEVICE_KERNEL_USING_FEATURE_KERNEL;
}
bool other_kernels_finished = load_kernel_num_compiling.load() == 0;
if (use_preview_kernels) {
if (other_kernels_finished) {
return DEVICE_KERNEL_FEATURE_KERNEL_AVAILABLE;
}
else {
return DEVICE_KERNEL_WAITING_FOR_FEATURE_KERNEL;
}
}
else {
if (other_kernels_finished) {
return DEVICE_KERNEL_USING_FEATURE_KERNEL;
}
else {
return DEVICE_KERNEL_FEATURE_KERNEL_INVALID;
}
}
}
void OpenCLDevice::mem_alloc(device_memory &mem)
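The replacement body of `get_active_kernel_switch_state()` above is a small state machine over two inputs: whether the device is still on preview kernels, and whether any feature kernels are still compiling. A minimal sketch of the same decision table, assuming an enum and an atomic counter shaped like the ones in this file (member names paraphrased):

#include <atomic>

enum DeviceKernelStatus {
  DEVICE_KERNEL_USING_FEATURE_KERNEL,
  DEVICE_KERNEL_FEATURE_KERNEL_AVAILABLE,
  DEVICE_KERNEL_WAITING_FOR_FEATURE_KERNEL,
  DEVICE_KERNEL_FEATURE_KERNEL_INVALID,
};

struct KernelSwitchState {
  bool background = false;
  bool use_preview_kernels = false;
  std::atomic<int> num_compiling{0};

  DeviceKernelStatus state() const
  {
    if (background) {
      /* Background renders never run on preview kernels. */
      return DEVICE_KERNEL_USING_FEATURE_KERNEL;
    }
    const bool finished = (num_compiling.load() == 0);
    if (use_preview_kernels) {
      return finished ? DEVICE_KERNEL_FEATURE_KERNEL_AVAILABLE :
                        DEVICE_KERNEL_WAITING_FOR_FEATURE_KERNEL;
    }
    return finished ? DEVICE_KERNEL_USING_FEATURE_KERNEL :
                      DEVICE_KERNEL_FEATURE_KERNEL_INVALID;
  }
};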


@@ -367,17 +367,9 @@ void Node::copy_value(const SocketType &socket, const Node &other, const SocketT
case SocketType::TRANSFORM_ARRAY:
copy_array<Transform>(this, socket, &other, other_socket);
break;
case SocketType::NODE_ARRAY: {
case SocketType::NODE_ARRAY:
copy_array<void *>(this, socket, &other, other_socket);
array<Node *> &node_array = get_socket_value<array<Node *>>(this, socket);
for (Node *node : node_array) {
node->reference();
}
break;
}
default:
assert(0);
break;
@@ -387,14 +379,6 @@ void Node::copy_value(const SocketType &socket, const Node &other, const SocketT
const void *src = ((char *)&other) + other_socket.struct_offset;
void *dst = ((char *)this) + socket.struct_offset;
memcpy(dst, src, socket.size());
if (socket.type == SocketType::NODE) {
Node *node = get_socket_value<Node *>(this, socket);
if (node) {
node->reference();
}
}
}
}
@@ -789,26 +773,6 @@ void Node::set_owner(const NodeOwner *owner_)
owner = owner_;
}
void Node::dereference_all_used_nodes()
{
foreach (const SocketType &socket, type->inputs) {
if (socket.type == SocketType::NODE) {
Node *node = get_socket_value<Node *>(this, socket);
if (node) {
node->dereference();
}
}
else if (socket.type == SocketType::NODE_ARRAY) {
const array<Node *> &nodes = get_socket_value<array<Node *>>(this, socket);
for (Node *node : nodes) {
node->dereference();
}
}
}
}
bool Node::socket_is_modified(const SocketType &input) const
{
return (socket_modified & input.modified_flag_bit) != 0;
@@ -839,25 +803,6 @@ template<typename T> void Node::set_if_different(const SocketType &input, T valu
socket_modified |= input.modified_flag_bit;
}
void Node::set_if_different(const SocketType &input, Node *value)
{
if (get_socket_value<Node *>(this, input) == value) {
return;
}
Node *old_node = get_socket_value<Node *>(this, input);
if (old_node) {
old_node->dereference();
}
if (value) {
value->reference();
}
get_socket_value<Node *>(this, input) = value;
socket_modified |= input.modified_flag_bit;
}
template<typename T> void Node::set_if_different(const SocketType &input, array<T> &value)
{
if (!socket_is_modified(input)) {
@@ -870,27 +815,6 @@ template<typename T> void Node::set_if_different(const SocketType &input, array<
socket_modified |= input.modified_flag_bit;
}
void Node::set_if_different(const SocketType &input, array<Node *> &value)
{
if (!socket_is_modified(input)) {
if (get_socket_value<array<Node *>>(this, input) == value) {
return;
}
}
array<Node *> &old_nodes = get_socket_value<array<Node *>>(this, input);
for (Node *old_node : old_nodes) {
old_node->dereference();
}
for (Node *new_node : value) {
new_node->reference();
}
get_socket_value<array<Node *>>(this, input).steal_data(value);
socket_modified |= input.modified_flag_bit;
}
void Node::print_modified_sockets() const
{
printf("Node : %s\n", name.c_str());


@@ -177,32 +177,8 @@ struct Node {
const NodeOwner *get_owner() const;
void set_owner(const NodeOwner *owner_);
int reference_count() const
{
return ref_count;
}
void reference()
{
ref_count += 1;
}
void dereference()
{
ref_count -= 1;
}
/* Set the reference count to zero. This should only be called when we know for sure that the
* Node is not used by anyone else. For now, this is only the case when "deleting" shaders, as
* they are never actually deleted. */
void clear_reference_count()
{
ref_count = 0;
}
protected:
const NodeOwner *owner;
int ref_count{0};
template<typename T> static T &get_socket_value(const Node *node, const SocketType &socket)
{
@@ -213,19 +189,7 @@ struct Node {
template<typename T> void set_if_different(const SocketType &input, T value);
/* Explicit overload for Node sockets so we can handle reference counting. The old Node is
* dereferenced, and the new one is referenced. */
void set_if_different(const SocketType &input, Node *value);
template<typename T> void set_if_different(const SocketType &input, array<T> &value);
/* Explicit overload for Node sockets so we can handle reference counting. The old Nodes are
* dereferenced, and the new ones are referenced. */
void set_if_different(const SocketType &input, array<Node *> &value);
/* Call this function in derived classes' destructors to ensure that used Nodes are dereferenced
* properly. */
void dereference_all_used_nodes();
};
CCL_NAMESPACE_END
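As the comment on `clear_reference_count()` notes, shaders are never actually deleted, so an owner that recycles nodes needs a way to reset stale counts. A hypothetical teardown built on the API above (`recycle_nodes` is an illustrative name, not a Cycles function):

#include <vector>

/* Hypothetical teardown for an owner whose nodes are recycled rather
 * than deleted: any remaining references are known to be stale. */
template<typename NodeT> void recycle_nodes(std::vector<NodeT *> &nodes)
{
  for (NodeT *node : nodes) {
    if (node->reference_count() != 0) {
      /* Safe only because no external user can still hold this node. */
      node->clear_reference_count();
    }
  }
}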


@@ -148,17 +148,16 @@ struct NodeType {
#define NODE_DECLARE \
static const NodeType *get_node_type(); \
template<typename T> static const NodeType *register_type(); \
static Node *create(const NodeType *type); \
static const NodeType *node_type;
static Node *create(const NodeType *type);
#define NODE_DEFINE(structname) \
const NodeType *structname::node_type = structname::register_type<structname>(); \
Node *structname::create(const NodeType *) \
{ \
return new structname(); \
} \
const NodeType *structname::get_node_type() \
{ \
static const NodeType *node_type = register_type<structname>(); \
return node_type; \
} \
template<typename T> const NodeType *structname::register_type()
@@ -170,8 +169,6 @@ struct NodeType {
#define NODE_ABSTRACT_DEFINE(structname) \
const NodeType *structname::get_node_base_type() \
{ \
/* Base types constructed in this getter to ensure correct initialization \
   * order. Regular types are not, so that they remain auto-registered for XML parsing. */ \
static const NodeType *node_base_type = register_base_type<structname>(); \
return node_base_type; \
} \
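The `NODE_DEFINE`/`NODE_ABSTRACT_DEFINE` difference above hinges on the construct-on-first-use idiom: a function-local static is initialized on first call, which sidesteps the cross-translation-unit static initialization order problem, while a namespace-scope static runs at startup (which is what keeps regular types auto-registered). A stand-alone illustration with placeholder names (`TypeInfo` and `register_type` are not the Cycles API):

#include <string>

struct TypeInfo {
  std::string name;
};

/* Placeholder for the real registration; leaked on purpose so the
 * pointer stays valid for the program lifetime. */
static const TypeInfo *register_type(const char *name)
{
  return new TypeInfo{name};
}

struct MyBaseNode {
  /* Construct-on-first-use: safe to call from other static
   * initializers, unlike a namespace-scope static member. */
  static const TypeInfo *get_node_base_type()
  {
    static const TypeInfo *node_base_type = register_type("my_base");
    return node_base_type;
  }
};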


@@ -57,24 +57,14 @@ ccl_device ccl_addr_space void *closure_alloc_extra(ShaderData *sd, int size)
ccl_device_inline ShaderClosure *bsdf_alloc(ShaderData *sd, int size, float3 weight)
{
kernel_assert(isfinite3_safe(weight));
ShaderClosure *sc = closure_alloc(sd, size, CLOSURE_NONE_ID, weight);
const float sample_weight = fabsf(average(weight));
if (sc == NULL)
return NULL;
  /* The comparison is written this way to cope with a non-finite weight:
   * if the average is not finite, no new closure is allocated. */
if (sample_weight >= CLOSURE_WEIGHT_CUTOFF) {
ShaderClosure *sc = closure_alloc(sd, size, CLOSURE_NONE_ID, weight);
if (sc == NULL) {
return NULL;
}
sc->sample_weight = sample_weight;
return sc;
}
return NULL;
float sample_weight = fabsf(average(weight));
sc->sample_weight = sample_weight;
return (sample_weight >= CLOSURE_WEIGHT_CUTOFF) ? sc : NULL;
}
#ifdef __OSL__
@@ -83,27 +73,17 @@ ccl_device_inline ShaderClosure *bsdf_alloc_osl(ShaderData *sd,
float3 weight,
void *data)
{
kernel_assert(isfinite3_safe(weight));
ShaderClosure *sc = closure_alloc(sd, size, CLOSURE_NONE_ID, weight);
const float sample_weight = fabsf(average(weight));
if (!sc)
return NULL;
  /* The comparison is written this way to cope with a non-finite weight:
   * if the average is not finite, no new closure is allocated. */
if (sample_weight >= CLOSURE_WEIGHT_CUTOFF) {
ShaderClosure *sc = closure_alloc(sd, size, CLOSURE_NONE_ID, weight);
if (!sc) {
return NULL;
}
memcpy((void *)sc, data, size);
memcpy((void *)sc, data, size);
sc->weight = weight;
sc->sample_weight = sample_weight;
return sc;
}
return NULL;
float sample_weight = fabsf(average(weight));
sc->weight = weight;
sc->sample_weight = sample_weight;
return (sample_weight >= CLOSURE_WEIGHT_CUTOFF) ? sc : NULL;
}
#endif
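The cutoff comparison above relies on IEEE semantics: any ordered comparison with NaN is false, so writing the test as `sample_weight >= CLOSURE_WEIGHT_CUTOFF` rejects non-finite weights for free, whereas an inverted `< cutoff` early-out would let NaN through. A self-contained check (illustrative names, not kernel code):

#include <cmath>
#include <cstdio>

/* With a NaN weight, `w >= cutoff` is false, so the allocation path is
 * skipped; the cutoff doubles as a non-finite guard. */
static bool passes_cutoff(float sample_weight, float cutoff)
{
  return sample_weight >= cutoff;
}

int main()
{
  const float cutoff = 1e-5f;
  printf("%d\n", passes_cutoff(0.5f, cutoff));          /* 1 */
  printf("%d\n", passes_cutoff(0.0f, cutoff));          /* 0 */
  printf("%d\n", passes_cutoff(std::nanf(""), cutoff)); /* 0: NaN rejected */
}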


@@ -200,12 +200,12 @@ ccl_device bool light_spread_clamp_area_light(const float3 P,
* uv coordinates. */
const float new_center_u = 0.5f * (min_u + max_u);
const float new_center_v = 0.5f * (min_v + max_v);
const float new_len_u = max_u - min_u;
const float new_len_v = max_v - min_v;
const float new_len_u = 0.5f * (max_u - min_u);
const float new_len_v = 0.5f * (max_v - min_v);
*lightP = *lightP + new_center_u * u + new_center_v * v;
*axisu = u * new_len_u;
*axisv = v * new_len_v;
*axisu = u * new_len_u * 2.0f;
*axisv = v * new_len_v * 2.0f;
return true;
}
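The two forms above are algebraically identical; folding the `0.5f` halving and the `2.0f` scale into each other just drops a redundant multiply per axis. A quick check of the identity with illustrative numbers:

#include <cassert>

int main()
{
  const float min_u = -0.3f, max_u = 0.8f;
  /* One form: half-length, then scaled back up by 2. */
  const float axis_a = (0.5f * (max_u - min_u)) * 2.0f;
  /* The other: full length used directly. */
  const float axis_b = max_u - min_u;
  /* Exact for normal floats: *0.5f and *2.0f only shift the exponent. */
  assert(axis_a == axis_b);
}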


@@ -195,108 +195,31 @@ ccl_device float2 regular_polygon_sample(float corners, float rotation, float u,
ccl_device float3 ensure_valid_reflection(float3 Ng, float3 I, float3 N)
{
float3 R = 2 * dot(N, I) * N - I;
float3 R;
float NI = dot(N, I);
float NgR, threshold;
/* Reflection rays may always be at least as shallow as the incoming ray. */
float threshold = min(0.9f * dot(Ng, I), 0.01f);
if (dot(Ng, R) >= threshold) {
return N;
}
/* Check if the incident ray is coming from behind normal N. */
if (NI > 0) {
/* Normal reflection */
R = (2 * NI) * N - I;
NgR = dot(Ng, R);
/* Form coordinate system with Ng as the Z axis and N inside the X-Z-plane.
* The X axis is found by normalizing the component of N that's orthogonal to Ng.
* The Y axis isn't actually needed.
*/
float NdotNg = dot(N, Ng);
float3 X = normalize(N - NdotNg * Ng);
/* Keep math expressions. */
/* clang-format off */
/* Calculate N.z and N.x in the local coordinate system.
*
* The goal of this computation is to find a N' that is rotated towards Ng just enough
* to lift R' above the threshold (here called t), therefore dot(R', Ng) = t.
*
* According to the standard reflection equation,
* this means that we want dot(2*dot(N', I)*N' - I, Ng) = t.
*
* Since the Z axis of our local coordinate system is Ng, dot(x, Ng) is just x.z, so we get
* 2*dot(N', I)*N'.z - I.z = t.
*
* The rotation is simple to express in the coordinate system we formed -
* since N lies in the X-Z-plane, we know that N' will also lie in the X-Z-plane,
* so N'.y = 0 and therefore dot(N', I) = N'.x*I.x + N'.z*I.z .
*
* Furthermore, we want N' to be normalized, so N'.x = sqrt(1 - N'.z^2).
*
* With these simplifications,
* we get the final equation 2*(sqrt(1 - N'.z^2)*I.x + N'.z*I.z)*N'.z - I.z = t.
*
* The only unknown here is N'.z, so we can solve for that.
*
* The equation has four solutions in general:
*
* N'.z = +-sqrt(0.5*(+-sqrt(I.x^2*(I.x^2 + I.z^2 - t^2)) + t*I.z + I.x^2 + I.z^2)/(I.x^2 + I.z^2))
* We can simplify this expression a bit by grouping terms:
*
* a = I.x^2 + I.z^2
* b = sqrt(I.x^2 * (a - t^2))
* c = I.z*t + a
* N'.z = +-sqrt(0.5*(+-b + c)/a)
*
* Two solutions can immediately be discarded because they're negative so N' would lie in the
* lower hemisphere.
*/
/* clang-format on */
float Ix = dot(I, X), Iz = dot(I, Ng);
float Ix2 = sqr(Ix), Iz2 = sqr(Iz);
float a = Ix2 + Iz2;
float b = safe_sqrtf(Ix2 * (a - sqr(threshold)));
float c = Iz * threshold + a;
/* Evaluate both solutions.
* In many cases one can be immediately discarded (if N'.z would be imaginary or larger than
* one), so check for that first. If no option is viable (might happen in extreme cases like N
* being in the wrong hemisphere), give up and return Ng. */
float fac = 0.5f / a;
float N1_z2 = fac * (b + c), N2_z2 = fac * (-b + c);
bool valid1 = (N1_z2 > 1e-5f) && (N1_z2 <= (1.0f + 1e-5f));
bool valid2 = (N2_z2 > 1e-5f) && (N2_z2 <= (1.0f + 1e-5f));
float2 N_new;
if (valid1 && valid2) {
/* If both are possible, do the expensive reflection-based check. */
float2 N1 = make_float2(safe_sqrtf(1.0f - N1_z2), safe_sqrtf(N1_z2));
float2 N2 = make_float2(safe_sqrtf(1.0f - N2_z2), safe_sqrtf(N2_z2));
float R1 = 2 * (N1.x * Ix + N1.y * Iz) * N1.y - Iz;
float R2 = 2 * (N2.x * Ix + N2.y * Iz) * N2.y - Iz;
valid1 = (R1 >= 1e-5f);
valid2 = (R2 >= 1e-5f);
if (valid1 && valid2) {
/* If both solutions are valid, return the one with the shallower reflection since it will be
* closer to the input (if the original reflection wasn't shallow, we would not be in this
* part of the function). */
N_new = (R1 < R2) ? N1 : N2;
/* Reflection rays may always be at least as shallow as the incoming ray. */
threshold = min(0.9f * dot(Ng, I), 0.01f);
if (NgR >= threshold) {
return N;
}
else {
/* If only one reflection is valid (= positive), pick that one. */
N_new = (R1 > R2) ? N1 : N2;
}
}
else if (valid1 || valid2) {
    /* Only one solution passes the N'.z criterion, so pick that one. */
float Nz2 = valid1 ? N1_z2 : N2_z2;
N_new = make_float2(safe_sqrtf(1.0f - Nz2), safe_sqrtf(Nz2));
}
else {
return Ng;
/* Bad incident */
R = -I;
NgR = dot(Ng, R);
threshold = 0.01f;
}
return N_new.x * X + N_new.y * Ng;
R = R + Ng * (threshold - NgR); /* Lift the reflection above the threshold. */
return normalize(I * len(R) + R * len(I)); /* Find a bisector. */
}
CCL_NAMESPACE_END
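The derivation compressed into the comment block above is a single quadratic solve; restated in LaTeX with the comment's own symbols ($t$ is the threshold):

\[
  2\left(\sqrt{1 - {N'_z}^2}\, I_x + N'_z\, I_z\right) N'_z - I_z = t
\]
\[
  a = I_x^2 + I_z^2, \qquad
  b = \sqrt{I_x^2\,(a - t^2)}, \qquad
  c = I_z\, t + a
\]
\[
  N'_z = \pm\sqrt{\frac{1}{2}\cdot\frac{\pm b + c}{a}}
\]

The two negative roots are discarded up front (N' must stay in the upper hemisphere), leaving the two candidates the code evaluates as N1_z2 and N2_z2.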


@@ -25,9 +25,8 @@ CCL_NAMESPACE_BEGIN
ccl_device_inline float3
subsurface_scatter_eval(ShaderData *sd, const ShaderClosure *sc, float disk_r, float r, bool all)
{
  /* This is the Veach one-sample model with balance heuristic; some pdf
   * factors drop out when using balance-heuristic weighting. For branched
   * path tracing (all) we sample all closures and don't use MIS. */
/* this is the veach one-sample model with balance heuristic, some pdf
* factors drop out when using balance heuristic weighting */
float3 eval_sum = zero_float3();
float pdf_sum = 0.0f;
float sample_weight_inv = 0.0f;
@@ -66,30 +65,6 @@ subsurface_scatter_eval(ShaderData *sd, const ShaderClosure *sc, float disk_r, f
return (pdf_sum > 0.0f) ? eval_sum / pdf_sum : zero_float3();
}
ccl_device_inline float3 subsurface_scatter_walk_eval(ShaderData *sd,
const ShaderClosure *sc,
float3 throughput,
bool all)
{
/* This is the Veach one-sample model with balance heuristic, some pdf
* factors drop out when using balance heuristic weighting. For branched
* path tracing (all) we sample all closure and don't use MIS. */
if (!all) {
float bssrdf_weight = 0.0f;
float weight = sc->sample_weight;
for (int i = 0; i < sd->num_closure; i++) {
sc = &sd->closure[i];
if (CLOSURE_IS_BSSRDF(sc->type)) {
bssrdf_weight += sc->sample_weight;
}
}
throughput *= bssrdf_weight / weight;
}
return throughput;
}
/* replace closures with a single diffuse bsdf closure after scatter step */
ccl_device void subsurface_scatter_setup_diffuse_bsdf(
KernelGlobals *kg, ShaderData *sd, ClosureType type, float roughness, float3 weight, float3 N)
@@ -462,8 +437,7 @@ ccl_device_noinline
ccl_addr_space PathState *state,
const ShaderClosure *sc,
const float bssrdf_u,
const float bssrdf_v,
bool all)
const float bssrdf_v)
{
/* Sample diffuse surface scatter into the object. */
float3 D;
@@ -632,10 +606,10 @@ ccl_device_noinline
t = ray->t;
}
else if (bounce == 0) {
/* Restore original position if nothing was hit after the first bounce,
* without the ray_offset() that was added to avoid self-intersection.
* Otherwise if that offset is relatively large compared to the scattering
* radius, we never go back up high enough to exit the surface. */
/* Restore original position if nothing was hit after the first bounce.
* Otherwise if the ray_offset() to avoid self-intersection is relatively
* large compared to the scattering radius, we go never backup high enough
* to exit the surface. */
ray->P = sd->P;
}
@@ -695,7 +669,7 @@ ccl_device_noinline
/* TODO: gain back performance lost from merging with disk BSSRDF. We
* only need to return on hit so this indirect ray push/pop overhead
* is not actually needed, but it does keep the code simpler. */
ss_isect->weight[0] = subsurface_scatter_walk_eval(sd, sc, throughput, all);
ss_isect->weight[0] = throughput;
#ifdef __SPLIT_KERNEL__
ss_isect->ray = *ray;
#endif
@@ -717,7 +691,7 @@ ccl_device_inline int subsurface_scatter_multi_intersect(KernelGlobals *kg,
return subsurface_scatter_disk(kg, ss_isect, sd, sc, lcg_state, bssrdf_u, bssrdf_v, all);
}
else {
return subsurface_random_walk(kg, ss_isect, sd, state, sc, bssrdf_u, bssrdf_v, all);
return subsurface_random_walk(kg, ss_isect, sd, state, sc, bssrdf_u, bssrdf_v);
}
}
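The `subsurface_scatter_walk_eval()` helper diffed above applies the one-sample weighting its comment describes: having picked one BSSRDF closure with probability proportional to its sample weight, the estimator divides by that probability, which amounts to scaling the throughput by the total BSSRDF weight over the picked closure's weight. A stand-alone sketch with simplified types (not the kernel's `ShaderData`):

#include <vector>

struct Closure {
  float sample_weight;
  bool is_bssrdf;
};

/* One-sample weighting: selecting closure `picked` with probability
 * proportional to its sample_weight and dividing by that probability
 * multiplies the throughput by total_bssrdf / picked. */
static float one_sample_factor(const std::vector<Closure> &closures, int picked)
{
  float bssrdf_weight = 0.0f;
  for (const Closure &sc : closures) {
    if (sc.is_bssrdf) {
      bssrdf_weight += sc.sample_weight;
    }
  }
  return bssrdf_weight / closures[picked].sample_weight;
}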


@@ -99,23 +99,27 @@ CCL_NAMESPACE_BEGIN
#define __AO__
#define __PASSES__
#define __HAIR__
#define __SVM__
#define __EMISSION__
#define __HOLDOUT__
#define __MULTI_CLOSURE__
#define __TRANSPARENT_SHADOWS__
#define __BACKGROUND_MIS__
#define __LAMP_MIS__
#define __CAMERA_MOTION__
#define __OBJECT_MOTION__
#define __BAKING__
#define __PRINCIPLED__
#define __SUBSURFACE__
#define __VOLUME__
#define __VOLUME_SCATTER__
#define __CMJ__
#define __SHADOW_RECORD_ALL__
#define __BRANCHED_PATH__
/* Without these we get an AO render, used by OpenCL preview kernel. */
#ifndef __KERNEL_AO_PREVIEW__
# define __SVM__
# define __EMISSION__
# define __HOLDOUT__
# define __MULTI_CLOSURE__
# define __TRANSPARENT_SHADOWS__
# define __BACKGROUND_MIS__
# define __LAMP_MIS__
# define __CAMERA_MOTION__
# define __OBJECT_MOTION__
# define __BAKING__
# define __PRINCIPLED__
# define __SUBSURFACE__
# define __VOLUME__
# define __VOLUME_SCATTER__
# define __CMJ__
# define __SHADOW_RECORD_ALL__
# define __BRANCHED_PATH__
#endif
/* Device specific features */
#ifdef __KERNEL_CPU__
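Everything inside the `#ifndef __KERNEL_AO_PREVIEW__` block is stripped when the define is set, which is what reduces the preview build to an AO-style kernel. Presumably the preview program is produced by compiling the same source a second time with that define in the build options; an illustrative sketch of such a build string (not the actual Cycles build-option code):

#include <string>

/* Illustrative: compile the same kernel source twice, once with the
 * preview define that strips features down to an AO-style render. */
static std::string kernel_build_options(bool preview)
{
  std::string options = "-cl-fast-relaxed-math";
  if (preview) {
    options += " -D__KERNEL_AO_PREVIEW__";
  }
  return options;
}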
