Compare commits: tmp-gpu-sh...blender-v2 (368 commits)
@@ -369,7 +369,7 @@ option(WITH_CYCLES_CUDA_BINARIES "Build Cycles CUDA binaries" OFF)
 option(WITH_CYCLES_CUBIN_COMPILER "Build cubins with nvrtc based compiler instead of nvcc" OFF)
 option(WITH_CYCLES_CUDA_BUILD_SERIAL "Build cubins one after another (useful on machines with limited RAM)" OFF)
 mark_as_advanced(WITH_CYCLES_CUDA_BUILD_SERIAL)
-set(CYCLES_CUDA_BINARIES_ARCH sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 sm_70 sm_75 CACHE STRING "CUDA architectures to build binaries for")
+set(CYCLES_CUDA_BINARIES_ARCH sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 sm_70 sm_75 sm_86 compute_75 CACHE STRING "CUDA architectures to build binaries for")
 mark_as_advanced(CYCLES_CUDA_BINARIES_ARCH)
 unset(PLATFORM_DEFAULT)
 option(WITH_CYCLES_LOGGING "Build Cycles with logging support" ON)
@@ -301,7 +301,7 @@ set(SQLITE_HASH fb558c49ee21a837713c4f1e7e413309aabdd9c7)
 
 set(EMBREE_VERSION 3.8.0)
 set(EMBREE_URI https://github.com/embree/embree/archive/v${EMBREE_VERSION}.zip)
-set(EMBREE_HASH ac504d5426945fe25dec1267e0c39d52)
+set(EMBREE_HASH 837b297bfe9c328152e9ce42c301d340)
 
 set(USD_VERSION 19.11)
 set(USD_URI https://github.com/PixarAnimationStudios/USD/archive/v${USD_VERSION}.tar.gz)
@@ -898,8 +898,8 @@ PYTHON_SOURCE=( "https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHO
 NUMPY_SOURCE=( "https://github.com/numpy/numpy/releases/download/v$NUMPY_VERSION/numpy-$NUMPY_VERSION.tar.gz" )
 
 _boost_version_nodots=`echo "$BOOST_VERSION" | sed -r 's/\./_/g'`
-BOOST_SOURCE=( "http://sourceforge.net/projects/boost/files/boost/$BOOST_VERSION/boost_$_boost_version_nodots.tar.bz2/download" )
-BOOST_BUILD_MODULES="--with-system --with-filesystem --with-thread --with-regex --with-locale --with-date_time --with-wave --with-iostreams --with-python --with-program_options"
+BOOST_SOURCE=( "https://boostorg.jfrog.io/artifactory/main/release/$BOOST_VERSION/source/boost_$_boost_version_nodots.tar.bz2" )
+BOOST_BUILD_MODULES="--with-system --with-filesystem --with-thread --with-regex --with-locale --with-date_time --with-wave --with-iostreams --with-python --with-program_options --with-serialization --with-atomic"
 
 OCIO_USE_REPO=false
 OCIO_SOURCE=( "https://github.com/AcademySoftwareFoundation/OpenColorIO/archive/v$OCIO_VERSION.tar.gz")
@@ -8,7 +8,7 @@ Code signing is done as part of INSTALL target, which makes it possible to sign
 files which are aimed into a bundle and coming from a non-signed source (such as
 libraries SVN).
 
-This is achieved by specifying `slave_codesign.cmake` as a post-install script
+This is achieved by specifying `worker_codesign.cmake` as a post-install script
 run by CMake. This CMake script simply involves an utility script written in
 Python which takes care of an actual signing.
 
@@ -33,15 +33,16 @@ def is_tool(name):
     return which(name) is not None
 
 class Builder:
-    def __init__(self, name, branch):
+    def __init__(self, name, branch, codesign):
         self.name = name
         self.branch = branch
         self.is_release_branch = re.match("^blender-v(.*)-release$", branch) is not None
+        self.codesign = codesign
 
         # Buildbot runs from build/ directory
         self.blender_dir = os.path.abspath(os.path.join('..', 'blender.git'))
-        self.build_dir = os.path.abspath(os.path.join('..', 'build', name))
-        self.install_dir = os.path.abspath(os.path.join('..', 'install', name))
+        self.build_dir = os.path.abspath(os.path.join('..', 'build'))
+        self.install_dir = os.path.abspath(os.path.join('..', 'install'))
         self.upload_dir = os.path.abspath(os.path.join('..', 'install'))
 
         # Detect platform
@@ -67,8 +68,9 @@ def create_builder_from_arguments():
     parser = argparse.ArgumentParser()
     parser.add_argument('builder_name')
     parser.add_argument('branch', default='master', nargs='?')
+    parser.add_argument("--codesign", action="store_true")
     args = parser.parse_args()
-    return Builder(args.builder_name, args.branch)
+    return Builder(args.builder_name, args.branch, args.codesign)
 
 
 class VersionInfo:
@@ -83,7 +85,6 @@ class VersionInfo:
         self.short_version = "%d.%02d" % (version_numbers[0], version_numbers[1])
         self.version = "%d.%02d.%d" % version_numbers
         self.version_cycle = self._parse_header_file(blender_h, 'BLENDER_VERSION_CYCLE')
-        self.version_cycle_number = self._parse_header_file(blender_h, 'BLENDER_VERSION_CYCLE_NUMBER')
         self.hash = self._parse_header_file(buildinfo_h, 'BUILD_HASH')[1:-1]
 
         if self.version_cycle == "release":
@@ -92,8 +93,7 @@ class VersionInfo:
             self.is_development_build = False
         elif self.version_cycle == "rc":
             # Release candidate
-            version_cycle = self.version_cycle + self.version_cycle_number
-            self.full_version = self.version + version_cycle
+            self.full_version = self.version + self.version_cycle
             self.is_development_build = False
         else:
             # Development build
@@ -18,12 +18,72 @@
 
 # <pep8 compliant>
 
+import dataclasses
+import json
 import os
 
 from pathlib import Path
+from typing import Optional
 
 import codesign.util as util
 
 
+class ArchiveStateError(Exception):
+    message: str
+
+    def __init__(self, message):
+        self.message = message
+        super().__init__(self.message)
+
+
+@dataclasses.dataclass
+class ArchiveState:
+    """
+    Additional information (state) of the archive
+
+    Includes information like expected file size of the archive file in the case
+    the archive file is expected to be successfully created.
+
+    If the archive can not be created, this state will contain error message
+    indicating details of error.
+    """
+
+    # Size in bytes of the corresponding archive.
+    file_size: Optional[int] = None
+
+    # Non-empty value indicates that error has happenned.
+    error_message: str = ''
+
+    def has_error(self) -> bool:
+        """
+        Check whether the archive is at error state
+        """
+
+        return self.error_message
+
+    def serialize_to_string(self) -> str:
+        payload = dataclasses.asdict(self)
+        return json.dumps(payload, sort_keys=True, indent=4)
+
+    def serialize_to_file(self, filepath: Path) -> None:
+        string = self.serialize_to_string()
+        filepath.write_text(string)
+
+    @classmethod
+    def deserialize_from_string(cls, string: str) -> 'ArchiveState':
+        try:
+            object_as_dict = json.loads(string)
+        except json.decoder.JSONDecodeError:
+            raise ArchiveStateError('Error parsing JSON')
+
+        return cls(**object_as_dict)
+
+    @classmethod
+    def deserialize_from_file(cls, filepath: Path):
+        string = filepath.read_text()
+        return cls.deserialize_from_string(string)
+
+
 class ArchiveWithIndicator:
     """
     The idea of this class is to wrap around logic which takes care of keeping
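The ready-indicator payload changes here from a bare integer to a small JSON document. A minimal self-contained sketch of the round trip the new `ArchiveState` performs (a trimmed copy for illustration, not an import of the real class from the `codesign` package):

```python
import dataclasses
import json
from typing import Optional


@dataclasses.dataclass
class ArchiveState:
    # Mirrors the fields from the patch: archive size on success,
    # non-empty error message on failure.
    file_size: Optional[int] = None
    error_message: str = ''

    def serialize_to_string(self) -> str:
        return json.dumps(dataclasses.asdict(self), sort_keys=True, indent=4)

    @classmethod
    def deserialize_from_string(cls, string: str) -> 'ArchiveState':
        return cls(**json.loads(string))


# Writer side: tag_ready() stores the expected archive size in the stamp.
stamp = ArchiveState(file_size=1024).serialize_to_string()

# Reader side: is_ready() parses it back and compares against st_size.
state = ArchiveState.deserialize_from_string(stamp)
assert state.file_size == 1024 and not state.error_message
```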
@@ -79,6 +139,19 @@ class ArchiveWithIndicator:
         if not self.ready_indicator_filepath.exists():
             return False
 
+        try:
+            archive_state = ArchiveState.deserialize_from_file(
+                self.ready_indicator_filepath)
+        except ArchiveStateError as error:
+            print(f'Error deserializing archive state: {error.message}')
+            return False
+
+        if archive_state.has_error():
+            # If the error did happen during codesign procedure there will be no
+            # corresponding archive file.
+            # The caller code will deal with the error check further.
+            return True
+
         # Sometimes on macOS indicator file appears prior to the actual archive
         # despite the order of creation and os.sync() used in tag_ready().
         # So consider archive not ready if there is an indicator without an
@@ -88,23 +161,11 @@ class ArchiveWithIndicator:
                   f'({self.archive_filepath}) to appear.')
             return False
 
-        # Read archive size from indicator/
-        #
-        # Assume that file is either empty or is fully written. This is being checked
-        # by performing ValueError check since empty string will throw this exception
-        # when attempted to be converted to int.
-        expected_archive_size_str = self.ready_indicator_filepath.read_text()
-        try:
-            expected_archive_size = int(expected_archive_size_str)
-        except ValueError:
-            print(f'Invalid archive size "{expected_archive_size_str}"')
-            return False
-
         # Wait for until archive is fully stored.
         actual_archive_size = self.archive_filepath.stat().st_size
-        if actual_archive_size != expected_archive_size:
+        if actual_archive_size != archive_state.file_size:
             print('Partial/invalid archive size (expected '
-                  f'{expected_archive_size} got {actual_archive_size})')
+                  f'{archive_state.file_size} got {actual_archive_size})')
             return False
 
         return True
@@ -129,7 +190,7 @@ class ArchiveWithIndicator:
             print(f'Exception checking archive: {e}')
             return False
 
-    def tag_ready(self) -> None:
+    def tag_ready(self, error_message='') -> None:
         """
         Tag the archive as ready by creating the corresponding indication file.
 
@@ -138,13 +199,34 @@ class ArchiveWithIndicator:
         If it is violated, an assert will fail.
         """
         assert not self.is_ready()
 
         # Try the best to make sure everything is synced to the file system,
         # to avoid any possibility of stamp appearing on a network share prior to
         # an actual file.
         if util.get_current_platform() != util.Platform.WINDOWS:
             os.sync()
-        archive_size = self.archive_filepath.stat().st_size
-        self.ready_indicator_filepath.write_text(str(archive_size))
+
+        archive_size = -1
+        if self.archive_filepath.exists():
+            archive_size = self.archive_filepath.stat().st_size
+
+        archive_info = ArchiveState(
+            file_size=archive_size, error_message=error_message)
+
+        self.ready_indicator_filepath.write_text(
+            archive_info.serialize_to_string())
+
+    def get_state(self) -> ArchiveState:
+        """
+        Get state object for this archive
+
+        The state is read from the corresponding state file.
+        """
+
+        try:
+            return ArchiveState.deserialize_from_file(self.ready_indicator_filepath)
+        except ArchiveStateError as error:
+            return ArchiveState(error_message=f'Error in information format: {error}')
+
     def clean(self) -> None:
         """
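The size stamp exists because the indicator can appear on a network share before the archive has fully landed. A compact model of both sides of that handshake, assuming the trimmed `ArchiveState` from the sketch above:

```python
from pathlib import Path


def tag_ready(archive: Path, stamp: Path, error_message: str = '') -> None:
    # Writer: record the final archive size (or an error) in the stamp file.
    size = archive.stat().st_size if archive.exists() else -1
    state = ArchiveState(file_size=size, error_message=error_message)
    stamp.write_text(state.serialize_to_string())


def is_ready(archive: Path, stamp: Path) -> bool:
    # Reader: only trust the archive once its on-disk size matches the stamp.
    if not stamp.exists():
        return False
    state = ArchiveState.deserialize_from_string(stamp.read_text())
    if state.error_message:
        return True  # An error is also a final state; the caller inspects it.
    return archive.exists() and archive.stat().st_size == state.file_size
```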
@@ -48,6 +48,7 @@ import shutil
 import subprocess
 import time
 import tarfile
+import uuid
 
 from pathlib import Path
 from tempfile import TemporaryDirectory
@@ -57,6 +58,7 @@ import codesign.util as util
 
 from codesign.absolute_and_relative_filename import AbsoluteAndRelativeFileName
 from codesign.archive_with_indicator import ArchiveWithIndicator
+from codesign.exception import CodeSignException
 
 
 logger = logging.getLogger(__name__)
@@ -121,21 +123,10 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
     # Consider this an input of the code signing server.
     unsigned_storage_dir: Path
 
-    # Information about archive which contains files which are to be signed.
-    #
-    # This archive is created by the buildbot worked and acts as an input for
-    # the code signing server.
-    unsigned_archive_info: ArchiveWithIndicator
-
     # Storage where signed files are stored.
     # Consider this an output of the code signer server.
     signed_storage_dir: Path
 
-    # Information about archive which contains signed files.
-    #
-    # This archive is created by the code signing server.
-    signed_archive_info: ArchiveWithIndicator
-
     # Platform the code is currently executing on.
     platform: util.Platform
 
@@ -146,50 +137,44 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
 
         # Unsigned (signing server input) configuration.
         self.unsigned_storage_dir = absolute_shared_storage_dir / 'unsigned'
-        self.unsigned_archive_info = ArchiveWithIndicator(
-            self.unsigned_storage_dir, 'unsigned_files.tar', 'ready.stamp')
 
         # Signed (signing server output) configuration.
         self.signed_storage_dir = absolute_shared_storage_dir / 'signed'
-        self.signed_archive_info = ArchiveWithIndicator(
-            self.signed_storage_dir, 'signed_files.tar', 'ready.stamp')
 
         self.platform = util.get_current_platform()
 
-    """
-    General note on cleanup environment functions.
-
-    It is expected that there is only one instance of the code signer server
-    running for a given input/output directory, and that it serves a single
-    buildbot worker.
-    By its nature, a buildbot worker only produces one build at a time and
-    never performs concurrent builds.
-    This leads to a conclusion that when starting in a clean environment
-    there shouldn't be any archives remaining from a previous build.
-
-    However, it is possible to have various failure scenarios which might
-    leave the environment in a non-clean state:
-
-      - Network hiccup which makes buildbot worker to stop current build
-        and re-start it after connection to server is re-established.
-
-        Note, this could also happen during buildbot server maintenance.
-
-      - Signing server might get restarted due to updates or other reasons.
-
-    Requiring manual interaction in such cases is not something good to
-    require, so here we simply assume that the system is used the way it is
-    intended to and restore environment to a prestine clean state.
-    """
-
     def cleanup_environment_for_builder(self) -> None:
-        self.unsigned_archive_info.clean()
-        self.signed_archive_info.clean()
+        # TODO(sergey): Revisit need of cleaning up the existing files.
+        # In practice it wasn't so helpful, and with multiple clients
+        # talking to the same server it becomes even more tricky.
+        pass
 
     def cleanup_environment_for_signing_server(self) -> None:
-        # Don't clear the requested to-be-signed archive since we might be
-        # restarting signing machine while the buildbot is busy.
-        self.signed_archive_info.clean()
+        # TODO(sergey): Revisit need of cleaning up the existing files.
+        # In practice it wasn't so helpful, and with multiple clients
+        # talking to the same server it becomes even more tricky.
+        pass
+
+    def generate_request_id(self) -> str:
+        """
+        Generate an unique identifier for code signing request.
+        """
+        return str(uuid.uuid4())
+
+    def archive_info_for_request_id(
+            self, path: Path, request_id: str) -> ArchiveWithIndicator:
+        return ArchiveWithIndicator(
+            path, f'{request_id}.tar', f'{request_id}.ready')
+
+    def signed_archive_info_for_request_id(
+            self, request_id: str) -> ArchiveWithIndicator:
+        return self.archive_info_for_request_id(
+            self.signed_storage_dir, request_id);
+
+    def unsigned_archive_info_for_request_id(
+            self, request_id: str) -> ArchiveWithIndicator:
+        return self.archive_info_for_request_id(
+            self.unsigned_storage_dir, request_id);
 
     ############################################################################
     # Buildbot worker side helpers.
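With the global `unsigned_files.tar`/`ready.stamp` pair gone, every signing request gets its own archive and indicator named after a UUID. An illustration of the naming scheme only (the storage path here is a made-up example):

```python
import uuid
from pathlib import Path

request_id = str(uuid.uuid4())
storage = Path('/shared/codesign/unsigned')         # hypothetical mount point
archive_filepath = storage / f'{request_id}.tar'    # was: unsigned_files.tar
ready_filepath = storage / f'{request_id}.ready'    # was: ready.stamp
```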
@@ -232,23 +217,43 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
                                 if self.check_file_is_to_be_signed(file)]
         return files_to_be_signed
 
-    def wait_for_signed_archive_or_die(self) -> None:
+    def wait_for_signed_archive_or_die(self, request_id) -> None:
         """
         Wait until archive with signed files is available.
 
+        Will only return if the archive with signed files is available. If there
+        was an error during code sign procedure the SystemExit exception is
+        raised, with the message set to the error reported by the codesign
+        server.
+
         Will only wait for the configured time. If that time exceeds and there
         is still no responce from the signing server the application will exit
         with a non-zero exit code.
+
         """
 
+        signed_archive_info = self.signed_archive_info_for_request_id(
+            request_id)
+        unsigned_archive_info = self.unsigned_archive_info_for_request_id(
+            request_id)
+
         timeout_in_seconds = self.config.TIMEOUT_IN_SECONDS
         time_start = time.monotonic()
-        while not self.signed_archive_info.is_ready():
+        while not signed_archive_info.is_ready():
             time.sleep(1)
             time_slept_in_seconds = time.monotonic() - time_start
             if time_slept_in_seconds > timeout_in_seconds:
-                self.unsigned_archive_info.clean()
+                signed_archive_info.clean()
+                unsigned_archive_info.clean()
                 raise SystemExit("Signing server didn't finish signing in "
-                                 f"{timeout_in_seconds} seconds, dying :(")
+                                 f'{timeout_in_seconds} seconds, dying :(')
 
+        archive_state = signed_archive_info.get_state()
+        if archive_state.has_error():
+            signed_archive_info.clean()
+            unsigned_archive_info.clean()
+            raise SystemExit(
+                f'Error happenned during codesign procedure: {archive_state.error_message}')
+
     def copy_signed_files_to_directory(
             self, signed_dir: Path, destination_dir: Path) -> None:
@@ -303,13 +308,19 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
             return
         logger_builder.info('Found %d files to sign.', len(files))
 
+        request_id = self.generate_request_id()
+        signed_archive_info = self.signed_archive_info_for_request_id(
+            request_id)
+        unsigned_archive_info = self.unsigned_archive_info_for_request_id(
+            request_id)
+
         pack_files(files=files,
-                   archive_filepath=self.unsigned_archive_info.archive_filepath)
-        self.unsigned_archive_info.tag_ready()
+                   archive_filepath=unsigned_archive_info.archive_filepath)
+        unsigned_archive_info.tag_ready()
 
         # Wait for the signing server to finish signing.
         logger_builder.info('Waiting signing server to sign the files...')
-        self.wait_for_signed_archive_or_die()
+        self.wait_for_signed_archive_or_die(request_id)
 
         # Extract signed files from archive and move files to final location.
         with TemporaryDirectory(prefix='blender-buildbot-') as temp_dir_str:
@@ -317,7 +328,7 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
 
             logger_builder.info('Extracting signed files from archive...')
             extract_files(
-                archive_filepath=self.signed_archive_info.archive_filepath,
+                archive_filepath=signed_archive_info.archive_filepath,
                 extraction_dir=unpacked_signed_files_dir)
 
             destination_dir = path
@@ -327,19 +338,39 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
                 unpacked_signed_files_dir, destination_dir)
 
         logger_builder.info('Removing archive with signed files...')
-        self.signed_archive_info.clean()
+        signed_archive_info.clean()
 
     ############################################################################
     # Signing server side helpers.
 
-    def wait_for_sign_request(self) -> None:
+    def wait_for_sign_request(self) -> str:
         """
         Wait for the buildbot to request signing of an archive.
+
+        Returns an identifier of signing request.
         """
 
         # TOOD(sergey): Support graceful shutdown on Ctrl-C.
-        while not self.unsigned_archive_info.is_ready():
+        logger_server.info(
+            'Waiting for a READY indicator of any signign request.')
+        request_id = None
+        while request_id is None:
+            for file in self.unsigned_storage_dir.iterdir():
+                if file.suffix != '.ready':
+                    continue
+                request_id = file.stem
+                logger_server.info(f'Found READY for request ID {request_id}.')
+            if request_id is None:
+                time.sleep(1)
+
+        unsigned_archive_info = self.unsigned_archive_info_for_request_id(
+            request_id)
+        while not unsigned_archive_info.is_ready():
             time.sleep(1)
 
+        return request_id
+
     @abc.abstractmethod
     def sign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
         """
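The server now discovers work by scanning the shared directory for `*.ready` stamps instead of polling one fixed indicator. A standalone sketch of that discovery loop (the polling interval is illustrative):

```python
import time
from pathlib import Path


def wait_for_request_id(unsigned_storage_dir: Path) -> str:
    # Poll until some builder drops a '<uuid>.ready' stamp; the stem of
    # that file is the request identifier used for all further lookups.
    while True:
        for file in unsigned_storage_dir.iterdir():
            if file.suffix == '.ready':
                return file.stem
        time.sleep(1)
```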
@@ -348,7 +379,7 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
         NOTE: Signing should happen in-place.
         """
 
-    def run_signing_pipeline(self):
+    def run_signing_pipeline(self, request_id: str):
         """
         Run the full signing pipeline starting from the point when buildbot
         worker have requested signing.
@@ -360,9 +391,14 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
         with TemporaryDirectory(prefix='blender-codesign-') as temp_dir_str:
             temp_dir = Path(temp_dir_str)
 
+            signed_archive_info = self.signed_archive_info_for_request_id(
+                request_id)
+            unsigned_archive_info = self.unsigned_archive_info_for_request_id(
+                request_id)
+
             logger_server.info('Extracting unsigned files from archive...')
             extract_files(
-                archive_filepath=self.unsigned_archive_info.archive_filepath,
+                archive_filepath=unsigned_archive_info.archive_filepath,
                 extraction_dir=temp_dir)
 
             logger_server.info('Collecting all files which needs signing...')
@@ -370,15 +406,21 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
                 temp_dir)
 
             logger_server.info('Signing all requested files...')
-            self.sign_all_files(files)
+            try:
+                self.sign_all_files(files)
+            except CodeSignException as error:
+                signed_archive_info.tag_ready(error_message=error.message)
+                unsigned_archive_info.clean()
+                logger_server.info('Signing is complete with errors.')
+                return
 
             logger_server.info('Packing signed files...')
             pack_files(files=files,
-                       archive_filepath=self.signed_archive_info.archive_filepath)
-            self.signed_archive_info.tag_ready()
+                       archive_filepath=signed_archive_info.archive_filepath)
+            signed_archive_info.tag_ready()
 
             logger_server.info('Removing signing request...')
-            self.unsigned_archive_info.clean()
+            unsigned_archive_info.clean()
 
             logger_server.info('Signing is complete.')
@@ -389,11 +431,11 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
         while True:
             logger_server.info('Waiting for the signing request in %s...',
                                self.unsigned_storage_dir)
-            self.wait_for_sign_request()
+            request_id = self.wait_for_sign_request()
 
             logger_server.info(
-                'Got signing request, beging signign procedure.')
-            self.run_signing_pipeline()
+                f'Beging signign procedure for request ID {request_id}.')
+            self.run_signing_pipeline(request_id)
 
     ############################################################################
     # Command executing.
@@ -18,20 +18,9 @@
 
 # <pep8 compliant>
 
-# Runs on buildbot slave, rsync zip directly to buildbot server rather
-# than using upload which is much slower
+class CodeSignException(Exception):
+    message: str
 
-import buildbot_utils
-import os
-import sys
-
-if __name__ == "__main__":
-    builder = buildbot_utils.create_builder_from_arguments()
-
-    # rsync, this assumes ssh keys are setup so no password is needed
-    local_zip = "buildbot_upload.zip"
-    remote_folder = "builder.blender.org:/data/buildbot-master/uploaded/"
-    remote_zip = remote_folder + "buildbot_upload_" + builder.name + ".zip"
-
-    command = ["rsync", "-avz", local_zip, remote_zip]
-    buildbot_utils.call(command)
+    def __init__(self, message):
+        self.message = message
+        super().__init__(self.message)
@@ -33,6 +33,7 @@ from buildbot_utils import Builder
 
 from codesign.absolute_and_relative_filename import AbsoluteAndRelativeFileName
 from codesign.base_code_signer import BaseCodeSigner
+from codesign.exception import CodeSignException
 
 logger = logging.getLogger(__name__)
 logger_server = logger.getChild('server')
@@ -45,6 +46,10 @@ EXTENSIONS_TO_BE_SIGNED = {'.dylib', '.so', '.dmg'}
 NAME_PREFIXES_TO_BE_SIGNED = {'python'}
 
 
+class NotarizationException(CodeSignException):
+    pass
+
+
 def is_file_from_bundle(file: AbsoluteAndRelativeFileName) -> bool:
     """
     Check whether file is coming from an .app bundle
@@ -186,7 +191,7 @@ class MacOSCodeSigner(BaseCodeSigner):
                    file.absolute_filepath]
         self.run_command_or_mock(command, util.Platform.MACOS)
 
-    def codesign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> bool:
+    def codesign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
         """
         Run codesign tool on all eligible files in the given list.
 
@@ -225,8 +230,6 @@ class MacOSCodeSigner(BaseCodeSigner):
                 file_index + 1, num_signed_files,
                 signed_file.relative_filepath)
 
-        return True
-
     def codesign_bundles(
             self, files: List[AbsoluteAndRelativeFileName]) -> None:
         """
@@ -273,8 +276,6 @@ class MacOSCodeSigner(BaseCodeSigner):
 
         files.extend(extra_files)
 
-        return True
-
     ############################################################################
     # Notarization.
 
@@ -334,7 +335,40 @@ class MacOSCodeSigner(BaseCodeSigner):
             logger_server.error('xcrun command did not report RequestUUID')
             return None
 
-    def notarize_wait_result(self, request_uuid: str) -> bool:
+    def notarize_review_status(self, xcrun_output: str) -> bool:
+        """
+        Review status returned by xcrun's notarization info
+
+        Returns truth if the notarization process has finished.
+        If there are errors during notarization, a NotarizationException()
+        exception is thrown with status message from the notarial office.
+        """
+
+        # Parse status and message
+        status = xcrun_field_value_from_output('Status', xcrun_output)
+        status_message = xcrun_field_value_from_output(
+            'Status Message', xcrun_output)
+
+        if status == 'success':
+            logger_server.info(
+                'Package successfully notarized: %s', status_message)
+            return True
+
+        if status == 'invalid':
+            logger_server.error(xcrun_output)
+            logger_server.error(
+                'Package notarization has failed: %s', status_message)
+            raise NotarizationException(status_message)
+
+        if status == 'in progress':
+            return False
+
+        logger_server.info(
+            'Unknown notarization status %s (%s)', status, status_message)
+
+        return False
+
+    def notarize_wait_result(self, request_uuid: str) -> None:
         """
         Wait for until notarial office have a reply
         """
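For reference, a rough sketch of the field extraction this method depends on, with a stand-in for `xcrun_field_value_from_output` (the real helper is defined elsewhere in this module; the sample output text is fabricated for illustration):

```python
SAMPLE_XCRUN_OUTPUT = """\
RequestUUID: 12345678-aaaa-bbbb-cccc-1234567890ab
Status: invalid
Status Message: Package Invalid
"""


def xcrun_field_value_from_output(field: str, output: str) -> str:
    # Stand-in: return the value of the first 'Field: value' line.
    prefix = field + ':'
    for line in output.splitlines():
        if line.startswith(prefix):
            return line[len(prefix):].strip()
    return ''


status = xcrun_field_value_from_output('Status', SAMPLE_XCRUN_OUTPUT)
assert status == 'invalid'  # notarize_review_status() would raise here.
```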
@@ -351,29 +385,11 @@ class MacOSCodeSigner(BaseCodeSigner):
         timeout_in_seconds = self.config.MACOS_NOTARIZE_TIMEOUT_IN_SECONDS
 
         while True:
-            output = self.check_output_or_mock(
+            xcrun_output = self.check_output_or_mock(
                 command, util.Platform.MACOS, allow_nonzero_exit_code=True)
-            # Parse status and message
-            status = xcrun_field_value_from_output('Status', output)
-            status_message = xcrun_field_value_from_output(
-                'Status Message', output)
-
-            # Review status.
-            if status:
-                if status == 'success':
-                    logger_server.info(
-                        'Package successfully notarized: %s', status_message)
-                    return True
-                elif status == 'invalid':
-                    logger_server.error(output)
-                    logger_server.error(
-                        'Package notarization has failed: %s', status_message)
-                    return False
-                elif status == 'in progress':
-                    pass
-                else:
-                    logger_server.info(
-                        'Unknown notarization status %s (%s)', status, status_message)
+
+            if self.notarize_review_status(xcrun_output):
+                break
 
             logger_server.info('Keep waiting for notarization office.')
             time.sleep(30)
@@ -394,8 +410,6 @@ class MacOSCodeSigner(BaseCodeSigner):
         command = ['xcrun', 'stapler', 'staple', '-v', file.absolute_filepath]
         self.check_output_or_mock(command, util.Platform.MACOS)
 
-        return True
-
     def notarize_dmg(self, file: AbsoluteAndRelativeFileName) -> bool:
         """
         Run entire pipeline to get DMG notarized.
@@ -414,10 +428,7 @@ class MacOSCodeSigner(BaseCodeSigner):
             return False
 
         # Staple.
-        if not self.notarize_staple(file):
-            return False
-
-        return True
+        self.notarize_staple(file)
 
     def notarize_all_dmg(
             self, files: List[AbsoluteAndRelativeFileName]) -> bool:
@@ -432,10 +443,7 @@ class MacOSCodeSigner(BaseCodeSigner):
             if not self.check_file_is_to_be_signed(file):
                 continue
 
-            if not self.notarize_dmg(file):
-                return False
-
-        return True
+            self.notarize_dmg(file)
 
     ############################################################################
     # Entry point.
@@ -443,11 +451,6 @@ class MacOSCodeSigner(BaseCodeSigner):
     def sign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
-        # TODO(sergey): Handle errors somehow.
-
-        if not self.codesign_all_files(files):
-            return
-
-        if not self.codesign_bundles(files):
-            return
-
-        if not self.notarize_all_dmg(files):
-            return
+        self.codesign_all_files(files)
+        self.codesign_bundles(files)
+        self.notarize_all_dmg(files)
@@ -29,6 +29,7 @@ from buildbot_utils import Builder
 
 from codesign.absolute_and_relative_filename import AbsoluteAndRelativeFileName
 from codesign.base_code_signer import BaseCodeSigner
+from codesign.exception import CodeSignException
 
 logger = logging.getLogger(__name__)
 logger_server = logger.getChild('server')
@@ -40,6 +41,9 @@ BLACKLIST_FILE_PREFIXES = (
     'api-ms-', 'concrt', 'msvcp', 'ucrtbase', 'vcomp', 'vcruntime')
 
 
+class SigntoolException(CodeSignException):
+    pass
+
 class WindowsCodeSigner(BaseCodeSigner):
     def check_file_is_to_be_signed(
             self, file: AbsoluteAndRelativeFileName) -> bool:
@@ -50,12 +54,41 @@ class WindowsCodeSigner(BaseCodeSigner):
 
         return file.relative_filepath.suffix in EXTENSIONS_TO_BE_SIGNED
 
 
     def get_sign_command_prefix(self) -> List[str]:
         return [
            'signtool', 'sign', '/v',
            '/f', self.config.WIN_CERTIFICATE_FILEPATH,
            '/tr', self.config.WIN_TIMESTAMP_AUTHORITY_URL]
 
 
+    def run_codesign_tool(self, filepath: Path) -> None:
+        command = self.get_sign_command_prefix() + [filepath]
+        codesign_output = self.check_output_or_mock(command, util.Platform.WINDOWS)
+        logger_server.info(f'signtool output:\n{codesign_output}')
+
+        got_number_of_success = False
+
+        for line in codesign_output.split('\n'):
+            line_clean = line.strip()
+            line_clean_lower = line_clean.lower()
+
+            if line_clean_lower.startswith('number of warnings') or \
+                    line_clean_lower.startswith('number of errors'):
+                number = int(line_clean_lower.split(':')[1])
+                if number != 0:
+                    raise SigntoolException('Non-clean success of signtool')
+
+            if line_clean_lower.startswith('number of files successfully signed'):
+                got_number_of_success = True
+                number = int(line_clean_lower.split(':')[1])
+                if number != 1:
+                    raise SigntoolException('Signtool did not consider codesign a success')
+
+        if not got_number_of_success:
+            raise SigntoolException('Signtool did not report number of files signed')
+
+
     def sign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
         # NOTE: Sign files one by one to avoid possible command line length
         # overflow (which could happen if we ever decide to sign every binary
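The pass/fail decision now comes from parsing signtool's summary lines rather than trusting its exit code, which stays zero even with a missing certificate. A self-contained version of that check against a plausible summary (the sample output is fabricated, not captured from a real signtool run):

```python
SAMPLE_SIGNTOOL_OUTPUT = """\
Successfully signed: blender.exe

Number of files successfully Signed: 1
Number of warnings: 0
Number of errors: 0
"""


def verify_signtool_output(output: str) -> None:
    got_success_line = False
    for line in output.split('\n'):
        line_lower = line.strip().lower()
        if line_lower.startswith(('number of warnings', 'number of errors')):
            if int(line_lower.split(':')[1]) != 0:
                raise RuntimeError('Non-clean success of signtool')
        if line_lower.startswith('number of files successfully signed'):
            got_success_line = True
            if int(line_lower.split(':')[1]) != 1:
                raise RuntimeError('Signtool did not consider codesign a success')
    if not got_success_line:
        raise RuntimeError('Signtool did not report number of files signed')


verify_signtool_output(SAMPLE_SIGNTOOL_OUTPUT)  # Passes silently.
```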
@@ -73,12 +106,7 @@ class WindowsCodeSigner(BaseCodeSigner):
                     file_index + 1, num_files, file.relative_filepath)
                 continue
 
-            command = self.get_sign_command_prefix()
-            command.append(file.absolute_filepath)
             logger_server.info(
                 'Running signtool command for file [%d/%d] %s...',
                 file_index + 1, num_files, file.relative_filepath)
-            # TODO(sergey): Check the status somehow. With a missing certificate
-            # the command still exists with a zero code.
-            self.run_command_or_mock(command, util.Platform.WINDOWS)
+            # TODO(sergey): Report number of signed and ignored files.
+            self.run_codesign_tool(file.absolute_filepath)
@@ -30,7 +30,7 @@ from tempfile import TemporaryDirectory, NamedTemporaryFile
 from typing import List
 
 BUILDBOT_DIRECTORY = Path(__file__).absolute().parent
-CODESIGN_SCRIPT = BUILDBOT_DIRECTORY / 'slave_codesign.py'
+CODESIGN_SCRIPT = BUILDBOT_DIRECTORY / 'worker_codesign.py'
 BLENDER_GIT_ROOT_DIRECTORY = BUILDBOT_DIRECTORY.parent.parent
 DARWIN_DIRECTORY = BLENDER_GIT_ROOT_DIRECTORY / 'release' / 'darwin'
@@ -82,6 +82,10 @@ def create_argument_parser():
         type=Path,
         help="Optional path to applescript to set up folder looks of DMG."
              "If not provided default Blender's one is used.")
+    parser.add_argument(
+        '--codesign',
+        action="store_true",
+        help="Code sign and notarize DMG contents.")
     return parser
 
 
@@ -395,7 +399,8 @@ def create_final_dmg(app_bundles: List[Path],
                      dmg_filepath: Path,
                      background_image_filepath: Path,
                      volume_name: str,
-                     applescript: Path) -> None:
+                     applescript: Path,
+                     codesign: bool) -> None:
     """
     Create DMG with all app bundles
 
@@ -421,7 +426,8 @@ def create_final_dmg(app_bundles: List[Path],
     #
     # This allows to recurs into the content of bundles without worrying about
     # possible interfereice of Application symlink.
-    codesign_app_bundles_in_dmg(mount_directory)
+    if codesign:
+        codesign_app_bundles_in_dmg(mount_directory)
 
     copy_background_if_needed(background_image_filepath, mount_directory)
     create_applications_link(mount_directory)
@@ -434,7 +440,8 @@ def create_final_dmg(app_bundles: List[Path],
     compress_dmg(writable_dmg_filepath, dmg_filepath)
     writable_dmg_filepath.unlink()
 
-    codesign_and_notarize_dmg(dmg_filepath)
+    if codesign:
+        codesign_and_notarize_dmg(dmg_filepath)
 
 
 def ensure_dmg_extension(filepath: Path) -> Path:
@@ -521,6 +528,7 @@ def main():
     source_dir = args.source_dir.absolute()
     background_image_filepath = get_background_image(args.background_image)
     applescript = get_applescript(args.applescript)
+    codesign = args.codesign
 
     app_bundles = collect_and_log_app_bundles(source_dir)
     if not app_bundles:
@@ -535,7 +543,8 @@ def main():
                      dmg_filepath,
                      background_image_filepath,
                      volume_name,
-                     applescript)
+                     applescript,
+                     codesign)
 
 
 if __name__ == "__main__":
@@ -33,7 +33,7 @@ else()
 endif()
 
 execute_process(
-  COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_LIST_DIR}/slave_codesign.py"
+  COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_LIST_DIR}/worker_codesign.py"
           "${CMAKE_INSTALL_PREFIX}"
   WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
   RESULT_VARIABLE exit_code
@@ -24,8 +24,8 @@ import shutil
 import buildbot_utils
 
 def get_cmake_options(builder):
-    post_install_script = os.path.join(
-        builder.blender_dir, 'build_files', 'buildbot', 'slave_codesign.cmake')
+    codesign_script = os.path.join(
+        builder.blender_dir, 'build_files', 'buildbot', 'worker_codesign.cmake')
 
     config_file = "build_files/cmake/config/blender_release.cmake"
     options = ['-DCMAKE_BUILD_TYPE:STRING=Release',
@@ -36,13 +36,26 @@ def get_cmake_options(builder):
         options.append('-DCMAKE_OSX_DEPLOYMENT_TARGET=10.9')
     elif builder.platform == 'win':
         options.extend(['-G', 'Visual Studio 15 2017 Win64'])
-        options.extend(['-DPOSTINSTALL_SCRIPT:PATH=' + post_install_script])
+        if builder.codesign:
+            options.extend(['-DPOSTINSTALL_SCRIPT:PATH=' + codesign_script])
     elif builder.platform == 'linux':
         config_file = "build_files/buildbot/config/blender_linux.cmake"
 
     optix_sdk_dir = os.path.join(builder.blender_dir, '..', '..', 'NVIDIA-Optix-SDK')
     options.append('-DOPTIX_ROOT_DIR:PATH=' + optix_sdk_dir)
 
+    # Workaround to build sm_30 kernels with CUDA 10, since CUDA 11 no longer supports that architecture
+    if builder.platform == 'win':
+        options.append('-DCUDA10_TOOLKIT_ROOT_DIR:PATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.1')
+        options.append('-DCUDA10_NVCC_EXECUTABLE:FILEPATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.1/bin/nvcc.exe')
+        options.append('-DCUDA11_TOOLKIT_ROOT_DIR:PATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.1')
+        options.append('-DCUDA11_NVCC_EXECUTABLE:FILEPATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.1/bin/nvcc.exe')
+    elif builder.platform == 'linux':
+        options.append('-DCUDA10_TOOLKIT_ROOT_DIR:PATH=/usr/local/cuda-10.1')
+        options.append('-DCUDA10_NVCC_EXECUTABLE:FILEPATH=/usr/local/cuda-10.1/bin/nvcc')
+        options.append('-DCUDA11_TOOLKIT_ROOT_DIR:PATH=/usr/local/cuda-11.1')
+        options.append('-DCUDA11_NVCC_EXECUTABLE:FILEPATH=/usr/local/cuda-11.1/bin/nvcc')
+
     options.append("-C" + os.path.join(builder.blender_dir, config_file))
     options.append("-DCMAKE_INSTALL_PREFIX=%s" % (builder.install_dir))
@@ -18,7 +18,7 @@
 
 # <pep8 compliant>
 
-# Runs on buildbot slave, creating a release package using the build
+# Runs on buildbot worker, creating a release package using the build
 # system and zipping it into buildbot_upload.zip. This is then uploaded
 # to the master in the next buildbot step.
 
@@ -110,13 +110,15 @@ def pack_mac(builder):
 
     release_dir = os.path.join(builder.blender_dir, 'release', 'darwin')
     buildbot_dir = os.path.join(builder.blender_dir, 'build_files', 'buildbot')
-    bundle_script = os.path.join(buildbot_dir, 'slave_bundle_dmg.py')
+    bundle_script = os.path.join(buildbot_dir, 'worker_bundle_dmg.py')
 
     command = [bundle_script]
     command += ['--dmg', package_filepath]
     if info.is_development_build:
         background_image = os.path.join(release_dir, 'buildbot', 'background.tif')
         command += ['--background-image', background_image]
+    if builder.codesign:
+        command += ['--codesign']
     command += [builder.install_dir]
     buildbot_utils.call(command)
@@ -150,7 +152,8 @@ def pack_win(builder):
 
         package_filename = package_name + '.msi'
         package_filepath = os.path.join(builder.build_dir, package_filename)
-        sign_file_or_directory(package_filepath)
+        if builder.codesign:
+            sign_file_or_directory(package_filepath)
 
         package_files += [(package_filepath, package_filename)]
@@ -52,7 +52,7 @@ set(WITH_USD ON CACHE BOOL "" FORCE)
 set(WITH_MEM_JEMALLOC ON CACHE BOOL "" FORCE)
 set(WITH_CYCLES_CUDA_BINARIES ON CACHE BOOL "" FORCE)
 set(WITH_CYCLES_CUBIN_COMPILER OFF CACHE BOOL "" FORCE)
-set(CYCLES_CUDA_BINARIES_ARCH sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61;sm_70;sm_75 CACHE STRING "" FORCE)
+set(CYCLES_CUDA_BINARIES_ARCH sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61;sm_70;sm_75;sm_86;compute_75 CACHE STRING "" FORCE)
 set(WITH_CYCLES_DEVICE_OPTIX ON CACHE BOOL "" FORCE)
 
 # platform dependent options
@@ -186,7 +186,7 @@ if(SYSTEMSTUBS_LIBRARY)
   list(APPEND PLATFORM_LINKLIBS SystemStubs)
 endif()
 
-set(PLATFORM_CFLAGS "-pipe -funsigned-char")
+set(PLATFORM_CFLAGS "-pipe -funsigned-char -fno-strict-aliasing")
 set(PLATFORM_LINKFLAGS
   "-fexceptions -framework CoreServices -framework Foundation -framework IOKit -framework AppKit -framework Cocoa -framework Carbon -framework AudioUnit -framework AudioToolbox -framework CoreAudio -framework Metal -framework QuartzCore"
 )
@@ -369,8 +369,9 @@ if(WITH_CYCLES_OSL)
   list(APPEND OSL_LIBRARIES ${OSL_LIB_COMP} -force_load ${OSL_LIB_EXEC} ${OSL_LIB_QUERY})
   find_path(OSL_INCLUDE_DIR OSL/oslclosure.h PATHS ${CYCLES_OSL}/include)
   find_program(OSL_COMPILER NAMES oslc PATHS ${CYCLES_OSL}/bin)
+  find_path(OSL_SHADER_DIR NAMES stdosl.h PATHS ${CYCLES_OSL}/shaders)
 
-  if(OSL_INCLUDE_DIR AND OSL_LIBRARIES AND OSL_COMPILER)
+  if(OSL_INCLUDE_DIR AND OSL_LIBRARIES AND OSL_COMPILER AND OSL_SHADER_DIR)
     set(OSL_FOUND TRUE)
   else()
     message(STATUS "OSL not found")
@@ -429,8 +430,8 @@ endif()
 
 set(EXETYPE MACOSX_BUNDLE)
 
-set(CMAKE_C_FLAGS_DEBUG "-fno-strict-aliasing -g")
-set(CMAKE_CXX_FLAGS_DEBUG "-fno-strict-aliasing -g")
+set(CMAKE_C_FLAGS_DEBUG "-g")
+set(CMAKE_CXX_FLAGS_DEBUG "-g")
 if(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64" OR CMAKE_OSX_ARCHITECTURES MATCHES "i386")
   set(CMAKE_CXX_FLAGS_RELEASE "-O2 -mdynamic-no-pic -msse -msse2 -msse3 -mssse3")
   set(CMAKE_C_FLAGS_RELEASE "-O2 -mdynamic-no-pic -msse -msse2 -msse3 -mssse3")
@@ -439,8 +440,8 @@ if(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64" OR CMAKE_OSX_ARCHITECTURES MATCHES "
     set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -ftree-vectorize -fvariable-expansion-in-unroller")
   endif()
 else()
-  set(CMAKE_C_FLAGS_RELEASE "-mdynamic-no-pic -fno-strict-aliasing")
-  set(CMAKE_CXX_FLAGS_RELEASE "-mdynamic-no-pic -fno-strict-aliasing")
+  set(CMAKE_C_FLAGS_RELEASE "-mdynamic-no-pic")
+  set(CMAKE_CXX_FLAGS_RELEASE "-mdynamic-no-pic")
 endif()
 
 if(${XCODE_VERSION} VERSION_EQUAL 5 OR ${XCODE_VERSION} VERSION_GREATER 5)
@@ -134,14 +134,7 @@ endif()
 unset(OSX_SDKROOT)
 
 # 10.11 is our min. target, if you use higher sdk, weak linking happens
-if(CMAKE_OSX_DEPLOYMENT_TARGET)
-  if(${CMAKE_OSX_DEPLOYMENT_TARGET} VERSION_LESS 10.11)
-    message(STATUS "Setting deployment target to 10.11, lower versions are not supported")
-    set(CMAKE_OSX_DEPLOYMENT_TARGET "10.11" CACHE STRING "" FORCE)
-  endif()
-else()
-  set(CMAKE_OSX_DEPLOYMENT_TARGET "10.11" CACHE STRING "" FORCE)
-endif()
+set(CMAKE_OSX_DEPLOYMENT_TARGET "10.11" CACHE STRING "" FORCE)
 
 if(NOT ${CMAKE_GENERATOR} MATCHES "Xcode")
   # Force CMAKE_OSX_DEPLOYMENT_TARGET for makefiles, will not work else (CMake bug?)
@@ -7,6 +7,15 @@ if(WITH_WINDOWS_BUNDLE_CRT)
   set(CMAKE_INSTALL_OPENMP_LIBRARIES ${WITH_OPENMP})
   include(InstallRequiredSystemLibraries)
 
+  # ucrtbase(d).dll cannot be in the manifest, due to the way windows 10 handles
+  # redirects for this dll, for details see T88813.
+  foreach(lib ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS})
+    string(FIND ${lib} "ucrtbase" pos)
+    if(NOT pos EQUAL -1)
+      list(REMOVE_ITEM CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS ${lib})
+      install(FILES ${lib} DESTINATION . COMPONENT Libraries)
+    endif()
+  endforeach()
+
   # Install the CRT to the blender.crt Sub folder.
   install(FILES ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS} DESTINATION ./blender.crt COMPONENT Libraries)
build_files/config/pipeline_config.yaml (new file, 70 lines)
@@ -0,0 +1,70 @@
+#
+# Do NOT merge to master
+#
+
+#
+# Used by Buildbot build pipeline make_update.py script only for now
+# We intended to udpate the make_update.py in the branches to use this file eventually
+#
+update-code:
+    git:
+        submodules:
+            - branch: blender-v2.83-release
+              commit_id: HEAD
+              path: release/scripts/addons
+            - branch: blender-v2.83-release
+              commit_id: HEAD
+              path: release/scripts/addons_contrib
+            - branch: blender-v2.83-release
+              commit_id: HEAD
+              path: release/datafiles/locale
+            - branch: blender-v2.83-release
+              commit_id: HEAD
+              path: source/tools
+    svn:
+        libraries:
+            darwin-arm64:
+                branch: tags/blender-2.83-release
+                commit_id: HEAD
+                path: lib/darwin_arm64
+            darwin-x86_64:
+                branch: tags/blender-2.83-release
+                commit_id: HEAD
+                path: lib/darwin
+            linux-x86_64:
+                branch: tags/blender-2.83-release
+                commit_id: HEAD
+                path: lib/linux_centos7_x86_64
+            windows-amd64:
+                branch: tags/blender-2.83-release
+                commit_id: HEAD
+                path: lib/win64_vc15
+        tests:
+            branch: tags/blender-2.83-release
+            commit_id: HEAD
+            path: lib/tests
+
+#
+# Buildbot only configs
+#
+buildbot:
+    gcc:
+        version: '9.0.0'
+    cuda10:
+        version: '10.1.243'
+    cuda11:
+        version: '11.4.1'
+    optix:
+        version: '7.0.0'
+    cmake:
+        default:
+            overrides: {}
+            version: any
+        darwin-arm64:
+            overrides: {}
+        darwin-x86_64:
+            overrides: {}
+        linux-x86_64:
+            overrides: {}
+        windows-amd64:
+            overrides: {}
@@ -40,7 +40,8 @@ if make_utils.command_missing(git_command):
 
 # Test if we are building a specific release version.
 branch = make_utils.git_branch(git_command)
-release_version = make_utils.git_branch_release_version(branch)
+tag = make_utils.git_tag(git_command)
+release_version = make_utils.git_branch_release_version(branch, tag)
 lib_tests_dirpath = os.path.join('..', 'lib', "tests")
 
 if not os.path.exists(lib_tests_dirpath):
@@ -197,7 +197,8 @@ if __name__ == "__main__":
 
     # Test if we are building a specific release version.
     branch = make_utils.git_branch(args.git_command)
-    release_version = make_utils.git_branch_release_version(branch)
+    tag = make_utils.git_tag(args.git_command)
+    release_version = make_utils.git_branch_release_version(branch, tag)
 
     if not args.no_libraries:
         svn_update(args, release_version)
@@ -36,7 +36,7 @@ def check_output(cmd, exit_on_error=True):
     return output.strip()
 
 def git_branch(git_command):
-    # Test if we are building a specific release version.
+    # Get current branch name.
     try:
         branch = subprocess.check_output([git_command, "rev-parse", "--abbrev-ref", "HEAD"])
     except subprocess.CalledProcessError as e:
@@ -45,10 +45,23 @@ def git_branch(git_command):
 
     return branch.strip().decode('utf8')
 
-def git_branch_release_version(branch):
+def git_tag(git_command):
+    # Get current tag name.
+    try:
+        tag = subprocess.check_output([git_command, "describe", "--exact-match"], stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError as e:
+        return None
+
+    return tag.strip().decode('utf8')
+
+def git_branch_release_version(branch, tag):
     release_version = re.search("^blender-v(.*)-release$", branch)
     if release_version:
         release_version = release_version.group(1)
+    elif tag:
+        release_version = re.search("^v([0-9]*\.[0-9]*).*", tag)
+        if release_version:
+            release_version = release_version.group(1)
     return release_version
 
 def svn_libraries_base_url(release_version):
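The release version can now be derived either from a release branch name or from an exact tag. A quick check of how the two regular expressions behave:

```python
import re


def release_version_from(branch, tag):
    # Branch form: 'blender-v2.83-release' -> '2.83'.
    found = re.search("^blender-v(.*)-release$", branch)
    if found:
        return found.group(1)
    # Tag form: 'v2.83.5' -> '2.83'.
    if tag:
        found = re.search(r"^v([0-9]*\.[0-9]*).*", tag)
        if found:
            return found.group(1)
    return None


assert release_version_from('blender-v2.83-release', None) == '2.83'
assert release_version_from('master', 'v2.83.5') == '2.83'
```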
extern/audaspace/plugins/sdl/SDLDevice.cpp (vendored, 42 lines changed)
@@ -52,7 +52,7 @@ SDLDevice::SDLDevice(DeviceSpecs specs, int buffersize) :
 	if(specs.channels == CHANNELS_INVALID)
 		specs.channels = CHANNELS_STEREO;
 	if(specs.format == FORMAT_INVALID)
-		specs.format = FORMAT_S16;
+		specs.format = FORMAT_FLOAT32;
 	if(specs.rate == RATE_INVALID)
 		specs.rate = RATE_48000;
 
@@ -61,10 +61,25 @@ SDLDevice::SDLDevice(DeviceSpecs specs, int buffersize) :
 	SDL_AudioSpec format, obtained;
 
 	format.freq = m_specs.rate;
-	if(m_specs.format == FORMAT_U8)
+	switch(m_specs.format)
+	{
+	case FORMAT_U8:
 		format.format = AUDIO_U8;
-	else
+		break;
+	case FORMAT_S16:
 		format.format = AUDIO_S16SYS;
+		break;
+	case FORMAT_S32:
+		format.format = AUDIO_S32SYS;
+		break;
+	case FORMAT_FLOAT32:
+		format.format = AUDIO_F32SYS;
+		break;
+	default:
+		format.format = AUDIO_F32SYS;
+		break;
+	}
 	format.channels = m_specs.channels;
 	format.samples = buffersize;
 	format.callback = SDLDevice::SDL_mix;
@@ -75,14 +90,25 @@ SDLDevice::SDLDevice(DeviceSpecs specs, int buffersize) :
|
||||
|
||||
m_specs.rate = (SampleRate)obtained.freq;
|
||||
m_specs.channels = (Channels)obtained.channels;
|
||||
if(obtained.format == AUDIO_U8)
|
||||
m_specs.format = FORMAT_U8;
|
||||
else if(obtained.format == AUDIO_S16LSB || obtained.format == AUDIO_S16MSB)
|
||||
m_specs.format = FORMAT_S16;
|
||||
else
|
||||
|
||||
switch(obtained.format)
|
||||
{
|
||||
case AUDIO_U8:
|
||||
m_specs.format = FORMAT_U8;
|
||||
break;
|
||||
case AUDIO_S16SYS:
|
||||
m_specs.format = FORMAT_S16;
|
||||
break;
|
||||
case AUDIO_S32SYS:
|
||||
m_specs.format = FORMAT_S32;
|
||||
break;
|
||||
case AUDIO_F32SYS:
|
||||
m_specs.format = FORMAT_FLOAT32;
|
||||
break;
|
||||
default:
|
||||
SDL_CloseAudio();
|
||||
AUD_THROW(DeviceException, "The sample format obtained from SDL is not supported.");
|
||||
break;
|
||||
}
|
||||
|
||||
create();
|
||||
|
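The two hunks above replace if/else chains with explicit switches in both directions: requested device format to SDL constant, and obtained SDL constant back to device format. A minimal sketch of that mapping with string stand-ins for the C enums (all names here are placeholders, not the audaspace API):

```python
# Requested device format -> SDL audio format; unknown formats fall back to
# 32-bit float, matching the new `default:` branch.
TO_SDL = {
    "FORMAT_U8": "AUDIO_U8",
    "FORMAT_S16": "AUDIO_S16SYS",
    "FORMAT_S32": "AUDIO_S32SYS",
    "FORMAT_FLOAT32": "AUDIO_F32SYS",
}

# Obtained SDL format -> device format; anything outside this table is an
# error, since the C++ code throws a DeviceException for it.
FROM_SDL = {sdl: fmt for fmt, sdl in TO_SDL.items()}

def negotiate(requested):
    sdl = TO_SDL.get(requested, "AUDIO_F32SYS")
    return FROM_SDL[sdl]
```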
extern/cuew/include/cuew.h (vendored, 1 line changed)

@@ -609,6 +609,7 @@ typedef enum cudaError_enum {
   CUDA_ERROR_INVALID_GRAPHICS_CONTEXT = 219,
   CUDA_ERROR_NVLINK_UNCORRECTABLE = 220,
   CUDA_ERROR_JIT_COMPILER_NOT_FOUND = 221,
+  CUDA_ERROR_UNSUPPORTED_PTX_VERSION = 222,
   CUDA_ERROR_INVALID_SOURCE = 300,
   CUDA_ERROR_FILE_NOT_FOUND = 301,
   CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND = 302,
extern/cuew/src/cuew.c (vendored, 1 line changed)

@@ -736,6 +736,7 @@ const char *cuewErrorString(CUresult result) {
    case CUDA_ERROR_INVALID_GRAPHICS_CONTEXT: return "Invalid graphics context";
    case CUDA_ERROR_NVLINK_UNCORRECTABLE: return "Nvlink uncorrectable";
    case CUDA_ERROR_JIT_COMPILER_NOT_FOUND: return "Jit compiler not found";
+   case CUDA_ERROR_UNSUPPORTED_PTX_VERSION: return "Unsupported PTX version";
    case CUDA_ERROR_INVALID_SOURCE: return "Invalid source";
    case CUDA_ERROR_FILE_NOT_FOUND: return "File not found";
    case CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND: return "Link to a shared object failed to resolve";
@@ -153,7 +153,6 @@ static void clg_str_reserve(CLogStringBuf *cstr, const uint len)
       cstr->data = data;
       cstr->is_alloc = true;
     }
-    cstr->len_alloc = len;
   }
 }

@@ -179,26 +178,34 @@ static void clg_str_vappendf(CLogStringBuf *cstr, const char *fmt, va_list args)
 {
   /* Use limit because windows may use '-1' for a formatting error. */
   const uint len_max = 65535;
-  uint len_avail = (cstr->len_alloc - cstr->len);
-  if (len_avail == 0) {
-    len_avail = CLOG_BUF_LEN_INIT;
-    clg_str_reserve(cstr, len_avail);
-  }
   while (true) {
+    uint len_avail = cstr->len_alloc - cstr->len;
+
     va_list args_cpy;
     va_copy(args_cpy, args);
     int retval = vsnprintf(cstr->data + cstr->len, len_avail, fmt, args_cpy);
     va_end(args_cpy);
-    if (retval != -1) {
-      cstr->len += retval;
+
+    if (retval < 0) {
+      /* Some encoding error happened, not much we can do here, besides skipping/cancelling this
+       * message. */
+      break;
+    }
+    else if ((uint)retval <= len_avail) {
+      /* Copy was successful. */
+      cstr->len += (uint)retval;
+      break;
     }
     else {
-      len_avail *= 2;
-      if (len_avail >= len_max) {
+      /* vsnprintf was not successful, due to lack of allocated space, retval contains expected
+       * length of the formated string, use it to allocate required amount of memory. */
+      uint len_alloc = cstr->len + (uint)retval;
+      if (len_alloc >= len_max) {
         /* Safe upper-limit, just in case... */
         break;
       }
-      clg_str_reserve(cstr, len_avail);
+      clg_str_reserve(cstr, len_alloc);
-      len_avail = cstr->len_alloc - cstr->len;
     }
   }
 }
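The rewritten loop leans on vsnprintf's C99 contract: a negative return means an encoding error, while a return larger than the available space reports the exact length the output would have needed. A Python sketch of just that grow-to-exact-size control flow (names hypothetical; the encoding error is modelled as a None return):

```python
def append_formatted(cstr, render, len_max=65535):
    # cstr: {"data": str, "len_alloc": int}; render() returns the formatted
    # message, or None for vsnprintf's negative (encoding error) return.
    while True:
        len_avail = cstr["len_alloc"] - len(cstr["data"])
        text = render()
        if text is None:
            return False                  # encoding error: drop the message
        if len(text) <= len_avail:
            cstr["data"] += text          # copy was successful
            return True
        len_alloc = len(cstr["data"]) + len(text)
        if len_alloc >= len_max:
            return False                  # safe upper limit, just in case
        cstr["len_alloc"] = len_alloc     # reserve exactly what is needed
```

Unlike the old doubling loop, at most one retry is ever needed, since the second pass always has exactly enough room.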
@@ -313,7 +313,7 @@ if(WITH_CYCLES_CUDA_BINARIES AND (NOT WITH_CYCLES_CUBIN_COMPILER))
     set(MAX_MSVC 1910)
   elseif(${CUDA_VERSION} EQUAL "9.1")
     set(MAX_MSVC 1911)
-  elseif(${CUDA_VERSION} LESS "11.0")
+  elseif(${CUDA_VERSION} VERSION_GREATER_EQUAL 10.0)
     set(MAX_MSVC 1999)
   endif()
   if(NOT MSVC_VERSION LESS ${MAX_MSVC} OR CMAKE_C_COMPILER_ID MATCHES "Clang")
@@ -84,7 +84,8 @@ def update_script_node(node, report):
     if script.is_in_memory or script.is_dirty or script.is_modified or not os.path.exists(osl_path):
         # write text datablock contents to temporary file
         osl_file = tempfile.NamedTemporaryFile(mode='w', suffix=".osl", delete=False)
-        osl_file.write(script.as_string())
+        osl_file.write(script.as_string() + "\n")
         osl_file.close()

         ok, oso_path = osl_compile(osl_file.name, report)
@@ -1577,8 +1577,13 @@ class CyclesPreferences(bpy.types.AddonPreferences):
     # For backwards compatibility, only returns CUDA and OpenCL but still
     # refreshes all devices.
     def get_devices(self, compute_device_type=''):
         import _cycles
+        # Ensure `self.devices` is not re-allocated when the second call to
+        # get_devices_for_type is made, freeing items from the first list.
+        for device_type in ('CUDA', 'OPTIX', 'OPENCL'):
+            self.update_device_entries(_cycles.available_devices(device_type))
+
         cuda_devices = self.get_devices_for_type('CUDA')
+        self.get_devices_for_type('OPTIX')
         opencl_devices = self.get_devices_for_type('OPENCL')
         return cuda_devices, opencl_devices
@@ -456,15 +456,19 @@ void BlenderSync::sync_motion(BL::RenderSettings &b_render,
     python_thread_state_restore(python_thread_state);
     b_engine.frame_set(frame, subframe);
     python_thread_state_save(python_thread_state);
-    sync_camera_motion(b_render, b_cam, width, height, 0.0f);
+    if (b_cam) {
+      sync_camera_motion(b_render, b_cam, width, height, 0.0f);
+    }
     sync_objects(b_depsgraph, b_v3d, 0.0f);
   }

   /* Insert motion times from camera. Motion times from other objects
    * have already been added in a sync_objects call. */
-  uint camera_motion_steps = object_motion_steps(b_cam, b_cam);
-  for (size_t step = 0; step < camera_motion_steps; step++) {
-    motion_times.insert(scene->camera->motion_time(step));
+  if (b_cam) {
+    uint camera_motion_steps = object_motion_steps(b_cam, b_cam);
+    for (size_t step = 0; step < camera_motion_steps; step++) {
+      motion_times.insert(scene->camera->motion_time(step));
+    }
   }

   /* note iteration over motion_times set happens in sorted order */
@@ -384,16 +384,16 @@ static ShaderNode *add_node(Scene *scene,

     switch (b_aniso_node.distribution()) {
       case BL::ShaderNodeBsdfAnisotropic::distribution_BECKMANN:
-        aniso->distribution = CLOSURE_BSDF_MICROFACET_BECKMANN_ANISO_ID;
+        aniso->distribution = CLOSURE_BSDF_MICROFACET_BECKMANN_ID;
         break;
       case BL::ShaderNodeBsdfAnisotropic::distribution_GGX:
-        aniso->distribution = CLOSURE_BSDF_MICROFACET_GGX_ANISO_ID;
+        aniso->distribution = CLOSURE_BSDF_MICROFACET_GGX_ID;
         break;
       case BL::ShaderNodeBsdfAnisotropic::distribution_MULTI_GGX:
-        aniso->distribution = CLOSURE_BSDF_MICROFACET_MULTI_GGX_ANISO_ID;
+        aniso->distribution = CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID;
        break;
       case BL::ShaderNodeBsdfAnisotropic::distribution_ASHIKHMIN_SHIRLEY:
-        aniso->distribution = CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ANISO_ID;
+        aniso->distribution = CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID;
         break;
     }

@@ -678,7 +678,7 @@ static ShaderNode *add_node(Scene *scene,
      * builtin names for packed images and movies
      */
     int scene_frame = b_scene.frame_current();
-    int image_frame = image_user_frame_number(b_image_user, scene_frame);
+    int image_frame = image_user_frame_number(b_image_user, b_image, scene_frame);
     image->handle = scene->image_manager->add_image(
         new BlenderImageLoader(b_image, image_frame), image->image_params());
   }

@@ -713,7 +713,7 @@ static ShaderNode *add_node(Scene *scene,

     if (is_builtin) {
       int scene_frame = b_scene.frame_current();
-      int image_frame = image_user_frame_number(b_image_user, scene_frame);
+      int image_frame = image_user_frame_number(b_image_user, b_image, scene_frame);
       env->handle = scene->image_manager->add_image(new BlenderImageLoader(b_image, image_frame),
                                                     env->image_params());
     }
@@ -693,7 +693,11 @@ void BlenderSync::free_data_after_sync(BL::Depsgraph &b_depsgraph)
    * footprint during synchronization process.
    */
   const bool is_interface_locked = b_engine.render() && b_engine.render().use_lock_interface();
-  const bool can_free_caches = BlenderSession::headless || is_interface_locked;
+  const bool can_free_caches = (BlenderSession::headless || is_interface_locked) &&
+                               /* Baking re-uses the depsgraph multiple times, clearing crashes
+                                * reading un-evaluated mesh data which isn't aligned with the
+                                * geometry we're baking, see T71012. */
+                               !scene->bake_manager->get_baking();
   if (!can_free_caches) {
     return;
   }
@@ -238,7 +238,7 @@ static inline string image_user_file_path(BL::ImageUser &iuser,
 {
   char filepath[1024];
   iuser.tile(0);
-  BKE_image_user_frame_calc(NULL, iuser.ptr.data, cfra);
+  BKE_image_user_frame_calc(ima.ptr.data, iuser.ptr.data, cfra);
   BKE_image_user_file_path(iuser.ptr.data, ima.ptr.data, filepath);

   string filepath_str = string(filepath);

@@ -248,9 +248,9 @@ static inline string image_user_file_path(BL::ImageUser &iuser,
   return filepath_str;
 }

-static inline int image_user_frame_number(BL::ImageUser &iuser, int cfra)
+static inline int image_user_frame_number(BL::ImageUser &iuser, BL::Image &ima, int cfra)
 {
-  BKE_image_user_frame_calc(NULL, iuser.ptr.data, cfra);
+  BKE_image_user_frame_calc(ima.ptr.data, iuser.ptr.data, cfra);
   return iuser.frame_current();
 }
@@ -35,8 +35,10 @@ CCL_NAMESPACE_BEGIN
 class BlenderSmokeLoader : public ImageLoader {
  public:
   BlenderSmokeLoader(BL::Object &b_ob, AttributeStandard attribute)
-      : b_domain(object_fluid_gas_domain_find(b_ob)), b_mesh(b_ob.data()), attribute(attribute)
+      : b_domain(object_fluid_gas_domain_find(b_ob)), attribute(attribute)
   {
+    BL::Mesh b_mesh(b_ob.data());
+    mesh_texture_space(b_mesh, texspace_loc, texspace_size);
   }

   bool load_metadata(ImageMetaData &metadata) override

@@ -77,9 +79,7 @@ class BlenderSmokeLoader : public ImageLoader {
     /* Create a matrix to transform from object space to mesh texture space.
      * This does not work with deformations but that can probably only be done
      * well with a volume grid mapping of coordinates. */
-    float3 loc, size;
-    mesh_texture_space(b_mesh, loc, size);
-    metadata.transform_3d = transform_translate(-loc) * transform_scale(size);
+    metadata.transform_3d = transform_translate(-texspace_loc) * transform_scale(texspace_size);
     metadata.use_transform_3d = true;

     return true;

@@ -177,7 +177,7 @@ class BlenderSmokeLoader : public ImageLoader {
   }

   BL::FluidDomainSettings b_domain;
-  BL::Mesh b_mesh;
+  float3 texspace_loc, texspace_size;
   AttributeStandard attribute;
 };

@@ -216,25 +216,16 @@ static void sync_smoke_volume(Scene *scene, BL::Object &b_ob, Mesh *mesh, float

 class BlenderVolumeLoader : public VDBImageLoader {
  public:
-  BlenderVolumeLoader(BL::Volume b_volume, const string &grid_name)
-      : VDBImageLoader(grid_name),
-        b_volume(b_volume),
-        b_volume_grid(PointerRNA_NULL),
-        unload(false)
+  BlenderVolumeLoader(BL::BlendData &b_data, BL::Volume &b_volume, const string &grid_name)
+      : VDBImageLoader(grid_name), b_data(b_data), b_volume(b_volume), unload(false)
   {
-#ifdef WITH_OPENVDB
-    /* Find grid with matching name. */
-    BL::Volume::grids_iterator b_grid_iter;
-    for (b_volume.grids.begin(b_grid_iter); b_grid_iter != b_volume.grids.end(); ++b_grid_iter) {
-      if (b_grid_iter->name() == grid_name) {
-        b_volume_grid = *b_grid_iter;
-      }
-    }
-#endif
   }

   bool load_metadata(ImageMetaData &metadata) override
   {
+    b_volume.grids.load(b_data.ptr.data);
+    BL::VolumeGrid b_volume_grid = find_grid();
+
     if (!b_volume_grid) {
       return false;
     }

@@ -255,6 +246,9 @@ class BlenderVolumeLoader : public VDBImageLoader {
                  const size_t pixel_size,
                  const bool associate_alpha) override
   {
+    b_volume.grids.load(b_data.ptr.data);
+    BL::VolumeGrid b_volume_grid = find_grid();
+
     if (!b_volume_grid) {
       return false;
     }

@@ -266,19 +260,38 @@ class BlenderVolumeLoader : public VDBImageLoader {
   {
     /* TODO: detect multiple volume datablocks with the same filepath. */
     const BlenderVolumeLoader &other_loader = (const BlenderVolumeLoader &)other;
-    return b_volume == other_loader.b_volume && b_volume_grid == other_loader.b_volume_grid;
+    return b_volume == other_loader.b_volume && grid_name == other_loader.grid_name;
   }

   void cleanup() override
   {
     VDBImageLoader::cleanup();
+
+    BL::VolumeGrid b_volume_grid = find_grid();
     if (b_volume_grid && unload) {
       b_volume_grid.unload();
     }
   }

+  /* Find grid with matching name. Grid point not stored in the class since
+   * grids may be unloaded before we load the pixels, for example for motion
+   * blur where we move between frames. */
+  BL::VolumeGrid find_grid()
+  {
+#ifdef WITH_OPENVDB
+    BL::Volume::grids_iterator b_grid_iter;
+    for (b_volume.grids.begin(b_grid_iter); b_grid_iter != b_volume.grids.end(); ++b_grid_iter) {
+      if (b_grid_iter->name() == grid_name) {
+        return *b_grid_iter;
+      }
+    }
+#endif
+
+    return BL::VolumeGrid(PointerRNA_NULL);
+  }
+
+  BL::BlendData b_data;
   BL::Volume b_volume;
-  BL::VolumeGrid b_volume_grid;
   bool unload;
 };

@@ -325,7 +338,7 @@ static void sync_volume_object(BL::BlendData &b_data, BL::Object &b_ob, Scene *s
                                  mesh->attributes.add(std) :
                                  mesh->attributes.add(name, TypeDesc::TypeFloat, ATTR_ELEMENT_VOXEL);

-    ImageLoader *loader = new BlenderVolumeLoader(b_volume, name.string());
+    ImageLoader *loader = new BlenderVolumeLoader(b_data, b_volume, name.string());
     ImageParams params;
     params.frame = b_volume.grids.frame();
|
@@ -352,11 +352,24 @@ string CUDADevice::compile_kernel(const DeviceRequestedFeatures &requested_featu
|
||||
}
|
||||
}
|
||||
|
||||
const string ptx = path_get(string_printf("lib/%s_compute_%d%d.ptx", name, major, minor));
|
||||
VLOG(1) << "Testing for pre-compiled kernel " << ptx << ".";
|
||||
if (path_exists(ptx)) {
|
||||
VLOG(1) << "Using precompiled kernel.";
|
||||
return ptx;
|
||||
/* The driver can JIT-compile PTX generated for older generations, so find the closest one. */
|
||||
int ptx_major = major, ptx_minor = minor;
|
||||
while (ptx_major >= 3) {
|
||||
const string ptx = path_get(
|
||||
string_printf("lib/%s_compute_%d%d.ptx", name, ptx_major, ptx_minor));
|
||||
VLOG(1) << "Testing for pre-compiled kernel " << ptx << ".";
|
||||
if (path_exists(ptx)) {
|
||||
VLOG(1) << "Using precompiled kernel.";
|
||||
return ptx;
|
||||
}
|
||||
|
||||
if (ptx_minor > 0) {
|
||||
ptx_minor--;
|
||||
}
|
||||
else {
|
||||
ptx_major--;
|
||||
ptx_minor = 9;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
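The lookup no longer requires an exact compute-capability match: it walks downward from the device's capability, because the driver can JIT-compile PTX built for an older architecture. A sketch of the search order (hypothetical exists() callback standing in for path_exists):

```python
def find_precompiled_ptx(exists, name, major, minor):
    # Try e.g. compute_75, compute_74, ..., compute_70, compute_69, ... down
    # to compute 3.x, the oldest target the loop in the diff allows.
    ptx_major, ptx_minor = major, minor
    while ptx_major >= 3:
        ptx = "lib/%s_compute_%d%d.ptx" % (name, ptx_major, ptx_minor)
        if exists(ptx):
            return ptx
        if ptx_minor > 0:
            ptx_minor -= 1
        else:
            ptx_major -= 1
            ptx_minor = 9
    return None
```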
@@ -413,18 +426,19 @@ string CUDADevice::compile_kernel(const DeviceRequestedFeatures &requested_featu

   const int nvcc_cuda_version = cuewCompilerVersion();
   VLOG(1) << "Found nvcc " << nvcc << ", CUDA version " << nvcc_cuda_version << ".";
-  if (nvcc_cuda_version < 80) {
+  if (nvcc_cuda_version < 101) {
     printf(
         "Unsupported CUDA version %d.%d detected, "
-        "you need CUDA 8.0 or newer.\n",
+        "you need CUDA 10.1 or newer.\n",
         nvcc_cuda_version / 10,
         nvcc_cuda_version % 10);
     return string();
   }
-  else if (!(nvcc_cuda_version == 101 || nvcc_cuda_version == 102)) {
+  else if (!(nvcc_cuda_version == 101 || nvcc_cuda_version == 102 || nvcc_cuda_version == 111 ||
+             nvcc_cuda_version == 112 || nvcc_cuda_version == 113 || nvcc_cuda_version == 114)) {
     printf(
         "CUDA version %d.%d detected, build may succeed but only "
-        "CUDA 10.1 and 10.2 are officially supported.\n",
+        "CUDA 10.1 to 11.4 are officially supported.\n",
         nvcc_cuda_version / 10,
         nvcc_cuda_version % 10);
   }
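The version gate now has two tiers: a hard floor at 10.1 that aborts kernel compilation, and a soft allow-list that only warns. Roughly, with the version encoded as major*10+minor the way cuewCompilerVersion reports it:

```python
SUPPORTED = {101, 102, 111, 112, 113, 114}

def check_nvcc(version):
    if version < 101:
        return "error"    # build refused: CUDA 10.1 or newer required
    if version not in SUPPORTED:
        return "warning"  # build proceeds; only 10.1-11.4 are supported
    return "ok"
```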
@@ -531,9 +545,9 @@ bool CUDADevice::load_kernels(const DeviceRequestedFeatures &requested_features)

   if (result == CUDA_SUCCESS) {
     reserve_local_memory(requested_features);
-  }

-  load_functions();
+    load_functions();
+  }

   return (result == CUDA_SUCCESS);
 }
@@ -1537,34 +1537,22 @@ bool device_optix_init()

 void device_optix_info(const vector<DeviceInfo> &cuda_devices, vector<DeviceInfo> &devices)
 {
+  devices.reserve(cuda_devices.size());
+
   // Simply add all supported CUDA devices as OptiX devices again
-  for (const DeviceInfo &cuda_info : cuda_devices) {
-    DeviceInfo info = cuda_info;
+  for (DeviceInfo info : cuda_devices) {
     assert(info.type == DEVICE_CUDA);
+
+    int major;
+    cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, info.num);
+    if (major < 5) {
+      continue;  // Only Maxwell and up are supported by OptiX
+    }
+
     info.type = DEVICE_OPTIX;
     info.id += "_OptiX";
-
-    // Figure out RTX support
-    CUdevice cuda_device = 0;
-    CUcontext cuda_context = NULL;
-    unsigned int rtcore_version = 0;
-    if (cuDeviceGet(&cuda_device, info.num) == CUDA_SUCCESS &&
-        cuDevicePrimaryCtxRetain(&cuda_context, cuda_device) == CUDA_SUCCESS) {
-      OptixDeviceContext optix_context = NULL;
-      if (optixDeviceContextCreate(cuda_context, nullptr, &optix_context) == OPTIX_SUCCESS) {
-        optixDeviceContextGetProperty(optix_context,
-                                      OPTIX_DEVICE_PROPERTY_RTCORE_VERSION,
-                                      &rtcore_version,
-                                      sizeof(rtcore_version));
-        optixDeviceContextDestroy(optix_context);
-      }
-      cuDevicePrimaryCtxRelease(cuda_device);
-    }
-
-    // Only add devices with RTX support
-    if (rtcore_version != 0 || getenv("CYCLES_OPTIX_TEST")) {
-      devices.push_back(info);
-    }
+    devices.push_back(info);
   }
 }
@@ -1892,10 +1892,8 @@ void OpenCLDevice::shader(DeviceTask &task)
     }
   }

-string OpenCLDevice::kernel_build_options(const string *debug_src)
+static bool kernel_build_opencl_2(cl_device_id cdDevice)
 {
-  string build_options = "-cl-no-signed-zeros -cl-mad-enable ";
-
   /* Build with OpenCL 2.0 if available, this improves performance
    * with AMD OpenCL drivers on Windows and Linux (legacy drivers).
    * Note that OpenCL selects the highest 1.x version by default,

@@ -1903,10 +1901,36 @@ string OpenCLDevice::kernel_build_options(const string *debug_src)
   int version_major, version_minor;
   if (OpenCLInfo::get_device_version(cdDevice, &version_major, &version_minor)) {
     if (version_major >= 2) {
-      build_options += "-cl-std=CL2.0 ";
+      /* This appears to trigger a driver bug in Radeon RX cards with certain
+       * driver version, so don't use OpenCL 2.0 for those. */
+      string device_name = OpenCLInfo::get_readable_device_name(cdDevice);
+      if (string_startswith(device_name, "Radeon RX 4") ||
+          string_startswith(device_name, "Radeon (TM) RX 4") ||
+          string_startswith(device_name, "Radeon RX 5") ||
+          string_startswith(device_name, "Radeon (TM) RX 5")) {
+        char version[256] = "";
+        int driver_major, driver_minor;
+        clGetDeviceInfo(cdDevice, CL_DEVICE_VERSION, sizeof(version), &version, NULL);
+        if (sscanf(version, "OpenCL 2.0 AMD-APP (%d.%d)", &driver_major, &driver_minor) == 2) {
+          return !(driver_major == 3075 && driver_minor <= 12);
+        }
+      }
+
+      return true;
     }
   }

+  return false;
+}
+
+string OpenCLDevice::kernel_build_options(const string *debug_src)
+{
+  string build_options = "-cl-no-signed-zeros -cl-mad-enable ";
+
+  if (kernel_build_opencl_2(cdDevice)) {
+    build_options += "-cl-std=CL2.0 ";
+  }
+
   if (platform_name == "NVIDIA CUDA") {
     build_options +=
         "-D__KERNEL_OPENCL_NVIDIA__ "
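Pulling the check into kernel_build_opencl_2() makes the Radeon RX workaround testable on its own: OpenCL 2.0 is only withheld from the driver range known to miscompile. A sketch of the guard, assuming the device already reports OpenCL 2.x (regexes stand in for string_startswith/sscanf):

```python
import re

def allow_opencl_2(device_name, version_string):
    # Radeon RX 4xx/5xx on AMD-APP driver 3075.12 or older is the bad case.
    if re.match(r"Radeon (\(TM\) )?RX [45]", device_name):
        m = re.match(r"OpenCL 2\.0 AMD-APP \((\d+)\.(\d+)\)", version_string)
        if m:
            driver_major, driver_minor = int(m.group(1)), int(m.group(2))
            return not (driver_major == 3075 and driver_minor <= 12)
    return True

assert allow_opencl_2("Radeon RX 480", "OpenCL 2.0 AMD-APP (3075.12)") is False
assert allow_opencl_2("Radeon RX 480", "OpenCL 2.0 AMD-APP (3110.6)") is True
```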
@@ -64,6 +64,9 @@ void MemoryManager::DeviceBuffer::update_device_memory(OpenCLDevice *device)
     total_size += alloc_size;
   }

+  /* Always allocate non-empty buffer, NULL pointers cause problems with some drivers. */
+  total_size = std::max(total_size, (size_t)16);
+
   if (need_realloc) {
     cl_ulong max_buffer_size;
     clGetDeviceInfo(

@@ -251,7 +254,7 @@ void MemoryManager::set_kernel_arg_buffers(cl_kernel kernel, cl_uint *narg)
       device->kernel_set_args(kernel, (*narg)++, *device_buffer.buffer);
     }
     else {
-      device->kernel_set_args(kernel, (*narg)++, 0);
+      device->kernel_set_args(kernel, (*narg)++);
     }
   }
 }
@@ -375,11 +375,16 @@ if(WITH_CYCLES_CUDA_BINARIES)
   set(CUDA_VERSION "${CUDA_VERSION_MAJOR}${CUDA_VERSION_MINOR}")

   # warn for other versions
-  if((CUDA_VERSION MATCHES "101") OR (CUDA_VERSION MATCHES "102"))
+  if((CUDA_VERSION MATCHES "101") OR
+     (CUDA_VERSION MATCHES "102") OR
+     (CUDA_VERSION MATCHES "111") OR
+     (CUDA_VERSION MATCHES "112") OR
+     (CUDA_VERSION MATCHES "113") OR
+     (CUDA_VERSION MATCHES "114"))
   else()
     message(WARNING
       "CUDA version ${CUDA_VERSION_MAJOR}.${CUDA_VERSION_MINOR} detected, "
-      "build may succeed but only CUDA 10.1 and 10.2 are officially supported")
+      "build may succeed but only CUDA 10.1 to 11.4 are officially supported")
   endif()

   # build for each arch

@@ -445,10 +450,10 @@ if(WITH_CYCLES_CUDA_BINARIES)
   # cycles_cubin_cc since the env variable is read before main()
   if(APPLE)
     set(CUBIN_CC_ENV ${CMAKE_COMMAND}
-      -E env DYLD_LIBRARY_PATH="${CUDA_TOOLKIT_ROOT_DIR}/lib")
+      -E env DYLD_LIBRARY_PATH="${cuda_toolkit_root_dir}/lib")
   elseif(UNIX)
     set(CUBIN_CC_ENV ${CMAKE_COMMAND}
-      -E env LD_LIBRARY_PATH="${CUDA_TOOLKIT_ROOT_DIR}/lib64")
+      -E env LD_LIBRARY_PATH="${cuda_toolkit_root_dir}/lib64")
   endif()

   add_custom_command(

@@ -459,12 +464,12 @@ if(WITH_CYCLES_CUDA_BINARIES)
       -i ${CMAKE_CURRENT_SOURCE_DIR}${cuda_kernel_src}
       ${cuda_flags}
       -v
-      -cuda-toolkit-dir "${CUDA_TOOLKIT_ROOT_DIR}"
+      -cuda-toolkit-dir "${cuda_toolkit_root_dir}"
       DEPENDS ${kernel_sources} cycles_cubin_cc)
   else()
     add_custom_command(
       OUTPUT ${cuda_file}
-      COMMAND ${CUDA_NVCC_EXECUTABLE}
+      COMMAND ${cuda_nvcc_executable}
        -arch=${arch}
        ${CUDA_NVCC_FLAGS}
        --${format}

@@ -481,11 +486,35 @@ if(WITH_CYCLES_CUDA_BINARIES)

   set(prev_arch "none")
   foreach(arch ${CYCLES_CUDA_BINARIES_ARCH})
-    if(${arch} MATCHES "sm_2.")
+    if(${arch} MATCHES ".*_2.")
       message(STATUS "CUDA binaries for ${arch} are no longer supported, skipped.")
-    elseif(${arch} MATCHES "sm_7." AND ${CUDA_VERSION} LESS 100)
+    elseif(${arch} MATCHES ".*_30")
+      if(DEFINED CUDA10_NVCC_EXECUTABLE)
+        set(cuda_nvcc_executable ${CUDA10_NVCC_EXECUTABLE})
+        set(cuda_toolkit_root_dir ${CUDA10_TOOLKIT_ROOT_DIR})
+      elseif(${CUDA_VERSION} LESS 110) # Support for sm_30 was removed in CUDA 11
+        set(cuda_nvcc_executable ${CUDA_NVCC_EXECUTABLE})
+        set(cuda_toolkit_root_dir ${CUDA_TOOLKIT_ROOT_DIR})
+      else()
+        message(STATUS "CUDA binaries for ${arch} require CUDA 10 or earlier, skipped.")
+      endif()
+    elseif(${arch} MATCHES ".*_7." AND ${CUDA_VERSION} LESS 100)
       message(STATUS "CUDA binaries for ${arch} require CUDA 10.0+, skipped.")
+    elseif(${arch} MATCHES ".*_8.")
+      if(DEFINED CUDA11_NVCC_EXECUTABLE)
+        set(cuda_nvcc_executable ${CUDA11_NVCC_EXECUTABLE})
+        set(cuda_toolkit_root_dir ${CUDA11_TOOLKIT_ROOT_DIR})
+      elseif(${CUDA_VERSION} GREATER_EQUAL 111) # Support for sm_86 was introduced in CUDA 11
+        set(cuda_nvcc_executable ${CUDA_NVCC_EXECUTABLE})
+        set(cuda_toolkit_root_dir ${CUDA_TOOLKIT_ROOT_DIR})
+      else()
+        message(STATUS "CUDA binaries for ${arch} require CUDA 11.1+, skipped.")
+      endif()
     else()
+      set(cuda_nvcc_executable ${CUDA_NVCC_EXECUTABLE})
+      set(cuda_toolkit_root_dir ${CUDA_TOOLKIT_ROOT_DIR})
+    endif()
+    if(DEFINED cuda_nvcc_executable AND DEFINED cuda_toolkit_root_dir)
       # Compile regular kernel
       CYCLES_CUDA_KERNEL_ADD(${arch} ${prev_arch} filter "" "${cuda_filter_sources}" FALSE)
       CYCLES_CUDA_KERNEL_ADD(${arch} ${prev_arch} kernel "" "${cuda_sources}" FALSE)

@@ -498,6 +527,9 @@ if(WITH_CYCLES_CUDA_BINARIES)
       if(WITH_CYCLES_CUDA_BUILD_SERIAL)
         set(prev_arch ${arch})
       endif()

+      unset(cuda_nvcc_executable)
+      unset(cuda_toolkit_root_dir)
+    endif()
   endforeach()
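The per-arch branching above effectively selects a compiler per target: sm_30 needs a CUDA 10 toolkit (support was dropped in CUDA 11), sm_8x needs CUDA 11.1+ (or an explicitly configured CUDA 11 toolkit), and everything else uses the default. A condensed sketch of that selection logic (Python stand-in, function and parameter names hypothetical):

```python
import re

def pick_toolkit(arch, cuda_version, cuda10=None, cuda11=None, default=None):
    # Returns the toolkit to build 'arch' with, or None to skip this arch,
    # mirroring the CMake branches above.
    if re.match(r".*_30$", arch):
        return cuda10 or (default if cuda_version < 110 else None)
    if re.match(r".*_8.", arch):
        return cuda11 or (default if cuda_version >= 111 else None)
    return default
```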
||||
@@ -549,7 +581,7 @@ if(WITH_CYCLES_DEVICE_OPTIX AND WITH_CYCLES_CUDA_BINARIES)
|
||||
${SRC_UTIL_HEADERS}
|
||||
COMMAND ${CUBIN_CC_ENV}
|
||||
"$<TARGET_FILE:cycles_cubin_cc>"
|
||||
-target 30
|
||||
-target 50
|
||||
-ptx
|
||||
-i ${CMAKE_CURRENT_SOURCE_DIR}/${input}
|
||||
${cuda_flags}
|
||||
@@ -573,7 +605,7 @@ if(WITH_CYCLES_DEVICE_OPTIX AND WITH_CYCLES_CUDA_BINARIES)
|
||||
COMMAND
|
||||
${CUDA_NVCC_EXECUTABLE}
|
||||
--ptx
|
||||
-arch=sm_30
|
||||
-arch=sm_50
|
||||
${cuda_flags}
|
||||
${input}
|
||||
WORKING_DIRECTORY
|
||||
|
@@ -229,8 +229,6 @@ ccl_device_inline int bsdf_sample(KernelGlobals *kg,
     case CLOSURE_BSDF_MICROFACET_GGX_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID:
-    case CLOSURE_BSDF_MICROFACET_GGX_ANISO_ID:
-    case CLOSURE_BSDF_MICROFACET_GGX_ANISO_FRESNEL_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID:
       label = bsdf_microfacet_ggx_sample(kg,
                                          sc,

@@ -281,7 +279,6 @@ ccl_device_inline int bsdf_sample(KernelGlobals *kg,
                                               &sd->lcg_state);
       break;
     case CLOSURE_BSDF_MICROFACET_BECKMANN_ID:
-    case CLOSURE_BSDF_MICROFACET_BECKMANN_ANISO_ID:
     case CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID:
       label = bsdf_microfacet_beckmann_sample(kg,
                                               sc,

@@ -298,7 +295,6 @@ ccl_device_inline int bsdf_sample(KernelGlobals *kg,
                                               pdf);
       break;
     case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID:
-    case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ANISO_ID:
       label = bsdf_ashikhmin_shirley_sample(sc,
                                             sd->Ng,
                                             sd->I,

@@ -504,8 +500,6 @@ ccl_device_inline
     case CLOSURE_BSDF_MICROFACET_GGX_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID:
-    case CLOSURE_BSDF_MICROFACET_GGX_ANISO_ID:
-    case CLOSURE_BSDF_MICROFACET_GGX_ANISO_FRESNEL_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID:
       eval = bsdf_microfacet_ggx_eval_reflect(sc, sd->I, omega_in, pdf);
       break;

@@ -519,12 +513,10 @@ ccl_device_inline
           sc, sd->I, omega_in, pdf, &sd->lcg_state);
       break;
     case CLOSURE_BSDF_MICROFACET_BECKMANN_ID:
-    case CLOSURE_BSDF_MICROFACET_BECKMANN_ANISO_ID:
     case CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID:
       eval = bsdf_microfacet_beckmann_eval_reflect(sc, sd->I, omega_in, pdf);
       break;
     case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID:
-    case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ANISO_ID:
       eval = bsdf_ashikhmin_shirley_eval_reflect(sc, sd->I, omega_in, pdf);
       break;
     case CLOSURE_BSDF_ASHIKHMIN_VELVET_ID:

@@ -595,8 +587,6 @@ ccl_device_inline
     case CLOSURE_BSDF_MICROFACET_GGX_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID:
-    case CLOSURE_BSDF_MICROFACET_GGX_ANISO_ID:
-    case CLOSURE_BSDF_MICROFACET_GGX_ANISO_FRESNEL_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID:
       eval = bsdf_microfacet_ggx_eval_transmit(sc, sd->I, omega_in, pdf);
       break;

@@ -610,12 +600,10 @@ ccl_device_inline
           sc, sd->I, omega_in, pdf, &sd->lcg_state);
       break;
     case CLOSURE_BSDF_MICROFACET_BECKMANN_ID:
-    case CLOSURE_BSDF_MICROFACET_BECKMANN_ANISO_ID:
     case CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID:
       eval = bsdf_microfacet_beckmann_eval_transmit(sc, sd->I, omega_in, pdf);
       break;
     case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID:
-    case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ANISO_ID:
       eval = bsdf_ashikhmin_shirley_eval_transmit(sc, sd->I, omega_in, pdf);
       break;
     case CLOSURE_BSDF_ASHIKHMIN_VELVET_ID:

@@ -679,18 +667,14 @@ ccl_device void bsdf_blur(KernelGlobals *kg, ShaderClosure *sc, float roughness)
     case CLOSURE_BSDF_MICROFACET_GGX_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID:
-    case CLOSURE_BSDF_MICROFACET_GGX_ANISO_ID:
-    case CLOSURE_BSDF_MICROFACET_GGX_ANISO_FRESNEL_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID:
       bsdf_microfacet_ggx_blur(sc, roughness);
       break;
     case CLOSURE_BSDF_MICROFACET_BECKMANN_ID:
-    case CLOSURE_BSDF_MICROFACET_BECKMANN_ANISO_ID:
     case CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID:
       bsdf_microfacet_beckmann_blur(sc, roughness);
       break;
     case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID:
-    case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ANISO_ID:
       bsdf_ashikhmin_shirley_blur(sc, roughness);
       break;
     case CLOSURE_BSDF_HAIR_PRINCIPLED_ID:

@@ -719,18 +703,14 @@ ccl_device bool bsdf_merge(ShaderClosure *a, ShaderClosure *b)
     case CLOSURE_BSDF_MICROFACET_GGX_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID:
-    case CLOSURE_BSDF_MICROFACET_GGX_ANISO_ID:
-    case CLOSURE_BSDF_MICROFACET_GGX_ANISO_FRESNEL_ID:
     case CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID:
     case CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID:
     case CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID:
     case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_ID:
     case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID:
     case CLOSURE_BSDF_MICROFACET_BECKMANN_ID:
-    case CLOSURE_BSDF_MICROFACET_BECKMANN_ANISO_ID:
     case CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID:
     case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID:
-    case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ANISO_ID:
       return bsdf_microfacet_merge(a, b);
     case CLOSURE_BSDF_ASHIKHMIN_VELVET_ID:
       return bsdf_ashikhmin_velvet_merge(a, b);
@@ -32,20 +32,11 @@ Other than that, the implementation directly follows the paper.
 CCL_NAMESPACE_BEGIN

 ccl_device int bsdf_ashikhmin_shirley_setup(MicrofacetBsdf *bsdf)
 {
   bsdf->alpha_x = clamp(bsdf->alpha_x, 1e-4f, 1.0f);
-  bsdf->alpha_y = bsdf->alpha_x;
-
-  bsdf->type = CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID;
-  return SD_BSDF | SD_BSDF_HAS_EVAL;
-}
-
-ccl_device int bsdf_ashikhmin_shirley_aniso_setup(MicrofacetBsdf *bsdf)
-{
-  bsdf->alpha_x = clamp(bsdf->alpha_x, 1e-4f, 1.0f);
   bsdf->alpha_y = clamp(bsdf->alpha_y, 1e-4f, 1.0f);

-  bsdf->type = CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ANISO_ID;
+  bsdf->type = CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID;
   return SD_BSDF | SD_BSDF_HAS_EVAL;
 }
@@ -256,9 +256,7 @@ ccl_device_forceinline float3 reflection_color(const MicrofacetBsdf *bsdf, float
 {
   float3 F = make_float3(1.0f, 1.0f, 1.0f);
   bool use_fresnel = (bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID ||
-                      bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID ||
-                      bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_ANISO_FRESNEL_ID);
-
+                      bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID);
   if (use_fresnel) {
     float F0 = fresnel_dielectric_cos(1.0f, bsdf->ior);

@@ -311,19 +309,27 @@ ccl_device int bsdf_microfacet_ggx_setup(MicrofacetBsdf *bsdf)
   bsdf->extra = NULL;

   bsdf->alpha_x = saturate(bsdf->alpha_x);
-  bsdf->alpha_y = bsdf->alpha_x;
+  bsdf->alpha_y = saturate(bsdf->alpha_y);

   bsdf->type = CLOSURE_BSDF_MICROFACET_GGX_ID;

   return SD_BSDF | SD_BSDF_HAS_EVAL;
 }

+/* Required to maintain OSL interface. */
+ccl_device int bsdf_microfacet_ggx_isotropic_setup(MicrofacetBsdf *bsdf)
+{
+  bsdf->alpha_y = bsdf->alpha_x;
+
+  return bsdf_microfacet_ggx_setup(bsdf);
+}
+
 ccl_device int bsdf_microfacet_ggx_fresnel_setup(MicrofacetBsdf *bsdf, const ShaderData *sd)
 {
   bsdf->extra->cspec0 = saturate3(bsdf->extra->cspec0);

   bsdf->alpha_x = saturate(bsdf->alpha_x);
-  bsdf->alpha_y = bsdf->alpha_x;
+  bsdf->alpha_y = saturate(bsdf->alpha_y);

   bsdf->type = CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID;

@@ -361,32 +367,6 @@ ccl_device bool bsdf_microfacet_merge(const ShaderClosure *a, const ShaderClosur
           (bsdf_a->extra->clearcoat == bsdf_b->extra->clearcoat)));
 }

-ccl_device int bsdf_microfacet_ggx_aniso_setup(MicrofacetBsdf *bsdf)
-{
-  bsdf->extra = NULL;
-
-  bsdf->alpha_x = saturate(bsdf->alpha_x);
-  bsdf->alpha_y = saturate(bsdf->alpha_y);
-
-  bsdf->type = CLOSURE_BSDF_MICROFACET_GGX_ANISO_ID;
-
-  return SD_BSDF | SD_BSDF_HAS_EVAL;
-}
-
-ccl_device int bsdf_microfacet_ggx_aniso_fresnel_setup(MicrofacetBsdf *bsdf, const ShaderData *sd)
-{
-  bsdf->extra->cspec0 = saturate3(bsdf->extra->cspec0);
-
-  bsdf->alpha_x = saturate(bsdf->alpha_x);
-  bsdf->alpha_y = saturate(bsdf->alpha_y);
-
-  bsdf->type = CLOSURE_BSDF_MICROFACET_GGX_ANISO_FRESNEL_ID;
-
-  bsdf_microfacet_fresnel_color(sd, bsdf);
-
-  return SD_BSDF | SD_BSDF_HAS_EVAL;
-}
-
 ccl_device int bsdf_microfacet_ggx_refraction_setup(MicrofacetBsdf *bsdf)
 {
   bsdf->extra = NULL;

@@ -636,8 +616,7 @@ ccl_device int bsdf_microfacet_ggx_sample(KernelGlobals *kg,
   *eval = make_float3(1e6f, 1e6f, 1e6f);

   bool use_fresnel = (bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID ||
-                      bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID ||
-                      bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_ANISO_FRESNEL_ID);
+                      bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID);

   /* if fresnel is used, calculate the color with reflection_color(...) */
   if (use_fresnel) {

@@ -811,19 +790,18 @@ ccl_device int bsdf_microfacet_ggx_sample(KernelGlobals *kg,
 ccl_device int bsdf_microfacet_beckmann_setup(MicrofacetBsdf *bsdf)
 {
   bsdf->alpha_x = saturate(bsdf->alpha_x);
-  bsdf->alpha_y = bsdf->alpha_x;
+  bsdf->alpha_y = saturate(bsdf->alpha_y);

   bsdf->type = CLOSURE_BSDF_MICROFACET_BECKMANN_ID;
   return SD_BSDF | SD_BSDF_HAS_EVAL;
 }

-ccl_device int bsdf_microfacet_beckmann_aniso_setup(MicrofacetBsdf *bsdf)
+/* Required to maintain OSL interface. */
+ccl_device int bsdf_microfacet_beckmann_isotropic_setup(MicrofacetBsdf *bsdf)
 {
-  bsdf->alpha_x = saturate(bsdf->alpha_x);
-  bsdf->alpha_y = saturate(bsdf->alpha_y);
+  bsdf->alpha_y = bsdf->alpha_x;

-  bsdf->type = CLOSURE_BSDF_MICROFACET_BECKMANN_ANISO_ID;
-  return SD_BSDF | SD_BSDF_HAS_EVAL;
+  return bsdf_microfacet_beckmann_setup(bsdf);
 }

 ccl_device int bsdf_microfacet_beckmann_refraction_setup(MicrofacetBsdf *bsdf)
@@ -384,32 +384,10 @@ ccl_device int bsdf_microfacet_multi_ggx_common_setup(MicrofacetBsdf *bsdf)
   return SD_BSDF | SD_BSDF_HAS_EVAL | SD_BSDF_NEEDS_LCG;
 }

-ccl_device int bsdf_microfacet_multi_ggx_aniso_setup(MicrofacetBsdf *bsdf)
-{
-  if (is_zero(bsdf->T))
-    bsdf->T = make_float3(1.0f, 0.0f, 0.0f);
-
-  bsdf->type = CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID;
-
-  return bsdf_microfacet_multi_ggx_common_setup(bsdf);
-}
-
-ccl_device int bsdf_microfacet_multi_ggx_aniso_fresnel_setup(MicrofacetBsdf *bsdf,
-                                                             const ShaderData *sd)
-{
-  if (is_zero(bsdf->T))
-    bsdf->T = make_float3(1.0f, 0.0f, 0.0f);
-
-  bsdf->type = CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID;
-
-  bsdf_microfacet_fresnel_color(sd, bsdf);
-
-  return bsdf_microfacet_multi_ggx_common_setup(bsdf);
-}
-
 ccl_device int bsdf_microfacet_multi_ggx_setup(MicrofacetBsdf *bsdf)
 {
-  bsdf->alpha_y = bsdf->alpha_x;
+  if (is_zero(bsdf->T))
+    bsdf->T = make_float3(1.0f, 0.0f, 0.0f);

   bsdf->type = CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID;

@@ -418,7 +396,8 @@ ccl_device int bsdf_microfacet_multi_ggx_setup(MicrofacetBsdf *bsdf)

 ccl_device int bsdf_microfacet_multi_ggx_fresnel_setup(MicrofacetBsdf *bsdf, const ShaderData *sd)
 {
-  bsdf->alpha_y = bsdf->alpha_x;
+  if (is_zero(bsdf->T))
+    bsdf->T = make_float3(1.0f, 0.0f, 0.0f);

   bsdf->type = CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID;
@@ -70,8 +70,8 @@
 #  endif
 #  define CUDA_KERNEL_BRANCHED_MAX_REGISTERS 63

-/* 7.x */
-#elif __CUDA_ARCH__ <= 799
+/* 7.x, 8.x */
+#elif __CUDA_ARCH__ <= 899
 #  define CUDA_MULTIPRESSOR_MAX_REGISTERS 65536
 #  define CUDA_MULTIPROCESSOR_MAX_BLOCKS 32
 #  define CUDA_BLOCK_MAX_THREADS 1024
@@ -66,8 +66,8 @@ kernel_tex_image_interp_bicubic(const TextureInfo &info, CUtexObject tex, float
   x = (x * info.width) - 0.5f;
   y = (y * info.height) - 0.5f;

-  float px = floor(x);
-  float py = floor(y);
+  float px = floorf(x);
+  float py = floorf(y);
   float fx = x - px;
   float fy = y - py;

@@ -91,9 +91,9 @@ ccl_device T kernel_tex_image_interp_bicubic_3d(
   y = (y * info.height) - 0.5f;
   z = (z * info.depth) - 0.5f;

-  float px = floor(x);
-  float py = floor(y);
-  float pz = floor(z);
+  float px = floorf(x);
+  float py = floorf(y);
+  float pz = floorf(z);
   float fx = x - px;
   float fy = y - py;
   float fz = z - pz;
@@ -100,14 +100,14 @@ CLOSURE_FLOAT3_PARAM(DiffuseClosure, params.N),
 BSDF_CLOSURE_CLASS_END(AshikhminVelvet, ashikhmin_velvet)

 BSDF_CLOSURE_CLASS_BEGIN(AshikhminShirley,
-                         ashikhmin_shirley_aniso,
+                         ashikhmin_shirley,
                          MicrofacetBsdf,
                          LABEL_GLOSSY | LABEL_REFLECT)
   CLOSURE_FLOAT3_PARAM(AshikhminShirleyClosure, params.N),
   CLOSURE_FLOAT3_PARAM(AshikhminShirleyClosure, params.T),
   CLOSURE_FLOAT_PARAM(AshikhminShirleyClosure, params.alpha_x),
   CLOSURE_FLOAT_PARAM(AshikhminShirleyClosure, params.alpha_y),
-BSDF_CLOSURE_CLASS_END(AshikhminShirley, ashikhmin_shirley_aniso)
+BSDF_CLOSURE_CLASS_END(AshikhminShirley, ashikhmin_shirley)

 BSDF_CLOSURE_CLASS_BEGIN(DiffuseToon, diffuse_toon, ToonBsdf, LABEL_DIFFUSE)
   CLOSURE_FLOAT3_PARAM(DiffuseToonClosure, params.N),

@@ -121,42 +121,42 @@ CLOSURE_FLOAT3_PARAM(DiffuseClosure, params.N),
   CLOSURE_FLOAT_PARAM(GlossyToonClosure, params.smooth),
 BSDF_CLOSURE_CLASS_END(GlossyToon, glossy_toon)

+BSDF_CLOSURE_CLASS_BEGIN(MicrofacetGGXIsotropic,
+                         microfacet_ggx_isotropic,
+                         MicrofacetBsdf,
+                         LABEL_GLOSSY | LABEL_REFLECT)
+  CLOSURE_FLOAT3_PARAM(MicrofacetGGXIsotropicClosure, params.N),
+  CLOSURE_FLOAT_PARAM(MicrofacetGGXIsotropicClosure, params.alpha_x),
+BSDF_CLOSURE_CLASS_END(MicrofacetGGXIsotropic, microfacet_ggx_isotropic)
+
 BSDF_CLOSURE_CLASS_BEGIN(MicrofacetGGX,
                          microfacet_ggx,
                          MicrofacetBsdf,
                          LABEL_GLOSSY | LABEL_REFLECT)
   CLOSURE_FLOAT3_PARAM(MicrofacetGGXClosure, params.N),
+  CLOSURE_FLOAT3_PARAM(MicrofacetGGXClosure, params.T),
   CLOSURE_FLOAT_PARAM(MicrofacetGGXClosure, params.alpha_x),
+  CLOSURE_FLOAT_PARAM(MicrofacetGGXClosure, params.alpha_y),
 BSDF_CLOSURE_CLASS_END(MicrofacetGGX, microfacet_ggx)

-BSDF_CLOSURE_CLASS_BEGIN(MicrofacetGGXAniso,
-                         microfacet_ggx_aniso,
+BSDF_CLOSURE_CLASS_BEGIN(MicrofacetBeckmannIsotropic,
+                         microfacet_beckmann_isotropic,
                          MicrofacetBsdf,
                          LABEL_GLOSSY | LABEL_REFLECT)
-  CLOSURE_FLOAT3_PARAM(MicrofacetGGXAnisoClosure, params.N),
-  CLOSURE_FLOAT3_PARAM(MicrofacetGGXAnisoClosure, params.T),
-  CLOSURE_FLOAT_PARAM(MicrofacetGGXAnisoClosure, params.alpha_x),
-  CLOSURE_FLOAT_PARAM(MicrofacetGGXAnisoClosure, params.alpha_y),
-BSDF_CLOSURE_CLASS_END(MicrofacetGGXAniso, microfacet_ggx_aniso)
+  CLOSURE_FLOAT3_PARAM(MicrofacetBeckmannIsotropicClosure, params.N),
+  CLOSURE_FLOAT_PARAM(MicrofacetBeckmannIsotropicClosure, params.alpha_x),
+BSDF_CLOSURE_CLASS_END(MicrofacetBeckmannIsotropic, microfacet_beckmann_isotropic)

 BSDF_CLOSURE_CLASS_BEGIN(MicrofacetBeckmann,
                          microfacet_beckmann,
                          MicrofacetBsdf,
                          LABEL_GLOSSY | LABEL_REFLECT)
   CLOSURE_FLOAT3_PARAM(MicrofacetBeckmannClosure, params.N),
+  CLOSURE_FLOAT3_PARAM(MicrofacetBeckmannClosure, params.T),
   CLOSURE_FLOAT_PARAM(MicrofacetBeckmannClosure, params.alpha_x),
+  CLOSURE_FLOAT_PARAM(MicrofacetBeckmannClosure, params.alpha_y),
 BSDF_CLOSURE_CLASS_END(MicrofacetBeckmann, microfacet_beckmann)

-BSDF_CLOSURE_CLASS_BEGIN(MicrofacetBeckmannAniso,
-                         microfacet_beckmann_aniso,
-                         MicrofacetBsdf,
-                         LABEL_GLOSSY | LABEL_REFLECT)
-  CLOSURE_FLOAT3_PARAM(MicrofacetBeckmannAnisoClosure, params.N),
-  CLOSURE_FLOAT3_PARAM(MicrofacetBeckmannAnisoClosure, params.T),
-  CLOSURE_FLOAT_PARAM(MicrofacetBeckmannAnisoClosure, params.alpha_x),
-  CLOSURE_FLOAT_PARAM(MicrofacetBeckmannAnisoClosure, params.alpha_y),
-BSDF_CLOSURE_CLASS_END(MicrofacetBeckmannAniso, microfacet_beckmann_aniso)

 BSDF_CLOSURE_CLASS_BEGIN(MicrofacetGGXRefraction,
                          microfacet_ggx_refraction,
                          MicrofacetBsdf,

@@ -362,13 +362,16 @@ void OSLShader::register_closures(OSLShadingSystem *ss_)
                    id++,
                    closure_bsdf_transparent_params(),
                    closure_bsdf_transparent_prepare);

   register_closure(
-      ss, "microfacet_ggx", id++, bsdf_microfacet_ggx_params(), bsdf_microfacet_ggx_prepare);
+      ss, "microfacet", id++, closure_bsdf_microfacet_params(), closure_bsdf_microfacet_prepare);
   register_closure(ss,
-                   "microfacet_ggx_aniso",
+                   "microfacet_ggx",
                    id++,
-                   bsdf_microfacet_ggx_aniso_params(),
-                   bsdf_microfacet_ggx_aniso_prepare);
+                   bsdf_microfacet_ggx_isotropic_params(),
+                   bsdf_microfacet_ggx_isotropic_prepare);
+  register_closure(
+      ss, "microfacet_ggx_aniso", id++, bsdf_microfacet_ggx_params(), bsdf_microfacet_ggx_prepare);
   register_closure(ss,
                    "microfacet_ggx_refraction",
                    id++,

@@ -417,13 +420,13 @@ void OSLShader::register_closures(OSLShadingSystem *ss_)
   register_closure(ss,
                    "microfacet_beckmann",
                    id++,
-                   bsdf_microfacet_beckmann_params(),
-                   bsdf_microfacet_beckmann_prepare);
+                   bsdf_microfacet_beckmann_isotropic_params(),
+                   bsdf_microfacet_beckmann_isotropic_prepare);
   register_closure(ss,
                    "microfacet_beckmann_aniso",
                    id++,
-                   bsdf_microfacet_beckmann_aniso_params(),
-                   bsdf_microfacet_beckmann_aniso_prepare);
+                   bsdf_microfacet_beckmann_params(),
+                   bsdf_microfacet_beckmann_prepare);
   register_closure(ss,
                    "microfacet_beckmann_refraction",
                    id++,

@@ -432,8 +435,8 @@ void OSLShader::register_closures(OSLShadingSystem *ss_)
   register_closure(ss,
                    "ashikhmin_shirley",
                    id++,
-                   bsdf_ashikhmin_shirley_aniso_params(),
-                   bsdf_ashikhmin_shirley_aniso_prepare);
+                   bsdf_ashikhmin_shirley_params(),
+                   bsdf_ashikhmin_shirley_prepare);
   register_closure(
       ss, "ashikhmin_velvet", id++, bsdf_ashikhmin_velvet_params(), bsdf_ashikhmin_velvet_prepare);
   register_closure(

@@ -508,6 +511,82 @@ bool CBSDFClosure::skip(const ShaderData *sd, int path_flag, int scattering)
   return false;
 }

+/* Standard Microfacet Closure */
+
+class MicrofacetClosure : public CBSDFClosure {
+ public:
+  MicrofacetBsdf params;
+  ustring distribution;
+  int refract;
+
+  void setup(ShaderData *sd, int path_flag, float3 weight)
+  {
+    static ustring u_ggx("ggx");
+    static ustring u_default("default");
+
+    const int label = (refract) ? LABEL_TRANSMIT : LABEL_REFLECT;
+    if (skip(sd, path_flag, LABEL_GLOSSY | label)) {
+      return;
+    }
+
+    MicrofacetBsdf *bsdf = (MicrofacetBsdf *)bsdf_alloc_osl(
+        sd, sizeof(MicrofacetBsdf), weight, &params);
+
+    if (!bsdf) {
+      return;
+    }
+
+    /* GGX */
+    if (distribution == u_ggx || distribution == u_default) {
+      if (!refract) {
+        if (params.alpha_x == params.alpha_y) {
+          /* Isotropic */
+          sd->flag |= bsdf_microfacet_ggx_isotropic_setup(bsdf);
+        }
+        else {
+          /* Anisotropic */
+          sd->flag |= bsdf_microfacet_ggx_setup(bsdf);
+        }
+      }
+      else {
+        sd->flag |= bsdf_microfacet_ggx_refraction_setup(bsdf);
+      }
+    }
+    /* Beckmann */
+    else {
+      if (!refract) {
+        if (params.alpha_x == params.alpha_y) {
+          /* Isotropic */
+          sd->flag |= bsdf_microfacet_beckmann_isotropic_setup(bsdf);
+        }
+        else {
+          /* Anisotropic */
+          sd->flag |= bsdf_microfacet_beckmann_setup(bsdf);
+        }
+      }
+      else {
+        sd->flag |= bsdf_microfacet_beckmann_refraction_setup(bsdf);
+      }
+    }
+  }
+};
+
+ClosureParam *closure_bsdf_microfacet_params()
+{
+  static ClosureParam params[] = {CLOSURE_STRING_PARAM(MicrofacetClosure, distribution),
+                                  CLOSURE_FLOAT3_PARAM(MicrofacetClosure, params.N),
+                                  CLOSURE_FLOAT3_PARAM(MicrofacetClosure, params.T),
+                                  CLOSURE_FLOAT_PARAM(MicrofacetClosure, params.alpha_x),
+                                  CLOSURE_FLOAT_PARAM(MicrofacetClosure, params.alpha_y),
+                                  CLOSURE_FLOAT_PARAM(MicrofacetClosure, params.ior),
+                                  CLOSURE_INT_PARAM(MicrofacetClosure, refract),
+                                  CLOSURE_STRING_KEYPARAM(MicrofacetClosure, label, "label"),
+                                  CLOSURE_FINISH_PARAM(MicrofacetClosure)};
+
+  return params;
+}
+CCLOSURE_PREPARE(closure_bsdf_microfacet_prepare, MicrofacetClosure)
+
 /* GGX closures with Fresnel */

 class MicrofacetFresnelClosure : public CBSDFClosure {

@@ -582,7 +661,7 @@ class MicrofacetGGXAnisoFresnelClosure : public MicrofacetFresnelClosure {
       return;
     }

-    sd->flag |= bsdf_microfacet_ggx_aniso_fresnel_setup(bsdf, sd);
+    sd->flag |= bsdf_microfacet_ggx_fresnel_setup(bsdf, sd);
   }
 };

@@ -676,7 +755,7 @@ class MicrofacetMultiGGXAnisoClosure : public MicrofacetMultiClosure {
     }

     bsdf->ior = 0.0f;
-    sd->flag |= bsdf_microfacet_multi_ggx_aniso_setup(bsdf);
+    sd->flag |= bsdf_microfacet_multi_ggx_setup(bsdf);
   }
 };

@@ -801,7 +880,7 @@ class MicrofacetMultiGGXAnisoFresnelClosure : public MicrofacetMultiFresnelClosu
       return;
     }

-    sd->flag |= bsdf_microfacet_multi_ggx_aniso_fresnel_setup(bsdf, sd);
+    sd->flag |= bsdf_microfacet_multi_ggx_fresnel_setup(bsdf, sd);
   }
 };
|
@@ -51,6 +51,7 @@ OSL::ClosureParam *closure_bsdf_transparent_params();
|
||||
OSL::ClosureParam *closure_bssrdf_params();
|
||||
OSL::ClosureParam *closure_absorption_params();
|
||||
OSL::ClosureParam *closure_henyey_greenstein_params();
|
||||
OSL::ClosureParam *closure_bsdf_microfacet_params();
|
||||
OSL::ClosureParam *closure_bsdf_microfacet_multi_ggx_params();
|
||||
OSL::ClosureParam *closure_bsdf_microfacet_multi_ggx_glass_params();
|
||||
OSL::ClosureParam *closure_bsdf_microfacet_multi_ggx_aniso_params();
|
||||
@@ -70,6 +71,7 @@ void closure_bsdf_transparent_prepare(OSL::RendererServices *, int id, void *dat
|
||||
void closure_bssrdf_prepare(OSL::RendererServices *, int id, void *data);
|
||||
void closure_absorption_prepare(OSL::RendererServices *, int id, void *data);
|
||||
void closure_henyey_greenstein_prepare(OSL::RendererServices *, int id, void *data);
|
||||
void closure_bsdf_microfacet_prepare(OSL::RendererServices *, int id, void *data);
|
||||
void closure_bsdf_microfacet_multi_ggx_prepare(OSL::RendererServices *, int id, void *data);
|
||||
void closure_bsdf_microfacet_multi_ggx_glass_prepare(OSL::RendererServices *, int id, void *data);
|
||||
void closure_bsdf_microfacet_multi_ggx_aniso_prepare(OSL::RendererServices *, int id, void *data);
|
||||
|
@@ -59,23 +59,7 @@ ccl_device void kernel_buffer_update(KernelGlobals *kg,
                            kernel_split_params.queue_size,
                            1);

-#ifdef __COMPUTE_DEVICE_GPU__
-  /* If we are executing on a GPU device, we exit all threads that are not
-   * required.
-   *
-   * If we are executing on a CPU device, then we need to keep all threads
-   * active since we have barrier() calls later in the kernel. CPU devices,
-   * expect all threads to execute barrier statement.
-   */
   if (ray_index == QUEUE_EMPTY_SLOT) {
     return;
   }
-#endif
-
-#ifndef __COMPUTE_DEVICE_GPU__
-  if (ray_index != QUEUE_EMPTY_SLOT) {
-#endif

   ccl_global char *ray_state = kernel_split_state.ray_state;
   ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
   PathRadiance *L = &kernel_split_state.path_radiance[ray_index];

@@ -153,10 +137,7 @@ ccl_device void kernel_buffer_update(KernelGlobals *kg,
       }
     }
   }

-#ifndef __COMPUTE_DEVICE_GPU__
-  }
-#endif

   /* Enqueue RAY_REGENERATED rays into QUEUE_ACTIVE_AND_REGENERATED_RAYS;
    * These rays will be made active during next SceneIntersectkernel.
@@ -73,23 +73,7 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
       kernel_split_params.queue_size,
       0);

-#ifdef __COMPUTE_DEVICE_GPU__
-  /* If we are executing on a GPU device, we exit all threads that are not
-   * required.
-   *
-   * If we are executing on a CPU device, then we need to keep all threads
-   * active since we have barrier() calls later in the kernel. CPU devices,
-   * expect all threads to execute barrier statement.
-   */
   if (ray_index == QUEUE_EMPTY_SLOT) {
     return;
   }
-#endif /* __COMPUTE_DEVICE_GPU__ */
-
-#ifndef __COMPUTE_DEVICE_GPU__
-  if (ray_index != QUEUE_EMPTY_SLOT) {
-#endif

   ccl_global PathState *state = 0x0;
   float3 throughput;

@@ -148,10 +132,7 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
     }
   }
 #endif /* __AO__ */

-#ifndef __COMPUTE_DEVICE_GPU__
-  }
-#endif

 #ifdef __AO__
   /* Enqueue to-shadow-ray-cast rays. */
@@ -33,18 +33,15 @@ ccl_device void kernel_shader_setup(KernelGlobals *kg,

   int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
   int queue_index = kernel_split_params.queue_index[QUEUE_ACTIVE_AND_REGENERATED_RAYS];
-  if (ray_index >= queue_index) {
-    return;
-  }
-  ray_index = get_ray_index(kg,
-                            ray_index,
-                            QUEUE_ACTIVE_AND_REGENERATED_RAYS,
-                            kernel_split_state.queue_data,
-                            kernel_split_params.queue_size,
-                            0);
-
-  if (ray_index == QUEUE_EMPTY_SLOT) {
-    return;
+  if (ray_index < queue_index) {
+    ray_index = get_ray_index(kg,
+                              ray_index,
+                              QUEUE_ACTIVE_AND_REGENERATED_RAYS,
+                              kernel_split_state.queue_data,
+                              kernel_split_params.queue_size,
+                              0);
+  } else {
+    ray_index = QUEUE_EMPTY_SLOT;
   }

   char enqueue_flag = (IS_STATE(kernel_split_state.ray_state, ray_index, RAY_TO_REGENERATE)) ? 1 :
@@ -320,9 +320,9 @@ ccl_device void svm_node_closure_bsdf(KernelGlobals *kg,
           /* setup bsdf */
           if (distribution == CLOSURE_BSDF_MICROFACET_GGX_GLASS_ID ||
               roughness <= 0.075f) /* use single-scatter GGX */
-            sd->flag |= bsdf_microfacet_ggx_aniso_fresnel_setup(bsdf, sd);
+            sd->flag |= bsdf_microfacet_ggx_fresnel_setup(bsdf, sd);
           else /* use multi-scatter GGX */
-            sd->flag |= bsdf_microfacet_multi_ggx_aniso_fresnel_setup(bsdf, sd);
+            sd->flag |= bsdf_microfacet_multi_ggx_fresnel_setup(bsdf, sd);
         }
       }
 # ifdef __CAUSTICS_TRICKS__

@@ -515,12 +515,34 @@ ccl_device void svm_node_closure_bsdf(KernelGlobals *kg,
         float roughness = sqr(param1);

         bsdf->N = N;
-        bsdf->T = make_float3(0.0f, 0.0f, 0.0f);
-        bsdf->alpha_x = roughness;
-        bsdf->alpha_y = roughness;
         bsdf->ior = 0.0f;
         bsdf->extra = NULL;

+        if (data_node.y == SVM_STACK_INVALID) {
+          bsdf->T = make_float3(0.0f, 0.0f, 0.0f);
+          bsdf->alpha_x = roughness;
+          bsdf->alpha_y = roughness;
+        }
+        else {
+          bsdf->T = stack_load_float3(stack, data_node.y);
+
+          /* rotate tangent */
+          float rotation = stack_load_float(stack, data_node.z);
+          if (rotation != 0.0f)
+            bsdf->T = rotate_around_axis(bsdf->T, bsdf->N, rotation * M_2PI_F);
+
+          /* compute roughness */
+          float anisotropy = clamp(param2, -0.99f, 0.99f);
+          if (anisotropy < 0.0f) {
+            bsdf->alpha_x = roughness / (1.0f + anisotropy);
+            bsdf->alpha_y = roughness * (1.0f + anisotropy);
+          }
+          else {
+            bsdf->alpha_x = roughness * (1.0f - anisotropy);
+            bsdf->alpha_y = roughness / (1.0f - anisotropy);
+          }
+        }
+
         /* setup bsdf */
         if (type == CLOSURE_BSDF_REFLECTION_ID)
           sd->flag |= bsdf_reflection_setup(bsdf);
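The tangent branch added above folds the old anisotropic node cases into the main path. The remap keeps the product of the two alphas equal to roughness squared, with anisotropy in (-0.99, 0.99) setting the ratio and its sign choosing which axis gets stretched. A quick numeric check of that invariant:

```python
def alphas(roughness, anisotropy):
    a = max(-0.99, min(0.99, anisotropy))   # same clamp as the kernel
    if a < 0.0:
        return roughness / (1.0 + a), roughness * (1.0 + a)
    return roughness * (1.0 - a), roughness / (1.0 - a)

ax, ay = alphas(0.5, 0.5)                    # ax = 0.25, ay = 1.0
assert abs(ax * ay - 0.5 ** 2) < 1e-9        # product stays roughness squared
```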
@@ -529,10 +551,10 @@ ccl_device void svm_node_closure_bsdf(KernelGlobals *kg,
|
||||
else if (type == CLOSURE_BSDF_MICROFACET_GGX_ID)
|
||||
sd->flag |= bsdf_microfacet_ggx_setup(bsdf);
|
||||
else if (type == CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID) {
|
||||
kernel_assert(stack_valid(data_node.z));
|
||||
kernel_assert(stack_valid(data_node.w));
|
||||
bsdf->extra = (MicrofacetExtra *)closure_alloc_extra(sd, sizeof(MicrofacetExtra));
|
||||
if (bsdf->extra) {
|
||||
bsdf->extra->color = stack_load_float3(stack, data_node.z);
|
||||
bsdf->extra->color = stack_load_float3(stack, data_node.w);
|
||||
bsdf->extra->cspec0 = make_float3(0.0f, 0.0f, 0.0f);
|
||||
bsdf->extra->clearcoat = 0.0f;
|
||||
sd->flag |= bsdf_microfacet_multi_ggx_setup(bsdf);
|
||||
@@ -675,64 +697,6 @@ ccl_device void svm_node_closure_bsdf(KernelGlobals *kg,
|
||||
sd->flag |= bsdf_microfacet_multi_ggx_glass_setup(bsdf);
|
||||
break;
|
||||
}
|
||||
case CLOSURE_BSDF_MICROFACET_BECKMANN_ANISO_ID:
|
||||
case CLOSURE_BSDF_MICROFACET_GGX_ANISO_ID:
|
||||
case CLOSURE_BSDF_MICROFACET_MULTI_GGX_ANISO_ID:
|
||||
case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ANISO_ID: {
|
||||
#ifdef __CAUSTICS_TRICKS__
|
||||
if (!kernel_data.integrator.caustics_reflective && (path_flag & PATH_RAY_DIFFUSE))
|
||||
break;
|
||||
#endif
|
||||
float3 weight = sd->svm_closure_weight * mix_weight;
|
||||
MicrofacetBsdf *bsdf = (MicrofacetBsdf *)bsdf_alloc(sd, sizeof(MicrofacetBsdf), weight);
|
||||
|
||||
if (bsdf) {
|
||||
bsdf->N = N;
|
||||
bsdf->extra = NULL;
|
||||
bsdf->T = stack_load_float3(stack, data_node.y);
|
||||
|
||||
/* rotate tangent */
|
||||
float rotation = stack_load_float(stack, data_node.z);
|
||||
|
||||
if (rotation != 0.0f)
|
||||
bsdf->T = rotate_around_axis(bsdf->T, bsdf->N, rotation * M_2PI_F);
|
||||
|
||||
/* compute roughness */
|
||||
float roughness = sqr(param1);
|
||||
float anisotropy = clamp(param2, -0.99f, 0.99f);
|
||||
|
||||
if (anisotropy < 0.0f) {
|
||||
bsdf->alpha_x = roughness / (1.0f + anisotropy);
|
||||
bsdf->alpha_y = roughness * (1.0f + anisotropy);
|
||||
}
|
||||
else {
|
||||
bsdf->alpha_x = roughness * (1.0f - anisotropy);
|
||||
bsdf->alpha_y = roughness / (1.0f - anisotropy);
|
||||
}
|
||||
|
||||
bsdf->ior = 0.0f;
|
||||
|
||||
if (type == CLOSURE_BSDF_MICROFACET_BECKMANN_ANISO_ID) {
|
||||
sd->flag |= bsdf_microfacet_beckmann_aniso_setup(bsdf);
|
||||
}
|
||||
else if (type == CLOSURE_BSDF_MICROFACET_GGX_ANISO_ID) {
|
||||
sd->flag |= bsdf_microfacet_ggx_aniso_setup(bsdf);
|
||||
}
|
||||
else if (type == CLOSURE_BSDF_MICROFACET_MULTI_GGX_ANISO_ID) {
|
||||
kernel_assert(stack_valid(data_node.w));
|
||||
bsdf->extra = (MicrofacetExtra *)closure_alloc_extra(sd, sizeof(MicrofacetExtra));
|
||||
if (bsdf->extra) {
|
||||
bsdf->extra->color = stack_load_float3(stack, data_node.w);
|
||||
bsdf->extra->cspec0 = make_float3(0.0f, 0.0f, 0.0f);
|
||||
bsdf->extra->clearcoat = 0.0f;
|
||||
sd->flag |= bsdf_microfacet_multi_ggx_aniso_setup(bsdf);
|
||||
}
|
||||
}
|
||||
else
|
||||
sd->flag |= bsdf_ashikhmin_shirley_aniso_setup(bsdf);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case CLOSURE_BSDF_ASHIKHMIN_VELVET_ID: {
|
||||
float3 weight = sd->svm_closure_weight * mix_weight;
|
||||
VelvetBsdf *bsdf = (VelvetBsdf *)bsdf_alloc(sd, sizeof(VelvetBsdf), weight);
|
||||
|
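The roughness/anisotropy remapping that this hunk duplicates in two closures is worth a quick numeric check. A minimal sketch in plain Python (illustration only, not the Cycles kernel API; `aniso_alphas` is a hypothetical helper name):

```python
def aniso_alphas(roughness: float, anisotropy: float):
    """Mirror of the kernel mapping above: split one scalar roughness
    into per-axis microfacet alphas based on signed anisotropy."""
    anisotropy = max(-0.99, min(0.99, anisotropy))  # clamp(param2, -0.99f, 0.99f)
    if anisotropy < 0.0:
        return (roughness / (1.0 + anisotropy), roughness * (1.0 + anisotropy))
    return (roughness * (1.0 - anisotropy), roughness / (1.0 - anisotropy))

# alpha_x * alpha_y == roughness**2 for any anisotropy, so the overall blur
# amount is preserved while the x/y ratio is skewed; the sign flips the axis:
print(aniso_alphas(0.25, 0.0))   # (0.25, 0.25)
print(aniso_alphas(0.25, 0.5))   # (0.125, 0.5)
print(aniso_alphas(0.25, -0.5))  # (0.5, 0.125)
```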
@@ -539,12 +539,6 @@ typedef enum ClosureType {
  CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID,
  CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID,
  CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID,
  CLOSURE_BSDF_MICROFACET_GGX_ANISO_ID,
  CLOSURE_BSDF_MICROFACET_GGX_ANISO_FRESNEL_ID,
  CLOSURE_BSDF_MICROFACET_MULTI_GGX_ANISO_ID,
  CLOSURE_BSDF_MICROFACET_MULTI_GGX_ANISO_FRESNEL_ID,
  CLOSURE_BSDF_MICROFACET_BECKMANN_ANISO_ID,
  CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ANISO_ID,
  CLOSURE_BSDF_ASHIKHMIN_VELVET_ID,
  CLOSURE_BSDF_PHONG_RAMP_ID,
  CLOSURE_BSDF_GLOSSY_TOON_ID,
@@ -605,10 +599,9 @@ typedef enum ClosureType {
#define CLOSURE_IS_BSDF_TRANSPARENT(type) (type == CLOSURE_BSDF_TRANSPARENT_ID)
#define CLOSURE_IS_BSDF_MULTISCATTER(type) \
  (type == CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID || \
   type == CLOSURE_BSDF_MICROFACET_MULTI_GGX_ANISO_ID || \
   type == CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_ID)
#define CLOSURE_IS_BSDF_MICROFACET(type) \
  ((type >= CLOSURE_BSDF_MICROFACET_GGX_ID && type <= CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ANISO_ID) || \
  ((type >= CLOSURE_BSDF_MICROFACET_GGX_ID && type <= CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID) || \
   (type >= CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID && \
    type <= CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_ID) || \
   (type == CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID))
@@ -616,8 +609,7 @@ typedef enum ClosureType {
  (type == CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID || \
   type == CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID || \
   type == CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID || \
   type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID || \
   type == CLOSURE_BSDF_MICROFACET_GGX_ANISO_FRESNEL_ID)
   type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID)
#define CLOSURE_IS_BSDF_OR_BSSRDF(type) (type <= CLOSURE_BSSRDF_PRINCIPLED_RANDOM_WALK_ID)
#define CLOSURE_IS_BSSRDF(type) \
  (type >= CLOSURE_BSSRDF_CUBIC_ID && type <= CLOSURE_BSSRDF_PRINCIPLED_RANDOM_WALK_ID)
@@ -2166,12 +2166,11 @@ NODE_DEFINE(AnisotropicBsdfNode)
  SOCKET_IN_FLOAT(surface_mix_weight, "SurfaceMixWeight", 0.0f, SocketType::SVM_INTERNAL);

  static NodeEnum distribution_enum;
  distribution_enum.insert("beckmann", CLOSURE_BSDF_MICROFACET_BECKMANN_ANISO_ID);
  distribution_enum.insert("GGX", CLOSURE_BSDF_MICROFACET_GGX_ANISO_ID);
  distribution_enum.insert("Multiscatter GGX", CLOSURE_BSDF_MICROFACET_MULTI_GGX_ANISO_ID);
  distribution_enum.insert("ashikhmin_shirley", CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ANISO_ID);
  SOCKET_ENUM(
      distribution, "Distribution", distribution_enum, CLOSURE_BSDF_MICROFACET_GGX_ANISO_ID);
  distribution_enum.insert("beckmann", CLOSURE_BSDF_MICROFACET_BECKMANN_ID);
  distribution_enum.insert("GGX", CLOSURE_BSDF_MICROFACET_GGX_ID);
  distribution_enum.insert("Multiscatter GGX", CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID);
  distribution_enum.insert("ashikhmin_shirley", CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID);
  SOCKET_ENUM(distribution, "Distribution", distribution_enum, CLOSURE_BSDF_MICROFACET_GGX_ID);

  SOCKET_IN_VECTOR(tangent, "Tangent", make_float3(0.0f, 0.0f, 0.0f), SocketType::LINK_TANGENT);

@@ -2186,7 +2185,7 @@ NODE_DEFINE(AnisotropicBsdfNode)

AnisotropicBsdfNode::AnisotropicBsdfNode() : BsdfNode(node_type)
{
  closure = CLOSURE_BSDF_MICROFACET_GGX_ANISO_ID;
  closure = CLOSURE_BSDF_MICROFACET_GGX_ID;
}

void AnisotropicBsdfNode::attributes(Shader *shader, AttributeRequestSet *attributes)
@@ -2205,7 +2204,7 @@ void AnisotropicBsdfNode::compile(SVMCompiler &compiler)
{
  closure = distribution;

  if (closure == CLOSURE_BSDF_MICROFACET_MULTI_GGX_ANISO_ID)
  if (closure == CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID)
    BsdfNode::compile(
        compiler, input("Roughness"), input("Anisotropy"), input("Rotation"), input("Color"));
  else
@@ -2299,7 +2298,7 @@ void GlossyBsdfNode::compile(SVMCompiler &compiler)
  if (closure == CLOSURE_BSDF_REFLECTION_ID)
    BsdfNode::compile(compiler, NULL, NULL);
  else if (closure == CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID)
    BsdfNode::compile(compiler, input("Roughness"), NULL, input("Color"));
    BsdfNode::compile(compiler, input("Roughness"), NULL, NULL, input("Color"));
  else
    BsdfNode::compile(compiler, input("Roughness"), NULL);
}
@@ -108,7 +108,12 @@ CYCLES_TEST(util_path "cycles_util;${OPENIMAGEIO_LIBRARIES};${BOOST_LIBRARIES}")
CYCLES_TEST(util_string "cycles_util;${OPENIMAGEIO_LIBRARIES};${BOOST_LIBRARIES}")
CYCLES_TEST(util_task "cycles_util;${OPENIMAGEIO_LIBRARIES};${BOOST_LIBRARIES};bf_intern_numaapi")
CYCLES_TEST(util_time "cycles_util;${OPENIMAGEIO_LIBRARIES};${BOOST_LIBRARIES}")
set_source_files_properties(util_avxf_avx_test.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_AVX_KERNEL_FLAGS}")
CYCLES_TEST(util_avxf_avx "cycles_util;bf_intern_numaapi;${OPENIMAGEIO_LIBRARIES};${BOOST_LIBRARIES}")
set_source_files_properties(util_avxf_avx2_test.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_AVX2_KERNEL_FLAGS}")
CYCLES_TEST(util_avxf_avx2 "cycles_util;bf_intern_numaapi;${OPENIMAGEIO_LIBRARIES};${BOOST_LIBRARIES}")

# Disable AVX tests on macOS. Rosetta has problems running them, and other
# platforms should be enough to verify AVX operations are implemented correctly.
if(NOT APPLE)
  set_source_files_properties(util_avxf_avx_test.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_AVX_KERNEL_FLAGS}")
  CYCLES_TEST(util_avxf_avx "cycles_util;bf_intern_numaapi;${OPENIMAGEIO_LIBRARIES};${BOOST_LIBRARIES}")
  set_source_files_properties(util_avxf_avx2_test.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_AVX2_KERNEL_FLAGS}")
  CYCLES_TEST(util_avxf_avx2 "cycles_util;bf_intern_numaapi;${OPENIMAGEIO_LIBRARIES};${BOOST_LIBRARIES}")
endif()
@@ -32,9 +32,13 @@ bool validate_cpu_capabilities()
#endif
}

#define VALIDATECPU \
#define INIT_AVX_TEST \
  if (!validate_cpu_capabilities()) \
    return;
    return; \
\
  const avxf avxf_a(0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f); \
  const avxf avxf_b(1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f); \
  const avxf avxf_c(1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f);

#define compare_vector_scalar(a, b) \
  for (size_t index = 0; index < a.size; index++) \
@@ -49,22 +53,19 @@ bool validate_cpu_capabilities()
    EXPECT_NEAR(a[index], b[index], abserror);

#define basic_test_vv(a, b, op) \
  VALIDATECPU \
  INIT_AVX_TEST \
  avxf c = a op b; \
  for (size_t i = 0; i < a.size; i++) \
    EXPECT_FLOAT_EQ(c[i], a[i] op b[i]);

/* vector op float tests */
#define basic_test_vf(a, b, op) \
  VALIDATECPU \
  INIT_AVX_TEST \
  avxf c = a op b; \
  for (size_t i = 0; i < a.size; i++) \
    EXPECT_FLOAT_EQ(c[i], a[i] op b);

const avxf avxf_a(0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f);
const avxf avxf_b(1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f);
const avxf avxf_c(1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f);
const float float_b = 1.5f;
static const float float_b = 1.5f;

TEST(util_avx, avxf_add_vv){basic_test_vv(avxf_a, avxf_b, +)} TEST(util_avx, avxf_sub_vv){
    basic_test_vv(avxf_a, avxf_b, -)} TEST(util_avx, avxf_mul_vv){
@@ -77,7 +78,7 @@ TEST(util_avx, avxf_add_vv){basic_test_vv(avxf_a, avxf_b, +)} TEST(util_avx, avx

TEST(util_avx, avxf_ctor)
{
  VALIDATECPU
  INIT_AVX_TEST
  compare_vector_scalar(avxf(7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 0.0f),
                        static_cast<float>(index));
  compare_vector_scalar(avxf(1.0f), 1.0f);
@@ -90,28 +91,28 @@ TEST(util_avx, avxf_ctor)

TEST(util_avx, avxf_sqrt)
{
  VALIDATECPU
  INIT_AVX_TEST
  compare_vector_vector(mm256_sqrt(avxf(1.0f, 4.0f, 9.0f, 16.0f, 25.0f, 36.0f, 49.0f, 64.0f)),
                        avxf(1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f));
}

TEST(util_avx, avxf_min_max)
{
  VALIDATECPU
  INIT_AVX_TEST
  compare_vector_vector(min(avxf_a, avxf_b), avxf_a);
  compare_vector_vector(max(avxf_a, avxf_b), avxf_b);
}

TEST(util_avx, avxf_set_sign)
{
  VALIDATECPU
  INIT_AVX_TEST
  avxf res = set_sign_bit<1, 0, 0, 0, 0, 0, 0, 0>(avxf_a);
  compare_vector_vector(res, avxf(0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, -0.8f));
}

TEST(util_avx, avxf_msub)
{
  VALIDATECPU
  INIT_AVX_TEST
  avxf res = msub(avxf_a, avxf_b, avxf_c);
  avxf exp = avxf((avxf_a[7] * avxf_b[7]) - avxf_c[7],
                  (avxf_a[6] * avxf_b[6]) - avxf_c[6],
@@ -126,7 +127,7 @@ TEST(util_avx, avxf_msub)

TEST(util_avx, avxf_madd)
{
  VALIDATECPU
  INIT_AVX_TEST
  avxf res = madd(avxf_a, avxf_b, avxf_c);
  avxf exp = avxf((avxf_a[7] * avxf_b[7]) + avxf_c[7],
                  (avxf_a[6] * avxf_b[6]) + avxf_c[6],
@@ -141,7 +142,7 @@ TEST(util_avx, avxf_madd)

TEST(util_avx, avxf_nmadd)
{
  VALIDATECPU
  INIT_AVX_TEST
  avxf res = nmadd(avxf_a, avxf_b, avxf_c);
  avxf exp = avxf(avxf_c[7] - (avxf_a[7] * avxf_b[7]),
                  avxf_c[6] - (avxf_a[6] * avxf_b[6]),
@@ -156,7 +157,7 @@ TEST(util_avx, avxf_nmadd)

TEST(util_avx, avxf_compare)
{
  VALIDATECPU
  INIT_AVX_TEST
  avxf a(0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f);
  avxf b(7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 0.0f);
  avxb res = a <= b;
@@ -175,28 +176,28 @@ TEST(util_avx, avxf_compare)

TEST(util_avx, avxf_permute)
{
  VALIDATECPU
  INIT_AVX_TEST
  avxf res = permute<3, 0, 1, 7, 6, 5, 2, 4>(avxf_b);
  compare_vector_vector(res, avxf(4.0f, 6.0f, 3.0f, 2.0f, 1.0f, 7.0f, 8.0f, 5.0f));
}

TEST(util_avx, avxf_blend)
{
  VALIDATECPU
  INIT_AVX_TEST
  avxf res = blend<0, 0, 1, 0, 1, 0, 1, 0>(avxf_a, avxf_b);
  compare_vector_vector(res, avxf(0.1f, 0.2f, 3.0f, 0.4f, 5.0f, 0.6f, 7.0f, 0.8f));
}

TEST(util_avx, avxf_shuffle)
{
  VALIDATECPU
  INIT_AVX_TEST
  avxf res = shuffle<0, 1, 2, 3, 1, 3, 2, 0>(avxf_a);
  compare_vector_vector(res, avxf(0.4f, 0.2f, 0.1f, 0.3f, 0.5f, 0.6f, 0.7f, 0.8f));
}

TEST(util_avx, avxf_cross)
{
  VALIDATECPU
  INIT_AVX_TEST
  avxf res = cross(avxf_b, avxf_c);
  compare_vector_vector_near(res,
                             avxf(0.0f,
@@ -212,7 +213,7 @@ TEST(util_avx, avxf_cross)

TEST(util_avx, avxf_dot3)
{
  VALIDATECPU
  INIT_AVX_TEST
  float den, den2;
  dot3(avxf_a, avxf_b, den, den2);
  EXPECT_FLOAT_EQ(den, 14.9f);
@@ -56,7 +56,8 @@ class GHOST_IXrGraphicsBinding {
                                   std::string *r_requirement_info) const = 0;
  virtual void initFromGhostContext(class GHOST_Context *ghost_ctx) = 0;
  virtual bool chooseSwapchainFormat(const std::vector<int64_t> &runtime_formats,
                                     int64_t *r_result) const = 0;
                                     int64_t &r_result,
                                     bool &r_is_srgb_format) const = 0;
  virtual std::vector<XrSwapchainImageBaseHeader *> createSwapchainImages(
      uint32_t image_count) = 0;
  virtual void submitToSwapchainImage(XrSwapchainImageBaseHeader *swapchain_image,
@@ -411,17 +411,17 @@ GHOST_WindowCocoa::GHOST_WindowCocoa(GHOST_SystemCocoa *systemCocoa,
                                                 NSTIFFPboardType,
                                                 nil]];

  if (state != GHOST_kWindowStateFullScreen) {
  if (is_dialog && parentWindow) {
    [parentWindow->getCocoaWindow() addChildWindow:m_window ordered:NSWindowAbove];
    [m_window setCollectionBehavior:NSWindowCollectionBehaviorFullScreenAuxiliary];
  }
  else {
    [m_window setCollectionBehavior:NSWindowCollectionBehaviorFullScreenPrimary];
  }

  if (state == GHOST_kWindowStateFullScreen)
    setState(GHOST_kWindowStateFullScreen);

  if (is_dialog && parentWindow) {
    [parentWindow->getCocoaWindow() addChildWindow:m_window ordered:NSWindowAbove];
  }

  setNativePixelSize();

  [pool drain];
@@ -88,18 +88,26 @@ void GHOST_XrContext::initialize(const GHOST_XrContextCreateInfo *create_info)
    printAvailableAPILayersAndExtensionsInfo();
  }

  m_gpu_binding_type = determineGraphicsBindingTypeToEnable(create_info);
  /* Multiple graphics binding extensions can be enabled, but only one will actually be used
   * (determined later on). */
  const std::vector<GHOST_TXrGraphicsBinding> graphics_binding_types =
      determineGraphicsBindingTypesToEnable(create_info);

  assert(m_oxr->instance == XR_NULL_HANDLE);
  createOpenXRInstance();
  createOpenXRInstance(graphics_binding_types);
  storeInstanceProperties();

  /* Multiple bindings may be enabled. Now that we know the runtime in use, settle for one. */
  m_gpu_binding_type = determineGraphicsBindingTypeToUse(graphics_binding_types);

  printInstanceInfo();
  if (isDebugMode()) {
    initDebugMessenger();
  }
}

void GHOST_XrContext::createOpenXRInstance()
void GHOST_XrContext::createOpenXRInstance(
    const std::vector<GHOST_TXrGraphicsBinding> &graphics_binding_types)
{
  XrInstanceCreateInfo create_info = {XR_TYPE_INSTANCE_CREATE_INFO};

@@ -108,7 +116,7 @@ void GHOST_XrContext::createOpenXRInstance()
  create_info.applicationInfo.apiVersion = XR_CURRENT_API_VERSION;

  getAPILayersToEnable(m_enabled_layers);
  getExtensionsToEnable(m_enabled_extensions);
  getExtensionsToEnable(graphics_binding_types, m_enabled_extensions);
  create_info.enabledApiLayerCount = m_enabled_layers.size();
  create_info.enabledApiLayerNames = m_enabled_layers.data();
  create_info.enabledExtensionCount = m_enabled_extensions.size();
@@ -126,6 +134,7 @@ void GHOST_XrContext::storeInstanceProperties()
  const std::map<std::string, GHOST_TXrOpenXRRuntimeID> runtime_map = {
      {"Monado(XRT) by Collabora et al", OPENXR_RUNTIME_MONADO},
      {"Oculus", OPENXR_RUNTIME_OCULUS},
      {"SteamVR/OpenXR", OPENXR_RUNTIME_STEAMVR},
      {"Windows Mixed Reality Runtime", OPENXR_RUNTIME_WMR}};
  decltype(runtime_map)::const_iterator runtime_map_iter;

@@ -392,14 +401,11 @@ static const char *openxr_ext_name_from_wm_gpu_binding(GHOST_TXrGraphicsBinding
/**
 * Gather an array of names for the extensions to enable.
 */
void GHOST_XrContext::getExtensionsToEnable(std::vector<const char *> &r_ext_names)
void GHOST_XrContext::getExtensionsToEnable(
    const std::vector<GHOST_TXrGraphicsBinding> &graphics_binding_types,
    std::vector<const char *> &r_ext_names)
{
  assert(m_gpu_binding_type != GHOST_kXrGraphicsUnknown);

  const char *gpu_binding = openxr_ext_name_from_wm_gpu_binding(m_gpu_binding_type);
  static std::vector<std::string> try_ext;

  try_ext.clear();
  std::vector<std::string> try_ext;

  /* Try enabling debug extension. */
#ifndef WIN32
@@ -408,12 +414,15 @@ void GHOST_XrContext::getExtensionsToEnable(std::vector<const char *> &r_ext_nam
  }
#endif

  r_ext_names.reserve(try_ext.size() + 1); /* + 1 for graphics binding extension. */
  r_ext_names.reserve(try_ext.size() + graphics_binding_types.size());

  /* Add graphics binding extension. */
  assert(gpu_binding);
  assert(openxr_extension_is_available(m_oxr->extensions, gpu_binding));
  r_ext_names.push_back(gpu_binding);
  /* Add graphics binding extensions (may be multiple ones, we'll settle for one to use later, once
   * we have more info about the runtime). */
  for (GHOST_TXrGraphicsBinding type : graphics_binding_types) {
    const char *gpu_binding = openxr_ext_name_from_wm_gpu_binding(type);
    assert(openxr_extension_is_available(m_oxr->extensions, gpu_binding));
    r_ext_names.push_back(gpu_binding);
  }

  for (const std::string &ext : try_ext) {
    if (openxr_extension_is_available(m_oxr->extensions, ext)) {
@@ -426,9 +435,10 @@ void GHOST_XrContext::getExtensionsToEnable(std::vector<const char *> &r_ext_nam
 * Decide which graphics binding extension to use based on
 * #GHOST_XrContextCreateInfo.gpu_binding_candidates and available extensions.
 */
GHOST_TXrGraphicsBinding GHOST_XrContext::determineGraphicsBindingTypeToEnable(
std::vector<GHOST_TXrGraphicsBinding> GHOST_XrContext::determineGraphicsBindingTypesToEnable(
    const GHOST_XrContextCreateInfo *create_info)
{
  std::vector<GHOST_TXrGraphicsBinding> result;
  assert(create_info->gpu_binding_candidates != NULL);
  assert(create_info->gpu_binding_candidates_count > 0);

@@ -437,11 +447,35 @@ GHOST_TXrGraphicsBinding GHOST_XrContext::determineGraphicsBindingTypeToEnable(
    const char *ext_name = openxr_ext_name_from_wm_gpu_binding(
        create_info->gpu_binding_candidates[i]);
    if (openxr_extension_is_available(m_oxr->extensions, ext_name)) {
      return create_info->gpu_binding_candidates[i];
      result.push_back(create_info->gpu_binding_candidates[i]);
    }
  }

  return GHOST_kXrGraphicsUnknown;
  if (result.empty()) {
    throw GHOST_XrException("No supported graphics binding found.");
  }

  return result;
}

GHOST_TXrGraphicsBinding GHOST_XrContext::determineGraphicsBindingTypeToUse(
    const std::vector<GHOST_TXrGraphicsBinding> &enabled_types)
{
  /* Return the first working type. */
  for (GHOST_TXrGraphicsBinding type : enabled_types) {
#ifdef WIN32
    /* The SteamVR OpenGL backend fails currently. Disable it and allow falling back to the DirectX
     * one. */
    if ((m_runtime_id == OPENXR_RUNTIME_STEAMVR) && (type == GHOST_kXrGraphicsOpenGL)) {
      continue;
    }
#endif

    assert(type != GHOST_kXrGraphicsUnknown);
    return type;
  }

  throw GHOST_XrException("Failed to determine a graphics binding to use.");
}

/** \} */ /* OpenXR API-Layers and Extensions */
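The control flow this diff introduces splits binding selection into two phases: enable every candidate whose OpenXR extension is present, then settle on one after the runtime is identified. Roughly, in plain Python pseudocode (hypothetical names, not the GHOST API):

```python
from collections import namedtuple

Binding = namedtuple("Binding", ["name", "extension"])

def bindings_to_enable(candidates, available_extensions):
    # Phase 1 (before instance creation): keep every candidate whose
    # extension the OpenXR loader reports as available.
    result = [b for b in candidates if b.extension in available_extensions]
    if not result:
        raise RuntimeError("No supported graphics binding found.")
    return result

def binding_to_use(enabled, runtime_id):
    # Phase 2 (after instance creation, runtime now known): settle for
    # the first binding that works on this runtime.
    for binding in enabled:
        # Runtime quirk from the diff: SteamVR's OpenGL backend currently
        # fails on Windows, so skip it and fall back to the DirectX binding.
        if runtime_id == "SteamVR/OpenXR" and binding.name == "OpenGL":
            continue
        return binding
    raise RuntimeError("Failed to determine a graphics binding to use.")
```

The point of the split is that the runtime can only be identified after `xrCreateInstance`, yet extensions must be requested before it, so both candidate bindings are enabled up front.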
@@ -49,6 +49,7 @@ struct GHOST_XrCustomFuncs {
enum GHOST_TXrOpenXRRuntimeID {
  OPENXR_RUNTIME_MONADO,
  OPENXR_RUNTIME_OCULUS,
  OPENXR_RUNTIME_STEAMVR,
  OPENXR_RUNTIME_WMR, /* Windows Mixed Reality */

  OPENXR_RUNTIME_UNKNOWN
@@ -114,7 +115,7 @@ class GHOST_XrContext : public GHOST_IXrContext {
  bool m_debug = false;
  bool m_debug_time = false;

  void createOpenXRInstance();
  void createOpenXRInstance(const std::vector<GHOST_TXrGraphicsBinding> &graphics_binding_types);
  void storeInstanceProperties();
  void initDebugMessenger();

@@ -126,9 +127,12 @@ class GHOST_XrContext : public GHOST_IXrContext {
  void initExtensions();
  void initExtensionsEx(std::vector<XrExtensionProperties> &extensions, const char *layer_name);
  void getAPILayersToEnable(std::vector<const char *> &r_ext_names);
  void getExtensionsToEnable(std::vector<const char *> &r_ext_names);
  GHOST_TXrGraphicsBinding determineGraphicsBindingTypeToEnable(
  void getExtensionsToEnable(const std::vector<GHOST_TXrGraphicsBinding> &graphics_binding_types,
                             std::vector<const char *> &r_ext_names);
  std::vector<GHOST_TXrGraphicsBinding> determineGraphicsBindingTypesToEnable(
      const GHOST_XrContextCreateInfo *create_info);
  GHOST_TXrGraphicsBinding determineGraphicsBindingTypeToUse(
      const std::vector<GHOST_TXrGraphicsBinding> &enabled_types);
};

#endif  // __GHOST_XRCONTEXT_H__
@@ -36,7 +36,7 @@

static bool choose_swapchain_format_from_candidates(std::vector<int64_t> gpu_binding_formats,
                                                    std::vector<int64_t> runtime_formats,
                                                    int64_t *r_result)
                                                    int64_t &r_result)
{
  if (gpu_binding_formats.empty()) {
    return false;
@@ -50,7 +50,7 @@ static bool choose_swapchain_format_from_candidates(std::vector<int64_t> gpu_bin
    return false;
  }

  *r_result = *res;
  r_result = *res;
  return true;
}

@@ -132,10 +132,20 @@ class GHOST_XrGraphicsBindingOpenGL : public GHOST_IXrGraphicsBinding {
  }

  bool chooseSwapchainFormat(const std::vector<int64_t> &runtime_formats,
                             int64_t *r_result) const override
                             int64_t &r_result,
                             bool &r_is_srgb_format) const override
  {
    std::vector<int64_t> gpu_binding_formats = {GL_RGBA8};
    return choose_swapchain_format_from_candidates(gpu_binding_formats, runtime_formats, r_result);
    std::vector<int64_t> gpu_binding_formats = {
        GL_RGBA8,
        GL_SRGB8_ALPHA8,
    };

    if (choose_swapchain_format_from_candidates(gpu_binding_formats, runtime_formats, r_result)) {
      r_is_srgb_format = (r_result == GL_SRGB8_ALPHA8);
      return true;
    }

    return false;
  }

  std::vector<XrSwapchainImageBaseHeader *> createSwapchainImages(uint32_t image_count) override
@@ -248,10 +258,19 @@ class GHOST_XrGraphicsBindingD3D : public GHOST_IXrGraphicsBinding {
  }

  bool chooseSwapchainFormat(const std::vector<int64_t> &runtime_formats,
                             int64_t *r_result) const override
                             int64_t &r_result,
                             bool &r_is_srgb_format) const override
  {
    std::vector<int64_t> gpu_binding_formats = {DXGI_FORMAT_R8G8B8A8_UNORM};
    return choose_swapchain_format_from_candidates(gpu_binding_formats, runtime_formats, r_result);
    std::vector<int64_t> gpu_binding_formats = {
        DXGI_FORMAT_R8G8B8A8_UNORM,
        DXGI_FORMAT_R8G8B8A8_UNORM_SRGB,
    };

    if (choose_swapchain_format_from_candidates(gpu_binding_formats, runtime_formats, r_result)) {
      r_is_srgb_format = (r_result == DXGI_FORMAT_R8G8B8A8_UNORM_SRGB);
      return true;
    }
    return false;
  }

  std::vector<XrSwapchainImageBaseHeader *> createSwapchainImages(uint32_t image_count) override
@@ -375,14 +375,6 @@ static void ghost_xr_draw_view_info_from_view(const XrView &view, GHOST_XrDrawVi
  r_info.fov.angle_down = view.fov.angleDown;
}

static bool ghost_xr_draw_view_expects_srgb_buffer(const GHOST_XrContext *context)
{
  /* Monado seems to be faulty and doesn't do OETF transform correctly. So expect a SRGB buffer to
   * compensate. You get way too dark rendering without this, it's pretty obvious (even in the
   * default startup scene). */
  return (context->getOpenXRRuntimeID() == OPENXR_RUNTIME_MONADO);
}

void GHOST_XrSession::drawView(GHOST_XrSwapchain &swapchain,
                               XrCompositionLayerProjectionView &r_proj_layer_view,
                               XrSpaceLocation &view_location,
@@ -397,7 +389,7 @@ void GHOST_XrSession::drawView(GHOST_XrSwapchain &swapchain,
  r_proj_layer_view.fov = view.fov;
  swapchain.updateCompositionLayerProjectViewSubImage(r_proj_layer_view.subImage);

  draw_view_info.expects_srgb_buffer = ghost_xr_draw_view_expects_srgb_buffer(m_context);
  draw_view_info.expects_srgb_buffer = swapchain.isBufferSRGB();
  draw_view_info.ofsx = r_proj_layer_view.subImage.imageRect.offset.x;
  draw_view_info.ofsy = r_proj_layer_view.subImage.imageRect.offset.y;
  draw_view_info.width = r_proj_layer_view.subImage.imageRect.extent.width;
@@ -68,7 +68,7 @@ GHOST_XrSwapchain::GHOST_XrSwapchain(GHOST_IXrGraphicsBinding &gpu_binding,
             "Failed to get swapchain image formats.");
  assert(swapchain_formats.size() == format_count);

  if (!gpu_binding.chooseSwapchainFormat(swapchain_formats, &chosen_format)) {
  if (!gpu_binding.chooseSwapchainFormat(swapchain_formats, chosen_format, m_is_srgb_buffer)) {
    throw GHOST_XrException(
        "Error: No format matching OpenXR runtime supported swapchain formats found.");
  }
@@ -95,7 +95,8 @@ GHOST_XrSwapchain::GHOST_XrSwapchain(GHOST_IXrGraphicsBinding &gpu_binding,
GHOST_XrSwapchain::GHOST_XrSwapchain(GHOST_XrSwapchain &&other)
    : m_oxr(std::move(other.m_oxr)),
      m_image_width(other.m_image_width),
      m_image_height(other.m_image_height)
      m_image_height(other.m_image_height),
      m_is_srgb_buffer(other.m_is_srgb_buffer)
{
  /* Prevent xrDestroySwapchain call for the moved out item. */
  other.m_oxr = nullptr;
@@ -132,6 +133,11 @@ void GHOST_XrSwapchain::updateCompositionLayerProjectViewSubImage(XrSwapchainSub
  r_sub_image.imageRect.extent = {m_image_width, m_image_height};
}

bool GHOST_XrSwapchain::isBufferSRGB()
{
  return m_is_srgb_buffer;
}

void GHOST_XrSwapchain::releaseImage()
{
  XrSwapchainImageReleaseInfo release_info = {XR_TYPE_SWAPCHAIN_IMAGE_RELEASE_INFO};
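The format negotiation these hunks converge on — walk the backend's candidate formats in preference order, take the first one the runtime also offers, and remember whether it is an sRGB format — reduces to a few lines. A sketch in plain Python (standard OpenGL enum values shown for concreteness; this is not the GHOST code itself):

```python
def choose_swapchain_format(preferred, runtime_formats, srgb_formats):
    """Return (format, is_srgb) for the first preferred format the runtime
    supports, or (None, False) when there is no overlap."""
    for fmt in preferred:  # candidate order encodes preference
        if fmt in runtime_formats:
            return fmt, fmt in srgb_formats
    return None, False

GL_RGBA8, GL_SRGB8_ALPHA8 = 0x8058, 0x8C43  # standard OpenGL enum values

fmt, is_srgb = choose_swapchain_format(
    preferred=[GL_RGBA8, GL_SRGB8_ALPHA8],
    runtime_formats={GL_SRGB8_ALPHA8},
    srgb_formats={GL_SRGB8_ALPHA8},
)
print(hex(fmt), is_srgb)  # 0x8c43 True
```

The `m_is_srgb_buffer` flag recorded here is what `drawView()` now consults instead of the old hard-coded per-runtime Monado workaround.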
@@ -38,9 +38,12 @@ class GHOST_XrSwapchain {

  void updateCompositionLayerProjectViewSubImage(XrSwapchainSubImage &r_sub_image);

  bool isBufferSRGB();

 private:
  std::unique_ptr<OpenXRSwapchainData> m_oxr; /* Could use stack, but PImpl is preferable. */
  int32_t m_image_width, m_image_height;
  bool m_is_srgb_buffer = false;
};

#endif  // GHOST_XRSWAPCHAIN_H
@@ -204,7 +204,8 @@ extern size_t (*MEM_get_peak_memory)(void) ATTR_WARN_UNUSED_RESULT;
  do { \
    typeof(&(v)) _v = &(v); \
    if (*_v) { \
      MEM_freeN(*_v); \
      /* Cast so we can free constant arrays. */ \
      MEM_freeN((void *)*_v); \
      *_v = NULL; \
    } \
  } while (0)
@@ -117,6 +117,13 @@ void bl_locale_set(const char *locale)

#undef LOCALE_INFO
  }
  // Extra catch on `std::runtime_error` is needed for macOS/Clang as it seems that exceptions
  // like `boost::locale::conv::conversion_error` (which inherit from `std::runtime_error`) are
  // not caught by their ancestor `std::exception`. See
  // https://developer.blender.org/T88877#1177108 .
  catch (std::runtime_error const &e) {
    std::cout << "bl_locale_set(" << locale << "): " << e.what() << " \n";
  }
  catch (std::exception const &e) {
    std::cout << "bl_locale_set(" << locale << "): " << e.what() << " \n";
  }
@@ -14,7 +14,17 @@ const char *osx_user_locale()
  CFLocaleRef myCFLocale = CFLocaleCopyCurrent();
  NSLocale *myNSLocale = (NSLocale *)myCFLocale;
  [myNSLocale autorelease];
  NSString *nsIdentifier = [myNSLocale localeIdentifier];

  // This produces gettext-invalid locale in recent macOS versions (11.4),
  // like `ko-Kore_KR` instead of `ko_KR`. See T88877.
  // NSString *nsIdentifier = [myNSLocale localeIdentifier];

  const NSString *nsIdentifier = [myNSLocale languageCode];
  const NSString *const nsIdentifier_country = [myNSLocale countryCode];
  if ([nsIdentifier length] != 0 && [nsIdentifier_country length] != 0) {
    nsIdentifier = [NSString stringWithFormat:@"%@_%@", nsIdentifier, nsIdentifier_country];
  }

  user_locale = ::strdup([nsIdentifier UTF8String]);
  [pool drain];
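The fix composes a gettext-style locale from the separate language and country codes instead of trusting the raw identifier. In outline (plain Python, values taken from the T88877 example above):

```python
language, country = "ko", "KR"  # what languageCode / countryCode return
# Join only when both parts are non-empty, mirroring the length checks above.
identifier = f"{language}_{country}" if language and country else language
print(identifier)  # ko_KR  (instead of the gettext-invalid "ko-Kore_KR")
```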
@@ -16,4 +16,4 @@ if [ -n "$XDG_RUNTIME_DIR" ]; then
fi

# Run Blender
$SNAP/blender
$SNAP/blender "$@"
@@ -164,7 +164,7 @@ class ScalarBlendModifier(StrokeShader):
            v1 = facm * v1 + fac * v1 / v2 if v2 != 0.0 else v1
        elif self.blend_type == 'DIFFERENCE':
            v1 = facm * v1 + fac * abs(v1 - v2)
        elif self.blend_type == 'MININUM':
        elif self.blend_type == 'MINIMUM':
            v1 = min(fac * v2, v1)
        elif self.blend_type == 'MAXIMUM':
            v1 = max(fac * v2, v1)
@@ -314,7 +314,7 @@ def do_previews(do_objects, do_collections, do_scenes, do_data_intern):
            do_save = True

    if do_data_intern:
        bpy.ops.wm.previews_clear(id_type='SHADING')
        bpy.ops.wm.previews_clear(id_type={'SHADING'})
        bpy.ops.wm.previews_ensure()

    render_contexts = {}
@@ -439,7 +439,7 @@ def do_previews(do_objects, do_collections, do_scenes, do_data_intern):

def do_clear_previews(do_objects, do_collections, do_scenes, do_data_intern):
    if do_data_intern:
        bpy.ops.wm.previews_clear(id_type='SHADING')
        bpy.ops.wm.previews_clear(id_type={'SHADING'})

    if do_objects:
        for ob in ids_nolib(bpy.data.objects):
@@ -3604,6 +3604,12 @@ def km_grease_pencil_stroke_weight_mode(params):
        *_template_items_context_panel("VIEW3D_PT_gpencil_weight_context_menu", params.context_menu_event),
    ])

    if params.select_mouse == 'LEFTMOUSE':
        # Bone selection for combined weight paint + pose mode.
        items.extend([
            ("view3d.select", {"type": 'LEFTMOUSE', "value": 'PRESS', "ctrl": True}, None),
        ])

    return keymap

@@ -260,7 +260,13 @@ class NLA_OT_bake(Operator):

    def execute(self, context):
        from bpy_extras import anim_utils
        do_pose = 'POSE' in self.bake_types
        do_object = 'OBJECT' in self.bake_types

        objects = context.selected_editable_objects
        if do_pose and not do_object:
            objects = [obj for obj in objects if obj.pose is not None]

        object_action_pairs = (
            [(obj, getattr(obj.animation_data, "action", None)) for obj in objects]
            if self.use_current_action else
@@ -271,8 +277,8 @@ class NLA_OT_bake(Operator):
            object_action_pairs,
            frames=range(self.frame_start, self.frame_end + 1, self.step),
            only_selected=self.only_selected,
            do_pose='POSE' in self.bake_types,
            do_object='OBJECT' in self.bake_types,
            do_pose=do_pose,
            do_object=do_object,
            do_visual_keying=self.visual_keying,
            do_constraint_clear=self.clear_constraints,
            do_parents_clear=self.clear_parents,
@@ -486,7 +486,10 @@ class CLIP_OT_constraint_to_fcurve(Operator):
            return {'FINISHED'}

        # Find start and end frames.
        if con.object:
        if con.type == 'CAMERA_SOLVER':
            # Camera solver constraint is always referring to camera.
            tracks = clip.tracking.tracks
        elif con.object:
            tracking_object = clip.tracking.objects.get(con.object, None)
            if not tracking_object:
                self.report({'ERROR'}, "Motion Tracking object not found")
@@ -163,7 +163,12 @@ def extend(obj, EXTEND_MODE):
        l_b_uv = [l[uv_act].uv for l in l_b]

        if EXTEND_MODE == 'LENGTH_AVERAGE':
            fac = edge_lengths[l_b[2].edge.index][0] / edge_lengths[l_a[1].edge.index][0]
            d1 = edge_lengths[l_a[1].edge.index][0]
            d2 = edge_lengths[l_b[2].edge.index][0]
            try:
                fac = d2 / d1
            except ZeroDivisionError:
                fac = 1.0
        elif EXTEND_MODE == 'LENGTH':
            a0, b0, c0 = l_a[3].vert.co, l_a[0].vert.co, l_b[3].vert.co
            a1, b1, c1 = l_a[2].vert.co, l_a[1].vert.co, l_b[2].vert.co
@@ -1078,7 +1078,7 @@ class VIEW3D_MT_mirror(Menu):
        for axis_index, axis_name in enumerate("XYZ"):
            props = layout.operator("transform.mirror", text=f"{axis_name!s} {space_name!s}")
            props.constraint_axis[axis_index] = True
            props.orient_type = 'GLOBAL'
            props.orient_type = space_id

        if space_id == 'GLOBAL':
            layout.separator()
@@ -6768,7 +6768,10 @@ class VIEW3D_PT_overlay_gpencil_options(Panel):

        if context.object.mode in {'PAINT_GPENCIL', 'VERTEX_GPENCIL'}:
            layout.label(text="Vertex Paint")
            layout.prop(overlay, "gpencil_vertex_paint_opacity", text="Opacity", slider=True)
            row = layout.row()
            shading = VIEW3D_PT_shading.get_shading(context)
            row.enabled = shading.type not in {'WIREFRAME', 'RENDERED'}
            row.prop(overlay, "gpencil_vertex_paint_opacity", text="Opacity", slider=True)


class VIEW3D_PT_quad_view(Panel):
@@ -799,7 +799,8 @@ class VIEW3D_PT_sculpt_dyntopo(Panel, View3DPaintPanel):

    @classmethod
    def poll(cls, context):
        return (context.sculpt_object and context.tool_settings.sculpt)
        paint_settings = cls.paint_settings(context)
        return (context.sculpt_object and context.tool_settings.sculpt and paint_settings)

    def draw_header(self, context):
        is_popover = self.is_popover
release/steam/README.md (new file)
@@ -0,0 +1,70 @@
Creating Steam builds for Blender
=================================

This script automates creation of the Steam files: download of the archives,
extraction of the archives, preparation of the build scripts (VDF files), actual
building of the Steam game files.

Requirements
============

* MacOS machine - Tested on Catalina 10.15.6. Extracting contents from the DMG
  archive did not work on Windows nor on Linux using 7-zip. All DMG archives tested
  failed to be extracted. As such only MacOS is known to work.
* Steam SDK downloaded from SteamWorks - The `steamcmd` is used to generate the
  Steam game files. The path to the `steamcmd` is what is actually needed.
* SteamWorks credentials - Needed to log in using `steamcmd`.
* Login to SteamWorks with the `steamcmd` from the command-line at least once -
  Needed to ensure the user is properly logged in. On a new machine the user
  will have to go through two-factor authentication.
* App ID and Depot IDs - Needed to create the VDF files.
* Python 3.x - 3.7 was tested.
* Base URL - for downloading the archives.

Usage
=====

```bash
$ export STEAMUSER=SteamUserName
$ export STEAMPW=SteamUserPW
$ export BASEURL=https://download.blender.org/release/Blender2.83/
$ export VERSION=2.83.3
$ export APPID=appidnr
$ export WINID=winidnr
$ export LINID=linuxidnr
$ export MACOSID=macosidnr

# log in to SteamWorks from command-line at least once

$ ../sdk/tools/ContentBuilder/builder_osx/steamcmd +login $STEAMUSER $STEAMPW

# once that has been done we can now actually start our tool

$ python3.7 create_steam_builds.py --baseurl $BASEURL --version $VERSION --appid $APPID --winid $WINID --linuxid $LINID --macosid $MACOSID --steamuser $STEAMUSER --steampw $STEAMPW --steamcmd ../sdk/tools/ContentBuilder/builder_osx/steamcmd
```

All arguments in the above example are required.

At the start the tool will log in using `steamcmd`. This is necessary to let the
Steam SDK update itself if necessary.

There are a few optional arguments:

* `--dryrun`: If set, building the game files will not actually happen. A set of
  log files and a preview manifest per depot will be created in the output folder.
  This can be used to double-check everything works as expected.
* `--skipdl`: If set, downloading of the archives is skipped. The tool expects the
  archives to already exist in the correct content location.
* `--skipextract`: If set, extraction of all archives is skipped. The tool expects
  the archives to already have been correctly extracted in the content location.

Run the tool with `-h` for detailed information on each argument.

The content and output folders are generated by appending the version
without dots to the words `content` and `output` respectively, e.g. `content2833`
and `output2833`. These folders are created next to the tool.

From all `.template` files the Steam build scripts will be generated, also in the
same directory as the tool. The files will have the extension `.vdf`.

In case of errors the tool will have a non-zero return code.
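As a concrete illustration of that folder naming (this mirrors the `VERSIONNODOTS` logic in `create_steam_builds.py` below):

```python
version = "2.83.3"
version_no_dots = version.replace(".", "")  # "2833"
print(f"content{version_no_dots}")          # content2833
print(f"output{version_no_dots}")           # output2833
```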
release/steam/blender_app_build.vdf.template (new file)
@@ -0,0 +1,17 @@
"appbuild"
{
	"appid" "[APPID]"
	"desc" "Blender [VERSION]" // description for this build
	"buildoutput" "./[OUTPUT]" // build output folder for .log, .csm & .csd files, relative to location of this file
	"contentroot" "./[CONTENT]" // root content folder, relative to location of this file
	"setlive" "" // branch to set live after successful build, none if empty
	"preview" "[DRYRUN]" // 1 to enable preview builds, 0 to commit build to steampipe
	"local" "" // set to file path of local content server

	"depots"
	{
		"[WINID]" "depot_build_win.vdf"
		"[LINUXID]" "depot_build_linux.vdf"
		"[MACOSID]" "depot_build_macos.vdf"
	}
}
release/steam/create_steam_builds.py (new file)
@@ -0,0 +1,397 @@
#!/usr/bin/env python3

import argparse
import pathlib
import requests
import shutil
import subprocess
from typing import Callable, Iterator, List, Tuple

# supported archive and platform endings, used to create actual archive names
archive_endings = ["windows64.zip", "linux64.tar.xz", "macOS.dmg"]


def add_optional_argument(option: str, help: str) -> None:
    """Add an optional argument

    Args:
        option (str): Option to add
        help (str): Help description for the argument
    """
    global parser
    parser.add_argument(option, help=help, action='store_const', const=1)


def blender_archives(version: str) -> Iterator[str]:
    """Generator for Blender archives for version.

    Yields for items in archive_endings an archive name in the form of
    blender-{version}-{ending}.

    Args:
        version (str): Version string of the form 2.83.2

    Yields:
        Iterator[str]: Name in the form of blender-{version}-{ending}
    """
    global archive_endings

    for ending in archive_endings:
        yield f"blender-{version}-{ending}"


def get_archive_type(archive_type: str, version: str) -> str:
    """Return the archive of given type and version.

    Args:
        archive_type (str): extension for archive type to check for
        version (str): Version string in the form 2.83.2

    Raises:
        Exception: Raised when the archive type isn't found

    Returns:
        str: archive name for given type
    """
    for archive in blender_archives(version):
        if archive.endswith(archive_type):
            return archive
    raise Exception("Unknown archive type")


def execute_command(cmd: List[str], name: str, errcode: int, cwd=".", capture_output=True) -> str:
    """Execute the given command.

    Returns the process stdout upon success if any.

    On error print a message naming the command that has failed. Print stdout
    and stderr of the process if any, and then exit with given error code.

    Args:
        cmd (List[str]): Command in list format, each argument as its own item
        name (str): Name of command to use when printing to command-line
        errcode (int): Error code to use in case of exit()
        cwd (str, optional): Folder to use as current work directory for command
                             execution. Defaults to ".".
        capture_output (bool, optional): Whether to capture command output or not.
                                         Defaults to True.

    Returns:
        str: stdout if any, or empty string
    """
    cmd_process = subprocess.run(
        cmd, capture_output=capture_output, encoding="UTF-8", cwd=cwd)
    if cmd_process.returncode == 0:
        if cmd_process.stdout:
            return cmd_process.stdout
        else:
            return ""
    else:
        print(f"ERROR: {name} failed.")
        if cmd_process.stdout:
            print(cmd_process.stdout)
        if cmd_process.stderr:
            print(cmd_process.stderr)
        exit(errcode)
    return ""


def download_archives(base_url: str, archives: Callable[[str], Iterator[str]], version: str, dst_dir: pathlib.Path):
    """Download archives from the given base_url.

    Archives is a generator for Blender archive names based on version.

    Archive names are appended to the base_url to load from, and appended to
    dst_dir to save to.

    Args:
        base_url (str): Base URL to load archives from
        archives (Callable[[str], Iterator[str]]): Generator for Blender archive
                                                   names based on version
        version (str): Version string in the form of 2.83.2
        dst_dir (pathlib.Path): Download destination
    """
    if base_url[-1] != '/':
        base_url = base_url + '/'

    for archive in archives(version):
        download_url = f"{base_url}{archive}"
        target_file = dst_dir.joinpath(archive)
        download_file(download_url, target_file)


def download_file(from_url: str, to_file: pathlib.Path) -> None:
    """Download from_url as to_file.

    Actual downloading will be skipped if --skipdl is given on the command-line.

    Args:
        from_url (str): Full URL to resource to download
        to_file (pathlib.Path): Full path to save downloaded resource as
    """
    global args

    if not args.skipdl or not to_file.exists():
        print(f"Downloading {from_url}")
        with open(to_file, "wb") as download_zip:
            response = requests.get(from_url)
            if response.status_code != requests.codes.ok:
                print(f"ERROR: failed to download {from_url} (status code: {response.status_code})")
                exit(1313)
            download_zip.write(response.content)
    else:
        print(f"Downloading {from_url} skipped")
    print("   ... OK")


def copy_contents_from_dmg_to_path(dmg_file: pathlib.Path, dst: pathlib.Path) -> None:
    """Copy the contents of the given DMG file to the destination folder.

    Args:
        dmg_file (pathlib.Path): Full path to DMG archive to extract from
        dst (pathlib.Path): Full path to destination to extract to
    """
    hdiutil_attach = ["hdiutil",
                      "attach",
                      "-readonly",
                      f"{dmg_file}"
                      ]
    attached = execute_command(hdiutil_attach, "hdiutil attach", 1)

    # Last line of output is what we want, it is of the form
    # /dev/somedisk Apple_HFS /Volumes/Blender
    # We want to retain the mount point, and the folder the mount is
    # created on. The mounted disk we need for detaching, the folder we
    # need to be able to copy the contents to where we can use them
    attachment_items = attached.splitlines()[-1].split()
    mounted_disk = attachment_items[0]
    source_location = pathlib.Path(attachment_items[2], "Blender.app")

    print(f"{source_location} -> {dst}")

    shutil.copytree(source_location, dst)

    hdiutil_detach = ["hdiutil",
                      "detach",
                      f"{mounted_disk}"
                      ]
    execute_command(hdiutil_detach, "hdiutil detach", 2)


def create_build_script(template_name: str, vars: List[Tuple[str, str]]) -> pathlib.Path:
    """
    Create the Steam build script

    Use the given template and template variable tuple list.

    Returns pathlib.Path to the created script.

    Args:
        template_name (str): Name of the .template file to process
        vars (List[Tuple[str, str]]): List of (placeholder, value) pairs to substitute

    Returns:
        pathlib.Path: Full path to the generated script
    """
    build_script = pathlib.Path(".", template_name).read_text()
    for var in vars:
        build_script = build_script.replace(var[0], var[1])
    build_script_file = template_name.replace(".template", "")
    build_script_path = pathlib.Path(".", build_script_file)
    build_script_path.write_text(build_script)
    return build_script_path


def clean_up() -> None:
    """Remove intermediate files depending on given command-line arguments
    """
    global content_location, args

    if not args.leavearch and not args.leaveextracted:
        shutil.rmtree(content_location)

    if args.leavearch and not args.leaveextracted:
        shutil.rmtree(content_location.joinpath(zip_extract_folder))
        shutil.rmtree(content_location.joinpath(tarxz_extract_folder))
        shutil.rmtree(content_location.joinpath(dmg_extract_folder))

    if args.leaveextracted and not args.leavearch:
        import os
        os.remove(content_location.joinpath(zipped_blender))
        os.remove(content_location.joinpath(tarxz_blender))
        os.remove(content_location.joinpath(dmg_blender))


def extract_archive(archive: str, extract_folder_name: str,
                    cmd: List[str], errcode: int) -> None:
    """Extract all files from archive to given folder name.

    Will not extract if the target folder already exists, or if --skipextract
    was given on the command-line.

    Args:
        archive (str): Archive name to extract
        extract_folder_name (str): Folder name to extract to
        cmd (List[str]): Command with arguments to use
        errcode (int): Error code to use for exit()
    """
    global args, content_location

    extract_location = content_location.joinpath(extract_folder_name)

    pre_extract = set(content_location.glob("*"))

    if not args.skipextract or not extract_location.exists():
        print(f"Extracting files from {archive}...")
        cmd.append(content_location.joinpath(archive))
        execute_command(cmd, cmd[0], errcode, cwd=content_location)
        # in case we use a non-release archive the naming will be incorrect.
        # simply rename to expected target name
        post_extract = set(content_location.glob("*"))
        diff_extract = post_extract - pre_extract
        if extract_location not in diff_extract:
            folder_to_rename = list(diff_extract)[0]
            folder_to_rename.rename(extract_location)
        print("   OK")
    else:
        print(f"Skipping extraction {archive}!")

# ==============================================================================


parser = argparse.ArgumentParser()

parser.add_argument("--baseurl", required=True,
                    help="The base URL for files to download, "
                         "i.e. https://download.blender.org/release/Blender2.83/")

parser.add_argument("--version", required=True,
                    help="The Blender version to release, in the form 2.83.3")

parser.add_argument("--appid", required=True,
                    help="The Blender App ID on Steam")
parser.add_argument("--winid", required=True,
                    help="The Windows depot ID")
parser.add_argument("--linuxid", required=True,
                    help="The Linux depot ID")
parser.add_argument("--macosid", required=True,
                    help="The MacOS depot ID")

parser.add_argument("--steamcmd", required=True,
                    help="Path to the steamcmd")
parser.add_argument("--steamuser", required=True,
                    help="The login for the Steam builder user")
parser.add_argument("--steampw", required=True,
                    help="Login password for the Steam builder user")

add_optional_argument("--dryrun",
                      "If set the Steam files will not be uploaded")
add_optional_argument("--leavearch",
                      help="If set don't clean up the downloaded archives")
add_optional_argument("--leaveextracted",
                      help="If set don't clean up the extraction folders")
add_optional_argument("--skipdl",
                      help="If set downloading the archives is skipped if it already exists locally.")
add_optional_argument("--skipextract",
                      help="If set skips extracting of archives. The tool assumes the archives "
                           "have already been extracted to their correct locations")

args = parser.parse_args()

VERSIONNODOTS = args.version.replace('.', '')
OUTPUT = f"output{VERSIONNODOTS}"
CONTENT = f"content{VERSIONNODOTS}"

# ===== set up main locations

content_location = pathlib.Path(".", CONTENT).absolute()
output_location = pathlib.Path(".", OUTPUT).absolute()

content_location.mkdir(parents=True, exist_ok=True)
output_location.mkdir(parents=True, exist_ok=True)

# ===== login

# Logging into Steam once to ensure the SDK updates itself properly. If we don't
# do that the combined +login and +run_app_build_http at the end of the tool
# will fail.
steam_login = [args.steamcmd,
               "+login",
               args.steamuser,
               args.steampw,
               "+quit"
               ]
print("Logging in to Steam...")
execute_command(steam_login, "Login to Steam", 10)
print("   OK")

# ===== prepare Steam build scripts

template_vars = [
    ("[APPID]", args.appid),
    ("[OUTPUT]", OUTPUT),
    ("[CONTENT]", CONTENT),
    ("[VERSION]", args.version),
    ("[WINID]", args.winid),
    ("[LINUXID]", args.linuxid),
    ("[MACOSID]", args.macosid),
    ("[DRYRUN]", f"{args.dryrun}" if args.dryrun else "0")
]

blender_app_build = create_build_script(
    "blender_app_build.vdf.template", template_vars)
create_build_script("depot_build_win.vdf.template", template_vars)
create_build_script("depot_build_linux.vdf.template", template_vars)
create_build_script("depot_build_macos.vdf.template", template_vars)

# ===== download archives

download_archives(args.baseurl, blender_archives,
                  args.version, content_location)

# ===== set up file and folder names

zipped_blender = get_archive_type("zip", args.version)
zip_extract_folder = zipped_blender.replace(".zip", "")
tarxz_blender = get_archive_type("tar.xz", args.version)
tarxz_extract_folder = tarxz_blender.replace(".tar.xz", "")
dmg_blender = get_archive_type("dmg", args.version)
dmg_extract_folder = dmg_blender.replace(".dmg", "")

# ===== extract

unzip_cmd = ["unzip", "-q"]
extract_archive(zipped_blender, zip_extract_folder, unzip_cmd, 3)

untarxz_cmd = ["tar", "-xf"]
extract_archive(tarxz_blender, tarxz_extract_folder, untarxz_cmd, 4)

if not args.skipextract or not content_location.joinpath(dmg_extract_folder).exists():
    print("Extracting files from Blender MacOS archive...")
    blender_dmg = content_location.joinpath(dmg_blender)
    target_location = content_location.joinpath(
        dmg_extract_folder, "Blender.app")
    copy_contents_from_dmg_to_path(blender_dmg, target_location)
    print("   OK")
else:
    print("Skipping extraction of .dmg!")

# ===== building

print("Build Steam game files...")
steam_build = [args.steamcmd,
               "+login",
               args.steamuser,
               args.steampw,
               "+run_app_build_http",
               blender_app_build.absolute(),
               "+quit"
               ]
execute_command(steam_build, "Build with steamcmd", 13)
print("   OK")

clean_up()
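The `.template` to `.vdf` generation done by `create_build_script` above is plain string replacement over the whole file. A tiny standalone illustration (the ID values are hypothetical, not Blender's real Steam IDs):

```python
template = '"appid" "[APPID]"\n"desc" "Blender [VERSION]"'
for placeholder, value in [("[APPID]", "123456"), ("[VERSION]", "2.83.3")]:
    template = template.replace(placeholder, value)
print(template)
# "appid" "123456"
# "desc" "Blender 2.83.3"
```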
release/steam/depot_build_linux.vdf.template (new file)
@@ -0,0 +1,31 @@
"DepotBuildConfig"
{
	// Set your assigned depot ID here
	"DepotID" "[LINUXID]"

	// Set a root for all content.
	// All relative paths specified below (LocalPath in FileMapping entries, and FileExclusion paths)
	// will be resolved relative to this root.
	// If you don't define ContentRoot, then it will be assumed to be
	// the location of this script file, which probably isn't what you want
	"ContentRoot" "./blender-[VERSION]-linux64/"

	// include all files recursively
	"FileMapping"
	{
		// This can be a full path, or a path relative to ContentRoot
		"LocalPath" "*"

		// This is a path relative to the install folder of your game
		"DepotPath" "."

		// If LocalPath contains wildcards, setting this means that all
		// matching files within subdirectories of LocalPath will also
		// be included.
		"recursive" "1"
	}

	// but exclude all symbol files
	// This can be a full path, or a path relative to ContentRoot
	"FileExclusion" "*.pdb"
}
release/steam/depot_build_macos.vdf.template (new file)
@@ -0,0 +1,30 @@
"DepotBuildConfig"
{
	// Set your assigned depot ID here
	"DepotID" "[MACOSID]"

	// Set a root for all content.
	// All relative paths specified below (LocalPath in FileMapping entries, and FileExclusion paths)
	// will be resolved relative to this root.
	// If you don't define ContentRoot, then it will be assumed to be
	// the location of this script file, which probably isn't what you want
	"ContentRoot" "./blender-[VERSION]-macOS/"
	// include all files recursively
	"FileMapping"
	{
		// This can be a full path, or a path relative to ContentRoot
		"LocalPath" "*"

		// This is a path relative to the install folder of your game
		"DepotPath" "."

		// If LocalPath contains wildcards, setting this means that all
		// matching files within subdirectories of LocalPath will also
		// be included.
		"recursive" "1"
	}

	// but exclude all symbol files
	// This can be a full path, or a path relative to ContentRoot
	"FileExclusion" "*.pdb"
}
release/steam/depot_build_win.vdf.template (new file)
@@ -0,0 +1,31 @@
"DepotBuildConfig"
{
	// Set your assigned depot ID here
	"DepotID" "[WINID]"

	// Set a root for all content.
	// All relative paths specified below (LocalPath in FileMapping entries, and FileExclusion paths)
	// will be resolved relative to this root.
	// If you don't define ContentRoot, then it will be assumed to be
	// the location of this script file, which probably isn't what you want
	"ContentRoot" "./blender-[VERSION]-windows64/"

	// include all files recursively
	"FileMapping"
	{
		// This can be a full path, or a path relative to ContentRoot
		"LocalPath" "*"

		// This is a path relative to the install folder of your game
		"DepotPath" "."

		// If LocalPath contains wildcards, setting this means that all
		// matching files within subdirectories of LocalPath will also
		// be included.
		"recursive" "1"
	}

	// but exclude all symbol files
	// This can be a full path, or a path relative to ContentRoot
	"FileExclusion" "*.pdb"
}
60
release/windows/msix/AppxManifest.xml.template
Normal file
@@ -0,0 +1,60 @@
<?xml version="1.0" encoding="utf-8"?>
<Package xmlns="http://schemas.microsoft.com/appx/manifest/foundation/windows10" xmlns:uap="http://schemas.microsoft.com/appx/manifest/uap/windows10" xmlns:uap2="http://schemas.microsoft.com/appx/manifest/uap/windows10/2" xmlns:uap3="http://schemas.microsoft.com/appx/manifest/uap/windows10/3" xmlns:uap4="http://schemas.microsoft.com/appx/manifest/uap/windows10/4" xmlns:uap6="http://schemas.microsoft.com/appx/manifest/uap/windows10/6" xmlns:uap7="http://schemas.microsoft.com/appx/manifest/uap/windows10/7" xmlns:uap8="http://schemas.microsoft.com/appx/manifest/uap/windows10/8" xmlns:uap10="http://schemas.microsoft.com/appx/manifest/uap/windows10/10" xmlns:mobile="http://schemas.microsoft.com/appx/manifest/mobile/windows10" xmlns:iot="http://schemas.microsoft.com/appx/manifest/iot/windows10" xmlns:desktop="http://schemas.microsoft.com/appx/manifest/desktop/windows10" xmlns:desktop2="http://schemas.microsoft.com/appx/manifest/desktop/windows10/2" xmlns:desktop6="http://schemas.microsoft.com/appx/manifest/desktop/windows10/6" xmlns:rescap="http://schemas.microsoft.com/appx/manifest/foundation/windows10/restrictedcapabilities" xmlns:rescap3="http://schemas.microsoft.com/appx/manifest/foundation/windows10/restrictedcapabilities/3" xmlns:rescap6="http://schemas.microsoft.com/appx/manifest/foundation/windows10/restrictedcapabilities/6" xmlns:com="http://schemas.microsoft.com/appx/manifest/com/windows10" xmlns:com2="http://schemas.microsoft.com/appx/manifest/com/windows10/2" xmlns:com3="http://schemas.microsoft.com/appx/manifest/com/windows10/3" IgnorableNamespaces="uap uap2 uap3 uap4 uap6 uap7 uap8 uap10 mobile iot desktop desktop2 desktop6 rescap rescap3 rescap6 com com2 com3">
  <Identity Name="BlenderFoundation.Blender[PACKAGETYPE]" Publisher="[PUBLISHER]" Version="[VERSION]" ProcessorArchitecture="x64" />
  <Properties>
    <DisplayName>Blender[LTSORNOT]</DisplayName>
    <PublisherDisplayName>Blender Foundation</PublisherDisplayName>
    <Description>Blender [VERSION] is the Free and Open Source 3D creation suite</Description>
    <Logo>Assets\StoreLogo.scale-100.png</Logo>
  </Properties>
  <Resources>
    <Resource Language="en-us" />
  </Resources>
  <Dependencies>
    <TargetDeviceFamily Name="Windows.Desktop" MinVersion="10.0.17763.0" MaxVersionTested="10.0.18335.0" />
  </Dependencies>
  <Capabilities>
    <rescap:Capability Name="runFullTrust" />
  </Capabilities>
  <Applications>
    <Application Id="BLENDER" Executable="Blender\blender.exe" EntryPoint="Windows.FullTrustApplication">
      <uap:VisualElements
        BackgroundColor="transparent"
        DisplayName="Blender [VERSION]"
        Square150x150Logo="Assets\Square150x150Logo.png"
        Square44x44Logo="Assets\Square44x44Logo.png"
        Description="Blender is the Free and Open Source 3D creation suite"
        >
        <uap:DefaultTile
          Wide310x150Logo="Assets\Wide310x150Logo.png"
          Square310x310Logo="Assets\Square310x310Logo.png"
          Square71x71Logo="Assets\Square71x71Logo.png"
          ShortName="Blender [VERSION]"
          >
          <uap:ShowNameOnTiles>
            <uap:ShowOn Tile="square150x150Logo"/> <!-- Show app name on the 150x150 tile -->
            <uap:ShowOn Tile="wide310x150Logo"/> <!-- …and also on the 310x150 tile -->
            <uap:ShowOn Tile="square310x310Logo"/> <!-- …and also on the 310x310 tile -->
          </uap:ShowNameOnTiles>
        </uap:DefaultTile>
      </uap:VisualElements>
      <Extensions>
        <uap3:Extension Category="windows.fileTypeAssociation">
          <uap3:FileTypeAssociation Name="blend">
            <uap:SupportedFileTypes>
              <uap:FileType>.blend</uap:FileType>
            </uap:SupportedFileTypes>
            <uap2:SupportedVerbs>
              <uap3:Verb Id="open" Parameters="&quot;%1&quot;">open</uap3:Verb>
            </uap2:SupportedVerbs>
          </uap3:FileTypeAssociation>
        </uap3:Extension>
        <uap3:Extension Category="windows.appExecutionAlias" Executable="Blender\blender.exe" EntryPoint="Windows.FullTrustApplication">
          <uap3:AppExecutionAlias>
            <desktop:ExecutionAlias Alias="blender.exe" />
          </uap3:AppExecutionAlias>
        </uap3:Extension>
      </Extensions>
    </Application>
  </Applications>
</Package>
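Once [PACKAGETYPE], [PUBLISHER], [VERSION], and [LTSORNOT] are substituted, the directory containing the filled-in manifest and the Blender files can be packed into an .msix with the MakeAppx tool from the Windows SDK. A hedged sketch — the directory and output names are illustrative, not taken from the build scripts:

import subprocess

# Illustrative only: pack a content directory (holding AppxManifest.xml
# plus the Blender files) into an .msix package with MakeAppx.
subprocess.run(["makeappx", "pack",
                "/d", "content",        # directory with AppxManifest.xml
                "/p", "blender.msix",   # output package
                "/o"],                  # overwrite an existing package
               check=True)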
BIN
release/windows/msix/Assets/Square150x150Logo.scale-100.png
Normal file
After Width: | Height: | Size: 4.0 KiB |
BIN
release/windows/msix/Assets/Square150x150Logo.scale-125.png
Normal file
After Width: | Height: | Size: 5.1 KiB |
BIN
release/windows/msix/Assets/Square150x150Logo.scale-150.png
Normal file
After Width: | Height: | Size: 6.3 KiB |
BIN
release/windows/msix/Assets/Square150x150Logo.scale-200.png
Normal file
After Width: | Height: | Size: 8.9 KiB |
BIN
release/windows/msix/Assets/Square150x150Logo.scale-400.png
Normal file
After Width: | Height: | Size: 20 KiB |
BIN
release/windows/msix/Assets/Square310x310Logo.scale-100.png
Normal file
After Width: | Height: | Size: 9.2 KiB |
BIN
release/windows/msix/Assets/Square310x310Logo.scale-125.png
Normal file
After Width: | Height: | Size: 12 KiB |
BIN
release/windows/msix/Assets/Square310x310Logo.scale-150.png
Normal file
After Width: | Height: | Size: 15 KiB |
BIN
release/windows/msix/Assets/Square310x310Logo.scale-200.png
Normal file
After Width: | Height: | Size: 21 KiB |
BIN
release/windows/msix/Assets/Square310x310Logo.scale-400.png
Normal file
After Width: | Height: | Size: 51 KiB |