Initial commit
This commit is contained in:
7
.gitignore
vendored
Normal file
7
.gitignore
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
# Ignore all directories with binary files.
|
||||
/scenes
|
||||
/blender
|
||||
# Ignore production configuration files.
|
||||
/config/*.cfg
|
||||
# Ignore Python cache
|
||||
__pycache__
|
949
benchmark/blendfile/blendfile.py
Normal file
949
benchmark/blendfile/blendfile.py
Normal file
@@ -0,0 +1,949 @@
|
||||
# ***** BEGIN GPL LICENSE BLOCK *****
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
#
|
||||
# ***** END GPL LICENCE BLOCK *****
|
||||
#
|
||||
# (c) 2009, At Mind B.V. - Jeroen Bakker
|
||||
# (c) 2014, Blender Foundation - Campbell Barton
|
||||
|
||||
import gzip
|
||||
import logging
|
||||
import os
|
||||
import struct
|
||||
import tempfile
|
||||
|
||||
log = logging.getLogger("blendfile")
|
||||
|
||||
FILE_BUFFER_SIZE = 1024 * 1024
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# module global routines
|
||||
#
|
||||
# read routines
|
||||
# open a filename
|
||||
# determine if the file is compressed
|
||||
# and returns a handle
|
||||
def open_blend(filename, access="rb"):
    """Open a blend file for reading or writing pending on the access.

    Supports 2 kinds of blend files: uncompressed and gzip-compressed
    (a compressed file is inflated into an anonymous temporary file first).

    :arg filename: path of the blend file on disk.
    :arg access: mode string passed straight to ``open()``.
    :return: a ``BlendFile`` wrapping the (possibly decompressed) handle.
    :raises Exception: when the file is neither a blend nor a gzipped blend.

    Known issue: does not support packaged blend files.
    """
    handle = open(filename, access)
    magic_test = b"BLENDER"
    magic = handle.read(len(magic_test))
    if magic == magic_test:
        log.debug("normal blendfile detected")
        handle.seek(0, os.SEEK_SET)
        bfile = BlendFile(handle)
        bfile.is_compressed = False
        bfile.filepath_orig = filename
        return bfile
    elif magic[:2] == b'\x1f\x8b':
        # gzip magic number
        log.debug("gzip blendfile detected")
        handle.close()
        log.debug("decompressing started")
        fs = gzip.open(filename, "rb")
        data = fs.read(FILE_BUFFER_SIZE)
        magic = data[:len(magic_test)]
        if magic == magic_test:
            # inflate into a temp file so the rest of the code can seek freely
            handle = tempfile.TemporaryFile()
            while data:
                handle.write(data)
                data = fs.read(FILE_BUFFER_SIZE)
            log.debug("decompressing finished")
            fs.close()
            log.debug("resetting decompressed file")
            # BUGFIX: arguments were swapped -- seek(os.SEEK_SET, 0);
            # it only worked because both constants happen to equal 0.
            handle.seek(0, os.SEEK_SET)
            bfile = BlendFile(handle)
            bfile.is_compressed = True
            bfile.filepath_orig = filename
            return bfile
        else:
            # BUGFIX: the gzip handle used to be leaked on this path
            fs.close()
            raise Exception("filetype inside gzip not a blend")
    else:
        # BUGFIX: the file handle used to be leaked on this path
        handle.close()
        raise Exception("filetype not a blend or a gzip blend")
|
||||
|
||||
|
||||
def pad_up_4(offset):
    """Return *offset* rounded up to the next multiple of 4 (DNA alignment)."""
    remainder = offset % 4
    return offset + (4 - remainder) % 4
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# module classes
|
||||
|
||||
|
||||
class BlendFile:
    """
    Blend file.

    Reads all file-block headers up front and decodes the DNA1 catalog so
    blocks can later be interpreted as typed structs.
    """
    __slots__ = (
        # file (result of open())
        "handle",
        # str (original name of the file path)
        "filepath_orig",
        # BlendFileHeader
        "header",
        # struct.Struct
        "block_header_struct",
        # BlendFileBlock
        "blocks",
        # [DNAStruct, ...]
        "structs",
        # dict {b'StructName': sdna_index}
        # (where the index is an index into 'structs')
        "sdna_index_from_id",
        # dict {addr_old: block}
        "block_from_offset",
        # int
        "code_index",
        # bool (did we make a change)
        "is_modified",
        # bool (is file gzipped)
        "is_compressed",
    )

    def __init__(self, handle):
        """Parse all block headers (and the DNA1 catalog) from *handle*."""
        log.debug("initializing reading blend-file")
        self.handle = handle
        self.header = BlendFileHeader(handle)
        self.block_header_struct = self.header.create_block_header_struct()
        self.blocks = []
        self.code_index = {}
        self.structs = []
        self.sdna_index_from_id = {}

        # walk the file block by block until the ENDB sentinel
        block = BlendFileBlock(handle, self)
        while block.code != b'ENDB':
            if block.code == b'DNA1':
                # the DNA1 block holds the type catalog; decode it in place
                (self.structs,
                 self.sdna_index_from_id,
                 ) = BlendFile.decode_structs(self.header, block, handle)
            else:
                # skip this block's payload; only headers are read eagerly
                handle.seek(block.size, os.SEEK_CUR)

            self.blocks.append(block)
            self.code_index.setdefault(block.code, []).append(block)

            block = BlendFileBlock(handle, self)
        self.is_modified = False
        # keep the ENDB block too
        self.blocks.append(block)

        if not self.structs:
            raise Exception("No DNA1 block in file, this is not a valid .blend file!")

        # cache (could lazy init, incase we never use?)
        self.block_from_offset = {block.addr_old: block for block in self.blocks if block.code != b'ENDB'}

    def __enter__(self):
        # context-manager support: ``with open_blend(...) as bfile:``
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def find_blocks_from_code(self, code):
        """Return the list of blocks with 4-byte *code* (possibly empty)."""
        assert(type(code) == bytes)
        if code not in self.code_index:
            return []
        return self.code_index[code]

    def find_block_from_offset(self, offset):
        """Return the block whose saved address equals *offset*, or None."""
        # same as looking looping over all blocks,
        # then checking ``block.addr_old == offset``
        assert(type(offset) is int)
        return self.block_from_offset.get(offset)

    def close(self):
        """
        Close the blend file
        writes the blend file to disk if changes has happened
        """
        handle = self.handle

        if self.is_modified:
            if self.is_compressed:
                log.debug("close compressed blend file")
                # NOTE(review): arguments look swapped (seek(0, os.SEEK_SET)
                # intended); works only because both values are 0 -- confirm.
                handle.seek(os.SEEK_SET, 0)
                log.debug("compressing started")
                # re-compress the temp file back over the original path
                fs = gzip.open(self.filepath_orig, "wb")
                data = handle.read(FILE_BUFFER_SIZE)
                while data:
                    fs.write(data)
                    data = handle.read(FILE_BUFFER_SIZE)
                fs.close()
                log.debug("compressing finished")

        handle.close()

    def ensure_subtype_smaller(self, sdna_index_curr, sdna_index_next):
        """Raise when refining to a struct larger than the current one."""
        # never refine to a smaller type
        if (self.structs[sdna_index_curr].size >
                self.structs[sdna_index_next].size):

            raise RuntimeError("cant refine to smaller type (%s -> %s)" %
                               (self.structs[sdna_index_curr].dna_type_id.decode('ascii'),
                                self.structs[sdna_index_next].dna_type_id.decode('ascii')))

    @staticmethod
    def decode_structs(header, block, handle):
        """
        DNACatalog is a catalog of all information in the DNA1 file-block

        Reads the four DNA1 sections in order -- names, type names, type
        sizes, struct layouts -- and returns (structs, sdna_index_from_id).
        """
        log.debug("building DNA catalog")
        shortstruct = DNA_IO.USHORT[header.endian_index]
        shortstruct2 = struct.Struct(header.endian_str + b'HH')
        intstruct = DNA_IO.UINT[header.endian_index]

        data = handle.read(block.size)
        types = []
        names = []

        structs = []
        sdna_index_from_id = {}

        # skip the b'SDNA' + b'NAME' section markers (2 * 4 bytes)
        offset = 8
        names_len = intstruct.unpack_from(data, offset)[0]
        offset += 4

        log.debug("building #%d names" % names_len)
        for i in range(names_len):
            # NUL-terminated member declaration, e.g. b'*vertex[3]'
            tName = DNA_IO.read_data0_offset(data, offset)
            offset = offset + len(tName) + 1
            names.append(DNAName(tName))
        del names_len

        # sections are 4-byte aligned; +4 skips the next section marker
        offset = pad_up_4(offset)
        offset += 4
        types_len = intstruct.unpack_from(data, offset)[0]
        offset += 4
        log.debug("building #%d types" % types_len)
        for i in range(types_len):
            dna_type_id = DNA_IO.read_data0_offset(data, offset)
            # None will be replaced by the DNAStruct, below
            types.append(DNAStruct(dna_type_id))
            offset += len(dna_type_id) + 1

        offset = pad_up_4(offset)
        offset += 4
        log.debug("building #%d type-lengths" % types_len)
        for i in range(types_len):
            # one ushort size per type, in the same order as the names above
            tLen = shortstruct.unpack_from(data, offset)[0]
            offset = offset + 2
            types[i].size = tLen
        del types_len

        offset = pad_up_4(offset)
        offset += 4

        structs_len = intstruct.unpack_from(data, offset)[0]
        offset += 4
        log.debug("building #%d structures" % structs_len)
        for sdna_index in range(structs_len):
            # (type_index, field_count) header for each struct
            d = shortstruct2.unpack_from(data, offset)
            struct_type_index = d[0]
            offset += 4
            dna_struct = types[struct_type_index]
            sdna_index_from_id[dna_struct.dna_type_id] = sdna_index
            structs.append(dna_struct)

            fields_len = d[1]
            dna_offset = 0

            for field_index in range(fields_len):
                # (field_type_index, field_name_index) per field
                d2 = shortstruct2.unpack_from(data, offset)
                field_type_index = d2[0]
                field_name_index = d2[1]
                offset += 4
                dna_type = types[field_type_index]
                dna_name = names[field_name_index]
                if dna_name.is_pointer or dna_name.is_method_pointer:
                    # pointers occupy pointer_size bytes regardless of type
                    dna_size = header.pointer_size * dna_name.array_size
                else:
                    dna_size = dna_type.size * dna_name.array_size

                field = DNAField(dna_type, dna_name, dna_size, dna_offset)
                dna_struct.fields.append(field)
                dna_struct.field_from_name[dna_name.name_only] = field
                dna_offset += dna_size

        return structs, sdna_index_from_id
|
||||
|
||||
|
||||
class BlendFileBlock:
    """
    Instance of a struct.

    One file-block header read from a blend file: its 4-byte code, data
    size, saved memory address, SDNA struct index and element count, plus
    typed field access into the block's on-disk data.
    """
    __slots__ = (
        # BlendFile that owns this block
        "file",
        # bytes: 4-byte block code, e.g. b'DATA', b'DNA1', b'ENDB'
        "code",
        # int: size in bytes of the data following the header
        "size",
        # int: memory address the block had when the file was saved
        "addr_old",
        # int: index into file.structs describing this block's type
        "sdna_index",
        # int: number of consecutive structs stored in the block
        "count",
        # int: absolute file offset of the block's data
        "file_offset",
        # free slot for callers to attach their own data
        "user_data",
    )

    def __str__(self):
        return ("<%s.%s (%s), size=%d at %s>" %
                # fields=[%s]
                (self.__class__.__name__,
                 self.dna_type_name,
                 self.code.decode(),
                 self.size,
                 # b", ".join(f.dna_name.name_only for f in self.dna_type.fields).decode('ascii'),
                 hex(self.addr_old),
                 ))

    def __init__(self, handle, bfile):
        """Read one block header from *handle* (positioned at the header)."""
        OLDBLOCK = struct.Struct(b'4sI')

        self.file = bfile
        self.user_data = None

        data = handle.read(bfile.block_header_struct.size)

        if len(data) != bfile.block_header_struct.size:
            print("WARNING! Blend file seems to be badly truncated!")
            # synthesize an end block so the reader loop terminates cleanly
            self.code = b'ENDB'
            self.size = 0
            self.addr_old = 0
            self.sdna_index = 0
            self.count = 0
            self.file_offset = 0
            return
        # header size can be 8, 20, or 24 bytes long
        # 8: old blend files ENDB block (exception)
        # 20: normal headers 32 bit platform
        # 24: normal headers 64 bit platform
        if len(data) > 15:
            blockheader = bfile.block_header_struct.unpack(data)
            self.code = blockheader[0].partition(b'\0')[0]
            if self.code != b'ENDB':
                self.size = blockheader[1]
                self.addr_old = blockheader[2]
                self.sdna_index = blockheader[3]
                self.count = blockheader[4]
                # data starts right after the header we just read
                self.file_offset = handle.tell()
            else:
                self.size = 0
                self.addr_old = 0
                self.sdna_index = 0
                self.count = 0
                self.file_offset = 0
        else:
            blockheader = OLDBLOCK.unpack(data)
            # BUGFIX: the original assigned self.code twice in a row from
            # blockheader[0]; the first (partition-based) assignment was
            # dead code and has been removed.
            self.code = DNA_IO.read_data0(blockheader[0])
            self.size = 0
            self.addr_old = 0
            self.sdna_index = 0
            self.count = 0
            self.file_offset = 0

    @property
    def dna_type(self):
        # DNAStruct describing this block's contents
        return self.file.structs[self.sdna_index]

    @property
    def dna_type_name(self):
        # struct name as str, e.g. 'Mesh'
        return self.dna_type.dna_type_id.decode('ascii')

    def refine_type_from_index(self, sdna_index_next):
        """Reinterpret this block as another (same-or-larger) SDNA struct."""
        assert(type(sdna_index_next) is int)
        sdna_index_curr = self.sdna_index
        self.file.ensure_subtype_smaller(sdna_index_curr, sdna_index_next)
        self.sdna_index = sdna_index_next

    def refine_type(self, dna_type_id):
        """Reinterpret this block as the struct named *dna_type_id* (bytes)."""
        assert(type(dna_type_id) is bytes)
        self.refine_type_from_index(self.file.sdna_index_from_id[dna_type_id])

    def get_file_offset(self, path,
                        default=...,
                        sdna_index_refine=None,
                        base_index=0,
                        ):
        """
        Return (offset, length)

        Seeks the file handle to the field at *path* and reports its
        absolute offset together with the field's array length.
        """
        assert(type(path) is bytes)

        ofs = self.file_offset
        if base_index != 0:
            # step to the base_index'th element of this block's array
            assert(base_index < self.count)
            ofs += (self.size // self.count) * base_index
        self.file.handle.seek(ofs, os.SEEK_SET)

        if sdna_index_refine is None:
            sdna_index_refine = self.sdna_index
        else:
            self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)

        dna_struct = self.file.structs[sdna_index_refine]
        field = dna_struct.field_from_path(
            self.file.header, self.file.handle, path)

        return (self.file.handle.tell(), field.dna_name.array_size)

    def get(self, path,
            default=...,
            sdna_index_refine=None,
            use_nil=True, use_str=True,
            base_index=0,
            ):
        """Read and return the value of the field at *path* in this block."""
        ofs = self.file_offset
        if base_index != 0:
            assert(base_index < self.count)
            ofs += (self.size // self.count) * base_index
        self.file.handle.seek(ofs, os.SEEK_SET)

        if sdna_index_refine is None:
            sdna_index_refine = self.sdna_index
        else:
            self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)

        dna_struct = self.file.structs[sdna_index_refine]
        return dna_struct.field_get(
            self.file.header, self.file.handle, path,
            default=default,
            use_nil=use_nil, use_str=use_str,
            )

    def get_recursive_iter(self, path, path_root=b"",
                           default=...,
                           sdna_index_refine=None,
                           use_nil=True, use_str=True,
                           base_index=0,
                           ):
        """Yield (path, value) pairs, descending into sub-struct fields."""
        if path_root:
            # normalize both parts to tuples before concatenating
            path_full = (
                (path_root if type(path_root) is tuple else (path_root, )) +
                (path if type(path) is tuple else (path, )))
        else:
            path_full = path

        try:
            yield (path_full, self.get(path_full, default, sdna_index_refine, use_nil, use_str, base_index))
        except NotImplementedError as ex:
            # non-scalar field: expand it into its member fields
            msg, dna_name, dna_type = ex.args
            struct_index = self.file.sdna_index_from_id.get(dna_type.dna_type_id, None)
            if struct_index is None:
                yield (path_full, "<%s>" % dna_type.dna_type_id.decode('ascii'))
            else:
                struct = self.file.structs[struct_index]
                for f in struct.fields:
                    yield from self.get_recursive_iter(
                        f.dna_name.name_only, path_full, default, None, use_nil, use_str, 0)

    def items_recursive_iter(self):
        """Yield (path, value) for every field of this block, recursively."""
        for k in self.keys():
            yield from self.get_recursive_iter(k, use_str=False)

    def get_data_hash(self):
        """
        Generates a 'hash' that can be used instead of addr_old as block id, and that should be 'stable' across .blend
        file load & save (i.e. it does not changes due to pointer addresses variations).
        """
        # TODO This implementation is most likely far from optimal... and CRC32 is not renown as the best hashing
        # algo either. But for now does the job!
        import zlib

        def _is_pointer(self, k):
            # pointer values change between saves, so exclude them
            return self.file.structs[self.sdna_index].field_from_path(
                self.file.header, self.file.handle, k).dna_name.is_pointer

        hsh = 1
        for k, v in self.items_recursive_iter():
            if not _is_pointer(self, k):
                hsh = zlib.adler32(str(v).encode(), hsh)
        return hsh

    def set(self, path, value,
            sdna_index_refine=None,
            ):
        """Write *value* into the field at *path*; marks the file modified."""
        if sdna_index_refine is None:
            sdna_index_refine = self.sdna_index
        else:
            self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)

        dna_struct = self.file.structs[sdna_index_refine]
        self.file.handle.seek(self.file_offset, os.SEEK_SET)
        self.file.is_modified = True
        return dna_struct.field_set(
            self.file.header, self.file.handle, path, value)

    # ---------------
    # Utility get/set
    #
    # avoid inline pointer casting
    def get_pointer(
            self, path,
            default=...,
            sdna_index_refine=None,
            base_index=0,
            ):
        """Dereference the pointer field at *path*; return its target block."""
        if sdna_index_refine is None:
            sdna_index_refine = self.sdna_index
        result = self.get(path, default, sdna_index_refine=sdna_index_refine, base_index=base_index)

        # default
        if type(result) is not int:
            return result

        assert(self.file.structs[sdna_index_refine].field_from_path(
            self.file.header, self.file.handle, path).dna_name.is_pointer)
        if result != 0:
            # possible (but unlikely)
            # that this fails and returns None
            # maybe we want to raise some exception in this case
            return self.file.find_block_from_offset(result)
        else:
            return None

    # ----------------------
    # Python convenience API

    # dict like access
    def __getitem__(self, item):
        return self.get(item, use_str=False)

    def __setitem__(self, item, value):
        self.set(item, value)

    def keys(self):
        # field names (bytes) of this block's struct
        return (f.dna_name.name_only for f in self.dna_type.fields)

    def values(self):
        for k in self.keys():
            try:
                yield self[k]
            except NotImplementedError as ex:
                # non-scalar fields are represented by their type name
                msg, dna_name, dna_type = ex.args
                yield "<%s>" % dna_type.dna_type_id.decode('ascii')

    def items(self):
        for k in self.keys():
            try:
                yield (k, self[k])
            except NotImplementedError as ex:
                msg, dna_name, dna_type = ex.args
                yield (k, "<%s>" % dna_type.dna_type_id.decode('ascii'))
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Read Magic
|
||||
#
|
||||
# magic = str
|
||||
# pointer_size = int
|
||||
# is_little_endian = bool
|
||||
# version = int
|
||||
|
||||
|
||||
class BlendFileHeader:
    """
    BlendFileHeader allocates the first 12 bytes of a blend file
    it contains information about the hardware architecture
    """
    __slots__ = (
        # str
        "magic",
        # int 4/8
        "pointer_size",
        # bool
        "is_little_endian",
        # int
        "version",
        # str, used to pass to 'struct'
        "endian_str",
        # int, used to index common types
        "endian_index",
    )

    def __init__(self, handle):
        """Decode the 12-byte file header from *handle*."""
        header_struct = struct.Struct(b'7s1s1s3s')

        log.debug("reading blend-file-header")
        fields = header_struct.unpack(handle.read(header_struct.size))
        self.magic = fields[0]

        # 8th byte encodes the pointer size of the saving machine
        ptr_code = fields[1]
        if ptr_code == b'-':
            self.pointer_size = 8
        elif ptr_code == b'_':
            self.pointer_size = 4
        else:
            assert(0)

        # 9th byte encodes the endianness
        endian_code = fields[2]
        if endian_code == b'v':
            self.is_little_endian = True
        elif endian_code == b'V':
            self.is_little_endian = False
        else:
            assert(0)
        self.endian_index = 0 if self.is_little_endian else 1
        self.endian_str = b'<' if self.is_little_endian else b'>'

        # last 3 bytes are the ASCII version number, e.g. b'279'
        self.version = int(fields[3])

    def create_block_header_struct(self):
        """Build the struct matching this file's block-header layout."""
        pointer_code = b'I' if self.pointer_size == 4 else b'Q'
        return struct.Struct(self.endian_str + b'4sI' + pointer_code + b'II')
|
||||
|
||||
|
||||
class DNAName:
    """
    DNAName is a C-type name stored in the DNA

    Parses a member declaration string such as ``b'*vertex[3]'`` into its
    bare name, pointer flags and total array size.
    """
    __slots__ = (
        "name_full",
        "name_only",
        "is_pointer",
        "is_method_pointer",
        "array_size",
    )

    def __init__(self, name_full):
        self.name_full = name_full
        self.name_only = self.calc_name_only()
        self.is_pointer = self.calc_is_pointer()
        self.is_method_pointer = self.calc_is_method_pointer()
        self.array_size = self.calc_array_size()

    def __repr__(self):
        return '%s(%r)' % (type(self).__qualname__, self.name_full)

    def as_reference(self, parent):
        """Return the dotted path of this name under *parent* (or bare)."""
        prefix = b'' if parent is None else parent + b'.'
        return prefix + self.name_only

    def calc_name_only(self):
        """Strip pointer/function punctuation and array suffixes."""
        stripped = self.name_full.strip(b'*()')
        bracket = stripped.find(b'[')
        return stripped if bracket == -1 else stripped[:bracket]

    def calc_is_pointer(self):
        return (b'*' in self.name_full)

    def calc_is_method_pointer(self):
        return (b'(*' in self.name_full)

    def calc_array_size(self):
        """Multiply out every ``[N]`` suffix; 1 when the name is scalar."""
        size = 1
        remainder = self.name_full
        while True:
            open_idx = remainder.find(b'[')
            if open_idx == -1:
                break
            close_idx = remainder.find(b']')
            size *= int(remainder[open_idx + 1:close_idx])
            remainder = remainder[close_idx + 1:]
        return size
|
||||
|
||||
|
||||
class DNAField:
    """
    DNAField is a coupled DNAStruct and DNAName
    and cache offset for reuse
    """
    __slots__ = (
        # DNAName
        "dna_name",
        # tuple of 3 items
        # [bytes (struct name), int (struct size), DNAStruct]
        "dna_type",
        # size on-disk
        "dna_size",
        # cached info (avoid looping over fields each time)
        "dna_offset",
    )

    def __init__(self, dna_type, dna_name, dna_size, dna_offset):
        # plain value-object: store everything the decoder computed
        self.dna_name = dna_name
        self.dna_type = dna_type
        self.dna_offset = dna_offset
        self.dna_size = dna_size
|
||||
|
||||
|
||||
class DNAStruct:
    """
    DNAStruct is a C-type structure stored in the DNA
    """
    __slots__ = (
        # bytes: the C type name, e.g. b'Mesh'
        "dna_type_id",
        # int: on-disk size of the struct in bytes (set by decode_structs)
        "size",
        # [DNAField, ...] in declaration order
        "fields",
        # dict {b'field_name': DNAField} for O(1) lookup
        "field_from_name",
        # free slot for callers to attach their own data
        "user_data",
    )

    def __init__(self, dna_type_id):
        self.dna_type_id = dna_type_id
        self.fields = []
        self.field_from_name = {}
        self.user_data = None

    def __repr__(self):
        return '%s(%r)' % (type(self).__qualname__, self.dna_type_id)

    def field_from_path(self, header, handle, path):
        """
        Support lookups as bytes or a tuple of bytes and optional index.

        C style 'id.name' --> (b'id', b'name')
        C style 'array[4]' --> ('array', 4)

        Seeks *handle* forward to the field's position as a side effect;
        returns None when the first path component is not a known field.
        """
        if type(path) is tuple:
            name = path[0]
            if len(path) >= 2 and type(path[1]) is not bytes:
                # second element is an array index, not a sub-field name
                name_tail = path[2:]
                index = path[1]
                assert(type(index) is int)
            else:
                name_tail = path[1:]
                index = 0
        else:
            name = path
            name_tail = None
            index = 0

        assert(type(name) is bytes)

        field = self.field_from_name.get(name)

        if field is not None:
            # relative seek: handle is expected to sit at the struct start
            handle.seek(field.dna_offset, os.SEEK_CUR)
            if index != 0:
                # step over preceding array elements
                if field.dna_name.is_pointer:
                    index_offset = header.pointer_size * index
                else:
                    index_offset = field.dna_type.size * index
                assert(index_offset < field.dna_size)
                handle.seek(index_offset, os.SEEK_CUR)
            if not name_tail:  # None or ()
                return field
            else:
                # recurse into the sub-struct with the remaining path
                return field.dna_type.field_from_path(header, handle, name_tail)

    def field_get(self, header, handle, path,
                  default=...,
                  use_nil=True, use_str=True,
                  ):
        """Read and decode the value of the field at *path* from *handle*.

        :arg default: returned when the field is missing; when left as
            ``...`` a missing field raises KeyError instead.
        :arg use_nil: for char arrays, stop at the first NUL byte.
        :arg use_str: for char arrays, decode to str instead of bytes.
        :raises NotImplementedError: for compound (struct-typed) fields.
        """
        field = self.field_from_path(header, handle, path)
        if field is None:
            if default is not ...:
                return default
            else:
                raise KeyError("%r not found in %r (%r)" %
                               (path, [f.dna_name.name_only for f in self.fields], self.dna_type_id))

        dna_type = field.dna_type
        dna_name = field.dna_name
        dna_size = field.dna_size

        # dispatch on the DNA type name; arrays become Python lists
        if dna_name.is_pointer:
            return DNA_IO.read_pointer(handle, header)
        elif dna_type.dna_type_id == b'int':
            if dna_name.array_size > 1:
                return [DNA_IO.read_int(handle, header) for i in range(dna_name.array_size)]
            return DNA_IO.read_int(handle, header)
        elif dna_type.dna_type_id == b'short':
            if dna_name.array_size > 1:
                return [DNA_IO.read_short(handle, header) for i in range(dna_name.array_size)]
            return DNA_IO.read_short(handle, header)
        elif dna_type.dna_type_id == b'uint64_t':
            if dna_name.array_size > 1:
                return [DNA_IO.read_ulong(handle, header) for i in range(dna_name.array_size)]
            return DNA_IO.read_ulong(handle, header)
        elif dna_type.dna_type_id == b'float':
            if dna_name.array_size > 1:
                return [DNA_IO.read_float(handle, header) for i in range(dna_name.array_size)]
            return DNA_IO.read_float(handle, header)
        elif dna_type.dna_type_id == b'char':
            if dna_size == 1:
                # Single char, assume it's bitflag or int value, and not a string/bytes data...
                return DNA_IO.read_char(handle, header)
            if use_str:
                if use_nil:
                    return DNA_IO.read_string0(handle, dna_name.array_size)
                else:
                    return DNA_IO.read_string(handle, dna_name.array_size)
            else:
                if use_nil:
                    return DNA_IO.read_bytes0(handle, dna_name.array_size)
                else:
                    return DNA_IO.read_bytes(handle, dna_name.array_size)
        else:
            raise NotImplementedError("%r exists but isn't pointer, can't resolve field %r" %
                                      (path, dna_name.name_only), dna_name, dna_type)

    def field_set(self, header, handle, path, value):
        """Write *value* into the field at *path* (char fields only).

        :raises KeyError: when the field does not exist.
        :raises NotImplementedError: for any non-char field type.
        """
        assert(type(path) == bytes)

        field = self.field_from_path(header, handle, path)
        if field is None:
            raise KeyError("%r not found in %r" %
                           (path, [f.dna_name.name_only for f in self.fields]))

        dna_type = field.dna_type
        dna_name = field.dna_name

        if dna_type.dna_type_id == b'char':
            if type(value) is str:
                return DNA_IO.write_string(handle, value, dna_name.array_size)
            else:
                return DNA_IO.write_bytes(handle, value, dna_name.array_size)
        else:
            raise NotImplementedError("Setting %r is not yet supported for %r" %
                                      (dna_type, dna_name), dna_name, dna_type)
|
||||
|
||||
|
||||
class DNA_IO:
    """
    Module like class, for read-write utility functions.

    Only stores static methods & constants.
    """

    __slots__ = ()

    def __new__(cls, *args, **kwargs):
        raise RuntimeError("%s should not be instantiated" % cls)

    @staticmethod
    def write_string(handle, astring, fieldlen):
        """Write *astring* (str) UTF-8 encoded: truncated to *fieldlen*,
        or NUL-terminated when it is shorter than the field."""
        assert(isinstance(astring, str))
        if len(astring) >= fieldlen:
            stringw = astring[0:fieldlen]
        else:
            stringw = astring + '\0'
        handle.write(stringw.encode('utf-8'))

    @staticmethod
    def write_bytes(handle, astring, fieldlen):
        """Write raw bytes: truncated to *fieldlen*, or NUL-terminated."""
        assert(isinstance(astring, (bytes, bytearray)))
        if len(astring) >= fieldlen:
            stringw = astring[0:fieldlen]
        else:
            stringw = astring + b'\0'

        handle.write(stringw)

    @staticmethod
    def read_bytes(handle, length):
        """Read exactly *length* raw bytes."""
        data = handle.read(length)
        return data

    @staticmethod
    def read_bytes0(handle, length):
        """Read a *length*-byte field, returning its content up to NUL."""
        data = handle.read(length)
        return DNA_IO.read_data0(data)

    @staticmethod
    def read_string(handle, length):
        return DNA_IO.read_bytes(handle, length).decode('utf-8')

    @staticmethod
    def read_string0(handle, length):
        return DNA_IO.read_bytes0(handle, length).decode('utf-8')

    @staticmethod
    def read_data0_offset(data, offset):
        """Return the NUL-terminated chunk of *data* starting at *offset*."""
        # BUGFIX: when no NUL exists, ``find`` returns -1 and the original
        # slice arithmetic returned garbage; return the rest of the buffer.
        index = data.find(b'\0', offset)
        if index == -1:
            return data[offset:]
        return data[offset:index]

    @staticmethod
    def read_data0(data):
        """Return *data* truncated at the first NUL byte."""
        # BUGFIX: when no NUL exists, the original returned data[:-1]
        # (find() == -1), silently dropping the final byte.
        index = data.find(b'\0')
        if index == -1:
            return data
        return data[:index]

    # (little-endian, big-endian) pre-compiled structs,
    # indexed by header.endian_index
    UCHAR = struct.Struct(b'<b'), struct.Struct(b'>b')

    @staticmethod
    def read_char(handle, fileheader):
        # note: 'b' format is signed, despite the UCHAR name
        st = DNA_IO.UCHAR[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    USHORT = struct.Struct(b'<H'), struct.Struct(b'>H')

    @staticmethod
    def read_ushort(handle, fileheader):
        st = DNA_IO.USHORT[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    SSHORT = struct.Struct(b'<h'), struct.Struct(b'>h')

    @staticmethod
    def read_short(handle, fileheader):
        st = DNA_IO.SSHORT[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    UINT = struct.Struct(b'<I'), struct.Struct(b'>I')

    @staticmethod
    def read_uint(handle, fileheader):
        st = DNA_IO.UINT[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    SINT = struct.Struct(b'<i'), struct.Struct(b'>i')

    @staticmethod
    def read_int(handle, fileheader):
        st = DNA_IO.SINT[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    FLOAT = struct.Struct(b'<f'), struct.Struct(b'>f')

    @staticmethod
    def read_float(handle, fileheader):
        st = DNA_IO.FLOAT[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    ULONG = struct.Struct(b'<Q'), struct.Struct(b'>Q')

    @staticmethod
    def read_ulong(handle, fileheader):
        st = DNA_IO.ULONG[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    @staticmethod
    def read_pointer(handle, header):
        """
        reads an pointer from a file handle
        the pointer size is given by the header (BlendFileHeader)
        """
        if header.pointer_size == 4:
            st = DNA_IO.UINT[header.endian_index]
            return st.unpack(handle.read(st.size))[0]
        if header.pointer_size == 8:
            st = DNA_IO.ULONG[header.endian_index]
            return st.unpack(handle.read(st.size))[0]
|
160
benchmark/configure.py
Normal file
160
benchmark/configure.py
Normal file
@@ -0,0 +1,160 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import bpy
|
||||
|
||||
|
||||
def setDeviceCPU(context, cpref):
    """Force CPU rendering by selecting no compute device; always succeeds."""
    cpref.compute_device_type = 'NONE'
    return True
|
||||
|
||||
|
||||
def isDisplayDevice(device):
    """Return True when the device name is tagged as driving a display."""
    return device.name.find("(Display)") != -1
|
||||
|
||||
|
||||
def setUseRequestedDevice(context, cpref, device_type, requested_device):
    """Enable exactly the device named *requested_device* (display suffix
    ignored), disabling all others.

    An empty *requested_device* means: enable a single card, preferring a
    non-display one, falling back to the first display device.

    :return: True when a device was enabled, False otherwise.
    """
    # BUGFIX: removed an unused ``import _cycles`` (never referenced here).
    # Empty device name means we'll try to render on a single card,
    # preferably a non-display one.
    if requested_device == "":
        device_found = False
        # Try to enable first non-display card.
        for device in cpref.devices:
            if isDisplayDevice(device):
                device.use = False
            elif not device_found:
                # Enable first non-display GPU.
                device_found = True
                device.use = True
            else:
                # Keep disabling the rest of the GPUs.
                device.use = False
        if not device_found:
            # Only display devices, enable first of them.
            cpref.devices[0].use = True
            device_found = True
    else:
        device_found = False
        for device in cpref.devices:
            device_name = device.name.replace(" (Display)", "")
            if device_name == requested_device:
                device.use = True
                device_found = True
            else:
                device.use = False
    return device_found
|
||||
|
||||
|
||||
def setDeviceCUDA(context, cpref, requested_device):
    """Switch Cycles to CUDA and enable the requested device."""
    cpref.compute_device_type = 'CUDA'
    return setUseRequestedDevice(context, cpref, 'CUDA', requested_device)
|
||||
|
||||
|
||||
def setDeviceOpenCL(context, cpref, requested_device):
    """Switch Cycles to OpenCL and enable the requested device."""
    cpref.compute_device_type = 'OPENCL'
    return setUseRequestedDevice(context, cpref, 'OPENCL', requested_device)
|
||||
|
||||
|
||||
def setDeviceGPU(context, cpref, requested_device):
    """
    Pick a GPU backend automatically: CUDA when available, otherwise
    OpenCL.  Returns False when neither backend exists.
    """
    import _cycles
    available_types = {device[1] for device in _cycles.available_devices()}
    if 'CUDA' in available_types:
        return setDeviceCUDA(context, cpref, requested_device)
    if 'OPENCL' in available_types:
        return setDeviceOpenCL(context, cpref, requested_device)
    return False
|
||||
|
||||
|
||||
def logComputeDevices(cpref):
    """
    Print the effective compute device type and the devices in use, in
    the "key: value" format parsed by the farm runner.
    """
    device_type = cpref.compute_device_type
    if device_type == 'NONE':
        device_type = 'CPU'
    print("Compute device type: {}" . format(device_type))
    if device_type == 'CPU':
        import _cycles
        for device in _cycles.available_devices():
            if device[1] == 'CPU':
                print("Using compute device: {}" . format(device[0]))
    else:
        for device in cpref.devices:
            if device.type == device_type and device.use:
                print("Using compute device: {}" . format(device.name))
|
||||
|
||||
|
||||
def logSystemInfo(cpref):
    """
    Print the list of available compute devices as a single JSON line
    ("Benchmark Devices: [...]") for the calling process to parse.
    """
    import json
    info_devices = []
    for device in cpref.devices:
        # NOTE: the original used rstrip(" (Display)"), which strips a
        # *character set* from the end (e.g. "Tesla" -> "Te").  Remove
        # the literal suffix instead, consistent with
        # setUseRequestedDevice().
        info_devices.append({
            "name": device.name.replace(" (Display)", ""),
            "type": device.type,
        })
    print("Benchmark Devices: {}" . format(json.dumps(info_devices)))
|
||||
|
||||
|
||||
def main():
    """
    Entry point for the in-Blender configuration helper.

    Parses the arguments after the "--" separator, optionally reduces
    sample counts for a warm-up pass, selects the requested compute
    device, and prints device/system info for the caller to parse.
    Exits with status 1 when the device cannot be configured.
    """
    import argparse
    import sys

    argv = sys.argv
    if "--" not in argv:
        return

    parser = argparse.ArgumentParser(
        description="Cycles benchmark helper script.")
    parser.add_argument("--benchmark-warmup",
                        help="Do quick warm-up render pass",
                        action='store_true',
                        default=False)
    parser.add_argument("--benchmark-device-type",
                        help="Device type to be rendered on",
                        default="CPU")
    parser.add_argument("--benchmark-device",
                        help="Device to be rendered on",
                        default="")
    parser.add_argument("--benchmark-system-info",
                        help="Dump whole system information",
                        action='store_true',
                        default=False)

    argv = argv[argv.index("--") + 1:]
    args = parser.parse_args(argv)

    context = bpy.context
    cpref = context.user_preferences.addons['cycles'].preferences

    # Adjust samples so we render real quick.
    if args.benchmark_warmup:
        for scene in bpy.data.scenes:
            scene.cycles.samples = 1
            scene.cycles.aa_samples = 1

    # Configure the compute device.
    # NOTE: device_ok previously stayed undefined for unknown device
    # types, raising NameError below; default to False instead so an
    # unknown type fails cleanly via sys.exit(1).
    device_ok = False
    if args.benchmark_device_type == 'CPU':
        device_ok = setDeviceCPU(context, cpref)
    elif args.benchmark_device_type == 'CUDA':
        device_ok = setDeviceCUDA(context, cpref, args.benchmark_device)
    elif args.benchmark_device_type == 'OPENCL':
        device_ok = setDeviceOpenCL(context, cpref, args.benchmark_device)
    elif args.benchmark_device_type == 'GPU':
        device_ok = setDeviceGPU(context, cpref, args.benchmark_device)
    if not device_ok:
        sys.exit(1)

    if args.benchmark_system_info:
        logSystemInfo(cpref)
    else:
        logComputeDevices(cpref)


if __name__ == "__main__":
    main()
|
351
benchmark/farm.py
Executable file
351
benchmark/farm.py
Executable file
@@ -0,0 +1,351 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import foundation
|
||||
from foundation import (benchrunner,
|
||||
buildbot,
|
||||
config,
|
||||
context,
|
||||
logger,
|
||||
system_info,
|
||||
util)
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
|
||||
########################################
|
||||
# Parser helpers.
|
||||
########################################
|
||||
|
||||
|
||||
def configureArgumentParser():
    """Build the command-line parser for the farm benchmark runner."""
    parser = argparse.ArgumentParser(
        description="Cycles benchmark helper script.")
    parser.add_argument("-b", "--blender",
                        default="",
                        help="Full file path to Blender's binary " +
                             "to use for rendering")
    parser.add_argument("-d", "--scenes-dir",
                        default="",
                        help="Directory with scenes")
    # The explicit scene list option is intentionally disabled for now:
    # parser.add_argument('-s', '--scenes', nargs='+',
    #                     help='Scenes to be rendered', default=[])
    parser.add_argument('-t', '--device-type',
                        default="CPU",
                        help="Type of the device to render on")
    parser.add_argument('-n', '--device-name',
                        default="",
                        help="Device name to render on")
    parser.add_argument('-v', '--verbose',
                        action='store_true',
                        default=False,
                        help="Do verbose logging")
    return parser
|
||||
|
||||
########################################
|
||||
# Configuration helpers.
|
||||
########################################
|
||||
|
||||
|
||||
def injectDefaultConfiguration(config):
    """
    Set every supported property of *config* to its default value.
    """
    root_dir = util.getBundleRootDirectory()
    config['farm'] = {
        "scenes_dir": os.path.join(root_dir, "scenes"),
        "device_name": "",
    }
|
||||
|
||||
|
||||
def injectArgparseConfiguration(config, args):
    """
    Override configuration settings with the arguments that were
    actually passed on the command line (empty values are ignored).
    """
    section = config['farm']
    overrides = (("blender", args.blender),
                 ("scenes_dir", args.scenes_dir),
                 ("device_type", args.device_type),
                 ("device_name", args.device_name))
    for key, value in overrides:
        if value:
            section[key] = value
|
||||
|
||||
|
||||
def readConfiguration(args):
    """
    Build a BenchmarkConfig: defaults first, then the global "farm"
    config files, then command-line overrides.
    """
    config = foundation.config.BenchmarkConfig()
    injectDefaultConfiguration(config)
    read_configs = config.readGlobalConfig("farm")
    if read_configs:
        logger.INFO("Configuration was read from:")
        for cfg in read_configs:
            print(" " + util.stripSensitiveInfo(cfg))
    injectArgparseConfiguration(config, args)
    return config
|
||||
|
||||
|
||||
def checkConfiguration(config):
    """
    Check whether configuration is complete and usable.

    Logs the first problem found and returns False; returns True when
    everything is OK.
    """
    required_keys = ('device_type', )
    known_device_types = ('CPU', 'GPU', 'CUDA', 'OPENCL', )
    logger.INFO("Validating configuration...")
    # Check whether section exists.
    if 'farm' not in config.sections():
        logger.INFO(" Missing configuration section for 'farm'.")
        return False
    section = config['farm']
    # Check whether required keys exists.
    for required_key in required_keys:
        if required_key not in section:
            logger.INFO(" Missing configuration key {}" .
                        format(required_key))
            return False
    # Check whether device configuration is correct.
    device_type = section['device_type']
    if device_type not in known_device_types:
        logger.INFO(" Unknown device type {}" . format(device_type))
        return False
    if device_type != 'CPU':
        if 'device_name' not in section or not section['device_name']:
            logger.INFO(" Need to explicitly specify device name.")
            return False
    # Check whether directories are correct.
    # NOTE: use .get() -- indexing raised KeyError for a missing key
    # instead of reporting the configuration problem.
    if not os.path.exists(section.get("scenes_dir", "")):
        logger.INFO(" Scenes directory does not exist.")
        return False
    if not section.get("output_dir"):
        logger.INFO(" Missing configuration for output directory.")
        return False
    return True
|
||||
|
||||
|
||||
########################################
|
||||
# Temporary directories helpers.
|
||||
########################################
|
||||
|
||||
def createTempDirectory(config):
    """
    Create a unique temporary working directory and return its path.

    In a development environment this may later point at a pre-defined
    directory where data (for example an already-downloaded Blender)
    exists between runs.
    """
    return tempfile.mkdtemp(prefix="blender-benchmark-")
|
||||
|
||||
|
||||
def deleteTempDirectory(config, temp_dir):
    """
    Remove the automatically created temporary directory and its
    contents.
    """
    shutil.rmtree(temp_dir)
|
||||
|
||||
|
||||
########################################
|
||||
# Latets Blender helper script.
|
||||
########################################
|
||||
|
||||
def downloadLatestBlender(directory):
    """
    Download the latest buildbot Blender into *directory*.

    Returns the local archive path, or None when the latest version
    could not be determined.
    """
    # TODO(sergey): This we need to change to currently running configuration.
    url = buildbot.buildbotGetLatetsVersion("Linux", "64bit")
    if not url:
        logger.ERROR("Unable to figure out latest Blender version")
        return
    archive_name = url.split('/')[-1]
    logger.INFO("Found latest Blender: {}" . format(archive_name))
    local_blender = os.path.join(directory, archive_name)
    logger.INFO("Downloading Blender...")
    util.downloadFile(url, local_blender)
    return local_blender
|
||||
|
||||
|
||||
def findBlenderDirInDirectory(directory):
    """
    Return the first sub-directory of *directory* whose name starts
    with "blender", or None when there is none.
    """
    candidates = (name for name in os.listdir(directory)
                  if name.startswith("blender"))
    for name in candidates:
        full_path = os.path.join(directory, name)
        if os.path.isdir(full_path):
            return full_path
    return None
|
||||
|
||||
|
||||
def findBlenderInDirectory(directory):
    """
    Locate the blender executable inside the unpacked build found in
    *directory*.  Only Linux is currently supported.
    """
    blender_dir = findBlenderDirInDirectory(directory)
    if sys.platform == 'linux':
        return os.path.join(blender_dir, "blender")
    raise Exception("Need to support your OS!")
|
||||
|
||||
|
||||
def getLatetsBlenderBinary(config, temp_dir):
    """
    Return the full path to the Blender executable to benchmark.

    Uses the binary from the configuration when one is set; otherwise
    downloads and unpacks the latest buildbot build into *temp_dir*.
    """
    # First try to use Blender specified in configuration.
    farm_section = config['farm']
    if 'blender' in farm_section:
        return farm_section['blender']
    # Well, download and unpack the latest Blender from buildbot.
    logger.INFO("Will get latest Blender from buildbot.")
    blender_archive = downloadLatestBlender(temp_dir)
    logger.INFO("Unpacking Blender...")
    util.unpackArchive(blender_archive, temp_dir)
    return findBlenderInDirectory(temp_dir)
|
||||
|
||||
########################################
|
||||
# Results output.
|
||||
########################################
|
||||
|
||||
|
||||
def latestDirGet(path):
    """
    Return the biggest integer-named entry in *path*, or None when
    there is none.

    Fixes two defects of the original: non-numeric entries (stray
    files) made ``int(f)`` raise ValueError, and ``if not max_file``
    mishandled an existing entry named "0".
    """
    max_number = None
    for entry in os.listdir(path):
        try:
            number = int(entry)
        except ValueError:
            # Ignore entries that are not run numbers.
            continue
        if max_number is None or number > max_number:
            max_number = number
    return max_number
|
||||
|
||||
|
||||
def ensureOutputDir(config):
    """
    Create and return the next numbered results directory inside the
    configured output directory.
    """
    output_dir = config['farm']['output_dir']
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    previous = latestDirGet(output_dir) or 0
    new_full_dir = os.path.join(output_dir, str(previous + 1).zfill(8))
    os.mkdir(new_full_dir)
    return new_full_dir
|
||||
|
||||
|
||||
def ensureImageOutputDir(results_output_dir):
    """Create and return the "images" directory inside the results dir."""
    images_output_dir = os.path.join(results_output_dir, "images")
    os.mkdir(images_output_dir)
    return images_output_dir
|
||||
|
||||
|
||||
def getResultJSONString(ctx, results):
    """
    Serialize *results* to a JSON string.

    Per-scene Stats objects are converted to dicts tagged with
    result "OK"; scenes whose stats are falsy are tagged "CRASH".
    """
    scene_stats_map = results['stats']
    for scene in ctx.scenes:
        if scene not in scene_stats_map:
            continue
        scene_stats = scene_stats_map[scene]
        if scene_stats:
            scene_stats_map[scene] = scene_stats.asDict()
            scene_stats_map[scene]['result'] = 'OK'
        else:
            scene_stats_map[scene] = {'result': 'CRASH'}
    return json.dumps(results, sort_keys=True, indent=4)
|
||||
|
||||
|
||||
def saveResults(ctx, results, output_dir):
    """Write the benchmark results as "results.json" in *output_dir*."""
    payload = getResultJSONString(ctx, results)
    with open(os.path.join(output_dir, "results.json"), "w") as handle:
        handle.write(payload)
|
||||
|
||||
########################################
|
||||
# Main logic.
|
||||
########################################
|
||||
|
||||
def main():
    """
    Farm benchmark entry point.

    Reads and validates configuration, obtains a Blender binary (from
    config or buildbot), renders every scene, and saves the results as
    JSON.  Returns True on success, False otherwise.
    """
    parser = configureArgumentParser()
    args = parser.parse_args()
    logger.VERBOSE = args.verbose
    logger.init()
    logger.HEADER("Cycles Benchmark Suite v{}, farm edition" .
                  format(foundation.VERSION))
    # Some platform independent directories to helper scripts.
    script_directory = os.path.dirname(os.path.realpath(__file__))
    configure_script = os.path.join(script_directory, "configure.py")
    # Read configuration file, so we know what we will be doing.
    config = readConfiguration(args)
    if not checkConfiguration(config):
        logger.ERROR("Configuration is not complete or valid, aborting.")
        return False
    logger.INFO("Configuration looks reasonable, continuing.")
    # Create temporary directory, all runtime files will be stored there.
    temp_dir = createTempDirectory(config)
    results_output_dir = ensureOutputDir(config)
    images_output_dir = ensureImageOutputDir(results_output_dir)
    try:
        blender_binary = getLatetsBlenderBinary(config, temp_dir)
        logger.INFO("Will benchmark the following Blender: {}".
                    format(util.stripSensitiveInfo(blender_binary)))
        # Benchmark context initialization.
        farm_config = config['farm']
        ctx = context.Context()
        ctx.blender = blender_binary
        ctx.configure_script = configure_script
        ctx.scenes_dir = farm_config['scenes_dir']
        ctx.device_type = farm_config['device_type']
        ctx.device_name = farm_config['device_name']
        ctx.image_output_dir = images_output_dir
        ctx.scenes = ctx.listAllScenes(ctx.scenes_dir)
        # Print preliminary information.
        # NOTE: renamed from the original typo "blender_dvice_info".
        blender_device_info = benchrunner.benchmarkGetDeviceInfo(ctx)
        if not blender_device_info['device_type']:
            logger.ERROR("Requested device can not be enabled in Blender.")
            logger.INFO("Requested device details:")
            logger.INFO(" Device type: {}" . format(ctx.device_type))
            logger.INFO(" Device name: {}" . format(ctx.device_name))
            all_stats = None
        else:
            logger.INFO("Configured device details:")
            logger.INFO(" Device type: {}" .
                        format(blender_device_info["device_type"]))
            logger.INFO(" Compute devices:")
            for compute_device in blender_device_info["compute_devices"]:
                logger.INFO(" {}" . format(compute_device))
            # Run benchmark.
            all_stats = benchrunner.benchmarkAll(ctx)
        # Gather all information together.
        results = {
            "blender_version": system_info.getBlenderVersion(ctx),
            "system_info": system_info.gatherSystemInfo(ctx),
            "device_info": blender_device_info,
            "stats": all_stats if all_stats else {}
        }
        saveResults(ctx, results, results_output_dir)
        return all_stats is not None
    finally:
        deleteTempDirectory(config, temp_dir)
    # NOTE: the original had an unreachable "return True" here -- the
    # try block always returns first.  It was removed.


if __name__ == "__main__":
    if not main():
        sys.exit(1)
|
1
benchmark/foundation/__init__.py
Normal file
1
benchmark/foundation/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
VERSION = "0.1"
|
162
benchmark/foundation/benchrunner.py
Normal file
162
benchmark/foundation/benchrunner.py
Normal file
@@ -0,0 +1,162 @@
|
||||
import os
|
||||
import time
|
||||
import subprocess
|
||||
|
||||
from foundation import (logger,
|
||||
progress,
|
||||
stats,
|
||||
util)
|
||||
|
||||
|
||||
def constructBenchmarkCommand(ctx, scene, blendfile, output_folder, cfra):
    """
    Build the Blender command line to render *blendfile* at frame
    *cfra*, including the device-configuration python hook.
    """
    command = [ctx.blender,
               "--background",
               "--factory-startup",
               "-noaudio",
               "--debug-cycles",
               "--enable-autoexec",
               "--engine", "CYCLES",
               blendfile]
    if ctx.image_output_dir:
        # Keep the rendered image around for later inspection.
        render_output = os.path.join(ctx.image_output_dir, scene) + "#"
        command += ["--render-format", "PNG",
                    "--render-output", render_output]
    else:
        command += ["--render-output", output_folder,
                    "--render-format", "PNG"]
    command += ["--python", ctx.configure_script,
                "-f", str(cfra),
                "--", "--benchmark-device-type", ctx.device_type]
    if ctx.device_name:
        command += ["--benchmark-device", ctx.device_name]
    return command
|
||||
|
||||
|
||||
def benchmarkBlenderWatched(command):
    """
    Run Blender with *command*, streaming its combined stdout/stderr
    into a Stats object while drawing a progress bar.

    Returns the Stats on success, or None when Blender exited with a
    non-zero status.
    """
    # Run Blender with configured command line.
    # NOTE: fixed log message typo ("execuet" -> "execute").
    logger.DEBUG("About to execute command: {}" . format(command))
    start_time = time.time()
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)

    # Keep reading status while Blender is alive.
    st = stats.Stats()
    while True:
        line = process.stdout.readline()
        if line == b"" and process.poll() is not None:
            break
        line = line.decode().strip()
        if line == "":
            continue
        if logger.VERBOSE:
            print(line)
        else:
            logger.DEBUG("Line from stdout: {}" . format(line))
        st.update(line)
        if st.current_tiles != 0:
            elapsed_time = time.time() - start_time
            elapsed_time_str = util.humanReadableTimeDifference(elapsed_time)
            progress.progress(st.current_tiles,
                              st.total_tiles,
                              prefix="Path Tracing Tiles {}" .
                              format(elapsed_time_str))

    # Clear line used by progress.
    progress.progressClear()

    if process.returncode != 0:
        logger.ERROR("Rendering crashed")
        return None
    logger.OK("Successfully rendered")

    return st
|
||||
|
||||
|
||||
def benchmarkScene(ctx, scene):
    """
    Render one scene twice (a warm-up pass, then the timed pass) and
    return its Stats, or None when the render crashed.
    """
    logger.BOLD("Begin benchmark of scene {}" . format(scene))
    # Get usable full path to the corresponding .blend file.
    blendfile = ctx.getSceneFilename(scene)
    logger.DEBUG("File to use: {}" . format(blendfile))
    # Get command for rendering.
    # TODO(sergey): Create some temp folder.
    cfra = util.queryCurrentFrame(blendfile)
    command = constructBenchmarkCommand(ctx, scene, blendfile, "/tmp/", cfra)
    logger.DEBUG("Command for rendering: {}" . format(command))

    logger.INFO("> Warm-up round, making sure everything is ready " +
                "(this might take several minutes).")
    warmup_command = command + ['--benchmark-warmup']
    benchmarkBlenderWatched(warmup_command)
    # Remove result of the warm-up round.
    if ctx.image_output_dir:
        full_image_output = os.path.join(ctx.image_output_dir, scene) + \
            str(cfra) + ".png"
        if os.path.exists(full_image_output):
            os.remove(full_image_output)
    # TODO(sergey): Consider doing several passes.
    logger.INFO("> Doing real benchmark pass now.")
    # NOTE: use a distinct name so the module-level `stats` import is
    # not shadowed by this local (the original assigned to `stats`).
    scene_stats = benchmarkBlenderWatched(command)
    # Rename file to more sensible name.
    if ctx.image_output_dir:
        if os.path.exists(full_image_output):
            full_image_output_no_frame = \
                os.path.join(ctx.image_output_dir, scene) + ".png"
            os.rename(full_image_output, full_image_output_no_frame)
    if scene_stats:
        logger.INFO("Total render time: {}" . format(
            util.humanReadableTimeDifference(
                scene_stats.pipeline_render_time)))
    return scene_stats
|
||||
|
||||
|
||||
def benchmarkAll(ctx):
    """
    Benchmark every scene from the context with requested settings.

    Returns {scene: Stats-or-None}, or False when the context fails
    verification.
    """
    # First of all, print summary of what we'll be doing.
    ctx.printSummary()
    if not ctx.verify():
        return False
    return {scene: benchmarkScene(ctx, scene) for scene in ctx.scenes}
|
||||
|
||||
|
||||
def benchmarkGetDeviceInfo(ctx):
    """
    Launch Blender with the configure script and parse which compute
    device(s) it actually enabled.

    Returns {"device_type": str ("" when configuration failed),
             "compute_devices": [str, ...]}.
    """
    command = [ctx.blender,
               "--background",
               "--factory-startup",
               "-noaudio",
               "--enable-autoexec",
               "--engine", "CYCLES",
               "--python", ctx.configure_script,
               "--",
               "--benchmark-device-type", ctx.device_type]
    if ctx.device_name:
        command.extend(["--benchmark-device", ctx.device_name])
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    stdout, _ = process.communicate()
    # Parse output printed by the configure script.
    device_type = ""
    compute_devices = []
    for line in stdout.decode().split("\n"):
        if line.startswith("Compute device type:"):
            device_type = line.split(':', 1)[1].strip()
        elif line.startswith("Using compute device:"):
            compute_devices.append(line.split(':', 1)[1].strip())
    return {"device_type": device_type,
            "compute_devices": compute_devices}
|
||||
|
||||
|
||||
def benchmarkPrintDeviceInfo(ctx):
    """Query the effective device configuration and log it."""
    device_info = benchmarkGetDeviceInfo(ctx)
    logger.INFO(" Device type: {}" . format(device_info["device_type"]))
    logger.INFO(" Compute devices:")
    for compute_device in device_info["compute_devices"]:
        logger.INFO(" {}" . format(compute_device))
|
79
benchmark/foundation/buildbot.py
Normal file
79
benchmark/foundation/buildbot.py
Normal file
@@ -0,0 +1,79 @@
|
||||
import os
|
||||
import re
|
||||
import requests
|
||||
from html.parser import HTMLParser
|
||||
|
||||
BUILDBOT_URL = "https://builder.blender.org/"
|
||||
BUILDBOT_DOWNLOAD_URL = BUILDBOT_URL + "download/"
|
||||
|
||||
|
||||
class BuildbotHTMLParser(HTMLParser):
    """
    Collect the href of every link encountered after the start of the
    first "table-striped" table on the buildbot download page (the
    official builds table) and before the start of the next one.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        # True between the official table's start tag and the start of
        # the next striped table.
        self._is_inside_official_table = False
        self._is_official_table_finished = False
        self.official_builds = []

    def handle_starttag(self, tag, attrs):
        tag_lower = tag.lower()
        if tag_lower == 'a':
            if self._is_inside_official_table:
                for name, value in attrs:
                    if name.lower() == 'href':
                        self.official_builds.append(value)
        elif tag_lower == 'table':
            classes = ()
            for name, value in attrs:
                if name.lower() == 'class':
                    classes = value.lower().split()
            if 'table-striped' in classes:
                if self._is_inside_official_table:
                    # A second striped table marks the end of the
                    # official one.
                    self._is_inside_official_table = False
                    self._is_official_table_finished = True
                elif not self._is_official_table_finished:
                    self._is_inside_official_table = True

    def handle_endtag(self, tag):
        pass

    def handle_data(self, data):
        pass
|
||||
|
||||
|
||||
def _getBuildbotPlatformRegex(platform, bitness):
|
||||
platform_lower = platform.lower()
|
||||
if platform_lower in ("linux", "lin"):
|
||||
if bitness.startswith("64"):
|
||||
return re.compile(".*linux-glibc[0-9]+-x86_64.*")
|
||||
elif bitness.startswith("32"):
|
||||
return re.compile(".*linux-glibc[0-9]+-i686.*")
|
||||
else:
|
||||
# TOGO(sergey): Needs implementation
|
||||
pass
|
||||
return None
|
||||
|
||||
|
||||
def buildbotGetLatetsVersion(platform, bitness):
    """
    Return the download URL of the latest official Blender build for
    the given platform/bitness.

    Returns None when the page cannot be fetched or no matching build
    is listed.
    """
    # Get content of the page.
    response = requests.get(BUILDBOT_DOWNLOAD_URL)
    if response.status_code != requests.codes.ok:
        return None
    # Parse the page.
    parser = BuildbotHTMLParser()
    parser.feed(response.text)
    # Get build which corresponds to requested platform.
    regex = _getBuildbotPlatformRegex(platform, bitness)
    if regex is None:
        return None
    for build in parser.official_builds:
        if regex.match(build):
            return BUILDBOT_DOWNLOAD_URL + build
    return None
|
53
benchmark/foundation/config.py
Normal file
53
benchmark/foundation/config.py
Normal file
@@ -0,0 +1,53 @@
|
||||
import configparser
|
||||
import foundation
|
||||
from foundation import util
|
||||
import os
|
||||
|
||||
|
||||
class BenchmarkConfig:
    """
    Thin wrapper around configparser.ConfigParser with helpers for
    reading the benchmark's global configuration files.
    """

    def __init__(self):
        self.config_ = configparser.ConfigParser()

    def readFromFile(self, filename):
        """
        Read configuration from *filename* (a full file path).

        Returns the list of files actually parsed; an empty list when
        the file does not exist.
        """
        if not os.path.exists(filename):
            return []
        return self.config_.read(filename)

    def readGlobalConfig(self, name):
        """Read "<name>.cfg" from the benchmark's configuration folder."""
        config_dir = util.getGlobalConfigDirectory()
        return self.readFromFile(os.path.join(config_dir, name + ".cfg"))

    def dump(self):
        """Print the whole configuration, for debugging purposes."""
        for section_name in self.config_.sections():
            print("[{}]" . format(section_name))
            for key, value in self.config_[section_name].items():
                print("{} = {} " . format(key, value))

    # Bypass some handy methods to underlying configuration object.

    def sections(self):
        return self.config_.sections()

    def __getitem__(self, key):
        return self.config_.__getitem__(key)

    def __setitem__(self, key, value):
        return self.config_.__setitem__(key, value)
|
100
benchmark/foundation/context.py
Normal file
100
benchmark/foundation/context.py
Normal file
@@ -0,0 +1,100 @@
|
||||
import os
|
||||
|
||||
from foundation import logger
|
||||
|
||||
|
||||
class Context:
    """
    Runtime settings for one benchmark run: paths, device selection and
    the list of scenes to render.
    """

    __slots__ = ('blender',
                 'device_type',
                 'device_name',
                 'scenes',
                 'scenes_dir',
                 'configure_script',
                 'image_output_dir')

    def __init__(self):
        # Full path to blender binary.
        self.blender = "blender"
        # Type of the device to run on: CPU, CUDA or OpenCL.
        self.device_type = 'CPU'
        # Name of the device to render on.
        self.device_name = 'NAME'
        # Scenes to benchmark; it is up to the user to fill this in.
        self.scenes = []
        # Directory holding the scene folders.
        self.scenes_dir = ""
        # Blender-side configuration script.
        self.configure_script = "configure.py"
        # Where render result images are saved; empty disables saving.
        self.image_output_dir = ""

    def listAllScenes(self, directory):
        """
        Return the sorted scene names in *directory* that provide a
        usable .blend file for the current device type.
        """
        if not os.path.exists(directory):
            logger.ERROR("Non-exiting directory {}" . format(directory))
            return []
        if not os.path.isdir(directory):
            logger.ERROR("Not a directory {}" . format(directory))
            return []
        usable_scenes = []
        for scene in sorted(os.listdir(directory)):
            candidate = self.getSceneFilename(scene, do_checks=False)
            if os.path.isfile(candidate):
                usable_scenes.append(scene)
        return usable_scenes

    def printSummary(self):
        """Log what this run is configured to do."""
        logger.INFO("Benchmark summary:")
        logger.INFO(" Device type: {}" . format(self.device_type))
        if self.device_name:
            logger.INFO(" Device name: {}" . format(self.device_name))
        logger.INFO(" Scenes: {}" . format(", ".join(self.scenes)))

    def verify(self):
        """
        Check that the configured paths exist; log the first problem
        and return False, True when everything is usable.
        """
        if not os.path.exists(self.blender):
            logger.ERROR("Missing blender: {}" . format(self.blender))
            return False
        if not os.path.isfile(self.blender):
            logger.ERROR("Blender is not a file: {}" . format(self.blender))
            return False
        if not os.path.exists(self.configure_script):
            logger.ERROR("Missing configuration script: {}" .
                         format(self.configure_script))
            return False
        if not os.path.isfile(self.configure_script):
            logger.ERROR("Configuration script is not a file: {}" .
                         format(self.configure_script))
            return False
        if self.image_output_dir:
            if not os.path.exists(self.image_output_dir):
                logger.ERROR("Missing image output directory: {}" .
                             format(self.image_output_dir))
                return False
            if not os.path.isdir(self.image_output_dir):
                logger.ERROR("Image out is not a directory: {}" .
                             format(self.image_output_dir))
                return False
        return True

    def getDeviceFileSuffix(self):
        """Map the device type to the per-device .blend name suffix."""
        suffix_map = {'CPU': '_cpu',
                      'CUDA': '_gpu',
                      'OPENCL': '_gpu',
                      'GPU': '_gpu'}
        suffix = suffix_map.get(self.device_type)
        if suffix is None:
            logger.FATAL("Unknown device type: {}" . format(self.device_type))
            return ""
        return suffix

    def getSceneFilename(self, scene, do_checks=True):
        """
        Return the full path of the scene's .blend file for the current
        device type; with *do_checks* the path is validated (fatal on
        failure).
        """
        blendfile = scene + self.getDeviceFileSuffix() + ".blend"
        filepath = os.path.join(self.scenes_dir, scene, blendfile)
        if do_checks:
            if not os.path.exists(filepath):
                logger.FATAL("File not file: {}" . format(filepath))
            if not os.path.isfile(filepath):
                logger.FATAL("Scene is not a file: {}" . format(filepath))
        return filepath
|
95
benchmark/foundation/logger.py
Normal file
95
benchmark/foundation/logger.py
Normal file
@@ -0,0 +1,95 @@
|
||||
class COLORS_DUMMY:
    """No-op color codes, used when the terminal has no ANSI support."""
    HEADER = ''
    OKBLUE = ''
    OKGREEN = ''
    WARNING = ''
    FAIL = ''
    ENDC = ''
    BOLD = ''
    UNDERLINE = ''
||||
|
||||
|
||||
class COLORS_ANSI:
    """ANSI escape sequences for colored/styled terminal output."""
    HEADER = '\033[94m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
|
||||
|
||||
|
||||
VERBOSE = False
|
||||
COLORS = COLORS_DUMMY
|
||||
|
||||
|
||||
def supportsColor():
    """
    Return True if the running system's terminal supports color, and
    False otherwise.
    """
    import sys
    import os

    platform = sys.platform
    # 'Pocket PC' never supports ANSI; win32 only does with ANSICON.
    if platform == 'Pocket PC':
        return False
    if platform == 'win32' and 'ANSICON' not in os.environ:
        return False
    # isatty is not always implemented, #6223.
    if not hasattr(sys.stdout, 'isatty'):
        return False
    return sys.stdout.isatty()
|
||||
|
||||
|
||||
def HEADER(*args):
    """Print *args* in the header color, bold."""
    print(COLORS.HEADER + COLORS.BOLD, end="")
    print(*args, end="")
    print(COLORS.ENDC)
|
||||
|
||||
|
||||
def WARNING(*args):
    """Print *args* in the warning color, bold."""
    print(COLORS.WARNING + COLORS.BOLD, end="")
    print(*args, end="")
    print(COLORS.ENDC)
|
||||
|
||||
|
||||
def ERROR(*args):
    """Print *args* in the failure color, bold."""
    print(COLORS.FAIL + COLORS.BOLD, end="")
    print(*args, end="")
    print(COLORS.ENDC)
|
||||
|
||||
|
||||
def OK(*args):
    """Print a message highlighted as a bold success."""
    message = " ".join(str(arg) for arg in args)
    print(COLORS.OKGREEN + COLORS.BOLD + message + COLORS.ENDC)
|
||||
|
||||
|
||||
def BOLD(*args):
    """Print a message in bold."""
    message = " ".join(str(arg) for arg in args)
    print(COLORS.BOLD + message + COLORS.ENDC)
|
||||
|
||||
|
||||
def INFO(*args):
    """Print a plain informational message."""
    message = " ".join(str(arg) for arg in args)
    print(message)
|
||||
|
||||
|
||||
def DEBUG(*args):
    """Print a debug message (currently always disabled)."""
    # TODO(sergey): Add check that debug is enabled.
    debug_enabled = False
    if debug_enabled:
        print(*args)
|
||||
|
||||
|
||||
def FATAL(*args):
    """Print an error message and terminate the process with exit code 1."""
    import sys
    ERROR(*args)
    sys.exit(1)
|
||||
|
||||
|
||||
def init():
    """Select the active color palette based on terminal support."""
    global COLORS
    # Verbose mode stays uncolored so logs are easy to grep/pipe.
    if not VERBOSE and supportsColor():
        COLORS = COLORS_ANSI
|
34
benchmark/foundation/progress.py
Normal file
34
benchmark/foundation/progress.py
Normal file
@@ -0,0 +1,34 @@
|
||||
import shutil
|
||||
import sys
|
||||
|
||||
from foundation import logger
|
||||
|
||||
|
||||
def progress(count, total, prefix="", suffix=""):
    """Draw a single-line progress bar for count/total on stdout."""
    # Progress bars and verbose log output don't mix.
    if logger.VERBOSE:
        return

    terminal_size = shutil.get_terminal_size((80, 20))

    prefix_text = prefix + " " if prefix != "" else prefix
    suffix_text = " " + suffix if suffix != "" else suffix

    # Leave room for the brackets, percentage and padding.
    bar_len = terminal_size.columns - len(prefix_text) - len(suffix_text) - 10
    filled_len = int(round(bar_len * count / float(total)))

    percents = round(100.0 * count / float(total), 1)
    bar = '=' * filled_len + '-' * (bar_len - filled_len)

    sys.stdout.write('%s[%s] %s%%%s\r' % (prefix_text, bar,
                                          percents, suffix_text))
    sys.stdout.flush()
|
||||
|
||||
|
||||
def progressClear():
    """Erase the progress bar line from the terminal."""
    if logger.VERBOSE:
        return

    columns = shutil.get_terminal_size((80, 20)).columns
    sys.stdout.write(" " * columns + "\r")
    sys.stdout.flush()
|
88
benchmark/foundation/stats.py
Normal file
88
benchmark/foundation/stats.py
Normal file
@@ -0,0 +1,88 @@
|
||||
import re
|
||||
|
||||
from foundation import util
|
||||
|
||||
|
||||
class Stats:
    """Accumulates render time and memory statistics parsed from Cycles logs.

    Feed raw log lines to update(); read the collected values via the
    public attributes, print() or asDict().
    """

    def __init__(self):
        # Prepare regexes used to parse the render log output.
        self.re_path_tracing = re.compile(
            ".*Path Tracing Tile ([0-9]+)/([0-9]+)" +
            "(, Sample ([0-9]+)\/([0-9]+))?$")
        self.re_total_render_time = re.compile(
            ".*Total render time: ([0-9]+(\.[0-9]+)?)")
        self.re_render_time_no_sync = re.compile(
            ".*Render time \(without synchronization\): ([0-9]+(\.[0-9]+)?)")
        self.re_pipeline_time = re.compile(
            "Time: ([0-9:\.]+) \(Saving: ([0-9:\.]+)\)")
        self.re_cycles_memory = re.compile(
            ".*\| Mem:([0-9.]+[KM]?), Peak:([0-9.]+[KM]?) \|.*")

        # Render time stats ("N/A" until the matching log line is seen).
        self.total_render_time = "N/A"
        self.render_time_no_sync = "N/A"
        self.pipeline_render_time = "N/A"

        # Render memory stats (megabytes).
        self.device_peak_memory = "N/A"
        self.device_memory_usage = "N/A"

        # Current tile progress.
        self.current_tiles = 0
        self.total_tiles = 0

    def update(self, line):
        """Update the statistics from a single render log line."""
        # Current tile progress.
        match = self.re_path_tracing.match(line)
        if match:
            self.current_tiles = int(match.group(1))
            self.total_tiles = int(match.group(2))
        # Total render time.
        match = self.re_total_render_time.match(line)
        if match:
            self.total_render_time = float(match.group(1))
        # Render time without sync.
        match = self.re_render_time_no_sync.match(line)
        if match:
            self.render_time_no_sync = float(match.group(1))
        # Total pipeline time.
        match = self.re_pipeline_time.match(line)
        if match:
            self.pipeline_render_time = \
                util.humanReadableTimeToSeconds(match.group(1))
        # Memory usage.
        match = self.re_cycles_memory.match(line)
        if match:
            mem = util.humanReadableSizeToMegabytes(match.group(1))
            # BUGFIX: peak is the second capture group, not the first.
            peak = util.humanReadableSizeToMegabytes(match.group(2))
            if self.device_memory_usage == "N/A" or \
               mem > self.device_memory_usage:
                self.device_memory_usage = mem
            # BUGFIX: track the reported peak value, not the current usage.
            if self.device_peak_memory == "N/A" or \
               peak > self.device_peak_memory:
                self.device_peak_memory = peak

    def print(self):
        """Print a human readable summary of the collected statistics."""
        # TODO(sergey): Check that all stats are available.
        print("Total pipeline render time: {} ({} sec)"
              . format(util.humanReadableTimeDifference(
                           self.pipeline_render_time),
                       self.pipeline_render_time))
        print("Total Cycles render time: {} ({} sec)"
              . format(util.humanReadableTimeDifference(
                           self.total_render_time),
                       self.total_render_time))
        print("Pure Cycles render time (without sync): {} ({} sec)"
              . format(util.humanReadableTimeDifference(
                           self.render_time_no_sync),
                       self.render_time_no_sync))
        # BUGFIX: fixed "memoty" typo in the user-visible message.
        print("Cycles memory usage: {} ({} peak)"
              . format(self.device_memory_usage,
                       self.device_peak_memory))

    def asDict(self):
        """Return the collected statistics as a plain dictionary."""
        return {'total_render_time': self.total_render_time,
                'render_time_no_sync': self.render_time_no_sync,
                'pipeline_render_time': self.pipeline_render_time,
                'device_peak_memory': self.device_peak_memory,
                'device_memory_usage': self.device_memory_usage}
|
73
benchmark/foundation/system_info.py
Normal file
73
benchmark/foundation/system_info.py
Normal file
@@ -0,0 +1,73 @@
|
||||
import json
|
||||
import platform
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from third_party import cpuinfo
|
||||
|
||||
|
||||
def _getBlenderDeviceInfo(ctx):
    """
    Query Blender for its available compute devices.

    Runs Blender headless with the configure script and parses the JSON
    printed on the line starting with the "Benchmark Devices: " prefix.
    Returns an empty list when no such line is found.
    """
    PREFIX = "Benchmark Devices: "
    command = [ctx.blender,
               "--background",
               "--factory-startup",
               "-noaudio",
               "--enable-autoexec",
               "--engine", "CYCLES",
               "--python", ctx.configure_script,
               "--",
               "--benchmark-system-info"]
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    stdout, stderr = process.communicate()
    for line in stdout.decode().split("\n"):
        if line.startswith(PREFIX):
            return json.loads(line[len(PREFIX):])
    return []
|
||||
|
||||
|
||||
def getBlenderVersion(ctx):
    """
    Parse `blender --version` output into a dictionary.

    The result contains 'version' plus any of the build_* keys listed in
    INFO that Blender reported.
    """
    INFO = ("build_date",
            "build_time",
            "build_commit_date",
            "build_commit_time",
            "build_hash")
    process = subprocess.Popen([ctx.blender, "--version"],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    stdout, stderr = process.communicate()
    info = {}
    for raw_line in stdout.decode().split("\n"):
        line = raw_line.strip()
        if line.startswith("Blender "):
            # Everything after the "Blender " prefix is the version string.
            info['version'] = line[8:].strip()
        if not line.startswith("build "):
            continue
        key, value = line.split(":", 1)
        key = key.replace(" ", "_")
        if key in INFO:
            info[key] = value.strip()
    return info
|
||||
|
||||
|
||||
def gatherSystemInfo(ctx):
    """Collect OS, CPU and compute-device information into a dictionary."""
    system_info = {
        'bitness': platform.architecture()[0],
        'machine': platform.machine(),
        'system': platform.system(),
    }
    if system_info['system'] == "Linux":
        # NOTE(review): platform.linux_distribution() was removed in
        # Python 3.8 -- confirm the bundled Python still provides it.
        distro = platform.linux_distribution()
        system_info['dist_name'] = distro[0]
        system_info['dist_version'] = distro[1]
        # system_info['libc_version'] = "-".join(platform.libc_ver())
    # TODO(sergey): Make this to work on Windows and macOS
    cpu_info = cpuinfo.get_cpu_info()
    system_info['cpu_brand'] = cpu_info['brand']
    system_info['devices'] = _getBlenderDeviceInfo(ctx)
    # TODO(sergey): query number of CPUs and threads.
    return system_info
|
143
benchmark/foundation/util.py
Normal file
143
benchmark/foundation/util.py
Normal file
@@ -0,0 +1,143 @@
|
||||
import foundation
|
||||
from foundation import progress
|
||||
import os
|
||||
import requests
|
||||
import tarfile
|
||||
|
||||
|
||||
def humanReadableTimeDifference(seconds):
    """
    Convert a time difference in seconds to a human readable string.

    For example, a difference of 125 seconds is returned as "02:05.00".
    The hours field is only prepended when it is non-zero.
    """
    hours, remainder = divmod(seconds, 60 * 60)
    minutes, remainder = divmod(remainder, 60)
    if int(hours) == 0:
        return "%02d:%05.2f" % (minutes, remainder)
    return "%02d:%02d:%05.2f" % (hours, minutes, remainder)
|
||||
|
||||
|
||||
def humanReadableTimeToSeconds(time):
    """
    Convert human readable string like HH:MM:SS to seconds.

    A fractional part after a single "." is preserved (e.g. "02:05.25").
    """
    tokens = time.split(".")
    # Fractional seconds, when exactly one "." is present.
    result = float("0." + tokens[1]) if len(tokens) == 2 else 0
    multiplier = 1
    # Walk components right-to-left: seconds, minutes, hours, ...
    for component in reversed(tokens[0].split(":")):
        result += int(component) * multiplier
        multiplier *= 60
    return result
|
||||
|
||||
|
||||
def queryMainScene(filepath, callbacks):
    """
    Return the equivalent to bpy.context.scene

    Opens the blend file, resolves the active window's scene and invokes
    every callback on it, returning the list of callback results.
    """

    from blendfile import blendfile

    with blendfile.open_blend(filepath) as blend:
        # There is no bpy.context.scene, we get it from the main window
        window_managers = [block for block in blend.blocks
                           if block.code == b'WM']
        window = window_managers[0].get_pointer(b'winactive')
        screen = window.get_pointer(b'screen')
        scene = screen.get_pointer(b'scene')

        return [callback(scene) for callback in callbacks]
|
||||
|
||||
|
||||
def queryCurrentFrame(filepath):
    """
    Get frame number to render.
    """

    def read_current_frame(scene):
        return scene.get((b'r', b'cfra'))

    results = queryMainScene(filepath, [read_current_frame])
    return results[0]
|
||||
|
||||
|
||||
def humanReadableSizeToMegabytes(size):
    """
    Convert a human readable size string to a number of megabytes.

    Supported suffixes are 'K' (kilobytes), 'M' (megabytes) and, as a
    generalization, 'G' (gigabytes). A plain number without a suffix is
    assumed to already be megabytes.
    """
    suffix = size[-1]
    if suffix == 'K':
        return float(size[:-1]) / 1024
    elif suffix == 'M':
        return float(size[:-1])
    elif suffix == 'G':
        # Generalization: handle gigabyte-sized reports as well.
        return float(size[:-1]) * 1024
    else:
        return float(size)
|
||||
|
||||
|
||||
def humanReadableSize(size):
    """Format a size in megabytes for display."""
    return "{} Mb" . format(size)
|
||||
|
||||
|
||||
def downloadFile(url, filename):
    """
    Download file from given URL and save it to filename.

    Shows a progress bar when the server reports a Content-length.
    """
    r = requests.get(url, stream=True)
    downloaded_size = 0
    total_size = 0
    # BUGFIX: header values are strings; convert to int so size math and
    # comparisons are done on numbers.
    if 'Content-length' in r.headers:
        total_size = int(r.headers['Content-length'])
    with open(filename, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:  # Filter out keep-alive chunks.
                f.write(chunk)
                downloaded_size += len(chunk)
                if total_size != 0:
                    progress.progress(downloaded_size, total_size)
    if total_size != 0:
        progress.progressClear()
|
||||
|
||||
|
||||
def unpackArchive(filename, directory):
    """
    Unpack archive <filename> to given <directory>
    """
    if filename.endswith(".tar.bz2"):
        # BUGFIX: use a context manager so the archive handle is always
        # closed (the original leaked the open tarfile).
        with tarfile.open(name=filename, mode="r:bz2") as tar:
            tar.extractall(directory)
    else:
        # TODO(sergey): Need to support more archive types.
        pass
|
||||
|
||||
|
||||
def stripSensitiveInfo(s):
    """
    Strip any possibly sensitive information we want to avoid from the logs.
    """
    # Replace the bundle location with a placeholder so log output does not
    # leak local filesystem paths.
    return s.replace(getBundleRootDirectory(), "<FARM_DIR>")
|
||||
|
||||
########################################
|
||||
# Directories manipulation
|
||||
########################################
|
||||
|
||||
|
||||
def getBundleRootDirectory():
    """
    Get full file path to the root directory of the benchmark bundle.
    """
    # This module lives two directory levels below the bundle root.
    return os.path.dirname(os.path.dirname(
        os.path.dirname(os.path.realpath(__file__))))
|
||||
|
||||
|
||||
def getGlobalConfigDirectory():
    """
    Get the configuration directory shared by all components of this bundle.
    """
    root = getBundleRootDirectory()
    return os.path.join(root, "config")
|
157
benchmark/main.py
Executable file
157
benchmark/main.py
Executable file
@@ -0,0 +1,157 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import foundation
|
||||
from foundation import (benchrunner,
|
||||
context,
|
||||
logger,
|
||||
system_info,
|
||||
util)
|
||||
|
||||
|
||||
def configureArgumentParser():
    """Build the command line argument parser for the benchmark script."""
    parser = argparse.ArgumentParser(
        description="Cycles benchmark helper script.")
    parser.add_argument("-b", "--blender",
                        help="Full file path to Blender's binary " +
                             "to use for rendering",
                        default="blender")
    parser.add_argument("-d", "--scenes-dir",
                        help="Directory with scenes",
                        default="")
    parser.add_argument('-s', '--scenes',
                        nargs='+',
                        help='Scenes to be rendered',
                        default=[])
    parser.add_argument('-c', '--configure-script',
                        help="Blender-side configuration script",
                        default="configure.py")
    parser.add_argument('-t', '--device-type',
                        help="Type of the device to render on",
                        default="CPU")
    parser.add_argument('-n', '--device-name',
                        help="Device name to render on",
                        default="")
    parser.add_argument('-f', '--full-dump',
                        # BUGFIX: fixed "in formation" typo in the help text.
                        help="Dump all available information",
                        action='store_true',
                        default=False)
    parser.add_argument('-j', '--json',
                        help="When in full dump mode, dump JSON",
                        action='store_true',
                        default=False)
    return parser
|
||||
|
||||
|
||||
def _printFullResult(ctx, results):
    """Print full benchmark results in a human readable form."""
    print("")
    print("=" * 40)
    # Print system information.
    sys_info = results['system_info']
    print("System info:")
    print(" System: {} {}" . format(sys_info['system'],
                                    sys_info['bitness']))
    if sys_info['system'] == "Linux":
        print(" Linux distro: {}, {}" . format(sys_info['dist_name'],
                                               sys_info['dist_version']))
    print(" CPU: {}" . format(sys_info['cpu_brand']))
    devices = sys_info['devices']
    if devices:
        print(" Compute devices:")
        for device in devices:
            print(" - {}: {}" . format(device['type'], device['name']))
    # Print Blender version.
    blender = results['blender_version']
    print("Blender:")
    print(" Version: {}" . format(blender['version']))
    print(" Hash: {}" . format(blender['build_hash']))
    print(" Commit: {} {}" . format(blender['build_commit_date'],
                                    blender['build_commit_time']))
    print(" Build: {} {}" . format(blender['build_date'],
                                   blender['build_time']))
    # Print scenes status.
    # BUGFIX: fixed "Nenchmark" typo in the user-visible message.
    print("Benchmark results:")
    stats = results['stats']
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        # BUGFIX: don't rebind the stats dict itself -- the original
        # `stats = stats[scene]` broke iteration over more than one scene.
        scene_stats = stats[scene]
        print(" {}:" . format(scene))
        print(" - Engine render time: {}" . format(
            util.humanReadableTimeDifference(
                scene_stats.total_render_time)))
        print(" - Render time without sync: {}" . format(
            util.humanReadableTimeDifference(
                scene_stats.render_time_no_sync)))
        print(" - Total render time: {}" . format(
            util.humanReadableTimeDifference(
                scene_stats.pipeline_render_time)))
        print(" - Peak memory used on device: {}" . format(
            util.humanReadableSize(scene_stats.device_peak_memory)))
        print(" - Memory used on device during rendering: {}" . format(
            util.humanReadableSize(scene_stats.device_memory_usage)))
|
||||
|
||||
|
||||
def _printFullJSONResult(ctx, results):
|
||||
import json
|
||||
# Convert custom classes to dictionaries for easier JSON dump.
|
||||
json_results = results
|
||||
stats = json_results['stats']
|
||||
for scene in ctx.scenes:
|
||||
if scene not in stats:
|
||||
continue
|
||||
stats[scene] = stats[scene].asDict()
|
||||
print(json.dumps(json_results, sort_keys=True, indent=4))
|
||||
|
||||
|
||||
def _printBriefResult(ctx, results):
    """Print a brief per-scene summary of the benchmark results."""
    print("")
    print("=" * 40)
    stats = results['stats']
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        # BUGFIX: don't rebind the stats dict itself -- the original
        # `stats = stats[scene]` broke iteration over more than one scene.
        scene_stats = stats[scene]
        print("{}: {}" . format(
            scene,
            util.humanReadableTimeDifference(
                scene_stats.pipeline_render_time)))
|
||||
|
||||
|
||||
def main():
    """Entry point: parse arguments, run the benchmark, print the results."""
    parser = configureArgumentParser()
    args = parser.parse_args()
    logger.init()
    logger.HEADER("Cycles Benchmark Suite v{}" . format(foundation.VERSION))
    # Configure context.
    ctx = context.Context()
    ctx.blender = args.blender
    ctx.scenes_dir = args.scenes_dir
    ctx.configure_script = args.configure_script
    ctx.device_type = args.device_type
    ctx.device_name = args.device_name
    # Fall back to every scene found in the scenes directory when the user
    # did not request specific ones.
    if args.scenes:
        ctx.scenes = args.scenes
    else:
        ctx.scenes = ctx.listAllScenes(args.scenes_dir)
    logger.INFO("Requested device details:")
    benchrunner.benchmarkPrintDeviceInfo(ctx)
    # Run benchmark.
    all_stats = benchrunner.benchmarkAll(ctx)
    # Gather all information together.
    result = {
        "blender_version": system_info.getBlenderVersion(ctx),
        "system_info": system_info.gatherSystemInfo(ctx),
        "stats": all_stats if all_stats else {}
    }
    # Choose the output format requested on the command line.
    if args.full_dump:
        if args.json:
            _printFullJSONResult(ctx, result)
        else:
            _printFullResult(ctx, result)
    else:
        _printBriefResult(ctx, result)


if __name__ == "__main__":
    main()
|
1875
benchmark/third_party/cpuinfo.py
vendored
Normal file
1875
benchmark/third_party/cpuinfo.py
vendored
Normal file
File diff suppressed because it is too large
Load Diff
4
config/farm.cfg.example
Normal file
4
config/farm.cfg.example
Normal file
@@ -0,0 +1,4 @@
|
||||
[farm]
|
||||
output_dir = /tmp/blender-benchmark-results
|
||||
device_type = GPU
|
||||
device_name = "GeForce GT 520M"
|
26
run.bat
Normal file
26
run.bat
Normal file
@@ -0,0 +1,26 @@
|
||||
@echo off

rem Bootstrap script: runs the Cycles benchmark using the bundled Blender
rem and its embedded Python on Windows.

rem set SCRIPTPATH_SLASH=%~dp0
rem set SCRIPTPATH=%SCRIPTPATH_SLASH:~0,-1%
rem NOTE(review): SCRIPTPATH is left empty, so all paths below are relative
rem to the current working directory -- confirm this is intended.
set SCRIPTPATH=
set BOOTSTRAP_VERSION=0.1
set BLENDER_VERSION=2.78

rem Detect OS bitness via the CPU identifier in the registry.
reg Query "HKLM\Hardware\Description\System\CentralProcessor\0" | find /i "x86" > NUL && set BITNESS=32 || set BITNESS=64

echo Cycles Benchmark Suite bootstrap v%BOOTSTRAP_VERSION%
echo Detected OS: Windows %BITNESS%bit

rem Derive all tool and data paths from the bundle layout.
set BLENDER_DIR=%SCRIPTPATH%blender\windows%BITNESS%
set BLENDER_BIN=%BLENDER_DIR%\blender.exe
set SCENES_DIR=%SCRIPTPATH%scenes
set PYTHON=%BLENDER_DIR%\%BLENDER_VERSION%\python\bin\python.exe
set BENCHMARK=%SCRIPTPATH%benchmark\main.py
set CONFIGURE_SCRIPT=%SCRIPTPATH%benchmark\configure.py

echo %BLENDER_DIR%
echo Using Python from %PYTHON%
echo Running benchmark script %BENCHMARK%

rem Run the benchmark with Blender's bundled Python, forwarding extra args.
%PYTHON% %BENCHMARK% --blender "%BLENDER_BIN%" --scenes-dir "%SCENES_DIR%" --configure-script "%CONFIGURE_SCRIPT%" %*
|
168
run.sh
Executable file
168
run.sh
Executable file
@@ -0,0 +1,168 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
UNAME=`which uname`
|
||||
|
||||
# Self versioning info
|
||||
BOOTSTRAP_VERSION="0.1"
|
||||
|
||||
# Variables which are automatically detected based on particular blender distro
|
||||
# we are benchmarking.
|
||||
BLENDER_VERSION=""
|
||||
PYTHON_VERSION=""
|
||||
PYTHON_ABI=""
|
||||
|
||||
Black=''
|
||||
Blue=''
|
||||
Green=''
|
||||
Cyan=''
|
||||
Red=''
|
||||
Purple=''
|
||||
Brown=''
|
||||
LGray=''
|
||||
DGray=''
|
||||
LBlue=''
|
||||
LGreen=''
|
||||
LCyan=''
|
||||
LRed=''
|
||||
LPurple=''
|
||||
Yellow=''
|
||||
White=''
|
||||
Normal=''
|
||||
|
||||
# Check if stdout is a terminal.
|
||||
if test -t 1; then
|
||||
# See if it supports colors.
|
||||
ncolors=$(tput colors)
|
||||
if test -n "$ncolors" && test $ncolors -ge 8; then
|
||||
Black='\033[00;30m'
|
||||
Blue='\033[00;34m'
|
||||
Green='\033[00;32m'
|
||||
Cyan='\033[00;36m'
|
||||
Red='\033[00;31m'
|
||||
Purple='\033[00;35m'
|
||||
Brown='\033[00;33m'
|
||||
LGray='\033[00;37m'
|
||||
DGray='\033[01;30m'
|
||||
LBlue='\033[01;34m'
|
||||
LGreen='\033[01;32m'
|
||||
LCyan='\033[01;36m'
|
||||
LRed='\033[01;31m'
|
||||
LPurple='\033[01;35m'
|
||||
Yellow='\033[01;33m'
|
||||
White='\033[01;37m'
|
||||
Normal='\033[00m'
|
||||
fi
|
||||
fi
|
||||
|
||||
# Replace the (possibly sensitive) script path in a message with a placeholder.
STRIP_SENSITIVE() {
    echo "${@/$SCRIPTPATH/<SCRIPT_PATH>}"
}

# Print a highlighted header line.
PRINT_HEADER() {
    echo -e "${LBlue}`STRIP_SENSITIVE "${@}"`${Normal}"
}

# Print an error message in red.
PRINT_ERROR() {
    echo -e "${LRed}Error: `STRIP_SENSITIVE "${@}"`${Normal}"
}

# Print an error message and abort the script.
FATAL_ERROR() {
    PRINT_ERROR "${@}"
    exit 1
}

# Print a regular informational message.
PRINT_INFO() {
    echo -e `STRIP_SENSITIVE "${@}"`
}
|
||||
|
||||
PRINT_HEADER "Cycles Benchmark Suite bootstrap v${BOOTSTRAP_VERSION}"
|
||||
|
||||
# Check uname exists.
|
||||
# Without this we would not be able to know which Blender to run.
|
||||
if [ -z "${UNAME}" ]; then
|
||||
PRINT_ERROR "Unable to find uname command."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check whether we support operation system.
|
||||
KERNEL_NAME=`$UNAME -s`
|
||||
PRINT_INFO "Detected OS: ${White}${KERNEL_NAME}${Normal}"
|
||||
|
||||
# Resolve the script location, Blender platform name and bitness per OS.
case "${KERNEL_NAME}" in
    Darwin)
        SCRIPTPATH=`cd "$(dirname "${0}")"; pwd`
        BLENDER_PLATFORM="macos"
        # TODO(sergey): We assume only newer 64 bit MacOS machines.
        # BUGFIX: was misspelled as BINESS, leaving BITNESS unset on macOS
        # and producing a wrong BLENDER_DIR path below.
        BITNESS="64"
        ;;

    Linux)
        SCRIPT=$(readlink -f $0)
        SCRIPTPATH=`dirname $SCRIPT`
        BLENDER_PLATFORM="linux"
        MACHINE_TYPE=`uname -m`
        # TODO(sergey): Handle other architectures than x86_64/i686 here.
        if [ ${MACHINE_TYPE} == 'x86_64' ]; then
            BITNESS="64"
        else
            BITNESS="32"
        fi
        ;;

    CYGWIN*|MINGW32*|MSYS*)
        # TODO(sergey): Maybe support Cygwin in the future.
        FATAL_ERROR "On Windows platform run.bat is to be used."
        ;;

    *)
        FATAL_ERROR "Unknown OS, can not continue."
        ;;
esac
|
||||
|
||||
if [ -z "${BLENDER_DIR}" ]; then
|
||||
BLENDER_DIR="${SCRIPTPATH}/blender/${BLENDER_PLATFORM}${BITNESS}"
|
||||
fi
|
||||
BLENDER_BIN="${BLENDER_DIR}/blender"
|
||||
SCENES_DIR="${SCRIPTPATH}/scenes"
|
||||
BENCHMARK="${SCRIPTPATH}/benchmark/main.py"
|
||||
CONFIGURE_SCRIPT="${SCRIPTPATH}/benchmark/configure.py"
|
||||
|
||||
# Detect Blender version.
|
||||
if [ ! -f "${BLENDER_BIN}" ]; then
|
||||
FATAL_ERROR "Unable to find Blender executable."
|
||||
fi
|
||||
BLENDER_VERSION=`${BLENDER_BIN} --version |
|
||||
head -n 1 | sed -r 's/Blender ([0-9]\.[0-9]{2}).*/\1/'`
|
||||
PRINT_INFO "Detected Blender version: ${White}${BLENDER_VERSION}${Normal}"
|
||||
|
||||
# Detect Python version used by Blender.
|
||||
PYTHON_VERSION=`${BLENDER_BIN} -b --python-expr \
|
||||
'import platform; print("Python Version: {}" . \
|
||||
format(platform.python_version()))' |
|
||||
grep "Python Version" |
|
||||
sed -r 's/.*\s([0-9]+)\.([0-9]+).*/\1.\2/'`
|
||||
PRINT_INFO "Detected Python version: ${White}${PYTHON_VERSION}${Normal}"
|
||||
|
||||
# Detect Python ABI
|
||||
PYTHON="${BLENDER_DIR}/${BLENDER_VERSION}/python/bin/python${PYTHON_VERSION}"
|
||||
if [ ! -f "${PYTHON}" ]; then
|
||||
for ABI in m d md; do
|
||||
PYTHON_ABI="${ABI}"
|
||||
PYTHON="${BLENDER_DIR}/${BLENDER_VERSION}/python/bin/python${PYTHON_VERSION}${PYTHON_ABI}"
|
||||
if [ -f "${PYTHON}" ]; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
PRINT_INFO "Detected Python ABI: ${White}${PYTHON_ABI}${Normal}"
|
||||
|
||||
PRINT_INFO "Using Python from ${White}${PYTHON}${Normal}"
|
||||
PRINT_INFO "Running benchmark script ${White}${BENCHMARK}${Normal}"
|
||||
|
||||
"${PYTHON}" "${BENCHMARK}" \
|
||||
--blender "${BLENDER_BIN}" \
|
||||
--scenes-dir "${SCENES_DIR}" \
|
||||
--configure-script "${CONFIGURE_SCRIPT}" \
|
||||
"${@}"
|
Reference in New Issue
Block a user