commit ab2fe20c8fb531d0e5bb38dc01f4ca8751299897 Author: Sergey Sharybin Date: Fri Aug 18 12:12:25 2017 +0200 Initial commit diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..276cf03 --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +# Ignore all directories with binary files. +/scenes +/blender +# Ignore production configuration files. +/config/*.cfg +# Ignore Python cache +__pycache__ \ No newline at end of file diff --git a/benchmark/blendfile/blendfile.py b/benchmark/blendfile/blendfile.py new file mode 100644 index 0000000..c7a83c3 --- /dev/null +++ b/benchmark/blendfile/blendfile.py @@ -0,0 +1,949 @@ +# ***** BEGIN GPL LICENSE BLOCK ***** +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# ***** END GPL LICENCE BLOCK ***** +# +# (c) 2009, At Mind B.V. 
- Jeroen Bakker +# (c) 2014, Blender Foundation - Campbell Barton + +import gzip +import logging +import os +import struct +import tempfile + +log = logging.getLogger("blendfile") + +FILE_BUFFER_SIZE = 1024 * 1024 + + +# ----------------------------------------------------------------------------- +# module global routines +# +# read routines +# open a filename +# determine if the file is compressed +# and returns a handle +def open_blend(filename, access="rb"): + """Opens a blend file for reading or writing pending on the access + supports 2 kind of blend files. Uncompressed and compressed. + Known issue: does not support packaged blend files + """ + handle = open(filename, access) + magic_test = b"BLENDER" + magic = handle.read(len(magic_test)) + if magic == magic_test: + log.debug("normal blendfile detected") + handle.seek(0, os.SEEK_SET) + bfile = BlendFile(handle) + bfile.is_compressed = False + bfile.filepath_orig = filename + return bfile + elif magic[:2] == b'\x1f\x8b': + log.debug("gzip blendfile detected") + handle.close() + log.debug("decompressing started") + fs = gzip.open(filename, "rb") + data = fs.read(FILE_BUFFER_SIZE) + magic = data[:len(magic_test)] + if magic == magic_test: + handle = tempfile.TemporaryFile() + while data: + handle.write(data) + data = fs.read(FILE_BUFFER_SIZE) + log.debug("decompressing finished") + fs.close() + log.debug("resetting decompressed file") + handle.seek(os.SEEK_SET, 0) + bfile = BlendFile(handle) + bfile.is_compressed = True + bfile.filepath_orig = filename + return bfile + else: + raise Exception("filetype inside gzip not a blend") + else: + raise Exception("filetype not a blend or a gzip blend") + + +def pad_up_4(offset): + return (offset + 3) & ~3 + + +# ----------------------------------------------------------------------------- +# module classes + + +class BlendFile: + """ + Blend file. 
+ """ + __slots__ = ( + # file (result of open()) + "handle", + # str (original name of the file path) + "filepath_orig", + # BlendFileHeader + "header", + # struct.Struct + "block_header_struct", + # BlendFileBlock + "blocks", + # [DNAStruct, ...] + "structs", + # dict {b'StructName': sdna_index} + # (where the index is an index into 'structs') + "sdna_index_from_id", + # dict {addr_old: block} + "block_from_offset", + # int + "code_index", + # bool (did we make a change) + "is_modified", + # bool (is file gzipped) + "is_compressed", + ) + + def __init__(self, handle): + log.debug("initializing reading blend-file") + self.handle = handle + self.header = BlendFileHeader(handle) + self.block_header_struct = self.header.create_block_header_struct() + self.blocks = [] + self.code_index = {} + self.structs = [] + self.sdna_index_from_id = {} + + block = BlendFileBlock(handle, self) + while block.code != b'ENDB': + if block.code == b'DNA1': + (self.structs, + self.sdna_index_from_id, + ) = BlendFile.decode_structs(self.header, block, handle) + else: + handle.seek(block.size, os.SEEK_CUR) + + self.blocks.append(block) + self.code_index.setdefault(block.code, []).append(block) + + block = BlendFileBlock(handle, self) + self.is_modified = False + self.blocks.append(block) + + if not self.structs: + raise Exception("No DNA1 block in file, this is not a valid .blend file!") + + # cache (could lazy init, incase we never use?) 
+ self.block_from_offset = {block.addr_old: block for block in self.blocks if block.code != b'ENDB'} + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def find_blocks_from_code(self, code): + assert(type(code) == bytes) + if code not in self.code_index: + return [] + return self.code_index[code] + + def find_block_from_offset(self, offset): + # same as looking looping over all blocks, + # then checking ``block.addr_old == offset`` + assert(type(offset) is int) + return self.block_from_offset.get(offset) + + def close(self): + """ + Close the blend file + writes the blend file to disk if changes has happened + """ + handle = self.handle + + if self.is_modified: + if self.is_compressed: + log.debug("close compressed blend file") + handle.seek(os.SEEK_SET, 0) + log.debug("compressing started") + fs = gzip.open(self.filepath_orig, "wb") + data = handle.read(FILE_BUFFER_SIZE) + while data: + fs.write(data) + data = handle.read(FILE_BUFFER_SIZE) + fs.close() + log.debug("compressing finished") + + handle.close() + + def ensure_subtype_smaller(self, sdna_index_curr, sdna_index_next): + # never refine to a smaller type + if (self.structs[sdna_index_curr].size > + self.structs[sdna_index_next].size): + + raise RuntimeError("cant refine to smaller type (%s -> %s)" % + (self.structs[sdna_index_curr].dna_type_id.decode('ascii'), + self.structs[sdna_index_next].dna_type_id.decode('ascii'))) + + @staticmethod + def decode_structs(header, block, handle): + """ + DNACatalog is a catalog of all information in the DNA1 file-block + """ + log.debug("building DNA catalog") + shortstruct = DNA_IO.USHORT[header.endian_index] + shortstruct2 = struct.Struct(header.endian_str + b'HH') + intstruct = DNA_IO.UINT[header.endian_index] + + data = handle.read(block.size) + types = [] + names = [] + + structs = [] + sdna_index_from_id = {} + + offset = 8 + names_len = intstruct.unpack_from(data, offset)[0] + offset += 4 + + 
log.debug("building #%d names" % names_len) + for i in range(names_len): + tName = DNA_IO.read_data0_offset(data, offset) + offset = offset + len(tName) + 1 + names.append(DNAName(tName)) + del names_len + + offset = pad_up_4(offset) + offset += 4 + types_len = intstruct.unpack_from(data, offset)[0] + offset += 4 + log.debug("building #%d types" % types_len) + for i in range(types_len): + dna_type_id = DNA_IO.read_data0_offset(data, offset) + # None will be replaced by the DNAStruct, below + types.append(DNAStruct(dna_type_id)) + offset += len(dna_type_id) + 1 + + offset = pad_up_4(offset) + offset += 4 + log.debug("building #%d type-lengths" % types_len) + for i in range(types_len): + tLen = shortstruct.unpack_from(data, offset)[0] + offset = offset + 2 + types[i].size = tLen + del types_len + + offset = pad_up_4(offset) + offset += 4 + + structs_len = intstruct.unpack_from(data, offset)[0] + offset += 4 + log.debug("building #%d structures" % structs_len) + for sdna_index in range(structs_len): + d = shortstruct2.unpack_from(data, offset) + struct_type_index = d[0] + offset += 4 + dna_struct = types[struct_type_index] + sdna_index_from_id[dna_struct.dna_type_id] = sdna_index + structs.append(dna_struct) + + fields_len = d[1] + dna_offset = 0 + + for field_index in range(fields_len): + d2 = shortstruct2.unpack_from(data, offset) + field_type_index = d2[0] + field_name_index = d2[1] + offset += 4 + dna_type = types[field_type_index] + dna_name = names[field_name_index] + if dna_name.is_pointer or dna_name.is_method_pointer: + dna_size = header.pointer_size * dna_name.array_size + else: + dna_size = dna_type.size * dna_name.array_size + + field = DNAField(dna_type, dna_name, dna_size, dna_offset) + dna_struct.fields.append(field) + dna_struct.field_from_name[dna_name.name_only] = field + dna_offset += dna_size + + return structs, sdna_index_from_id + + +class BlendFileBlock: + """ + Instance of a struct. 
+ """ + __slots__ = ( + # BlendFile + "file", + "code", + "size", + "addr_old", + "sdna_index", + "count", + "file_offset", + "user_data", + ) + + def __str__(self): + return ("<%s.%s (%s), size=%d at %s>" % + # fields=[%s] + (self.__class__.__name__, + self.dna_type_name, + self.code.decode(), + self.size, + # b", ".join(f.dna_name.name_only for f in self.dna_type.fields).decode('ascii'), + hex(self.addr_old), + )) + + def __init__(self, handle, bfile): + OLDBLOCK = struct.Struct(b'4sI') + + self.file = bfile + self.user_data = None + + data = handle.read(bfile.block_header_struct.size) + + if len(data) != bfile.block_header_struct.size: + print("WARNING! Blend file seems to be badly truncated!") + self.code = b'ENDB' + self.size = 0 + self.addr_old = 0 + self.sdna_index = 0 + self.count = 0 + self.file_offset = 0 + return + # header size can be 8, 20, or 24 bytes long + # 8: old blend files ENDB block (exception) + # 20: normal headers 32 bit platform + # 24: normal headers 64 bit platform + if len(data) > 15: + blockheader = bfile.block_header_struct.unpack(data) + self.code = blockheader[0].partition(b'\0')[0] + if self.code != b'ENDB': + self.size = blockheader[1] + self.addr_old = blockheader[2] + self.sdna_index = blockheader[3] + self.count = blockheader[4] + self.file_offset = handle.tell() + else: + self.size = 0 + self.addr_old = 0 + self.sdna_index = 0 + self.count = 0 + self.file_offset = 0 + else: + blockheader = OLDBLOCK.unpack(data) + self.code = blockheader[0].partition(b'\0')[0] + self.code = DNA_IO.read_data0(blockheader[0]) + self.size = 0 + self.addr_old = 0 + self.sdna_index = 0 + self.count = 0 + self.file_offset = 0 + + @property + def dna_type(self): + return self.file.structs[self.sdna_index] + + @property + def dna_type_name(self): + return self.dna_type.dna_type_id.decode('ascii') + + def refine_type_from_index(self, sdna_index_next): + assert(type(sdna_index_next) is int) + sdna_index_curr = self.sdna_index + 
self.file.ensure_subtype_smaller(sdna_index_curr, sdna_index_next) + self.sdna_index = sdna_index_next + + def refine_type(self, dna_type_id): + assert(type(dna_type_id) is bytes) + self.refine_type_from_index(self.file.sdna_index_from_id[dna_type_id]) + + def get_file_offset(self, path, + default=..., + sdna_index_refine=None, + base_index=0, + ): + """ + Return (offset, length) + """ + assert(type(path) is bytes) + + ofs = self.file_offset + if base_index != 0: + assert(base_index < self.count) + ofs += (self.size // self.count) * base_index + self.file.handle.seek(ofs, os.SEEK_SET) + + if sdna_index_refine is None: + sdna_index_refine = self.sdna_index + else: + self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine) + + dna_struct = self.file.structs[sdna_index_refine] + field = dna_struct.field_from_path( + self.file.header, self.file.handle, path) + + return (self.file.handle.tell(), field.dna_name.array_size) + + def get(self, path, + default=..., + sdna_index_refine=None, + use_nil=True, use_str=True, + base_index=0, + ): + + ofs = self.file_offset + if base_index != 0: + assert(base_index < self.count) + ofs += (self.size // self.count) * base_index + self.file.handle.seek(ofs, os.SEEK_SET) + + if sdna_index_refine is None: + sdna_index_refine = self.sdna_index + else: + self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine) + + dna_struct = self.file.structs[sdna_index_refine] + return dna_struct.field_get( + self.file.header, self.file.handle, path, + default=default, + use_nil=use_nil, use_str=use_str, + ) + + def get_recursive_iter(self, path, path_root=b"", + default=..., + sdna_index_refine=None, + use_nil=True, use_str=True, + base_index=0, + ): + if path_root: + path_full = ( + (path_root if type(path_root) is tuple else (path_root, )) + + (path if type(path) is tuple else (path, ))) + else: + path_full = path + + try: + yield (path_full, self.get(path_full, default, sdna_index_refine, use_nil, use_str, base_index)) + 
except NotImplementedError as ex: + msg, dna_name, dna_type = ex.args + struct_index = self.file.sdna_index_from_id.get(dna_type.dna_type_id, None) + if struct_index is None: + yield (path_full, "<%s>" % dna_type.dna_type_id.decode('ascii')) + else: + struct = self.file.structs[struct_index] + for f in struct.fields: + yield from self.get_recursive_iter( + f.dna_name.name_only, path_full, default, None, use_nil, use_str, 0) + + def items_recursive_iter(self): + for k in self.keys(): + yield from self.get_recursive_iter(k, use_str=False) + + def get_data_hash(self): + """ + Generates a 'hash' that can be used instead of addr_old as block id, and that should be 'stable' across .blend + file load & save (i.e. it does not changes due to pointer addresses variations). + """ + # TODO This implementation is most likely far from optimal... and CRC32 is not renown as the best hashing + # algo either. But for now does the job! + import zlib + def _is_pointer(self, k): + return self.file.structs[self.sdna_index].field_from_path( + self.file.header, self.file.handle, k).dna_name.is_pointer + + hsh = 1 + for k, v in self.items_recursive_iter(): + if not _is_pointer(self, k): + hsh = zlib.adler32(str(v).encode(), hsh) + return hsh + + def set(self, path, value, + sdna_index_refine=None, + ): + + if sdna_index_refine is None: + sdna_index_refine = self.sdna_index + else: + self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine) + + dna_struct = self.file.structs[sdna_index_refine] + self.file.handle.seek(self.file_offset, os.SEEK_SET) + self.file.is_modified = True + return dna_struct.field_set( + self.file.header, self.file.handle, path, value) + + # --------------- + # Utility get/set + # + # avoid inline pointer casting + def get_pointer( + self, path, + default=..., + sdna_index_refine=None, + base_index=0, + ): + if sdna_index_refine is None: + sdna_index_refine = self.sdna_index + result = self.get(path, default, sdna_index_refine=sdna_index_refine, 
base_index=base_index) + + # default + if type(result) is not int: + return result + + assert(self.file.structs[sdna_index_refine].field_from_path( + self.file.header, self.file.handle, path).dna_name.is_pointer) + if result != 0: + # possible (but unlikely) + # that this fails and returns None + # maybe we want to raise some exception in this case + return self.file.find_block_from_offset(result) + else: + return None + + # ---------------------- + # Python convenience API + + # dict like access + def __getitem__(self, item): + return self.get(item, use_str=False) + + def __setitem__(self, item, value): + self.set(item, value) + + def keys(self): + return (f.dna_name.name_only for f in self.dna_type.fields) + + def values(self): + for k in self.keys(): + try: + yield self[k] + except NotImplementedError as ex: + msg, dna_name, dna_type = ex.args + yield "<%s>" % dna_type.dna_type_id.decode('ascii') + + def items(self): + for k in self.keys(): + try: + yield (k, self[k]) + except NotImplementedError as ex: + msg, dna_name, dna_type = ex.args + yield (k, "<%s>" % dna_type.dna_type_id.decode('ascii')) + + +# ----------------------------------------------------------------------------- +# Read Magic +# +# magic = str +# pointer_size = int +# is_little_endian = bool +# version = int + + +class BlendFileHeader: + """ + BlendFileHeader allocates the first 12 bytes of a blend file + it contains information about the hardware architecture + """ + __slots__ = ( + # str + "magic", + # int 4/8 + "pointer_size", + # bool + "is_little_endian", + # int + "version", + # str, used to pass to 'struct' + "endian_str", + # int, used to index common types + "endian_index", + ) + + def __init__(self, handle): + FILEHEADER = struct.Struct(b'7s1s1s3s') + + log.debug("reading blend-file-header") + values = FILEHEADER.unpack(handle.read(FILEHEADER.size)) + self.magic = values[0] + pointer_size_id = values[1] + if pointer_size_id == b'-': + self.pointer_size = 8 + elif pointer_size_id == 
b'_': + self.pointer_size = 4 + else: + assert(0) + endian_id = values[2] + if endian_id == b'v': + self.is_little_endian = True + self.endian_str = b'<' + self.endian_index = 0 + elif endian_id == b'V': + self.is_little_endian = False + self.endian_index = 1 + self.endian_str = b'>' + else: + assert(0) + + version_id = values[3] + self.version = int(version_id) + + def create_block_header_struct(self): + return struct.Struct(b''.join(( + self.endian_str, + b'4sI', + b'I' if self.pointer_size == 4 else b'Q', + b'II', + ))) + + +class DNAName: + """ + DNAName is a C-type name stored in the DNA + """ + __slots__ = ( + "name_full", + "name_only", + "is_pointer", + "is_method_pointer", + "array_size", + ) + + def __init__(self, name_full): + self.name_full = name_full + self.name_only = self.calc_name_only() + self.is_pointer = self.calc_is_pointer() + self.is_method_pointer = self.calc_is_method_pointer() + self.array_size = self.calc_array_size() + + def __repr__(self): + return '%s(%r)' % (type(self).__qualname__, self.name_full) + + def as_reference(self, parent): + if parent is None: + result = b'' + else: + result = parent + b'.' 
+ + result = result + self.name_only + return result + + def calc_name_only(self): + result = self.name_full.strip(b'*()') + index = result.find(b'[') + if index != -1: + result = result[:index] + return result + + def calc_is_pointer(self): + return (b'*' in self.name_full) + + def calc_is_method_pointer(self): + return (b'(*' in self.name_full) + + def calc_array_size(self): + result = 1 + temp = self.name_full + index = temp.find(b'[') + + while index != -1: + index_2 = temp.find(b']') + result *= int(temp[index + 1:index_2]) + temp = temp[index_2 + 1:] + index = temp.find(b'[') + + return result + + +class DNAField: + """ + DNAField is a coupled DNAStruct and DNAName + and cache offset for reuse + """ + __slots__ = ( + # DNAName + "dna_name", + # tuple of 3 items + # [bytes (struct name), int (struct size), DNAStruct] + "dna_type", + # size on-disk + "dna_size", + # cached info (avoid looping over fields each time) + "dna_offset", + ) + + def __init__(self, dna_type, dna_name, dna_size, dna_offset): + self.dna_type = dna_type + self.dna_name = dna_name + self.dna_size = dna_size + self.dna_offset = dna_offset + + +class DNAStruct: + """ + DNAStruct is a C-type structure stored in the DNA + """ + __slots__ = ( + "dna_type_id", + "size", + "fields", + "field_from_name", + "user_data", + ) + + def __init__(self, dna_type_id): + self.dna_type_id = dna_type_id + self.fields = [] + self.field_from_name = {} + self.user_data = None + + def __repr__(self): + return '%s(%r)' % (type(self).__qualname__, self.dna_type_id) + + def field_from_path(self, header, handle, path): + """ + Support lookups as bytes or a tuple of bytes and optional index. 
+ + C style 'id.name' --> (b'id', b'name') + C style 'array[4]' --> ('array', 4) + """ + if type(path) is tuple: + name = path[0] + if len(path) >= 2 and type(path[1]) is not bytes: + name_tail = path[2:] + index = path[1] + assert(type(index) is int) + else: + name_tail = path[1:] + index = 0 + else: + name = path + name_tail = None + index = 0 + + assert(type(name) is bytes) + + field = self.field_from_name.get(name) + + if field is not None: + handle.seek(field.dna_offset, os.SEEK_CUR) + if index != 0: + if field.dna_name.is_pointer: + index_offset = header.pointer_size * index + else: + index_offset = field.dna_type.size * index + assert(index_offset < field.dna_size) + handle.seek(index_offset, os.SEEK_CUR) + if not name_tail: # None or () + return field + else: + return field.dna_type.field_from_path(header, handle, name_tail) + + def field_get(self, header, handle, path, + default=..., + use_nil=True, use_str=True, + ): + field = self.field_from_path(header, handle, path) + if field is None: + if default is not ...: + return default + else: + raise KeyError("%r not found in %r (%r)" % + (path, [f.dna_name.name_only for f in self.fields], self.dna_type_id)) + + dna_type = field.dna_type + dna_name = field.dna_name + dna_size = field.dna_size + + if dna_name.is_pointer: + return DNA_IO.read_pointer(handle, header) + elif dna_type.dna_type_id == b'int': + if dna_name.array_size > 1: + return [DNA_IO.read_int(handle, header) for i in range(dna_name.array_size)] + return DNA_IO.read_int(handle, header) + elif dna_type.dna_type_id == b'short': + if dna_name.array_size > 1: + return [DNA_IO.read_short(handle, header) for i in range(dna_name.array_size)] + return DNA_IO.read_short(handle, header) + elif dna_type.dna_type_id == b'uint64_t': + if dna_name.array_size > 1: + return [DNA_IO.read_ulong(handle, header) for i in range(dna_name.array_size)] + return DNA_IO.read_ulong(handle, header) + elif dna_type.dna_type_id == b'float': + if dna_name.array_size > 1: + 
return [DNA_IO.read_float(handle, header) for i in range(dna_name.array_size)] + return DNA_IO.read_float(handle, header) + elif dna_type.dna_type_id == b'char': + if dna_size == 1: + # Single char, assume it's bitflag or int value, and not a string/bytes data... + return DNA_IO.read_char(handle, header) + if use_str: + if use_nil: + return DNA_IO.read_string0(handle, dna_name.array_size) + else: + return DNA_IO.read_string(handle, dna_name.array_size) + else: + if use_nil: + return DNA_IO.read_bytes0(handle, dna_name.array_size) + else: + return DNA_IO.read_bytes(handle, dna_name.array_size) + else: + raise NotImplementedError("%r exists but isn't pointer, can't resolve field %r" % + (path, dna_name.name_only), dna_name, dna_type) + + def field_set(self, header, handle, path, value): + assert(type(path) == bytes) + + field = self.field_from_path(header, handle, path) + if field is None: + raise KeyError("%r not found in %r" % + (path, [f.dna_name.name_only for f in self.fields])) + + dna_type = field.dna_type + dna_name = field.dna_name + + if dna_type.dna_type_id == b'char': + if type(value) is str: + return DNA_IO.write_string(handle, value, dna_name.array_size) + else: + return DNA_IO.write_bytes(handle, value, dna_name.array_size) + else: + raise NotImplementedError("Setting %r is not yet supported for %r" % + (dna_type, dna_name), dna_name, dna_type) + + +class DNA_IO: + """ + Module like class, for read-write utility functions. + + Only stores static methods & constants. 
+ """ + + __slots__ = () + + def __new__(cls, *args, **kwargs): + raise RuntimeError("%s should not be instantiated" % cls) + + @staticmethod + def write_string(handle, astring, fieldlen): + assert(isinstance(astring, str)) + if len(astring) >= fieldlen: + stringw = astring[0:fieldlen] + else: + stringw = astring + '\0' + handle.write(stringw.encode('utf-8')) + + @staticmethod + def write_bytes(handle, astring, fieldlen): + assert(isinstance(astring, (bytes, bytearray))) + if len(astring) >= fieldlen: + stringw = astring[0:fieldlen] + else: + stringw = astring + b'\0' + + handle.write(stringw) + + @staticmethod + def read_bytes(handle, length): + data = handle.read(length) + return data + + @staticmethod + def read_bytes0(handle, length): + data = handle.read(length) + return DNA_IO.read_data0(data) + + @staticmethod + def read_string(handle, length): + return DNA_IO.read_bytes(handle, length).decode('utf-8') + + @staticmethod + def read_string0(handle, length): + return DNA_IO.read_bytes0(handle, length).decode('utf-8') + + @staticmethod + def read_data0_offset(data, offset): + add = data.find(b'\0', offset) - offset + return data[offset:offset + add] + + @staticmethod + def read_data0(data): + add = data.find(b'\0') + return data[:add] + + UCHAR = struct.Struct(b'b') + + @staticmethod + def read_char(handle, fileheader): + st = DNA_IO.UCHAR[fileheader.endian_index] + return st.unpack(handle.read(st.size))[0] + + USHORT = struct.Struct(b'H') + + @staticmethod + def read_ushort(handle, fileheader): + st = DNA_IO.USHORT[fileheader.endian_index] + return st.unpack(handle.read(st.size))[0] + + SSHORT = struct.Struct(b'h') + + @staticmethod + def read_short(handle, fileheader): + st = DNA_IO.SSHORT[fileheader.endian_index] + return st.unpack(handle.read(st.size))[0] + + UINT = struct.Struct(b'I') + + @staticmethod + def read_uint(handle, fileheader): + st = DNA_IO.UINT[fileheader.endian_index] + return st.unpack(handle.read(st.size))[0] + + SINT = struct.Struct(b'i') + 
    # DNA_IO (continued): signed-int / float / 64-bit reads, plus pointer
    # reads whose width comes from the file header.
    @staticmethod
    def read_int(handle, fileheader):
        st = DNA_IO.SINT[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    FLOAT = struct.Struct(b'f')

    @staticmethod
    def read_float(handle, fileheader):
        st = DNA_IO.FLOAT[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    ULONG = struct.Struct(b'Q')

    @staticmethod
    def read_ulong(handle, fileheader):
        st = DNA_IO.ULONG[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    @staticmethod
    def read_pointer(handle, header):
        """
        Reads a pointer from a file handle.

        The pointer size is given by the header (BlendFileHeader).
        """
        if header.pointer_size == 4:
            st = DNA_IO.UINT[header.endian_index]
            return st.unpack(handle.read(st.size))[0]
        if header.pointer_size == 8:
            st = DNA_IO.ULONG[header.endian_index]
            return st.unpack(handle.read(st.size))[0]
# ----------------------------------------------------------------------------
# NOTE(review): the original dump concatenated a second file here:
# benchmark/configure.py (new file, mode 100644). Its content follows.
# ----------------------------------------------------------------------------
#!/usr/bin/env python3

import bpy


def setDeviceCPU(context, cpref):
    # 'NONE' compute device type makes Cycles render on the CPU.
    cpref.compute_device_type = 'NONE'
    return True


def isDisplayDevice(device):
    # Devices driving a display are tagged with "(Display)" in their name.
    return "(Display)" in device.name


def setUseRequestedDevice(context, cpref, device_type, requested_device):
    import _cycles
    # Empty device type means we'll try to render on a single card,
    # preferably a non-display one.
    if requested_device == "":
        device_found = False
        # Try to enable first non-display card.
        for device in cpref.devices:
            if isDisplayDevice(device):
                device.use = False
            elif not device_found:
                # Enable first non-display GPU.
                device_found = True
                device.use = True
            else:
                # Keep disabling rest of GPUs.
                device.use = False
        if not device_found:
            # Only display devices, enable first of them.
+ cpref.devices[0].use = True + device_found = True + else: + device_found = False + for device in cpref.devices: + device_name = device.name.replace(" (Display)", "") + if device_name == requested_device: + device.use = True + device_found = True + else: + device.use = False + return device_found + + +def setDeviceCUDA(context, cpref, requested_device): + cpref.compute_device_type = 'CUDA' + return setUseRequestedDevice(context, cpref, 'CUDA', requested_device) + + +def setDeviceOpenCL(context, cpref, requested_device): + cpref.compute_device_type = 'OPENCL' + return setUseRequestedDevice(context, cpref, 'OPENCL', requested_device) + + +def setDeviceGPU(context, cpref, requested_device): + import _cycles + has_cuda = has_opencl = False + for device in _cycles.available_devices(): + if device[1] == 'CUDA': + has_cuda = True + if device[1] == 'OPENCL': + has_opencl = True + + if has_cuda: + return setDeviceCUDA(context, cpref, requested_device) + if has_opencl: + return setDeviceOpenCL(context, cpref, requested_device) + return False + + +def logComputeDevices(cpref): + device_type = cpref.compute_device_type + if device_type == 'NONE': + device_type = 'CPU' + print("Compute device type: {}" . format(device_type)) + if device_type == 'CPU': + import _cycles + for device in _cycles.available_devices(): + if device[1] == 'CPU': + print("Using compute device: {}" . format(device[0])) + else: + for device in cpref.devices: + if device.type != device_type: + continue + if device.use: + print("Using compute device: {}" . format(device.name)) + + +def logSystemInfo(cpref): + import json + info_devices = [] + for device in cpref.devices: + info_device = { + "name": device.name.rstrip(" (Display)"), + "type": device.type, + } + info_devices.append(info_device) + print("Benchmark Devices: {}" . 
format(json.dumps(info_devices))) + + +def main(): + import argparse + import sys + + argv = sys.argv + if "--" not in argv: + return + + parser = argparse.ArgumentParser( + description="Cycles benchmark helper script.") + parser.add_argument("--benchmark-warmup", + help="Do quick warm-up render pass", + action='store_true', + default=False) + parser.add_argument("--benchmark-device-type", + help="Device type to be rendered on", + default="CPU") + parser.add_argument("--benchmark-device", + help="Device to be rendered on", + default="") + parser.add_argument("--benchmark-system-info", + help="Dump whole system information", + action='store_true', + default=False) + + argv = argv[argv.index("--") + 1:] + args = parser.parse_args(argv) + + context = bpy.context + cpref = context.user_preferences.addons['cycles'].preferences + + # Adjust samples so we render real quick. + if args.benchmark_warmup: + for scene in bpy.data.scenes: + scene.cycles.samples = 1 + scene.cycles.aa_samples = 1 + + # Configure the compute device. 
+ if args.benchmark_device_type == 'CPU': + device_ok = setDeviceCPU(context, cpref) + elif args.benchmark_device_type == 'CUDA': + device_ok = setDeviceCUDA(context, cpref, args.benchmark_device) + elif args.benchmark_device_type == 'OPENCL': + device_ok = setDeviceOpenCL(context, cpref, args.benchmark_device) + elif args.benchmark_device_type == 'GPU': + device_ok = setDeviceGPU(context, cpref, args.benchmark_device) + if not device_ok: + sys.exit(1) + + if args.benchmark_system_info: + logSystemInfo(cpref) + else: + logComputeDevices(cpref) + + +if __name__ == "__main__": + main() diff --git a/benchmark/farm.py b/benchmark/farm.py new file mode 100755 index 0000000..4c20356 --- /dev/null +++ b/benchmark/farm.py @@ -0,0 +1,351 @@ +#!/usr/bin/env python3 + +import argparse +import foundation +from foundation import (benchrunner, + buildbot, + config, + context, + logger, + system_info, + util) +import json +import os +import shutil +import sys +import tempfile + + +######################################## +# Parser helpers. +######################################## + + +def configureArgumentParser(): + parser = argparse.ArgumentParser( + description="Cycles benchmark helper script.") + parser.add_argument("-b", "--blender", + help="Full file path to Blender's binary " + + "to use for rendering", + default="") + parser.add_argument("-d", "--scenes-dir", + help="Directory with scenes", + default="") + # parser.add_argument('-s', '--scenes', + # nargs='+', + # help='Scenes to be rendered', + # default=[]) + parser.add_argument('-t', '--device-type', + help="Type of the device to render on", + default="CPU") + parser.add_argument('-n', '--device-name', + help="Device name to render on", + default="") + parser.add_argument('-v', '--verbose', + help="Do verbose logging", + action='store_true', + default=False) + return parser + +######################################## +# Configuration helpers. 
########################################


def injectDefaultConfiguration(config):
    """
    For a specified configuration object, set all possible properties to their
    default value.
    """
    root_dir = util.getBundleRootDirectory()
    section = {
        "scenes_dir": os.path.join(root_dir, "scenes"),
        "device_name": "",
    }
    config['farm'] = section


def injectArgparseConfiguration(config, args):
    """
    Override settings with arguments passed from the command line.
    """
    # Only non-empty command line values override the configured ones.
    section = config['farm']
    if args.blender:
        section['blender'] = args.blender
    if args.scenes_dir:
        section['scenes_dir'] = args.scenes_dir
    if args.device_type:
        section['device_type'] = args.device_type
    if args.device_name:
        section['device_name'] = args.device_name


def readConfiguration(args):
    """
    Read configuration file and return BenchmarkConfig with all the settings
    we will need to use.
    """
    # Precedence (lowest to highest): built-in defaults, global config
    # file(s), command line arguments.
    config = foundation.config.BenchmarkConfig()
    injectDefaultConfiguration(config)
    read_configs = config.readGlobalConfig("farm")
    if read_configs:
        logger.INFO("Configuration was read from:")
        for cfg in read_configs:
            print(" " + util.stripSensitiveInfo(cfg))
    injectArgparseConfiguration(config, args)
    return config


def checkConfiguration(config):
    """
    Check whether configuration is complete and usable.
    """
    required_keys = ('device_type', )
    known_device_types = ('CPU', 'GPU', 'CUDA', 'OPENCL', )
    logger.INFO("Validating configuration...")
    # Check whether section exists.
    if 'farm' not in config.sections():
        logger.INFO(" Missing configuration section for 'farm'.")
        return False
    section = config['farm']
    # Check whether required keys exist.
    for required_key in required_keys:
        if required_key not in section:
            logger.INFO(" Missing configuration key {}" .
                        format(required_key))
            return False
    # Check whether device configuration is correct.
+ device_type = section['device_type'] + if device_type not in known_device_types: + logger.INFO(" Unknown device type {}" . format(device_type)) + return False + if device_type != 'CPU': + if 'device_name' not in section or not section['device_name']: + logger.INFO(" Need to explicitly specify device name.") + return False + # Check whether directories are correct. + if not os.path.exists(section["scenes_dir"]): + logger.INFO(" Scenes directory does not exist.") + return False + if not section["output_dir"]: + logger.INFO(" Missing configuration for output directory.") + return False + return True + + +######################################## +# Temporary directories helpers. +######################################## + +def createTempDirectory(config): + """ + Usually creates an unique temporary directory and returns full path to it. + + In the development environment uses pre-defined directory, where all data + might already be existing (for example, blender might be already downloaded + and unpacked). + """ + temp_dir = tempfile.mkdtemp(prefix="blender-benchmark-") + return temp_dir + + +def deleteTempDirectory(config, temp_dir): + """ + Get rid of automatically created temp directory/ + """ + shutil.rmtree(temp_dir) + + +######################################## +# Latets Blender helper script. +######################################## + +def downloadLatestBlender(directory): + """ + Download latets Blender from buildbot to given directory. + """ + # TODO(sergey): This we need to change to currently running configuration. + latest_blender_url = buildbot.buildbotGetLatetsVersion("Linux", "64bit") + if not latest_blender_url: + logger.ERROR("Unable to figure out latest Blender version") + return + blender_filename = latest_blender_url.split('/')[-1] + logger.INFO("Found latest Blender: {}" . 
format(blender_filename)) + local_blender = os.path.join(directory, blender_filename) + logger.INFO("Downloading Blender...") + util.downloadFile(latest_blender_url, local_blender) + return local_blender + + +def findBlenderDirInDirectory(directory): + """ + Find Blender directory in given folder. + """ + for filename in os.listdir(directory): + if filename.startswith("blender"): + full_filename = os.path.join(directory, filename) + if os.path.isdir(full_filename): + return full_filename + return None + + +def findBlenderInDirectory(directory): + """ + Find blender executable file in given directory. Will recurse into + Blender directory in there + """ + blender_dir = findBlenderDirInDirectory(directory) + platform = sys.platform + if platform == 'linux': + return os.path.join(blender_dir, "blender") + else: + raise Exception("Need to support your OS!") + + +def getLatetsBlenderBinary(config, temp_dir): + """ + Get full file path to latest Blender executable which will be used for + actual benchmark. + + It will either use Blender from already specified location or it will + download latest Blender from buildbot. + """ + # Firts try to use Blender specified in configuration. + if 'blender' in config['farm']: + return config['farm']['blender'] + # Well, download and unpack the latest Blender from buildbot. + logger.INFO("Will get latest Blender from buildbot.") + blender_archive = downloadLatestBlender(temp_dir) + logger.INFO("Unpacking Blender...") + util.unpackArchive(blender_archive, temp_dir) + return findBlenderInDirectory(temp_dir) + +######################################## +# Results output. 
+########################################
+
+
+def latestDirGet(path):
+    """
+    Return the biggest directory number found in the given path.
+
+    Output directories are zero-padded integers (see ensureOutputDir), so
+    entries that are not pure digits (stray files, temp data) are skipped
+    instead of raising ValueError. Returns None when no numbered directory
+    exists yet.
+    """
+    max_file = None
+    for f in os.listdir(path):
+        # Ignore anything that is not a pure numeric name.
+        if not f.isdigit():
+            continue
+        x = int(f)
+        # Compare against None explicitly so a legitimate value of 0 is
+        # not treated as "nothing found yet".
+        if max_file is None or x > max_file:
+            max_file = x
+    return max_file
+
+
+def ensureOutputDir(config):
+    """
+    Make sure results output directory exists and create a new numbered
+    sub-directory in it for the current run. Returns full path to the
+    newly created sub-directory.
+    """
+    output_dir = config['farm']['output_dir']
+    if not os.path.exists(output_dir):
+        os.mkdir(output_dir)
+    max_dir = latestDirGet(output_dir)
+    if not max_dir:
+        max_dir = 0
+    new_dir = str(max_dir + 1).zfill(8)
+    new_full_dir = os.path.join(output_dir, new_dir)
+    os.mkdir(new_full_dir)
+    return new_full_dir
+
+
+def ensureImageOutputDir(results_output_dir):
+    """
+    Create "images" sub-directory in the results directory and return its
+    full path.
+    """
+    images_output_dir = os.path.join(results_output_dir, "images")
+    os.mkdir(images_output_dir)
+    return images_output_dir
+
+
+def getResultJSONString(ctx, results):
+    # Convert custom classes to dictionaries for easier JSON dump.
+    json_results = results
+    stats = json_results['stats']
+    for scene in ctx.scenes:
+        if scene not in stats:
+            continue
+        if stats[scene]:
+            stats[scene] = stats[scene].asDict()
+            stats[scene]['result'] = 'OK'
+        else:
+            stats[scene] = {'result': 'CRASH'}
+    return json.dumps(json_results, sort_keys=True, indent=4)
+
+
+def saveResults(ctx, results, output_dir):
+    json_string = getResultJSONString(ctx, results)
+    results_file = os.path.join(output_dir, "results.json")
+    with open(results_file, "w") as f:
+        f.write(json_string)
+
+########################################
+# Main logic.
+########################################
+
+def main():
+    parser = configureArgumentParser()
+    args = parser.parse_args()
+    logger.VERBOSE = args.verbose
+    logger.init()
+    logger.HEADER("Cycles Benchmark Suite v{}, farm edition" .
+                  format(foundation.VERSION))
+    # Some platform independent directories to helper scripts.
+    script_directory = os.path.dirname(os.path.realpath(__file__))
+    configure_script = os.path.join(script_directory, "configure.py")
+    # Read configuration file, so we know what we will be doing.
+ config = readConfiguration(args) + if not checkConfiguration(config): + logger.ERROR("Configuration is not complete or valid, aborting.") + return False + logger.INFO("Configuration looks reasonable, continuing.") + # Create temporary directory, all runtime files will be stored there. + temp_dir = createTempDirectory(config) + results_output_dir = ensureOutputDir(config) + images_output_dir = ensureImageOutputDir(results_output_dir) + try: + blender_binary = getLatetsBlenderBinary(config, temp_dir) + logger.INFO("Will benchmark the following Blender: {}". + format(util.stripSensitiveInfo(blender_binary))) + # Bechmark context initialization. + farm_config = config['farm'] + ctx = context.Context() + ctx.blender = blender_binary + ctx.configure_script = configure_script + ctx.scenes_dir = farm_config['scenes_dir'] + ctx.device_type = farm_config['device_type'] + ctx.device_name = farm_config['device_name'] + ctx.image_output_dir = images_output_dir + ctx.scenes = ctx.listAllScenes(ctx.scenes_dir) + # Print prelmiinary information. + blender_dvice_info = benchrunner.benchmarkGetDeviceInfo(ctx) + if not blender_dvice_info['device_type']: + logger.ERROR("Requested device can not be enabled in Blender.") + logger.INFO("Requested device details:") + logger.INFO(" Device type: {}" . format(ctx.device_type)) + logger.INFO(" Device name: {}" . format(ctx.device_name)) + all_stats = None + else: + logger.INFO("Configured device details:") + logger.INFO(" Device type: {}" . + format(blender_dvice_info["device_type"])) + logger.INFO(" Compute devices:") + for compute_device in blender_dvice_info["compute_devices"]: + logger.INFO(" {}" . format(compute_device)) + # Run benchmark. + all_stats = benchrunner.benchmarkAll(ctx) + # Gather all information together. 
+ results = { + "blender_version": system_info.getBlenderVersion(ctx), + "system_info": system_info.gatherSystemInfo(ctx), + "device_info": blender_dvice_info, + "stats": all_stats if all_stats else {} + } + saveResults(ctx, results, results_output_dir) + return all_stats is not None + finally: + deleteTempDirectory(config, temp_dir) + return True + + +if __name__ == "__main__": + if not main(): + sys.exit(1) diff --git a/benchmark/foundation/__init__.py b/benchmark/foundation/__init__.py new file mode 100644 index 0000000..72bdd01 --- /dev/null +++ b/benchmark/foundation/__init__.py @@ -0,0 +1 @@ +VERSION = "0.1" diff --git a/benchmark/foundation/benchrunner.py b/benchmark/foundation/benchrunner.py new file mode 100644 index 0000000..a4992ba --- /dev/null +++ b/benchmark/foundation/benchrunner.py @@ -0,0 +1,162 @@ +import os +import time +import subprocess + +from foundation import (logger, + progress, + stats, + util) + + +def constructBenchmarkCommand(ctx, scene, blendfile, output_folder, cfra): + command = [ctx.blender, + "--background", + "--factory-startup", + "-noaudio", + "--debug-cycles", + "--enable-autoexec", + "--engine", "CYCLES", + blendfile] + if ctx.image_output_dir: + output = os.path.join(ctx.image_output_dir, scene) + "#" + command.extend(["--render-format", "PNG", + "--render-output", output]) + else: + command.extend(["--render-output", output_folder, + "--render-format", "PNG"]) + command.extend(["--python", ctx.configure_script, + "-f", str(cfra), + "--", "--benchmark-device-type", ctx.device_type]) + if ctx.device_name: + command.extend(["--benchmark-device", ctx.device_name]) + return command + + +def benchmarkBlenderWatched(command): + # Run Blender with configured command line. + logger.DEBUG("About to execuet command: {}" . format(command)) + start_time = time.time() + process = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + # Keep reading status while Blender is alive. 
+ st = stats.Stats() + while True: + line = process.stdout.readline() + if line == b"" and process.poll() is not None: + break + line = line.decode().strip() + if line == "": + continue + if logger.VERBOSE: + print(line) + else: + logger.DEBUG("Line from stdout: {}" . format(line)) + st.update(line) + if st.current_tiles != 0: + elapsed_time = time.time() - start_time + elapsed_time_str = util.humanReadableTimeDifference(elapsed_time) + progress.progress(st.current_tiles, + st.total_tiles, + prefix="Path Tracing Tiles {}" . + format(elapsed_time_str)) + + # Clear line used by progress. + progress.progressClear() + + if process.returncode != 0: + logger.ERROR("Rendering crashed") + return None + logger.OK("Successfully rendered") + + return st + + +def benchmarkScene(ctx, scene): + logger.BOLD("Begin benchmark of scene {}" . format(scene)) + # Get usable full path to the corresponding .blend file. + blendfile = ctx.getSceneFilename(scene) + logger.DEBUG("File to use: {}" . format(blendfile)) + # Get command for rendering. + # TODO(sergey): Create some temp folder. + cfra = util.queryCurrentFrame(blendfile) + command = constructBenchmarkCommand(ctx, scene, blendfile, "/tmp/", cfra) + logger.DEBUG("Command for rendering: {}" . format(command)) + + logger.INFO("> Warm-up round, making sure everything is ready " + + "(this might take several minutes).") + warmup_command = command + ['--benchmark-warmup'] + benchmarkBlenderWatched(warmup_command) + # Remove resutl of warmup round. + if ctx.image_output_dir: + full_image_output = os.path.join(ctx.image_output_dir, scene) + \ + str(cfra) + ".png" + if os.path.exists(full_image_output): + os.remove(full_image_output) + # TODO(sergey): Consider doing several passes. + logger.INFO("> Doing real benchmark pass now.") + stats = benchmarkBlenderWatched(command) + # Rename file to more sensible name. 
+ if ctx.image_output_dir: + if os.path.exists(full_image_output): + full_image_output_no_frame = \ + os.path.join(ctx.image_output_dir, scene) + ".png" + os.rename(full_image_output, full_image_output_no_frame) + if stats: + logger.INFO("Total render time: {}" . format( + util.humanReadableTimeDifference( + stats.pipeline_render_time))) + return stats + + +def benchmarkAll(ctx): + """ + Benchmark all scenes from the cntext with requested settings. + """ + # First of all, print summary of what we'll be doing. + ctx.printSummary() + if not ctx.verify(): + return False + all_stats = {} + for scene in ctx.scenes: + file_stats = benchmarkScene(ctx, scene) + all_stats[scene] = file_stats + return all_stats + + +def benchmarkGetDeviceInfo(ctx): + command = [ctx.blender, + "--background", + "--factory-startup", + "-noaudio", + "--enable-autoexec", + "--engine", "CYCLES", + "--python", ctx.configure_script, + "--", + "--benchmark-device-type", ctx.device_type] + if ctx.device_name: + command.extend(["--benchmark-device", ctx.device_name]) + process = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + stdout, stderr = process.communicate() + lines = stdout.decode().split("\n") + # Parse output + device_type = "" + compute_devices = [] + for line in lines: + if line.startswith("Compute device type:"): + device_type = line.split(':', 1)[1].strip() + elif line.startswith("Using compute device:"): + compute_devices.append(line.split(':', 1)[1].strip()) + return {"device_type": device_type, + "compute_devices": compute_devices} + + +def benchmarkPrintDeviceInfo(ctx): + device_info = benchmarkGetDeviceInfo(ctx) + logger.INFO(" Device type: {}" . format(device_info["device_type"])) + logger.INFO(" Compute devices:") + for compute_device in device_info["compute_devices"]: + logger.INFO(" {}" . 
format(compute_device)) diff --git a/benchmark/foundation/buildbot.py b/benchmark/foundation/buildbot.py new file mode 100644 index 0000000..3c80e53 --- /dev/null +++ b/benchmark/foundation/buildbot.py @@ -0,0 +1,79 @@ +import os +import re +import requests +from html.parser import HTMLParser + +BUILDBOT_URL = "https://builder.blender.org/" +BUILDBOT_DOWNLOAD_URL = BUILDBOT_URL + "download/" + + +class BuildbotHTMLParser(HTMLParser): + def __init__(self): + HTMLParser.__init__(self) + self._is_inside_official_table = False + self._is_official_table_finished = False + self.official_builds = [] + + def handle_starttag(self, tag, attrs): + tag_lower = tag.lower() + if tag_lower == 'a': + if self._is_inside_official_table: + for attr in attrs: + if attr[0].lower() == 'href': + href = attr[1] + self.official_builds.append(href) + elif tag_lower == 'table': + classes = () + for attr in attrs: + if attr[0].lower() == 'class': + classes = attr[1].lower().split() + if 'table-striped' in classes: + if self._is_inside_official_table: + self._is_inside_official_table = False + self._is_official_table_finished = True + else: + if not self._is_official_table_finished: + self._is_inside_official_table = True + + def handle_endtag(self, tag): + pass + + def handle_data(self, data): + pass + + +def _getBuildbotPlatformRegex(platform, bitness): + platform_lower = platform.lower() + if platform_lower in ("linux", "lin"): + if bitness.startswith("64"): + return re.compile(".*linux-glibc[0-9]+-x86_64.*") + elif bitness.startswith("32"): + return re.compile(".*linux-glibc[0-9]+-i686.*") + else: + # TOGO(sergey): Needs implementation + pass + return None + + +def buildbotGetLatetsVersion(platform, bitness): + """ + Get latest Blender version URL from buildbot website. + + Returns None if something is wrong. + """ + # Get content of the page. + r = requests.get(BUILDBOT_DOWNLOAD_URL) + if r.status_code != requests.codes.ok: + return None + # Parse the page. 
+ parser = BuildbotHTMLParser() + parser.feed(r.text) + official_builds = parser.official_builds + # Get build which corresponds to requested platform. + regex = _getBuildbotPlatformRegex(platform, bitness) + if not regex: + return None + for build in official_builds: + if regex.match(build): + return BUILDBOT_DOWNLOAD_URL + build + return None diff --git a/benchmark/foundation/config.py b/benchmark/foundation/config.py new file mode 100644 index 0000000..9d33534 --- /dev/null +++ b/benchmark/foundation/config.py @@ -0,0 +1,53 @@ +import configparser +import foundation +from foundation import util +import os + + +class BenchmarkConfig: + """ + Generic configuration storage and parser. + """ + + def __init__(self): + self.config_ = configparser.ConfigParser() + + def readFromFile(self, filename): + """ + Read configuration from given file. File name is expected to be + a full file path to read from. + + Will do nothing if file does not exist. + """ + if os.path.exists(filename): + return self.config_.read(filename) + return [] + + def readGlobalConfig(self, name): + """ + Read named configuration from benchmark's configuration folder + """ + config_dir = util.getGlobalConfigDirectory() + filename = os.path.join(config_dir, name + ".cfg") + return self.readFromFile(filename) + + def dump(self): + """ + Dump configuration to screen for debugging purposes. + """ + for section_name in self.config_.sections(): + section = self.config_[section_name] + print("[{}]" . format(section_name)) + for key, value in section.items(): + print("{} = {} " . format(key, value)) + + # Bypass some handy methods to underlying configuration object. 
+ + def sections(self): + return self.config_.sections() + + def __getitem__(self, key): + return self.config_.__getitem__(key) + + def __setitem__(self, key, value): + return self.config_.__setitem__(key, value) diff --git a/benchmark/foundation/context.py b/benchmark/foundation/context.py new file mode 100644 index 0000000..4436529 --- /dev/null +++ b/benchmark/foundation/context.py @@ -0,0 +1,100 @@ +import os + +from foundation import logger + + +class Context: + __slots__ = ('blender', + 'device_type', + 'device_name', + 'scenes', + 'scenes_dir', + 'configure_script', + 'image_output_dir') + + def __init__(self): + # Full path to blender binary. + self.blender = "blender" + # Type of the device to run on. It must be either CPU, CUDA or OpenCL. + self.device_type = 'CPU' + # Name of the device to render on. + self.device_name = 'NAME' + # By default we use empty list, it is up to the user to fill it in. + self.scenes = [] + # It is up to the user to provide proper path to scenes. + self.scenes_dir = "" + # Blender-side configuration script. + self.configure_script = "configure.py" + # Directory where render result images will be saved. + # Empty means no results are saved. + self.image_output_dir = "" + + def listAllScenes(self, directory): + import os + if not os.path.exists(directory): + logger.ERROR("Non-exiting directory {}" . format(directory)) + return [] + if not os.path.isdir(directory): + logger.ERROR("Not a directory {}" . format(directory)) + return [] + all_scenes = sorted(os.listdir(directory)) + usable_scenes = [] + for scene in all_scenes: + scene_filename = self.getSceneFilename(scene, do_checks=False) + if os.path.exists(scene_filename) and \ + os.path.isfile(scene_filename): + usable_scenes.append(scene) + return usable_scenes + + def printSummary(self): + logger.INFO("Benchmark summary:") + logger.INFO(" Device type: {}" . format(self.device_type)) + if self.device_name: + logger.INFO(" Device name: {}" . 
format(self.device_name)) + logger.INFO(" Scenes: {}" . format(", ".join(self.scenes))) + + def verify(self): + if not os.path.exists(self.blender): + logger.ERROR("Missing blender: {}" . format(self.blender)) + return False + if not os.path.isfile(self.blender): + logger.ERROR("Blender is not a file: {}" . format(self.blender)) + return False + if not os.path.exists(self.configure_script): + logger.ERROR("Missing configuration script: {}" . + format(self.configure_script)) + return False + if not os.path.isfile(self.configure_script): + logger.ERROR("Configuration script is not a file: {}" . + format(self.configure_script)) + return False + if self.image_output_dir: + if not os.path.exists(self.image_output_dir): + logger.ERROR("Missing image output directory: {}" . + format(self.image_output_dir)) + return False + if not os.path.isdir(self.image_output_dir): + logger.ERROR("Image out is not a directory: {}" . + format(self.image_output_dir)) + return False + return True + + def getDeviceFileSuffix(self): + if self.device_type == 'CPU': + return '_cpu' + elif self.device_type in ('CUDA', 'OPENCL', 'GPU'): + return '_gpu' + else: + logger.FATAL("Unknown device type: {}" . format(self.device_type)) + return "" + + def getSceneFilename(self, scene, do_checks=True): + suffix = self.getDeviceFileSuffix() + blendfile = scene + suffix + ".blend" + filepath = os.path.join(self.scenes_dir, scene, blendfile) + if do_checks: + if not os.path.exists(filepath): + logger.FATAL("File not file: {}" . format(filepath)) + if not os.path.isfile(filepath): + logger.FATAL("Scene is not a file: {}" . 
format(filepath)) + return filepath diff --git a/benchmark/foundation/logger.py b/benchmark/foundation/logger.py new file mode 100644 index 0000000..8e434fb --- /dev/null +++ b/benchmark/foundation/logger.py @@ -0,0 +1,95 @@ +class COLORS_DUMMY: + HEADER = '' + OKBLUE = '' + OKGREEN = '' + WARNING = '' + FAIL = '' + ENDC = '' + BOLD = '' + UNDERLINE = '' + + +class COLORS_ANSI: + HEADER = '\033[94m' + OKBLUE = '\033[94m' + OKGREEN = '\033[92m' + WARNING = '\033[93m' + FAIL = '\033[91m' + ENDC = '\033[0m' + BOLD = '\033[1m' + UNDERLINE = '\033[4m' + + +VERBOSE = False +COLORS = COLORS_DUMMY + + +def supportsColor(): + """ + Returns True if the running system's terminal supports color, and False + otherwise. + """ + + import sys + import os + + plat = sys.platform + supported_platform = plat != 'Pocket PC' and (plat != 'win32' or + 'ANSICON' in os.environ) + # isatty is not always implemented, #6223. + is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() + if not supported_platform or not is_a_tty: + return False + return True + + +def HEADER(*args): + print(COLORS.HEADER + COLORS.BOLD, end="") + print(*args, end="") + print(COLORS.ENDC) + + +def WARNING(*args): + print(COLORS.WARNING + COLORS.BOLD, end="") + print(*args, end="") + print(COLORS.ENDC) + + +def ERROR(*args): + print(COLORS.FAIL + COLORS.BOLD, end="") + print(*args, end="") + print(COLORS.ENDC) + + +def OK(*args): + print(COLORS.OKGREEN + COLORS.BOLD, end="") + print(*args, end="") + print(COLORS.ENDC) + + +def BOLD(*args): + print(COLORS.BOLD, end="") + print(*args, end="") + print(COLORS.ENDC) + + +def INFO(*args): + print(*args) + + +def DEBUG(*args): + # TODO(sergey): Add check that debug is enabled. 
+ if False: + print(*args) + + +def FATAL(*args): + import sys + ERROR(*args) + sys.exit(1) + + +def init(): + if not VERBOSE and supportsColor(): + global COLORS + COLORS = COLORS_ANSI diff --git a/benchmark/foundation/progress.py b/benchmark/foundation/progress.py new file mode 100644 index 0000000..a935850 --- /dev/null +++ b/benchmark/foundation/progress.py @@ -0,0 +1,34 @@ +import shutil +import sys + +from foundation import logger + + +def progress(count, total, prefix="", suffix=""): + if logger.VERBOSE: + return + + size = shutil.get_terminal_size((80, 20)) + + if prefix != "": + prefix = prefix + " " + if suffix != "": + suffix = " " + suffix + + bar_len = size.columns - len(prefix) - len(suffix) - 10 + filled_len = int(round(bar_len * count / float(total))) + + percents = round(100.0 * count / float(total), 1) + bar = '=' * filled_len + '-' * (bar_len - filled_len) + + sys.stdout.write('%s[%s] %s%%%s\r' % (prefix, bar, percents, suffix)) + sys.stdout.flush() + + +def progressClear(): + if logger.VERBOSE: + return + + size = shutil.get_terminal_size((80, 20)) + sys.stdout.write(" " * size.columns + "\r") + sys.stdout.flush() diff --git a/benchmark/foundation/stats.py b/benchmark/foundation/stats.py new file mode 100644 index 0000000..a37d36c --- /dev/null +++ b/benchmark/foundation/stats.py @@ -0,0 +1,88 @@ +import re + +from foundation import util + + +class Stats: + def __init__(self): + # Pepare some regex for parsing + self.re_path_tracing = re.compile( + ".*Path Tracing Tile ([0-9]+)/([0-9]+)" + + "(, Sample ([0-9]+)\/([0-9]+))?$") + self.re_total_render_time = re.compile( + ".*Total render time: ([0-9]+(\.[0-9]+)?)") + self.re_render_time_no_sync = re.compile( + ".*Render time \(without synchronization\): ([0-9]+(\.[0-9]+)?)") + self.re_pipeline_time = re.compile( + "Time: ([0-9:\.]+) \(Saving: ([0-9:\.]+)\)") + self.re_cycles_memory = re.compile( + ".*\| Mem:([0-9.]+[KM]?), Peak:([0-9.]+[KM]?) \|.*") + + # Render time stats. 
+        self.total_render_time = "N/A"
+        self.render_time_no_sync = "N/A"
+        self.pipeline_render_time = "N/A"
+
+        # Render memory stats.
+        self.device_peak_memory = "N/A"
+        self.device_memory_usage = "N/A"
+
+        # Current stats.
+        self.current_tiles = 0
+        self.total_tiles = 0
+
+    def update(self, line):
+        """
+        Parse a single line of Blender's stdout and update the stats.
+        """
+        # Current tile progress.
+        match = self.re_path_tracing.match(line)
+        if match:
+            self.current_tiles = int(match.group(1))
+            self.total_tiles = int(match.group(2))
+        # Total render time.
+        match = self.re_total_render_time.match(line)
+        if match:
+            self.total_render_time = float(match.group(1))
+        # Render time without sync.
+        match = self.re_render_time_no_sync.match(line)
+        if match:
+            self.render_time_no_sync = float(match.group(1))
+        # Total pipeline time.
+        match = self.re_pipeline_time.match(line)
+        if match:
+            self.pipeline_render_time = \
+                util.humanReadableTimeToSeconds(match.group(1))
+        # Memory usage. The regex captures current usage in group(1) and
+        # peak usage in group(2).
+        match = self.re_cycles_memory.match(line)
+        if match:
+            mem = util.humanReadableSizeToMegabytes(match.group(1))
+            peak = util.humanReadableSizeToMegabytes(match.group(2))
+            if self.device_memory_usage == "N/A" or \
+               mem > self.device_memory_usage:
+                self.device_memory_usage = mem
+            # Track the maximum reported peak value, not the current usage.
+            if self.device_peak_memory == "N/A" or \
+               peak > self.device_peak_memory:
+                self.device_peak_memory = peak
+
+    def print(self):
+        # TODO(sergey): Check that all stats are available.
+        print("Total pipeline render time: {} ({} sec)"
+              . format(util.humanReadableTimeDifference(
+                  self.pipeline_render_time),
+                  self.pipeline_render_time))
+        print("Total Cycles render time: {} ({} sec)"
+              . format(util.humanReadableTimeDifference(
+                  self.total_render_time),
+                  self.total_render_time))
+        print("Pure Cycles render time (without sync): {} ({} sec)"
+              . format(util.humanReadableTimeDifference(
+                  self.render_time_no_sync),
+                  self.render_time_no_sync))
+        print("Cycles memoty usage: {} ({} peak)"
+              .
format(self.device_memory_usage, + self.device_peak_memory)) + + def asDict(self): + return {'total_render_time': self.total_render_time, + 'render_time_no_sync': self.render_time_no_sync, + 'pipeline_render_time': self.pipeline_render_time, + 'device_peak_memory': self.device_peak_memory, + 'device_memory_usage': self.device_memory_usage} diff --git a/benchmark/foundation/system_info.py b/benchmark/foundation/system_info.py new file mode 100644 index 0000000..21f1694 --- /dev/null +++ b/benchmark/foundation/system_info.py @@ -0,0 +1,73 @@ +import json +import platform +import subprocess +import sys + +from third_party import cpuinfo + + +def _getBlenderDeviceInfo(ctx): + PREFIX = "Benchmark Devices: " + command = [ctx.blender, + "--background", + "--factory-startup", + "-noaudio", + "--enable-autoexec", + "--engine", "CYCLES", + "--python", ctx.configure_script, + "--", + "--benchmark-system-info"] + process = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + stdout, stderr = process.communicate() + lines = stdout.decode().split("\n") + for line in lines: + if line.startswith(PREFIX): + return json.loads(line[len(PREFIX):]) + return [] + + +def getBlenderVersion(ctx): + INFO = ("build_date", + "build_time", + "build_commit_date", + "build_commit_time", + "build_hash") + command = [ctx.blender, "--version"] + process = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + stdout, stderr = process.communicate() + lines = stdout.decode().split("\n") + info = {} + for line in lines: + line = line.strip() + if line.startswith("Blender "): + version = line[8:].strip() + info['version'] = version + if not line.startswith("build "): + continue + tokens = line.split(":", 1) + tokens[0] = tokens[0].replace(" ", "_") + if tokens[0] in INFO: + info[tokens[0]] = tokens[1].strip() + return info + + +def gatherSystemInfo(ctx): + system_info = {} + system_info['bitness'] = platform.architecture()[0] + 
system_info['machine'] = platform.machine() + system_info['system'] = platform.system() + if system_info['system'] == "Linux": + distro = platform.linux_distribution() + system_info['dist_name'] = distro[0] + system_info['dist_version'] = distro[1] + # system_info['libc_version'] = "-".join(platform.libc_ver()) + # TODO(sergey): Make this to work on Windows and macOS + cpu_info = cpuinfo.get_cpu_info() + system_info['cpu_brand'] = cpu_info['brand'] + system_info['devices'] = _getBlenderDeviceInfo(ctx) + # TODO(sergey): query number of CPUs and threads. + return system_info diff --git a/benchmark/foundation/util.py b/benchmark/foundation/util.py new file mode 100644 index 0000000..3b7a6a2 --- /dev/null +++ b/benchmark/foundation/util.py @@ -0,0 +1,143 @@ +import foundation +from foundation import progress +import os +import requests +import tarfile + + +def humanReadableTimeDifference(seconds): + """ + Convert time difference in seconds to a human readable format. + + For example, time difference of 125 seconds will be returned as 2:05 + """ + + hours = int(seconds) // 60 // 60 + seconds = seconds - hours * 60 * 60 + minutes = int(seconds) // 60 + seconds = seconds - minutes * 60 + if hours == 0: + return "%02d:%05.2f" % (minutes, seconds) + else: + return "%02d:%02d:%05.2f" % (hours, minutes, seconds) + + +def humanReadableTimeToSeconds(time): + """ + Convert human readable string like HH:MM:SS to seconds. + """ + + tokens = time.split(".") + result = 0 + if len(tokens) == 2: + result = float("0." 
+ tokens[1]) + mult = 1 + for token in reversed(tokens[0].split(":")): + result += int(token) * mult + mult *= 60 + return result + + +def queryMainScene(filepath, callbacks): + """ + Return the equivalent to bpy.context.scene + """ + + from blendfile import blendfile + + with blendfile.open_blend(filepath) as blend: + # There is no bpy.context.scene, we get it from the main window + window_manager = [block for block in blend.blocks + if block.code == b'WM'][0] + window = window_manager.get_pointer(b'winactive') + screen = window.get_pointer(b'screen') + scene = screen.get_pointer(b'scene') + + output = [] + for callback in callbacks: + output.append(callback(scene)) + return output + + +def queryCurrentFrame(filepath): + """ + Get frame number to render. + """ + + def get_cfra(scene): + return scene.get((b'r', b'cfra')) + cfra, = queryMainScene(filepath, [get_cfra]) + return cfra + + +def humanReadableSizeToMegabytes(size): + if size[-1] == 'K': + return float(size[:-1]) / 1024 + elif size[-1] == 'M': + return float(size[:-1]) + else: + return float(size) + + +def humanReadableSize(size): + return "{} Mb" . format(size) + + +def downloadFile(url, filename): + """ + Download file form given UTR and save it to filename + """ + r = requests.get(url, stream=True) + downloaded_size = 0 + total_size = 0 + if 'Content-length' in r.headers: + total_size = r.headers['Content-length'] + with open(filename, 'wb') as f: + for chunk in r.iter_content(chunk_size=1024): + if chunk: + f.write(chunk) + downloaded_size += len(chunk) + if total_size != 0: + progress.progress(downloaded_size, total_size) + if total_size != 0: + progress.progressClear() + + +def unpackArchive(filename, directory): + """ + Unpack archive to given + """ + if filename.endswith(".tar.bz2"): + tar = tarfile.open(name=filename, mode="r:bz2") + tar.extractall(directory) + else: + # TODO(sergey): Need to support more archive types. 
pass  # (tail of unpackArchive() from the previous hunk: unsupported type is a no-op)


def stripSensitiveInfo(s):
    """
    Strip any possibly sensitive information we want to avoid from the logs.

    Currently this removes the benchmark bundle root directory from the
    string, so the local filesystem layout does not leak into reports.
    """
    root_dir = getBundleRootDirectory()
    return s.replace(root_dir, "")

########################################
# Directories manipulation
########################################


def getBundleRootDirectory():
    """
    Get full file path to the root directory of benchmark bundle.
    """
    script_directory = os.path.dirname(os.path.realpath(__file__))
    # util.py lives two levels below the bundle root.
    return os.path.dirname(os.path.dirname(script_directory))


def getGlobalConfigDirectory():
    """
    Get configuration directory global for all components of this bundle.
    """
    return os.path.join(getBundleRootDirectory(), "config")

# ---------------------------------------------------------------------------
# benchmark/main.py (new file, mode 100755)
# ---------------------------------------------------------------------------
#!/usr/bin/env python3

import argparse
import foundation
from foundation import (benchrunner,
                        context,
                        logger,
                        system_info,
                        util)


def configureArgumentParser():
    """Build the command line argument parser for the benchmark script."""
    parser = argparse.ArgumentParser(
        description="Cycles benchmark helper script.")
    parser.add_argument("-b", "--blender",
                        help="Full file path to Blender's binary "
                             "to use for rendering",
                        default="blender")
    parser.add_argument("-d", "--scenes-dir",
                        help="Directory with scenes",
                        default="")
    parser.add_argument('-s', '--scenes',
                        nargs='+',
                        help='Scenes to be rendered',
                        default=[])
    parser.add_argument('-c', '--configure-script',
                        help="Blender-side configuration script",
                        default="configure.py")
    parser.add_argument('-t', '--device-type',
                        help="Type of the device to render on",
                        default="CPU")
    parser.add_argument('-n', '--device-name',
                        help="Device name to render on",
                        default="")
    parser.add_argument('-f', '--full-dump',
                        # BUG FIX: help text typo "in formation".
                        help="Dump all available information",
                        action='store_true',
                        default=False)
    parser.add_argument('-j', '--json',
                        help="When in full dump mode, dump JSON",
                        action='store_true',
                        default=False)
    return parser


def _printFullResult(ctx, results):
    """Print the full human-readable benchmark report to stdout."""
    print("")
    print("=" * 40)
    # Print system information.
    sys_info = results['system_info']
    print("System info:")
    print(" System: {} {}".format(sys_info['system'], sys_info['bitness']))
    if sys_info['system'] == "Linux":
        print(" Linux distro: {}, {}".format(sys_info['dist_name'],
                                             sys_info['dist_version']))
    print(" CPU: {}".format(sys_info['cpu_brand']))
    devices = sys_info['devices']
    if devices:
        print(" Compute devices:")
        for device in devices:
            print(" - {}: {}".format(device['type'], device['name']))
    # Print Blender version.
    blender = results['blender_version']
    print("Blender:")
    print(" Version: {}".format(blender['version']))
    print(" Hash: {}".format(blender['build_hash']))
    print(" Commit: {} {}".format(blender['build_commit_date'],
                                  blender['build_commit_time']))
    print(" Build: {} {}".format(blender['build_date'],
                                 blender['build_time']))
    # Print scenes status.
    # BUG FIX: user-visible typo "Nenchmark results:".
    print("Benchmark results:")
    stats = results['stats']
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        # BUG FIX: the original rebound `stats = stats[scene]`, clobbering
        # the dictionary so the membership test above broke (and the wrong
        # object was indexed) for every scene after the first one.
        scene_stats = stats[scene]
        print(" {}:".format(scene))
        print(" - Engine render time: {}".format(
            util.humanReadableTimeDifference(
                scene_stats.total_render_time)))
        print(" - Render time without sync: {}".format(
            util.humanReadableTimeDifference(
                scene_stats.render_time_no_sync)))
        print(" - Total render time: {}".format(
            util.humanReadableTimeDifference(
                scene_stats.pipeline_render_time)))
        print(" - Peak memory used on device: {}".format(
            util.humanReadableSize(scene_stats.device_peak_memory)))
        print(" - Memory used on device during rendering: {}".format(
            util.humanReadableSize(scene_stats.device_memory_usage)))


def _printFullJSONResult(ctx, results):
    """Dump the full results as JSON to stdout."""
    import json
    # Convert custom classes to dictionaries for easier JSON dump.
    json_results = results
    stats = json_results['stats']
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        stats[scene] = stats[scene].asDict()
    print(json.dumps(json_results, sort_keys=True, indent=4))


def _printBriefResult(ctx, results):
    """Print a short per-scene total-render-time summary to stdout."""
    print("")
    print("=" * 40)
    stats = results['stats']
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        # BUG FIX: keep the stats dictionary intact across iterations
        # (the original rebound `stats` to the per-scene object).
        scene_stats = stats[scene]
        print("{}: {}".format(
            scene,
            util.humanReadableTimeDifference(
                scene_stats.pipeline_render_time)))


def main():
    """Entry point: parse arguments, run the benchmark, print results."""
    parser = configureArgumentParser()
    args = parser.parse_args()
    logger.init()
    logger.HEADER("Cycles Benchmark Suite v{}".format(foundation.VERSION))
    # Configure context.
    ctx = context.Context()
    ctx.blender = args.blender
    ctx.scenes_dir = args.scenes_dir
    ctx.configure_script = args.configure_script
    ctx.device_type = args.device_type
    ctx.device_name = args.device_name
    # Explicit scene list wins; otherwise benchmark everything found.
    ctx.scenes = args.scenes if args.scenes \
        else ctx.listAllScenes(args.scenes_dir)
    logger.INFO("Requested device details:")
    benchrunner.benchmarkPrintDeviceInfo(ctx)
    # Run benchmark.
    all_stats = benchrunner.benchmarkAll(ctx)
    # Gather all information together.
    result = {
        "blender_version": system_info.getBlenderVersion(ctx),
        "system_info": system_info.gatherSystemInfo(ctx),
        "stats": all_stats if all_stats else {},
    }
    if args.full_dump:
        if args.json:
            _printFullJSONResult(ctx, result)
        else:
            _printFullResult(ctx, result)
    else:
        _printBriefResult(ctx, result)


if __name__ == "__main__":
    main()

# ---------------------------------------------------------------------------
# benchmark/third_party/cpuinfo.py (new file, vendored py-cpuinfo)
# ---------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

# Copyright (c) 2014-2017, Matthew Brennan Jones
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ +CPUINFO_VERSION = (3, 3, 0) + +import os, sys +import glob +import re +import time +import platform +import multiprocessing +import ctypes +import pickle +import base64 +import subprocess + +try: + import _winreg as winreg +except ImportError as err: + try: + import winreg + except ImportError as err: + pass + +PY2 = sys.version_info[0] == 2 + + +class DataSource(object): + bits = platform.architecture()[0] + cpu_count = multiprocessing.cpu_count() + is_windows = platform.system().lower() == 'windows' + raw_arch_string = platform.machine() + can_cpuid = True + + @staticmethod + def has_proc_cpuinfo(): + return os.path.exists('/proc/cpuinfo') + + @staticmethod + def has_dmesg(): + return len(program_paths('dmesg')) > 0 + + @staticmethod + def has_var_run_dmesg_boot(): + return os.path.exists('/var/run/dmesg.boot') + + @staticmethod + def has_cpufreq_info(): + return len(program_paths('cpufreq-info')) > 0 + + @staticmethod + def has_sestatus(): + return len(program_paths('sestatus')) > 0 + + @staticmethod + def has_sysctl(): + return len(program_paths('sysctl')) > 0 + + @staticmethod + def has_isainfo(): + return len(program_paths('isainfo')) > 0 + + @staticmethod + def has_kstat(): + return len(program_paths('kstat')) > 0 + + @staticmethod + def has_sysinfo(): + return len(program_paths('sysinfo')) > 0 + + @staticmethod + def has_lscpu(): + return len(program_paths('lscpu')) > 0 + + @staticmethod + def has_ibm_pa_features(): + return len(program_paths('lsprop')) > 0 + + @staticmethod + def cat_proc_cpuinfo(): + return run_and_get_stdout(['cat', '/proc/cpuinfo']) + + @staticmethod + def cpufreq_info(): + return run_and_get_stdout(['cpufreq-info']) + + @staticmethod + def sestatus_allow_execheap(): + return run_and_get_stdout(['sestatus', '-b'], ['grep', '-i', '"allow_execheap"'])[1].strip().lower().endswith('on') + + @staticmethod + def sestatus_allow_execmem(): + return run_and_get_stdout(['sestatus', '-b'], ['grep', '-i', 
'"allow_execmem"'])[1].strip().lower().endswith('on') + + @staticmethod + def dmesg_a(): + return run_and_get_stdout(['dmesg', '-a']) + + @staticmethod + def cat_var_run_dmesg_boot(): + return run_and_get_stdout(['cat', '/var/run/dmesg.boot']) + + @staticmethod + def sysctl_machdep_cpu_hw_cpufrequency(): + return run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency']) + + @staticmethod + def isainfo_vb(): + return run_and_get_stdout(['isainfo', '-vb']) + + @staticmethod + def kstat_m_cpu_info(): + return run_and_get_stdout(['kstat', '-m', 'cpu_info']) + + @staticmethod + def sysinfo_cpu(): + return run_and_get_stdout(['sysinfo', '-cpu']) + + @staticmethod + def lscpu(): + return run_and_get_stdout(['lscpu']) + + @staticmethod + def ibm_pa_features(): + ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features') + if ibm_features: + return run_and_get_stdout(['lsprop', ibm_features[0]]) + + @staticmethod + def winreg_processor_brand(): + key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0") + processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0] + winreg.CloseKey(key) + return processor_brand + + @staticmethod + def winreg_vendor_id(): + key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0") + vendor_id = winreg.QueryValueEx(key, "VendorIdentifier")[0] + winreg.CloseKey(key) + return vendor_id + + @staticmethod + def winreg_raw_arch_string(): + key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment") + raw_arch_string = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0] + winreg.CloseKey(key) + return raw_arch_string + + @staticmethod + def winreg_hz_actual(): + key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0") + hz_actual = winreg.QueryValueEx(key, "~Mhz")[0] + winreg.CloseKey(key) + hz_actual = to_hz_string(hz_actual) + return hz_actual + 
+ @staticmethod + def winreg_feature_bits(): + key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0") + feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0] + winreg.CloseKey(key) + return feature_bits + +def obj_to_b64(thing): + a = thing + b = pickle.dumps(a) + c = base64.b64encode(b) + d = c.decode('utf8') + return d + +def b64_to_obj(thing): + try: + a = base64.b64decode(thing) + b = pickle.loads(a) + return b + except: + return {} + +def run_and_get_stdout(command, pipe_command=None): + if not pipe_command: + p1 = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + output = p1.communicate()[0] + if not PY2: + output = output.decode(encoding='UTF-8') + return p1.returncode, output + else: + p1 = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + p2 = subprocess.Popen(pipe_command, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p1.stdout.close() + output = p2.communicate()[0] + if not PY2: + output = output.decode(encoding='UTF-8') + return p2.returncode, output + + +def program_paths(program_name): + paths = [] + exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep)) + path = os.environ['PATH'] + for p in os.environ['PATH'].split(os.pathsep): + p = os.path.join(p, program_name) + if os.access(p, os.X_OK): + paths.append(p) + for e in exts: + pext = p + e + if os.access(pext, os.X_OK): + paths.append(pext) + return paths + +def _get_field_actual(cant_be_number, raw_string, field_names): + for line in raw_string.splitlines(): + for field_name in field_names: + field_name = field_name.lower() + if ':' in line: + left, right = line.split(':', 1) + left = left.strip().lower() + right = right.strip() + if left == field_name and len(right) > 0: + if cant_be_number: + if not right.isdigit(): + return right + else: + return right + + return None + +def _get_field(cant_be_number, raw_string, 
convert_to, default_value, *field_names): + retval = _get_field_actual(cant_be_number, raw_string, field_names) + + # Convert the return value + if retval and convert_to: + try: + retval = convert_to(retval) + except: + retval = default_value + + # Return the default if there is no return value + if retval is None: + retval = default_value + + return retval + +def _get_hz_string_from_brand(processor_brand): + # Just return 0 if the processor brand does not have the Hz + if not 'hz' in processor_brand.lower(): + return (1, '0.0') + + hz_brand = processor_brand.lower() + scale = 1 + + if hz_brand.endswith('mhz'): + scale = 6 + elif hz_brand.endswith('ghz'): + scale = 9 + if '@' in hz_brand: + hz_brand = hz_brand.split('@')[1] + else: + hz_brand = hz_brand.rsplit(None, 1)[1] + + hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip() + hz_brand = to_hz_string(hz_brand) + + return (scale, hz_brand) + +def to_friendly_hz(ticks, scale): + # Get the raw Hz as a string + left, right = to_raw_hz(ticks, scale) + ticks = '{0}.{1}'.format(left, right) + + # Get the location of the dot, and remove said dot + dot_index = ticks.index('.') + ticks = ticks.replace('.', '') + + # Get the Hz symbol and scale + symbol = "Hz" + scale = 0 + if dot_index > 9: + symbol = "GHz" + scale = 9 + elif dot_index > 6: + symbol = "MHz" + scale = 6 + elif dot_index > 3: + symbol = "KHz" + scale = 3 + + # Get the Hz with the dot at the new scaled point + ticks = '{0}.{1}'.format(ticks[:-scale-1], ticks[-scale-1:]) + + # Format the ticks to have 4 numbers after the decimal + # and remove any superfluous zeroes. 
+ ticks = '{0:.4f} {1}'.format(float(ticks), symbol) + ticks = ticks.rstrip('0') + + return ticks + +def to_raw_hz(ticks, scale): + # Scale the numbers + ticks = ticks.lstrip('0') + old_index = ticks.index('.') + ticks = ticks.replace('.', '') + ticks = ticks.ljust(scale + old_index+1, '0') + new_index = old_index + scale + ticks = '{0}.{1}'.format(ticks[:new_index], ticks[new_index:]) + left, right = ticks.split('.') + left, right = int(left), int(right) + return (left, right) + +def to_hz_string(ticks): + # Convert to string + ticks = '{0}'.format(ticks) + + # Add decimal if missing + if '.' not in ticks: + ticks = '{0}.0'.format(ticks) + + # Remove trailing zeros + ticks = ticks.rstrip('0') + + # Add one trailing zero for empty right side + if ticks.endswith('.'): + ticks = '{0}0'.format(ticks) + + return ticks + +def to_friendly_bytes(input): + if not input: + return input + + formats = { + r"^[0-9]+B$" : 'B', + r"^[0-9]+K$" : 'KB', + r"^[0-9]+M$" : 'MB', + r"^[0-9]+G$" : 'GB' + } + + for pattern, friendly_size in formats.items(): + if re.match(pattern, input): + return "{0} {1}".format(input[ : -1].strip(), friendly_size) + + return input + +def _parse_cpu_string(cpu_string): + # Get location of fields at end of string + fields_index = cpu_string.find('(', cpu_string.find('@')) + #print(fields_index) + + # Processor Brand + processor_brand = cpu_string + if fields_index != -1: + processor_brand = cpu_string[0 : fields_index].strip() + #print('processor_brand: ', processor_brand) + + fields = None + if fields_index != -1: + fields = cpu_string[fields_index : ] + #print('fields: ', fields) + + # Hz + scale, hz_brand = _get_hz_string_from_brand(processor_brand) + + # Various fields + vendor_id, stepping, model, family = (None, None, None, None) + if fields: + try: + fields = fields.rsplit('(', 1)[1].split(')')[0].split(',') + fields = [f.strip().lower() for f in fields] + fields = [f.split(':') for f in fields] + fields = [{f[0].strip() : f[1].strip()} for f in 
fields] + #print('fields: ', fields) + for field in fields: + name = list(field.keys())[0] + value = list(field.values())[0] + #print('name:{0}, value:{1}'.format(name, value)) + if name == 'origin': + vendor_id = value.strip('"') + elif name == 'stepping': + stepping = int(value.lstrip('0x'), 16) + elif name == 'model': + model = int(value.lstrip('0x'), 16) + elif name in ['fam', 'family']: + family = int(value.lstrip('0x'), 16) + except: + #raise + pass + + return (processor_brand, hz_brand, scale, vendor_id, stepping, model, family) + +def _parse_dmesg_output(output): + try: + # Get all the dmesg lines that might contain a CPU string + lines = output.split(' CPU0:')[1:] + \ + output.split(' CPU1:')[1:] + \ + output.split(' CPU:')[1:] + \ + output.split('\nCPU0:')[1:] + \ + output.split('\nCPU1:')[1:] + \ + output.split('\nCPU:')[1:] + lines = [l.split('\n')[0].strip() for l in lines] + + # Convert the lines to CPU strings + cpu_strings = [_parse_cpu_string(l) for l in lines] + + # Find the CPU string that has the most fields + best_string = None + highest_count = 0 + for cpu_string in cpu_strings: + count = sum([n is not None for n in cpu_string]) + if count > highest_count: + highest_count = count + best_string = cpu_string + + # If no CPU string was found, return {} + if not best_string: + return {} + + processor_brand, hz_actual, scale, vendor_id, stepping, model, family = best_string + + # Origin + if ' Origin=' in output: + fields = output[output.find(' Origin=') : ].split('\n')[0] + fields = fields.strip().split() + fields = [n.strip().split('=') for n in fields] + fields = [{n[0].strip().lower() : n[1].strip()} for n in fields] + #print('fields: ', fields) + for field in fields: + name = list(field.keys())[0] + value = list(field.values())[0] + #print('name:{0}, value:{1}'.format(name, value)) + if name == 'origin': + vendor_id = value.strip('"') + elif name == 'stepping': + stepping = int(value.lstrip('0x'), 16) + elif name == 'model': + model = 
int(value.lstrip('0x'), 16) + elif name in ['fam', 'family']: + family = int(value.lstrip('0x'), 16) + #print('FIELDS: ', (vendor_id, stepping, model, family)) + + # Features + flag_lines = [] + for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']: + if category in output: + flag_lines.append(output.split(category)[1].split('\n')[0]) + + flags = [] + for line in flag_lines: + line = line.split('<')[1].split('>')[0].lower() + for flag in line.split(','): + flags.append(flag) + flags.sort() + + # Convert from GHz/MHz string to Hz + scale, hz_advertised = _get_hz_string_from_brand(processor_brand) + + info = { + 'vendor_id' : vendor_id, + 'brand' : processor_brand, + + 'stepping' : stepping, + 'model' : model, + 'family' : family, + 'flags' : flags + } + + if hz_advertised and hz_advertised != '0.0': + info['hz_advertised'] = to_friendly_hz(hz_advertised, scale) + info['hz_actual'] = to_friendly_hz(hz_actual, scale) + + if hz_advertised and hz_advertised != '0.0': + info['hz_advertised_raw'] = to_raw_hz(hz_advertised, scale) + info['hz_actual_raw'] = to_raw_hz(hz_actual, scale) + + return {k: v for k, v in info.items() if v} + except: + #raise + pass + + return {} + +def parse_arch(raw_arch_string): + arch, bits = None, None + raw_arch_string = raw_arch_string.lower() + + # X86 + if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', raw_arch_string): + arch = 'X86_32' + bits = 32 + elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', raw_arch_string): + arch = 'X86_64' + bits = 64 + # ARM + elif re.match('^armv8-a|aarch64$', raw_arch_string): + arch = 'ARM_8' + bits = 64 + elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', raw_arch_string): + arch = 'ARM_7' + bits = 32 + elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', raw_arch_string): + arch = 'ARM_8' + bits = 32 + # PPC + elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', raw_arch_string): + arch = 'PPC_32' + bits = 32 + elif 
re.match('^powerpc$|^ppc64$|^ppc64le$', raw_arch_string): + arch = 'PPC_64' + bits = 64 + # SPARC + elif re.match('^sparc32$|^sparc$', raw_arch_string): + arch = 'SPARC_32' + bits = 32 + elif re.match('^sparc64$|^sun4u$|^sun4v$', raw_arch_string): + arch = 'SPARC_64' + bits = 64 + + return (arch, bits) + +def is_bit_set(reg, bit): + mask = 1 << bit + is_set = reg & mask > 0 + return is_set + + +class CPUID(object): + def __init__(self): + # Figure out if SE Linux is on and in enforcing mode + self.is_selinux_enforcing = False + + # Just return if the SE Linux Status Tool is not installed + if not DataSource.has_sestatus(): + return + + # Figure out if we can execute heap and execute memory + can_selinux_exec_heap = DataSource.sestatus_allow_execheap() + can_selinux_exec_memory = DataSource.sestatus_allow_execmem() + self.is_selinux_enforcing = (not can_selinux_exec_heap or not can_selinux_exec_memory) + + def _asm_func(self, restype=None, argtypes=(), byte_code=[]): + byte_code = bytes.join(b'', byte_code) + address = None + + if DataSource.is_windows: + # Allocate a memory segment the size of the byte code, and make it executable + size = len(byte_code) + MEM_COMMIT = ctypes.c_ulong(0x1000) + PAGE_EXECUTE_READWRITE = ctypes.c_ulong(0x40) + address = ctypes.windll.kernel32.VirtualAlloc(ctypes.c_int(0), ctypes.c_size_t(size), MEM_COMMIT, PAGE_EXECUTE_READWRITE) + if not address: + raise Exception("Failed to VirtualAlloc") + + # Copy the byte code into the memory segment + memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr) + if memmove(address, byte_code, size) < 0: + raise Exception("Failed to memmove") + else: + # Allocate a memory segment the size of the byte code + size = len(byte_code) + address = ctypes.pythonapi.valloc(size) + if not address: + raise Exception("Failed to valloc") + + # Mark the memory segment as writeable only + if not self.is_selinux_enforcing: + WRITE = 0x2 + if 
ctypes.pythonapi.mprotect(address, size, WRITE) < 0: + raise Exception("Failed to mprotect") + + # Copy the byte code into the memory segment + if ctypes.pythonapi.memmove(address, byte_code, size) < 0: + raise Exception("Failed to memmove") + + # Mark the memory segment as writeable and executable only + if not self.is_selinux_enforcing: + WRITE_EXECUTE = 0x2 | 0x4 + if ctypes.pythonapi.mprotect(address, size, WRITE_EXECUTE) < 0: + raise Exception("Failed to mprotect") + + # Cast the memory segment into a function + functype = ctypes.CFUNCTYPE(restype, *argtypes) + fun = functype(address) + return fun, address + + def _run_asm(self, *byte_code): + # Convert the byte code into a function that returns an int + restype = None + if DataSource.bits == '64bit': + restype = ctypes.c_uint64 + else: + restype = ctypes.c_uint32 + argtypes = () + func, address = self._asm_func(restype, argtypes, byte_code) + + # Call the byte code like a function + retval = func() + + size = ctypes.c_size_t(len(byte_code)) + + # Free the function memory segment + if DataSource.is_windows: + MEM_RELEASE = ctypes.c_ulong(0x8000) + ctypes.windll.kernel32.VirtualFree(address, size, MEM_RELEASE) + else: + # Remove the executable tag on the memory + READ_WRITE = 0x1 | 0x2 + if ctypes.pythonapi.mprotect(address, size, READ_WRITE) < 0: + raise Exception("Failed to mprotect") + + ctypes.pythonapi.free(address) + + return retval + + # FIXME: We should not have to use different instructions to + # set eax to 0 or 1, on 32bit and 64bit machines. 
+ def _zero_eax(self): + if DataSource.bits == '64bit': + return ( + b"\x66\xB8\x00\x00" # mov eax,0x0" + ) + else: + return ( + b"\x31\xC0" # xor ax,ax + ) + + def _one_eax(self): + if DataSource.bits == '64bit': + return ( + b"\x66\xB8\x01\x00" # mov eax,0x1" + ) + else: + return ( + b"\x31\xC0" # xor ax,ax + b"\x40" # inc ax + ) + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID + def get_vendor_id(self): + # EBX + ebx = self._run_asm( + self._zero_eax(), + b"\x0F\xA2" # cpuid + b"\x89\xD8" # mov ax,bx + b"\xC3" # ret + ) + + # ECX + ecx = self._run_asm( + self._zero_eax(), + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + # EDX + edx = self._run_asm( + self._zero_eax(), + b"\x0f\xa2" # cpuid + b"\x89\xD0" # mov ax,dx + b"\xC3" # ret + ) + + # Each 4bits is a ascii letter in the name + vendor_id = [] + for reg in [ebx, edx, ecx]: + for n in [0, 8, 16, 24]: + vendor_id.append(chr((reg >> n) & 0xFF)) + vendor_id = ''.join(vendor_id) + + return vendor_id + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits + def get_info(self): + # EAX + eax = self._run_asm( + self._one_eax(), + b"\x0f\xa2" # cpuid + b"\xC3" # ret + ) + + # Get the CPU info + stepping = (eax >> 0) & 0xF # 4 bits + model = (eax >> 4) & 0xF # 4 bits + family = (eax >> 8) & 0xF # 4 bits + processor_type = (eax >> 12) & 0x3 # 2 bits + extended_model = (eax >> 16) & 0xF # 4 bits + extended_family = (eax >> 20) & 0xFF # 8 bits + + return { + 'stepping' : stepping, + 'model' : model, + 'family' : family, + 'processor_type' : processor_type, + 'extended_model' : extended_model, + 'extended_family' : extended_family + } + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported + def get_max_extension_support(self): + # Check for extension support + max_extension_support = self._run_asm( + b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000 + b"\x0f\xa2" # cpuid + b"\xC3" # ret + ) + + return max_extension_support 
+ + # http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits + def get_flags(self, max_extension_support): + # EDX + edx = self._run_asm( + self._one_eax(), + b"\x0f\xa2" # cpuid + b"\x89\xD0" # mov ax,dx + b"\xC3" # ret + ) + + # ECX + ecx = self._run_asm( + self._one_eax(), + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + # Get the CPU flags + flags = { + 'fpu' : is_bit_set(edx, 0), + 'vme' : is_bit_set(edx, 1), + 'de' : is_bit_set(edx, 2), + 'pse' : is_bit_set(edx, 3), + 'tsc' : is_bit_set(edx, 4), + 'msr' : is_bit_set(edx, 5), + 'pae' : is_bit_set(edx, 6), + 'mce' : is_bit_set(edx, 7), + 'cx8' : is_bit_set(edx, 8), + 'apic' : is_bit_set(edx, 9), + #'reserved1' : is_bit_set(edx, 10), + 'sep' : is_bit_set(edx, 11), + 'mtrr' : is_bit_set(edx, 12), + 'pge' : is_bit_set(edx, 13), + 'mca' : is_bit_set(edx, 14), + 'cmov' : is_bit_set(edx, 15), + 'pat' : is_bit_set(edx, 16), + 'pse36' : is_bit_set(edx, 17), + 'pn' : is_bit_set(edx, 18), + 'clflush' : is_bit_set(edx, 19), + #'reserved2' : is_bit_set(edx, 20), + 'dts' : is_bit_set(edx, 21), + 'acpi' : is_bit_set(edx, 22), + 'mmx' : is_bit_set(edx, 23), + 'fxsr' : is_bit_set(edx, 24), + 'sse' : is_bit_set(edx, 25), + 'sse2' : is_bit_set(edx, 26), + 'ss' : is_bit_set(edx, 27), + 'ht' : is_bit_set(edx, 28), + 'tm' : is_bit_set(edx, 29), + 'ia64' : is_bit_set(edx, 30), + 'pbe' : is_bit_set(edx, 31), + + 'pni' : is_bit_set(ecx, 0), + 'pclmulqdq' : is_bit_set(ecx, 1), + 'dtes64' : is_bit_set(ecx, 2), + 'monitor' : is_bit_set(ecx, 3), + 'ds_cpl' : is_bit_set(ecx, 4), + 'vmx' : is_bit_set(ecx, 5), + 'smx' : is_bit_set(ecx, 6), + 'est' : is_bit_set(ecx, 7), + 'tm2' : is_bit_set(ecx, 8), + 'ssse3' : is_bit_set(ecx, 9), + 'cid' : is_bit_set(ecx, 10), + #'reserved3' : is_bit_set(ecx, 11), + 'fma' : is_bit_set(ecx, 12), + 'cx16' : is_bit_set(ecx, 13), + 'xtpr' : is_bit_set(ecx, 14), + 'pdcm' : is_bit_set(ecx, 15), + #'reserved4' : is_bit_set(ecx, 16), + 'pcid' : is_bit_set(ecx, 17), + 'dca' 
: is_bit_set(ecx, 18), + 'sse4_1' : is_bit_set(ecx, 19), + 'sse4_2' : is_bit_set(ecx, 20), + 'x2apic' : is_bit_set(ecx, 21), + 'movbe' : is_bit_set(ecx, 22), + 'popcnt' : is_bit_set(ecx, 23), + 'tscdeadline' : is_bit_set(ecx, 24), + 'aes' : is_bit_set(ecx, 25), + 'xsave' : is_bit_set(ecx, 26), + 'osxsave' : is_bit_set(ecx, 27), + 'avx' : is_bit_set(ecx, 28), + 'f16c' : is_bit_set(ecx, 29), + 'rdrnd' : is_bit_set(ecx, 30), + 'hypervisor' : is_bit_set(ecx, 31) + } + + # Get a list of only the flags that are true + flags = [k for k, v in flags.items() if v] + + # Get the Extended CPU flags + extended_flags = {} + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features + if max_extension_support == 7: + pass + # FIXME: Are we missing all these flags too? + # avx2 et cetera ... + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits + if max_extension_support >= 0x80000001: + # EBX # FIXME: This may need to be EDX instead + ebx = self._run_asm( + b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001 + b"\x0f\xa2" # cpuid + b"\x89\xD8" # mov ax,bx + b"\xC3" # ret + ) + + # ECX + ecx = self._run_asm( + b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001 + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + # Get the extended CPU flags + extended_flags = { + 'fpu' : is_bit_set(ebx, 0), + 'vme' : is_bit_set(ebx, 1), + 'de' : is_bit_set(ebx, 2), + 'pse' : is_bit_set(ebx, 3), + 'tsc' : is_bit_set(ebx, 4), + 'msr' : is_bit_set(ebx, 5), + 'pae' : is_bit_set(ebx, 6), + 'mce' : is_bit_set(ebx, 7), + 'cx8' : is_bit_set(ebx, 8), + 'apic' : is_bit_set(ebx, 9), + #'reserved' : is_bit_set(ebx, 10), + 'syscall' : is_bit_set(ebx, 11), + 'mtrr' : is_bit_set(ebx, 12), + 'pge' : is_bit_set(ebx, 13), + 'mca' : is_bit_set(ebx, 14), + 'cmov' : is_bit_set(ebx, 15), + 'pat' : is_bit_set(ebx, 16), + 'pse36' : is_bit_set(ebx, 17), + #'reserved' : is_bit_set(ebx, 18), + 'mp' : is_bit_set(ebx, 19), + 'nx' : is_bit_set(ebx, 20), + 
#'reserved' : is_bit_set(ebx, 21), + 'mmxext' : is_bit_set(ebx, 22), + 'mmx' : is_bit_set(ebx, 23), + 'fxsr' : is_bit_set(ebx, 24), + 'fxsr_opt' : is_bit_set(ebx, 25), + 'pdpe1gp' : is_bit_set(ebx, 26), + 'rdtscp' : is_bit_set(ebx, 27), + #'reserved' : is_bit_set(ebx, 28), + 'lm' : is_bit_set(ebx, 29), + '3dnowext' : is_bit_set(ebx, 30), + '3dnow' : is_bit_set(ebx, 31), + + 'lahf_lm' : is_bit_set(ecx, 0), + 'cmp_legacy' : is_bit_set(ecx, 1), + 'svm' : is_bit_set(ecx, 2), + 'extapic' : is_bit_set(ecx, 3), + 'cr8_legacy' : is_bit_set(ecx, 4), + 'abm' : is_bit_set(ecx, 5), + 'sse4a' : is_bit_set(ecx, 6), + 'misalignsse' : is_bit_set(ecx, 7), + '3dnowprefetch' : is_bit_set(ecx, 8), + 'osvw' : is_bit_set(ecx, 9), + 'ibs' : is_bit_set(ecx, 10), + 'xop' : is_bit_set(ecx, 11), + 'skinit' : is_bit_set(ecx, 12), + 'wdt' : is_bit_set(ecx, 13), + #'reserved' : is_bit_set(ecx, 14), + 'lwp' : is_bit_set(ecx, 15), + 'fma4' : is_bit_set(ecx, 16), + 'tce' : is_bit_set(ecx, 17), + #'reserved' : is_bit_set(ecx, 18), + 'nodeid_msr' : is_bit_set(ecx, 19), + #'reserved' : is_bit_set(ecx, 20), + 'tbm' : is_bit_set(ecx, 21), + 'topoext' : is_bit_set(ecx, 22), + 'perfctr_core' : is_bit_set(ecx, 23), + 'perfctr_nb' : is_bit_set(ecx, 24), + #'reserved' : is_bit_set(ecx, 25), + 'dbx' : is_bit_set(ecx, 26), + 'perftsc' : is_bit_set(ecx, 27), + 'pci_l2i' : is_bit_set(ecx, 28), + #'reserved' : is_bit_set(ecx, 29), + #'reserved' : is_bit_set(ecx, 30), + #'reserved' : is_bit_set(ecx, 31) + } + + # Get a list of only the flags that are true + extended_flags = [k for k, v in extended_flags.items() if v] + flags += extended_flags + + flags.sort() + return flags + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String + def get_processor_brand(self, max_extension_support): + processor_brand = "" + + # Processor brand string + if max_extension_support >= 0x80000004: + instructions = [ + b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002 + 
b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003 + b"\xB8\x04\x00\x00\x80" # mov ax,0x80000004 + ] + for instruction in instructions: + # EAX + eax = self._run_asm( + instruction, # mov ax,0x8000000? + b"\x0f\xa2" # cpuid + b"\x89\xC0" # mov ax,ax + b"\xC3" # ret + ) + + # EBX + ebx = self._run_asm( + instruction, # mov ax,0x8000000? + b"\x0f\xa2" # cpuid + b"\x89\xD8" # mov ax,bx + b"\xC3" # ret + ) + + # ECX + ecx = self._run_asm( + instruction, # mov ax,0x8000000? + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + # EDX + edx = self._run_asm( + instruction, # mov ax,0x8000000? + b"\x0f\xa2" # cpuid + b"\x89\xD0" # mov ax,dx + b"\xC3" # ret + ) + + # Combine each of the 4 bytes in each register into the string + for reg in [eax, ebx, ecx, edx]: + for n in [0, 8, 16, 24]: + processor_brand += chr((reg >> n) & 0xFF) + + # Strip off any trailing NULL terminators and white space + processor_brand = processor_brand.strip("\0").strip() + + return processor_brand + + # http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features + def get_cache(self, max_extension_support): + cache_info = {} + + # Just return if the cache feature is not supported + if max_extension_support < 0x80000006: + return cache_info + + # ECX + ecx = self._run_asm( + b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006 + b"\x0f\xa2" # cpuid + b"\x89\xC8" # mov ax,cx + b"\xC3" # ret + ) + + cache_info = { + 'size_kb' : ecx & 0xFF, + 'line_size_b' : (ecx >> 12) & 0xF, + 'associativity' : (ecx >> 16) & 0xFFFF + } + + return cache_info + + def get_ticks(self): + retval = None + + if DataSource.bits == '32bit': + # Works on x86_32 + restype = None + argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint)) + get_ticks_x86_32, address = self._asm_func(restype, argtypes, + [ + b"\x55", # push bp + b"\x89\xE5", # mov bp,sp + b"\x31\xC0", # xor ax,ax + b"\x0F\xA2", # cpuid + b"\x0F\x31", # rdtsc + b"\x8B\x5D\x08", # mov bx,[di+0x8] + b"\x8B\x4D\x0C", # mov 
cx,[di+0xc] + b"\x89\x13", # mov [bp+di],dx + b"\x89\x01", # mov [bx+di],ax + b"\x5D", # pop bp + b"\xC3" # ret + ] + ) + + high = ctypes.c_uint32(0) + low = ctypes.c_uint32(0) + + get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low)) + retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value + elif DataSource.bits == '64bit': + # Works on x86_64 + restype = ctypes.c_uint64 + argtypes = () + get_ticks_x86_64, address = self._asm_func(restype, argtypes, + [ + b"\x48", # dec ax + b"\x31\xC0", # xor ax,ax + b"\x0F\xA2", # cpuid + b"\x0F\x31", # rdtsc + b"\x48", # dec ax + b"\xC1\xE2\x20", # shl dx,byte 0x20 + b"\x48", # dec ax + b"\x09\xD0", # or ax,dx + b"\xC3", # ret + ] + ) + retval = get_ticks_x86_64() + + return retval + + def get_raw_hz(self): + start = self.get_ticks() + + time.sleep(1) + + end = self.get_ticks() + + ticks = (end - start) + + return ticks + +def actual_get_cpu_info_from_cpuid(): + ''' + Warning! This function has the potential to crash the Python runtime. + Do not call it directly. Use the _get_cpu_info_from_cpuid function instead. + It will safely call this function in another process. 
+ ''' + # Get the CPU arch and bits + arch, bits = parse_arch(DataSource.raw_arch_string) + + # Return none if this is not an X86 CPU + if not arch in ['X86_32', 'X86_64']: + return obj_to_b64({}) + + # Return none if SE Linux is in enforcing mode + cpuid = CPUID() + if cpuid.is_selinux_enforcing: + return obj_to_b64({}) + + # Get the cpu info from the CPUID register + max_extension_support = cpuid.get_max_extension_support() + cache_info = cpuid.get_cache(max_extension_support) + info = cpuid.get_info() + + processor_brand = cpuid.get_processor_brand(max_extension_support) + + # Get the Hz and scale + hz_actual = cpuid.get_raw_hz() + hz_actual = to_hz_string(hz_actual) + + # Get the Hz and scale + scale, hz_advertised = _get_hz_string_from_brand(processor_brand) + info = { + 'vendor_id' : cpuid.get_vendor_id(), + 'hardware' : '', + 'brand' : processor_brand, + + 'hz_advertised' : to_friendly_hz(hz_advertised, scale), + 'hz_actual' : to_friendly_hz(hz_actual, 6), + 'hz_advertised_raw' : to_raw_hz(hz_advertised, scale), + 'hz_actual_raw' : to_raw_hz(hz_actual, 6), + + 'l2_cache_size' : to_friendly_bytes(cache_info['size_kb']), + 'l2_cache_line_size' : cache_info['line_size_b'], + 'l2_cache_associativity' : hex(cache_info['associativity']), + + 'stepping' : info['stepping'], + 'model' : info['model'], + 'family' : info['family'], + 'processor_type' : info['processor_type'], + 'extended_model' : info['extended_model'], + 'extended_family' : info['extended_family'], + 'flags' : cpuid.get_flags(max_extension_support) + } + + info = {k: v for k, v in info.items() if v} + return obj_to_b64(info) + +def _get_cpu_info_from_cpuid(): + ''' + Returns the CPU info gathered by querying the X86 cpuid register in a new process. + Returns {} on non X86 cpus. + Returns {} if SELinux is in enforcing mode. 
+ ''' + + # Return {} if can't cpuid + if not DataSource.can_cpuid: + return {} + + # Get the CPU arch and bits + arch, bits = parse_arch(DataSource.raw_arch_string) + + # Return {} if this is not an X86 CPU + if not arch in ['X86_32', 'X86_64']: + return {} + + returncode, output = run_and_get_stdout([sys.executable, "-c", "import cpuinfo; print(cpuinfo.actual_get_cpu_info_from_cpuid())"]) + if returncode != 0: + return {} + info = b64_to_obj(output) + return info + +def _get_cpu_info_from_proc_cpuinfo(): + ''' + Returns the CPU info gathered from /proc/cpuinfo. + Returns {} if /proc/cpuinfo is not found. + ''' + try: + # Just return {} if there is no cpuinfo + if not DataSource.has_proc_cpuinfo(): + return {} + + returncode, output = DataSource.cat_proc_cpuinfo() + if returncode != 0: + return {} + + # Various fields + vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor') + processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor') + cache_size = _get_field(False, output, None, '', 'cache size') + stepping = _get_field(False, output, int, 0, 'stepping') + model = _get_field(False, output, int, 0, 'model') + family = _get_field(False, output, int, 0, 'cpu family') + hardware = _get_field(False, output, None, '', 'Hardware') + # Flags + flags = _get_field(False, output, None, None, 'flags', 'Features') + if flags: + flags = flags.split() + flags.sort() + + # Convert from MHz string to Hz + hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock') + hz_actual = hz_actual.lower().rstrip('mhz').strip() + hz_actual = to_hz_string(hz_actual) + + # Convert from GHz/MHz string to Hz + scale, hz_advertised = (0, None) + try: + scale, hz_advertised = _get_hz_string_from_brand(processor_brand) + except Exception: + pass + + info = { + 'hardware' : hardware, + 'brand' : processor_brand, + + 'l3_cache_size' : to_friendly_bytes(cache_size), + 'flags' : flags, + 'vendor_id' : vendor_id, + 
'stepping' : stepping, + 'model' : model, + 'family' : family, + } + + # Make the Hz the same for actual and advertised if missing any + if not hz_advertised or hz_advertised == '0.0': + hz_advertised = hz_actual + scale = 6 + elif not hz_actual or hz_actual == '0.0': + hz_actual = hz_advertised + + # Add the Hz if there is one + if to_raw_hz(hz_advertised, scale) > (0, 0): + info['hz_advertised'] = to_friendly_hz(hz_advertised, scale) + info['hz_advertised_raw'] = to_raw_hz(hz_advertised, scale) + if to_raw_hz(hz_actual, scale) > (0, 0): + info['hz_actual'] = to_friendly_hz(hz_actual, 6) + info['hz_actual_raw'] = to_raw_hz(hz_actual, 6) + + info = {k: v for k, v in info.items() if v} + return info + except: + #raise # NOTE: To have this throw on error, uncomment this line + return {} + +def _get_cpu_info_from_cpufreq_info(): + ''' + Returns the CPU info gathered from cpufreq-info. + Returns {} if cpufreq-info is not found. + ''' + try: + scale, hz_brand = 1, '0.0' + + if not DataSource.has_cpufreq_info(): + return {} + + returncode, output = DataSource.cpufreq_info() + if returncode != 0: + return {} + + hz_brand = output.split('current CPU frequency is')[1].split('\n')[0] + i = hz_brand.find('Hz') + assert(i != -1) + hz_brand = hz_brand[0 : i+2].strip().lower() + + if hz_brand.endswith('mhz'): + scale = 6 + elif hz_brand.endswith('ghz'): + scale = 9 + hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip() + hz_brand = to_hz_string(hz_brand) + + info = { + 'hz_advertised' : to_friendly_hz(hz_brand, scale), + 'hz_actual' : to_friendly_hz(hz_brand, scale), + 'hz_advertised_raw' : to_raw_hz(hz_brand, scale), + 'hz_actual_raw' : to_raw_hz(hz_brand, scale), + } + + info = {k: v for k, v in info.items() if v} + return info + except: + #raise # NOTE: To have this throw on error, uncomment this line + return {} + +def _get_cpu_info_from_lscpu(): + ''' + Returns the CPU info gathered from lscpu. + Returns {} if lscpu is not found. 
+ ''' + try: + if not DataSource.has_lscpu(): + return {} + + returncode, output = DataSource.lscpu() + if returncode != 0: + return {} + + info = {} + + new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz') + if new_hz: + new_hz = to_hz_string(new_hz) + scale = 6 + info['hz_advertised'] = to_friendly_hz(new_hz, scale) + info['hz_actual'] = to_friendly_hz(new_hz, scale) + info['hz_advertised_raw'] = to_raw_hz(new_hz, scale) + info['hz_actual_raw'] = to_raw_hz(new_hz, scale) + + vendor_id = _get_field(False, output, None, None, 'Vendor ID') + if vendor_id: + info['vendor_id'] = vendor_id + + brand = _get_field(False, output, None, None, 'Model name') + if brand: + info['brand'] = brand + + brand = _get_field(False, output, None, None, 'Model name') + if brand: + info['brand'] = brand + + family = _get_field(False, output, None, None, 'CPU family') + if family and family.isdigit(): + info['family'] = int(family) + + stepping = _get_field(False, output, None, None, 'Stepping') + if stepping and stepping.isdigit(): + info['stepping'] = int(stepping) + + model = _get_field(False, output, None, None, 'Model') + if model and model.isdigit(): + info['model'] = int(model) + + l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache') + if l1_data_cache_size: + info['l1_data_cache_size'] = to_friendly_bytes(l1_data_cache_size) + + l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache') + if l1_instruction_cache_size: + info['l1_instruction_cache_size'] = to_friendly_bytes(l1_instruction_cache_size) + + l2_cache_size = _get_field(False, output, None, None, 'L2 cache') + if l2_cache_size: + info['l2_cache_size'] = to_friendly_bytes(l2_cache_size) + + l3_cache_size = _get_field(False, output, None, None, 'L3 cache') + if l3_cache_size: + info['l3_cache_size'] = to_friendly_bytes(l3_cache_size) + + # Flags + flags = _get_field(False, output, None, None, 'flags', 'Features') + if flags: + flags = flags.split() + flags.sort() 
+ info['flags'] = flags + + info = {k: v for k, v in info.items() if v} + return info + except: + #raise # NOTE: To have this throw on error, uncomment this line + return {} + +def _get_cpu_info_from_dmesg(): + ''' + Returns the CPU info gathered from dmesg. + Returns {} if dmesg is not found or does not have the desired info. + ''' + # Just return {} if there is no dmesg + if not DataSource.has_dmesg(): + return {} + + # If dmesg fails return {} + returncode, output = DataSource.dmesg_a() + if output == None or returncode != 0: + return {} + + return _parse_dmesg_output(output) + + +# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf +# page 767 +def _get_cpu_info_from_ibm_pa_features(): + ''' + Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features + Returns {} if lsprop is not found or ibm,pa-features does not have the desired info. + ''' + try: + # Just return {} if there is no lsprop + if not DataSource.has_ibm_pa_features(): + return {} + + # If ibm,pa-features fails return {} + returncode, output = DataSource.ibm_pa_features() + if output == None or returncode != 0: + return {} + + # Filter out invalid characters from output + value = output.split("ibm,pa-features")[1].lower() + value = [s for s in value if s in list('0123456789abcfed')] + value = ''.join(value) + + # Get data converted to Uint32 chunks + left = int(value[0 : 8], 16) + right = int(value[8 : 16], 16) + + # Get the CPU flags + flags = { + # Byte 0 + 'mmu' : is_bit_set(left, 0), + 'fpu' : is_bit_set(left, 1), + 'slb' : is_bit_set(left, 2), + 'run' : is_bit_set(left, 3), + #'reserved' : is_bit_set(left, 4), + 'dabr' : is_bit_set(left, 5), + 'ne' : is_bit_set(left, 6), + 'wtr' : is_bit_set(left, 7), + + # Byte 1 + 'mcr' : is_bit_set(left, 8), + 'dsisr' : is_bit_set(left, 9), + 'lp' : is_bit_set(left, 10), + 'ri' : is_bit_set(left, 11), + 'dabrx' : is_bit_set(left, 12), + 'sprg3' : is_bit_set(left, 13), + 'rislb' : 
is_bit_set(left, 14), + 'pp' : is_bit_set(left, 15), + + # Byte 2 + 'vpm' : is_bit_set(left, 16), + 'dss_2.05' : is_bit_set(left, 17), + #'reserved' : is_bit_set(left, 18), + 'dar' : is_bit_set(left, 19), + #'reserved' : is_bit_set(left, 20), + 'ppr' : is_bit_set(left, 21), + 'dss_2.02' : is_bit_set(left, 22), + 'dss_2.06' : is_bit_set(left, 23), + + # Byte 3 + 'lsd_in_dscr' : is_bit_set(left, 24), + 'ugr_in_dscr' : is_bit_set(left, 25), + #'reserved' : is_bit_set(left, 26), + #'reserved' : is_bit_set(left, 27), + #'reserved' : is_bit_set(left, 28), + #'reserved' : is_bit_set(left, 29), + #'reserved' : is_bit_set(left, 30), + #'reserved' : is_bit_set(left, 31), + + # Byte 4 + 'sso_2.06' : is_bit_set(right, 0), + #'reserved' : is_bit_set(right, 1), + #'reserved' : is_bit_set(right, 2), + #'reserved' : is_bit_set(right, 3), + #'reserved' : is_bit_set(right, 4), + #'reserved' : is_bit_set(right, 5), + #'reserved' : is_bit_set(right, 6), + #'reserved' : is_bit_set(right, 7), + + # Byte 5 + 'le' : is_bit_set(right, 8), + 'cfar' : is_bit_set(right, 9), + 'eb' : is_bit_set(right, 10), + 'lsq_2.07' : is_bit_set(right, 11), + #'reserved' : is_bit_set(right, 12), + #'reserved' : is_bit_set(right, 13), + #'reserved' : is_bit_set(right, 14), + #'reserved' : is_bit_set(right, 15), + + # Byte 6 + 'dss_2.07' : is_bit_set(right, 16), + #'reserved' : is_bit_set(right, 17), + #'reserved' : is_bit_set(right, 18), + #'reserved' : is_bit_set(right, 19), + #'reserved' : is_bit_set(right, 20), + #'reserved' : is_bit_set(right, 21), + #'reserved' : is_bit_set(right, 22), + #'reserved' : is_bit_set(right, 23), + + # Byte 7 + #'reserved' : is_bit_set(right, 24), + #'reserved' : is_bit_set(right, 25), + #'reserved' : is_bit_set(right, 26), + #'reserved' : is_bit_set(right, 27), + #'reserved' : is_bit_set(right, 28), + #'reserved' : is_bit_set(right, 29), + #'reserved' : is_bit_set(right, 30), + #'reserved' : is_bit_set(right, 31), + } + + # Get a list of only the flags that are true + flags 
= [k for k, v in flags.items() if v] + flags.sort() + + info = { + 'flags' : flags + } + info = {k: v for k, v in info.items() if v} + + return info + except: + return {} + + +def _get_cpu_info_from_cat_var_run_dmesg_boot(): + ''' + Returns the CPU info gathered from /var/run/dmesg.boot. + Returns {} if dmesg is not found or does not have the desired info. + ''' + # Just return {} if there is no /var/run/dmesg.boot + if not DataSource.has_var_run_dmesg_boot(): + return {} + + # If dmesg.boot fails return {} + returncode, output = DataSource.cat_var_run_dmesg_boot() + if output == None or returncode != 0: + return {} + + return _parse_dmesg_output(output) + + +def _get_cpu_info_from_sysctl(): + ''' + Returns the CPU info gathered from sysctl. + Returns {} if sysctl is not found. + ''' + try: + # Just return {} if there is no sysctl + if not DataSource.has_sysctl(): + return {} + + # If sysctl fails return {} + returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency() + if output == None or returncode != 0: + return {} + + # Various fields + vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor') + processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string') + cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size') + stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping') + model = _get_field(False, output, int, 0, 'machdep.cpu.model') + family = _get_field(False, output, int, 0, 'machdep.cpu.family') + + # Flags + flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split() + flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split()) + flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split()) + flags.sort() + + # Convert from GHz/MHz string to Hz + scale, hz_advertised = _get_hz_string_from_brand(processor_brand) + hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency') + 
hz_actual = to_hz_string(hz_actual) + + info = { + 'vendor_id' : vendor_id, + 'brand' : processor_brand, + + 'hz_advertised' : to_friendly_hz(hz_advertised, scale), + 'hz_actual' : to_friendly_hz(hz_actual, 0), + 'hz_advertised_raw' : to_raw_hz(hz_advertised, scale), + 'hz_actual_raw' : to_raw_hz(hz_actual, 0), + + 'l2_cache_size' : to_friendly_bytes(cache_size), + + 'stepping' : stepping, + 'model' : model, + 'family' : family, + 'flags' : flags + } + + info = {k: v for k, v in info.items() if v} + return info + except: + return {} + +def _get_cpu_info_from_sysinfo(): + ''' + Returns the CPU info gathered from sysinfo. + Returns {} if sysinfo is not found. + ''' + try: + # Just return {} if there is no sysinfo + if not DataSource.has_sysinfo(): + return {} + + # If sysinfo fails return {} + returncode, output = DataSource.sysinfo_cpu() + if output == None or returncode != 0: + return {} + + # Various fields + vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ') + processor_brand = output.split('CPU #0: "')[1].split('"\n')[0] + cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size') + stepping = int(output.split(', stepping ')[1].split(',')[0].strip()) + model = int(output.split(', model ')[1].split(',')[0].strip()) + family = int(output.split(', family ')[1].split(',')[0].strip()) + + # Flags + flags = [] + for line in output.split('\n'): + if line.startswith('\t\t'): + for flag in line.strip().lower().split(): + flags.append(flag) + flags.sort() + + # Convert from GHz/MHz string to Hz + scale, hz_advertised = _get_hz_string_from_brand(processor_brand) + hz_actual = hz_advertised + + info = { + 'vendor_id' : vendor_id, + 'brand' : processor_brand, + + 'hz_advertised' : to_friendly_hz(hz_advertised, scale), + 'hz_actual' : to_friendly_hz(hz_actual, scale), + 'hz_advertised_raw' : to_raw_hz(hz_advertised, scale), + 'hz_actual_raw' : to_raw_hz(hz_actual, scale), + + 'l2_cache_size' : to_friendly_bytes(cache_size), + + 'stepping' 
: stepping, + 'model' : model, + 'family' : family, + 'flags' : flags + } + + info = {k: v for k, v in info.items() if v} + return info + except: + return {} + +def _get_cpu_info_from_registry(): + ''' + FIXME: Is missing many of the newer CPU flags like sse3 + Returns the CPU info gathered from the Windows Registry. + Returns {} if not on Windows. + ''' + try: + # Just return {} if not on Windows + if not DataSource.is_windows: + return {} + + # Get the CPU name + processor_brand = DataSource.winreg_processor_brand() + + # Get the CPU vendor id + vendor_id = DataSource.winreg_vendor_id() + + # Get the CPU arch and bits + raw_arch_string = DataSource.winreg_raw_arch_string() + arch, bits = parse_arch(raw_arch_string) + + # Get the actual CPU Hz + hz_actual = DataSource.winreg_hz_actual() + hz_actual = to_hz_string(hz_actual) + + # Get the advertised CPU Hz + scale, hz_advertised = _get_hz_string_from_brand(processor_brand) + + # Get the CPU features + feature_bits = DataSource.winreg_feature_bits() + + def is_set(bit): + mask = 0x80000000 >> bit + retval = mask & feature_bits > 0 + return retval + + # http://en.wikipedia.org/wiki/CPUID + # http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean + # http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm + flags = { + 'fpu' : is_set(0), # Floating Point Unit + 'vme' : is_set(1), # V86 Mode Extensions + 'de' : is_set(2), # Debug Extensions - I/O breakpoints supported + 'pse' : is_set(3), # Page Size Extensions (4 MB pages supported) + 'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available + 'msr' : is_set(5), # Model Specific Registers + 'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages) + 'mce' : is_set(7), # Machine Check Exception supported + 'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available + 'apic' : is_set(9), # Local APIC present (multiprocessor operation support) + 'sepamd' : is_set(10), # Fast system calls 
(AMD only) + 'sep' : is_set(11), # Fast system calls + 'mtrr' : is_set(12), # Memory Type Range Registers + 'pge' : is_set(13), # Page Global Enable + 'mca' : is_set(14), # Machine Check Architecture + 'cmov' : is_set(15), # Conditional MOVe instructions + 'pat' : is_set(16), # Page Attribute Table + 'pse36' : is_set(17), # 36 bit Page Size Extensions + 'serial' : is_set(18), # Processor Serial Number + 'clflush' : is_set(19), # Cache Flush + #'reserved1' : is_set(20), # reserved + 'dts' : is_set(21), # Debug Trace Store + 'acpi' : is_set(22), # ACPI support + 'mmx' : is_set(23), # MultiMedia Extensions + 'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions + 'sse' : is_set(25), # SSE instructions + 'sse2' : is_set(26), # SSE2 (WNI) instructions + 'ss' : is_set(27), # self snoop + #'reserved2' : is_set(28), # reserved + 'tm' : is_set(29), # Automatic clock control + 'ia64' : is_set(30), # IA64 instructions + '3dnow' : is_set(31) # 3DNow! instructions available + } + + # Get a list of only the flags that are true + flags = [k for k, v in flags.items() if v] + flags.sort() + + info = { + 'vendor_id' : vendor_id, + 'brand' : processor_brand, + + 'hz_advertised' : to_friendly_hz(hz_advertised, scale), + 'hz_actual' : to_friendly_hz(hz_actual, 6), + 'hz_advertised_raw' : to_raw_hz(hz_advertised, scale), + 'hz_actual_raw' : to_raw_hz(hz_actual, 6), + + 'flags' : flags + } + + info = {k: v for k, v in info.items() if v} + return info + except: + return {} + +def _get_cpu_info_from_kstat(): + ''' + Returns the CPU info gathered from isainfo and kstat. + Returns {} if isainfo or kstat are not found. 
+ ''' + try: + # Just return {} if there is no isainfo or kstat + if not DataSource.has_isainfo() or not DataSource.has_kstat(): + return {} + + # If isainfo fails return {} + returncode, flag_output = DataSource.isainfo_vb() + if flag_output == None or returncode != 0: + return {} + + # If kstat fails return {} + returncode, kstat = DataSource.kstat_m_cpu_info() + if kstat == None or returncode != 0: + return {} + + # Various fields + vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip() + processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip() + stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip()) + model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip()) + family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip()) + + # Flags + flags = flag_output.strip().split('\n')[-1].strip().lower().split() + flags.sort() + + # Convert from GHz/MHz string to Hz + scale = 6 + hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip() + hz_advertised = to_hz_string(hz_advertised) + + # Convert from GHz/MHz string to Hz + hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip() + hz_actual = to_hz_string(hz_actual) + + info = { + 'vendor_id' : vendor_id, + 'brand' : processor_brand, + + 'hz_advertised' : to_friendly_hz(hz_advertised, scale), + 'hz_actual' : to_friendly_hz(hz_actual, 0), + 'hz_advertised_raw' : to_raw_hz(hz_advertised, scale), + 'hz_actual_raw' : to_raw_hz(hz_actual, 0), + + 'stepping' : stepping, + 'model' : model, + 'family' : family, + 'flags' : flags + } + + info = {k: v for k, v in info.items() if v} + return info + except: + return {} + +def CopyNewFields(info, new_info): + keys = [ + 'vendor_id', 'hardware', 'brand', 'hz_advertised', 'hz_actual', + 'hz_advertised_raw', 'hz_actual_raw', 'arch', 'bits', 'count', + 'raw_arch_string', 'l2_cache_size', 'l2_cache_line_size', + 'l2_cache_associativity', 'stepping', 'model', 'family', + 'processor_type', 'extended_model', 
'extended_family', 'flags', + 'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size' + ] + + for key in keys: + if new_info.get(key, None) and not info.get(key, None): + info[key] = new_info[key] + +def get_cpu_info(): + ''' + Returns the CPU info by using the best sources of information for your OS. + Returns {} if nothing is found. + ''' + + # Get the CPU arch and bits + arch, bits = parse_arch(DataSource.raw_arch_string) + + info = { + 'cpuinfo_version' : CPUINFO_VERSION, + 'arch' : arch, + 'bits' : bits, + 'count' : DataSource.cpu_count, + 'raw_arch_string' : DataSource.raw_arch_string, + } + + # Try the Windows registry + CopyNewFields(info, _get_cpu_info_from_registry()) + + # Try /proc/cpuinfo + CopyNewFields(info, _get_cpu_info_from_proc_cpuinfo()) + + # Try cpufreq-info + CopyNewFields(info, _get_cpu_info_from_cpufreq_info()) + + # Try LSCPU + CopyNewFields(info, _get_cpu_info_from_lscpu()) + + # Try sysctl + CopyNewFields(info, _get_cpu_info_from_sysctl()) + + # Try kstat + CopyNewFields(info, _get_cpu_info_from_kstat()) + + # Try dmesg + CopyNewFields(info, _get_cpu_info_from_dmesg()) + + # Try /var/run/dmesg.boot + CopyNewFields(info, _get_cpu_info_from_cat_var_run_dmesg_boot()) + + # Try lsprop ibm,pa-features + CopyNewFields(info, _get_cpu_info_from_ibm_pa_features()) + + # Try sysinfo + CopyNewFields(info, _get_cpu_info_from_sysinfo()) + + # Try querying the CPU cpuid register + CopyNewFields(info, _get_cpu_info_from_cpuid()) + + return info + +# Make sure we are running on a supported system +def _check_arch(): + arch, bits = parse_arch(DataSource.raw_arch_string) + if not arch in ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64']: + raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.") + +def main(): + try: + _check_arch() + except Exception as err: + sys.stderr.write(str(err) + "\n") + sys.exit(1) + + info = get_cpu_info() + if info: + print('cpuinfo version: {0}'.format(info.get('cpuinfo_version', ''))) + 
print('Vendor ID: {0}'.format(info.get('vendor_id', ''))) + print('Hardware Raw: {0}'.format(info.get('hardware', ''))) + print('Brand: {0}'.format(info.get('brand', ''))) + print('Hz Advertised: {0}'.format(info.get('hz_advertised', ''))) + print('Hz Actual: {0}'.format(info.get('hz_actual', ''))) + print('Hz Advertised Raw: {0}'.format(info.get('hz_advertised_raw', ''))) + print('Hz Actual Raw: {0}'.format(info.get('hz_actual_raw', ''))) + print('Arch: {0}'.format(info.get('arch', ''))) + print('Bits: {0}'.format(info.get('bits', ''))) + print('Count: {0}'.format(info.get('count', ''))) + + print('Raw Arch String: {0}'.format(info.get('raw_arch_string', ''))) + + print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', ''))) + print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', ''))) + print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', ''))) + print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', ''))) + print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', ''))) + print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', ''))) + print('Stepping: {0}'.format(info.get('stepping', ''))) + print('Model: {0}'.format(info.get('model', ''))) + print('Family: {0}'.format(info.get('family', ''))) + print('Processor Type: {0}'.format(info.get('processor_type', ''))) + print('Extended Model: {0}'.format(info.get('extended_model', ''))) + print('Extended Family: {0}'.format(info.get('extended_family', ''))) + print('Flags: {0}'.format(', '.join(info.get('flags', '')))) + else: + sys.stderr.write("Failed to find cpu info\n") + sys.exit(1) + + +if __name__ == '__main__': + main() +else: + _check_arch() diff --git a/config/farm.cfg.example b/config/farm.cfg.example new file mode 100644 index 0000000..a9c809e --- /dev/null +++ b/config/farm.cfg.example @@ -0,0 +1,4 @@ +[farm] +output_dir = /tmp/blender-benchmark-results +device_type = GPU +device_name = "GeForce GT 520M" \ No newline 
at end of file diff --git a/run.bat b/run.bat new file mode 100644 index 0000000..69c0f23 --- /dev/null +++ b/run.bat @@ -0,0 +1,26 @@ +@echo off + +rem set SCRIPTPATH_SLASH=%~dp0 +rem set SCRIPTPATH=%SCRIPTPATH_SLASH:~0,-1% +set SCRIPTPATH= +set BOOTSTRAP_VERSION=0.1 +set BLENDER_VERSION=2.78 + +reg Query "HKLM\Hardware\Description\System\CentralProcessor\0" | find /i "x86" > NUL && set BITNESS=32 || set BITNESS=64 + +echo Cycles Benchmark Suite bootstrap v%BOOTSTRAP_VERSION% +echo Detected OS: Windows %BITNESS%bit + +set BLENDER_DIR=%SCRIPTPATH%blender\windows%BITNESS% +set BLENDER_BIN=%BLENDER_DIR%\blender.exe +set SCENES_DIR=%SCRIPTPATH%scenes +set PYTHON=%BLENDER_DIR%\%BLENDER_VERSION%\python\bin\python.exe +set BENCHMARK=%SCRIPTPATH%benchmark\main.py +set CONFIGURE_SCRIPT=%SCRIPTPATH%benchmark\configure.py + + +echo %BLENDER_DIR% +echo Using Python from %PYTHON% +echo Running benchmark script %BENCHMARK% + +%PYTHON% %BENCHMARK% --blender "%BLENDER_BIN%" --scenes-dir "%SCENES_DIR%" --configure-script "%CONFIGURE_SCRIPT%" %* diff --git a/run.sh b/run.sh new file mode 100755 index 0000000..44b8649 --- /dev/null +++ b/run.sh @@ -0,0 +1,168 @@ +#!/bin/bash + +set -e + +UNAME=`which uname` + +# Self versioning info +BOOTSTRAP_VERSION="0.1" + +# Variables which are automatically detected based on particular blender distro +# we are benchmarking. +BLENDER_VERSION="" +PYTHON_VERSION="" +PYTHON_ABI="" + +Black='' +Blue='' +Green='' +Cyan='' +Red='' +Purple='' +Brown='' +LGray='' +DGray='' +LBlue='' +LGreen='' +LCyan='' +LRed='' +LPurple='' +Yellow='' +White='' +Normal='' + +# Check if stdout is a terminal. +if test -t 1; then + # See if it supports colors. 
+ ncolors=$(tput colors) + if test -n "$ncolors" && test $ncolors -ge 8; then + Black='\033[00;30m' + Blue='\033[00;34m' + Green='\033[00;32m' + Cyan='\033[00;36m' + Red='\033[00;31m' + Purple='\033[00;35m' + Brown='\033[00;33m' + LGray='\033[00;37m' + DGray='\033[01;30m' + LBlue='\033[01;34m' + LGreen='\033[01;32m' + LCyan='\033[01;36m' + LRed='\033[01;31m' + LPurple='\033[01;35m' + Yellow='\033[01;33m' + White='\033[01;37m' + Normal='\033[00m' + fi +fi + +STRIP_SENSITIVE() { + echo "${@/$SCRIPTPATH/}" +} + +PRINT_HEADER() { + echo -e "${LBlue}`STRIP_SENSITIVE "${@}"`${Normal}" +} + +PRINT_ERROR() { + echo -e "${LRed}Error: `STRIP_SENSITIVE "${@}"`${Normal}" +} + +FATAL_ERROR() { + PRINT_ERROR "${@}" + exit 1 +} + +PRINT_INFO() { + echo -e `STRIP_SENSITIVE "${@}"` +} + +PRINT_HEADER "Cycles Benchmark Suite bootstrap v${BOOTSTRAP_VERSION}" + +# Check uname exists. +# Without this we would not be able to know which Blender to run. +if [ -z "${UNAME}" ]; then + PRINT_ERROR "Unable to find uname command." + exit 1 +fi + +# Check whether we support operation system. +KERNEL_NAME=`$UNAME -s` +PRINT_INFO "Detected OS: ${White}${KERNEL_NAME}${Normal}" + +case "${KERNEL_NAME}" in + Darwin) + SCRIPTPATH=`cd "$(dirname "${0}")"; pwd` + BLENDER_PLATFORM="macos" + # TODO(sergey): We assume only newer 64 bit MacOS machines. + BINESS="64" + ;; + + Linux) + SCRIPT=$(readlink -f $0) + SCRIPTPATH=`dirname $SCRIPT` + BLENDER_PLATFORM="linux" + MACHINE_TYPE=`uname -m` + # TODO(sergey): Handle other architectures than x86_64/i686 here. + if [ ${MACHINE_TYPE} == 'x86_64' ]; then + BITNESS="64" + else + BITNESS="32" + fi + ;; + + CYGWIN*|MINGW32*|MSYS*) + # TODO(sergey): Maybe support Cygwin in the future. + FATAL_ERROR "On Windows platform run.bat is to be used." + ;; + + *) + FATAL_ERROR "Unknown OS, can not continue." 
+ ;; +esac + +if [ -z "${BLENDER_DIR}" ]; then + BLENDER_DIR="${SCRIPTPATH}/blender/${BLENDER_PLATFORM}${BITNESS}" +fi +BLENDER_BIN="${BLENDER_DIR}/blender" +SCENES_DIR="${SCRIPTPATH}/scenes" +BENCHMARK="${SCRIPTPATH}/benchmark/main.py" +CONFIGURE_SCRIPT="${SCRIPTPATH}/benchmark/configure.py" + +# Detect Blender version. +if [ ! -f "${BLENDER_BIN}" ]; then + FATAL_ERROR "Unable to find Blender executable." +fi +BLENDER_VERSION=`${BLENDER_BIN} --version | + head -n 1 | sed -r 's/Blender ([0-9]\.[0-9]{2}).*/\1/'` +PRINT_INFO "Detected Blender version: ${White}${BLENDER_VERSION}${Normal}" + +# Detect Python version used by Blender. +PYTHON_VERSION=`${BLENDER_BIN} -b --python-expr \ + 'import platform; print("Python Version: {}" . \ + format(platform.python_version()))' | + grep "Python Version" | + sed -r 's/.*\s([0-9]+)\.([0-9]+).*/\1.\2/'` +PRINT_INFO "Detected Python version: ${White}${PYTHON_VERSION}${Normal}" + +# Detect Python ABI +PYTHON="${BLENDER_DIR}/${BLENDER_VERSION}/python/bin/python${PYTHON_VERSION}" +if [ ! -f "${PYTHON}" ]; then + for ABI in m d md; do + PYTHON_ABI="${ABI}" + PYTHON="${BLENDER_DIR}/${BLENDER_VERSION}/python/bin/python${PYTHON_VERSION}${PYTHON_ABI}" + if [ -f "${PYTHON}" ]; then + break + fi + done +fi +PRINT_INFO "Detected Python ABI: ${White}${PYTHON_ABI}${Normal}" + +PRINT_INFO "Using Python from ${White}${PYTHON}${Normal}" +PRINT_INFO "Running benchmark script ${White}${BENCHMARK}${Normal}" + +"${PYTHON}" "${BENCHMARK}" \ + --blender "${BLENDER_BIN}" \ + --scenes-dir "${SCENES_DIR}" \ + --configure-script "${CONFIGURE_SCRIPT}" \ + "${@}"