Produce benchmark JSON schema v2
- No more '(Display)' strings in the GPU names, but store devices as `{'name': name, 'type': 'CPU/CUDA/OPENCL', 'is_display': bool}` - Introduces testing with py.test & pipenv. The test suite is far from complete, though.
This commit is contained in:
4
.coveragerc
Normal file
4
.coveragerc
Normal file
@@ -0,0 +1,4 @@
|
||||
[run]
|
||||
omit =
|
||||
# omit 3rd party modules
|
||||
benchmark/foundation/third_party/*
|
4
.gitignore
vendored
4
.gitignore
vendored
@@ -14,3 +14,7 @@ __pycache__
|
||||
/bundle/blender-benchmark-*.dmg
|
||||
/bundle/blender-benchmark-*.tar.bz2
|
||||
/bundle/blender-benchmark-*.zip
|
||||
|
||||
.cache
|
||||
.coverage
|
||||
.pytest_cache/
|
||||
|
14
Pipfile
Normal file
14
Pipfile
Normal file
@@ -0,0 +1,14 @@
|
||||
[[source]]
|
||||
url = "https://pypi.org/simple"
|
||||
verify_ssl = true
|
||||
name = "pypi"
|
||||
|
||||
[packages]
|
||||
requests = "*"
|
||||
|
||||
[dev-packages]
|
||||
pytest = "*"
|
||||
pytest-cov = "*"
|
||||
|
||||
[requires]
|
||||
python_version = "3.6"
|
157
Pipfile.lock
generated
Normal file
157
Pipfile.lock
generated
Normal file
@@ -0,0 +1,157 @@
|
||||
{
|
||||
"_meta": {
|
||||
"hash": {
|
||||
"sha256": "b4fb2aae21bb351279ef411be59c7870f51bd9790631cfc32456be1a42adbc50"
|
||||
},
|
||||
"pipfile-spec": 6,
|
||||
"requires": {
|
||||
"python_version": "3.6"
|
||||
},
|
||||
"sources": [
|
||||
{
|
||||
"name": "pypi",
|
||||
"url": "https://pypi.org/simple",
|
||||
"verify_ssl": true
|
||||
}
|
||||
]
|
||||
},
|
||||
"default": {
|
||||
"certifi": {
|
||||
"hashes": [
|
||||
"sha256:376690d6f16d32f9d1fe8932551d80b23e9d393a8578c5633a2ed39a64861638",
|
||||
"sha256:456048c7e371c089d0a77a5212fb37a2c2dce1e24146e3b7e0261736aaeaa22a"
|
||||
],
|
||||
"version": "==2018.8.24"
|
||||
},
|
||||
"chardet": {
|
||||
"hashes": [
|
||||
"sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae",
|
||||
"sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"
|
||||
],
|
||||
"version": "==3.0.4"
|
||||
},
|
||||
"idna": {
|
||||
"hashes": [
|
||||
"sha256:156a6814fb5ac1fc6850fb002e0852d56c0c8d2531923a51032d1b70760e186e",
|
||||
"sha256:684a38a6f903c1d71d6d5fac066b58d7768af4de2b832e426ec79c30daa94a16"
|
||||
],
|
||||
"version": "==2.7"
|
||||
},
|
||||
"requests": {
|
||||
"hashes": [
|
||||
"sha256:63b52e3c866428a224f97cab011de738c36aec0185aa91cfacd418b5d58911d1",
|
||||
"sha256:ec22d826a36ed72a7358ff3fe56cbd4ba69dd7a6718ffd450ff0e9df7a47ce6a"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.19.1"
|
||||
},
|
||||
"urllib3": {
|
||||
"hashes": [
|
||||
"sha256:a68ac5e15e76e7e5dd2b8f94007233e01effe3e50e8daddf69acfd81cb686baf",
|
||||
"sha256:b5725a0bd4ba422ab0e66e89e030c806576753ea3ee08554382c14e685d117b5"
|
||||
],
|
||||
"markers": "python_version != '3.2.*' and python_version != '3.0.*' and python_version != '3.3.*' and python_version != '3.1.*' and python_version < '4' and python_version >= '2.6'",
|
||||
"version": "==1.23"
|
||||
}
|
||||
},
|
||||
"develop": {
|
||||
"atomicwrites": {
|
||||
"hashes": [
|
||||
"sha256:240831ea22da9ab882b551b31d4225591e5e447a68c5e188db5b89ca1d487585",
|
||||
"sha256:a24da68318b08ac9c9c45029f4a10371ab5b20e4226738e150e6e7c571630ae6"
|
||||
],
|
||||
"version": "==1.1.5"
|
||||
},
|
||||
"attrs": {
|
||||
"hashes": [
|
||||
"sha256:4b90b09eeeb9b88c35bc642cbac057e45a5fd85367b985bd2809c62b7b939265",
|
||||
"sha256:e0d0eb91441a3b53dab4d9b743eafc1ac44476296a2053b6ca3af0b139faf87b"
|
||||
],
|
||||
"version": "==18.1.0"
|
||||
},
|
||||
"coverage": {
|
||||
"hashes": [
|
||||
"sha256:03481e81d558d30d230bc12999e3edffe392d244349a90f4ef9b88425fac74ba",
|
||||
"sha256:0b136648de27201056c1869a6c0d4e23f464750fd9a9ba9750b8336a244429ed",
|
||||
"sha256:10a46017fef60e16694a30627319f38a2b9b52e90182dddb6e37dcdab0f4bf95",
|
||||
"sha256:198626739a79b09fa0a2f06e083ffd12eb55449b5f8bfdbeed1df4910b2ca640",
|
||||
"sha256:23d341cdd4a0371820eb2b0bd6b88f5003a7438bbedb33688cd33b8eae59affd",
|
||||
"sha256:28b2191e7283f4f3568962e373b47ef7f0392993bb6660d079c62bd50fe9d162",
|
||||
"sha256:2a5b73210bad5279ddb558d9a2bfedc7f4bf6ad7f3c988641d83c40293deaec1",
|
||||
"sha256:2eb564bbf7816a9d68dd3369a510be3327f1c618d2357fa6b1216994c2e3d508",
|
||||
"sha256:337ded681dd2ef9ca04ef5d93cfc87e52e09db2594c296b4a0a3662cb1b41249",
|
||||
"sha256:3a2184c6d797a125dca8367878d3b9a178b6fdd05fdc2d35d758c3006a1cd694",
|
||||
"sha256:3c79a6f7b95751cdebcd9037e4d06f8d5a9b60e4ed0cd231342aa8ad7124882a",
|
||||
"sha256:3d72c20bd105022d29b14a7d628462ebdc61de2f303322c0212a054352f3b287",
|
||||
"sha256:3eb42bf89a6be7deb64116dd1cc4b08171734d721e7a7e57ad64cc4ef29ed2f1",
|
||||
"sha256:4635a184d0bbe537aa185a34193898eee409332a8ccb27eea36f262566585000",
|
||||
"sha256:56e448f051a201c5ebbaa86a5efd0ca90d327204d8b059ab25ad0f35fbfd79f1",
|
||||
"sha256:5a13ea7911ff5e1796b6d5e4fbbf6952381a611209b736d48e675c2756f3f74e",
|
||||
"sha256:69bf008a06b76619d3c3f3b1983f5145c75a305a0fea513aca094cae5c40a8f5",
|
||||
"sha256:6bc583dc18d5979dc0f6cec26a8603129de0304d5ae1f17e57a12834e7235062",
|
||||
"sha256:701cd6093d63e6b8ad7009d8a92425428bc4d6e7ab8d75efbb665c806c1d79ba",
|
||||
"sha256:7608a3dd5d73cb06c531b8925e0ef8d3de31fed2544a7de6c63960a1e73ea4bc",
|
||||
"sha256:76ecd006d1d8f739430ec50cc872889af1f9c1b6b8f48e29941814b09b0fd3cc",
|
||||
"sha256:7aa36d2b844a3e4a4b356708d79fd2c260281a7390d678a10b91ca595ddc9e99",
|
||||
"sha256:7d3f553904b0c5c016d1dad058a7554c7ac4c91a789fca496e7d8347ad040653",
|
||||
"sha256:7e1fe19bd6dce69d9fd159d8e4a80a8f52101380d5d3a4d374b6d3eae0e5de9c",
|
||||
"sha256:8c3cb8c35ec4d9506979b4cf90ee9918bc2e49f84189d9bf5c36c0c1119c6558",
|
||||
"sha256:9d6dd10d49e01571bf6e147d3b505141ffc093a06756c60b053a859cb2128b1f",
|
||||
"sha256:be6cfcd8053d13f5f5eeb284aa8a814220c3da1b0078fa859011c7fffd86dab9",
|
||||
"sha256:c1bb572fab8208c400adaf06a8133ac0712179a334c09224fb11393e920abcdd",
|
||||
"sha256:de4418dadaa1c01d497e539210cb6baa015965526ff5afc078c57ca69160108d",
|
||||
"sha256:e05cb4d9aad6233d67e0541caa7e511fa4047ed7750ec2510d466e806e0255d6",
|
||||
"sha256:f3f501f345f24383c0000395b26b726e46758b71393267aeae0bd36f8b3ade80"
|
||||
],
|
||||
"markers": "python_version != '3.0.*' and python_version < '4' and python_version != '3.2.*' and python_version >= '2.6' and python_version != '3.1.*'",
|
||||
"version": "==4.5.1"
|
||||
},
|
||||
"more-itertools": {
|
||||
"hashes": [
|
||||
"sha256:c187a73da93e7a8acc0001572aebc7e3c69daf7bf6881a2cea10650bd4420092",
|
||||
"sha256:c476b5d3a34e12d40130bc2f935028b5f636df8f372dc2c1c01dc19681b2039e",
|
||||
"sha256:fcbfeaea0be121980e15bc97b3817b5202ca73d0eae185b4550cbfce2a3ebb3d"
|
||||
],
|
||||
"version": "==4.3.0"
|
||||
},
|
||||
"pluggy": {
|
||||
"hashes": [
|
||||
"sha256:6e3836e39f4d36ae72840833db137f7b7d35105079aee6ec4a62d9f80d594dd1",
|
||||
"sha256:95eb8364a4708392bae89035f45341871286a333f749c3141c20573d2b3876e1"
|
||||
],
|
||||
"markers": "python_version != '3.0.*' and python_version != '3.2.*' and python_version >= '2.7' and python_version != '3.3.*' and python_version != '3.1.*'",
|
||||
"version": "==0.7.1"
|
||||
},
|
||||
"py": {
|
||||
"hashes": [
|
||||
"sha256:3fd59af7435864e1a243790d322d763925431213b6b8529c6ca71081ace3bbf7",
|
||||
"sha256:e31fb2767eb657cbde86c454f02e99cb846d3cd9d61b318525140214fdc0e98e"
|
||||
],
|
||||
"markers": "python_version != '3.0.*' and python_version != '3.2.*' and python_version >= '2.7' and python_version != '3.3.*' and python_version != '3.1.*'",
|
||||
"version": "==1.5.4"
|
||||
},
|
||||
"pytest": {
|
||||
"hashes": [
|
||||
"sha256:3459a123ad5532852d36f6f4501dfe1acf4af1dd9541834a164666aa40395b02",
|
||||
"sha256:96bfd45dbe863b447a3054145cd78a9d7f31475d2bce6111b133c0cc4f305118"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.7.2"
|
||||
},
|
||||
"pytest-cov": {
|
||||
"hashes": [
|
||||
"sha256:03aa752cf11db41d281ea1d807d954c4eda35cfa1b21d6971966cc041bbf6e2d",
|
||||
"sha256:890fe5565400902b0c78b5357004aab1c814115894f4f21370e2433256a3eeec"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.5.1"
|
||||
},
|
||||
"six": {
|
||||
"hashes": [
|
||||
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9",
|
||||
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb"
|
||||
],
|
||||
"version": "==1.11.0"
|
||||
}
|
||||
}
|
||||
}
|
@@ -2,7 +2,6 @@
|
||||
|
||||
import bpy
|
||||
|
||||
|
||||
def setDeviceCPU(context, cpref):
    """Configure Cycles to render on the CPU.

    Setting the compute device type to 'NONE' makes Cycles fall back to
    CPU rendering.  Always returns True to signal success.
    """
    cpref.compute_device_type = 'NONE'
    return True
|
||||
@@ -123,6 +122,7 @@ def setDeviceGPU(context,
|
||||
|
||||
|
||||
def logComputeDevices(cpref):
|
||||
import json
|
||||
device_type = cpref.compute_device_type
|
||||
if device_type == 'NONE':
|
||||
device_type = 'CPU'
|
||||
@@ -131,13 +131,18 @@ def logComputeDevices(cpref):
|
||||
import _cycles
|
||||
for device in _cycles.available_devices():
|
||||
if device[1] == 'CPU':
|
||||
print("Using compute device: {}" . format(device[0]))
|
||||
info = {'name': device[0]}
|
||||
print("Using compute device: {}" . format(json.dumps(info, sort_keys=True)))
|
||||
else:
|
||||
for device in cpref.devices:
|
||||
if device.type != device_type:
|
||||
if device.type != device_type or not device.use:
|
||||
continue
|
||||
if device.use:
|
||||
print("Using compute device: {}" . format(device.name))
|
||||
|
||||
info = {
|
||||
'name': device.name.replace(' (Display)', ''),
|
||||
'is_display': '(Display)' in device.name,
|
||||
}
|
||||
print("Using compute device: {}" . format(json.dumps(info, sort_keys=True)))
|
||||
|
||||
|
||||
def logSystemInfo(cpref):
|
||||
@@ -148,8 +153,10 @@ def logSystemInfo(cpref):
|
||||
"name": device.name.replace(" (Display)", ""),
|
||||
"type": device.type,
|
||||
}
|
||||
if device.type != 'CPU':
|
||||
info_device["is_display"] = '(Display)' in device.name
|
||||
info_devices.append(info_device)
|
||||
print("Benchmark Devices: {}" . format(json.dumps(info_devices)))
|
||||
print("Benchmark Devices: {}" . format(json.dumps(info_devices, sort_keys=True)))
|
||||
|
||||
|
||||
def main():
|
||||
|
@@ -11,6 +11,8 @@ import tempfile
|
||||
SCRIPT_PATH = os.path.realpath(__file__)
|
||||
SCRIPT_DIR = os.path.dirname(SCRIPT_PATH)
|
||||
|
||||
RESULT_JSON_SCHEMA_VERSION = 2
|
||||
|
||||
import foundation
|
||||
from foundation import (benchrunner,
|
||||
buildbot,
|
||||
@@ -286,8 +288,10 @@ def ensureImageOutputDir(results_output_dir):
|
||||
|
||||
|
||||
def getResultJSONString(ctx, results):
|
||||
import copy
|
||||
# Convert custom classes to dictionaries for easier JSON dump.
|
||||
json_results = results
|
||||
json_results = copy.deepcopy(results)
|
||||
json_results['schema_version'] = RESULT_JSON_SCHEMA_VERSION
|
||||
stats = json_results['stats']
|
||||
for scene in ctx.scenes:
|
||||
if scene not in stats:
|
||||
|
@@ -144,7 +144,9 @@ def benchmarkAll(ctx):
|
||||
return all_stats
|
||||
|
||||
|
||||
def benchmarkGetDeviceInfo(ctx):
|
||||
def benchmarkGetDeviceInfo(ctx) -> dict:
|
||||
import json
|
||||
|
||||
command = [ctx.blender,
|
||||
"--background",
|
||||
"--factory-startup",
|
||||
@@ -173,7 +175,9 @@ def benchmarkGetDeviceInfo(ctx):
|
||||
if line.startswith("Compute device type:"):
|
||||
device_type = line.split(':', 1)[1].strip()
|
||||
elif line.startswith("Using compute device:"):
|
||||
compute_devices.append(line.split(':', 1)[1].strip())
|
||||
devices_as_json = line.split(':', 1)[1].strip()
|
||||
devices = json.loads(devices_as_json)
|
||||
compute_devices.append(devices)
|
||||
elif line.startswith("CPU threads used:"):
|
||||
num_cpu_threads = int(line.split(':', 1)[1].strip())
|
||||
return {"device_type": device_type,
|
||||
|
@@ -2,6 +2,7 @@ import json
|
||||
import platform
|
||||
import subprocess
|
||||
import sys
|
||||
import typing
|
||||
|
||||
from datetime import datetime
|
||||
|
||||
@@ -10,9 +11,12 @@ from .third_party import cpuinfo
|
||||
from .third_party import cpu_cores
|
||||
from .third_party.dateutil import parser
|
||||
|
||||
from . import context
|
||||
|
||||
|
||||
def _getBlenderDeviceInfo(ctx: context.Context) -> typing.List[dict]:
|
||||
prefix = "Benchmark Devices: "
|
||||
|
||||
def _getBlenderDeviceInfo(ctx):
|
||||
PREFIX = "Benchmark Devices: "
|
||||
command = [ctx.blender,
|
||||
"--background",
|
||||
"--factory-startup",
|
||||
@@ -28,8 +32,8 @@ def _getBlenderDeviceInfo(ctx):
|
||||
stdout, stderr = process.communicate()
|
||||
lines = stdout.decode().split("\n")
|
||||
for line in lines:
|
||||
if line.startswith(PREFIX):
|
||||
return json.loads(line[len(PREFIX):])
|
||||
if line.startswith(prefix):
|
||||
return json.loads(line[len(prefix):])
|
||||
return []
|
||||
|
||||
|
||||
|
@@ -1,168 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
SCRIPT_PATH = os.path.realpath(__file__)
|
||||
SCRIPT_DIR = os.path.dirname(SCRIPT_PATH)
|
||||
|
||||
import argparse
|
||||
import foundation
|
||||
from foundation import (benchrunner,
|
||||
context,
|
||||
logger,
|
||||
system_info,
|
||||
util)
|
||||
|
||||
|
||||
def configureArgumentParser():
    """Build the command-line argument parser for the benchmark script.

    Returns:
        argparse.ArgumentParser: parser with all benchmark options registered.
    """
    parser = argparse.ArgumentParser(
        description="Cycles benchmark helper script.")
    parser.add_argument("-b", "--blender",
                        help="Full file path to Blender's binary " +
                             "to use for rendering",
                        default="blender")
    parser.add_argument("-d", "--scenes-dir",
                        help="Directory with scenes",
                        default="")
    parser.add_argument('-s', '--scenes',
                        nargs='+',
                        help='Scenes to be rendered',
                        default=[])
    parser.add_argument('-c', '--configure-script',
                        help="Blender-side configuration script",
                        default="configure.py")
    parser.add_argument('-t', '--device-type',
                        help="Type of the device to render on",
                        default="CPU")
    parser.add_argument('-n', '--device-name',
                        help="Device name to render on",
                        default="")
    parser.add_argument('-e', '--device-single',
                        help="Use single device when multiple matches",
                        action='store_true',
                        default=False)
    parser.add_argument('-f', '--full-dump',
                        # Typo fix: help text previously read "in formation".
                        help="Dump all available information",
                        action='store_true',
                        default=False)
    parser.add_argument('-j', '--json',
                        help="When in full dump mode, dump JSON",
                        action='store_true',
                        default=False)
    return parser
|
||||
|
||||
|
||||
def _printFullResult(ctx, results):
    """Print a human-readable report of the benchmark results.

    Covers system information, Blender build information and the per-scene
    render statistics for every scene in ``ctx.scenes`` that was actually
    benchmarked.
    """
    print("")
    print("=" * 40)
    # Print system information.
    sys_info = results['system_info']
    print("System info:")
    print("  System: {} {}" . format(sys_info['system'],
                                     sys_info['bitness']))
    if sys_info['system'] == "Linux":
        print("  Linux distro: {}, {}" . format(sys_info['dist_name'],
                                                sys_info['dist_version']))
    print("  CPU: {}" . format(sys_info['cpu_brand']))
    devices = sys_info['devices']
    if devices:
        print("  Compute devices:")
        for device in devices:
            print("   - {}: {}" . format(device['type'], device['name']))
    # Print Blender version.
    blender = results['blender_version']
    print("Blender:")
    print("  Version: {}" . format(blender['version']))
    print("  Hash: {}" . format(blender['build_hash']))
    print("  Commit: {} {}" . format(blender['build_commit_date'],
                                     blender['build_commit_time']))
    print("  Build: {} {}" . format(blender['build_date'],
                                    blender['build_time']))
    # Print scenes status.
    # Typo fix: header previously read "Nenchmark results:".
    print("Benchmark results:")
    stats = results['stats']
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        scene_stats = stats[scene]
        print("  {}:" . format(scene))
        print("   - Engine render time: {}" . format(
            util.humanReadableTimeDifference(
                scene_stats.total_render_time)))
        print("   - Render time without sync: {}" . format(
            util.humanReadableTimeDifference(
                scene_stats.render_time_no_sync)))
        print("   - Total render time: {}" . format(
            util.humanReadableTimeDifference(
                scene_stats.pipeline_render_time)))
        print("   - Peak memory used on device: {}" . format(
            util.humanReadableSize(scene_stats.device_peak_memory)))
        print("   - Memory used on device during rendering: {}" . format(
            util.humanReadableSize(scene_stats.device_memory_usage)))
|
||||
|
||||
|
||||
def _printFullJSONResult(ctx, results):
|
||||
import json
|
||||
# Convert custom classes to dictionaries for easier JSON dump.
|
||||
json_results = results
|
||||
stats = json_results['stats']
|
||||
for scene in ctx.scenes:
|
||||
if scene not in stats:
|
||||
continue
|
||||
stats[scene] = stats[scene].asDict()
|
||||
print(json.dumps(json_results, sort_keys=True, indent=2))
|
||||
|
||||
|
||||
def _printBriefResult(ctx, results):
    """Print a one-line-per-scene summary of pipeline render times."""
    print("")
    print("=" * 40)
    stats = results['stats']
    # Only scenes that were actually benchmarked have an entry in stats.
    for scene in (name for name in ctx.scenes if name in stats):
        duration = util.humanReadableTimeDifference(
            stats[scene].pipeline_render_time)
        print("{}: {}" . format(scene, duration))
|
||||
|
||||
|
||||
def main():
    """Entry point: parse arguments, run all benchmarks, report results."""
    args = configureArgumentParser().parse_args()
    logger.init()
    logger.HEADER("Cycles Benchmark Suite v{}" . format(foundation.VERSION))
    # Configure context.
    ctx = context.Context()
    ctx.blender = args.blender
    ctx.scenes_dir = args.scenes_dir
    ctx.configure_script = args.configure_script
    ctx.device_type = args.device_type
    ctx.device_name = args.device_name
    ctx.device_single = args.device_single
    # Fall back to every scene in the scenes directory when none were given.
    if args.scenes:
        ctx.scenes = args.scenes
    else:
        ctx.scenes = ctx.listAllScenes(args.scenes_dir)
    logger.INFO("Requested device details:")
    benchrunner.benchmarkPrintDeviceInfo(ctx)
    # Run benchmark.
    all_stats = benchrunner.benchmarkAll(ctx)
    # Gather all information together.
    result = {
        "blender_version": system_info.getBlenderVersion(ctx),
        "system_info": system_info.gatherSystemInfo(ctx),
        "stats": all_stats if all_stats else {}
    }
    # Choose the output format requested on the command line.
    if not args.full_dump:
        _printBriefResult(ctx, result)
    elif args.json:
        _printFullJSONResult(ctx, result)
    else:
        _printFullResult(ctx, result)


if __name__ == "__main__":
    main()
|
@@ -1,2 +1,9 @@
|
||||
[pycodestyle]
|
||||
max-line-length = 120
|
||||
|
||||
[tool:pytest]
|
||||
addopts = -v
|
||||
--ignore bundle --ignore .git --ignore .cache --ignore config
|
||||
--cov benchmark
|
||||
--cov-report term-missing
|
||||
python_files = tests.py test_*.py *_tests.py
|
||||
|
0
tests/__init__.py
Normal file
0
tests/__init__.py
Normal file
138
tests/test_foundation_system_info.py
Normal file
138
tests/test_foundation_system_info.py
Normal file
@@ -0,0 +1,138 @@
|
||||
import json
|
||||
import sys
|
||||
import unittest
|
||||
from unittest import mock
|
||||
import typing
|
||||
|
||||
from benchmark.foundation import system_info
|
||||
|
||||
|
||||
class FakeCycles:
    """Stands in for the '_cycles' module so tests can mock its results."""

    @staticmethod
    def available_devices():
        # Tests patch this method; the default return value is a placeholder.
        return None
|
||||
|
||||
|
||||
class FakeDevice:
    """Minimal stand-in for a Cycles device entry in the user preferences."""

    def __init__(self, name: str, type: str, *, use: bool):
        self.name = name  # device name as reported by Cycles
        self.type = type  # e.g. 'CPU', 'CUDA' or 'OPENCL'
        self.use = use    # whether the device is enabled for rendering

    def as_cycles(self) -> typing.Tuple[str, str]:
        """Return the (name, type) pair as _cycles.available_devices() would."""
        return self.name, self.type
|
||||
|
||||
|
||||
class FakeCpref:
    """Fake Cycles preferences object holding a compute type and device list."""

    def __init__(self, compute_device_type: str, devices: typing.List[FakeDevice]):
        # Guard against typos in tests: only valid Cycles types are accepted.
        assert compute_device_type in {'NONE', 'CPU', 'CUDA', 'OPENCL'}
        self.compute_device_type = compute_device_type
        self.devices = devices
|
||||
|
||||
|
||||
class AbstractFakeDevicesTest(unittest.TestCase):
    """Shared fixtures: one CPU plus two CUDA GPUs (one display, one not)."""

    cpu = 'Intel Core i7-4790K CPU @ 4.00GHz'
    gpu = 'GeForce GTX 970'  # display
    gpu2 = 'GeForce GTX 980'  # non-display

    # The device dictionaries the benchmark is expected to print.
    compute_devices_as_printed = [
        {'name': cpu, 'type': 'CPU'},
        {'name': gpu, 'type': 'CUDA', 'is_display': True},
        {'name': gpu2, 'type': 'CUDA', 'is_display': False},
    ]

    # The fake Cycles preferences devices backing the expectations above.
    cpref_devs = [
        FakeDevice(cpu, 'CPU', use=True),
        FakeDevice(f'{gpu} (Display)', 'CUDA', use=True),
        FakeDevice(gpu2, 'CUDA', use=False),
    ]

    def setUp(self):
        super().setUp()

        sys.modules['bpy'] = sys.modules[__name__]  # don't fail on 'import bpy'
        sys.modules['_cycles'] = FakeCycles  # don't fail on 'import _cycles'

        # Import so that we can mock functions here.
        # noinspection PyUnresolvedReferences
        from benchmark import configure

    def expected_benchmark_devices_output(self) -> str:
        """Return the JSON string a 'Benchmark Devices:' line should carry."""
        return json.dumps(self.compute_devices_as_printed, sort_keys=True)
|
||||
|
||||
|
||||
class ConfigurePyTest(AbstractFakeDevicesTest):
    """Tests for the device-logging helpers in benchmark/configure.py."""

    @mock.patch('_cycles.available_devices')
    @mock.patch('benchmark.configure.print')
    def test_log_compute_devices_cpu(self, mock_print, mock_available_devices):
        from benchmark import configure

        mock_available_devices.return_value = [
            fake.as_cycles() for fake in self.cpref_devs]
        cpref = FakeCpref(compute_device_type='NONE', devices=self.cpref_devs)
        configure.logComputeDevices(cpref)

        # CPU devices are logged with their name only (no display flag).
        expected_json = json.dumps({'name': self.cpu}, sort_keys=True)
        mock_print.assert_has_calls([
            mock.call('Compute device type: CPU'),
            mock.call(f'Using compute device: {expected_json}'),
        ])

    @mock.patch('_cycles.available_devices')
    @mock.patch('benchmark.configure.print')
    def test_log_compute_devices_gpu(self, mock_print, mock_available_devices):
        from benchmark import configure

        mock_available_devices.return_value = [
            fake.as_cycles() for fake in self.cpref_devs]

        cpref = FakeCpref(compute_device_type='CUDA', devices=self.cpref_devs)
        configure.logComputeDevices(cpref)

        # GPU devices carry an 'is_display' flag, with the ' (Display)'
        # suffix stripped from the reported name.
        expected_json = json.dumps({'name': self.gpu, 'is_display': True},
                                   sort_keys=True)
        mock_print.assert_has_calls([
            mock.call('Compute device type: CUDA'),
            mock.call(f'Using compute device: {expected_json}'),
        ])

    @mock.patch('benchmark.configure.print')
    def test_log_system_info(self, mock_print):
        from benchmark import configure

        cpref = FakeCpref(compute_device_type='CUDA', devices=self.cpref_devs)
        configure.logSystemInfo(cpref)

        mock_print.assert_has_calls([
            mock.call(f'Benchmark Devices: {self.expected_benchmark_devices_output()}'),
        ])
|
||||
|
||||
|
||||
class BenchRunnerTest(AbstractFakeDevicesTest):
    """Tests for device-info parsing in benchmark/foundation/benchrunner.py."""

    @mock.patch('subprocess.Popen')
    def test_get_device_info_gpu(self, mock_popen):
        device_json = json.dumps({'name': self.gpu, 'is_display': True},
                                 sort_keys=True)
        # Simulate Blender's stdout, with noise around the interesting lines.
        blender_output = '\n'.join([
            'Nonsense lines',
            'Compute device type: GPU',
            f'Using compute device: {device_json}',
            'CPU threads used: 47',
            'More nonsense lines',
        ]).encode()

        fake_process = mock.Mock()
        mock_popen.return_value = fake_process
        fake_process.communicate.return_value = blender_output, b''

        from benchmark.foundation import benchrunner, context

        ctx = context.Context()
        info = benchrunner.benchmarkGetDeviceInfo(ctx)

        expected_info = {
            "device_type": 'GPU',
            "compute_devices": [{'name': self.gpu, 'is_display': True}],
            "num_cpu_threads": 47
        }
        self.assertEqual(expected_info, info)
|
Reference in New Issue
Block a user