Produce benchmark JSON schema v2
- No more '(Display)' strings in the GPU names, but store devices as `{'name': name, 'type': 'CPU/CUDA/OPENCL', 'is_display': bool}` - Introduces testing with py.test & pipenv. The test suite is far from complete, though.
This commit is contained in:
@@ -2,7 +2,6 @@
|
||||
|
||||
import bpy
|
||||
|
||||
|
||||
def setDeviceCPU(context, cpref):
|
||||
cpref.compute_device_type = 'NONE'
|
||||
return True
|
||||
@@ -123,6 +122,7 @@ def setDeviceGPU(context,
|
||||
|
||||
|
||||
def logComputeDevices(cpref):
|
||||
import json
|
||||
device_type = cpref.compute_device_type
|
||||
if device_type == 'NONE':
|
||||
device_type = 'CPU'
|
||||
@@ -131,13 +131,18 @@ def logComputeDevices(cpref):
|
||||
import _cycles
|
||||
for device in _cycles.available_devices():
|
||||
if device[1] == 'CPU':
|
||||
print("Using compute device: {}" . format(device[0]))
|
||||
info = {'name': device[0]}
|
||||
print("Using compute device: {}" . format(json.dumps(info, sort_keys=True)))
|
||||
else:
|
||||
for device in cpref.devices:
|
||||
if device.type != device_type:
|
||||
if device.type != device_type or not device.use:
|
||||
continue
|
||||
if device.use:
|
||||
print("Using compute device: {}" . format(device.name))
|
||||
|
||||
info = {
|
||||
'name': device.name.replace(' (Display)', ''),
|
||||
'is_display': '(Display)' in device.name,
|
||||
}
|
||||
print("Using compute device: {}" . format(json.dumps(info, sort_keys=True)))
|
||||
|
||||
|
||||
def logSystemInfo(cpref):
|
||||
@@ -148,8 +153,10 @@ def logSystemInfo(cpref):
|
||||
"name": device.name.replace(" (Display)", ""),
|
||||
"type": device.type,
|
||||
}
|
||||
if device.type != 'CPU':
|
||||
info_device["is_display"] = '(Display)' in device.name
|
||||
info_devices.append(info_device)
|
||||
print("Benchmark Devices: {}" . format(json.dumps(info_devices)))
|
||||
print("Benchmark Devices: {}" . format(json.dumps(info_devices, sort_keys=True)))
|
||||
|
||||
|
||||
def main():
|
||||
|
@@ -11,6 +11,8 @@ import tempfile
|
||||
SCRIPT_PATH = os.path.realpath(__file__)
|
||||
SCRIPT_DIR = os.path.dirname(SCRIPT_PATH)
|
||||
|
||||
RESULT_JSON_SCHEMA_VERSION = 2
|
||||
|
||||
import foundation
|
||||
from foundation import (benchrunner,
|
||||
buildbot,
|
||||
@@ -286,8 +288,10 @@ def ensureImageOutputDir(results_output_dir):
|
||||
|
||||
|
||||
def getResultJSONString(ctx, results):
|
||||
import copy
|
||||
# Convert custom classes to dictionaries for easier JSON dump.
|
||||
json_results = results
|
||||
json_results = copy.deepcopy(results)
|
||||
json_results['schema_version'] = RESULT_JSON_SCHEMA_VERSION
|
||||
stats = json_results['stats']
|
||||
for scene in ctx.scenes:
|
||||
if scene not in stats:
|
||||
|
@@ -144,7 +144,9 @@ def benchmarkAll(ctx):
|
||||
return all_stats
|
||||
|
||||
|
||||
def benchmarkGetDeviceInfo(ctx):
|
||||
def benchmarkGetDeviceInfo(ctx) -> dict:
|
||||
import json
|
||||
|
||||
command = [ctx.blender,
|
||||
"--background",
|
||||
"--factory-startup",
|
||||
@@ -173,7 +175,9 @@ def benchmarkGetDeviceInfo(ctx):
|
||||
if line.startswith("Compute device type:"):
|
||||
device_type = line.split(':', 1)[1].strip()
|
||||
elif line.startswith("Using compute device:"):
|
||||
compute_devices.append(line.split(':', 1)[1].strip())
|
||||
devices_as_json = line.split(':', 1)[1].strip()
|
||||
devices = json.loads(devices_as_json)
|
||||
compute_devices.append(devices)
|
||||
elif line.startswith("CPU threads used:"):
|
||||
num_cpu_threads = int(line.split(':', 1)[1].strip())
|
||||
return {"device_type": device_type,
|
||||
|
@@ -2,6 +2,7 @@ import json
|
||||
import platform
|
||||
import subprocess
|
||||
import sys
|
||||
import typing
|
||||
|
||||
from datetime import datetime
|
||||
|
||||
@@ -10,9 +11,12 @@ from .third_party import cpuinfo
|
||||
from .third_party import cpu_cores
|
||||
from .third_party.dateutil import parser
|
||||
|
||||
from . import context
|
||||
|
||||
|
||||
def _getBlenderDeviceInfo(ctx: context.Context) -> typing.List[dict]:
|
||||
prefix = "Benchmark Devices: "
|
||||
|
||||
def _getBlenderDeviceInfo(ctx):
|
||||
PREFIX = "Benchmark Devices: "
|
||||
command = [ctx.blender,
|
||||
"--background",
|
||||
"--factory-startup",
|
||||
@@ -28,8 +32,8 @@ def _getBlenderDeviceInfo(ctx):
|
||||
stdout, stderr = process.communicate()
|
||||
lines = stdout.decode().split("\n")
|
||||
for line in lines:
|
||||
if line.startswith(PREFIX):
|
||||
return json.loads(line[len(PREFIX):])
|
||||
if line.startswith(prefix):
|
||||
return json.loads(line[len(prefix):])
|
||||
return []
|
||||
|
||||
|
||||
|
@@ -1,168 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
SCRIPT_PATH = os.path.realpath(__file__)
|
||||
SCRIPT_DIR = os.path.dirname(SCRIPT_PATH)
|
||||
|
||||
import argparse
|
||||
import foundation
|
||||
from foundation import (benchrunner,
|
||||
context,
|
||||
logger,
|
||||
system_info,
|
||||
util)
|
||||
|
||||
|
||||
def configureArgumentParser():
    """Build the command line argument parser for the benchmark helper.

    Returns:
        argparse.ArgumentParser configured with all benchmark options
        (Blender binary, scenes, device selection and dump mode flags).
    """
    parser = argparse.ArgumentParser(
        description="Cycles benchmark helper script.")
    parser.add_argument("-b", "--blender",
                        help="Full file path to Blender's binary " +
                             "to use for rendering",
                        default="blender")
    parser.add_argument("-d", "--scenes-dir",
                        help="Directory with scenes",
                        default="")
    parser.add_argument('-s', '--scenes',
                        nargs='+',
                        help='Scenes to be rendered',
                        default=[])
    parser.add_argument('-c', '--configure-script',
                        help="Blender-side configuration script",
                        default="configure.py")
    parser.add_argument('-t', '--device-type',
                        help="Type of the device to render on",
                        default="CPU")
    parser.add_argument('-n', '--device-name',
                        help="Device name to render on",
                        default="")
    parser.add_argument('-e', '--device-single',
                        help="Use single device when multiple matches",
                        action='store_true',
                        default=False)
    parser.add_argument('-f', '--full-dump',
                        # Fixed typo in help text: "in formation" -> "information".
                        help="Dump all available information",
                        action='store_true',
                        default=False)
    parser.add_argument('-j', '--json',
                        help="When in full dump mode, dump JSON",
                        action='store_true',
                        default=False)
    return parser
|
||||
|
||||
|
||||
def _printFullResult(ctx, results):
    """Print the full benchmark report in human readable form.

    Args:
        ctx: Benchmark context; provides the scene list in ``ctx.scenes``.
        results: Dictionary with 'system_info', 'blender_version' and
            'stats' entries, as assembled by main().
    """
    print("")
    print("=" * 40)
    # Print system information.
    sys_info = results['system_info']
    print("System info:")
    print(" System: {} {}" . format(sys_info['system'],
                                    sys_info['bitness']))
    if sys_info['system'] == "Linux":
        print(" Linux distro: {}, {}" . format(sys_info['dist_name'],
                                               sys_info['dist_version']))
    print(" CPU: {}" . format(sys_info['cpu_brand']))
    devices = sys_info['devices']
    if devices:
        print(" Compute devices:")
        for device in devices:
            print(" - {}: {}" . format(device['type'], device['name']))
    # Print Blender version.
    blender = results['blender_version']
    print("Blender:")
    print(" Version: {}" . format(blender['version']))
    print(" Hash: {}" . format(blender['build_hash']))
    print(" Commit: {} {}" . format(blender['build_commit_date'],
                                    blender['build_commit_time']))
    print(" Build: {} {}" . format(blender['build_date'],
                                   blender['build_time']))
    # Print scenes status.
    # Fixed typo in user-visible output: "Nenchmark" -> "Benchmark".
    print("Benchmark results:")
    stats = results['stats']
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        scene_stats = stats[scene]
        print(" {}:" . format(scene))
        print(" - Engine render time: {}" . format(
            util.humanReadableTimeDifference(
                scene_stats.total_render_time)))
        print(" - Render time without sync: {}" . format(
            util.humanReadableTimeDifference(
                scene_stats.render_time_no_sync)))
        print(" - Total render time: {}" . format(
            util.humanReadableTimeDifference(
                scene_stats.pipeline_render_time)))
        print(" - Peak memory used on device: {}" . format(
            util.humanReadableSize(scene_stats.device_peak_memory)))
        print(" - Memory used on device during rendering: {}" . format(
            util.humanReadableSize(scene_stats.device_memory_usage)))
|
||||
|
||||
|
||||
def _printFullJSONResult(ctx, results):
|
||||
import json
|
||||
# Convert custom classes to dictionaries for easier JSON dump.
|
||||
json_results = results
|
||||
stats = json_results['stats']
|
||||
for scene in ctx.scenes:
|
||||
if scene not in stats:
|
||||
continue
|
||||
stats[scene] = stats[scene].asDict()
|
||||
print(json.dumps(json_results, sort_keys=True, indent=2))
|
||||
|
||||
|
||||
def _printBriefResult(ctx, results):
    """Print a short per-scene summary of total render times."""
    print("")
    print("=" * 40)
    all_stats = results['stats']
    for scene_name in ctx.scenes:
        # Scenes that failed to render have no stats entry.
        if scene_name in all_stats:
            scene_result = all_stats[scene_name]
            readable_time = util.humanReadableTimeDifference(
                scene_result.pipeline_render_time)
            print("{}: {}" . format(scene_name, readable_time))
|
||||
|
||||
|
||||
def main():
    """Entry point: parse arguments, run the benchmark, report results."""
    options = configureArgumentParser().parse_args()
    logger.init()
    logger.HEADER("Cycles Benchmark Suite v{}" . format(foundation.VERSION))
    # Fill in the benchmark context from the command line options.
    ctx = context.Context()
    ctx.blender = options.blender
    ctx.scenes_dir = options.scenes_dir
    ctx.configure_script = options.configure_script
    ctx.device_type = options.device_type
    ctx.device_name = options.device_name
    ctx.device_single = options.device_single
    # Fall back to every available scene when none was requested.
    if options.scenes:
        ctx.scenes = options.scenes
    else:
        ctx.scenes = ctx.listAllScenes(options.scenes_dir)
    logger.INFO("Requested device details:")
    benchrunner.benchmarkPrintDeviceInfo(ctx)
    # Run benchmark.
    all_stats = benchrunner.benchmarkAll(ctx)
    # Gather all information together.
    result = {
        "blender_version": system_info.getBlenderVersion(ctx),
        "system_info": system_info.gatherSystemInfo(ctx),
        "stats": all_stats or {}
    }
    # Report in the requested format.
    if not options.full_dump:
        _printBriefResult(ctx, result)
    elif options.json:
        _printFullJSONResult(ctx, result)
    else:
        _printFullResult(ctx, result)


if __name__ == "__main__":
    main()
|
Reference in New Issue
Block a user