blender-benchmark-bundle/benchmark/main.py
Sergey Sharybin 7f42fdf5e8 Initial implementation of blender benchmark addon
Just some glue logic to query progress and results from benchmark.

Needed to move files around, so both standalone and addon are happy.
2018-08-02 17:38:55 +02:00

#!/usr/bin/env python3
import os
import sys

SCRIPT_PATH = os.path.realpath(__file__)
SCRIPT_DIR = os.path.dirname(SCRIPT_PATH)

import argparse

import foundation
from foundation import (benchrunner,
                        context,
                        logger,
                        system_info,
                        util)
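

# Example invocation (the Blender path and scene names here are illustrative,
# not defaults shipped with this script):
#
#   ./main.py --blender /opt/blender/blender --scenes-dir ./scenes \
#             --scenes bmw27 classroom --device-type CUDA --full-dump --json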
def configureArgumentParser():
    parser = argparse.ArgumentParser(
        description="Cycles benchmark helper script.")
    parser.add_argument("-b", "--blender",
                        help="Full file path to Blender's binary " +
                             "to use for rendering",
                        default="blender")
    parser.add_argument("-d", "--scenes-dir",
                        help="Directory with scenes",
                        default="")
    parser.add_argument('-s', '--scenes',
                        nargs='+',
                        help='Scenes to be rendered',
                        default=[])
    parser.add_argument('-c', '--configure-script',
                        help="Blender-side configuration script",
                        default="configure.py")
    parser.add_argument('-t', '--device-type',
                        help="Type of the device to render on",
                        default="CPU")
    parser.add_argument('-n', '--device-name',
                        help="Device name to render on",
                        default="")
    parser.add_argument('-e', '--device-single',
                        help="Use a single device when multiple match",
                        action='store_true',
                        default=False)
    parser.add_argument('-f', '--full-dump',
                        help="Dump all available information",
                        action='store_true',
                        default=False)
    parser.add_argument('-j', '--json',
                        help="When in full dump mode, dump JSON",
                        action='store_true',
                        default=False)
    return parser
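

# The result printers below differ only in verbosity: _printFullResult writes
# a human-readable report, _printFullJSONResult dumps the same data as JSON,
# and _printBriefResult prints a single line per scene.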
def _printFullResult(ctx, results):
    print("")
    print("=" * 40)
    # Print system information.
    sys_info = results['system_info']
    print("System info:")
    print("  System: {} {}".format(sys_info['system'],
                                   sys_info['bitness']))
    if sys_info['system'] == "Linux":
        print("  Linux distro: {}, {}".format(sys_info['dist_name'],
                                              sys_info['dist_version']))
    print("  CPU: {}".format(sys_info['cpu_brand']))
    devices = sys_info['devices']
    if devices:
        print("  Compute devices:")
        for device in devices:
            print("  - {}: {}".format(device['type'], device['name']))
    # Print Blender version.
    blender = results['blender_version']
    print("Blender:")
    print("  Version: {}".format(blender['version']))
    print("  Hash: {}".format(blender['build_hash']))
    print("  Commit: {} {}".format(blender['build_commit_date'],
                                   blender['build_commit_time']))
    print("  Build: {} {}".format(blender['build_date'],
                                  blender['build_time']))
    # Print per-scene benchmark results.
    print("Benchmark results:")
    stats = results['stats']
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        scene_stats = stats[scene]
        print("  {}:".format(scene))
        print("  - Engine render time: {}".format(
            util.humanReadableTimeDifference(
                scene_stats.total_render_time)))
        print("  - Render time without sync: {}".format(
            util.humanReadableTimeDifference(
                scene_stats.render_time_no_sync)))
        print("  - Total render time: {}".format(
            util.humanReadableTimeDifference(
                scene_stats.pipeline_render_time)))
        print("  - Peak memory used on device: {}".format(
            util.humanReadableSize(scene_stats.device_peak_memory)))
        print("  - Memory used on device during rendering: {}".format(
            util.humanReadableSize(scene_stats.device_memory_usage)))
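

# NOTE: _printFullJSONResult converts the per-scene stats objects to plain
# dictionaries via asDict(), mutating the passed-in results dictionary in
# place; main() does not reuse the dictionary afterwards, so this is harmless.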
def _printFullJSONResult(ctx, results):
    import json
    # Convert custom classes to dictionaries for easier JSON dump.
    json_results = results
    stats = json_results['stats']
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        stats[scene] = stats[scene].asDict()
    print(json.dumps(json_results, sort_keys=True, indent=2))


def _printBriefResult(ctx, results):
    print("")
    print("=" * 40)
    stats = results['stats']
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        scene_stats = stats[scene]
        print("{}: {}".format(
            scene,
            util.humanReadableTimeDifference(
                scene_stats.pipeline_render_time)))
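

# Entry point: parse the command line, build the benchmark context, run all
# requested scenes, and print the results in the selected format.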
def main():
    parser = configureArgumentParser()
    args = parser.parse_args()
    logger.init()
    logger.HEADER("Cycles Benchmark Suite v{}".format(foundation.VERSION))
    # Configure context.
    ctx = context.Context()
    ctx.blender = args.blender
    ctx.scenes_dir = args.scenes_dir
    ctx.configure_script = args.configure_script
    ctx.device_type = args.device_type
    ctx.device_name = args.device_name
    ctx.device_single = args.device_single
    if args.scenes:
        ctx.scenes = args.scenes
    else:
        ctx.scenes = ctx.listAllScenes(args.scenes_dir)
    logger.INFO("Requested device details:")
    benchrunner.benchmarkPrintDeviceInfo(ctx)
    # Run benchmark.
    all_stats = benchrunner.benchmarkAll(ctx)
    # Gather all information together.
    result = {
        "blender_version": system_info.getBlenderVersion(ctx),
        "system_info": system_info.gatherSystemInfo(ctx),
        "stats": all_stats if all_stats else {},
    }
    if args.full_dump:
        if args.json:
            _printFullJSONResult(ctx, result)
        else:
            _printFullResult(ctx, result)
    else:
        _printBriefResult(ctx, result)


if __name__ == "__main__":
    main()