Initial implementation of results parser
2 .gitignore vendored
@@ -3,5 +3,7 @@
 /blender
 # Ignore production configuration files.
 /config/*.cfg
+# Ignore generated datasets.
+/benchmark/website/dataset/*.js
 # Ignore Python cache
 __pycache__

@@ -1,6 +1,7 @@
 #!/usr/bin/env python3

 import argparse
+import datetime
 import foundation
 from foundation import (benchrunner,
                         buildbot,

@@ -240,7 +241,7 @@ def latestDirGet(path):
 def ensureOutputDir(config):
     output_dir = config['farm']['output_dir']
     if not os.path.exists(output_dir):
-        os.mkdir(output_dir)
+        os.makedirs(output_dir)
     max_dir = latestDirGet(output_dir)
     if not max_dir:
         max_dir = 0
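The switch from os.mkdir to os.makedirs matters when the configured output_dir is nested: makedirs also creates any missing intermediate directories. A minimal sketch of the difference (paths are illustrative):

    import os
    import tempfile

    base = tempfile.mkdtemp()
    nested = os.path.join(base, "a", "b", "c")
    # os.mkdir(nested) would raise FileNotFoundError: parent "a/b" is missing.
    os.makedirs(nested)                  # creates a/, a/b/ and a/b/c/
    os.makedirs(nested, exist_ok=True)   # no error if it already exists (Python 3.2+)
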
@@ -268,7 +269,7 @@ def getResultJSONString(ctx, results):
             stats[scene]['result'] = 'OK'
         else:
             stats[scene] = {'result': 'CRASH'}
-    return json.dumps(json_results, sort_keys=True, indent=4)
+    return json.dumps(json_results, sort_keys=True, indent=2)


 def saveResults(ctx, results, output_dir):

@@ -281,6 +282,7 @@ def saveResults(ctx, results, output_dir):
 # Main logic.
 ########################################


 def main():
     parser = configureArgumentParser()
     args = parser.parse_args()

@@ -333,7 +335,9 @@ def main():
     # Run benchmark.
     all_stats = benchrunner.benchmarkAll(ctx)
     # Gather all information together.
+    timestamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
     results = {
+        "timestamp": timestamp,
         "blender_version": system_info.getBlenderVersion(ctx),
         "system_info": system_info.gatherSystemInfo(ctx),
         "device_info": blender_dvice_info,
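The timestamp is recorded as an ISO-8601 string in UTC, which is exactly what parse_results.py (added below) feeds to dateutil.parser.parse. A quick round-trip sketch:

    import datetime
    from dateutil import parser

    stamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
    # e.g. '2018-06-06T12:34:56.789012+00:00'
    restored = parser.parse(stamp)
    assert restored.tzinfo is not None  # comparisons between runs stay well-defined
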
@@ -124,7 +124,7 @@ def stripSensitiveInfo(s):
     return s


 ########################################
-# Directories manipulation
+# Directories manipulation.
 ########################################

@@ -141,3 +141,49 @@ def getGlobalConfigDirectory():
     Get the configuration directory global to all components of this bundle.
     """
     return os.path.join(getBundleRootDirectory(), "config")
+
+
+########################################
+# Device name manipulation.
+########################################
+
+
+def deviceInfoAsString(device_info):
+    """
+    Convert device information to a single string.
+    """
+    device_type = device_info['device_type']
+    compute_devices = device_info['compute_devices']
+    if len(compute_devices) == 1:
+        return compute_devices[0]
+    raise Exception("Needs implementation")
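The multi-device case is an explicit TODO above. Purely as a sketch, one possible completion (the combined-name format is an assumption, not part of this commit):

    def deviceInfoAsStringSketch(device_info):
        # Hypothetical: join multiple compute device names, prefixed by the
        # device type, e.g. "CUDA: GTX 1080 + GTX 1080".
        device_type = device_info['device_type']
        compute_devices = device_info['compute_devices']
        if len(compute_devices) == 1:
            return compute_devices[0]
        return "{}: {}".format(device_type, " + ".join(compute_devices))
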
+
+
+########################################
+# Graphs manipulation.
+########################################
+
+
+def generateBarColor(index, alpha=None):
+    """
+    Generate a unique looking color for a given bar index.
+    """
+    builtin_colors = ((255, 99, 132),
+                      (255, 159, 64),
+                      (255, 205, 86),
+                      (75, 192, 192),
+                      (54, 162, 235),
+                      (153, 102, 255),
+                      (201, 203, 207),
+                      (48, 103, 204),
+                      (220, 56, 18),
+                      (254, 155, 0),
+                      (15, 147, 25))
+    color = (0, 0, 0)
+    if index >= 0 and index < len(builtin_colors):
+        color = builtin_colors[index]
+    if alpha is None:
+        return "rgb({}, {}, {})".format(
+            str(color[0]), str(color[1]), str(color[2]))
+    else:
+        return "rgba({}, {}, {}, {})".format(
+            str(color[0]), str(color[1]), str(color[2]), str(alpha))
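For reference, usage of the helper above; out-of-range indices deliberately fall back to black:

    >>> generateBarColor(0)
    'rgb(255, 99, 132)'
    >>> generateBarColor(0, 0.5)
    'rgba(255, 99, 132, 0.5)'
    >>> generateBarColor(99)
    'rgb(0, 0, 0)'
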
@@ -101,7 +101,7 @@ def _printFullJSONResult(ctx, results):
         if scene not in stats:
             continue
         stats[scene] = stats[scene].asDict()
-    print(json.dumps(json_results, sort_keys=True, indent=4))
+    print(json.dumps(json_results, sort_keys=True, indent=2))


 def _printBriefResult(ctx, results):

259 benchmark/parse_results.py Executable file
@@ -0,0 +1,259 @@
#!/usr/bin/env python3

import argparse
import dateutil
import foundation
import json
import os
import sys

from foundation import (config,
                        logger,
                        util,)
from dateutil import parser

# Default directory where benchmark machines store their results, can be
# overridden via the configuration file or the --benchmark-dir flag.
RESULTS_DIRECTORY = "/tmp/blender-benchmark/"

########################################
# Base support class.
########################################


class ResultVisitor:
    """
    Super class for all possible visitors of benchmark results.
    """

    def __init__(self):
        pass

    def handleResults(self, results):
        """
        Handle results (a decoded Python dictionary).
        """
        pass

    def storeResults(self, dataset_dir):
        """
        Store results to a JSON file.
        """
        pass

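Any number of such visitors can be run over the same result set in a single pass. A minimal sketch of a hypothetical extra visitor (not part of this commit), just to show the shape of the contract:

    class SceneCountVisitor(ResultVisitor):
        # Hypothetical example: count how many results were seen per scene.
        def __init__(self):
            ResultVisitor.__init__(self)
            self.counts_ = {}

        def handleResults(self, results):
            for scene_name in results['stats']:
                self.counts_[scene_name] = self.counts_.get(scene_name, 0) + 1

        def storeResults(self, dataset_dir):
            filename = os.path.join(dataset_dir, "scene_counts.json")
            with open(filename, "w") as f:
                json.dump(self.counts_, f, sort_keys=True, indent=2)
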
########################################
# Parser helpers.
########################################


def configureArgumentParser():
    parser = argparse.ArgumentParser(
        description="Cycles benchmark parser script.")
    parser.add_argument("-b", "--benchmark-dir",
                        help="Directory with benchmark results",
                        default="")
    parser.add_argument("-d", "--dataset-dir",
                        help="Directory where datasets will be stored",
                        default="")
    return parser

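Both flags can also come from the global configuration file. As a command line sketch (paths are illustrative, chosen to match RESULTS_DIRECTORY and the website dataset directory from .gitignore):

    ./parse_results.py --benchmark-dir /tmp/blender-benchmark \
                       --dataset-dir website/dataset
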
########################################
# Configuration helpers.
########################################


def injectDefaultConfiguration(config):
    """
    For a specified configuration object, set all possible properties to
    their default value.
    """
    root_dir = util.getBundleRootDirectory()
    section = {"benchmark_dir": RESULTS_DIRECTORY,
               "dataset_dir": ""}
    config['parser'] = section


def injectArgparseConfiguration(config, args):
    """
    Override settings with arguments passed from the command line.
    """
    section = config['parser']
    if args.benchmark_dir:
        section['benchmark_dir'] = args.benchmark_dir
    if args.dataset_dir:
        section['dataset_dir'] = args.dataset_dir


def readConfiguration(args):
    """
    Read configuration file and return BenchmarkConfig with all the settings
    we will need to use.
    """
    config = foundation.config.BenchmarkConfig()
    injectDefaultConfiguration(config)
    config.readGlobalConfig("parser")
    injectArgparseConfiguration(config, args)
    return config


def checkConfiguration(config):
    """
    Check whether configuration is complete and usable.
    """
    logger.INFO("Validating configuration...")
    section = config['parser']
    # Check whether directories are correct.
    if not os.path.exists(section["benchmark_dir"]):
        logger.INFO("  Benchmark directory does not exist.")
        return False
    if not os.path.exists(section["dataset_dir"]):
        logger.INFO("  Dataset directory does not exist.")
        return False
    return True

########################################
# Results iteration implementation.
########################################


def visitBenchmarkResult(directory, visitors):
    """
    Take all actions needed when a new benchmark result is found.
    """
    results_filename = os.path.join(directory, "results.json")
    if not os.path.exists(results_filename):
        return
    with open(results_filename) as results_file:
        results = json.load(results_file)
    # Check results are usable.
    if 'stats' not in results:
        return
    for visitor in visitors:
        visitor.handleResults(results)


def iterateBenchmarks(directory, visitors):
    """
    Iterate over all benchmark results for a specific configuration.
    """
    for filename in sorted(os.listdir(directory)):
        full_filename = os.path.join(directory, filename)
        if os.path.isdir(full_filename):
            visitBenchmarkResult(full_filename, visitors)


def iterateBenchmarksRoot(directory, visitors):
    """
    Iterate over all benchmark results, starting from the top level where all
    benchmark machines are storing their results.
    """
    for filename in sorted(os.listdir(directory)):
        full_filename = os.path.join(directory, filename)
        if os.path.isdir(full_filename):
            iterateBenchmarks(full_filename, visitors)

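The two-level iteration above implies an on-disk layout of one directory per machine, each containing one directory per run with a results.json inside. Illustratively (all names made up):

    /tmp/blender-benchmark/
        machine-01/
            0001/results.json
            0002/results.json
        machine-02/
            0001/results.json
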
########################################
# Main logic.
########################################


class LatestResultVisitor(ResultVisitor):
    def __init__(self):
        ResultVisitor.__init__(self)
        self.devices_ = {}

    def copyUsableStats(self, device, results):
        stats = results['stats']
        timestamp = dateutil.parser.parse(results['timestamp'])
        for scene_name in stats:
            stat = stats[scene_name]
            # Ignore benchmark results which crashed or aborted.
            if stat['result'] != 'OK':
                continue
            # Ignore benchmark results which are older than existing ones.
            if scene_name in device and \
               "timestamp" in device[scene_name] and \
               timestamp < device[scene_name]["timestamp"]:
                continue
            device_scene_stat = dict(stat)
            device_scene_stat.pop("result")
            device_scene_stat['timestamp'] = timestamp
            device[scene_name] = device_scene_stat

    def handleResults(self, results):
        if 'device_info' not in results or \
           'stats' not in results:
            return
        device_info = results['device_info']
        device_name = util.deviceInfoAsString(device_info)
        # If there were no stats for the device yet, start with an empty set.
        if device_name not in self.devices_:
            self.devices_[device_name] = {}
        self.copyUsableStats(self.devices_[device_name], results)

    def storeResults(self, dataset_dir):
        # First of all, gather all possible scenes.
        all_scenes = set()
        for device_name, stats in self.devices_.items():
            for scene_name in stats.keys():
                all_scenes.add(scene_name)
        all_scenes = list(all_scenes)
        # Gather datasets in the format expected by the charts.
        datasets = []
        device_index = 0
        for device_name, stats in self.devices_.items():
            data = []
            for scene_name in all_scenes:
                if scene_name not in stats:
                    # TODO(sergey): How to indicate missing dataset?
                    data.append(0)
                    continue
                scene_stats = stats[scene_name]
                data.append(scene_stats['pipeline_render_time'])
            dataset = {
                "label": device_name,
                "borderWidth": 1,
                "backgroundColor": util.generateBarColor(device_index, 0.5),
                "borderColor": util.generateBarColor(device_index),
                "data": data,
            }
            datasets.append(dataset)
            device_index += 1
        # Prepare a Python dict before converting it to JSON.
        data = {
            "labels": all_scenes,
            "datasets": datasets,
        }
        code = "var data = " + json.dumps(data, sort_keys=True, indent=2) + ";"
        # Save the dataset to disk.
        filename = os.path.join(dataset_dir, "latest_snapshot.js")
        with open(filename, "w") as f:
            f.write(code)

def main():
    parser = configureArgumentParser()
    args = parser.parse_args()
    logger.init()
    # Read configuration file, so we know what we will be doing.
    config = readConfiguration(args)
    if not checkConfiguration(config):
        logger.ERROR("Configuration is not complete or valid, aborting.")
        return False
    # Construct the list of all visitors which we want.
    latest_results_visitor = LatestResultVisitor()
    # Do the actual parsing.
    logger.INFO("Iterating over all benchmark results...")
    visitors = [latest_results_visitor]
    iterateBenchmarksRoot(config['parser']['benchmark_dir'], visitors)
    # Store results.
    logger.INFO("Storing results...")
    dataset_dir = config['parser']['dataset_dir']
    for visitor in visitors:
        visitor.storeResults(dataset_dir)

    return True


if __name__ == "__main__":
    if not main():
        sys.exit(1)

21 benchmark/website/index.html Normal file
@@ -0,0 +1,21 @@
<!doctype html>
<html>
<head>
  <title>Benchmark Results</title>
  <script src="source/chart.bundle.js"></script>
  <script src="source/utils.js"></script>
  <script src="dataset/latest_snapshot.js"></script>
  <link rel="stylesheet" href="style/main.css">
</head>
<body>
  <div id="container">
    <canvas id="canvas"></canvas>
  </div>
  <script>
    window.onload = function() {
      var ctx = document.getElementById("canvas").getContext("2d");
      buildChart(ctx, data);
    };
  </script>
</body>
</html>

17220 benchmark/website/source/chart.bundle.js Normal file
File diff suppressed because it is too large.
48 benchmark/website/source/utils.js Normal file
@@ -0,0 +1,48 @@
// Generate unique looking color for a given bar index.
function generateBarColor(index) {
  var builtin_colors = [[255, 99, 132],
                        [255, 159, 64],
                        [255, 205, 86],
                        [75, 192, 192],
                        [54, 162, 235],
                        [153, 102, 255],
                        [201, 203, 207],
                        [48, 103, 204],
                        [220, 56, 18],
                        [254, 155, 0],
                        [15, 147, 25]];
  var color = [0, 0, 0];
  if (index >= 0 && index < builtin_colors.length) {
    color = builtin_colors[index];
  }
  return "rgb(" + color[0].toString() + ", " +
                  color[1].toString() + ", " +
                  color[2].toString() + ")";
}

function buildChart(ctx, data) {
  // Find the maximum value over all datasets, so the Y axis can be given
  // some headroom above the tallest bar.
  var max_value = 0;
  for (var dataset of data.datasets) {
    for (var value of dataset.data) {
      max_value = Math.max(max_value, value);
    }
  }
  window.myBar = new Chart(
    ctx,
    {
      type: 'bar',
      data: data,
      options: {responsive: true,
                legend: {position: 'top'},
                title: {display: true,
                        text: 'Benchmark Results'},
                scales: {xAxes: [{display: true,
                                  scaleLabel: {display: true,
                                               labelString: 'Scene'}}],
                         yAxes: [{display: true,
                                  scaleLabel: {display: true,
                                               labelString: 'Render time (sec)'},
                                  ticks: {min: 0,
                                          max: Math.ceil(max_value * 1.25)}}]}}
    });
}

9 benchmark/website/style/main.css Normal file
@@ -0,0 +1,9 @@
canvas {
  -moz-user-select: none;
  -webkit-user-select: none;
  -ms-user-select: none;
}

#container {
  width: 75%;
}