#!/usr/bin/env python3
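"""
Parse Cycles benchmark results found in a benchmark directory and store them
as JavaScript datasets suitable for feeding into charts: a latest-per-device
snapshot (latest_snapshot.js) and a per-device, per-scene history (history.js).
"""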

import argparse
import foundation
import json
import os
import sys

from foundation import (config,
                        logger,
                        util,)


########################################
# Base support class.
########################################


class ResultVisitor:
    """
    Superclass for all possible visitors of benchmark results.
    """

    def __init__(self):
        pass

    def handleResults(self, results):
        """
        Handle results (which is a decoded Python dictionary).
        """
        pass

    def storeResults(self, dataset_dir):
        """
        Store gathered results to a dataset file in dataset_dir.
        """
        pass


########################################
# Parser helpers.
########################################


def configureArgumentParser():
    parser = argparse.ArgumentParser(
        description="Cycles benchmark parser script.")
    parser.add_argument("-b", "--benchmark-dir",
                        help="Directory with benchmark results",
                        default="")
    parser.add_argument("-d", "--dataset-dir",
                        help="Directory where datasets will be stored",
                        default="")
    return parser
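
# Example invocation (the script name and paths are illustrative, they are not
# mandated by this file):
#
#   ./parse_benchmarks.py --benchmark-dir /path/to/benchmark/results \
#                         --dataset-dir /path/to/datasets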


########################################
# Configuration helpers.
########################################


def injectDefaultConfiguration(config):
    """
    For a specified configuration object, set all possible properties to their
    default value.
    """
    root_dir = util.getBundleRootDirectory()
    section = {"benchmark_dir": "",
               "dataset_dir": ""}
    config['parser'] = section


def injectArgparseConfiguration(config, args):
    """
    Override settings with arguments passed from the command line.
    """
    section = config['parser']
    if args.benchmark_dir:
        section['benchmark_dir'] = args.benchmark_dir
    if args.dataset_dir:
        section['dataset_dir'] = args.dataset_dir


def readConfiguration(args):
    """
    Read configuration file and return BenchmarkConfig with all the settings
    we will need to use.

    Defaults are injected first, then overridden by the global configuration
    file, and finally by arguments passed on the command line.
    """
    config = foundation.config.BenchmarkConfig()
    injectDefaultConfiguration(config)
    config.readGlobalConfig("parser")
    injectArgparseConfiguration(config, args)
    return config


def checkConfiguration(config):
    """
    Check whether configuration is complete and usable.
    """
    logger.INFO("Validating configuration...")
    section = config['parser']
    # Check whether directories are correct.
    if not os.path.exists(section["benchmark_dir"]):
        logger.INFO(" Benchmark directory does not exist.")
        return False
    if not os.path.exists(section["dataset_dir"]):
        logger.INFO(" Dataset directory does not exist.")
        return False
    return True


########################################
# Results iteration implementation.
########################################
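
# The visitors below rely on results.json files having roughly the following
# layout. The key names are taken from the accesses in the code; the values
# are purely illustrative:
#
#   {
#     "device_info": {...},            # formatted via util.deviceInfoAsString()
#     "device_name_override": "...",   # optional
#     "blender_version": {
#       "build_commit_date": "...",
#       "build_commit_time": "..."
#     },
#     "stats": {
#       "<scene name>": {
#         "result": "OK",              # anything else is treated as a failure
#         "pipeline_render_time": 123.4
#       }
#     }
#   }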


def visitBenchmarkResult(directory, visitors):
    """
    Take all actions needed when a new benchmark result is found.
    """
    results_filename = os.path.join(directory, "results.json")
    if not os.path.exists(results_filename):
        return
    with open(results_filename) as results_file:
        results = json.load(results_file)
    # Check results are usable.
    if 'stats' not in results:
        return
    for visitor in visitors:
        visitor.handleResults(results)


def iterateBenchmarks(directory, visitors):
    """
    Iterate over all benchmark results for a specific configuration.
    """
    for filename in sorted(os.listdir(directory)):
        full_filename = os.path.join(directory, filename)
        if os.path.isdir(full_filename):
            visitBenchmarkResult(full_filename, visitors)


def iterateBenchmarksRoot(directory, visitors):
    """
    Iterate over all benchmark results, starting from the top level where all
    benchmark machines are storing their results.
    """
    for filename in sorted(os.listdir(directory)):
        full_filename = os.path.join(directory, filename)
        if os.path.isdir(full_filename):
            iterateBenchmarks(full_filename, visitors)
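
# Taken together, the two iterators above expect the following directory
# layout (names are illustrative):
#
#   <benchmark_dir>/<machine>/<result>/results.json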


########################################
# Main logic.
########################################


class LatestResultVisitor(ResultVisitor):
    def __init__(self):
        ResultVisitor.__init__(self)
        self.devices_ = {}

    def copyUsableStats(self, device, results):
        stats = results['stats']
        blender_version = results['blender_version']
        timestamp = util.blenderCommitUnixTimestamp(
            blender_version['build_commit_date'],
            blender_version['build_commit_time'])
        for scene_name in stats:
            stat = stats[scene_name]
            # Ignore benchmark results which crashed or aborted.
            if "result" not in stat or stat['result'] != 'OK':
                continue
            # Ignore benchmark results which are older than existing ones.
            if scene_name in device and \
               "timestamp" in device[scene_name] and \
               timestamp < device[scene_name]["timestamp"]:
                continue
            device_scene_stat = dict(stat)
            device_scene_stat.pop("result")
            device_scene_stat['timestamp'] = timestamp
            device[scene_name] = device_scene_stat

    def handleResults(self, results):
        if 'device_info' not in results or \
           'stats' not in results:
            return
        device_info = results['device_info']
        if "device_name_override" in results:
            device_name = results["device_name_override"]
        else:
            device_name = util.deviceInfoAsString(device_info)
        # If there were no stats for the device yet, start with an empty set.
        if device_name not in self.devices_:
            self.devices_[device_name] = {}
        self.copyUsableStats(self.devices_[device_name], results)

    def storeResults(self, dataset_dir):
        # First of all, gather all possible scenes.
        all_scenes = set()
        for device_name, stats in self.devices_.items():
            for scene_name in stats.keys():
                all_scenes.add(scene_name)
        all_scenes = sorted(list(all_scenes))
        # Gather datasets in format of charts.
        datasets = []
        for device_name in sorted(self.devices_.keys()):
            stats = self.devices_[device_name]
            data = []
            for scene_name in all_scenes:
                if scene_name not in stats:
                    data.append(None)
                    continue
                scene_stats = stats[scene_name]
                data.append(scene_stats['pipeline_render_time'])
            dataset = {
                "label": device_name,
                "data": data,
            }
            datasets.append(dataset)
        # Prepare Python dict before converting it to JSON.
        data = {
            "labels": all_scenes,
            "datasets": datasets,
        }
        code = "var data = " + json.dumps(data, sort_keys=True, indent=2) + ";"
        # Save dataset to disk.
        filename = os.path.join(dataset_dir, "latest_snapshot.js")
        with open(filename, "w") as f:
            f.write(code)
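
# The snapshot dataset written above has the following shape (keys come out
# alphabetically because of json.dumps(sort_keys=True); values illustrative):
#
#   var data = {
#     "datasets": [
#       {"data": [12.3, null, 45.6], "label": "<device name>"},
#       ...
#     ],
#     "labels": ["<scene name>", ...]
#   };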


class HistoryResultVisitor(ResultVisitor):
    def __init__(self):
        ResultVisitor.__init__(self)
        self.devices_ = {}

    def copyUsableStats(self, device, results):
        stats = results['stats']
        blender_version = results['blender_version']
        timestamp = util.blenderCommitUnixTimestamp(
            blender_version['build_commit_date'],
            blender_version['build_commit_time'])
        for scene_name in stats:
            stat = stats[scene_name]
            # Ignore benchmark results which crashed or aborted.
            if "result" not in stat or stat['result'] != 'OK':
                continue
            device_scene_stat = dict(stat)
            device_scene_stat.pop("result")
            device_scene_stat['timestamp'] = timestamp
            if scene_name not in device:
                device[scene_name] = []
            device[scene_name].append(device_scene_stat)

    def handleResults(self, results):
        if 'device_info' not in results or \
           'stats' not in results:
            return
        device_info = results['device_info']
        if "device_name_override" in results:
            device_name = results["device_name_override"]
        else:
            device_name = util.deviceInfoAsString(device_info)
        # If there were no stats for the device yet, start with an empty set.
        if device_name not in self.devices_:
            self.devices_[device_name] = {}
        self.copyUsableStats(self.devices_[device_name], results)

    def removeDuplicated(self, stats_history):
        # Collapse entries which share the same timestamp, keeping the first
        # one (stats_history is expected to be sorted by timestamp).
        new_stats_history = []
        prev_timestamp = None
        for stats in stats_history:
            if stats['timestamp'] == prev_timestamp:
                # TODO(sergey): Average somehow?
                continue
            new_stats_history.append(stats)
            prev_timestamp = stats['timestamp']
        return new_stats_history

    def storeResults(self, dataset_dir):
        # First of all, gather all possible scenes.
        all_scenes = set()
        for device_name, stats in self.devices_.items():
            for scene_name in stats.keys():
                all_scenes.add(scene_name)
        all_scenes = sorted(list(all_scenes))
        # Gather datasets in format of lines.
        datasets = []
        for device_name in sorted(self.devices_.keys()):
            stats_history = self.devices_[device_name]
            for scene_name in all_scenes:
                if scene_name not in stats_history:
                    continue
                scene_stats_history = stats_history[scene_name]
                data = []
                sorted_scene_stats_history = sorted(
                    scene_stats_history,
                    key=lambda k: k['timestamp'])
                uniq_scene_stats_history = self.removeDuplicated(
                    sorted_scene_stats_history)
                for scene_stats in uniq_scene_stats_history:
                    # Format commit time for the chart X axis (the timestamp
                    # is expected to be datetime-like, since strftime() is
                    # called on it).
                    timestamp = scene_stats['timestamp']
                    data.append({"x": timestamp.strftime("%d/%m/%y %H:%M"),
                                 "y": scene_stats['pipeline_render_time']})
                dataset = {
                    "device_name": device_name,
                    "scene_name": scene_name,
                    "data": data,
                }
                datasets.append(dataset)
        data = {
            "scenes": all_scenes,
            "datasets": datasets,
        }
        code = "var data = " + json.dumps(data, sort_keys=True, indent=2) + ";"
        # Save dataset to disk.
        filename = os.path.join(dataset_dir, "history.js")
        with open(filename, "w") as f:
            f.write(code)
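
# The history dataset written above has the following shape; each dataset is
# one (device, scene) line, with points keyed by commit time (values are
# illustrative):
#
#   var data = {
#     "datasets": [
#       {"data": [{"x": "01/08/18 12:34", "y": 123.4}, ...],
#        "device_name": "<device>", "scene_name": "<scene>"},
#       ...
#     ],
#     "scenes": ["<scene name>", ...]
#   };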


def main():
    parser = configureArgumentParser()
    args = parser.parse_args()
    logger.init()
    # Read configuration file, so we know what we will be doing.
    config = readConfiguration(args)
    if not checkConfiguration(config):
        logger.ERROR("Configuration is not complete or valid, aborting.")
        return False
    # Construct list of all visitors which we want.
    latest_results_visitor = LatestResultVisitor()
    history_results_visitor = HistoryResultVisitor()
    # Do actual parse.
    logger.INFO("Iterating over all benchmark results...")
    visitors = (latest_results_visitor,
                history_results_visitor,)
    iterateBenchmarksRoot(config['parser']['benchmark_dir'], visitors)
    # Store results.
    logger.INFO("Storing results...")
    dataset_dir = config['parser']['dataset_dir']
    for visitor in visitors:
        visitor.storeResults(dataset_dir)

    return True


if __name__ == "__main__":
    if not main():
        sys.exit(1)