Merge actual benchmark run operator into interface

This commit is contained in:
2018-08-03 14:57:21 +02:00
parent e96efb840a
commit 895b2b88e8
2 changed files with 287 additions and 228 deletions

View File

@@ -1,203 +0,0 @@
# Blender add-on registration metadata; Blender's add-on system reads this
# dict before the module is imported.
bl_info = {
    "name": "Benchmark",
    "author": "Sergey Sharybin",
    "blender": (2, 80, 0),  # minimum Blender version
    "location": "",
    "description": "Addon to provide benchmark functionality",
    "warning": "",
    "wiki_url": "",
    "support": 'OFFICIAL',
    "category": "Benchmark",
}
import datetime
import json
import os
import sys
import bpy
from threading import Thread
from bpy.props import IntProperty, FloatProperty
from .foundation import (benchrunner,
buildbot,
config,
context,
logger,
system_info,
util)
class ProgressProviderSink:
    """In-memory progress sink: records the most recent progress fraction
    and step name so the UI can poll them."""

    current_progress = 0.0
    current_step = ''

    def __init__(self):
        self.current_progress = 0.0
        self.current_step = ''

    def progress(self, count, total, prefix="", suffix=""):
        # A zero total would divide by zero; report it as "no progress".
        self.current_progress = float(count) / float(total) if total != 0 else 0.0

    def clear(self):
        # Nothing to reset for this sink.
        pass

    def step(self, step_name):
        self.current_step = step_name
class LoggerProviderSink:
    """Logger provider that silently discards every message; the add-on
    reports status through the UI instead of the console."""

    def HEADER(self, *args): pass
    def WARNING(self, *args): pass
    def ERROR(self, *args): pass
    def OK(self, *args): pass
    def BOLD(self, *args): pass
    def INFO(self, *args): pass
    def DEBUG(self, *args): pass
    def FATAL(self, *args): pass
def getResultJSONString(ctx, results):
    """Serialize benchmark results to a pretty-printed JSON string.

    Per-scene stat objects inside results['stats'] are replaced in place
    by plain dictionaries tagged 'OK'; a falsy stat entry becomes a
    'CRASH' marker. Note the input dict is mutated.
    """
    stats = results['stats']
    for scene in ctx.scenes:
        if scene in stats:
            stat = stats[scene]
            if stat:
                converted = stat.asDict()
                converted['result'] = 'OK'
            else:
                converted = {'result': 'CRASH'}
            stats[scene] = converted
    return json.dumps(results, sort_keys=True, indent=2)
def benchmark_thread(ctx):
    """Worker-thread entry point: run the full benchmark and print the
    results as a JSON string.

    Runs off the main thread so the UI stays responsive; progress and log
    output go through the provider sinks installed by the operator.
    """
    # This is actual device configuration which is used to render the
    # benchmark scene.
    blender_device_info = benchrunner.benchmarkGetDeviceInfo(ctx)
    if not blender_device_info['device_type']:
        # TODO(sergey): Report an error somehow.
        return
    all_stats = benchrunner.benchmarkAll(ctx)
    # Gather all information together.
    timestamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
    # System-info gathering shells out via sys.executable, which inside
    # Blender points at the blender binary; temporarily point it at the
    # bundled Python instead.
    # NOTE(review): this mutates interpreter-global state from a worker
    # thread — confirm nothing else reads sys.executable concurrently.
    old_executable = sys.executable
    sys.executable = bpy.app.binary_path_python
    try:
        results = {
            "timestamp": timestamp,
            "blender_version": system_info.getBlenderVersion(ctx),
            "system_info": system_info.gatherSystemInfo(ctx),
            "device_info": blender_device_info,
            "stats": all_stats if all_stats else {}
        }
    finally:
        # Always restore the override; the original code leaked it when
        # info gathering raised an exception.
        sys.executable = old_executable
    json_string = getResultJSONString(ctx, results)
    print(json_string)
class BenchmarkOperator(bpy.types.Operator):
    """Modal operator that runs the benchmark on a background thread.

    invoke() installs sink providers, starts the worker thread and a
    timer; modal() polls the thread on each timer tick and tears
    everything down in done() when the thread finishes.

    NOTE(review): `foundation.progress`, `foundation.logger` and
    `foundation.context` are referenced below, but the visible import
    block only imports submodules (`from .foundation import ...`) — the
    bare name `foundation` looks undefined and would raise NameError;
    verify against the full file.
    """
    bl_idname = "wm.benchmark"
    bl_label = "benchmark Operator"
    first_mouse_x: IntProperty()
    first_value: FloatProperty()
    # Runtime state, populated by invoke().
    benchmark_context = None
    thread = None
    timer = None
    progress_provider = None
    logger_provider = None

    def setup_sink(self):
        # Route benchmark progress/log output into in-memory sinks so the
        # operator can poll them instead of reading stdout.
        self.progress_provider = ProgressProviderSink()
        self.logger_provider = LoggerProviderSink()
        foundation.progress.setProvider(self.progress_provider)
        foundation.logger.setProvider(self.logger_provider)

    def update_status(self):
        # Truncate the fraction to two decimal places for display.
        rounded = int(self.progress_provider.current_progress * 100) / 100
        step = self.progress_provider.current_step
        print(f"{step}: {rounded}")

    def done(self, context):
        # Called once the worker thread has finished: stop polling and
        # undo every global modification made in invoke()/setup_sink().
        wm = context.window_manager
        wm.event_timer_remove(self.timer)
        # Restore all modifications to the benchmark foundation.
        foundation.progress.restoreDefaultProvider()
        foundation.logger.restoreDefaultProvider()
        # Destroy objects of sinks.
        del self.progress_provider
        del self.logger_provider
        self.progress_provider = None
        self.logger_provider = None

    def modal(self, context, event):
        # Poll the worker thread on every timer tick; finish when it dies.
        if event.type == 'TIMER':
            if self.thread.is_alive():
                self.update_status()
                return {'PASS_THROUGH'}
            else:
                self.done(context)
                return {'FINISHED'}
        return {'PASS_THROUGH'}

    def invoke(self, context, event):
        # Before doing anything, make sure we have all sinks set up, so we do
        # not miss any progress report.
        self.setup_sink()
        wm = context.window_manager
        script_directory = os.path.dirname(os.path.realpath(__file__))
        configure_script = os.path.join(script_directory, "configure.py")
        # Build the benchmark run configuration. The "<...>" values are
        # placeholders — presumably substituted elsewhere; confirm.
        ctx = foundation.context.Context()
        ctx.blender = "<blender>"
        ctx.configure_script = configure_script
        ctx.scenes = ["<monkey>"]
        ctx.scenes_dir = "<scenes>"
        ctx.device_type = 'CPU'
        # Only applies for GPU, should match Cycles name
        ctx.device_name = ""
        # Set this to True when having multiple GPUs of the same name and
        # only one of them is to be enabled. Or when requesting GPU render
        # without specifying GPU name.
        ctx.device_single = True
        # ctx.image_output_dir = "/tmp/"
        self.benchmark_context = ctx
        # Create thread for the actual benchmark.
        self.thread = Thread(target = benchmark_thread,
                             args = (self.benchmark_context, ))
        self.thread.start()
        # Create timer to query thread status
        self.timer = wm.event_timer_add(0.1, context.window)
        # Register self as modal.
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}
def register():
    # Called by Blender when the add-on is enabled.
    bpy.utils.register_class(BenchmarkOperator)
def unregister():
    # Called by Blender when the add-on is disabled.
    bpy.utils.unregister_class(BenchmarkOperator)
#if __name__ == "__main__":
# register()

View File

@@ -1,16 +1,34 @@
import datetime
import json
import os
import sys
from threading import Thread, Lock
import blf
import bpy
from bpy.types import Panel
import os
from bpy.props import IntProperty, FloatProperty
from ..foundation import (benchrunner,
buildbot,
config,
logger,
progress,
system_info,
util)
from ..foundation import context as benchmark_context
# Global state
# NOTE(review): this section appears to contain interleaved old/new lines
# from a diff — result_platform/result_stats and `progress` below duplicate
# the global_result_* / current_progress variables; verify which set is
# actually live in the committed file before relying on either.
result_platform = None
result_stats = None
################################################################################
# Global state.
# Shared between the benchmark worker thread and the UI drawing/panel
# code; guarded by progress_lock where both sides touch it.
global_result_platform = None  # human-readable platform description string
global_result_stats = None     # status/result text shown in the interface
global_result_dict = None      # final JSON-ready results dict from the worker
images = {}                    # cache of loaded images, keyed by filepath
progress = 0.0
# NOTE(review): `progress` above shadows the `progress` module imported
# from ..foundation — looks like leftover old code; confirm.
current_progress = 0.0         # 0..1 render progress for the progress bar
progress_lock = Lock()
################################################################################
# Draw Utilities.
# Draw Utilities
font_id = 0
def viewport_size():
@@ -51,8 +69,16 @@ def draw_image(filepath, x, y, w, h):
gpu.draw.image(images[filepath], x, y, x + w, y + h)
# Draw
################################################################################
# Draw.
def benchmark_draw_post_pixel(arg1, arg2):
global progress_lock
progress_lock.acquire()
result_platform = global_result_platform
result_stats = global_result_stats
progress_lock.release()
ui_scale = bpy.context.user_preferences.system.ui_scale
blf.color(font_id, 1.0, 1.0, 1.0, 1.0)
@@ -78,7 +104,7 @@ def benchmark_draw_post_pixel(arg1, arg2):
progress_x = 0.0
progress_y = image_y + 1
progress_w = window_width * progress
progress_w = window_width * current_progress
progress_h = 15.0 * ui_scale
progress_color = [0.8, 1.0, 1.0, 0.2]
@@ -109,8 +135,160 @@ def benchmark_draw_post_pixel(arg1, arg2):
# Install the draw callback so the benchmark overlay is drawn after the
# viewport pixels ('POST_PIXEL'); keep the handle for later removal.
handle_draw = bpy.types.SpaceBenchmark.draw_handler_add(
    benchmark_draw_post_pixel, (None, None), 'WINDOW', 'POST_PIXEL')
################################################################################
# Benchmark foundation integration.
class ProgressProviderSink:
    """Progress provider that mirrors benchmark progress into attributes.

    The modal operator polls current_step / current_progress to update
    the on-screen status and progress bar.
    """

    current_progress = 0.0
    current_step = ''

    def __init__(self):
        self.current_progress = 0.0
        self.current_step = ''

    def progress(self, count, total, prefix="", suffix=""):
        # A zero total would divide by zero; treat it as "no progress".
        self.current_progress = 0.0 if total == 0 else float(count) / float(total)

    def clear(self):
        self.current_progress = 0

    def step(self, step_name):
        # Entering a different step restarts the progress fraction.
        if step_name != self.current_step:
            self.current_step = step_name
            self.current_progress = 0
class LoggerProviderSink:
    """Logger provider that drops every message.

    Installed via logger.setProvider(); all log levels are intentionally
    no-ops because the add-on reports status through the UI instead.
    """

    def _drop(self, *args):
        # Single shared no-op backing every log-level hook.
        pass

    HEADER = _drop
    WARNING = _drop
    ERROR = _drop
    OK = _drop
    BOLD = _drop
    INFO = _drop
    DEBUG = _drop
    FATAL = _drop
################################################################################
# Benchmark thread.
def string_strip_trademark(name):
    """Return *name* with "(R)" and "(TM)" trademark markers removed."""
    for marker in ("(R)", "(TM)"):
        name = name.replace(marker, "")
    return name
def correct_device_name(name):
    """Prefix a GPU *name* with its vendor when the product line is
    recognizable.

    Device names arrive as bare marketing names (e.g. "GeForce GTX");
    prepend "Nvidia " or "AMD " so the interface shows the vendor.
    Unrecognized names are returned unchanged.
    """
    # str.startswith accepts a tuple of prefixes — replaces the original
    # chained-or (which also carried stray C-style semicolons).
    if name.startswith(("TITAN", "Quadro", "GeForce")):
        return "Nvidia " + name
    if name.startswith("Radeon"):
        return "AMD " + name
    return name
def construct_gpu_string(system_info):
    """Build a comma-separated list of vendor-corrected GPU names from
    the gathered system information (non-CPU devices only)."""
    gpu_names = [
        correct_device_name(device["name"])
        for device in system_info["devices"]
        if device["type"] != "CPU"
    ]
    return ", ".join(gpu_names)
def construct_platform_string(system_info):
    """
    Construct human readable platform string to show in the interface.

    Produces "Operating System: ...", "CPU: ..." and, when any GPU is
    present, a "GPU(s): ..." line.
    """
    result = ""
    # Fix user-facing typo: was "Operation System".
    result += "Operating System: {} {} bit" . format(system_info["system"],
                                                     system_info["bitness"])
    result += "\nCPU: {}" . format(
        string_strip_trademark(system_info["cpu_brand"]))
    gpus = construct_gpu_string(system_info)
    if gpus:
        result += "\nGPU(s): {}" . format(gpus)
    return result
def convert_result_to_json_dict(ctx, results):
    """Flatten per-scene stat objects into a JSON-friendly list.

    results['stats'] (a scene -> stat-object mapping) is replaced with a
    list of plain dicts, each tagged with its scene name and a 'result'
    of 'OK' or 'CRASH'. The input dict is mutated and returned.
    """
    by_scene = results['stats']
    flattened = []
    for scene in ctx.scenes:
        if scene not in by_scene:
            continue
        stat = by_scene[scene]
        if stat:
            entry = stat.asDict()
            entry['result'] = 'OK'
        else:
            entry = {'result': 'CRASH'}
        entry["scene"] = scene
        flattened.append(entry)
    results['stats'] = flattened
    return results
def benchmark_thread(ctx):
    """Worker-thread entry point: run the whole benchmark and publish
    the results.

    Status/platform strings are written to the module-level
    global_result_* variables under progress_lock so the UI thread can
    read them safely; the final results dict lands in global_result_dict.
    """
    global progress_lock, global_result_platform, global_result_stats
    progress_lock.acquire()
    global_result_stats = "Collecting system information..."
    progress_lock.release()
    # This is all system information Blender knows.
    # NOTE: We override executable since cpuinfo uses it, and it is set
    # to blender.
    old_executable = sys.executable
    sys.executable = bpy.app.binary_path_python
    try:
        blender_system_info = system_info.gatherSystemInfo(ctx)
        # This is actual device configuration which is used to render the
        # benchmark scene.
        blender_device_info = benchrunner.benchmarkGetDeviceInfo(ctx)
        if not blender_device_info['device_type']:
            # TODO(sergey): Report an error somehow.
            return
        progress_lock.acquire()
        global_result_platform = construct_platform_string(blender_system_info)
        progress_lock.release()
        progress_lock.acquire()
        # Fix user-facing typo: was "Prepating render...".
        global_result_stats = "Preparing render..."
        progress_lock.release()
        all_stats = benchrunner.benchmarkAll(ctx)
        # Gather all information together.
        timestamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
        result = convert_result_to_json_dict(ctx, {
            "timestamp": timestamp,
            "blender_version": system_info.getBlenderVersion(ctx),
            "system_info": blender_system_info,
            "device_info": blender_device_info,
            "stats": all_stats if all_stats else {}
        })
        global global_result_dict
        global_result_dict = result
    finally:
        # Restore the override on every exit path; the original saved
        # old_executable but never used it, leaking the override for the
        # rest of the session.
        sys.executable = old_executable
################################################################################
# Panels
class BENCHMARK_PT_main(Panel):
bl_label = "Benchmark"
bl_options = {'HIDE_HEADER'}
@@ -118,8 +296,12 @@ class BENCHMARK_PT_main(Panel):
bl_region_type = 'WINDOW'
def draw(self, context):
if result_stats:
global progress_lock
progress_lock.acquire()
if global_result_stats:
progress_lock.release()
return
progress_lock.release()
layout = self.layout
@@ -151,33 +333,111 @@ class BENCHMARK_PT_main(Panel):
split.label()
################################################################################
# Operator
class BENCHMARK_OT_run(bpy.types.Operator):
"Run Blender benchmark"
bl_label = "Run Benchmark"
bl_idname = "benchmark.run"
def invoke(self, context, event):
global result_platform, result_stats, progress
result_platform = "CPU: Intel Core i11\nGPU: NVIDIA GTX 1180\nOS: Minix"
result_stats = "Render Time: infinite\nMemory Usage: 16KB"
progress = 0.01
benchmark_context = None
thread = None
timer = None
progress_provider = None
logger_provider = None
self._timer = context.window_manager.event_timer_add(0.01, context.window)
context.window_manager.modal_handler_add(self)
def setup_sink(self):
self.progress_provider = ProgressProviderSink()
self.logger_provider = LoggerProviderSink()
progress.setProvider(self.progress_provider)
logger.setProvider(self.logger_provider)
return {'RUNNING_MODAL'}
def update_status(self, context):
global global_result_stats
step = self.progress_provider.current_step
if step == 'WARM_UP':
global_result_stats = "Rendering warm-up pass..."
elif step == 'RUN':
global current_progress
current_progress = self.progress_provider.current_progress
global_result_stats = "Rendering..."
context.area.tag_redraw()
def done(self, context):
global global_result_stats, current_progress
wm = context.window_manager
wm.event_timer_remove(self.timer)
# Restore all modifications to the benchmark foundation.
progress.restoreDefaultProvider()
logger.restoreDefaultProvider()
# Destroy objects of sinks.
del self.progress_provider
del self.logger_provider
self.progress_provider = None
self.logger_provider = None
# Construct final stats string
global global_result_dict
global_result_stats = ""
for stat in global_result_dict["stats"]:
if global_result_stats:
global_result_stats += "\n"
if stat["result"] == "OK":
global_result_stats += "{}: {}" . format(stat["scene"],
util.humanReadableTimeDifference(stat["total_render_time"]))
else:
global_result_stats += "{}: {}" . format(stat["scene"],
stat["result"])
# Tag for nice redraw
current_progress = 0.0
context.area.tag_redraw()
def modal(self, context, event):
if event.type == 'TIMER':
global progress
progress = min(progress + 0.001, 1.0)
context.area.tag_redraw()
if progress == 1.0:
context.window_manager.event_timer_remove(self._timer)
if self.thread.is_alive():
self.update_status(context)
return {'PASS_THROUGH'}
else:
self.done(context)
return {'FINISHED'}
return {'PASS_THROUGH'}
def invoke(self, context, event):
global global_result_platform, global_result_stats
global_result_platform = ""
global_result_stats = "Initializing..."
context.area.tag_redraw()
# Before doing anything, make sure we have all sinks set up, so we do
# not miss any progress report.
self.setup_sink()
wm = context.window_manager
script_directory = os.path.dirname(os.path.realpath(__file__))
benchmark_script_directory = os.path.dirname(script_directory)
configure_script = os.path.join(benchmark_script_directory, "configure.py")
ctx = benchmark_context.Context()
ctx.blender = "<blender>"
ctx.configure_script = configure_script
ctx.scenes = ["<monkey>"]
ctx.scenes_dir = "<scenes_folder>"
ctx.device_type = 'CPU'
# Only applies for GPU, should match Cycles name
ctx.device_name = ""
# Set this to True when having multiple GPUs of same name and only
# one of the mis to be enabled. Or when requesting GPU render without
# specifying GPU name.
ctx.device_single = True
# ctx.image_output_dir = "/tmp/"
self.benchmark_context = ctx
# Create thread for the actual benchmark.
self.thread = Thread(target = benchmark_thread,
args = (self.benchmark_context, ))
self.thread.start()
# Create timer to query thread status
self.timer = wm.event_timer_add(0.1, context.window)
# Register self as modal.
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
def cancel(self, context):
@@ -209,8 +469,9 @@ class BENCHMARK_OT_save(bpy.types.Operator):
return {'RUNNING_MODAL'}
################################################################################
# Tweak User Preferences
# Module-level side effect: recolor the active theme's tool widget for the
# benchmark UI. Runs at import time.
userpref = bpy.context.user_preferences
theme = userpref.themes[0]
theme.user_interface.wcol_tool.inner = [0.408, 0.690, 0.129, 1.0]
@@ -221,8 +482,9 @@ theme.benchmark.space.back = [0.26, 0.26, 0.26]
# Module-level side effect: bump the widget font size for the benchmark UI.
style = userpref.ui_styles[0]
style.widget.points = 12
################################################################################
# Registration
classes = (
BENCHMARK_PT_main,
BENCHMARK_OT_run,