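# User interface for the Blender Benchmark client: the panel and operators
# shown in the custom 'BENCHMARK' space, plus the glue that feeds progress
# from the benchmark foundation into the drawing code.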
import datetime
import json
import os
import platform
import subprocess
import sys
import tempfile
from threading import Thread
import bpy
from bpy.types import Panel, Scene
from bpy.props import EnumProperty, IntProperty, FloatProperty
from ..foundation import (benchrunner,
                          logger,
                          progress,
                          system_info,
                          util)
from ..foundation import context as benchmark_context

from .global_state import G
from . import draw

QUICK_SCENES = ["bmw27",
                "classroom"]

COMPLETE_SCENES = ["barbershop_interior",
                   "bmw27",
                   "classroom",
                   "fishy_cat",
                   "koro",
                   "pavillon_barcelona"]

handle_draw = bpy.types.SpaceBenchmark.draw_handler_add(
    draw.benchmark_draw_post_pixel, (None, None), 'WINDOW', 'POST_PIXEL')


################################################################################
# Benchmark foundation integration.

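# ProgressProviderSink receives progress callbacks from the benchmark
# foundation (which runs in a worker thread) and mirrors them into the global
# UI state G. All updates happen under G.progress_lock so the drawing code
# always reads a consistent snapshot.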
class ProgressProviderSink:
    current_progress = 0.0
    current_step = ''
    current_scene = ''
    process = None

    def __init__(self):
        self.current_progress = 0.0
        self.current_step = ''
        self.process = None

    def progress(self, count, total, prefix="", suffix=""):
        with G.progress_lock:
            if total != 0:
                self.current_progress = float(count) / float(total)
            else:
                self.current_progress = 0.0

    def clear(self):
        with G.progress_lock:
            self.current_progress = 0

    def step(self, step_name):
        with G.progress_lock:
            if self.current_step != step_name:
                self.current_step = step_name
                self.current_progress = 0

    def scene(self, scene_name):
        with G.progress_lock:
            self.current_scene = scene_name
            if scene_name:
                G.scene_status[scene_name] = "Rendering..."

    def scene_stats(self, scene_name, stats):
        with G.progress_lock:
            if stats:
                G.scene_status[scene_name] = util.humanReadableTimeDifference(
                    stats.total_render_time)
            else:
                G.scene_status[scene_name] = "Crashed :("

    def render_process(self, process):
        self.process = process

    def is_canceled(self):
        with G.progress_lock:
            return G.cancel

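# LoggerProviderSink deliberately turns every foundation log level into a
# no-op: the benchmark UI reports status through the progress sink above
# instead of printing to the console.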
class LoggerProviderSink:
    def HEADER(self, *args):
        pass

    def WARNING(self, *args):
        pass

    def ERROR(self, *args):
        pass

    def OK(self, *args):
        pass

    def BOLD(self, *args):
        pass

    def INFO(self, *args):
        pass

    def DEBUG(self, *args):
        pass

    def FATAL(self, *args):
        pass


################################################################################
# Benchmark thread.


def string_strip_trademark(name):
    return name.replace("(R)", "").replace("(TM)", "")


def correct_device_name(name):
    if (name.startswith("TITAN") or
            name.startswith("Quadro") or
            name.startswith("GeForce")):
        return "Nvidia " + name
    if name.startswith("Radeon"):
        return "AMD " + name
    return name


def get_gpu_names(system_info):
    gpu_names = []
    for device in system_info["devices"]:
        if device["type"] == "CPU":
            continue
        gpu_names.append(correct_device_name(device["name"]))
    return gpu_names


def indent_gpu_names(gpu_names):
    indented_names = []
    for name in gpu_names:
        indented_names.append("  • " + name)
    return indented_names


def construct_platform_string(system_info):
    """
    Construct human readable platform string to show in the interface.
    """
    result = ""
    result += "OS: {} {}".format(system_info["system"],
                                 system_info["bitness"])
    result += "\nCPU: {}".format(
        string_strip_trademark(system_info["cpu_brand"]))
    gpu_names = get_gpu_names(system_info)
    num_gpus = len(gpu_names)
    if num_gpus:
        if num_gpus == 1:
            result += "\nGPU: {}".format(gpu_names[0])
        else:
            result += "\nGPUs:\n{}".format("\n".join(indent_gpu_names(gpu_names)))
    return result


def convert_result_to_json_dict(ctx, results):
    # Convert custom classes to dictionaries for easier JSON dump.
    json_results = results
    stats = json_results['scenes']
    json_results['scenes'] = []
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        stat = stats[scene]
        if stat:
            stat = stat.asDict()
            stat['result'] = 'OK'
        else:
            stat = {'result': 'CRASH'}
        json_results['scenes'].append({'name': scene,
                                       'stats': stat})
    return json_results


def system_info_get(ctx):
    # This is all system information Blender knows.
    # NOTE: We override executable since cpuinfo uses it, and it is set
    # to blender.
    old_executable = sys.executable
    sys.executable = bpy.app.binary_path_python
    info = system_info.gatherSystemInfo(ctx)
    sys.executable = old_executable
    return info

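# "Radeon RX Vega" is an ambiguous device name, so both the reported system
# info and the device info get the OpenCL max-compute-units count appended to
# it (e.g. "Radeon RX Vega 56" vs. "Radeon RX Vega 64") to tell the cards
# apart in the results.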
def modify_system_info(system_info):
    compute_units = query_opencl_compute_units()
    for device in system_info["devices"]:
        device_type = device["type"]
        if device_type != "OPENCL":
            continue
        index = find_first_device_index(compute_units, device['name'])
        if index != -1:
            if device["name"] == "Radeon RX Vega":
                device["name"] += " " + compute_units[index][1]
            del compute_units[index]
    return system_info


def modify_device_info(device_info):
    compute_device = bpy.context.scene.compute_device
    device_type, device_name, compute_units, device_index = compute_device.split(":")
    if device_info["device_type"] == "OPENCL":
        compute_devices = []
        for device in device_info["compute_devices"]:
            if device == "Radeon RX Vega":
                device += " " + compute_units
            compute_devices.append(device)
        device_info["compute_devices"] = compute_devices
    return device_info

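# benchmark_thread() is the worker-thread entry point started by the run
# operators below. It never touches the UI directly: it only reads and writes
# the shared state in G (under G.progress_lock) and returns early whenever
# G.cancel has been set.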
def benchmark_thread(ctx):
    with G.progress_lock:
        G.progress_status = "Collecting system information."
        if G.cancel:
            return

    blender_system_info = system_info_get(ctx)

    # This is the actual device configuration which is used to render the
    # benchmark scene.
    blender_device_info = benchrunner.benchmarkGetDeviceInfo(ctx)
    if not blender_device_info['device_type']:
        # TODO(sergey): Report an error somehow.
        return

    with G.progress_lock:
        G.result_platform = construct_platform_string(blender_system_info)
        if G.cancel:
            return
        G.progress_status = "Preparing render."

    all_stats = benchrunner.benchmarkAll(ctx)
    # Gather all information together.
    timestamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
    result = convert_result_to_json_dict(ctx, {
        "timestamp": timestamp,
        "blender_version": system_info.getBlenderVersion(ctx),
        "system_info": modify_system_info(blender_system_info),
        "device_info": modify_device_info(blender_device_info),
        "scenes": all_stats if all_stats else {}
    })

    with G.progress_lock:
        if G.cancel:
            return
        G.result_dict = result


################################################################################
# Panels

def ui_scale_factor(x):
    # To position the buttons exactly we add spacing, but there can be
    # rounding errors with non-integer DPI scaling. We roughly compensate
    # for those here.
    ui_scale = bpy.context.user_preferences.system.ui_scale
    widget_height = 20 * ui_scale
    return x * widget_height / int(widget_height)

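# The main panel fills the whole benchmark space (its header is hidden) and
# shows one of the per-state screens: the welcome screen with the run buttons,
# or the submit screen once results are available. draw() dispatches on
# G.state; states without an entry (e.g. while a benchmark is running) draw
# nothing here.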
class BENCHMARK_PT_main(Panel):
    bl_label = "Benchmark"
    bl_options = {'HIDE_HEADER'}
    bl_space_type = 'BENCHMARK'
    bl_region_type = 'WINDOW'

    def draw_link(self, context):
        layout = self.layout

        col = layout.column()
        col.scale_y = ui_scale_factor(0.35)
        col.label(text="")

        col = layout.column()

        sub = col.row()
        sub.alignment = 'RIGHT'
        sub.emboss = 'LINK'
        sub.scale_y = 1.5
        sub.active = False
        sub.operator("benchmark.opendata_link")

    def draw_welcome(self, context):
        layout = self.layout

        split = layout.split(0.65)
        split.label()
        split = split.split(0.97)

        col = split.column()
        sub = col.row()
        sub.scale_y = ui_scale_factor(64.0)
        sub.separator()

        compute_device_list_get(None, context)
        if len(G.cached_compute_devices) > 1:
            sub = col.row()
            sub.scale_y = 1.5
            sub.prop(context.scene, "compute_device", text="")
        else:
            sub = col.row()
            sub.scale_y = 1.5
            sub.label(text="")

        sub = col.row()
        sub.scale_y = 2.25
        sub.operator("benchmark.run_quick", text="QUICK BENCHMARK")

        col.separator()

        sub = col.row()
        sub.emboss = 'LINK'
        sub.scale_y = 1.5
        sub.operator("benchmark.run_complete", text="RUN COMPLETE BENCHMARK")

        split.label()

        self.draw_link(context)

    def draw_submit(self, context):
        layout = self.layout

        split = layout.split(0.65)
        split.label()
        split = split.split(0.97)

        col = split.column()
        sub = col.row()
        sub.scale_y = ui_scale_factor(64.0)
        sub.separator()

        sub = col.row()
        sub.enabled = G.state != G.State.submitting
        sub.scale_y = 2.25
        text = "SHARE ONLINE"
        if G.results_submitted and G.state != G.State.submitting:
            if G.results_url:
                # If we have a results URL, open it upon clicking the button.
                sub.operator("wm.url_open", text="Shared!").url = G.results_url
            else:
                sub.enabled = False
                sub.operator("benchmark.share", text=text)
        else:
            if G.state == G.State.submitting:
                text = "Submitting..."
            elif G.submission_exception:
                text = "Retry Submission"
            sub.operator("benchmark.share", text=text)

        if G.submission_exception:
            sub.operator("benchmark.save_error_report", text="Save Error Report...")

        sub = col.row()
        subsub = sub.split()
        subsub.emboss = 'LINK'
        subsub.scale_y = 1.5
        subsub.operator("benchmark.save", text="Save Locally...")

        subsub = sub.split()
        subsub.emboss = 'LINK'
        subsub.scale_y = 1.5
        subsub.operator("benchmark.restart", text="Start Again")

        split.label()

    def draw(self, context):
        with G.progress_lock:
            state = G.state

        draw_funcs = {
            G.State.welcome: self.draw_welcome,
            G.State.complete: self.draw_submit,
            G.State.submitting: self.draw_submit,
        }
        func = draw_funcs.get(state, None)
        if func:
            func(context)


################################################################################
# Operator

def blender_benchmark_data_dir_get():
    system = platform.system()
    if system == "Linux" or system == "Windows":
        return os.path.dirname(bpy.app.binary_path)
    elif system == "Darwin":
        return os.path.join(os.path.dirname(bpy.app.binary_path), "..", "Resources")
    else:
        raise Exception("Needs implementation")


def blender_executable_get():
    benchmark_data_dir = blender_benchmark_data_dir_get()
    system = platform.system()
    if system == "Linux":
        return os.path.join(benchmark_data_dir, "blender", "blender")
    elif system == "Windows":
        return os.path.join(benchmark_data_dir, "blender", "blender.exe")
    elif system == "Darwin":
        return os.path.join(benchmark_data_dir, "blender", "blender.app", "Contents", "MacOS",
                            "blender")
    else:
        raise Exception("Needs implementation")


def scenes_dir_get():
    benchmark_data_dir = blender_benchmark_data_dir_get()
    return os.path.join(benchmark_data_dir, "scenes")


def configure_script_get():
    script_directory = os.path.dirname(os.path.realpath(__file__))
    benchmark_script_directory = os.path.dirname(script_directory)
    return os.path.join(benchmark_script_directory, "configure.py")

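# BENCHMARK_OT_run_base implements the common modal-operator machinery for
# both run buttons: it starts benchmark_thread() in a Python thread, polls it
# from a window-manager timer every 0.1s, forwards progress into G for
# drawing, and turns Esc into a cancel request (SIGINT to the render process,
# or kill() on Windows).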
class BENCHMARK_OT_run_base(bpy.types.Operator):
    run_type = 'QUICK'  # or 'COMPLETE'
    benchmark_context = None
    thread = None
    timer = None
    progress_provider = None
    logger_provider = None
    tmpdir = None

    def setup_sink(self):
        self.progress_provider = ProgressProviderSink()
        self.logger_provider = LoggerProviderSink()
        progress.setProvider(self.progress_provider)
        logger.setProvider(self.logger_provider)

    def update_status(self, context):
        with G.progress_lock:
            step = self.progress_provider.current_step
            if G.cancel:
                G.progress_status = "Canceling..."
            elif step == 'WARM_UP':
                G.progress_status = "Rendering warm-up pass..."
            elif step == 'RUN':
                G.current_progress = self.progress_provider.current_progress
                G.progress_status = "Rendering... Press Esc to stop."
            context.area.tag_redraw()
            # Path to currently displayed background image.
            current_scene = self.progress_provider.current_scene
            if current_scene:
                G.background_image_path = os.path.join(
                    self.benchmark_context.scenes_dir,
                    current_scene,
                    current_scene + ".png")
            else:
                G.background_image_path = ""
            # Update per-scene status string.
            G.result_stats = ""
            for scene in G.scene_status:
                G.result_stats += "{}: {}\n".format(
                    scene, G.scene_status[scene])

    def done(self, context):
        wm = context.window_manager
        wm.event_timer_remove(self.timer)
        # Restore all modifications to the benchmark foundation.
        progress.restoreDefaultProvider()
        logger.restoreDefaultProvider()
        # Destroy objects of sinks.
        del self.progress_provider
        del self.logger_provider
        self.progress_provider = None
        self.logger_provider = None
        # Construct final stats string.
        if G.cancel:
            G.reset()
        elif G.result_dict:
            G.result_stats = ""
            for name_stat in G.result_dict["scenes"]:
                stat = name_stat["stats"]
                if G.result_stats:
                    G.result_stats += "\n"
                if stat["result"] == "OK":
                    G.result_stats += "{}: {}".format(name_stat['name'],
                                                      util.humanReadableTimeDifference(
                                                          stat["total_render_time"]))
                else:
                    G.result_stats += "{}: {}".format(name_stat['name'],
                                                      stat["result"])
            G.state = G.State.complete
        else:
            G.result_stats = ""
            G.state = G.State.welcome
        # TODO(sergey): Use a nicer picture for the final slide.
        G.background_image_path = ""
        # Tag for nice redraw.
        with G.progress_lock:
            G.progress_status = ''
            G.current_progress = 0.0
            context.area.tag_redraw()
        self.tmpdir = None

    def modal(self, context, event):
        if event.type == 'TIMER':
            if self.thread.is_alive():
                self.update_status(context)
                return {'PASS_THROUGH'}
            else:
                self.done(context)
                return {'FINISHED'}
        elif event.type == 'ESC':
            self.cancel_request(context)

        return {'PASS_THROUGH'}

    def invoke(self, context, event):
        with G.progress_lock:
            G.cancel = False
            G.result_platform = ""
            G.progress_status = "Initializing..."
            G.state = G.State.running
            context.area.tag_redraw()

        compute_device = context.scene.compute_device
        device_type, device_name, compute_units, device_index = compute_device.split(":")

        self.tmpdir = tempfile.TemporaryDirectory(prefix="blender_benchmark_")

        # Before doing anything, make sure we have all sinks set up, so we do
        # not miss any progress report.
        self.setup_sink()
        wm = context.window_manager
        ctx = benchmark_context.Context()
        ctx.blender = blender_executable_get()
        ctx.configure_script = configure_script_get()
        if self.run_type == 'QUICK':
            ctx.scenes = QUICK_SCENES
        else:
            ctx.scenes = COMPLETE_SCENES
        for scene in ctx.scenes:
            G.scene_status[scene] = "Queued"
        ctx.scenes_dir = scenes_dir_get()
        ctx.device_type = device_type
        ctx.device_name = device_name
        ctx.single_compute_scene = True
        ctx.image_output_dir = self.tmpdir.name
        # Set this to True when having multiple GPUs of the same name and only
        # one of them is to be enabled. Or when requesting GPU render without
        # specifying a GPU name.
        ctx.device_single = True
        ctx.device_index = device_index
        # ctx.image_output_dir = "/tmp/"
        self.benchmark_context = ctx
        # Create thread for the actual benchmark.
        self.thread = Thread(target=benchmark_thread,
                             args=(self.benchmark_context,))
        self.thread.start()
        # Create timer to query thread status.
        self.timer = wm.event_timer_add(0.1, context.window)
        # Register self as modal.
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}

    def cancel_request(self, context):
        with G.progress_lock:
            G.cancel = True
            context.area.tag_redraw()

        if self.progress_provider.process:
            if platform.system() == "Windows":
                self.progress_provider.process.kill()
            else:
                import signal
                self.progress_provider.process.send_signal(signal.SIGINT)

    def cancel(self, context):
        self.cancel_request(context)
        if self.timer:
            wm = context.window_manager
            wm.event_timer_remove(self.timer)
        if self.thread:
            self.thread.join()


class BENCHMARK_OT_run_quick(BENCHMARK_OT_run_base):
    "Run the quick Blender benchmark"
    bl_label = "Run Benchmark"
    bl_idname = "benchmark.run_quick"

    run_type = 'QUICK'


class BENCHMARK_OT_run_complete(BENCHMARK_OT_run_base):
    "Run the complete Blender benchmark (might take up to 1.5 hours and require 4 GiB of GPU memory)"
    bl_label = "Run Benchmark"
    bl_idname = "benchmark.run_complete"

    run_type = 'COMPLETE'

class BENCHMARK_OT_save(bpy.types.Operator):
    bl_idname = "benchmark.save"
    bl_label = "Save Benchmark Result"

    filepath: bpy.props.StringProperty(
        subtype='FILE_PATH',
        options={'SKIP_SAVE'},
    )

    def execute(self, context):
        with open(self.filepath, "w") as f:
            f.write(json.dumps(G.result_dict, sort_keys=True, indent=2))
        make_buttons_green()
        return {'FINISHED'}

    def invoke(self, context, event):
        import os
        make_buttons_default()

        if not self.filepath:
            self.filepath = os.path.join(
                os.path.expanduser("~"), "benchmark-result.txt")

        wm = context.window_manager
        wm.fileselect_add(self)
        return {'RUNNING_MODAL'}

    def cancel(self, context):
        make_buttons_green()


class BENCHMARK_OT_save_error_report(bpy.types.Operator):
    bl_idname = "benchmark.save_error_report"
    bl_label = "Save Error Report"

    filepath: bpy.props.StringProperty(
        subtype='FILE_PATH',
        options={'SKIP_SAVE'},
    )

    @classmethod
    def poll(cls, context) -> bool:
        with G.progress_lock:
            return G.submission_exception is not None

    def execute(self, context):
        import traceback
        import functools
        import pprint
        from benchmark.version import version

        with G.progress_lock, open(self.filepath, "w", encoding='utf-8') as outfile:
            p = functools.partial(print, file=outfile)
            p(20 * '=', 'Benchmark Client')
            p('version:', version)
            if 'MYDATA' in os.environ:
                p('mydata URL:', os.environ['MYDATA'])
            p()

            p(20 * '=', 'Exception:')
            # We don't have the traceback info here any more, hence 'exception only'.
            ex = G.submission_exception
            exc_info = traceback.format_exception_only(type(ex), ex)
            for line in exc_info:
                p(line)

            p()
            p(20 * '=', 'State:')
            g_dict = {k: v for k, v in G.__dict__.items()
                      if not k.startswith('_')}
            p(pprint.pformat(g_dict, width=120))

        make_buttons_green()
        return {'FINISHED'}

    def invoke(self, context, event):
        import os
        make_buttons_default()

        if not self.filepath:
            self.filepath = os.path.join(
                os.path.expanduser("~"), "benchmark-error-report.txt")

        wm = context.window_manager
        wm.fileselect_add(self)
        return {'RUNNING_MODAL'}

    def cancel(self, context):
        make_buttons_green()

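# Sharing happens in a background thread provided by the submission module;
# like the run operators, this operator just polls that thread from a window
# timer and redraws the area until the upload finishes.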
class BENCHMARK_OT_share(bpy.types.Operator):
    bl_idname = "benchmark.share"
    bl_label = "Share Benchmark Result"

    timer = None
    thread = None

    def modal(self, context, event):
        if event.type == 'TIMER':
            if self.thread.is_alive():
                context.area.tag_redraw()
                return {'PASS_THROUGH'}
            else:
                self.done(context)
                return {'FINISHED'}

        return {'PASS_THROUGH'}

    def invoke(self, context, event):
        from benchmark import submission

        make_buttons_default()
        self.thread = submission.submit_benchmark_bgthread(G.result_dict)

        # Create timer to query thread status.
        wm = context.window_manager
        self.timer = wm.event_timer_add(0.1, context.window)

        # Register self as modal.
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}

    def done(self, context):
        make_buttons_green()

        if self.timer:
            wm = context.window_manager
            wm.event_timer_remove(self.timer)
        if self.thread:
            self.thread.join()

        context.area.tag_redraw()


class BENCHMARK_OT_opendata_link(bpy.types.Operator):
    bl_idname = "benchmark.opendata_link"
    bl_label = "opendata.blender.org"

    def invoke(self, context, event):
        bpy.ops.wm.url_open('INVOKE_DEFAULT', url="https://opendata.blender.org")
        return {'FINISHED'}


################################################################################
# Restart benchmark.


class BENCHMARK_OT_restart(bpy.types.Operator):
    bl_idname = "benchmark.restart"
    bl_label = "Go to the home screen and choose another benchmark to run"

    def invoke(self, context, event):
        G.reset()
        return {'FINISHED'}


################################################################################
# Configuration.


def cl_query_executable_get():
    benchmark_data_dir = blender_benchmark_data_dir_get()
    system = platform.system()
    if system == "Linux":
        return os.path.join(benchmark_data_dir, "bin", "cl_query")
    elif system == "Windows":
        return os.path.join(benchmark_data_dir, "bin", "cl_query.exe")
    elif system == "Darwin":
        return os.path.join(benchmark_data_dir, "bin", "cl_query")
    else:
        raise Exception("Needs implementation")

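# The bundled cl_query helper is expected to print one line per OpenCL device
# in the form "<device name>:<max compute units>"; splitting on the last colon
# keeps device names that themselves contain colons intact.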
def query_opencl_compute_units():
    binary = cl_query_executable_get()
    output = subprocess.run([binary], stdout=subprocess.PIPE).stdout
    lines = output.splitlines()
    compute_units = []
    for line in lines:
        (name, max_compute_units) = line.rsplit(b':', 1)
        compute_units.append((name.decode(), max_compute_units.decode()))
    return compute_units


def find_first_device_index(compute_units, device_name):
    if not compute_units:
        return -1
    for index, value in enumerate(compute_units):
        if value[0] == device_name:
            return index
    return -1

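# Items callback for the Scene.compute_device enum. Each identifier is encoded
# as "TYPE:NAME:COMPUTE_UNITS:INDEX" (for example "CPU:::"), which the run
# operators split back apart to configure the benchmark context. The list is
# cached in G so the relatively expensive system info query only runs once.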
def compute_device_list_get(self, context):
    if G.cached_compute_devices:
        return G.cached_compute_devices
    compute_devices = [('CPU:::', "CPU", "")]
    if not G.cached_system_info:
        ctx = benchmark_context.Context()
        ctx.blender = blender_executable_get()
        ctx.configure_script = configure_script_get()
        G.cached_system_info = system_info_get(ctx)
    compute_units = query_opencl_compute_units()
    device_indices = {}
    for device in G.cached_system_info["devices"]:
        raw_device_name = device["name"]
        device_type = device["type"]
        if raw_device_name in device_indices:
            device_indices[raw_device_name] += 1
            device_index = device_indices[raw_device_name]
        else:
            device_indices[raw_device_name] = 0
            device_index = 0
        if device_type == "CPU":
            continue
        elif device_type == "OPENCL":
            device_name = correct_device_name(device["name"])
            index = find_first_device_index(compute_units, device['name'])
            device_compute_units = ""
            if index != -1:
                if device["name"] == "Radeon RX Vega":
                    device_name += " " + compute_units[index][1]
                device_compute_units = str(compute_units[index][1])
                del compute_units[index]
            device_id = "{}:{}:{}:{}".format(device_type,
                                             device["name"],
                                             device_compute_units,
                                             device_index)
            compute_devices.append((device_id, device_name, ""))
        elif device_type == "CUDA":
            device_name = correct_device_name(device["name"])
            device_id = "{}:{}::{}".format(device_type,
                                           device["name"],
                                           device_index)
            compute_devices.append((device_id, device_name, ""))
    G.cached_compute_devices = compute_devices
    return compute_devices


################################################################################
# Tweak User Preferences


default_wcol_tool_inner = None
default_wcol_tool_inner_sel = None
default_wcol_tool_outline = None


def backup_buttons_colors():
    global default_wcol_tool_inner
    global default_wcol_tool_inner_sel
    global default_wcol_tool_outline
    userpref = bpy.context.user_preferences
    theme = userpref.themes[0]
    default_wcol_tool_inner = theme.user_interface.wcol_tool.inner[:]
    default_wcol_tool_inner_sel = theme.user_interface.wcol_tool.inner_sel[:]
    default_wcol_tool_outline = theme.user_interface.wcol_tool.outline[:]


def make_buttons_green():
    userpref = bpy.context.user_preferences
    theme = userpref.themes[0]
    theme.user_interface.wcol_tool.inner = [0.408, 0.590, 0.129, 1.0]
    theme.user_interface.wcol_tool.inner_sel = [0.308, 0.490, 0.029, 1.0]
    theme.user_interface.wcol_tool.outline = [0.408, 0.590, 0.129]


def make_buttons_default():
    userpref = bpy.context.user_preferences
    theme = userpref.themes[0]
    theme.user_interface.wcol_tool.inner = default_wcol_tool_inner
    theme.user_interface.wcol_tool.inner_sel = default_wcol_tool_inner_sel
    theme.user_interface.wcol_tool.outline = default_wcol_tool_outline

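# The remaining statements run once when the module is imported: they disable
# the quit confirmation dialog, darken the benchmark space background, back up
# the theme's tool button colors before switching them to the green accent
# used by the benchmark UI, and bump the widget font size.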
userpref = bpy.context.user_preferences
theme = userpref.themes[0]
userpref.view.use_quit_dialog = False
theme.benchmark.space.back = [0.26, 0.26, 0.26]
backup_buttons_colors()
make_buttons_green()

style = userpref.ui_styles[0]
style.widget.points = 12

################################################################################
# Registration

classes = (
    BENCHMARK_PT_main,
    BENCHMARK_OT_restart,
    BENCHMARK_OT_run_quick,
    BENCHMARK_OT_run_complete,
    BENCHMARK_OT_save,
    BENCHMARK_OT_save_error_report,
    BENCHMARK_OT_share,
    BENCHMARK_OT_opendata_link,
)

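# Scene.compute_device uses a dynamic items callback, so the enum reflects
# whatever CPU/CUDA/OpenCL devices were detected on the machine running the
# benchmark.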
Scene.compute_device = EnumProperty(
    items=compute_device_list_get,
    name="Compute Device",
    description="Compute device to run benchmark on")