# blender-benchmark-bundle/benchmark/space/__init__.py

import datetime
import json
import os
import platform
import subprocess
import sys
import tempfile
from threading import Thread
import bpy
from bpy.types import Panel, Scene
from bpy.props import EnumProperty, IntProperty, FloatProperty
from ..foundation import (benchrunner,
                          logger,
                          progress,
                          system_info,
                          util)
from ..foundation import context as benchmark_context
from .global_state import G
from . import draw

QUICK_SCENES = ["bmw27",
                "classroom"]
COMPLETE_SCENES = ["barbershop_interior",
                   "bmw27",
                   "classroom",
                   "fishy_cat",
                   "koro",
                   "pavillon_barcelona"]
handle_draw = bpy.types.SpaceBenchmark.draw_handler_add(
    draw.benchmark_draw_post_pixel, (None, None), 'WINDOW', 'POST_PIXEL')

################################################################################
# Benchmark foundation integration.
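# ProgressProviderSink is installed through progress.setProvider() (see
# BENCHMARK_OT_run_base.setup_sink()). Its callbacks run on the benchmark
# thread, so every update of shared state is guarded by G.progress_lock.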
class ProgressProviderSink:
    current_progress = 0.0
    current_step = ''
    current_scene = ''
    process = None

    def __init__(self):
        self.current_progress = 0.0
        self.current_step = ''
        self.process = None

    def progress(self, count, total, prefix="", suffix=""):
        with G.progress_lock:
            if total != 0:
                self.current_progress = float(count) / float(total)
            else:
                self.current_progress = 0.0

    def clear(self):
        with G.progress_lock:
            self.current_progress = 0

    def step(self, step_name):
        with G.progress_lock:
            if self.current_step != step_name:
                self.current_step = step_name
                self.current_progress = 0

    def scene(self, scene_name):
        with G.progress_lock:
            self.current_scene = scene_name
            if scene_name:
                G.scene_status[scene_name] = "Rendering..."

    def scene_stats(self, scene_name, stats):
        with G.progress_lock:
            if stats:
                G.scene_status[scene_name] = util.humanReadableTimeDifference(
                    stats.total_render_time)
            else:
                G.scene_status[scene_name] = "Crashed :("

    def render_process(self, process):
        self.process = process

    def is_canceled(self):
        with G.progress_lock:
            return G.cancel


class LoggerProviderSink:
    """Silence benchmark foundation logging by discarding all messages."""

    def HEADER(self, *args):
        pass

    def WARNING(self, *args):
        pass

    def ERROR(self, *args):
        pass

    def OK(self, *args):
        pass

    def BOLD(self, *args):
        pass

    def INFO(self, *args):
        pass

    def DEBUG(self, *args):
        pass

    def FATAL(self, *args):
        pass

################################################################################
# Benchmark thread.
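# Example (illustrative brand string): "Intel(R) Core(TM) i7-8700K" becomes
# "Intel Core i7-8700K".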
def string_strip_trademark(name):
    return name.replace("(R)", "").replace("(TM)", "")


def correct_device_name(name):
    if (name.startswith("TITAN") or
            name.startswith("Quadro") or
            name.startswith("GeForce")):
        return "Nvidia " + name
    if name.startswith("Radeon"):
        return "AMD " + name
    return name


def get_gpu_names(system_info):
    gpu_names = []
    for device in system_info["devices"]:
        if device["type"] == "CPU":
            continue
        gpu_names.append(correct_device_name(device["name"]))
    return gpu_names


def indent_gpu_names(gpu_names):
    indented_names = []
    for name in gpu_names:
        # Prefix each name so it reads indented under the "GPUs:" label.
        indented_names.append("  " + name)
    return indented_names


def construct_platform_string(system_info):
    """
    Construct human readable platform string to show in the interface.
    """
    result = ""
    result += "OS: {} {}".format(system_info["system"],
                                 system_info["bitness"])
    result += "\nCPU: {}".format(
        string_strip_trademark(system_info["cpu_brand"]))
    gpu_names = get_gpu_names(system_info)
    num_gpus = len(gpu_names)
    if num_gpus:
        if num_gpus == 1:
            result += "\nGPU: {}".format(gpu_names[0])
        else:
            result += "\nGPUs:\n{}".format(
                "\n".join(indent_gpu_names(gpu_names)))
    return result
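

# Illustrative shape of the converted result ('total_render_time' is the
# field read back elsewhere in this file; other stats fields come from the
# stats object's asDict() and are not shown):
#   {'scenes': [{'name': 'bmw27',
#                'stats': {'result': 'OK', 'total_render_time': 123.4}}]}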
def convert_result_to_json_dict(ctx, results):
    # Convert custom classes to dictionaries for easier JSON dump.
    json_results = results
    stats = json_results['scenes']
    json_results['scenes'] = []
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        stat = stats[scene]
        if stat:
            stat = stat.asDict()
            stat['result'] = 'OK'
        else:
            stat = {'result': 'CRASH'}
        json_results['scenes'].append({'name': scene,
                                       'stats': stat})
    return json_results


def system_info_get(ctx):
    # This is all system information Blender knows.
    # NOTE: We override executable since cpuinfo uses it, and it is set
    # to blender.
    old_executable = sys.executable
    sys.executable = bpy.app.binary_path_python
    info = system_info.gatherSystemInfo(ctx)
    sys.executable = old_executable
    return info


def modify_system_info(system_info):
    compute_units = query_opencl_compute_units()
    for device in system_info["devices"]:
        device_type = device["type"]
        if device_type != "OPENCL":
            continue
        index = find_first_device_index(compute_units, device['name'])
        if index != -1:
            # "Radeon RX Vega" is reported without a model number, so append
            # the compute unit count to tell the variants apart.
            if device["name"] == "Radeon RX Vega":
                device["name"] += " " + compute_units[index][1]
            del compute_units[index]
    return system_info


def modify_device_info(device_info):
    compute_device = bpy.context.scene.compute_device
    device_type, device_name, compute_units, device_index = compute_device.split(":")
    if device_info["device_type"] == "OPENCL":
        compute_devices = []
        for device in device_info["compute_devices"]:
            if device == "Radeon RX Vega":
                device += " " + compute_units
            compute_devices.append(device)
        device_info["compute_devices"] = compute_devices
    return device_info


def benchmark_thread(ctx):
    with G.progress_lock:
        G.progress_status = "Collecting system information."
        if G.cancel:
            return
    blender_system_info = system_info_get(ctx)
    # This is the actual device configuration which is used to render the
    # benchmark scenes.
    blender_device_info = benchrunner.benchmarkGetDeviceInfo(ctx)
    if not blender_device_info['device_type']:
        # TODO(sergey): Report an error somehow.
        return
    with G.progress_lock:
        G.result_platform = construct_platform_string(blender_system_info)
        if G.cancel:
            return
        G.progress_status = "Preparing render."
    all_stats = benchrunner.benchmarkAll(ctx)
    # Gather all information together.
    timestamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
    result = convert_result_to_json_dict(ctx, {
        "timestamp": timestamp,
        "blender_version": system_info.getBlenderVersion(ctx),
        "system_info": modify_system_info(blender_system_info),
        "device_info": modify_device_info(blender_device_info),
        "scenes": all_stats if all_stats else {}
    })
    with G.progress_lock:
        if G.cancel:
            return
        G.result_dict = result

################################################################################
# Panels
def ui_scale_factor(x):
    # To position the buttons exactly we add spacing, but there can be
    # rounding errors with non-integer DPI scaling. We roughly compensate
    # for those here.
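    # Example (illustrative): with ui_scale = 1.07 the widget height is
    # 21.4 px, but Blender draws it at int(21.4) = 21 px, so x is scaled
    # by 21.4 / 21 ~= 1.019 to compensate.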
    ui_scale = bpy.context.user_preferences.system.ui_scale
    widget_height = 20 * ui_scale
    return x * widget_height / int(widget_height)


class BENCHMARK_PT_main(Panel):
    bl_label = "Benchmark"
    bl_options = {'HIDE_HEADER'}
    bl_space_type = 'BENCHMARK'
    bl_region_type = 'WINDOW'

    def draw_link(self, context):
        layout = self.layout
        col = layout.column()
        col.scale_y = ui_scale_factor(0.35)
        col.label(text="")
        col = layout.column()
        sub = col.row()
        sub.alignment = 'RIGHT'
        sub.emboss = 'LINK'
        sub.scale_y = 1.5
        sub.active = False
        sub.operator("benchmark.opendata_link")

    def draw_welcome(self, context):
        layout = self.layout
        split = layout.split(0.65)
        split.label()
        split = split.split(0.97)
        col = split.column()
        sub = col.row()
        sub.scale_y = ui_scale_factor(64.0)
        sub.separator()
        compute_device_list_get(None, context)
        if len(G.cached_compute_devices) > 1:
            sub = col.row()
            sub.scale_y = 1.5
            sub.prop(context.scene, "compute_device", text="")
        else:
            sub = col.row()
            sub.scale_y = 1.5
            sub.label(text="")
        sub = col.row()
        sub.scale_y = 2.25
        sub.operator("benchmark.run_quick", text="QUICK BENCHMARK")
        col.separator()
        sub = col.row()
        sub.emboss = 'LINK'
        sub.scale_y = 1.5
        sub.operator("benchmark.run_complete", text="RUN COMPLETE BENCHMARK")
        split.label()
        self.draw_link(context)

    def draw_submit(self, context):
        layout = self.layout
        split = layout.split(0.65)
        split.label()
        split = split.split(0.97)
        col = split.column()
        sub = col.row()
        sub.scale_y = ui_scale_factor(64.0)
        sub.separator()
        sub = col.row()
        sub.enabled = not G.results_submitted
        sub.scale_y = 2.25
        sub.operator("benchmark.share", text="SHARE ONLINE")
        sub = col.row()
        subsub = sub.split()
        subsub.emboss = 'LINK'
        subsub.scale_y = 1.5
        subsub.operator("benchmark.save", text="Save Locally...")
        subsub = sub.split()
        subsub.emboss = 'LINK'
        subsub.scale_y = 1.5
        subsub.operator("benchmark.restart", text="Start Again")
        split.label()

    def draw(self, context):
        screen_index = 0
        with G.progress_lock:
            if G.result_dict:
                screen_index = 2
            elif G.result_stats or G.progress_status:
                screen_index = 1
        if screen_index == 0:
            self.draw_welcome(context)
        elif screen_index == 2:
            self.draw_submit(context)

################################################################################
# Operator
def blender_benchmark_data_dir_get():
    system = platform.system()
    if system == "Linux" or system == "Windows":
        return os.path.dirname(bpy.app.binary_path)
    elif system == "Darwin":
        return os.path.join(os.path.dirname(bpy.app.binary_path),
                            "..", "Resources")
    else:
        raise Exception("Needs implementation")


def blender_executable_get():
    benchmark_data_dir = blender_benchmark_data_dir_get()
    system = platform.system()
    if system == "Linux":
        return os.path.join(benchmark_data_dir, "blender", "blender")
    elif system == "Windows":
        return os.path.join(benchmark_data_dir, "blender", "blender.exe")
    elif system == "Darwin":
        return os.path.join(benchmark_data_dir, "blender", "blender.app",
                            "Contents", "MacOS", "blender")
    else:
        raise Exception("Needs implementation")


def scenes_dir_get():
    benchmark_data_dir = blender_benchmark_data_dir_get()
    return os.path.join(benchmark_data_dir, "scenes")


def configure_script_get():
    script_directory = os.path.dirname(os.path.realpath(__file__))
    benchmark_script_directory = os.path.dirname(script_directory)
    return os.path.join(benchmark_script_directory, "configure.py")
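

# Bundle layout assumed by the path helpers in this file, relative to the
# benchmark data directory (next to the Blender binary, or Resources/ on
# macOS):
#   blender/  - patched Blender used for rendering
#   scenes/   - benchmark scene files
#   bin/      - cl_query(.exe), the OpenCL device query helper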


class BENCHMARK_OT_run_base(bpy.types.Operator):
    run_type = 'QUICK'  # or 'COMPLETE'
    benchmark_context = None
    thread = None
    timer = None
    progress_provider = None
    logger_provider = None
    tmpdir = None

    def setup_sink(self):
        self.progress_provider = ProgressProviderSink()
        self.logger_provider = LoggerProviderSink()
        progress.setProvider(self.progress_provider)
        logger.setProvider(self.logger_provider)

    def update_status(self, context):
        with G.progress_lock:
            step = self.progress_provider.current_step
            if G.cancel:
                G.progress_status = "Canceling..."
            elif step == 'WARM_UP':
                G.progress_status = "Rendering warm-up pass..."
            elif step == 'RUN':
                G.current_progress = self.progress_provider.current_progress
                G.progress_status = "Rendering... Press Esc to stop."
            context.area.tag_redraw()
            # Path to currently displayed background image.
            current_scene = self.progress_provider.current_scene
            if current_scene:
                G.background_image_path = os.path.join(
                    self.benchmark_context.scenes_dir,
                    current_scene,
                    current_scene + ".png")
            else:
                G.background_image_path = ""
            # Update per-scene status string.
            G.result_stats = ""
            for scene in G.scene_status:
                G.result_stats += "{}: {}\n".format(
                    scene, G.scene_status[scene])

    def done(self, context):
        wm = context.window_manager
        wm.event_timer_remove(self.timer)
        # Restore all modifications to the benchmark foundation.
        progress.restoreDefaultProvider()
        logger.restoreDefaultProvider()
        # Destroy objects of sinks.
        del self.progress_provider
        del self.logger_provider
        self.progress_provider = None
        self.logger_provider = None
        # Construct the final stats string.
        if G.cancel:
            G.reset()
        elif G.result_dict:
            G.result_stats = ""
            for name_stat in G.result_dict["scenes"]:
                stat = name_stat["stats"]
                if G.result_stats:
                    G.result_stats += "\n"
                if stat["result"] == "OK":
                    G.result_stats += "{}: {}".format(
                        name_stat['name'],
                        util.humanReadableTimeDifference(
                            stat["total_render_time"]))
                else:
                    G.result_stats += "{}: {}".format(name_stat['name'],
                                                      stat["result"])
        else:
            G.result_stats = ""
        # TODO(sergey): Use a nicer picture for the final slide.
        G.background_image_path = ""
        # Tag for nice redraw.
        with G.progress_lock:
            G.progress_status = ''
            G.current_progress = 0.0
        context.area.tag_redraw()
        self.tmpdir = None

    def modal(self, context, event):
        if event.type == 'TIMER':
            if self.thread.is_alive():
                self.update_status(context)
                return {'PASS_THROUGH'}
            else:
                self.done(context)
                return {'FINISHED'}
        elif event.type == 'ESC':
            self.cancel_request(context)
        return {'PASS_THROUGH'}

    def invoke(self, context, event):
        G.cancel = False
        G.result_platform = ""
        G.progress_status = "Initializing..."
        context.area.tag_redraw()
        compute_device = context.scene.compute_device
        device_type, device_name, compute_units, device_index = compute_device.split(":")
        self.tmpdir = tempfile.TemporaryDirectory(prefix="blender_benchmark_")
        # Before doing anything, make sure we have all sinks set up, so we do
        # not miss any progress report.
        self.setup_sink()
        wm = context.window_manager
        ctx = benchmark_context.Context()
        ctx.blender = blender_executable_get()
        ctx.configure_script = configure_script_get()
        if self.run_type == 'QUICK':
            ctx.scenes = QUICK_SCENES
        else:
            ctx.scenes = COMPLETE_SCENES
        for scene in ctx.scenes:
            G.scene_status[scene] = "Queued"
        ctx.scenes_dir = scenes_dir_get()
        ctx.device_type = device_type
        ctx.device_name = device_name
        ctx.single_compute_scene = True
        ctx.image_output_dir = self.tmpdir.name
        # Set this to True when having multiple GPUs of the same name and
        # only one of them is to be enabled, or when requesting GPU render
        # without specifying a GPU name.
        ctx.device_single = True
        ctx.device_index = device_index
        # ctx.image_output_dir = "/tmp/"
        self.benchmark_context = ctx
        # Create thread for the actual benchmark.
        self.thread = Thread(target=benchmark_thread,
                             args=(self.benchmark_context,))
        self.thread.start()
        # Create timer to query thread status.
        self.timer = wm.event_timer_add(0.1, context.window)
        # Register self as modal.
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}

    def cancel_request(self, context):
        with G.progress_lock:
            G.cancel = True
        context.area.tag_redraw()
        if self.progress_provider.process:
            if platform.system() == "Windows":
                self.progress_provider.process.kill()
            else:
                import signal
                self.progress_provider.process.send_signal(signal.SIGINT)

    def cancel(self, context):
        self.cancel_request(context)
        if self.timer:
            wm = context.window_manager
            wm.event_timer_remove(self.timer)
        if self.thread:
            self.thread.join()


class BENCHMARK_OT_run_quick(BENCHMARK_OT_run_base):
    """Run quick Blender benchmark"""
    bl_label = "Run Benchmark"
    bl_idname = "benchmark.run_quick"
    run_type = 'QUICK'


class BENCHMARK_OT_run_complete(BENCHMARK_OT_run_base):
    """Run complete Blender benchmark (might take 1.5 hours to finish and require 4GiB of GPU memory)"""
    bl_label = "Run Benchmark"
    bl_idname = "benchmark.run_complete"
    run_type = 'COMPLETE'


class BENCHMARK_OT_save(bpy.types.Operator):
    bl_idname = "benchmark.save"
    bl_label = "Save Benchmark Result"

    filepath: bpy.props.StringProperty(
        subtype='FILE_PATH',
        options={'SKIP_SAVE'},
    )

    def execute(self, context):
        with open(self.filepath, "w") as f:
            f.write(json.dumps(G.result_dict, sort_keys=True, indent=2))
        make_buttons_green()
        return {'FINISHED'}

    def invoke(self, context, event):
        make_buttons_default()
        if not self.filepath:
            self.filepath = os.path.join(
                os.path.expanduser("~"), "benchmark-result.txt")
        wm = context.window_manager
        wm.fileselect_add(self)
        return {'RUNNING_MODAL'}

    def cancel(self, context):
        make_buttons_green()


class BENCHMARK_OT_share(bpy.types.Operator):
    bl_idname = "benchmark.share"
    bl_label = "Share Benchmark Result"

    def execute(self, context):
        from benchmark import submission
        make_buttons_default()
        print('Submitting benchmark')
        try:
            submission.submit_benchmark(G.result_dict)
        except Exception as ex:
            self.report({'ERROR'}, f'Error submitting results:\n{str(ex)[:100]}')
            return {'CANCELLED'}
        print('Submission done')
        make_buttons_green()
        G.results_submitted = True
        return {'FINISHED'}


class BENCHMARK_OT_opendata_link(bpy.types.Operator):
    bl_idname = "benchmark.opendata_link"
    bl_label = "opendata.blender.org"

    def invoke(self, context, event):
        bpy.ops.wm.url_open('INVOKE_DEFAULT', url="https://opendata.blender.org")
        return {'FINISHED'}

################################################################################
# Restart benchmark.
class BENCHMARK_OT_restart(bpy.types.Operator):
    bl_idname = "benchmark.restart"
    bl_label = "Go to the home screen and choose another benchmark to run"

    def invoke(self, context, event):
        G.reset()
        return {'FINISHED'}

################################################################################
# Configuration.
def cl_query_executable_get():
    benchmark_data_dir = blender_benchmark_data_dir_get()
    system = platform.system()
    if system == "Linux":
        return os.path.join(benchmark_data_dir, "bin", "cl_query")
    elif system == "Windows":
        return os.path.join(benchmark_data_dir, "bin", "cl_query.exe")
    elif system == "Darwin":
        return os.path.join(benchmark_data_dir, "bin", "cl_query")
    else:
        raise Exception("Needs implementation")
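

# cl_query is assumed to print one device per line in the form
# "<device name>:<max compute units>" (the parser below splits on the last
# ':'), e.g.:
#   Radeon RX Vega:56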
def query_opencl_compute_units():
    binary = cl_query_executable_get()
    output = subprocess.run([binary], stdout=subprocess.PIPE).stdout
    lines = output.splitlines()
    compute_units = []
    for line in lines:
        (name, max_compute_units) = line.rsplit(b':', 1)
        compute_units.append((name.decode(), max_compute_units.decode()))
    return compute_units


def find_first_device_index(compute_units, device_name):
    if not compute_units:
        return -1
    for index, value in enumerate(compute_units):
        if value[0] == device_name:
            return index
    return -1
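

# Enum item identifiers built below follow "TYPE:NAME:COMPUTE_UNITS:INDEX",
# unpacked again in modify_device_info() and BENCHMARK_OT_run_base.invoke();
# e.g. "CPU:::" for the CPU entry, or "CUDA:<gpu name>::0" for the first
# CUDA device with a given name.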
def compute_device_list_get(self, context):
    if G.cached_compute_devices:
        return G.cached_compute_devices
    compute_devices = [('CPU:::', "CPU", "")]
    if not G.cached_system_info:
        ctx = benchmark_context.Context()
        ctx.blender = blender_executable_get()
        ctx.configure_script = configure_script_get()
        G.cached_system_info = system_info_get(ctx)
    compute_units = query_opencl_compute_units()
    device_indices = {}
    for device in G.cached_system_info["devices"]:
        raw_device_name = device["name"]
        device_type = device["type"]
        if raw_device_name in device_indices:
            device_indices[raw_device_name] += 1
            device_index = device_indices[raw_device_name]
        else:
            device_indices[raw_device_name] = 0
            device_index = 0
        if device_type == "CPU":
            continue
        elif device_type == "OPENCL":
            device_name = correct_device_name(device["name"])
            index = find_first_device_index(compute_units, device['name'])
            device_compute_units = ""
            if index != -1:
                if device["name"] == "Radeon RX Vega":
                    device_name += " " + compute_units[index][1]
                    device_compute_units = str(compute_units[index][1])
                del compute_units[index]
            device_id = "{}:{}:{}:{}".format(device_type,
                                             device["name"],
                                             device_compute_units,
                                             device_index)
            compute_devices.append((device_id, device_name, ""))
        elif device_type == "CUDA":
            device_name = correct_device_name(device["name"])
            device_id = "{}:{}::{}".format(device_type,
                                           device["name"],
                                           device_index)
            compute_devices.append((device_id, device_name, ""))
    G.cached_compute_devices = compute_devices
    return compute_devices

################################################################################
# Tweak User Preferences
default_wcol_tool_inner = None
default_wcol_tool_inner_sel = None
default_wcol_tool_outline = None


def backup_buttons_colors():
    global default_wcol_tool_inner
    global default_wcol_tool_inner_sel
    global default_wcol_tool_outline
    userpref = bpy.context.user_preferences
    theme = userpref.themes[0]
    default_wcol_tool_inner = theme.user_interface.wcol_tool.inner[:]
    default_wcol_tool_inner_sel = theme.user_interface.wcol_tool.inner_sel[:]
    default_wcol_tool_outline = theme.user_interface.wcol_tool.outline[:]


def make_buttons_green():
    userpref = bpy.context.user_preferences
    theme = userpref.themes[0]
    theme.user_interface.wcol_tool.inner = [0.408, 0.590, 0.129, 1.0]
    theme.user_interface.wcol_tool.inner_sel = [0.308, 0.490, 0.029, 1.0]
    theme.user_interface.wcol_tool.outline = [0.408, 0.590, 0.129]


def make_buttons_default():
    userpref = bpy.context.user_preferences
    theme = userpref.themes[0]
    theme.user_interface.wcol_tool.inner = default_wcol_tool_inner
    theme.user_interface.wcol_tool.inner_sel = default_wcol_tool_inner_sel
    theme.user_interface.wcol_tool.outline = default_wcol_tool_outline


# Apply the benchmark look when the module is loaded.
userpref = bpy.context.user_preferences
theme = userpref.themes[0]
userpref.view.use_quit_dialog = False
theme.benchmark.space.back = [0.26, 0.26, 0.26]
backup_buttons_colors()
make_buttons_green()
style = userpref.ui_styles[0]
style.widget.points = 12

################################################################################
# Registration
classes = (
    BENCHMARK_PT_main,
    BENCHMARK_OT_restart,
    BENCHMARK_OT_run_quick,
    BENCHMARK_OT_run_complete,
    BENCHMARK_OT_save,
    BENCHMARK_OT_share,
    BENCHMARK_OT_opendata_link,
)

Scene.compute_device = EnumProperty(
    items=compute_device_list_get,
    name="Compute Device",
    description="Compute device to run benchmark on")