blender-benchmark-bundle/benchmark/space/__init__.py
Commit 9af1ca1c49 by Sybren A. Stüvel: Moved global state into a class G
This allows other modules to do `from benchmark.space import G` and use
the global state too. This is possible because the name `G` keeps
referencing the same object (a class) even when resetting (contrary to
the old `global_xxx = None`, which assigns a new object reference).
2018-08-14 13:33:56 +02:00


import datetime
import json
import os
import platform
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import blf
import bpy
from bpy.types import Panel, Scene
from bpy.props import EnumProperty, IntProperty, FloatProperty
from ..foundation import (benchrunner,
logger,
progress,
system_info,
util)
from ..foundation import context as benchmark_context
################################################################################
# Global state.
QUICK_SCENES = ["bmw27",
"classroom"]
COMPLETE_SCENES = ["barbershop_interior",
"bmw27",
"classroom",
"fishy_cat",
"koro",
"pavillon_barcelona"]
class G:
"""Global state of the Benchmark Client."""
result_platform = None
progress_status = None
result_stats = None
result_dict = None
background_image_path = ""
scene_status = {}
cancel = False
cached_system_info = None
cached_compute_devices = None
results_submitted = False
images = {}
current_progress = 0.0
progress_lock = Lock()
@classmethod
def reset(cls):
"""Reset the global state."""
cls.result_platform = None
cls.progress_status = None
cls.result_stats = None
cls.result_dict = None
cls.background_image_path = ""
cls.scene_status = {}
cls.results_submitted = False
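# Why a class instead of module-level globals: other modules can do
# `from benchmark.space import G` once and keep a working reference, because
# reset() mutates attributes on this one class object instead of rebinding a
# module-level name. Illustrative sketch (hypothetical importing module):
#
#     from benchmark.space import G
#     G.cancel = True   # immediately visible to every module holding G
#     G.reset()         # clears the result/progress state; the name G still
#                       # refers to the same class object
#
# With the old `global_xxx = None` pattern, resetting rebound the module-level
# name, so code that had imported the old object kept a stale reference.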
WELCOME_TEXT = "Run the Quick Benchmark on the selected device to\n" \
"get a fast measurement of your hardware's performance.\n" \
"Usually takes less than 30 minutes."
BLURB_TEXT = "Share your results with the world!\n" \
"Manage the uploaded benchmark data on your Blender ID."
################################################################################
# Draw Utilities.
font_id = 0
def viewport_size():
import bgl
viewport = bgl.Buffer(bgl.GL_INT, 4)
bgl.glGetIntegerv(bgl.GL_VIEWPORT, viewport)
return viewport[2], viewport[3]
def draw_text_center(text, x, y, shadow=False):
dim = blf.dimensions(font_id, text)
cx = x - int(dim[0] / 2)
cy = y - int(dim[1] / 2)
if shadow:
delta = 1
blf.color(font_id, 0.2, 0.2, 0.2, 1.0)
blf.position(font_id, cx + delta, cy - delta, 0)
blf.draw(font_id, text)
blf.color(font_id, 1.0, 1.0, 1.0, 1.0)
blf.position(font_id, cx, cy, 0)
blf.draw(font_id, text)
def draw_text_multiline(text, x, y, shadow=False):
ui_scale = bpy.context.user_preferences.system.ui_scale
height = int(blf.dimensions(font_id, "Dummy Text")[1])
space = int(8 * ui_scale)
for line in text.split('\n'):
if shadow:
delta = 1
blf.color(font_id, 0.2, 0.2, 0.2, 1.0)
blf.position(font_id, x + delta, y - height - delta, 0)
blf.draw(font_id, line)
blf.color(font_id, 1.0, 1.0, 1.0, 1.0)
blf.position(font_id, x, y - height, 0)
blf.draw(font_id, line)
y -= height + space
def draw_rect(x, y, w, h, color):
import gpu
gpu.draw.rect(x, y, x + w, y + h, color[0], color[1], color[2], color[3])
def draw_image(filepath, x, y, w, h):
if filepath not in G.images:
ima = bpy.data.images.load(filepath)
G.images[filepath] = ima
import gpu
gpu.draw.image(G.images[filepath], x, y, x + w, y + h)
################################################################################
# Draw.
def benchmark_draw_post_pixel(arg1, arg2):
G.progress_lock.acquire()
progress_status = G.progress_status
result_platform = G.result_platform
result_stats = G.result_stats
result_dict = G.result_dict
G.progress_lock.release()
ui_scale = bpy.context.user_preferences.system.ui_scale
blf.color(font_id, 1.0, 1.0, 1.0, 1.0)
window_width, window_height = viewport_size()
# Image
image_h = 370 * ui_scale
image_y = window_height - image_h
if G.background_image_path:
draw_image(G.background_image_path, 0, image_y, window_width, image_h)
else:
splash_dir = os.path.dirname(os.path.abspath(__file__))
splash_filepath = os.path.join(splash_dir, 'splash.png')
draw_image(splash_filepath, 0, image_y, window_width, image_h)
if result_dict:
x = 0.5 * window_width
y = 0.70 * window_height
score = 0
for name_stats in G.result_dict["scenes"]:
stat = name_stats['stats']
if stat["result"] == "OK":
score += stat["total_render_time"]
else:
score = -1
if score >= 0:
blf.size(font_id, int(32 * ui_scale), 72)
draw_text_center("Your Time: {}" . format(
util.humanReadableTimeDifference(score)), x, y, shadow=True)
else:
blf.size(font_id, int(18 * ui_scale), 72)
draw_text_center("Unfortunately, crash happened :(", x, y, shadow=True)
blf.size(font_id, int(24 * ui_scale), 72)
draw_text_center("You can still share data of succeeded scenes!",
x, y - 26 * ui_scale, shadow=True)
x = 50.0 * ui_scale
y = image_y - (image_y - 52 * ui_scale - 18 * 3 * ui_scale) * 0.5
blf.size(font_id, int(12 * ui_scale), 72)
draw_text_multiline(BLURB_TEXT, x, y)
elif result_stats or result_platform or progress_status:
blf.size(font_id, int(12 * ui_scale), 72)
x = 50.0 * ui_scale
y = image_y - 20 * ui_scale
# Stats
if result_platform:
draw_text_multiline(result_platform, 0.5 * window_width + x, y)
if result_stats:
draw_text_multiline(result_stats, x, y)
# Progress
progress_x = 0.0
progress_y = image_y + 1
progress_w = window_width * G.current_progress
progress_h = 15.0 * ui_scale
progress_color = [0.8, 1.0, 1.0, 0.2]
draw_rect(progress_x, progress_y, progress_w, progress_h, progress_color)
# Current status
if G.progress_status:
blf.size(font_id, int(18 * ui_scale), 72)
draw_text_multiline(G.progress_status,
progress_x + 8.0 * ui_scale,
progress_y + progress_h + int(22 * ui_scale),
shadow=True)
else:
# Title
x = 0.5 * window_width
y = 0.70 * window_height
blf.size(font_id, int(32 * ui_scale), 72)
draw_text_center("Blender Benchmark 1.0 Beta", x, y, shadow=True)
y -= 32 * ui_scale
blf.size(font_id, int(12 * ui_scale), 72)
draw_text_center("Free and Open Data for everyone.",
x, y, shadow=True)
x = 50.0 * ui_scale
y = image_y - (image_y - 52 * ui_scale - 18 * 3 * ui_scale) * 0.5
blf.size(font_id, int(12 * ui_scale), 72)
draw_text_multiline(WELCOME_TEXT, x, y)
# Bottom bar
bottom_x = 0
bottom_y = 0
bottom_w = window_width
bottom_h = 52 * ui_scale
bottom_color = [0.2, 0.2, 0.2, 1.0]
draw_rect(bottom_x, bottom_y, bottom_w, bottom_h, bottom_color)
# Logo
logo_width_unscaled = 326
logo_height_unscaled = 104
logo_dir = os.path.dirname(os.path.abspath(__file__))
logo_filepath = os.path.join(logo_dir, 'blender.png')
logo_scale_factor = 1.0
while logo_height_unscaled * logo_scale_factor > bottom_h:
logo_scale_factor *= 0.5
logo_width = logo_width_unscaled * logo_scale_factor
logo_height = logo_height_unscaled * logo_scale_factor
logo_padding = (bottom_h - logo_height) * 0.5
draw_image(logo_filepath,
logo_padding, logo_padding,
logo_width, logo_height)
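# Register the draw callback on the custom Benchmark space type so it runs
# after each redraw of the window region (POST_PIXEL); the (None, None) tuple
# matches the unused arg1/arg2 parameters of benchmark_draw_post_pixel().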
handle_draw = bpy.types.SpaceBenchmark.draw_handler_add(
benchmark_draw_post_pixel, (None, None), 'WINDOW', 'POST_PIXEL')
################################################################################
# Benchmark foundation integration.
class ProgressProviderSink:
current_progress = 0.0
current_step = ''
current_scene = ''
process = None
def __init__(self):
self.current_progress = 0.0
self.current_step = ''
self.process = None
def progress(self, count, total, prefix="", suffix=""):
G.progress_lock.acquire()
if total != 0:
self.current_progress = float(count) / float(total)
else:
self.current_progress = 0.0
G.progress_lock.release()
def clear(self):
G.progress_lock.acquire()
self.current_progress = 0
G.progress_lock.release()
def step(self, step_name):
G.progress_lock.acquire()
if self.current_step != step_name:
self.current_step = step_name
self.current_progress = 0
G.progress_lock.release()
def scene(self, scene_name):
G.progress_lock.acquire()
self.current_scene = scene_name
if scene_name:
G.scene_status[scene_name] = "Rendering..."
G.progress_lock.release()
def scene_stats(self, scene_name, stats):
G.progress_lock.acquire()
if stats:
G.scene_status[scene_name] = util.humanReadableTimeDifference(
stats.total_render_time)
else:
G.scene_status[scene_name] = "Crashed :("
G.progress_lock.release()
def render_process(self, process):
self.process = process
def is_canceled(self):
return G.cancel
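# Logger sink that silences the benchmark foundation logger: every log level
# is a no-op, since the UI reports progress through ProgressProviderSink.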
class LoggerProviderSink:
def HEADER(self, *args):
pass
def WARNING(self, *args):
pass
def ERROR(self, *args):
pass
def OK(self, *args):
pass
def BOLD(self, *args):
pass
def INFO(self, *args):
pass
def DEBUG(self, *args):
pass
def FATAL(self, *args):
pass
################################################################################
# Benchmark thread.
def string_strip_trademark(name):
return name.replace("(R)", "").replace("(TM)", "")
def correct_device_name(name):
if (name.startswith("TITAN") or
name.startswith("Quadro") or
name.startswith("GeForce")):
return "Nvidia " + name
if (name.startswith("Radeon")):
return "AMD " + name
return name
def get_gpu_names(system_info):
gpu_names = []
for device in system_info["devices"]:
if device["type"] == "CPU":
continue
gpu_names.append(correct_device_name(device["name"]))
return gpu_names
def indent_gpu_names(gpu_names):
indented_names = []
for name in gpu_names:
indented_names.append("" + name)
return indented_names
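# Illustrative construct_platform_string() output (the hardware names below
# are made-up examples, the real values come from system_info):
#
#   OS: Linux 64bit
#   CPU: Intel Core i7-8700K CPU @ 3.70GHz
#   GPU: Nvidia GeForce GTX 1080
#
# With more than one GPU the last line becomes a "GPUs:" header followed by
# one indented name per line.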
def construct_platform_string(system_info):
"""
Construct human readable platform string to show in the interface.
"""
result = ""
result += "OS: {} {}" . format(system_info["system"],
system_info["bitness"])
result += "\nCPU: {}" . format(
string_strip_trademark(system_info["cpu_brand"]))
gpu_names = get_gpu_names(system_info)
num_gpus = len(gpu_names)
if num_gpus:
if num_gpus == 1:
result += "\nGPU: {}" . format(gpu_names[0])
else:
result += "\nGPUs:\n{}" . format("\n" . join(indent_gpu_names(gpu_names)))
return result
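# convert_result_to_json_dict() below flattens the per-scene stats objects
# into plain dictionaries for the JSON dump. Illustrative shape of the
# resulting "scenes" list (values are made up):
#
#   [{'name': 'bmw27', 'stats': {'result': 'OK', 'total_render_time': 123.4, ...}},
#    {'name': 'classroom', 'stats': {'result': 'CRASH'}}]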
def convert_result_to_json_dict(ctx, results):
# Convert custom classes to dictionaries for easier JSON dump.
json_results = results
stats = json_results['scenes']
json_results['scenes'] = []
for scene in ctx.scenes:
if scene not in stats:
continue
stat = stats[scene]
if stat:
stat = stat.asDict()
stat['result'] = 'OK'
else:
stat = {'result': 'CRASH'}
json_results['scenes'].append({'name': scene,
'stats': stat})
return json_results
def system_info_get(ctx):
# This is all system information Blender knows.
# NOTE: We temporarily override sys.executable because cpuinfo relies on it,
# and inside Blender it points at the blender binary rather than Python.
old_executable = sys.executable
sys.executable = bpy.app.binary_path_python
info = system_info.gatherSystemInfo(ctx)
sys.executable = old_executable
return info
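# modify_system_info() appends the OpenCL compute unit count reported by
# cl_query to ambiguous device names, e.g. so that the two "Radeon RX Vega"
# models show up as "Radeon RX Vega 56" and "Radeon RX Vega 64" (illustrative
# values; the real numbers come from cl_query).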
def modify_system_info(system_info):
compute_units = query_opencl_compute_units()
for device in system_info["devices"]:
device_type = device["type"]
if device_type != "OPENCL":
continue
index = find_first_device_index(compute_units, device['name'])
if index != -1:
if device["name"] == "Radeon RX Vega":
device["name"] += " " + compute_units[index][1]
del compute_units[index]
return system_info
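# Scene.compute_device stores an EnumProperty identifier of the form
# "TYPE:NAME:COMPUTE_UNITS:INDEX" (for example "CPU:::" for the CPU entry).
# It is built in compute_device_list_get() below and split back apart in
# modify_device_info() and in the run operator's invoke().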
def modify_device_info(device_info):
compute_device = bpy.context.scene.compute_device
device_type, device_name, compute_units, device_index = compute_device.split(":")
if device_info["device_type"] == "OPENCL":
compute_devices = []
for device in device_info["compute_devices"]:
if device == "Radeon RX Vega":
device += " " + compute_units
compute_devices.append(device)
device_info["compute_devices"] = compute_devices
return device_info
def benchmark_thread(ctx):
G.progress_lock.acquire()
G.progress_status = "Collecting system information."
if G.cancel:
G.progress_lock.release()
return
G.progress_lock.release()
blender_system_info = system_info_get(ctx)
# This is the actual device configuration which is used to render the
# benchmark scenes.
blender_device_info = benchrunner.benchmarkGetDeviceInfo(ctx)
if not blender_device_info['device_type']:
# TODO(sergey): Report an error somehow.
return
G.progress_lock.acquire()
G.result_platform = construct_platform_string(blender_system_info)
if G.cancel:
G.progress_lock.release()
return
G.progress_status = "Prepating render."
G.progress_lock.release()
all_stats = benchrunner.benchmarkAll(ctx)
# Gather all information together.
timestamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
result = convert_result_to_json_dict(ctx, {
"timestamp": timestamp,
"blender_version": system_info.getBlenderVersion(ctx),
"system_info": modify_system_info(blender_system_info),
"device_info": modify_device_info(blender_device_info),
"scenes": all_stats if all_stats else {}
})
G.progress_lock.acquire()
if G.cancel:
G.progress_lock.release()
return
G.progress_lock.release()
G.result_dict = result
################################################################################
# Panels
def ui_scale_factor(x):
# To position the buttons exactly we add spacing, but there can be
# rounding errors with non-integer DPI scaling. We roughly compensate
# for those here.
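# For example, with the default 20 px widget height and ui_scale = 1.03 the
# widget height is 20.6 px, int() truncates that to 20, and the function
# returns x * 20.6 / 20 = x * 1.03.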
ui_scale = bpy.context.user_preferences.system.ui_scale
widget_height = 20 * ui_scale
return x * widget_height / int(widget_height)
class BENCHMARK_PT_main(Panel):
bl_label = "Benchmark"
bl_options = {'HIDE_HEADER'}
bl_space_type = 'BENCHMARK'
bl_region_type = 'WINDOW'
def draw_link(self, context):
layout = self.layout
col = layout.column()
col.scale_y = ui_scale_factor(0.35)
col.label(text="")
col = layout.column()
sub = col.row()
sub.alignment = 'RIGHT'
sub.emboss = 'LINK'
sub.scale_y = 1.5
sub.active = False
sub.operator("benchmark.opendata_link")
def draw_welcome(self, context):
layout = self.layout
split = layout.split(0.65)
split.label()
split = split.split(0.97)
col = split.column()
sub = col.row()
sub.scale_y = ui_scale_factor(64.0)
sub.separator()
compute_device_list_get(None, context)
if len(G.cached_compute_devices) > 1:
sub = col.row()
sub.scale_y = 1.5
sub.prop(context.scene, "compute_device", text="")
else:
sub = col.row()
sub.scale_y = 1.5
sub.label(text="")
sub = col.row()
sub.scale_y = 2.25
sub.operator("benchmark.run_quick", text="QUICK BENCHMARK")
col.separator()
sub = col.row()
sub.emboss = 'LINK'
sub.scale_y = 1.5
sub.operator("benchmark.run_complete", text="RUN COMPLETE BENCHMARK")
split.label()
self.draw_link(context)
def draw_submit(self, context):
layout = self.layout
split = layout.split(0.65)
split.label()
split = split.split(0.97)
col = split.column()
sub = col.row()
sub.scale_y = ui_scale_factor(64.0)
sub.separator()
sub = col.row()
sub.enabled = not G.results_submitted
sub.scale_y = 2.25
sub.operator("benchmark.share", text="SHARE ONLINE")
sub = col.row()
subsub = sub.split()
subsub.emboss = 'LINK'
subsub.scale_y = 1.5
subsub.operator("benchmark.save", text="Save Locally...")
subsub = sub.split()
subsub.emboss = 'LINK'
subsub.scale_y = 1.5
subsub.operator("benchmark.restart", text="Start Again")
split.label()
def draw(self, context):
screen_index = 0
G.progress_lock.acquire()
if G.result_dict:
screen_index = 2
elif G.result_stats or G.progress_status:
screen_index = 1
G.progress_lock.release()
if screen_index == 0:
self.draw_welcome(context)
elif screen_index == 2:
self.draw_submit(context)
################################################################################
# Operator
def blender_benchmark_data_dir_get():
system = platform.system()
if system == "Linux" or system == "Windows":
return os.path.dirname(bpy.app.binary_path)
elif system == "Darwin":
return os.path.join(os.path.dirname(bpy.app.binary_path), "..", "Resources")
else:
raise Exception("Needs implementation")
def blender_executable_get():
benchmark_data_dir = blender_benchmark_data_dir_get()
system = platform.system()
if system == "Linux":
return os.path.join(benchmark_data_dir, "blender", "blender")
elif system == "Windows":
return os.path.join(benchmark_data_dir, "blender", "blender.exe")
elif system == "Darwin":
return os.path.join(benchmark_data_dir, "blender", "blender.app", "Contents", "MacOS", "blender")
else:
raise Exception("Needs implementation")
def scenes_dir_get():
benchmark_data_dir = blender_benchmark_data_dir_get()
return os.path.join(benchmark_data_dir, "scenes")
def configure_script_get():
script_directory = os.path.dirname(os.path.realpath(__file__))
benchmark_script_directory = os.path.dirname(script_directory)
return os.path.join(benchmark_script_directory, "configure.py")
class BENCHMARK_OT_run_base(bpy.types.Operator):
run_type = 'QUICK' # or 'COMPLETE'
benchmark_context = None
thread = None
timer = None
progress_provider = None
logger_provider = None
tmpdir = None
def setup_sink(self):
self.progress_provider = ProgressProviderSink()
self.logger_provider = LoggerProviderSink()
progress.setProvider(self.progress_provider)
logger.setProvider(self.logger_provider)
def update_status(self, context):
G.progress_lock.acquire()
step = self.progress_provider.current_step
if G.cancel:
G.progress_status = "Canceling..."
elif step == 'WARM_UP':
G.progress_status = "Rendering warm-up pass..."
elif step == 'RUN':
G.current_progress = self.progress_provider.current_progress
G.progress_status = "Rendering... Press Esc to stop."
context.area.tag_redraw()
# Path to currently displayed background image.
current_scene = self.progress_provider.current_scene
if current_scene:
G.background_image_path = os.path.join(
self.benchmark_context.scenes_dir,
current_scene,
current_scene + ".png")
else:
G.background_image_path = ""
# Update per-scene status string
G.result_stats = ""
for scene in G.scene_status:
G.result_stats += "{}: {}\n".format(
scene, G.scene_status[scene])
G.progress_lock.release()
def done(self, context):
wm = context.window_manager
wm.event_timer_remove(self.timer)
# Restore all modifications to the benchmark foundation.
progress.restoreDefaultProvider()
logger.restoreDefaultProvider()
# Destroy objects of sinks.
del self.progress_provider
del self.logger_provider
self.progress_provider = None
self.logger_provider = None
# Construct final stats string
if G.cancel:
G.reset()
elif G.result_dict:
G.result_stats = ""
for name_stat in G.result_dict["scenes"]:
stat = name_stat["stats"]
if G.result_stats:
G.result_stats += "\n"
if stat["result"] == "OK":
G.result_stats += "{}: {}".format(name_stat['name'],
util.humanReadableTimeDifference(
stat["total_render_time"]))
else:
G.result_stats += "{}: {}".format(name_stat['name'],
stat["result"])
else:
G.result_stats = ""
# TODO(sergey): Use a nicer picture for the final slide.
G.background_image_path = ""
# Tag for nice redraw
G.progress_status = None
G.current_progress = 0.0
context.area.tag_redraw()
self.tmpdir = None
def modal(self, context, event):
if event.type == 'TIMER':
if self.thread.is_alive():
self.update_status(context)
return {'PASS_THROUGH'}
else:
self.done(context)
return {'FINISHED'}
elif event.type == 'ESC':
self.cancel_request(context)
return {'PASS_THROUGH'}
def invoke(self, context, event):
G.cancel = False
G.result_platform = ""
G.progress_status = "Initializing..."
context.area.tag_redraw()
compute_device = context.scene.compute_device
device_type, device_name, compute_units, device_index = compute_device.split(":")
self.tmpdir = tempfile.TemporaryDirectory(prefix="blender_benchmark_")
# Before doing anything, make sure we have all sinks set up, so we do
# not miss any progress report.
self.setup_sink()
wm = context.window_manager
ctx = benchmark_context.Context()
ctx.blender = blender_executable_get()
ctx.configure_script = configure_script_get()
if self.run_type == 'QUICK':
ctx.scenes = QUICK_SCENES
else:
ctx.scenes = COMPLETE_SCENES
for scene in ctx.scenes:
G.scene_status[scene] = "Queued"
ctx.scenes_dir = scenes_dir_get()
ctx.device_type = device_type
ctx.device_name = device_name
ctx.single_compute_scene = True
ctx.image_output_dir = self.tmpdir.name
# Set this to True when there are multiple GPUs with the same name and only
# one of them is to be enabled, or when requesting a GPU render without
# specifying a GPU name.
ctx.device_single = True
ctx.device_index = device_index
# ctx.image_output_dir = "/tmp/"
self.benchmark_context = ctx
# Create thread for the actual benchmark.
self.thread = Thread(target=benchmark_thread,
args=(self.benchmark_context, ))
self.thread.start()
# Create timer to query thread status
self.timer = wm.event_timer_add(0.1, context.window)
# Register self as modal.
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
def cancel_request(self, context):
G.progress_lock.acquire()
G.cancel = True
context.area.tag_redraw()
G.progress_lock.release()
if self.progress_provider.process:
if platform.system() == "Windows":
self.progress_provider.process.kill()
else:
import signal
self.progress_provider.process.send_signal(signal.SIGINT)
def cancel(self, context):
self.cancel_request(context)
if self.timer:
wm = context.window_manager
wm.event_timer_remove(self.timer)
if self.thread:
self.thread.join()
class BENCHMARK_OT_run_quick(BENCHMARK_OT_run_base):
"Run quick Blender benchmark"
bl_label = "Run Benchmark"
bl_idname = "benchmark.run_quick"
run_type = 'QUICK'
class BENCHMARK_OT_run_complete(BENCHMARK_OT_run_base):
"Run complete Blender benchmark (might take 1.5 hours to finish and 4GiB of GPU memory)"
bl_label = "Run Benchmark"
bl_idname = "benchmark.run_complete"
run_type = 'COMPLETE'
class BENCHMARK_OT_save(bpy.types.Operator):
bl_idname = "benchmark.save"
bl_label = "Save Benchmark Result"
filepath: bpy.props.StringProperty(
subtype='FILE_PATH',
options={'SKIP_SAVE'},
)
def execute(self, context):
with open(self.filepath, "w") as f:
f.write(json.dumps(G.result_dict, sort_keys=True, indent=2))
make_buttons_green()
return {'FINISHED'}
def invoke(self, context, event):
import os
make_buttons_default()
if not self.filepath:
self.filepath = os.path.join(
os.path.expanduser("~"), "benchmark-result.txt")
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
def cancel(self, context):
make_buttons_green()
class BENCHMARK_OT_share(bpy.types.Operator):
bl_idname = "benchmark.share"
bl_label = "Share Benchmark Result"
def execute(self, context):
from benchmark import submission
make_buttons_default()
print('Submitting benchmark')
try:
submission.submit_benchmark(G.result_dict)
except Exception as ex:
self.report({'ERROR'}, f'Error submitting results:\n{str(ex)[:100]}')
return {'CANCELLED'}
print('Submission done')
make_buttons_green()
G.results_submitted = True
return {'FINISHED'}
class BENCHMARK_OT_opendata_link(bpy.types.Operator):
bl_idname = "benchmark.opendata_link"
bl_label = "opendata.blender.org"
def invoke(self, context, event):
bpy.ops.wm.url_open('INVOKE_DEFAULT', url="https://opendata.blender.org")
return {'FINISHED'}
################################################################################
# Restart benchmark.
class BENCHMARK_OT_restart(bpy.types.Operator):
bl_idname = "benchmark.restart"
bl_label = "Go to a home screen and choose another benchmark to run"
def invoke(self, context, event):
G.reset()
return {'FINISHED'}
################################################################################
# Configuration.
def cl_query_executable_get():
benchmark_data_dir = blender_benchmark_data_dir_get()
system = platform.system()
if system == "Linux":
return os.path.join(benchmark_data_dir, "bin", "cl_query")
elif system == "Windows":
return os.path.join(benchmark_data_dir, "bin", "cl_query.exe")
elif system == "Darwin":
return os.path.join(benchmark_data_dir, "bin", "cl_query")
else:
raise Exception("Needs implementation")
def query_opencl_compute_units():
binary = cl_query_executable_get()
output = subprocess.run([binary], stdout=subprocess.PIPE).stdout
lines = output.splitlines()
compute_units = []
for line in lines:
(name, max_compute_units) = line.rsplit(b':', 1)
compute_units.append((name.decode(), max_compute_units.decode()))
return compute_units
def find_first_device_index(compute_units, device_name):
if not compute_units:
return -1
for index, value in enumerate(compute_units):
if value[0] == device_name:
return index
return -1
def compute_device_list_get(self, context):
if G.cached_compute_devices:
return G.cached_compute_devices
compute_devices = [('CPU:::', "CPU", "")]
if not G.cached_system_info:
ctx = benchmark_context.Context()
ctx.blender = blender_executable_get()
ctx.configure_script = configure_script_get()
G.cached_system_info = system_info_get(ctx)
compute_units = query_opencl_compute_units()
device_indices = {}
for device in G.cached_system_info["devices"]:
raw_device_name = device["name"]
device_type = device["type"]
if raw_device_name in device_indices:
device_indices[raw_device_name] += 1
device_index = device_indices[raw_device_name]
else:
device_indices[raw_device_name] = 0
device_index = 0
if device_type == "CPU":
continue
elif device_type == "OPENCL":
device_name = correct_device_name(device["name"])
index = find_first_device_index(compute_units, device['name'])
device_compute_units = ""
if index != -1:
if device["name"] == "Radeon RX Vega":
device_name += " " + compute_units[index][1]
device_compute_units = str(compute_units[index][1])
del compute_units[index]
device_id = "{}:{}:{}:{}" . format(device_type,
device["name"],
device_compute_units,
device_index)
compute_devices.append((device_id, device_name, ""))
elif device_type == "CUDA":
device_name = correct_device_name(device["name"])
device_id = "{}:{}::{}" . format(device_type,
device["name"],
device_index)
compute_devices.append((device_id, device_name, ""))
G.cached_compute_devices = compute_devices
return compute_devices
################################################################################
# Tweak User Preferences
default_wcol_tool_inner = None
default_wcol_tool_inner_sel = None
default_wcol_tool_outline = None
def backup_buttons_colors():
global default_wcol_tool_inner
global default_wcol_tool_inner_sel
global default_wcol_tool_outline
userpref = bpy.context.user_preferences
theme = userpref.themes[0]
default_wcol_tool_inner = theme.user_interface.wcol_tool.inner[:]
default_wcol_tool_inner_sel = theme.user_interface.wcol_tool.inner_sel[:]
default_wcol_tool_outline = theme.user_interface.wcol_tool.outline[:]
def make_buttons_green():
userpref = bpy.context.user_preferences
theme = userpref.themes[0]
theme.user_interface.wcol_tool.inner = [0.408, 0.590, 0.129, 1.0]
theme.user_interface.wcol_tool.inner_sel = [0.308, 0.490, 0.029, 1.0]
theme.user_interface.wcol_tool.outline = [0.408, 0.590, 0.129]
def make_buttons_default():
userpref = bpy.context.user_preferences
theme = userpref.themes[0]
theme.user_interface.wcol_tool.inner = default_wcol_tool_inner
theme.user_interface.wcol_tool.inner_sel = default_wcol_tool_inner_sel
theme.user_interface.wcol_tool.outline = default_wcol_tool_outline
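# Module-level tweaks applied when this module is imported: disable the quit
# dialog, set the benchmark background color, back up the default button
# colors, switch the buttons to green, and set the widget font size.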
userpref = bpy.context.user_preferences
theme = userpref.themes[0]
userpref.view.use_quit_dialog = False
theme.benchmark.space.back = [0.26, 0.26, 0.26]
backup_buttons_colors()
make_buttons_green()
style = userpref.ui_styles[0]
style.widget.points = 12
################################################################################
# Registration
classes = (
BENCHMARK_PT_main,
BENCHMARK_OT_restart,
BENCHMARK_OT_run_quick,
BENCHMARK_OT_run_complete,
BENCHMARK_OT_save,
BENCHMARK_OT_share,
BENCHMARK_OT_opendata_link,
)
Scene.compute_device = EnumProperty(
items=compute_device_list_get,
name="Compute Device",
description="Compute device to run benchmark on")