blender-benchmark-bundle/benchmark/space/__init__.py

import datetime
import json
import os
import platform
import subprocess
import sys
import tempfile

from threading import Thread, Lock

import blf
import bpy
from bpy.types import Panel, Scene
from bpy.props import EnumProperty, IntProperty, FloatProperty

from ..foundation import (benchrunner,
                          logger,
                          progress,
                          system_info,
                          util)
from ..foundation import context as benchmark_context

################################################################################
# Global state.

QUICK_SCENES = ["bmw27",
                "classroom"]
COMPLETE_SCENES = ["barbershop_interior",
                   "bmw27",
                   "classroom",
                   "fishy_cat",
                   "koro",
                   "pavillon_barcelona"]

global_result_platform = None
global_progress_status = None
global_result_stats = None
global_result_dict = None
global_background_image_path = ""
global_scene_status = {}
global_cancel = False
global_cached_system_info = None
global_cached_compute_devices = None
global_results_submitted = False

images = {}

current_progress = 0.0
progress_lock = Lock()
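# NOTE: The global_* values above are shared between the UI (the draw callback
# and the modal operator) and the background benchmark thread; progress_lock
# guards reads and writes of this shared state.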
WELCOME_TEXT = "Run the Quick Benchmark on the selected device to\n" \
"get a fast measurement of your hardware's performance.\n" \
"Usually takes less than 30 minutes."
BLURB_TEXT = "Share your results with the world!\n" \
"Manage the uploaded benchmark data on your Blender ID."


def reset_global_state():
    global global_result_platform
    global global_progress_status
    global global_result_stats
    global global_result_dict
    global global_background_image_path
    global global_scene_status
    global global_results_submitted
    global_result_platform = None
    global_progress_status = None
    global_result_stats = None
    global_result_dict = None
    global_background_image_path = ""
    global_scene_status = {}
    global_results_submitted = False

################################################################################
# Draw Utilities.

font_id = 0
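

# Query the current GL viewport and return its (width, height) in pixels.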
def viewport_size():
    import bgl
    viewport = bgl.Buffer(bgl.GL_INT, 4)
    bgl.glGetIntegerv(bgl.GL_VIEWPORT, viewport)
    return viewport[2], viewport[3]


def draw_text_center(text, x, y, shadow=False):
    dim = blf.dimensions(font_id, text)
    cx = x - int(dim[0] / 2)
    cy = y - int(dim[1] / 2)
    if shadow:
        delta = 1
        blf.color(font_id, 0.2, 0.2, 0.2, 1.0)
        blf.position(font_id, cx + delta, cy - delta, 0)
        blf.draw(font_id, text)
        blf.color(font_id, 1.0, 1.0, 1.0, 1.0)
    blf.position(font_id, cx, cy, 0)
    blf.draw(font_id, text)


def draw_text_multiline(text, x, y, shadow=False):
    ui_scale = bpy.context.user_preferences.system.ui_scale
    height = int(blf.dimensions(font_id, "Dummy Text")[1])
    space = int(8 * ui_scale)
    for line in text.split('\n'):
        if shadow:
            delta = 1
            blf.color(font_id, 0.2, 0.2, 0.2, 1.0)
            blf.position(font_id, x + delta, y - height - delta, 0)
            blf.draw(font_id, line)
            blf.color(font_id, 1.0, 1.0, 1.0, 1.0)
        blf.position(font_id, x, y - height, 0)
        blf.draw(font_id, line)
        y -= height + space


def draw_rect(x, y, w, h, color):
    import gpu
    gpu.draw.rect(x, y, x + w, y + h, color[0], color[1], color[2], color[3])


def draw_image(filepath, x, y, w, h):
    global images
    if filepath not in images:
        ima = bpy.data.images.load(filepath)
        images[filepath] = ima
    import gpu
    gpu.draw.image(images[filepath], x, y, x + w, y + h)

################################################################################
# Draw.
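
# Main POST_PIXEL draw callback of the benchmark space. Depending on the
# global state it draws one of three screens: welcome, benchmark progress,
# or final results.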
def benchmark_draw_post_pixel(arg1, arg2):
    global progress_lock
    progress_lock.acquire()
    progress_status = global_progress_status
    result_platform = global_result_platform
    result_stats = global_result_stats
    result_dict = global_result_dict
    progress_lock.release()
    ui_scale = bpy.context.user_preferences.system.ui_scale
    blf.color(font_id, 1.0, 1.0, 1.0, 1.0)
    window_width, window_height = viewport_size()
    # Image
    image_h = 370 * ui_scale
    image_y = window_height - image_h
    if global_background_image_path:
        draw_image(global_background_image_path, 0, image_y, window_width, image_h)
    else:
        splash_dir = os.path.dirname(os.path.abspath(__file__))
        splash_filepath = os.path.join(splash_dir, 'splash.png')
        draw_image(splash_filepath, 0, image_y, window_width, image_h)
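    # Results screen: sum the per-scene render times into a single time. Any
    # crashed scene forces the score negative so that a failure message is
    # shown instead of a total.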
    if result_dict:
        x = 0.5 * window_width
        y = 0.70 * window_height
        score = 0
        for name_stats in global_result_dict["scenes"]:
            stat = name_stats['stats']
            if stat["result"] == "OK":
                score += stat["total_render_time"]
            else:
                score = -1
        if score >= 0:
            blf.size(font_id, int(32 * ui_scale), 72)
            draw_text_center("Your Time: {}" . format(
                util.humanReadableTimeDifference(score)), x, y, shadow=True)
        else:
            blf.size(font_id, int(18 * ui_scale), 72)
            draw_text_center("Unfortunately, a crash happened :(", x, y, shadow=True)
            blf.size(font_id, int(24 * ui_scale), 72)
            draw_text_center("You can still share data of the scenes that succeeded!",
                             x, y - 26 * ui_scale, shadow=True)
        x = 50.0 * ui_scale
        y = image_y - (image_y - 52 * ui_scale - 18 * 3 * ui_scale) * 0.5
        blf.size(font_id, int(12 * ui_scale), 72)
        draw_text_multiline(BLURB_TEXT, x, y)
    elif result_stats or result_platform or progress_status:
        blf.size(font_id, int(12 * ui_scale), 72)
        x = 50.0 * ui_scale
        y = image_y - 20 * ui_scale
        # Stats
        if result_platform:
            draw_text_multiline(result_platform, 0.5 * window_width + x, y)
        if result_stats:
            draw_text_multiline(result_stats, x, y)
        # Progress
        progress_x = 0.0
        progress_y = image_y + 1
        progress_w = window_width * current_progress
        progress_h = 15.0 * ui_scale
        progress_color = [0.8, 1.0, 1.0, 0.2]
        draw_rect(progress_x, progress_y, progress_w, progress_h, progress_color)
        # Current status
        if global_progress_status:
            blf.size(font_id, int(18 * ui_scale), 72)
            draw_text_multiline(global_progress_status,
                                progress_x + 8.0 * ui_scale,
                                progress_y + progress_h + int(22 * ui_scale),
                                shadow=True)
    else:
        # Title
        x = 0.5 * window_width
        y = 0.70 * window_height
        blf.size(font_id, int(32 * ui_scale), 72)
        draw_text_center("Blender Benchmark 1.0 Beta", x, y, shadow=True)
        y -= 32 * ui_scale
        blf.size(font_id, int(12 * ui_scale), 72)
        draw_text_center("Free and Open Data for everyone.",
                         x, y, shadow=True)
        x = 50.0 * ui_scale
        y = image_y - (image_y - 52 * ui_scale - 18 * 3 * ui_scale) * 0.5
        blf.size(font_id, int(12 * ui_scale), 72)
        draw_text_multiline(WELCOME_TEXT, x, y)
    # Bottom bar
    bottom_x = 0
    bottom_y = 0
    bottom_w = window_width
    bottom_h = 52 * ui_scale
    bottom_color = [0.2, 0.2, 0.2, 1.0]
    draw_rect(bottom_x, bottom_y, bottom_w, bottom_h, bottom_color)
    # Logo
    logo_width_unscaled = 326
    logo_height_unscaled = 104
    logo_dir = os.path.dirname(os.path.abspath(__file__))
    logo_filepath = os.path.join(logo_dir, 'blender.png')
    logo_scale_factor = 1.0
    while logo_height_unscaled * logo_scale_factor > bottom_h:
        logo_scale_factor *= 0.5
    logo_width = logo_width_unscaled * logo_scale_factor
    logo_height = logo_height_unscaled * logo_scale_factor
    logo_padding = (bottom_h - logo_height) * 0.5
    draw_image(logo_filepath,
               logo_padding, logo_padding,
               logo_width, logo_height)


handle_draw = bpy.types.SpaceBenchmark.draw_handler_add(
    benchmark_draw_post_pixel, (None, None), 'WINDOW', 'POST_PIXEL')

################################################################################
# Benchmark foundation integration.
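
# ProgressProviderSink receives progress callbacks from the benchmark
# foundation (on the benchmark thread) and mirrors them into the global
# state under progress_lock, so the UI timer can poll them.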
class ProgressProviderSink:
    current_progress = 0.0
    current_step = ''
    current_scene = ''
    process = None

    def __init__(self):
        self.current_progress = 0.0
        self.current_step = ''
        self.process = None

    def progress(self, count, total, prefix="", suffix=""):
        progress_lock.acquire()
        if total != 0:
            self.current_progress = float(count) / float(total)
        else:
            self.current_progress = 0.0
        progress_lock.release()

    def clear(self):
        progress_lock.acquire()
        self.current_progress = 0
        progress_lock.release()

    def step(self, step_name):
        progress_lock.acquire()
        if self.current_step != step_name:
            self.current_step = step_name
            self.current_progress = 0
        progress_lock.release()

    def scene(self, scene_name):
        progress_lock.acquire()
        self.current_scene = scene_name
        if scene_name:
            global global_scene_status
            global_scene_status[scene_name] = "Rendering..."
        progress_lock.release()

    def scene_stats(self, scene_name, stats):
        progress_lock.acquire()
        global global_scene_status
        if stats:
            global_scene_status[scene_name] = util.humanReadableTimeDifference(
                stats.total_render_time)
        else:
            global_scene_status[scene_name] = "Crashed :("
        progress_lock.release()

    def render_process(self, process):
        self.process = process

    def is_canceled(self):
        global global_cancel
        return global_cancel
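

# LoggerProviderSink silences all log output from the benchmark foundation;
# status is reported to the user through the globals above instead.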
class LoggerProviderSink:
    def HEADER(self, *args):
        pass

    def WARNING(self, *args):
        pass

    def ERROR(self, *args):
        pass

    def OK(self, *args):
        pass

    def BOLD(self, *args):
        pass

    def INFO(self, *args):
        pass

    def DEBUG(self, *args):
        pass

    def FATAL(self, *args):
        pass

################################################################################
# Benchmark thread.
def string_strip_trademark(name):
    return name.replace("(R)", "").replace("(TM)", "")


def correct_device_name(name):
    if (name.startswith("TITAN") or
            name.startswith("Quadro") or
            name.startswith("GeForce")):
        return "Nvidia " + name
    if name.startswith("Radeon"):
        return "AMD " + name
    return name


def get_gpu_names(system_info):
    gpu_names = []
    for device in system_info["devices"]:
        if device["type"] == "CPU":
            continue
        gpu_names.append(correct_device_name(device["name"]))
    return gpu_names


def indent_gpu_names(gpu_names):
    indented_names = []
    for name in gpu_names:
        # NOTE: A two-space prefix is assumed here; it indents each GPU name
        # under the "GPUs:" label in construct_platform_string().
        indented_names.append("  " + name)
    return indented_names


def construct_platform_string(system_info):
    """
    Construct a human readable platform string to show in the interface.
    """
    result = ""
    result += "OS: {} {}" . format(system_info["system"],
                                   system_info["bitness"])
    result += "\nCPU: {}" . format(
        string_strip_trademark(system_info["cpu_brand"]))
    gpu_names = get_gpu_names(system_info)
    num_gpus = len(gpu_names)
    if num_gpus:
        if num_gpus == 1:
            result += "\nGPU: {}" . format(gpu_names[0])
        else:
            result += "\nGPUs:\n{}" . format(
                "\n" . join(indent_gpu_names(gpu_names)))
    return result


def convert_result_to_json_dict(ctx, results):
    # Convert custom classes to dictionaries for easier JSON dump.
    json_results = results
    stats = json_results['scenes']
    json_results['scenes'] = []
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        stat = stats[scene]
        if stat:
            stat = stat.asDict()
            stat['result'] = 'OK'
        else:
            stat = {'result': 'CRASH'}
        json_results['scenes'].append({'name': scene,
                                       'stats': stat})
    return json_results


def system_info_get(ctx):
    # This is all system information Blender knows.
    # NOTE: We override executable since cpuinfo uses it, and it is set
    # to blender.
    old_executable = sys.executable
    sys.executable = bpy.app.binary_path_python
    info = system_info.gatherSystemInfo(ctx)
    sys.executable = old_executable
    return info


def modify_system_info(system_info):
    compute_units = query_opencl_compute_units()
    for device in system_info["devices"]:
        device_type = device["type"]
        if device_type != "OPENCL":
            continue
        index = find_first_device_index(compute_units, device['name'])
        if index != -1:
            if device["name"] == "Radeon RX Vega":
                device["name"] += " " + compute_units[index][1]
            del compute_units[index]
    return system_info


def modify_device_info(device_info):
    compute_device = bpy.context.scene.compute_device
    device_type, device_name, compute_units, device_index = compute_device.split(":")
    if device_info["device_type"] == "OPENCL":
        compute_devices = []
        for device in device_info["compute_devices"]:
            if device == "Radeon RX Vega":
                device += " " + compute_units
            compute_devices.append(device)
        device_info["compute_devices"] = compute_devices
    return device_info
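

# Entry point of the background benchmark thread: gather system information,
# render all configured scenes through the benchmark foundation, and publish
# the combined result dictionary for the UI to pick up.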
def benchmark_thread(ctx):
    global progress_lock, global_result_platform, global_progress_status
    global global_cancel
    progress_lock.acquire()
    global_progress_status = "Collecting system information."
    if global_cancel:
        progress_lock.release()
        return
    progress_lock.release()
    blender_system_info = system_info_get(ctx)
    # This is the actual device configuration which is used to render the
    # benchmark scene.
    blender_device_info = benchrunner.benchmarkGetDeviceInfo(ctx)
    if not blender_device_info['device_type']:
        # TODO(sergey): Report an error somehow.
        return
    progress_lock.acquire()
    global_result_platform = construct_platform_string(blender_system_info)
    if global_cancel:
        progress_lock.release()
        return
    global_progress_status = "Preparing render."
    progress_lock.release()
    all_stats = benchrunner.benchmarkAll(ctx)
    # Gather all information together.
    timestamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
    result = convert_result_to_json_dict(ctx, {
        "timestamp": timestamp,
        "blender_version": system_info.getBlenderVersion(ctx),
        "system_info": modify_system_info(blender_system_info),
        "device_info": modify_device_info(blender_device_info),
        "scenes": all_stats if all_stats else {}
    })
    progress_lock.acquire()
    if global_cancel:
        progress_lock.release()
        return
    progress_lock.release()
    global global_result_dict
    global_result_dict = result

################################################################################
# Panels
def ui_scale_factor(x):
    # To position the buttons exactly we add spacing, but there can be
    # rounding errors with non-integer DPI scaling. We roughly compensate
    # for those here.
    ui_scale = bpy.context.user_preferences.system.ui_scale
    widget_height = 20 * ui_scale
    return x * widget_height / int(widget_height)


class BENCHMARK_PT_main(Panel):
    bl_label = "Benchmark"
    bl_options = {'HIDE_HEADER'}
    bl_space_type = 'BENCHMARK'
    bl_region_type = 'WINDOW'

    def draw_link(self, context):
        layout = self.layout
        col = layout.column()
        col.scale_y = ui_scale_factor(0.35)
        col.label(text="")
        col = layout.column()
        sub = col.row()
        sub.alignment = 'RIGHT'
        sub.emboss = 'LINK'
        sub.scale_y = 1.5
        sub.active = False
        sub.operator("benchmark.opendata_link")

    def draw_welcome(self, context):
        layout = self.layout
        split = layout.split(0.65)
        split.label()
        split = split.split(0.97)
        col = split.column()
        sub = col.row()
        sub.scale_y = ui_scale_factor(64.0)
        sub.separator()
        compute_device_list_get(None, context)
        if len(global_cached_compute_devices) > 1:
            sub = col.row()
            sub.scale_y = 1.5
            sub.prop(context.scene, "compute_device", text="")
        else:
            sub = col.row()
            sub.scale_y = 1.5
            sub.label(text="")
        sub = col.row()
        sub.scale_y = 2.25
        sub.operator("benchmark.run_quick", text="QUICK BENCHMARK")
        col.separator()
        sub = col.row()
        sub.emboss = 'LINK'
        sub.scale_y = 1.5
        sub.operator("benchmark.run_complete", text="RUN COMPLETE BENCHMARK")
        split.label()
        self.draw_link(context)

    def draw_submit(self, context):
        layout = self.layout
        split = layout.split(0.65)
        split.label()
        split = split.split(0.97)
        col = split.column()
        sub = col.row()
        sub.scale_y = ui_scale_factor(64.0)
        sub.separator()
        sub = col.row()
        sub.enabled = not global_results_submitted
        sub.scale_y = 2.25
        sub.operator("benchmark.share", text="SHARE ONLINE")
        sub = col.row()
        subsub = sub.split()
        subsub.emboss = 'LINK'
        subsub.scale_y = 1.5
        subsub.operator("benchmark.save", text="Save Locally...")
        subsub = sub.split()
        subsub.emboss = 'LINK'
        subsub.scale_y = 1.5
        subsub.operator("benchmark.restart", text="Start Again")
        split.label()

    def draw(self, context):
        screen_index = 0
        global progress_lock
        progress_lock.acquire()
        if global_result_dict:
            screen_index = 2
        elif global_result_stats or global_progress_status:
            screen_index = 1
        progress_lock.release()
        if screen_index == 0:
            self.draw_welcome(context)
        elif screen_index == 2:
            self.draw_submit(context)

################################################################################
# Operator
def blender_benchmark_data_dir_get():
    system = platform.system()
    if system == "Linux" or system == "Windows":
        return os.path.dirname(bpy.app.binary_path)
    elif system == "Darwin":
        return os.path.join(os.path.dirname(bpy.app.binary_path),
                            "..", "Resources")
    else:
        raise Exception("Needs implementation")


def blender_executable_get():
    benchmark_data_dir = blender_benchmark_data_dir_get()
    system = platform.system()
    if system == "Linux":
        return os.path.join(benchmark_data_dir, "blender", "blender")
    elif system == "Windows":
        return os.path.join(benchmark_data_dir, "blender", "blender.exe")
    elif system == "Darwin":
        return os.path.join(benchmark_data_dir, "blender", "blender.app",
                            "Contents", "MacOS", "blender")
    else:
        raise Exception("Needs implementation")


def scenes_dir_get():
    benchmark_data_dir = blender_benchmark_data_dir_get()
    return os.path.join(benchmark_data_dir, "scenes")


def configure_script_get():
    script_directory = os.path.dirname(os.path.realpath(__file__))
    benchmark_script_directory = os.path.dirname(script_directory)
    return os.path.join(benchmark_script_directory, "configure.py")
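

# Modal operator driving a benchmark run: it spawns the benchmark thread,
# polls it from a window timer, mirrors progress into the global state, and
# handles cancellation via Esc.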
class BENCHMARK_OT_run_base(bpy.types.Operator):
    run_type = 'QUICK'  # or 'COMPLETE'
    benchmark_context = None
    thread = None
    timer = None
    progress_provider = None
    logger_provider = None
    tmpdir = None

    def setup_sink(self):
        self.progress_provider = ProgressProviderSink()
        self.logger_provider = LoggerProviderSink()
        progress.setProvider(self.progress_provider)
        logger.setProvider(self.logger_provider)

    def update_status(self, context):
        global global_progress_status, global_background_image_path
        global global_cancel
        progress_lock.acquire()
        step = self.progress_provider.current_step
        if global_cancel:
            global_progress_status = "Canceling..."
        elif step == 'WARM_UP':
            global_progress_status = "Rendering warm-up pass..."
        elif step == 'RUN':
            global current_progress
            current_progress = self.progress_provider.current_progress
            global_progress_status = "Rendering... Press Esc to stop."
        context.area.tag_redraw()
        # Path to the currently displayed background image.
        current_scene = self.progress_provider.current_scene
        if current_scene:
            global_background_image_path = os.path.join(
                self.benchmark_context.scenes_dir,
                current_scene,
                current_scene + ".png")
        else:
            global_background_image_path = ""
        # Update the per-scene status string.
        global global_result_stats
        global_result_stats = ""
        for scene in global_scene_status:
            global_result_stats += "{}: {}\n" . format(
                scene, global_scene_status[scene])
        progress_lock.release()

    def done(self, context):
        global global_progress_status, global_result_stats, current_progress
        global global_result_dict
        wm = context.window_manager
        wm.event_timer_remove(self.timer)
        # Restore all modifications to the benchmark foundation.
        progress.restoreDefaultProvider()
        logger.restoreDefaultProvider()
        # Destroy the sink objects.
        del self.progress_provider
        del self.logger_provider
        self.progress_provider = None
        self.logger_provider = None
        # Construct the final stats string.
        if global_cancel:
            global_result_dict = None
            reset_global_state()
        elif global_result_dict:
            global_result_stats = ""
            for name_stat in global_result_dict["scenes"]:
                stat = name_stat["stats"]
                if global_result_stats:
                    global_result_stats += "\n"
                if stat["result"] == "OK":
                    global_result_stats += "{}: {}" . format(
                        name_stat['name'],
                        util.humanReadableTimeDifference(
                            stat["total_render_time"]))
                else:
                    global_result_stats += "{}: {}" . format(name_stat['name'],
                                                             stat["result"])
        else:
            global_result_stats = ""
        # TODO(sergey): Use a nicer picture for the final slide.
        global global_background_image_path
        global_background_image_path = ""
        # Tag for a nice redraw.
        global_progress_status = None
        current_progress = 0.0
        context.area.tag_redraw()
        self.tmpdir = None

    def modal(self, context, event):
        if event.type == 'TIMER':
            if self.thread.is_alive():
                self.update_status(context)
                return {'PASS_THROUGH'}
            else:
                self.done(context)
                return {'FINISHED'}
        elif event.type == 'ESC':
            self.cancel_request(context)
        return {'PASS_THROUGH'}

    def invoke(self, context, event):
        global global_result_platform, global_progress_status
        global global_scene_status, global_cancel
        global_cancel = False
        global_result_platform = ""
        global_progress_status = "Initializing..."
        context.area.tag_redraw()
        compute_device = context.scene.compute_device
        device_type, device_name, compute_units, device_index = compute_device.split(":")
        self.tmpdir = tempfile.TemporaryDirectory(prefix="blender_benchmark_")
        # Before doing anything, make sure we have all sinks set up, so we do
        # not miss any progress report.
        self.setup_sink()
        wm = context.window_manager
        ctx = benchmark_context.Context()
        ctx.blender = blender_executable_get()
        ctx.configure_script = configure_script_get()
        if self.run_type == 'QUICK':
            ctx.scenes = QUICK_SCENES
        else:
            ctx.scenes = COMPLETE_SCENES
        for scene in ctx.scenes:
            global_scene_status[scene] = "Queued"
        ctx.scenes_dir = scenes_dir_get()
        ctx.device_type = device_type
        ctx.device_name = device_name
        ctx.single_compute_scene = True
        ctx.image_output_dir = self.tmpdir.name
        # Set this to True when there are multiple GPUs with the same name
        # and only one of them is to be enabled, or when requesting a GPU
        # render without specifying a GPU name.
        ctx.device_single = True
        ctx.device_index = device_index
        # ctx.image_output_dir = "/tmp/"
        self.benchmark_context = ctx
        # Create a thread for the actual benchmark.
        self.thread = Thread(target=benchmark_thread,
                             args=(self.benchmark_context, ))
        self.thread.start()
        # Create a timer to query the thread status.
        self.timer = wm.event_timer_add(0.1, context.window)
        # Register self as modal.
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}

    def cancel_request(self, context):
        global global_cancel
        progress_lock.acquire()
        global_cancel = True
        context.area.tag_redraw()
        progress_lock.release()
        if self.progress_provider.process:
            if platform.system() == "Windows":
                self.progress_provider.process.kill()
            else:
                import signal
                self.progress_provider.process.send_signal(signal.SIGINT)

    def cancel(self, context):
        global global_cancel
        self.cancel_request(context)
        if self.timer:
            wm = context.window_manager
            wm.event_timer_remove(self.timer)
        if self.thread:
            self.thread.join()


class BENCHMARK_OT_run_quick(BENCHMARK_OT_run_base):
    "Run quick Blender benchmark"
    bl_label = "Run Benchmark"
    bl_idname = "benchmark.run_quick"
    run_type = 'QUICK'


class BENCHMARK_OT_run_complete(BENCHMARK_OT_run_base):
    "Run complete Blender benchmark (might take 1.5 hours and require 4GiB of GPU memory)"
    bl_label = "Run Benchmark"
    bl_idname = "benchmark.run_complete"
    run_type = 'COMPLETE'


class BENCHMARK_OT_save(bpy.types.Operator):
    bl_idname = "benchmark.save"
    bl_label = "Save Benchmark Result"

    filepath = bpy.props.StringProperty(
        subtype='FILE_PATH',
        options={'SKIP_SAVE'},
    )

    def execute(self, context):
        with open(self.filepath, "w") as f:
            f.write(json.dumps(global_result_dict, sort_keys=True, indent=2))
        make_buttons_green()
        return {'FINISHED'}

    def invoke(self, context, event):
        import os
        make_buttons_default()
        if not self.filepath:
            self.filepath = os.path.join(
                os.path.expanduser("~"), "benchmark-result.txt")
        wm = context.window_manager
        wm.fileselect_add(self)
        return {'RUNNING_MODAL'}

    def cancel(self, context):
        make_buttons_green()


class BENCHMARK_OT_share(bpy.types.Operator):
    bl_idname = "benchmark.share"
    bl_label = "Share Benchmark Result"

    def execute(self, context):
        from benchmark import submission
        global global_results_submitted
        make_buttons_default()
        print('Submitting benchmark')
        try:
            submission.submit_benchmark(global_result_dict)
        except Exception as ex:
            self.report({'ERROR'}, f'Error submitting results:\n{str(ex)[:100]}')
            return {'CANCELLED'}
        print('Submission done')
        make_buttons_green()
        global_results_submitted = True
        return {'FINISHED'}


class BENCHMARK_OT_opendata_link(bpy.types.Operator):
    bl_idname = "benchmark.opendata_link"
    bl_label = "opendata.blender.org"

    def invoke(self, context, event):
        bpy.ops.wm.url_open('INVOKE_DEFAULT', url="https://opendata.blender.org")
        return {'FINISHED'}

################################################################################
# Restart benchmark.
class BENCHMARK_OT_restart(bpy.types.Operator):
    bl_idname = "benchmark.restart"
    bl_label = "Go to the home screen and choose another benchmark to run"

    def invoke(self, context, event):
        reset_global_state()
        return {'FINISHED'}

################################################################################
# Configuration.
def cl_query_executable_get():
    benchmark_data_dir = blender_benchmark_data_dir_get()
    system = platform.system()
    if system == "Linux":
        return os.path.join(benchmark_data_dir, "bin", "cl_query")
    elif system == "Windows":
        return os.path.join(benchmark_data_dir, "bin", "cl_query.exe")
    elif system == "Darwin":
        return os.path.join(benchmark_data_dir, "bin", "cl_query")
    else:
        raise Exception("Needs implementation")
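

# Run the bundled cl_query helper and parse its output, one line per OpenCL
# device ("device name:max compute units"), into (name, units) string pairs.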
def query_opencl_compute_units():
    binary = cl_query_executable_get()
    output = subprocess.run([binary], stdout=subprocess.PIPE).stdout
    lines = output.splitlines()
    compute_units = []
    for line in lines:
        (name, max_compute_units) = line.rsplit(b':', 1)
        compute_units.append((name.decode(), max_compute_units.decode()))
    return compute_units


def find_first_device_index(compute_units, device_name):
    if not compute_units:
        return -1
    for index, value in enumerate(compute_units):
        if value[0] == device_name:
            return index
    return -1
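

# Items callback of the Scene.compute_device EnumProperty. Identifiers are
# encoded as "TYPE:name:compute_units:index", matching the split(":") calls
# in modify_device_info() and invoke(). Results are cached because Blender
# invokes this callback on every redraw.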
def compute_device_list_get(self, context):
    global global_cached_system_info
    global global_cached_compute_devices
    if global_cached_compute_devices:
        return global_cached_compute_devices
    compute_devices = [('CPU:::', "CPU", "")]
    if not global_cached_system_info:
        ctx = benchmark_context.Context()
        ctx.blender = blender_executable_get()
        ctx.configure_script = configure_script_get()
        global_cached_system_info = system_info_get(ctx)
    compute_units = query_opencl_compute_units()
    device_indices = {}
    for device in global_cached_system_info["devices"]:
        raw_device_name = device["name"]
        device_type = device["type"]
        if raw_device_name in device_indices:
            device_indices[raw_device_name] += 1
            device_index = device_indices[raw_device_name]
        else:
            device_indices[raw_device_name] = 0
            device_index = 0
        if device_type == "CPU":
            continue
        elif device_type == "OPENCL":
            device_name = correct_device_name(device["name"])
            index = find_first_device_index(compute_units, device['name'])
            device_compute_units = ""
            if index != -1:
                if device["name"] == "Radeon RX Vega":
                    device_name += " " + compute_units[index][1]
                device_compute_units = str(compute_units[index][1])
                del compute_units[index]
            device_id = "{}:{}:{}:{}" . format(device_type,
                                               device["name"],
                                               device_compute_units,
                                               device_index)
            compute_devices.append((device_id, device_name, ""))
        elif device_type == "CUDA":
            device_name = correct_device_name(device["name"])
            device_id = "{}:{}::{}" . format(device_type,
                                             device["name"],
                                             device_index)
            compute_devices.append((device_id, device_name, ""))
    global_cached_compute_devices = compute_devices
    return compute_devices

################################################################################
# Tweak User Preferences
default_wcol_tool_inner = None
default_wcol_tool_inner_sel = None
default_wcol_tool_outline = None


def backup_buttons_colors():
    global default_wcol_tool_inner
    global default_wcol_tool_inner_sel
    global default_wcol_tool_outline
    userpref = bpy.context.user_preferences
    theme = userpref.themes[0]
    default_wcol_tool_inner = theme.user_interface.wcol_tool.inner[:]
    default_wcol_tool_inner_sel = theme.user_interface.wcol_tool.inner_sel[:]
    default_wcol_tool_outline = theme.user_interface.wcol_tool.outline[:]


def make_buttons_green():
    userpref = bpy.context.user_preferences
    theme = userpref.themes[0]
    theme.user_interface.wcol_tool.inner = [0.408, 0.590, 0.129, 1.0]
    theme.user_interface.wcol_tool.inner_sel = [0.308, 0.490, 0.029, 1.0]
    theme.user_interface.wcol_tool.outline = [0.408, 0.590, 0.129]


def make_buttons_default():
    userpref = bpy.context.user_preferences
    theme = userpref.themes[0]
    theme.user_interface.wcol_tool.inner = default_wcol_tool_inner
    theme.user_interface.wcol_tool.inner_sel = default_wcol_tool_inner_sel
    theme.user_interface.wcol_tool.outline = default_wcol_tool_outline
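

# Module-level preference tweaks, applied on import: disable the quit dialog,
# darken the benchmark space background, restyle the action buttons green and
# bump the widget font size of the bundled Blender.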
userpref = bpy.context.user_preferences
theme = userpref.themes[0]
userpref.view.use_quit_dialog = False
theme.benchmark.space.back = [0.26, 0.26, 0.26]
backup_buttons_colors()
make_buttons_green()
style = userpref.ui_styles[0]
style.widget.points = 12

################################################################################
# Registration

classes = (
    BENCHMARK_PT_main,
    BENCHMARK_OT_restart,
    BENCHMARK_OT_run_quick,
    BENCHMARK_OT_run_complete,
    BENCHMARK_OT_save,
    BENCHMARK_OT_share,
    BENCHMARK_OT_opendata_link,
)

Scene.compute_device = EnumProperty(
    items=compute_device_list_get,
    name="Compute Device",
    description="Compute device to run benchmark on")
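
# A register()/unregister() pair is expected to follow in the full file. As a
# hypothetical sketch only (not part of this excerpt), the standard Blender
# pattern over the `classes` tuple would be:
#
# def register():
#     for cls in classes:
#         bpy.utils.register_class(cls)
#
#
# def unregister():
#     for cls in classes:
#         bpy.utils.unregister_class(cls)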