# blender-benchmark-bundle/benchmark/space/__init__.py
import datetime
import json
import os
import sys
from threading import Thread, Lock
import blf
import bpy
from bpy.types import Panel
from bpy.props import IntProperty, FloatProperty
from ..foundation import (benchrunner,
                          buildbot,
                          config,
                          logger,
                          progress,
                          system_info,
                          util)
from ..foundation import context as benchmark_context
################################################################################
# Global state.
global_result_platform = None
global_result_stats = None
global_result_dict = None
images = {}
current_progress = 0.0
progress_lock = Lock()
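# NOTE: The globals above are shared between the benchmark worker thread and
# the main (UI) thread: the worker writes the platform/stats strings, the draw
# callback and panel read them. progress_lock serializes that access so the
# UI never observes a half-written value.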
################################################################################
# Draw Utilities.
font_id = 0
def viewport_size():
    import bgl
    viewport = bgl.Buffer(bgl.GL_INT, 4)
    bgl.glGetIntegerv(bgl.GL_VIEWPORT, viewport)
    return viewport[2], viewport[3]
def draw_text_center(text, x, y):
    dim = blf.dimensions(font_id, text)
    cx = x - dim[0] / 2
    cy = y - dim[1] / 2
    blf.position(font_id, cx, cy, 0)
    blf.draw(font_id, text)
def draw_text_multiline(text, x, y):
    space = 8
    for line in text.split('\n'):
        dim = blf.dimensions(font_id, line)
        y -= dim[1]
        blf.position(font_id, x, y, 0)
        blf.draw(font_id, line)
        y -= space
def draw_rect(x, y, w, h, color):
    import gpu
    gpu.draw.rect(x, y, x + w, y + h, color[0], color[1], color[2], color[3])
def draw_image(filepath, x, y, w, h):
    global images
    if filepath not in images:
        ima = bpy.data.images.load(filepath)
        images[filepath] = ima
    import gpu
    gpu.draw.image(images[filepath], x, y, x + w, y + h)
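# NOTE: gpu.draw.rect()/gpu.draw.image() and the SpaceBenchmark type used
# below are not part of the stock Blender Python API; they are presumably
# provided by the patched Blender build shipped with the benchmark bundle.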
################################################################################
# Draw.
def benchmark_draw_post_pixel(arg1, arg2):
    global progress_lock
    progress_lock.acquire()
    result_platform = global_result_platform
    result_stats = global_result_stats
    progress_lock.release()
    ui_scale = bpy.context.user_preferences.system.ui_scale
    blf.color(font_id, 1.0, 1.0, 1.0, 1.0)
    window_width, window_height = viewport_size()
    # Image
    image_h = 370 * ui_scale
    image_y = window_height - image_h
    splash_dir = os.path.dirname(os.path.abspath(__file__))
    splash_filepath = os.path.join(splash_dir, 'splash.png')
    draw_image(splash_filepath, 0, image_y, window_width, image_h)
    if result_stats:
        blf.size(font_id, int(12 * ui_scale), 72)
        x = 50.0 * ui_scale
        y = image_y - 20 * ui_scale
        # Stats
        draw_text_multiline(result_platform, x, y)
        draw_text_multiline(result_stats, 0.5 * window_width + x, y)
        progress_x = 0.0
        progress_y = image_y + 1
        progress_w = window_width * current_progress
        progress_h = 15.0 * ui_scale
        progress_color = [0.8, 1.0, 1.0, 0.2]
        draw_rect(progress_x, progress_y, progress_w, progress_h, progress_color)
    else:
        # Title
        x = 0.5 * window_width
        y = 0.70 * window_height
        blf.size(font_id, int(32 * ui_scale), 72)
        draw_text_center("Blender Benchmark 1.0", x, y)
        y -= 32 * ui_scale
        blf.size(font_id, int(12 * ui_scale), 72)
        draw_text_center("Explore the results on opendata.blender.org", x, y)
    # Bottom bar
    bottom_x = 0
    bottom_y = 0
    bottom_w = window_width
    bottom_h = 52 * ui_scale
    bottom_color = [0.2, 0.2, 0.2, 1.0]
    draw_rect(bottom_x, bottom_y, bottom_w, bottom_h, bottom_color)
handle_draw = bpy.types.SpaceBenchmark.draw_handler_add(
    benchmark_draw_post_pixel, (None, None), 'WINDOW', 'POST_PIXEL')
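# The POST_PIXEL handler runs in pixel space after the region content is
# drawn, every time the area is redrawn; (None, None) fills the unused
# arg1/arg2 callback parameters.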
################################################################################
# Benchmark foundation integration.
class ProgressProviderSink:
    current_progress = 0.0
    current_step = ''

    def __init__(self):
        self.current_progress = 0.0
        self.current_step = ''

    def progress(self, count, total, prefix="", suffix=""):
        if total != 0:
            self.current_progress = float(count) / float(total)
        else:
            self.current_progress = 0.0

    def clear(self):
        self.current_progress = 0

    def step(self, step_name):
        if self.current_step != step_name:
            self.current_step = step_name
            self.current_progress = 0
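# The sink is installed via progress.setProvider() and receives callbacks on
# the worker thread; BENCHMARK_OT_run polls its fields from a timer on the
# main thread, which only ever reads the values and never mutates them.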
class LoggerProviderSink:
    def HEADER(self, *args):
        pass
    def WARNING(self, *args):
        pass
    def ERROR(self, *args):
        pass
    def OK(self, *args):
        pass
    def BOLD(self, *args):
        pass
    def INFO(self, *args):
        pass
    def DEBUG(self, *args):
        pass
    def FATAL(self, *args):
        pass
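# A do-nothing logger: while the benchmark UI owns the screen, all log levels
# from the foundation are swallowed rather than printed to the console.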
################################################################################
# Benchmark thread.
def string_strip_trademark(name):
    return name.replace("(R)", "").replace("(TM)", "")
def correct_device_name(name):
    if (name.startswith("TITAN") or
            name.startswith("Quadro") or
            name.startswith("GeForce")):
        return "Nvidia " + name
    if name.startswith("Radeon"):
        return "AMD " + name
    return name
def construct_gpu_string(system_info):
    gpu_names = []
    for device in system_info["devices"]:
        if device["type"] == "CPU":
            continue
        gpu_names.append(correct_device_name(device["name"]))
    return ", " . join(gpu_names)
def construct_platform_string(system_info):
"""
Construct human readable platform string to show in the interface.
"""
result = ""
result += "Operation System: {} {} bit" . format(system_info["system"],
system_info["bitness"])
result += "\nCPU: {}" . format(
string_strip_trademark(system_info["cpu_brand"]))
gpus = construct_gpu_string(system_info)
if gpus:
result += "\nGPU(s): {}" . format(gpus)
return result
def convert_result_to_json_dict(ctx, results):
    # Convert custom classes to dictionaries for easier JSON dump.
    json_results = results
    stats = json_results['stats']
    json_results['stats'] = []
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        stat = stats[scene]
        if stat:
            stat = stat.asDict()
            stat['result'] = 'OK'
        else:
            stat = {'result': 'CRASH'}
        stat["scene"] = scene
        json_results['stats'].append(stat)
    return json_results
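# The rewritten 'stats' entry is a plain list of dictionaries, e.g.
# (hypothetical scene names and timings):
#   [{"scene": "bmw27", "result": "OK", "total_render_time": 123.4, ...},
#    {"scene": "classroom", "result": "CRASH"}]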
def benchmark_thread(ctx):
    global progress_lock, global_result_platform, global_result_stats
    progress_lock.acquire()
    global_result_stats = "Collecting system information..."
    progress_lock.release()
    # This is all the system information Blender knows.
    # NOTE: We override sys.executable since cpuinfo uses it, and inside
    # Blender it is set to the blender binary.
    old_executable = sys.executable
    sys.executable = bpy.app.binary_path_python
    blender_system_info = system_info.gatherSystemInfo(ctx)
    # Restore the original executable once gathering is done.
    sys.executable = old_executable
    # This is the actual device configuration which is used to render the
    # benchmark scenes.
    blender_device_info = benchrunner.benchmarkGetDeviceInfo(ctx)
    if not blender_device_info['device_type']:
        # TODO(sergey): Report an error somehow.
        return
    progress_lock.acquire()
    global_result_platform = construct_platform_string(blender_system_info)
    progress_lock.release()
    progress_lock.acquire()
    global_result_stats = "Preparing render..."
    progress_lock.release()
    all_stats = benchrunner.benchmarkAll(ctx)
    # Gather all information together.
    timestamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
    result = convert_result_to_json_dict(ctx, {
        "timestamp": timestamp,
        "blender_version": system_info.getBlenderVersion(ctx),
        "system_info": blender_system_info,
        "device_info": blender_device_info,
        "stats": all_stats if all_stats else {}
    })
    global global_result_dict
    global_result_dict = result
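# global_result_dict is the hand-off point: it is written here on the worker
# thread and read by BENCHMARK_OT_run.done() once modal() sees the thread end.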
################################################################################
# Panels
class BENCHMARK_PT_main(Panel):
bl_label = "Benchmark"
bl_options = {'HIDE_HEADER'}
bl_space_type = 'BENCHMARK'
bl_region_type = 'WINDOW'
def draw(self, context):
global progress_lock
progress_lock.acquire()
if global_result_stats:
progress_lock.release()
return
progress_lock.release()
layout = self.layout
split = layout.split(0.65)
split.label()
split = split.split(0.97)
col = split.column()
sub = col.row()
sub.scale_y = 64.0
sub.separator()
sub = col.row()
sub.scale_y = 2.25
sub.operator("benchmark.run", text="QUICK BENCHMARK")
col.separator()
sub = col.row()
sub.emboss = 'NONE'
sub.scale_y = 1.5
sub.operator("benchmark.run", text="COMPLETE BENCHMARK (approx. 1.h)")
sub = col.row()
sub.emboss = 'NONE'
sub.scale_y = 1.5
sub.operator("benchmark.save", text="SAVE")
split.label()
################################################################################
# Operator
class BENCHMARK_OT_run(bpy.types.Operator):
"Run Blender benchmark"
bl_label = "Run Benchmark"
bl_idname = "benchmark.run"
benchmark_context = None
thread = None
timer = None
progress_provider = None
logger_provider = None
def setup_sink(self):
self.progress_provider = ProgressProviderSink()
self.logger_provider = LoggerProviderSink()
progress.setProvider(self.progress_provider)
logger.setProvider(self.logger_provider)
def update_status(self, context):
global global_result_stats
step = self.progress_provider.current_step
if step == 'WARM_UP':
global_result_stats = "Rendering warm-up pass..."
elif step == 'RUN':
global current_progress
current_progress = self.progress_provider.current_progress
global_result_stats = "Rendering..."
context.area.tag_redraw()
def done(self, context):
global global_result_stats, current_progress
wm = context.window_manager
wm.event_timer_remove(self.timer)
# Restore all modifications to the benchmark foundation.
progress.restoreDefaultProvider()
logger.restoreDefaultProvider()
# Destroy objects of sinks.
del self.progress_provider
del self.logger_provider
self.progress_provider = None
self.logger_provider = None
# Construct final stats string
global global_result_dict
global_result_stats = ""
for stat in global_result_dict["stats"]:
if global_result_stats:
global_result_stats += "\n"
if stat["result"] == "OK":
global_result_stats += "{}: {}" . format(stat["scene"],
util.humanReadableTimeDifference(stat["total_render_time"]))
else:
global_result_stats += "{}: {}" . format(stat["scene"],
stat["result"])
# Tag for nice redraw
current_progress = 0.0
context.area.tag_redraw()
def modal(self, context, event):
if event.type == 'TIMER':
if self.thread.is_alive():
self.update_status(context)
return {'PASS_THROUGH'}
else:
self.done(context)
return {'FINISHED'}
return {'PASS_THROUGH'}
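    # A standard Blender modal-operator pattern: the window-manager timer
    # fires TIMER events roughly every 0.1s, modal() polls the worker thread,
    # and PASS_THROUGH keeps normal UI events flowing while the render runs.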
    def invoke(self, context, event):
        global global_result_platform, global_result_stats
        global_result_platform = ""
        global_result_stats = "Initializing..."
        context.area.tag_redraw()
        # Before doing anything, make sure we have all sinks set up, so we do
        # not miss any progress report.
        self.setup_sink()
        wm = context.window_manager
        script_directory = os.path.dirname(os.path.realpath(__file__))
        benchmark_script_directory = os.path.dirname(script_directory)
        configure_script = os.path.join(benchmark_script_directory, "configure.py")
        ctx = benchmark_context.Context()
        ctx.blender = "<blender>"
        ctx.configure_script = configure_script
        ctx.scenes = ["<monkey>"]
        ctx.scenes_dir = "<scenes_folder>"
        ctx.device_type = 'CPU'
        # Only applies for GPU, should match the Cycles device name.
        ctx.device_name = ""
        # Set this to True when having multiple GPUs of the same name and only
        # one of them is to be enabled, or when requesting GPU render without
        # specifying a GPU name.
        ctx.device_single = True
        # ctx.image_output_dir = "/tmp/"
        self.benchmark_context = ctx
        # Create a thread for the actual benchmark.
        self.thread = Thread(target=benchmark_thread,
                             args=(self.benchmark_context,))
        self.thread.start()
        # Create a timer to query the thread status.
        self.timer = wm.event_timer_add(0.1, context.window)
        # Register self as modal.
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}
    def cancel(self, context):
        # When the user closes the window.
        return
class BENCHMARK_OT_save(bpy.types.Operator):
bl_idname = "benchmark.save"
bl_label = "Save Benchmark Result"
filepath: bpy.props.StringProperty(
subtype='FILE_PATH',
options={'SKIP_SAVE'},
)
def execute(self, context):
print("Saving to " + self.filepath)
return {'FINISHED'}
def invoke(self, context, event):
import os
if not self.filepath:
self.filepath = os.path.join(
os.path.expanduser("~"), "benchmark-result.txt")
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
################################################################################
# Tweak User Preferences
userpref = bpy.context.user_preferences
theme = userpref.themes[0]
theme.user_interface.wcol_tool.inner = [0.408, 0.690, 0.129, 1.0]
theme.user_interface.wcol_tool.inner_sel = [0.308, 0.590, 0.029, 1.0]
theme.user_interface.wcol_tool.outline = [0.408, 0.690, 0.129]
theme.benchmark.space.back = [0.26, 0.26, 0.26]
style = userpref.ui_styles[0]
style.widget.points = 12
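# These tweaks run once at import time: they restyle the tool-button widget
# colors, the (presumably custom) benchmark space background, and the widget
# font size before the UI is first drawn.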
################################################################################
# Registration
classes = (
    BENCHMARK_PT_main,
    BENCHMARK_OT_run,
    BENCHMARK_OT_save,
)