import datetime
import json
import os
import sys
from threading import Thread, Lock

import blf
import bpy
from bpy.types import Panel
from bpy.props import EnumProperty, IntProperty, FloatProperty

from ..foundation import (benchrunner,
                          buildbot,
                          config,
                          logger,
                          progress,
                          system_info,
                          util)
from ..foundation import context as benchmark_context

################################################################################
# Global state.

# Scene sets for the two benchmark flavors.
QUICK_SCENES = ["bmw27", "classroom"]
COMPLETE_SCENES = ["barbershop_interior",
                   "bmw27",
                   "classroom",
                   "fishy_cat",
                   "koro",
                   "pavillon_barcelona",
                   "victor"]

# Shared between the benchmark thread and the drawing code; guarded by
# progress_lock.
global_result_platform = None
global_progress_status = None
global_result_stats = None
global_result_dict = None
global_background_image_path = ""
global_scene_status = {}

# Cache of loaded background/logo images, keyed by file path.
images = {}

current_progress = 0.0
progress_lock = Lock()

################################################################################
# Draw Utilities.

font_id = 0


def viewport_size():
    """Return the size of the current GL viewport as (width, height)."""
    import bgl
    buf = bgl.Buffer(bgl.GL_INT, 4)
    bgl.glGetIntegerv(bgl.GL_VIEWPORT, buf)
    return buf[2], buf[3]


def draw_text_center(text, x, y):
    """Draw text with its bounding box centered on (x, y)."""
    width, height = blf.dimensions(font_id, text)
    blf.position(font_id, x - int(width / 2), y - int(height / 2), 0)
    blf.draw(font_id, text)


def draw_text_multiline(text, x, y):
    """Draw text line by line, going downwards starting just below y."""
    ui_scale = bpy.context.user_preferences.system.ui_scale
    # Measure a sample string once; all lines advance by the same height.
    line_height = int(blf.dimensions(font_id, "Dummy Text")[1])
    line_gap = int(8 * ui_scale)
    cursor_y = y
    for line in text.split('\n'):
        blf.position(font_id, x, cursor_y - line_height, 0)
        blf.draw(font_id, line)
        cursor_y -= line_height + line_gap


def draw_rect(x, y, w, h, color):
    """Fill an axis-aligned rectangle with the given RGBA color."""
    import gpu
    r, g, b, a = color[0], color[1], color[2], color[3]
    gpu.draw.rect(x, y, x + w, y + h, r, g, b, a)


def draw_image(filepath, x, y, w, h):
    """Draw an image file, loading and caching it on first use."""
    global images
    if filepath not in images:
        images[filepath] = bpy.data.images.load(filepath)
    import gpu
    gpu.draw.image(images[filepath], x, y, x + w, y + h)

################################################################################
# Draw.
def benchmark_draw_post_pixel(arg1, arg2):
    """Draw handler for the benchmark space (POST_PIXEL stage).

    Paints the whole benchmark UI: background image, progress bar,
    status/platform text, the final result screen, and the bottom bar
    with the Blender logo.

    The benchmark runs in a background thread which updates the module
    globals, so a consistent snapshot is taken under progress_lock and
    only the snapshot is used for drawing.  (The original mixed snapshot
    and direct global reads, defeating the lock.)
    """
    with progress_lock:
        progress_status = global_progress_status
        result_platform = global_result_platform
        result_stats = global_result_stats
        result_dict = global_result_dict
        background_image_path = global_background_image_path
        progress_fraction = current_progress

    ui_scale = bpy.context.user_preferences.system.ui_scale
    blf.color(font_id, 1.0, 1.0, 1.0, 1.0)
    window_width, window_height = viewport_size()

    # Image strip across the top of the window.
    image_h = 370 * ui_scale
    image_y = window_height - image_h
    if background_image_path:
        draw_image(background_image_path, 0, image_y, window_width, image_h)
    else:
        splash_dir = os.path.dirname(os.path.abspath(__file__))
        splash_filepath = os.path.join(splash_dir, 'splash.png')
        draw_image(splash_filepath, 0, image_y, window_width, image_h)

    if result_dict:
        # Final screen: total render time, or CRASH when any scene failed.
        x = 0.5 * window_width
        y = 0.70 * window_height
        score = 0
        crashed = False
        for stat in result_dict["stats"]:
            if stat["result"] == "OK":
                score += stat["total_render_time"]
            else:
                # NOTE: Do not keep accumulating after a crash -- a crash
                # followed by an OK scene must still report CRASH.
                crashed = True
        blf.size(font_id, int(32 * ui_scale), 72)
        if not crashed:
            draw_text_center("Render time is: {}" . format(
                util.humanReadableTimeDifference(score)), x, y)
        else:
            # TODO(sergey): What is the score?
            draw_text_center("CRASH :(", x, y)
    elif result_stats or result_platform or progress_status:
        # Benchmark is running: stats on the left, platform on the right.
        blf.size(font_id, int(12 * ui_scale), 72)
        x = 50.0 * ui_scale
        y = image_y - 20 * ui_scale
        # Stats
        if result_platform:
            draw_text_multiline(result_platform, 0.5 * window_width + x, y)
        if result_stats:
            draw_text_multiline(result_stats, x, y)
        # Progress bar directly under the image strip.
        progress_x = 0.0
        progress_y = image_y + 1
        progress_w = window_width * progress_fraction
        progress_h = 15.0 * ui_scale
        progress_color = [0.8, 1.0, 1.0, 0.2]
        draw_rect(progress_x, progress_y, progress_w, progress_h,
                  progress_color)
        # Current status
        if progress_status:
            blf.size(font_id, int(18 * ui_scale), 72)
            draw_text_multiline(progress_status,
                                progress_x + 8.0 * ui_scale,
                                progress_y + progress_h + int(22 * ui_scale))
    else:
        # Title (welcome) screen.
        x = 0.5 * window_width
        y = 0.70 * window_height
        blf.size(font_id, int(32 * ui_scale), 72)
        draw_text_center("Blender Benchmark 1.0", x, y)
        y -= 32 * ui_scale
        blf.size(font_id, int(12 * ui_scale), 72)
        draw_text_center("Explore the results on opendata.blender.org", x, y)

    # Bottom bar
    bottom_x = 0
    bottom_y = 0
    bottom_w = window_width
    bottom_h = 52 * ui_scale
    bottom_color = [0.2, 0.2, 0.2, 1.0]
    draw_rect(bottom_x, bottom_y, bottom_w, bottom_h, bottom_color)

    # Logo
    # TODO(sergey): Make it DPI aware.
    logo_dir = os.path.dirname(os.path.abspath(__file__))
    logo_filepath = os.path.join(logo_dir, 'blender.png')
    draw_image(logo_filepath, 12, (bottom_h - 24) / 2, 85, 24)


handle_draw = bpy.types.SpaceBenchmark.draw_handler_add(
    benchmark_draw_post_pixel, (None, None), 'WINDOW', 'POST_PIXEL')

################################################################################
# Benchmark foundation integration.
class ProgressProviderSink:
    """Progress sink plugged into the benchmark foundation.

    The callbacks are invoked from the benchmark thread while the draw
    code reads the shared state from the main thread, so every access is
    serialized with progress_lock.
    """

    current_progress = 0.0
    current_step = ''
    current_scene = ''

    def __init__(self):
        self.current_progress = 0.0
        self.current_step = ''

    def progress(self, count, total, prefix="", suffix=""):
        # Progress of the current step as a fraction in [0, 1].
        with progress_lock:
            if total != 0:
                self.current_progress = float(count) / float(total)
            else:
                self.current_progress = 0.0

    def clear(self):
        with progress_lock:
            self.current_progress = 0

    def step(self, step_name):
        # Progress restarts from zero whenever a new step begins.
        with progress_lock:
            if self.current_step != step_name:
                self.current_step = step_name
                self.current_progress = 0

    def scene(self, scene_name):
        # Empty scene_name means "no scene currently being rendered".
        with progress_lock:
            self.current_scene = scene_name
            if scene_name:
                global global_scene_status
                global_scene_status[scene_name] = "Rendering..."

    def scene_stats(self, scene_name, stats):
        # stats is None when rendering of the scene crashed.
        with progress_lock:
            global global_scene_status
            if stats:
                global_scene_status[scene_name] = \
                    util.humanReadableTimeDifference(stats.total_render_time)
            else:
                global_scene_status[scene_name] = "Crashed :("


class LoggerProviderSink:
    """Logger sink which suppresses all foundation log output in the GUI."""

    def HEADER(self, *args):
        pass

    def WARNING(self, *args):
        pass

    def ERROR(self, *args):
        pass

    def OK(self, *args):
        pass

    def BOLD(self, *args):
        pass

    def INFO(self, *args):
        pass

    def DEBUG(self, *args):
        pass

    def FATAL(self, *args):
        pass


################################################################################
# Benchmark thread.


def string_strip_trademark(name):
    """Strip (R) and (TM) marks from a hardware brand string."""
    return name.replace("(R)", "").replace("(TM)", "")


def correct_device_name(name):
    """Prefix a bare GPU name with its vendor when the driver omits it."""
    if (name.startswith("TITAN") or
            name.startswith("Quadro") or
            name.startswith("GeForce")):
        return "Nvidia " + name
    if name.startswith("Radeon"):
        return "AMD " + name
    return name


def construct_gpu_string(system_info):
    """Return comma-separated names of all non-CPU compute devices."""
    gpu_names = []
    for device in system_info["devices"]:
        if device["type"] == "CPU":
            continue
        gpu_names.append(correct_device_name(device["name"]))
    return ", " . join(gpu_names)


def construct_platform_string(system_info):
    """
    Construct human readable platform string to show in the interface.
    """
    result = ""
    result += "Operating System: {} {} bit" . format(system_info["system"],
                                                     system_info["bitness"])
    result += "\nCPU: {}" . format(
        string_strip_trademark(system_info["cpu_brand"]))
    gpus = construct_gpu_string(system_info)
    if gpus:
        result += "\nGPU(s): {}" . format(gpus)
    return result


def convert_result_to_json_dict(ctx, results):
    """Flatten benchmark results into plain JSON-serializable dicts.

    NOTE: Modifies (and returns) the passed-in results dictionary: its
    'stats' mapping is replaced with a list ordered by ctx.scenes, with
    per-scene 'result' and 'scene_name' fields filled in.
    """
    # Convert custom classes to dictionaries for easier JSON dump.
    json_results = results
    stats = json_results['stats']
    json_results['stats'] = []
    for scene in ctx.scenes:
        if scene not in stats:
            continue
        stat = stats[scene]
        if stat:
            stat = stat.asDict()
            stat['result'] = 'OK'
        else:
            # A missing stats object means that scene's render crashed.
            stat = {'result': 'CRASH'}
        stat["scene_name"] = scene
        json_results['stats'].append(stat)
    return json_results


def benchmark_thread(ctx):
    """Entry point of the background thread running the benchmark.

    Publishes progress and the final result through the module globals
    guarded by progress_lock; the result ends up in global_result_dict.
    """
    global progress_lock, global_result_platform, global_progress_status
    with progress_lock:
        global_progress_status = "Collecting system information..."
    # This is all system information Blender knows.
    # NOTE: We override executable since cpuinfo uses it, and it is set
    # to blender.  Restore it afterwards: the original never did, leaking
    # the override into the rest of the session.
    old_executable = sys.executable
    sys.executable = bpy.app.binary_path_python
    try:
        blender_system_info = system_info.gatherSystemInfo(ctx)
    finally:
        sys.executable = old_executable
    # This is actual device configuration which is used to render the
    # benchmark scene.
    blender_device_info = benchrunner.benchmarkGetDeviceInfo(ctx)
    if not blender_device_info['device_type']:
        # TODO(sergey): Report an error somehow.
        return
    with progress_lock:
        global_result_platform = construct_platform_string(
            blender_system_info)
        global_progress_status = "Preparing render..."
    all_stats = benchrunner.benchmarkAll(ctx)
    # Gather all information together.
    timestamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
    result = convert_result_to_json_dict(ctx, {
        "timestamp": timestamp,
        "blender_version": system_info.getBlenderVersion(ctx),
        "system_info": blender_system_info,
        "device_info": blender_device_info,
        "stats": all_stats if all_stats else {}})
    global global_result_dict
    global_result_dict = result


################################################################################
# Panels


class BENCHMARK_PT_main(Panel):
    """Single panel driving the whole benchmark UI.

    Which screen is shown depends on benchmark state: welcome buttons
    before a run, nothing while running (the draw handler paints the
    progress), and share/save buttons once a result exists.
    """

    bl_label = "Benchmark"
    bl_options = {'HIDE_HEADER'}
    bl_space_type = 'BENCHMARK'
    bl_region_type = 'WINDOW'

    def draw_welcome(self, context):
        # Initial screen: quick/complete benchmark buttons.
        layout = self.layout
        split = layout.split(0.65)
        split.label()
        split = split.split(0.97)
        col = split.column()
        sub = col.row()
        sub.scale_y = 64.0
        sub.separator()
        sub = col.row()
        sub.scale_y = 2.25
        props = sub.operator("benchmark.run", text="QUICK BENCHMARK")
        props.run_type = 'QUICK'
        col.separator()
        sub = col.row()
        sub.emboss = 'NONE'
        sub.scale_y = 1.5
        props = sub.operator("benchmark.run",
                             text="COMPLETE BENCHMARK (approx. 1.h)")
        props.run_type = 'COMPLETE'
        split.label()

    def draw_submit(self, context):
        # Final screen: share or save the collected result.
        layout = self.layout
        split = layout.split(0.65)
        split.label()
        split = split.split(0.97)
        col = split.column()
        sub = col.row()
        sub.scale_y = 64.0
        sub.separator()
        sub = col.row()
        sub.scale_y = 2.25
        sub.operator("benchmark.share", text="SHARE")
        sub = col.row()
        sub.emboss = 'NONE'
        sub.scale_y = 1.5
        sub.operator("benchmark.save", text="SAVE")
        split.label()

    def draw(self, context):
        # 0: welcome, 1: benchmark running (panel draws nothing),
        # 2: submit.
        screen_index = 0
        global progress_lock
        with progress_lock:
            if global_result_dict:
                screen_index = 2
            elif global_result_stats or global_progress_status:
                screen_index = 1
        if screen_index == 0:
            self.draw_welcome(context)
        elif screen_index == 2:
            self.draw_submit(context)


################################################################################
# Operator


class BENCHMARK_OT_run(bpy.types.Operator):
    "Run Blender benchmark"
    bl_label = "Run Benchmark"
    bl_idname = "benchmark.run"

    enum_run_type = (
        ('QUICK', "QUICK", ""),
        ('COMPLETE', "COMPLETE", ""),
    )
    run_type: EnumProperty(
        name="Run Type",
        description="",
        items=enum_run_type,
        default='QUICK',
    )

    # Runtime state, valid between invoke() and done().
    benchmark_context = None
    thread = None
    timer = None
    progress_provider = None
    logger_provider = None

    def setup_sink(self):
        """Route foundation progress/log output into this operator."""
        self.progress_provider = ProgressProviderSink()
        self.logger_provider = LoggerProviderSink()
        progress.setProvider(self.progress_provider)
        logger.setProvider(self.logger_provider)

    def update_status(self, context):
        """Mirror thread-side progress into the globals the draw code uses."""
        global global_progress_status, global_background_image_path
        with progress_lock:
            step = self.progress_provider.current_step
            if step == 'WARM_UP':
                global_progress_status = "Rendering warm-up pass..."
            elif step == 'RUN':
                global current_progress
                current_progress = self.progress_provider.current_progress
                global_progress_status = "Rendering..."
            context.area.tag_redraw()
            # Path to currently displayed background image.
            current_scene = self.progress_provider.current_scene
            if current_scene:
                global_background_image_path = os.path.join(
                    self.benchmark_context.scenes_dir,
                    current_scene,
                    current_scene + ".png")
            else:
                global_background_image_path = ""
            # Update per-scene status string
            global global_result_stats
            global_result_stats = ""
            for scene, status in global_scene_status.items():
                global_result_stats += "{}: {}\n" . format(scene, status)

    def done(self, context):
        """Tear down after the benchmark thread has finished."""
        global global_progress_status, global_result_stats, current_progress
        wm = context.window_manager
        wm.event_timer_remove(self.timer)
        # Restore all modifications to the benchmark foundation.
        progress.restoreDefaultProvider()
        logger.restoreDefaultProvider()
        # Drop references to the sink objects.
        self.progress_provider = None
        self.logger_provider = None
        # Construct final stats string
        global global_result_dict
        stat_lines = []
        for stat in global_result_dict["stats"]:
            if stat["result"] == "OK":
                stat_lines.append("{}: {}" . format(
                    stat["scene_name"],
                    util.humanReadableTimeDifference(
                        stat["total_render_time"])))
            else:
                stat_lines.append("{}: {}" . format(stat["scene_name"],
                                                    stat["result"]))
        global_result_stats = "\n".join(stat_lines)
        # TODO(sergey): Use some more nice picture for the final slide.
        global global_background_image_path
        global_background_image_path = ""
        # Tag for nice redraw
        global_progress_status = ""
        current_progress = 0.0
        context.area.tag_redraw()

    def modal(self, context, event):
        # Poll the benchmark thread on every timer tick.
        if event.type == 'TIMER':
            if self.thread.is_alive():
                self.update_status(context)
                return {'PASS_THROUGH'}
            else:
                self.done(context)
                return {'FINISHED'}
        return {'PASS_THROUGH'}

    def invoke(self, context, event):
        global global_result_platform, global_progress_status
        global global_scene_status
        global_result_platform = ""
        global_progress_status = "Initializing..."
        context.area.tag_redraw()
        # Before doing anything, make sure we have all sinks set up, so we do
        # not miss any progress report.
        self.setup_sink()
        wm = context.window_manager
        script_directory = os.path.dirname(os.path.realpath(__file__))
        benchmark_binary_dir = os.path.dirname(bpy.app.binary_path)
        benchmark_script_directory = os.path.dirname(script_directory)
        configure_script = os.path.join(
            benchmark_script_directory, "configure.py")
        ctx = benchmark_context.Context()
        # TODO(sergey): Update for MacOS and Windows.
        ctx.blender = os.path.join(benchmark_binary_dir, "blender", "blender")
        ctx.configure_script = configure_script
        if self.run_type == 'QUICK':
            ctx.scenes = QUICK_SCENES
        else:
            ctx.scenes = COMPLETE_SCENES
        for scene in ctx.scenes:
            global_scene_status[scene] = "N/A"
        ctx.scenes_dir = os.path.join(benchmark_binary_dir, "scenes")
        ctx.device_type = 'CPU'
        # Only applies for GPU, should match Cycles name
        ctx.device_name = ""
        # Set this to True when having multiple GPUs of same name and only
        # one of them is to be enabled. Or when requesting GPU render without
        # specifying GPU name.
        ctx.device_single = True
        # ctx.image_output_dir = "/tmp/"
        self.benchmark_context = ctx
        # Create thread for the actual benchmark.
        self.thread = Thread(target=benchmark_thread,
                             args=(self.benchmark_context,))
        self.thread.start()
        # Create timer to query thread status
        self.timer = wm.event_timer_add(0.1, context.window)
        # Register self as modal.
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}

    def cancel(self, context):
        # When user closes window.  NOTE(review): the benchmark thread is
        # left running; consider signaling it to stop.
        return


class BENCHMARK_OT_save(bpy.types.Operator):
    """Save the benchmark result as JSON to a user-chosen file"""
    bl_idname = "benchmark.save"
    bl_label = "Save Benchmark Result"

    filepath: bpy.props.StringProperty(
        subtype='FILE_PATH',
        options={'SKIP_SAVE'},
    )

    def execute(self, context):
        with open(self.filepath, "w") as f:
            f.write(json.dumps(global_result_dict, sort_keys=True, indent=2))
        return {'FINISHED'}

    def invoke(self, context, event):
        # Suggest a default file in the user's home directory.
        if not self.filepath:
            self.filepath = os.path.join(
                os.path.expanduser("~"), "benchmark-result.txt")
        wm = context.window_manager
        wm.fileselect_add(self)
        return {'RUNNING_MODAL'}


class BENCHMARK_OT_share(bpy.types.Operator):
    """Share the benchmark result online (not implemented yet)"""
    bl_idname = "benchmark.share"
    bl_label = "Share Benchmark Result"

    def invoke(self, context, event):
        return {'FINISHED'}


################################################################################
# Tweak User Preferences

# Force the benchmark look: green buttons, dark space background and a
# slightly larger widget font.
userpref = bpy.context.user_preferences
theme = userpref.themes[0]
theme.user_interface.wcol_tool.inner = [0.408, 0.690, 0.129, 1.0]
theme.user_interface.wcol_tool.inner_sel = [0.308, 0.590, 0.029, 1.0]
theme.user_interface.wcol_tool.outline = [0.408, 0.690, 0.129]
theme.benchmark.space.back = [0.26, 0.26, 0.26]
style = userpref.ui_styles[0]
style.widget.points = 12

################################################################################
# Registration

classes = (
    BENCHMARK_PT_main,
    BENCHMARK_OT_run,
    BENCHMARK_OT_save,
    BENCHMARK_OT_share,
)