diff --git a/benchmark/configure.py b/benchmark/configure.py index 1965c53..9924d63 100644 --- a/benchmark/configure.py +++ b/benchmark/configure.py @@ -162,6 +162,12 @@ def main(): scene.cycles.samples = 1 scene.cycles.aa_samples = 1 + # Cycles device. + device = 'CPU' if args.benchmark_device_type == 'CPU' else 'GPU' + for scene in bpy.data.scenes: + scene.cycles.device = device + # TODO(sergey): Tweak tile size. + # Report number of CPU threads used. for scene in bpy.data.scenes: print("CPU threads used: {}" . format(scene.render.threads)) diff --git a/benchmark/foundation/context.py b/benchmark/foundation/context.py index 9b13763..284a9fa 100644 --- a/benchmark/foundation/context.py +++ b/benchmark/foundation/context.py @@ -11,7 +11,8 @@ class Context: 'scenes', 'scenes_dir', 'configure_script', - 'image_output_dir') + 'image_output_dir', + 'single_compute_scene') def __init__(self): # Full path to blender binary. @@ -31,6 +32,9 @@ class Context: # Directory where render result images will be saved. # Empty means no results are saved. self.image_output_dir = "" + # There is only one file for each of the compute device types, which will + # do some post-load setup for compute device and tile size.
+ self.single_compute_scene = False def listAllScenes(self, directory): import os @@ -83,7 +87,9 @@ class Context: return True def getDeviceFileSuffix(self): - if self.device_type == 'CPU': + if self.single_compute_scene: + return "" + elif self.device_type == 'CPU': return '_cpu' elif self.device_type in ('CUDA', 'OPENCL', 'GPU'): return '_gpu' diff --git a/benchmark/space/__init__.py b/benchmark/space/__init__.py index 5bc6efa..f434e3b 100644 --- a/benchmark/space/__init__.py +++ b/benchmark/space/__init__.py @@ -6,7 +6,7 @@ import sys from threading import Thread, Lock import blf import bpy -from bpy.types import Panel +from bpy.types import Panel, Scene from bpy.props import EnumProperty, IntProperty, FloatProperty from ..foundation import (benchrunner, buildbot, @@ -38,6 +38,8 @@ global_result_dict = None global_background_image_path = "" global_scene_status = {} global_cancel = False +global_cached_system_info = None +global_cached_compute_devices = None images = {} current_progress = 0.0 progress_lock = Lock() @@ -342,6 +344,16 @@ def convert_result_to_json_dict(ctx, results): return json_results +def system_info_get(ctx): + # This is all system information Blender knows. + # NOTE: We override executable since cpuinfo uses it, and it is set + # to blender. + old_executable = sys.executable + sys.executable = bpy.app.binary_path_python + info = system_info.gatherSystemInfo(ctx) + sys.executable = old_executable + return info + def benchmark_thread(ctx): global progress_lock, global_result_platform, global_progress_status global global_cancel @@ -353,12 +365,7 @@ def benchmark_thread(ctx): return progress_lock.release() - # This is all system information Blender knows. - # NOTE: We override executable since cpuinfo uses it, and it is set - # to blender. 
- old_executable = sys.executable - sys.executable = bpy.app.binary_path_python - blender_system_info = system_info.gatherSystemInfo(ctx) + blender_system_info = system_info_get(ctx) # This is actual device configuration which is used to render the # benchmark scene. @@ -417,6 +424,11 @@ class BENCHMARK_PT_main(Panel): sub.scale_y = 64.0 sub.separator() + sub = col.row() + sub.scale_y = 1.5 + sub.prop(context.scene, "compute_device", text="") + sub.separator() + sub = col.row() sub.scale_y = 2.25 props = sub.operator("benchmark.run", text="QUICK BENCHMARK") @@ -492,6 +504,11 @@ def scenes_dir_get(): else: raise Exception("Needs implementation") +def configure_script_get(): + script_directory = os.path.dirname(os.path.realpath(__file__)) + benchmark_script_directory = os.path.dirname(script_directory) + return os.path.join(benchmark_script_directory, "configure.py") + class BENCHMARK_OT_run(bpy.types.Operator): "Run Blender benchmark" bl_label = "Run Benchmark" @@ -610,17 +627,16 @@ class BENCHMARK_OT_run(bpy.types.Operator): global_progress_status = "Initializing..." context.area.tag_redraw() + compute_device = context.scene.compute_device + device_type, device_name = compute_device.split(":") + # Before doing anything, make sure we have all sinks set up, so we do # not miss any progress report. 
self.setup_sink() wm = context.window_manager - script_directory = os.path.dirname(os.path.realpath(__file__)) - benchmark_script_directory = os.path.dirname(script_directory) - configure_script = os.path.join( - benchmark_script_directory, "configure.py") ctx = benchmark_context.Context() ctx.blender = blender_executable_get() - ctx.configure_script = configure_script + ctx.configure_script = configure_script_get() if self.run_type == 'QUICK': ctx.scenes = QUICK_SCENES else: @@ -628,9 +644,9 @@ class BENCHMARK_OT_run(bpy.types.Operator): for scene in ctx.scenes: global_scene_status[scene] = "N/A" ctx.scenes_dir = scenes_dir_get() - ctx.device_type = 'CPU' - # Only applies for GPU, should match Cycles name - ctx.device_name = "" + ctx.device_type = device_type + ctx.device_name = device_name + ctx.single_compute_scene = True # Set this to True when having multiple GPUs of same name and only # one of the mis to be enabled. Or when requesting GPU render without # specifying GPU name. @@ -719,6 +735,31 @@ class BENCHMARK_OT_share(bpy.types.Operator): return {'FINISHED'} +################################################################################ +# Configuration. + +def compute_device_list_get(self, context): + global global_cached_system_info + global global_cached_compute_devices + if global_cached_compute_devices: + return global_cached_compute_devices + compute_devices = [('CPU:', "CPU", "")] + if not global_cached_system_info: + ctx = benchmark_context.Context() + ctx.blender = blender_executable_get() + ctx.configure_script = configure_script_get() + global_cached_system_info = system_info_get(ctx) + for device in global_cached_system_info["devices"]: + device_type = device["type"] + if device_type == "CPU": + continue + elif device_type in ("OPENCL", "CUDA"): + device_name = correct_device_name(device["name"]) + device_id = "{}:{}" . 
format(device_type, device["name"]) + compute_devices.append((device_id, device_name, "")) + global_cached_compute_devices = compute_devices + return compute_devices + ################################################################################ # Tweak User Preferences @@ -727,6 +768,7 @@ theme = userpref.themes[0] theme.user_interface.wcol_tool.inner = [0.408, 0.690, 0.129, 1.0] theme.user_interface.wcol_tool.inner_sel = [0.308, 0.590, 0.029, 1.0] theme.user_interface.wcol_tool.outline = [0.408, 0.690, 0.129] +userpref.view.use_quit_dialog = False theme.benchmark.space.back = [0.26, 0.26, 0.26] style = userpref.ui_styles[0] @@ -741,3 +783,8 @@ classes = ( BENCHMARK_OT_save, BENCHMARK_OT_share, ) + +Scene.compute_device = EnumProperty( + items=compute_device_list_get, + name="Compute Device", + description="Compute device to run benchmark on")