Implement compute device selector

2018-08-06 14:55:07 +02:00
parent 969776f4ad
commit 4719af789c
3 changed files with 76 additions and 17 deletions


@@ -162,6 +162,12 @@ def main():
         scene.cycles.samples = 1
         scene.cycles.aa_samples = 1

+    # Cycles device.
+    device = 'CPU' if args.benchmark_device_type == 'CPU' else 'GPU'
+    for scene in bpy.data.scenes:
+        scene.cycles.device = device
+    # TODO(sergey): Tweak tile size.
+
     # Report number of CPU threads used.
     for scene in bpy.data.scenes:
         print("CPU threads used: {}" . format(scene.render.threads))

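The device selection above reads args.benchmark_device_type, but the argument parser itself is outside this hunk. A minimal sketch of how such a flag could be parsed, assuming the script receives its options after Blender's "--" separator and assuming the flag is named --benchmark-device-type:

import argparse
import sys


def parse_arguments():
    # Blender forwards everything after "--" to the script; anything before
    # it belongs to Blender itself.
    argv = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else []
    parser = argparse.ArgumentParser()
    # Hypothetical flag name; the real definition is not part of this diff.
    parser.add_argument("--benchmark-device-type",
                        default='CPU',
                        choices=('CPU', 'CUDA', 'OPENCL', 'GPU'))
    return parser.parse_args(argv)


args = parse_arguments()
device = 'CPU' if args.benchmark_device_type == 'CPU' else 'GPU'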

@@ -11,7 +11,8 @@ class Context:
         'scenes',
         'scenes_dir',
         'configure_script',
-        'image_output_dir')
+        'image_output_dir',
+        'single_compute_scene')

     def __init__(self):
         # Full path to blender binary.
@@ -31,6 +32,9 @@ class Context:
         # Directory where render result images will be saved.
         # Empty means no results are saved.
         self.image_output_dir = ""
+        # When set, a single scene file is shared by all compute device types;
+        # the compute device and tile size are then set up after file load.
+        self.single_compute_scene = False

     def listAllScenes(self, directory):
         import os
@@ -83,7 +87,9 @@ class Context:
         return True

     def getDeviceFileSuffix(self):
-        if self.device_type == 'CPU':
+        if self.single_compute_scene:
+            return ""
+        elif self.device_type == 'CPU':
             return '_cpu'
         elif self.device_type in ('CUDA', 'OPENCL', 'GPU'):
             return '_gpu'

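With single_compute_scene set, getDeviceFileSuffix() now returns an empty string, so the same .blend file is used for every device type. A sketch of how the suffix is presumably consumed when resolving a scene file; the helper name and directory layout are assumptions, the real lookup lives elsewhere in the repository:

import os


def scene_file_path(ctx, scene_name):
    # Hypothetical helper: with the default suffixes "bmw27" resolves to
    # "bmw27_cpu.blend" or "bmw27_gpu.blend"; with ctx.single_compute_scene
    # it is simply "bmw27.blend" and the device is configured after load.
    file_name = scene_name + ctx.getDeviceFileSuffix() + ".blend"
    return os.path.join(ctx.scenes_dir, scene_name, file_name)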

@@ -6,7 +6,7 @@ import sys
 from threading import Thread, Lock
 import blf
 import bpy
-from bpy.types import Panel
+from bpy.types import Panel, Scene
 from bpy.props import EnumProperty, IntProperty, FloatProperty
 from ..foundation import (benchrunner,
                           buildbot,
@@ -38,6 +38,8 @@ global_result_dict = None
 global_background_image_path = ""
 global_scene_status = {}
 global_cancel = False
+global_cached_system_info = None
+global_cached_compute_devices = None
 images = {}
 current_progress = 0.0
 progress_lock = Lock()
@@ -342,6 +344,16 @@ def convert_result_to_json_dict(ctx, results):
     return json_results


+def system_info_get(ctx):
+    # This is all system information Blender knows.
+    # NOTE: We override executable since cpuinfo uses it, and it is set
+    # to blender.
+    old_executable = sys.executable
+    sys.executable = bpy.app.binary_path_python
+    info = system_info.gatherSystemInfo(ctx)
+    sys.executable = old_executable
+    return info
+
+
 def benchmark_thread(ctx):
     global progress_lock, global_result_platform, global_progress_status
     global global_cancel
@@ -353,12 +365,7 @@ def benchmark_thread(ctx):
         return
     progress_lock.release()

-    # This is all system information Blender knows.
-    # NOTE: We override executable since cpuinfo uses it, and it is set
-    # to blender.
-    old_executable = sys.executable
-    sys.executable = bpy.app.binary_path_python
-    blender_system_info = system_info.gatherSystemInfo(ctx)
+    blender_system_info = system_info_get(ctx)

     # This is actual device configuration which is used to render the
     # benchmark scene.
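One thing worth noting about the new system_info_get(): sys.executable is only restored when gatherSystemInfo() returns normally, so an exception would leave it pointing at Blender. A possible exception-safe variant of the same override, sketched here as a context manager rather than what the commit actually does:

import sys
from contextlib import contextmanager


@contextmanager
def python_executable(path):
    # Temporarily point sys.executable at a real Python binary so that
    # modules such as cpuinfo, which rely on sys.executable, do not end up
    # invoking Blender itself; the old value is restored even if the body
    # raises.
    old_executable = sys.executable
    sys.executable = path
    try:
        yield
    finally:
        sys.executable = old_executable

# Usage inside system_info_get() would then be:
#     with python_executable(bpy.app.binary_path_python):
#         info = system_info.gatherSystemInfo(ctx)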
@@ -417,6 +424,11 @@ class BENCHMARK_PT_main(Panel):
         sub.scale_y = 64.0
         sub.separator()

+        sub = col.row()
+        sub.scale_y = 1.5
+        sub.prop(context.scene, "compute_device", text="")
+        sub.separator()
+
         sub = col.row()
         sub.scale_y = 2.25
         props = sub.operator("benchmark.run", text="QUICK BENCHMARK")
@@ -492,6 +504,11 @@ def scenes_dir_get():
     else:
         raise Exception("Needs implementation")


+def configure_script_get():
+    script_directory = os.path.dirname(os.path.realpath(__file__))
+    benchmark_script_directory = os.path.dirname(script_directory)
+    return os.path.join(benchmark_script_directory, "configure.py")
+
+
 class BENCHMARK_OT_run(bpy.types.Operator):
     "Run Blender benchmark"
     bl_label = "Run Benchmark"
@@ -610,17 +627,16 @@ class BENCHMARK_OT_run(bpy.types.Operator):
         global_progress_status = "Initializing..."
         context.area.tag_redraw()

+        compute_device = context.scene.compute_device
+        device_type, device_name = compute_device.split(":")
+
         # Before doing anything, make sure we have all sinks set up, so we do
         # not miss any progress report.
         self.setup_sink()
         wm = context.window_manager
-        script_directory = os.path.dirname(os.path.realpath(__file__))
-        benchmark_script_directory = os.path.dirname(script_directory)
-        configure_script = os.path.join(
-            benchmark_script_directory, "configure.py")
         ctx = benchmark_context.Context()
         ctx.blender = blender_executable_get()
-        ctx.configure_script = configure_script
+        ctx.configure_script = configure_script_get()
         if self.run_type == 'QUICK':
             ctx.scenes = QUICK_SCENES
         else:
@@ -628,9 +644,9 @@ class BENCHMARK_OT_run(bpy.types.Operator):
         for scene in ctx.scenes:
             global_scene_status[scene] = "N/A"
         ctx.scenes_dir = scenes_dir_get()
-        ctx.device_type = 'CPU'
-        # Only applies for GPU, should match Cycles name
-        ctx.device_name = ""
+        ctx.device_type = device_type
+        ctx.device_name = device_name
+        ctx.single_compute_scene = True
         # Set this to True when having multiple GPUs of same name and only
         # one of them is to be enabled, or when requesting GPU render without
         # specifying GPU name.
@@ -719,6 +735,31 @@ class BENCHMARK_OT_share(bpy.types.Operator):
         return {'FINISHED'}


+################################################################################
+# Configuration.
+
+def compute_device_list_get(self, context):
+    global global_cached_system_info
+    global global_cached_compute_devices
+    if global_cached_compute_devices:
+        return global_cached_compute_devices
+    compute_devices = [('CPU:', "CPU", "")]
+    if not global_cached_system_info:
+        ctx = benchmark_context.Context()
+        ctx.blender = blender_executable_get()
+        ctx.configure_script = configure_script_get()
+        global_cached_system_info = system_info_get(ctx)
+    for device in global_cached_system_info["devices"]:
+        device_type = device["type"]
+        if device_type == "CPU":
+            continue
+        elif device_type in ("OPENCL", "CUDA"):
+            device_name = correct_device_name(device["name"])
+            device_id = "{}:{}" . format(device_type, device["name"])
+            compute_devices.append((device_id, device_name, ""))
+    global_cached_compute_devices = compute_devices
+    return compute_devices
+
+
 ################################################################################
 # Tweak User Preferences
@@ -727,6 +768,7 @@ theme = userpref.themes[0]
 theme.user_interface.wcol_tool.inner = [0.408, 0.690, 0.129, 1.0]
 theme.user_interface.wcol_tool.inner_sel = [0.308, 0.590, 0.029, 1.0]
 theme.user_interface.wcol_tool.outline = [0.408, 0.690, 0.129]
+userpref.view.use_quit_dialog = False
 theme.benchmark.space.back = [0.26, 0.26, 0.26]

 style = userpref.ui_styles[0]
@@ -741,3 +783,8 @@ classes = (
     BENCHMARK_OT_save,
     BENCHMARK_OT_share,
 )
+
+Scene.compute_device = EnumProperty(
+    items=compute_device_list_get,
+    name="Compute Device",
+    description="Compute device to run benchmark on")