diff --git a/benchmark/configure.py b/benchmark/configure.py
index 3473da2..83a1330 100644
--- a/benchmark/configure.py
+++ b/benchmark/configure.py
@@ -16,7 +16,8 @@ def setUseRequestedDevice(context,
                           cpref,
                           device_type,
                           requested_device,
-                          device_single):
+                          device_single,
+                          requested_device_index):
     import _cycles
     # Empty device type means we'll try to render on a single card,
     # preferably non-display one.
@@ -44,38 +45,60 @@ def setUseRequestedDevice(context,
             device_found = True
     else:
         device_found = False
+        device_index = 0
         for device in cpref.devices:
             device_name = device.name.replace(" (Display)", "")
             if device_name == requested_device:
-                if not device_found or not device_single:
+                if requested_device_index != -1:
+                    if device_index == requested_device_index:
+                        device.use = True
+                        device_found = True
+                    else:
+                        device.use = False
+                elif not device_found or not device_single:
                     device.use = True
                     device_found = True
                 else:
                     device.use = False
+                device_index += 1
             else:
                 device.use = False
     return device_found


-def setDeviceCUDA(context, cpref, requested_device, device_single):
+def setDeviceCUDA(context,
+                  cpref,
+                  requested_device,
+                  device_single,
+                  requested_device_index):
     cpref.compute_device_type = 'CUDA'
     return setUseRequestedDevice(context,
                                  cpref,
                                  'CUDA',
                                  requested_device,
-                                 device_single)
+                                 device_single,
+                                 requested_device_index)


-def setDeviceOpenCL(context, cpref, requested_device, device_single):
+def setDeviceOpenCL(context,
+                    cpref,
+                    requested_device,
+                    device_single,
+                    requested_device_index):
     cpref.compute_device_type = 'OPENCL'
     return setUseRequestedDevice(context,
                                  cpref,
                                  'OPENCL',
                                  requested_device,
-                                 device_single)
+                                 device_single,
+                                 requested_device_index)


-def setDeviceGPU(context, cpref, requested_device, device_single):
+def setDeviceGPU(context,
+                 cpref,
+                 requested_device,
+                 device_single,
+                 requested_device_index):
     import _cycles
     has_cuda = has_opencl = False
     for device in _cycles.available_devices():
@@ -85,9 +108,17 @@ def setDeviceGPU(context, cpref, requested_device, device_single):
             has_opencl = True

     if has_cuda:
-        return setDeviceCUDA(context, cpref, requested_device, device_single)
+        return setDeviceCUDA(context,
+                             cpref,
+                             requested_device,
+                             device_single,
+                             requested_device_index)
     if has_opencl:
-        return setDeviceOpenCL(context, cpref, requested_device, device_single)
+        return setDeviceOpenCL(context,
+                               cpref,
+                               requested_device,
+                               device_single,
+                               requested_device_index)
     return False


@@ -145,6 +176,9 @@ def main():
                         help="Use single device when multiple are found",
                         action='store_true',
                         default=False)
+    parser.add_argument("--benchmark-device-index",
+                        help="Use device of a given index when using single device",
+                        default=-1)
     parser.add_argument("--benchmark-system-info",
                         help="Dump whole system information",
                         action='store_true',
@@ -182,17 +216,20 @@ def main():
             device_ok = setDeviceCUDA(context,
                                       cpref,
                                       args.benchmark_device,
-                                      args.benchmark_device_single)
+                                      args.benchmark_device_single,
+                                      int(args.benchmark_device_index))
         elif args.benchmark_device_type == 'OPENCL':
             device_ok = setDeviceOpenCL(context,
                                         cpref,
                                         args.benchmark_device,
-                                        args.benchmark_device_single)
+                                        args.benchmark_device_single,
+                                        int(args.benchmark_device_index))
         elif args.benchmark_device_type == 'GPU':
             device_ok = setDeviceGPU(context,
                                      cpref,
                                      args.benchmark_device,
-                                     args.benchmark_device_single)
+                                     args.benchmark_device_single,
+                                     int(args.benchmark_device_index))

     if not device_ok:
         sys.exit(1)
diff --git a/benchmark/foundation/benchrunner.py b/benchmark/foundation/benchrunner.py
index dbee9e2..dca0bec 100644
--- a/benchmark/foundation/benchrunner.py
+++ b/benchmark/foundation/benchrunner.py
@@ -32,6 +32,8 @@ def constructBenchmarkCommand(ctx, scene, blendfile, output_folder, cfra):
         command.extend(["--benchmark-device", ctx.device_name])
     if ctx.device_single:
         command.extend(["--benchmark-device-single"])
+    if ctx.device_index != -1:
+        command.extend(["--benchmark-device-index", ctx.device_index])
     return command


@@ -156,6 +158,8 @@ def benchmarkGetDeviceInfo(ctx):
         command.extend(["--benchmark-device", ctx.device_name])
     if ctx.device_single:
         command.extend(["--benchmark-device-single"])
+    if ctx.device_index != -1:
+        command.extend(["--benchmark-device-index", ctx.device_index])
     process = subprocess.Popen(command,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
diff --git a/benchmark/foundation/context.py b/benchmark/foundation/context.py
index 284a9fa..c15a209 100644
--- a/benchmark/foundation/context.py
+++ b/benchmark/foundation/context.py
@@ -8,6 +8,7 @@ class Context:
         'device_type',
         'device_name',
         'device_single',
+        'device_index',
         'scenes',
         'scenes_dir',
         'configure_script',
@@ -23,6 +24,8 @@ class Context:
        self.device_name = 'NAME'
        # Use single device when there are multiple one matching the name
        self.device_single = False
+       # Use specified device index when device_single is true.
+       self.device_index = -1
        # By default we use empty list, it is up to the user to fill it in.
        self.scenes = []
        # It is up to the user to provide proper path to scenes.
diff --git a/benchmark/space/__init__.py b/benchmark/space/__init__.py
index 51ba20b..d0fb70b 100644
--- a/benchmark/space/__init__.py
+++ b/benchmark/space/__init__.py
@@ -417,7 +417,7 @@ def modify_system_info(system_info):

 def modify_device_info(device_info):
     compute_device = bpy.context.scene.compute_device
-    device_type, device_name, compute_units = compute_device.split(":")
+    device_type, device_name, compute_units, device_index = compute_device.split(":")
     if device_info["device_type"] == "OPENCL":
         compute_devices = []
         for device in device_info["compute_devices"]:
@@ -686,7 +686,7 @@ class BENCHMARK_OT_run_base(bpy.types.Operator):
         if global_cancel:
             global_result_dict = None
             reset_global_state()
-        else:
+        elif global_result_dict:
             global_result_stats = ""
             for name_stat in global_result_dict["scenes"]:
                 stat = name_stat["stats"]
@@ -699,6 +699,8 @@ class BENCHMARK_OT_run_base(bpy.types.Operator):
                 else:
                     global_result_stats += "{}: {}" . format(name_stat['name'],
                                                              stat["result"])
+        else:
+            global_result_stats = ""
         # TODO(sergey): Use some more nice picture for the final slide.
         global global_background_image_path
         global_background_image_path = ""
@@ -730,7 +732,7 @@ class BENCHMARK_OT_run_base(bpy.types.Operator):
             context.area.tag_redraw()

         compute_device = context.scene.compute_device
-        device_type, device_name, compute_units = compute_device.split(":")
+        device_type, device_name, compute_units, device_index = compute_device.split(":")

         self.tmpdir = tempfile.TemporaryDirectory(prefix="blender_benchmark_")

@@ -756,6 +758,7 @@ class BENCHMARK_OT_run_base(bpy.types.Operator):
         # one of them is to be enabled. Or when requesting GPU render without
         # specifying GPU name.
         ctx.device_single = True
+        ctx.device_index = device_index
         # ctx.image_output_dir = "/tmp/"
         self.benchmark_context = ctx
         # Create thread for the actual benchmark.
@@ -910,15 +913,23 @@ def compute_device_list_get(self, context):
     global global_cached_compute_devices
     if global_cached_compute_devices:
         return global_cached_compute_devices
-    compute_devices = [('CPU::', "CPU", "")]
+    compute_devices = [('CPU:::', "CPU", "")]
     if not global_cached_system_info:
         ctx = benchmark_context.Context()
         ctx.blender = blender_executable_get()
         ctx.configure_script = configure_script_get()
         global_cached_system_info = system_info_get(ctx)
     compute_units = query_opencl_compute_units()
+    device_indices = {}
     for device in global_cached_system_info["devices"]:
+        raw_device_name = device["name"]
         device_type = device["type"]
+        if raw_device_name in device_indices:
+            device_indices[raw_device_name] += 1
+            device_index = device_indices[raw_device_name]
+        else:
+            device_indices[raw_device_name] = 0
+            device_index = 0
         if device_type == "CPU":
             continue
         elif device_type == "OPENCL":
@@ -930,11 +941,15 @@
                 device_name += " " + compute_units[index][1]
                 device_compute_units = str(compute_units[index][1])
                 del compute_units[index]
-            device_id = "{}:{}:{}" . format(device_type, device["name"], device_compute_units)
+            device_id = "{}:{}:{}" . format(device_type,
+                                            device["name"],
+                                            device_compute_units)
             compute_devices.append((device_id, device_name, ""))
         elif device_type == "CUDA":
             device_name = correct_device_name(device["name"])
-            device_id = "{}:{}:" . format(device_type, device["name"])
+            device_id = "{}:{}::{}" . format(device_type,
+                                             device["name"],
+                                             device_index)
             compute_devices.append((device_id, device_name, ""))
     global_cached_compute_devices = compute_devices
     return compute_devices
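
For review convenience, below is a minimal standalone sketch (not part of the patch) of the selection rule the setUseRequestedDevice() hunk introduces: when several devices share the same display-stripped name, --benchmark-device-index picks the N-th match instead of the first. FakeDevice, select_device and the GPU names are hypothetical helpers used only for this illustration, and the device_single handling is simplified away here.

# Hypothetical illustration only; FakeDevice stands in for an entry of
# cpref.devices as seen by setUseRequestedDevice() in configure.py.
class FakeDevice:
    def __init__(self, name):
        self.name = name
        self.use = False


def select_device(devices, requested_device, requested_device_index):
    # Enable only the requested_device_index'th device whose (display-stripped)
    # name matches requested_device; -1 keeps the old "first match" behaviour.
    device_found = False
    device_index = 0
    for device in devices:
        device_name = device.name.replace(" (Display)", "")
        if device_name == requested_device:
            if requested_device_index != -1:
                device.use = (device_index == requested_device_index)
                device_found = device_found or device.use
            elif not device_found:
                device.use = True
                device_found = True
            else:
                device.use = False
            device_index += 1
        else:
            device.use = False
    return device_found


if __name__ == "__main__":
    # Two identically named GPUs: index 1 enables only the second one.
    devices = [FakeDevice("GeForce GTX 1080"),
               FakeDevice("GeForce GTX 1080 (Display)")]
    print(select_device(devices, "GeForce GTX 1080", 1))  # True
    print([d.use for d in devices])                       # [False, True]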