diff --git a/benchmark/space/__init__.py b/benchmark/space/__init__.py
index 5eafd65..13821a2 100644
--- a/benchmark/space/__init__.py
+++ b/benchmark/space/__init__.py
@@ -68,7 +68,6 @@ WELCOME_TEXT = "Run the Quick Benchmark on the selected device to\n" \
 BLURB_TEXT = "Share your results with the world!\n" \
              "Manage the uploaded benchmark data on your Blender ID."
 
-
 
 ################################################################################
 # Draw Utilities.
@@ -168,7 +167,7 @@ def benchmark_draw_post_pixel(arg1, arg2):
 
     if score >= 0:
         blf.size(font_id, int(32 * ui_scale), 72)
-        draw_text_center("Your Time: {}" . format(
+        draw_text_center("Your Time: {}".format(
             util.humanReadableTimeDifference(score)), x, y, shadow=True)
     else:
         blf.size(font_id, int(18 * ui_scale), 72)
@@ -256,6 +255,7 @@ def benchmark_draw_post_pixel(arg1, arg2):
 handle_draw = bpy.types.SpaceBenchmark.draw_handler_add(
     benchmark_draw_post_pixel, (None, None), 'WINDOW', 'POST_PIXEL')
 
+
 ################################################################################
 # Benchmark foundation integration.
 
@@ -339,6 +339,7 @@ class LoggerProviderSink:
     def FATAL(self, *args):
         pass
 
+
 ################################################################################
 # Benchmark thread.
 
@@ -378,17 +379,17 @@ def construct_platform_string(system_info):
     Construct human readable platform string to show in the interface.
     """
     result = ""
-    result += "OS: {} {}" . format(system_info["system"],
-                                   system_info["bitness"])
-    result += "\nCPU: {}" . format(
+    result += "OS: {} {}".format(system_info["system"],
+                                 system_info["bitness"])
+    result += "\nCPU: {}".format(
         string_strip_trademark(system_info["cpu_brand"]))
     gpu_names = get_gpu_names(system_info)
     num_gpus = len(gpu_names)
     if num_gpus:
         if num_gpus == 1:
-            result += "\nGPU: {}" . format(gpu_names[0])
+            result += "\nGPU: {}".format(gpu_names[0])
         else:
-            result += "\nGPUs:\n{}" . format("\n" . join(indent_gpu_names(gpu_names)))
+            result += "\nGPUs:\n{}".format("\n".join(indent_gpu_names(gpu_names)))
     return result
 
 
@@ -632,7 +633,8 @@ def blender_executable_get():
     elif system == "Windows":
         return os.path.join(benchmark_data_dir, "blender", "blender.exe")
     elif system == "Darwin":
-        return os.path.join(benchmark_data_dir, "blender", "blender.app", "Contents", "MacOS", "blender")
+        return os.path.join(benchmark_data_dir, "blender", "blender.app", "Contents", "MacOS",
+                            "blender")
     else:
         raise Exception("Needs implementation")
 
@@ -713,7 +715,7 @@ class BENCHMARK_OT_run_base(bpy.types.Operator):
             if stat["result"] == "OK":
                 G.result_stats += "{}: {}".format(name_stat['name'],
                                                   util.humanReadableTimeDifference(
-                                                      stat["total_render_time"]))
+                                                       stat["total_render_time"]))
             else:
                 G.result_stats += "{}: {}".format(name_stat['name'],
                                                   stat["result"])
@@ -778,7 +780,7 @@ class BENCHMARK_OT_run_base(bpy.types.Operator):
         self.benchmark_context = ctx
         # Create thread for the actual benchmark.
        self.thread = Thread(target=benchmark_thread,
-                             args=(self.benchmark_context, ))
+                             args=(self.benchmark_context,))
         self.thread.start()
         # Create timer to query thread status
         self.timer = wm.event_timer_add(0.1, context.window)
@@ -882,6 +884,7 @@ class BENCHMARK_OT_opendata_link(bpy.types.Operator):
         bpy.ops.wm.url_open('INVOKE_DEFAULT', url="https://opendata.blender.org")
         return {'FINISHED'}
 
+
 ################################################################################
 # Restart benchmark.
 
@@ -894,6 +897,7 @@ class BENCHMARK_OT_restart(bpy.types.Operator):
         G.reset()
         return {'FINISHED'}
 
+
 ################################################################################
 # Configuration.
 
@@ -962,20 +966,21 @@ def compute_device_list_get(self, context):
                 device_name += " " + compute_units[index][1]
                 device_compute_units = str(compute_units[index][1])
                 del compute_units[index]
-            device_id = "{}:{}:{}:{}" . format(device_type,
-                                               device["name"],
-                                               device_compute_units,
-                                               device_index)
+            device_id = "{}:{}:{}:{}".format(device_type,
+                                             device["name"],
+                                             device_compute_units,
+                                             device_index)
             compute_devices.append((device_id, device_name, ""))
         elif device_type == "CUDA":
             device_name = correct_device_name(device["name"])
-            device_id = "{}:{}::{}" . format(device_type,
-                                             device["name"],
-                                             device_index)
+            device_id = "{}:{}::{}".format(device_type,
+                                           device["name"],
+                                           device_index)
             compute_devices.append((device_id, device_name, ""))
     G.cached_compute_devices = compute_devices
     return compute_devices
 
+
 ################################################################################
 # Tweak User Preferences
 