Formatting according to PEP-8 using autopep8

I've added a `setup.cfg` file with a `[pep8]` section to make it use the
120-char line length limit we have for all Blender/Python scripts.
This commit is contained in:
2018-08-14 12:14:54 +02:00
parent 05c01f2b21
commit 58644b911f
8 changed files with 84 additions and 42 deletions

View File

@@ -53,6 +53,7 @@ WELCOME_TEXT = "Run the Quick Benchmark on the selected device to\n" \
BLURB_TEXT = "Share your results with the world!\n" \
"Manage the uploaded benchmark data on your Blender ID."
def reset_global_state():
global global_result_platform
global global_progress_status
@@ -72,18 +73,21 @@ def reset_global_state():
################################################################################
# Draw Utilities.
font_id = 0
def viewport_size():
    """Return the current GL viewport size as a ``(width, height)`` tuple.

    Queries GL_VIEWPORT through Blender's ``bgl`` wrapper; the first two
    buffer entries (viewport origin) are ignored.
    """
    import bgl
    rect = bgl.Buffer(bgl.GL_INT, 4)
    bgl.glGetIntegerv(bgl.GL_VIEWPORT, rect)
    width, height = rect[2], rect[3]
    return width, height
def draw_text_center(text, x, y, shadow=False):
dim = blf.dimensions(font_id, text)
cx = x - int(dim[0]/2)
cy = y - int(dim[1]/2)
cx = x - int(dim[0] / 2)
cy = y - int(dim[1] / 2)
if shadow:
delta = 1
blf.color(font_id, 0.2, 0.2, 0.2, 1.0)
@@ -93,6 +97,7 @@ def draw_text_center(text, x, y, shadow=False):
blf.position(font_id, cx, cy, 0)
blf.draw(font_id, text)
def draw_text_multiline(text, x, y, shadow=False):
ui_scale = bpy.context.user_preferences.system.ui_scale
height = int(blf.dimensions(font_id, "Dummy Text")[1])
@@ -109,10 +114,12 @@ def draw_text_multiline(text, x, y, shadow=False):
blf.draw(font_id, line)
y -= height + space
def draw_rect(x, y, w, h, color):
    """Fill an axis-aligned rectangle with ``color``.

    ``color`` is an RGBA sequence; the rectangle spans from ``(x, y)`` to
    ``(x + w, y + h)`` and is drawn via Blender's ``gpu`` module.
    """
    import gpu
    r, g, b, a = color[0], color[1], color[2], color[3]
    gpu.draw.rect(x, y, x + w, y + h, r, g, b, a)
def draw_image(filepath, x, y, w, h):
global images
if filepath not in images:
@@ -256,6 +263,7 @@ handle_draw = bpy.types.SpaceBenchmark.draw_handler_add(
################################################################################
# Benchmark foundation integration.
class ProgressProviderSink:
current_progress = 0.0
current_step = ''
@@ -300,7 +308,7 @@ class ProgressProviderSink:
global global_scene_status
if stats:
global_scene_status[scene_name] = util.humanReadableTimeDifference(
stats.total_render_time)
stats.total_render_time)
else:
global_scene_status[scene_name] = "Crashed :("
progress_lock.release()
@@ -341,18 +349,21 @@ class LoggerProviderSink:
################################################################################
# Benchmark thread.
def string_strip_trademark(name):
    """Return ``name`` with the "(R)" and "(TM)" marker substrings removed."""
    stripped = name
    for marker in ("(R)", "(TM)"):
        stripped = stripped.replace(marker, "")
    return stripped
def correct_device_name(name):
    """Prefix a bare GPU device name with its vendor.

    OpenCL/CUDA report device names without the vendor ("GeForce GTX 1080",
    "Radeon RX 580"); prepend "Nvidia " or "AMD " so the UI shows a full
    product name.  Names that match no known vendor prefix are returned
    unchanged.

    Note: the source as rendered contained both the pre- and post-autopep8
    variants of the condition lines (with stray semicolons), which is not
    valid Python; this is the cleaned-up post-change function, using a
    prefix tuple with ``startswith`` instead of a chained ``or``.
    """
    if name.startswith(("TITAN", "Quadro", "GeForce")):
        return "Nvidia " + name
    if name.startswith("Radeon"):
        return "AMD " + name
    return name
def get_gpu_names(system_info):
gpu_names = []
for device in system_info["devices"]:
@@ -361,21 +372,23 @@ def get_gpu_names(system_info):
gpu_names.append(correct_device_name(device["name"]))
return gpu_names
def indent_gpu_names(gpu_names):
    """Return a new list of the GPU names, each with an indentation prefix.

    NOTE(review): the prefix is currently the empty string, so this is a
    plain copy — presumably whitespace was lost in this rendering; confirm
    the intended indent against the original file.
    """
    return ["" + name for name in gpu_names]
def construct_platform_string(system_info):
"""
Construct human readable platform string to show in the interface.
"""
result = ""
result += "OS: {} {}" . format(system_info["system"],
system_info["bitness"])
system_info["bitness"])
result += "\nCPU: {}" . format(
string_strip_trademark(system_info["cpu_brand"]))
string_strip_trademark(system_info["cpu_brand"]))
gpu_names = get_gpu_names(system_info)
num_gpus = len(gpu_names)
if num_gpus:
@@ -385,6 +398,7 @@ def construct_platform_string(system_info):
result += "\nGPUs:\n{}" . format("\n" . join(indent_gpu_names(gpu_names)))
return result
def convert_result_to_json_dict(ctx, results):
# Convert custom classes to dictionaries for easier JSON dump.
json_results = results
@@ -400,7 +414,7 @@ def convert_result_to_json_dict(ctx, results):
else:
stat = {'result': 'CRASH'}
json_results['scenes'].append({'name': scene,
'stats': stat})
'stats': stat})
return json_results
@@ -414,6 +428,7 @@ def system_info_get(ctx):
sys.executable = old_executable
return info
def modify_system_info(system_info):
compute_units = query_opencl_compute_units()
for device in system_info["devices"]:
@@ -427,6 +442,7 @@ def modify_system_info(system_info):
del compute_units[index]
return system_info
def modify_device_info(device_info):
compute_device = bpy.context.scene.compute_device
device_type, device_name, compute_units, device_index = compute_device.split(":")
@@ -439,6 +455,7 @@ def modify_device_info(device_info):
device_info["compute_devices"] = compute_devices
return device_info
def benchmark_thread(ctx):
global progress_lock, global_result_platform, global_progress_status
global global_cancel
@@ -499,6 +516,7 @@ def ui_scale_factor(x):
widget_height = 20 * ui_scale
return x * widget_height / int(widget_height)
class BENCHMARK_PT_main(Panel):
bl_label = "Benchmark"
bl_options = {'HIDE_HEADER'}
@@ -616,6 +634,7 @@ def blender_benchmark_data_dir_get():
else:
raise Exception("Needs implementation")
def blender_executable_get():
benchmark_data_dir = blender_benchmark_data_dir_get()
system = platform.system()
@@ -628,15 +647,18 @@ def blender_executable_get():
else:
raise Exception("Needs implementation")
def scenes_dir_get():
    """Return the path of the "scenes" directory inside the benchmark data dir."""
    return os.path.join(blender_benchmark_data_dir_get(), "scenes")
def configure_script_get():
    """Return the absolute path to ``configure.py``.

    The script lives in the parent directory of the directory containing
    this module (symlinks resolved via ``os.path.realpath``).
    """
    here = os.path.dirname(os.path.realpath(__file__))
    parent_dir = os.path.dirname(here)
    return os.path.join(parent_dir, "configure.py")
class BENCHMARK_OT_run_base(bpy.types.Operator):
run_type = 'QUICK' # or 'COMPLETE'
benchmark_context = None
@@ -680,7 +702,7 @@ class BENCHMARK_OT_run_base(bpy.types.Operator):
global_result_stats = ""
for scene in global_scene_status:
global_result_stats += "{}: {}\n" . format(
scene, global_scene_status[scene])
scene, global_scene_status[scene])
progress_lock.release()
def done(self, context):
@@ -708,11 +730,11 @@ class BENCHMARK_OT_run_base(bpy.types.Operator):
global_result_stats += "\n"
if stat["result"] == "OK":
global_result_stats += "{}: {}" . format(name_stat['name'],
util.humanReadableTimeDifference(
stat["total_render_time"]))
util.humanReadableTimeDifference(
stat["total_render_time"]))
else:
global_result_stats += "{}: {}" . format(name_stat['name'],
stat["result"])
stat["result"])
else:
global_result_stats = ""
# TODO(sergey): Use a nicer picture for the final slide.
@@ -776,8 +798,8 @@ class BENCHMARK_OT_run_base(bpy.types.Operator):
# ctx.image_output_dir = "/tmp/"
self.benchmark_context = ctx
# Create thread for the actual benchmark.
self.thread = Thread(target = benchmark_thread,
args = (self.benchmark_context, ))
self.thread = Thread(target=benchmark_thread,
args=(self.benchmark_context, ))
self.thread.start()
# Create timer to query thread status
self.timer = wm.event_timer_add(0.1, context.window)
@@ -807,6 +829,7 @@ class BENCHMARK_OT_run_base(bpy.types.Operator):
if self.thread:
self.thread.join()
class BENCHMARK_OT_run_quick(BENCHMARK_OT_run_base):
"Run quick Blender benchmark"
bl_label = "Run Benchmark"
@@ -814,6 +837,7 @@ class BENCHMARK_OT_run_quick(BENCHMARK_OT_run_base):
run_type = 'QUICK'
class BENCHMARK_OT_run_complete(BENCHMARK_OT_run_base):
"Run complete Blender benchmark (might take 1.5 hours to finish and 4GiB of GPU memory)"
bl_label = "Run Benchmark"
@@ -852,6 +876,7 @@ class BENCHMARK_OT_save(bpy.types.Operator):
def cancel(self, context):
make_buttons_green()
class BENCHMARK_OT_share(bpy.types.Operator):
bl_idname = "benchmark.share"
bl_label = "Share Benchmark Result"
@@ -872,6 +897,7 @@ class BENCHMARK_OT_share(bpy.types.Operator):
global_results_submitted = True
return {'FINISHED'}
class BENCHMARK_OT_opendata_link(bpy.types.Operator):
bl_idname = "benchmark.opendata_link"
bl_label = "opendata.blender.org"
@@ -883,6 +909,7 @@ class BENCHMARK_OT_opendata_link(bpy.types.Operator):
################################################################################
# Restart benchmark.
class BENCHMARK_OT_restart(bpy.types.Operator):
bl_idname = "benchmark.restart"
bl_label = "Go to a home screen and choose another benchmark to run"
@@ -894,6 +921,7 @@ class BENCHMARK_OT_restart(bpy.types.Operator):
################################################################################
# Configuration.
def cl_query_executable_get():
benchmark_data_dir = blender_benchmark_data_dir_get()
system = platform.system()
@@ -906,6 +934,7 @@ def cl_query_executable_get():
else:
raise Exception("Needs implementation")
def query_opencl_compute_units():
binary = cl_query_executable_get()
output = subprocess.run([binary], stdout=subprocess.PIPE).stdout
@@ -916,6 +945,7 @@ def query_opencl_compute_units():
compute_units.append((name.decode(), max_compute_units.decode()))
return compute_units
def find_first_device_index(compute_units, device_name):
if not compute_units:
return -1
@@ -924,6 +954,7 @@ def find_first_device_index(compute_units, device_name):
return index
return -1
def compute_device_list_get(self, context):
global global_cached_system_info
global global_cached_compute_devices
@@ -974,9 +1005,12 @@ def compute_device_list_get(self, context):
################################################################################
# Tweak User Preferences
default_wcol_tool_inner = None
default_wcol_tool_inner_sel = None
default_wcol_tool_outline = None
def backup_buttons_colors():
global default_wcol_tool_inner
global default_wcol_tool_inner_sel
@@ -987,6 +1021,7 @@ def backup_buttons_colors():
default_wcol_tool_inner_sel = theme.user_interface.wcol_tool.inner_sel[:]
default_wcol_tool_outline = theme.user_interface.wcol_tool.outline[:]
def make_buttons_green():
userpref = bpy.context.user_preferences
theme = userpref.themes[0]
@@ -994,6 +1029,7 @@ def make_buttons_green():
theme.user_interface.wcol_tool.inner_sel = [0.308, 0.490, 0.029, 1.0]
theme.user_interface.wcol_tool.outline = [0.408, 0.590, 0.129]
def make_buttons_default():
userpref = bpy.context.user_preferences
theme = userpref.themes[0]
@@ -1001,6 +1037,7 @@ def make_buttons_default():
theme.user_interface.wcol_tool.inner_sel = default_wcol_tool_inner_sel
theme.user_interface.wcol_tool.outline = default_wcol_tool_outline
userpref = bpy.context.user_preferences
theme = userpref.themes[0]
userpref.view.use_quit_dialog = False