Formatting according to PEP-8 using autopep8
I've added a `setup.cfg` file with a `[pep8]` section so that autopep8 uses the 120-character line-length limit we have for all Blender/Python scripts.
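For reference, autopep8 reads the `[pep8]` section from `setup.cfg` automatically when run from the directory that contains it, so no extra line-length flag is needed; a typical invocation (the exact flags and target path here are illustrative, not a record of the command actually used) would be:

    autopep8 --in-place --recursive benchmark/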
@@ -60,7 +60,7 @@ def benchmarkBlenderWatched(command):
         else:
             logger.DEBUG("Line from stdout: {}" . format(line))
             st.update(line)
-            if st.current_tiles != 0 or st.current_sample != None:
+            if st.current_tiles != 0 or st.current_sample is not None:
                 elapsed_time = time.time() - start_time
                 elapsed_time_str = util.humanReadableTimeDifference(elapsed_time)
                 progress.progress(int(st.getCurrentProgress()),
@@ -92,7 +92,7 @@ def benchmarkScene(ctx, scene):
     tmpdir = tempfile.TemporaryDirectory(prefix="benchmark_")
     cfra = util.queryCurrentFrame(blendfile)
     command = constructBenchmarkCommand(
-            ctx, scene, blendfile, tmpdir.name, cfra)
+        ctx, scene, blendfile, tmpdir.name, cfra)
     logger.DEBUG("Command for rendering: {}" . format(command))
     progress.step('WARM_UP')
     logger.INFO("> Warm-up round, making sure everything is ready " +
@@ -104,7 +104,7 @@ def benchmarkScene(ctx, scene):
     # Remove result of warmup round.
     if ctx.image_output_dir:
         full_image_output = os.path.join(ctx.image_output_dir, scene) + \
-                            str(cfra) + ".png"
+            str(cfra) + ".png"
         if os.path.exists(full_image_output):
             os.remove(full_image_output)
     # TODO(sergey): Consider doing several passes.
@@ -23,47 +23,41 @@ class COLORS_ANSI:
 VERBOSE = False
 COLORS = COLORS_DUMMY

+
 class DefaultLoggerProvider:
     def HEADER(self, *args):
         print(COLORS.HEADER + COLORS.BOLD, end="")
         print(*args, end="")
         print(COLORS.ENDC)
-

     def WARNING(self, *args):
         print(COLORS.WARNING + COLORS.BOLD, end="")
         print(*args, end="")
         print(COLORS.ENDC)
-

     def ERROR(self, *args):
         print(COLORS.FAIL + COLORS.BOLD, end="")
         print(*args, end="")
         print(COLORS.ENDC)
-

     def OK(self, *args):
         print(COLORS.OKGREEN + COLORS.BOLD, end="")
         print(*args, end="")
         print(COLORS.ENDC)
-

     def BOLD(self, *args):
         print(COLORS.BOLD, end="")
         print(*args, end="")
         print(COLORS.ENDC)
-

     def INFO(self, *args):
         print(*args)
-

     def DEBUG(self, *args):
         # TODO(sergey): Add check that debug is enabled.
         if False:
             print(*args)
-

     def FATAL(self, *args):
         import sys
         ERROR(*args)
@@ -72,6 +66,7 @@ class DefaultLoggerProvider:

 LOGGER_PROVIDER = DefaultLoggerProvider()

+
 def HEADER(*args):
     LOGGER_PROVIDER.HEADER(*args)

@@ -3,11 +3,13 @@ import sys

 from . import logger

+
 class DefaultProgressProvider:
     """
     Default progress provider implementation, which draws a progress
     bar in the console, unless current logging is set to verbose mode.
     """
+
     def progress(self, count, total, prefix="", suffix=""):
         if logger.VERBOSE:
             return
@@ -29,12 +31,12 @@ class DefaultProgressProvider:
         sys.stdout.flush()

     def clear(self):
-        if logger.VERBOSE:
-            return
+        if logger.VERBOSE:
+            return

-        size = shutil.get_terminal_size((80, 20))
-        sys.stdout.write(" " * size.columns + "\r")
-        sys.stdout.flush()
+        size = shutil.get_terminal_size((80, 20))
+        sys.stdout.write(" " * size.columns + "\r")
+        sys.stdout.flush()

     def step(self, step_name):
         pass
@@ -76,12 +78,15 @@ def step(step_name):
 def scene(scene_name):
     PROGRESS_PROVIDER.scene(scene_name)

+
 def scene_stats(scene_name, stats):
     PROGRESS_PROVIDER.scene_stats(scene_name, stats)

+
 def render_process(process):
     PROGRESS_PROVIDER.render_process(process)

+
 def is_canceled():
     return PROGRESS_PROVIDER.is_canceled()

@@ -89,16 +89,16 @@ class Stats:
         # TODO(sergey): Check that all stats are available.
         print("Total pipeline render time: {} ({} sec)"
               . format(util.humanReadableTimeDifference(
-                   self.pipeline_render_time),
-               self.pipeline_render_time))
+                  self.pipeline_render_time),
+                  self.pipeline_render_time))
         print("Total Cycles render time: {} ({} sec)"
               . format(util.humanReadableTimeDifference(
-                   self.total_render_time),
-               self.total_render_time))
+                  self.total_render_time),
+                  self.total_render_time))
         print("Pure Cycles render time (without sync): {} ({} sec)"
               . format(util.humanReadableTimeDifference(
-                   self.render_time_no_sync),
-               self.render_time_no_sync))
+                  self.render_time_no_sync),
+                  self.render_time_no_sync))
         print("Cycles memory usage: {} ({} peak)"
               . format(self.device_memory_usage,
                        self.device_peak_memory))
@@ -12,6 +12,7 @@ from .third_party import cpuinfo
 from .third_party import cpu_cores
 from .third_party.dateutil import parser

+
 def _getBlenderDeviceInfo(ctx):
     PREFIX = "Benchmark Devices: "
     command = [ctx.blender,
benchmark/setup.cfg (new file, 2 lines)
@@ -0,0 +1,2 @@
+[pep8]
+max-line-length = 120
@@ -53,6 +53,7 @@ WELCOME_TEXT = "Run the Quick Benchmark on the selected device to\n" \
 BLURB_TEXT = "Share your results with the world!\n" \
     "Manage the uploaded benchmark data on your Blender ID."

+
 def reset_global_state():
     global global_result_platform
     global global_progress_status
@@ -72,18 +73,21 @@ def reset_global_state():
 ################################################################################
 # Draw Utilities.

+
 font_id = 0

+
 def viewport_size():
     import bgl
     viewport = bgl.Buffer(bgl.GL_INT, 4)
     bgl.glGetIntegerv(bgl.GL_VIEWPORT, viewport)
     return viewport[2], viewport[3]

+
 def draw_text_center(text, x, y, shadow=False):
     dim = blf.dimensions(font_id, text)
-    cx = x - int(dim[0]/2)
-    cy = y - int(dim[1]/2)
+    cx = x - int(dim[0] / 2)
+    cy = y - int(dim[1] / 2)
     if shadow:
         delta = 1
         blf.color(font_id, 0.2, 0.2, 0.2, 1.0)
@@ -93,6 +97,7 @@ def draw_text_center(text, x, y, shadow=False):
     blf.position(font_id, cx, cy, 0)
     blf.draw(font_id, text)

+
 def draw_text_multiline(text, x, y, shadow=False):
     ui_scale = bpy.context.user_preferences.system.ui_scale
     height = int(blf.dimensions(font_id, "Dummy Text")[1])
@@ -109,10 +114,12 @@ def draw_text_multiline(text, x, y, shadow=False):
         blf.draw(font_id, line)
         y -= height + space

+
 def draw_rect(x, y, w, h, color):
     import gpu
     gpu.draw.rect(x, y, x + w, y + h, color[0], color[1], color[2], color[3])

+
 def draw_image(filepath, x, y, w, h):
     global images
     if filepath not in images:
@@ -256,6 +263,7 @@ handle_draw = bpy.types.SpaceBenchmark.draw_handler_add(
 ################################################################################
 # Benchmark foundation integration.

+
 class ProgressProviderSink:
     current_progress = 0.0
     current_step = ''
@@ -300,7 +308,7 @@ class ProgressProviderSink:
         global global_scene_status
         if stats:
             global_scene_status[scene_name] = util.humanReadableTimeDifference(
-                    stats.total_render_time)
+                stats.total_render_time)
         else:
             global_scene_status[scene_name] = "Crashed :("
         progress_lock.release()
@@ -341,18 +349,21 @@ class LoggerProviderSink:
 ################################################################################
 # Benchmark thread.

+
 def string_strip_trademark(name):
     return name.replace("(R)", "").replace("(TM)", "")

+
 def correct_device_name(name):
     if (name.startswith("TITAN") or
-        name.startswith("Quadro") or
-        name.startswith("GeForce")):
-        return "Nvidia " + name;
+            name.startswith("Quadro") or
+            name.startswith("GeForce")):
+        return "Nvidia " + name
     if (name.startswith("Radeon")):
-        return "AMD " + name;
+        return "AMD " + name
     return name

+
 def get_gpu_names(system_info):
     gpu_names = []
     for device in system_info["devices"]:
@@ -361,21 +372,23 @@ def get_gpu_names(system_info):
             gpu_names.append(correct_device_name(device["name"]))
     return gpu_names

+
 def indent_gpu_names(gpu_names):
     indented_names = []
     for name in gpu_names:
         indented_names.append(" • " + name)
     return indented_names

+
 def construct_platform_string(system_info):
     """
     Construct human readable platform string to show in the interface.
     """
     result = ""
     result += "OS: {} {}" . format(system_info["system"],
-                              system_info["bitness"])
+                                    system_info["bitness"])
     result += "\nCPU: {}" . format(
-            string_strip_trademark(system_info["cpu_brand"]))
+        string_strip_trademark(system_info["cpu_brand"]))
     gpu_names = get_gpu_names(system_info)
     num_gpus = len(gpu_names)
     if num_gpus:
@@ -385,6 +398,7 @@ def construct_platform_string(system_info):
     result += "\nGPUs:\n{}" . format("\n" . join(indent_gpu_names(gpu_names)))
     return result

+
 def convert_result_to_json_dict(ctx, results):
     # Convert custom classes to dictionaries for easier JSON dump.
     json_results = results
@@ -400,7 +414,7 @@ def convert_result_to_json_dict(ctx, results):
         else:
             stat = {'result': 'CRASH'}
         json_results['scenes'].append({'name': scene,
-                                 'stats': stat})
+                                       'stats': stat})
     return json_results

@@ -414,6 +428,7 @@ def system_info_get(ctx):
     sys.executable = old_executable
     return info

+
 def modify_system_info(system_info):
     compute_units = query_opencl_compute_units()
     for device in system_info["devices"]:
@@ -427,6 +442,7 @@ def modify_system_info(system_info):
             del compute_units[index]
     return system_info

+
 def modify_device_info(device_info):
     compute_device = bpy.context.scene.compute_device
     device_type, device_name, compute_units, device_index = compute_device.split(":")
@@ -439,6 +455,7 @@ def modify_device_info(device_info):
     device_info["compute_devices"] = compute_devices
     return device_info

+
 def benchmark_thread(ctx):
     global progress_lock, global_result_platform, global_progress_status
     global global_cancel
@@ -499,6 +516,7 @@ def ui_scale_factor(x):
     widget_height = 20 * ui_scale
     return x * widget_height / int(widget_height)

+
 class BENCHMARK_PT_main(Panel):
     bl_label = "Benchmark"
     bl_options = {'HIDE_HEADER'}
@@ -616,6 +634,7 @@ def blender_benchmark_data_dir_get():
     else:
         raise Exception("Needs implementation")

+
 def blender_executable_get():
     benchmark_data_dir = blender_benchmark_data_dir_get()
     system = platform.system()
@@ -628,15 +647,18 @@ def blender_executable_get():
     else:
         raise Exception("Needs implementation")

+
 def scenes_dir_get():
     benchmark_data_dir = blender_benchmark_data_dir_get()
     return os.path.join(benchmark_data_dir, "scenes")

+
 def configure_script_get():
     script_directory = os.path.dirname(os.path.realpath(__file__))
     benchmark_script_directory = os.path.dirname(script_directory)
     return os.path.join(benchmark_script_directory, "configure.py")

+
 class BENCHMARK_OT_run_base(bpy.types.Operator):
     run_type = 'QUICK'  # or 'COMPLETE'
     benchmark_context = None
@@ -680,7 +702,7 @@ class BENCHMARK_OT_run_base(bpy.types.Operator):
         global_result_stats = ""
         for scene in global_scene_status:
             global_result_stats += "{}: {}\n" . format(
-                    scene, global_scene_status[scene])
+                scene, global_scene_status[scene])
         progress_lock.release()

     def done(self, context):
@@ -708,11 +730,11 @@ class BENCHMARK_OT_run_base(bpy.types.Operator):
                 global_result_stats += "\n"
             if stat["result"] == "OK":
                 global_result_stats += "{}: {}" . format(name_stat['name'],
-                    util.humanReadableTimeDifference(
-                        stat["total_render_time"]))
+                                                         util.humanReadableTimeDifference(
+                                                             stat["total_render_time"]))
             else:
                 global_result_stats += "{}: {}" . format(name_stat['name'],
-                    stat["result"])
+                                                         stat["result"])
         else:
             global_result_stats = ""
         # TODO(sergey): Use a nicer picture for the final slide.
@@ -776,8 +798,8 @@ class BENCHMARK_OT_run_base(bpy.types.Operator):
         # ctx.image_output_dir = "/tmp/"
         self.benchmark_context = ctx
         # Create thread for the actual benchmark.
-        self.thread = Thread(target = benchmark_thread,
-                             args = (self.benchmark_context, ))
+        self.thread = Thread(target=benchmark_thread,
+                             args=(self.benchmark_context, ))
         self.thread.start()
         # Create timer to query thread status
         self.timer = wm.event_timer_add(0.1, context.window)
@@ -807,6 +829,7 @@ class BENCHMARK_OT_run_base(bpy.types.Operator):
         if self.thread:
             self.thread.join()

+
 class BENCHMARK_OT_run_quick(BENCHMARK_OT_run_base):
     "Run quick Blender benchmark"
     bl_label = "Run Benchmark"
@@ -814,6 +837,7 @@ class BENCHMARK_OT_run_quick(BENCHMARK_OT_run_base):

     run_type = 'QUICK'

+
 class BENCHMARK_OT_run_complete(BENCHMARK_OT_run_base):
     "Run complete Blender benchmark (might take 1.5 hours to finish and use 4GiB of GPU memory)"
     bl_label = "Run Benchmark"
@@ -852,6 +876,7 @@ class BENCHMARK_OT_save(bpy.types.Operator):
     def cancel(self, context):
         make_buttons_green()

+
 class BENCHMARK_OT_share(bpy.types.Operator):
     bl_idname = "benchmark.share"
     bl_label = "Share Benchmark Result"
@@ -872,6 +897,7 @@ class BENCHMARK_OT_share(bpy.types.Operator):
         global_results_submitted = True
         return {'FINISHED'}

+
 class BENCHMARK_OT_opendata_link(bpy.types.Operator):
     bl_idname = "benchmark.opendata_link"
     bl_label = "opendata.blender.org"
@@ -883,6 +909,7 @@ class BENCHMARK_OT_opendata_link(bpy.types.Operator):
 ################################################################################
 # Restart benchmark.

+
 class BENCHMARK_OT_restart(bpy.types.Operator):
     bl_idname = "benchmark.restart"
     bl_label = "Go to a home screen and choose another benchmark to run"
@@ -894,6 +921,7 @@ class BENCHMARK_OT_restart(bpy.types.Operator):
 ################################################################################
 # Configuration.

+
 def cl_query_executable_get():
     benchmark_data_dir = blender_benchmark_data_dir_get()
     system = platform.system()
@@ -906,6 +934,7 @@ def cl_query_executable_get():
     else:
         raise Exception("Needs implementation")

+
 def query_opencl_compute_units():
     binary = cl_query_executable_get()
     output = subprocess.run([binary], stdout=subprocess.PIPE).stdout
@@ -916,6 +945,7 @@ def query_opencl_compute_units():
         compute_units.append((name.decode(), max_compute_units.decode()))
     return compute_units

+
 def find_first_device_index(compute_units, device_name):
     if not compute_units:
         return -1
@@ -924,6 +954,7 @@ def find_first_device_index(compute_units, device_name):
             return index
     return -1

+
 def compute_device_list_get(self, context):
     global global_cached_system_info
     global global_cached_compute_devices
@@ -974,9 +1005,12 @@ def compute_device_list_get(self, context):
 ################################################################################
 # Tweak User Preferences

+
 default_wcol_tool_inner = None
 default_wcol_tool_inner_sel = None
 default_wcol_tool_outline = None
+
+
 def backup_buttons_colors():
     global default_wcol_tool_inner
     global default_wcol_tool_inner_sel
@@ -987,6 +1021,7 @@ def backup_buttons_colors():
     default_wcol_tool_inner_sel = theme.user_interface.wcol_tool.inner_sel[:]
     default_wcol_tool_outline = theme.user_interface.wcol_tool.outline[:]

+
 def make_buttons_green():
     userpref = bpy.context.user_preferences
     theme = userpref.themes[0]
@@ -994,6 +1029,7 @@ def make_buttons_green():
     theme.user_interface.wcol_tool.inner_sel = [0.308, 0.490, 0.029, 1.0]
     theme.user_interface.wcol_tool.outline = [0.408, 0.590, 0.129]

+
 def make_buttons_default():
     userpref = bpy.context.user_preferences
     theme = userpref.themes[0]
@@ -1001,6 +1037,7 @@ def make_buttons_default():
     theme.user_interface.wcol_tool.inner_sel = default_wcol_tool_inner_sel
     theme.user_interface.wcol_tool.outline = default_wcol_tool_outline

+
 userpref = bpy.context.user_preferences
 theme = userpref.themes[0]
 userpref.view.use_quit_dialog = False
@@ -28,11 +28,11 @@ if PY3:
 if sys.platform.startswith('java'):
     import platform
     os_name = platform.java_ver()[3][0]
-    if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
+    if os_name.startswith('Windows'):  # "Windows XP", "Windows 7", etc.
         system = 'win32'
-    elif os_name.startswith('Mac'): # "Mac OS X", etc.
+    elif os_name.startswith('Mac'):  # "Mac OS X", etc.
         system = 'darwin'
-    else: # "Linux", "SunOS", "FreeBSD", etc.
+    else:  # "Linux", "SunOS", "FreeBSD", etc.
         # Setting this to "linux2" is not ideal, but only Windows or Mac
         # are actually checked for and the rest of the module expects
         # *sys.platform* style strings.
@@ -41,7 +41,6 @@ else:
     system = sys.platform


-
 def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
     r"""Return full path to the user-specific data dir for this application.

@@ -364,6 +363,7 @@ def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):

 class AppDirs(object):
     """Convenience wrapper for getting application dirs."""
+
     def __init__(self, appname, appauthor=None, version=None, roaming=False,
                  multipath=False):
         self.appname = appname
@@ -390,7 +390,7 @@ class AppDirs(object):
     @property
     def site_config_dir(self):
         return site_config_dir(self.appname, self.appauthor,
-                        version=self.version, multipath=self.multipath)
+                               version=self.version, multipath=self.multipath)

     @property
     def user_cache_dir(self):
@@ -479,6 +479,7 @@ def _get_win_folder_with_ctypes(csidl_name):

     return buf.value

+
 def _get_win_folder_with_jna(csidl_name):
     import array
     from com.sun import jna
@@ -505,6 +506,7 @@ def _get_win_folder_with_jna(csidl_name):

     return dir

+
 if system == "win32":
     try:
         import win32com.shell