Just some glue logic to query progress and results from the benchmark. Files needed to be moved around so that both the standalone script and the addon are happy.

import os
import time
import subprocess

from . import (logger,
               progress,
               stats,
               util)

def constructBenchmarkCommand(ctx, scene, blendfile, output_folder, cfra):
    """Construct command line to render the given .blend file in background."""
    command = [ctx.blender,
               "--background",
               "--factory-startup",
               "-noaudio",
               "--debug-cycles",
               "--enable-autoexec",
               "--engine", "CYCLES",
               blendfile]
    if ctx.image_output_dir:
        output = os.path.join(ctx.image_output_dir, scene) + "#"
        command.extend(["--render-format", "PNG",
                        "--render-output", output])
    else:
        command.extend(["--render-output", output_folder,
                        "--render-format", "PNG"])
    command.extend(["--python", ctx.configure_script,
                    "-f", str(cfra),
                    "--", "--benchmark-device-type", ctx.device_type])
    if ctx.device_name:
        command.extend(["--benchmark-device", ctx.device_name])
    if ctx.device_single:
        command.extend(["--benchmark-device-single"])
    return command
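
# For illustration only: with a hypothetical context (a "blender" binary,
# CUDA device type, no explicit device name, no image output dir, frame 1),
# the constructed command resembles:
#
#   blender --background --factory-startup -noaudio --debug-cycles
#           --enable-autoexec --engine CYCLES scene.blend
#           --render-output /tmp/ --render-format PNG
#           --python configure.py -f 1 -- --benchmark-device-type CUDA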

def benchmarkBlenderWatched(command):
    """Run Blender and parse benchmark statistics from its output."""
    # Run Blender with the configured command line.
    logger.DEBUG("About to execute command: {}".format(command))
    start_time = time.time()
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)

    # Keep reading status while Blender is alive.
    st = stats.Stats()
    while True:
        line = process.stdout.readline()
        if line == b"" and process.poll() is not None:
            break
        line = line.decode().strip()
        if line == "":
            continue
        if logger.VERBOSE:
            print(line)
        else:
            logger.DEBUG("Line from stdout: {}".format(line))
        st.update(line)
        if st.current_tiles != 0:
            elapsed_time = time.time() - start_time
            elapsed_time_str = util.humanReadableTimeDifference(elapsed_time)
            progress.progress(st.current_tiles,
                              st.total_tiles,
                              prefix="Path Tracing Tiles {}".format(
                                  elapsed_time_str))

    # Clear line used by progress.
    progress.progressClear()

    if process.returncode != 0:
        logger.ERROR("Rendering crashed")
        return None
    logger.OK("Successfully rendered")

    return st
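
# Usage sketch (the scene name and paths are illustrative; in real use the
# frame number comes from util.queryCurrentFrame() and paths from the
# context, as in benchmarkScene() below):
#
#   command = constructBenchmarkCommand(ctx, "bmw27", "/path/to/bmw27.blend",
#                                       "/tmp/", 1)
#   st = benchmarkBlenderWatched(command)
#   if st is None:
#       ...  # Render crashed, handle the failure.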

def benchmarkScene(ctx, scene):
    """Benchmark a single scene: a warm-up round, then the real pass."""
    logger.BOLD("Begin benchmark of scene {}".format(scene))
    # Get usable full path to the corresponding .blend file.
    blendfile = ctx.getSceneFilename(scene)
    logger.DEBUG("File to use: {}".format(blendfile))
    # Get command for rendering.
    # TODO(sergey): Create some temp folder.
    cfra = util.queryCurrentFrame(blendfile)
    command = constructBenchmarkCommand(ctx, scene, blendfile, "/tmp/", cfra)
    logger.DEBUG("Command for rendering: {}".format(command))
    progress.step('WARM_UP')
    logger.INFO("> Warm-up round, making sure everything is ready " +
                "(this might take several minutes).")
    warmup_command = command + ['--benchmark-warmup']
    benchmarkBlenderWatched(warmup_command)
    # Remove result of the warm-up round.
    if ctx.image_output_dir:
        full_image_output = os.path.join(ctx.image_output_dir, scene) + \
            str(cfra) + ".png"
        if os.path.exists(full_image_output):
            os.remove(full_image_output)
    # TODO(sergey): Consider doing several passes.
    progress.step('RUN')
    logger.INFO("> Doing real benchmark pass now.")
    scene_stats = benchmarkBlenderWatched(command)
    # Rename file to a more sensible name.
    if ctx.image_output_dir:
        if os.path.exists(full_image_output):
            full_image_output_no_frame = \
                os.path.join(ctx.image_output_dir, scene) + ".png"
            os.rename(full_image_output, full_image_output_no_frame)
    if scene_stats:
        logger.INFO("Total render time: {}".format(
            util.humanReadableTimeDifference(
                scene_stats.pipeline_render_time)))
    progress.step('')
    return scene_stats

def benchmarkAll(ctx):
    """
    Benchmark all scenes from the context with the requested settings.
    """
    # First of all, print a summary of what we'll be doing.
    ctx.printSummary()
    if not ctx.verify():
        return False
    all_stats = {}
    for scene in ctx.scenes:
        file_stats = benchmarkScene(ctx, scene)
        all_stats[scene] = file_stats
    return all_stats
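
# The returned mapping goes from scene name to its stats.Stats object (or
# None when a render crashed). A minimal consumption sketch:
#
#   all_stats = benchmarkAll(ctx)
#   if all_stats:
#       for scene, st in all_stats.items():
#           print(scene, st.pipeline_render_time if st else "crashed")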

def benchmarkGetDeviceInfo(ctx):
    """Query Blender for information about the configured compute device."""
    command = [ctx.blender,
               "--background",
               "--factory-startup",
               "-noaudio",
               "--enable-autoexec",
               "--engine", "CYCLES",
               "--python", ctx.configure_script,
               "--",
               "--benchmark-device-type", ctx.device_type]
    if ctx.device_name:
        command.extend(["--benchmark-device", ctx.device_name])
    if ctx.device_single:
        command.extend(["--benchmark-device-single"])
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    # stderr is redirected to stdout, so the second value is always None.
    stdout, _ = process.communicate()
    lines = stdout.decode().split("\n")
    # Parse the output.
    device_type = ""
    compute_devices = []
    num_cpu_threads = 0
    for line in lines:
        if line.startswith("Compute device type:"):
            device_type = line.split(':', 1)[1].strip()
        elif line.startswith("Using compute device:"):
            compute_devices.append(line.split(':', 1)[1].strip())
        elif line.startswith("CPU threads used:"):
            num_cpu_threads = int(line.split(':', 1)[1].strip())
    return {"device_type": device_type,
            "compute_devices": compute_devices,
            "num_cpu_threads": num_cpu_threads}

def benchmarkPrintDeviceInfo(ctx):
    """Pretty-print information about the configured compute device."""
    device_info = benchmarkGetDeviceInfo(ctx)
    logger.INFO("  Device type: {}".format(device_info["device_type"]))
    logger.INFO("  Compute devices:")
    for compute_device in device_info["compute_devices"]:
        logger.INFO("    {}".format(compute_device))
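
# Example output (illustrative values, following the format strings above):
#
#     Device type: CUDA
#     Compute devices:
#       GeForce GTX 1080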