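"""
Helpers for benchmarking Cycles renders from the command line.

Builds Blender invocations, watches their output to report render
progress, collects render statistics, and queries information about the
available compute devices.
"""
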
import os
import subprocess
import tempfile
import time

from . import (logger,
               progress,
               stats,
               util)


def constructBenchmarkCommand(ctx, scene, blendfile, output_folder, cfra):
    command = [ctx.blender,
               "--background",
               "--factory-startup",
               "-noaudio",
               "--debug-cycles",
               "--enable-autoexec",
               "--engine", "CYCLES",
               blendfile]
    if ctx.image_output_dir:
        # Use "#" so Blender substitutes the frame number into the file name.
        output = os.path.join(ctx.image_output_dir, scene) + "#"
        command.extend(["--render-format", "PNG",
                        "--render-output", output])
    else:
        command.extend(["--render-output", output_folder,
                        "--render-format", "PNG"])
    command.extend(["--python", ctx.configure_script,
                    "-f", str(cfra),
                    "--", "--benchmark-device-type", ctx.device_type])
    if ctx.device_name:
        command.extend(["--benchmark-device", ctx.device_name])
    if ctx.device_single:
        command.extend(["--benchmark-device-single"])
    if ctx.device_index != -1:
        # All arguments passed to subprocess must be strings.
        command.extend(["--benchmark-device-index", str(ctx.device_index)])
    return command


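# For illustration, a command built above looks roughly like this
# (paths and values are hypothetical):
#
#   blender --background --factory-startup -noaudio --debug-cycles \
#       --enable-autoexec --engine CYCLES scene.blend \
#       --render-output /tmp/benchmark_xyz --render-format PNG \
#       --python configure.py -f 1 -- --benchmark-device-type CUDA

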
def benchmarkBlenderWatched(command):
    # Run Blender with configured command line.
    logger.DEBUG("About to execute command: {}" . format(command))
    start_time = time.time()
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    progress.render_process(process)

    # Keep reading status while Blender is alive.
    st = stats.Stats()
    while True:
        line = process.stdout.readline()
        if line == b"" and process.poll() is not None:
            break
        line = line.decode().strip()
        if line == "":
            continue
        if logger.VERBOSE:
            print(line)
        else:
            logger.DEBUG("Line from stdout: {}" . format(line))
        st.update(line)
        if st.current_tiles != 0 or st.current_sample is not None:
            elapsed_time = time.time() - start_time
            elapsed_time_str = util.humanReadableTimeDifference(elapsed_time)
            progress.progress(int(st.getCurrentProgress()),
                              100,
                              prefix="Path Tracing Tiles {}" .
                              format(elapsed_time_str))

    # Clear line used by progress.
    progress.progressClear()

    progress.render_process(None)

    if process.returncode != 0:
        logger.ERROR("Rendering crashed")
        return None
    logger.OK("Successfully rendered")

    return st


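# Note: stats.Stats() above is fed Blender's render log line by line; the
# --debug-cycles flag added in constructBenchmarkCommand() makes that log
# detailed enough to track tile/sample progress and render timings.

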
def benchmarkScene(ctx, scene):
    progress.scene(scene)
    logger.BOLD("Begin benchmark of scene {}" . format(scene))
    # Get usable full path to the corresponding .blend file.
    blendfile = ctx.getSceneFilename(scene)
    logger.DEBUG("File to use: {}" . format(blendfile))
    # Get command for rendering.
    tmpdir = tempfile.TemporaryDirectory(prefix="benchmark_")
    cfra = util.queryCurrentFrame(blendfile)
    command = constructBenchmarkCommand(
        ctx, scene, blendfile, tmpdir.name, cfra)
    logger.DEBUG("Command for rendering: {}" . format(command))
    progress.step('WARM_UP')
    logger.INFO("> Warm-up round, making sure everything is ready " +
                "(this might take several minutes).")
    warmup_command = command + ['--benchmark-warmup']
    benchmarkBlenderWatched(warmup_command)
    if progress.is_canceled():
        return None
    # Remove result of the warm-up round.
    if ctx.image_output_dir:
        full_image_output = os.path.join(ctx.image_output_dir, scene) + \
            str(cfra) + ".png"
        if os.path.exists(full_image_output):
            os.remove(full_image_output)
    # TODO(sergey): Consider doing several passes.
    progress.step('RUN')
    logger.INFO("> Doing real benchmark pass now.")
    # NOTE: Name the local differently from the imported stats module
    # to avoid shadowing it.
    render_stats = benchmarkBlenderWatched(command)
    # Rename file to a more sensible name, without the frame number.
    if ctx.image_output_dir:
        if os.path.exists(full_image_output):
            full_image_output_no_frame = \
                os.path.join(ctx.image_output_dir, scene) + ".png"
            os.rename(full_image_output, full_image_output_no_frame)
    if render_stats:
        logger.INFO("Total render time: {}" . format(
            util.humanReadableTimeDifference(
                render_stats.pipeline_render_time)))
    progress.step('')
    progress.scene_stats(scene, render_stats)
    progress.scene('')
    return render_stats


def benchmarkAll(ctx):
    """
    Benchmark all scenes from the context with the requested settings.
    """
    # First of all, print a summary of what we'll be doing.
    ctx.printSummary()
    if not ctx.verify():
        return False
    all_stats = {}
    for scene in ctx.scenes:
        file_stats = benchmarkScene(ctx, scene)
        all_stats[scene] = file_stats
        if progress.is_canceled():
            break
    return all_stats


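# For illustration, benchmarkAll() returns one entry per scene, e.g.
# (scene names are hypothetical):
#
#   {"bmw27": <stats.Stats object>, "classroom": None}
#
# where a None value means the scene's render crashed or was canceled.

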
def benchmarkGetDeviceInfo(ctx) -> dict:
    import json

    command = [ctx.blender,
               "--background",
               "--factory-startup",
               "-noaudio",
               "--enable-autoexec",
               "--engine", "CYCLES",
               "--python", ctx.configure_script,
               "--",
               "--benchmark-device-type", ctx.device_type]
    if ctx.device_name:
        command.extend(["--benchmark-device", ctx.device_name])
    if ctx.device_single:
        command.extend(["--benchmark-device-single"])
    if ctx.device_index != -1:
        # All arguments passed to subprocess must be strings.
        command.extend(["--benchmark-device-index", str(ctx.device_index)])
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    # stderr is merged into stdout, so communicate() returns None for it.
    stdout, _ = process.communicate()
    lines = stdout.decode().split("\n")
    # Parse output.
    device_type = ""
    compute_devices = []
    num_cpu_threads = 0
    for line in lines:
        if line.startswith("Compute device type:"):
            device_type = line.split(':', 1)[1].strip()
        elif line.startswith("Using compute device:"):
            device_as_json = line.split(':', 1)[1].strip()
            device = json.loads(device_as_json)
            compute_devices.append(device)
        elif line.startswith("CPU threads used:"):
            num_cpu_threads = int(line.split(':', 1)[1].strip())
    return {"device_type": device_type,
            "compute_devices": compute_devices,
            "num_cpu_threads": num_cpu_threads}


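# Each entry of "compute_devices" is a device dict of the form
# {'name': name, 'type': 'CPU/CUDA/OPENCL', 'is_display': bool}, so a
# typical benchmarkGetDeviceInfo() result looks like (values illustrative):
#
#   {"device_type": "CUDA",
#    "compute_devices": [{"name": "GeForce GTX 1080",
#                         "type": "CUDA",
#                         "is_display": True}],
#    "num_cpu_threads": 8}

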
def benchmarkPrintDeviceInfo(ctx):
    device_info = benchmarkGetDeviceInfo(ctx)
    logger.INFO("  Device type: {}" . format(device_info["device_type"]))
    logger.INFO("  Compute devices:")
    for compute_device in device_info["compute_devices"]:
        logger.INFO("    {}" . format(compute_device))