Buildbot: Support running benchmarks for PRs #9
@@ -50,6 +50,8 @@ code_pipeline_patch_step_names = [
     "compile-code",
     "compile-gpu",
     "compile-install",
+    "benchmark-code",
+    "deliver-benchmark-results",
     "test-code",
     "sign-code-binaries",
     "package-code-binaries",
@@ -72,6 +74,12 @@ code_pipeline_lint_step_names = [
     "lint-code",
 ]
 
+# Steps for benchmarking.
+code_pipeline_benchmark_step_names = [
+    "benchmark-code",
+    "deliver-benchmark-results",
+]
+
 # Steps for testing.
 code_pipeline_test_step_names = [
     "test-code",
@@ -213,6 +221,13 @@ scheduler_properties_patch = [
     buildbot.plugins.util.StringParameter(
         name="pull_revision", label="Pull Revision:", required=False, hide=True, size=80, default=""
     ),
+    buildbot.plugins.util.BooleanParameter(
+        name="needs_benchmark",
+        label="Benchmark -> run benchmark to compare performance",
+        required=True,
+        strict=True,
+        default=False,
+    ),
 ]
 
 scheduler_properties_patch += scheduler_properties_common
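Note: a minimal sketch of how these properties might be attached to the patch scheduler, assuming the surrounding config uses a standard ForceScheduler (the scheduler and builder names below are illustrative, not from this repo):

    import buildbot.plugins.util

    # Hypothetical wiring: the BooleanParameter above renders as a checkbox on
    # the force dialog, and its value arrives as the needs_benchmark build
    # property that the pipeline code reads later.
    patch_scheduler = buildbot.plugins.util.ForceScheduler(
        name="patch-force",                    # illustrative name
        builderNames=["code-patch-builder"],   # illustrative builder
        properties=scheduler_properties_patch,
    )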
@@ -300,6 +315,7 @@ def needs_do_code_pipeline_step(step):
     )
     needs_package_delivery = step.getProperty("needs_package_delivery")
     needs_gpu_binaries = step.getProperty("needs_gpu_binaries")
+    needs_benchmark = step.getProperty("needs_benchmark")
     needs_skip_tests = step.getProperty("needs_skip_tests")
 
     python_module = step.getProperty("python_module")
@@ -310,6 +326,8 @@ def needs_do_code_pipeline_step(step):
         needs_do_it = not needs_skip_tests
     elif step.name == "compile-gpu":
         needs_do_it = needs_package_delivery or needs_gpu_binaries
+    elif step.name in code_pipeline_benchmark_step_names:
+        needs_do_it = needs_benchmark
     elif is_package_delivery_step:
         needs_do_it = needs_package_delivery
 
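Note: a minimal sketch of how this predicate usually gates a step, assuming steps are constructed with Buildbot's standard doStepIf hook (the command below is illustrative):

    from buildbot.plugins import steps

    # Hypothetical step: doStepIf receives the step object at runtime, so
    # needs_do_code_pipeline_step() can read build properties and skip the
    # benchmark steps unless needs_benchmark was set.
    benchmark_step = steps.ShellCommand(
        name="benchmark-code",
        command=["python", "-m", "worker.blender", "benchmark-code"],  # illustrative
        doStepIf=needs_do_code_pipeline_step,
    )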
@@ -357,6 +375,31 @@ def create_deliver_code_binaries_step(worker_config, track_id, pipeline_type):
     )
 
 
+def create_deliver_benchmark_results_step(worker_config, track_id, pipeline_type):
+    file_size_in_mb = 500 * 1024 * 1024
+    worker_source_path = pathlib.Path(f"../../../../git/blender-{track_id}/build_package")
+    master_dest_path = pathlib.Path(
+        f"{worker_config.buildbot_download_folder}/{pipeline_type}"
+    ).expanduser()
+
+    benchmark_worker_source_path = worker_source_path / "benchmark"
+    benchmark_master_dest_path = master_dest_path / "benchmarks"
+
+    return LinkMultipleFileUpload(
+        name="deliver-benchmark-results",
+        maxsize=file_size_in_mb,
+        workdir=f"{benchmark_worker_source_path}",
+        glob=True,
+        workersrcs=["*"],
+        masterdest=f"{benchmark_master_dest_path}",
+        mode=0o644,
+        url=f"../download/{pipeline_type}/benchmarks",
+        description="running",
+        descriptionDone="completed",
+        alwaysRun=True,
+    )
+
+
 def create_deliver_test_results_step(worker_config, track_id, pipeline_type):
     file_size_in_mb = 500 * 1024 * 1024
     worker_source_path = pathlib.Path(f"../../../../git/blender-{track_id}/build_package")
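Note: LinkMultipleFileUpload is this codebase's upload step; it appears to extend Buildbot's stock MultipleFileUpload with a download link on the build page. A minimal sketch of the stock equivalent, with illustrative paths, for readers unfamiliar with the step:

    from buildbot.plugins import steps

    # With glob=True, each pattern in workersrcs is expanded on the worker and
    # every match is copied to masterdest on the master with the given mode.
    upload = steps.MultipleFileUpload(
        workersrcs=["*"],
        workdir="build_package/benchmark",            # illustrative
        masterdest="/srv/download/patch/benchmarks",  # illustrative
        glob=True,
        mode=0o644,
        url="../download/patch/benchmarks",
        alwaysRun=True,
    )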
@@ -391,8 +434,12 @@ def create_deliver_test_results_step(worker_config, track_id, pipeline_type):
 def next_worker_code(worker_names_gpu, builder, workers, request):
     # Use a GPU worker if needed and supported for this platform.
     # NVIDIA worker is currently reserved for GPU builds only.
+    # Benchmark also needs a GPU worker so that results are comparable
+    # to the daily benchmark run.
     compatible_workers = []
-    if request.properties.getProperty("needs_gpu_tests", False) and worker_names_gpu:
+    needs_gpu_tests = request.properties.getProperty("needs_gpu_tests", False)
+    needs_benchmark = request.properties.getProperty("needs_benchmark", False)
+    if (needs_gpu_tests or needs_benchmark) and worker_names_gpu:
         for worker in workers:
             if worker.worker.workername in worker_names_gpu:
                 compatible_workers.append(worker)
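Note: a minimal sketch of how a nextWorker callback like this is usually registered, assuming the builder config binds the GPU worker list with functools.partial (names below are illustrative):

    import functools
    import buildbot.plugins.util

    builder = buildbot.plugins.util.BuilderConfig(
        name="code-patch-builder",                 # illustrative
        workernames=["worker-1", "worker-gpu-1"],  # illustrative
        nextWorker=functools.partial(next_worker_code, ["worker-gpu-1"]),
        factory=code_factory,                      # illustrative factory
    )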
@@ -459,6 +506,8 @@ def populate(devops_env_id):
             step = create_deliver_code_binaries_step(worker_config, track_id, pipeline_type)
         elif step_name == "deliver-test-results":
             step = create_deliver_test_results_step(worker_config, track_id, pipeline_type)
+        elif step_name == "deliver-benchmark-results":
+            step = create_deliver_benchmark_results_step(worker_config, track_id, pipeline_type)
         else:
             needs_halt_on_failure = True
             if step_name in code_pipeline_test_step_names:
@@ -618,6 +667,7 @@ def populate(devops_env_id):
         ),
     }
     if pipeline_type == "patch":
+        trigger_properties["needs_benchmark"] = buildbot.plugins.util.Property("needs_benchmark")
         trigger_properties["patch_id"] = buildbot.plugins.util.Property("patch_id")
         trigger_properties["revision"] = buildbot.plugins.util.Property("revision")
         trigger_properties["build_configuration"] = buildbot.plugins.util.Property(
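Note: a minimal sketch of where trigger_properties usually ends up, assuming the patch pipeline fans out through a standard Trigger step (the scheduler name is illustrative):

    from buildbot.plugins import steps

    # set_properties forwards needs_benchmark (and the other patch properties)
    # to the triggered per-platform builds.
    trigger = steps.Trigger(
        schedulerNames=["code-patch-triggerable"],  # illustrative
        waitForFinish=True,
        set_properties=trigger_properties,
    )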
@@ -36,7 +36,7 @@ def create_deliver_step(devops_env_id):
     return LinkMultipleFileUpload(
         name="deliver",
         maxsize=file_size_in_mb,
-        workdir=f"{worker_source_path}",
+        workdir=f"{worker_source_path}/benchmark",
         glob=True,
         workersrcs=["main-*"],
         masterdest=f"{master_dest_path}",
@@ -16,18 +16,7 @@ import worker.utils
 import urllib.request
 
 
-def create_upload(
-    builder: worker.blender.CodeBuilder, benchmark_path: pathlib.Path, revision: str
-) -> None:
-    # Create package directory.
-    branch = builder.branch_id.replace("blender-", "").replace("-release", "")
-    name = f"{branch}-{builder.platform}-{builder.architecture}"
-    package_dir = builder.package_dir / name
-
-    worker.utils.remove_dir(package_dir)
-    os.makedirs(package_dir, exist_ok=True)
-
-    # Fetch existing summary
+def fetch_previous(builder: worker.blender.CodeBuilder, json_url: str, json_path: pathlib.Path):
     worker_config = conf.worker.get_config(builder.service_env_id)
     base_urls = {
         "LOCAL": str(worker_config.buildbot_download_folder),
@@ -35,37 +24,80 @@ def create_upload(
         "PROD": "https://builder.blender.org/download",
     }
     base_url = base_urls[builder.service_env_id]
+    json_url = f"{base_url}/{json_url}"
 
-    summary_json_url = f"{base_url}/daily/benchmarks/{name}/summary.json"
-    summary_json_path = package_dir / "summary.json"
     try:
         if builder.service_env_id == "LOCAL":
-            worker.utils.copy_file(pathlib.Path(summary_json_url), summary_json_path)
+            worker.utils.copy_file(pathlib.Path(json_url), json_path)
         else:
-            urllib.request.urlretrieve(summary_json_url, summary_json_path)
+            urllib.request.urlretrieve(json_url, json_path)
+        return None
     except Exception as e:
-        error_msg = str(e)
-        worker.utils.warning(f"Could not retrieve benchmark summary.json: {error_msg}")
+        return str(e)
+
+
+def merge_results(old: pathlib.Path, new: pathlib.Path, out: pathlib.Path):
+    combined = []
+    if old.exists():
+        combined = json.loads(old.read_text())
+    combined += json.loads(new.read_text())
+    out.write_text(json.dumps(combined, indent=2))
+
+
+def create_upload(
+    builder: worker.blender.CodeBuilder, benchmark_path: pathlib.Path, revision: str
+) -> None:
+    # Create package directory.
+    branch = builder.branch_id.replace("blender-", "").replace("-release", "")
+    baseline_name = f"{branch}-{builder.platform}-{builder.architecture}"
+    if builder.patch_id:
+        name = f"PR{builder.patch_id}-{builder.platform}-{builder.architecture}"
+    else:
+        name = baseline_name
+    package_benchmark_dir = builder.package_dir / "benchmark" / name
+
+    worker.utils.remove_dir(package_benchmark_dir)
+    os.makedirs(package_benchmark_dir, exist_ok=True)
+
+    # Fetch baseline summary
+    baseline_json_url = f"daily/benchmarks/{baseline_name}/summary.json"
+    baseline_json_path = package_benchmark_dir / "baseline.json"
+    if error_msg := fetch_previous(builder, baseline_json_url, baseline_json_path):
+        worker.utils.warning(f"Could not retrieve previous benchmark data: {error_msg}")
 
     # Create json files in package directory.
     results_json_path = benchmark_path / "results.json"
-    revision_json_path = package_dir / f"{revision}.json"
+    revision_json_path = package_benchmark_dir / f"{revision}.json"
 
     worker.utils.copy_file(results_json_path, revision_json_path)
 
-    summary_json = []
-    if summary_json_path.exists():
-        summary_json = json.loads(summary_json_path.read_text())
-    summary_json += json.loads(results_json_path.read_text())
-    summary_json_path.write_text(json.dumps(summary_json, indent=2))
+    # Build new summary
+    summary_json_path = package_benchmark_dir / "summary.json"
+    if builder.patch_id:
+        # Fetch patch summary
+        patch_json_url = f"patch/benchmarks/{name}/summary.json"
+        patch_json_path = package_benchmark_dir / "patch.json"
+        # No need to warn on errors here, it's normal that there might not be a result yet
+        fetch_previous(builder, patch_json_url, patch_json_path)
+
+        # Add new result to patch summary
+        merge_results(patch_json_path, results_json_path, summary_json_path)
+
+        # Generate diff between baseline result and the patch results
+        cmd = ["diff", baseline_json_path, summary_json_path]
+    else:
+        # Add new result to baseline summary
+        merge_results(baseline_json_path, results_json_path, summary_json_path)
+
+        # Generate graph showing results over time
+        cmd = ["graph", summary_json_path]
 
     # Create html file in package directory.
-    report_html_path = package_dir / "report.html"
+    report_html_path = package_benchmark_dir / "report.html"
     cmd = [
         sys.executable,
         builder.code_path / "tests" / "performance" / "benchmark.py",
-        "graph",
-        summary_json_path,
+    ] + cmd + [
         "-o",
         report_html_path,
     ]
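Note: merge_results above just concatenates two JSON arrays of benchmark entries. A tiny self-contained demo of that behaviour (data and paths are illustrative); the final cmd in create_upload then amounts to "benchmark.py diff baseline.json summary.json -o report.html" for patches and "benchmark.py graph summary.json -o report.html" for the daily baseline:

    import json
    import pathlib
    import tempfile

    tmp = pathlib.Path(tempfile.mkdtemp())
    old, new, out = tmp / "baseline.json", tmp / "results.json", tmp / "summary.json"

    old.write_text(json.dumps([{"test": "classroom", "time": 10.2}]))
    new.write_text(json.dumps([{"test": "classroom", "time": 9.8}]))

    merge_results(old, new, out)
    print(json.loads(out.read_text()))  # both entries in a single list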
@@ -17,6 +17,7 @@ import worker.utils
 import worker.blender.update
 import worker.blender.lint
 import worker.blender.compile
+import worker.blender.benchmark
 import worker.blender.test
 import worker.blender.sign
 import worker.blender.pack
@@ -30,6 +31,7 @@ if __name__ == "__main__":
     steps["compile-code"] = worker.blender.compile.compile_code
     steps["compile-gpu"] = worker.blender.compile.compile_gpu
     steps["compile-install"] = worker.blender.compile.compile_install
+    steps["benchmark-code"] = worker.blender.benchmark.benchmark
     steps["test-code"] = worker.blender.test.test
     steps["sign-code-binaries"] = worker.blender.sign.sign
     steps["package-code-binaries"] = worker.blender.pack.pack
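Note: a minimal sketch of how a step table like this is typically dispatched; the argument handling below is an assumption, since the rest of the __main__ block is not part of this diff:

    import sys

    # Hypothetical dispatch: the master invokes the worker module with a step
    # name, and the matching callable from the steps dict runs on the worker.
    step_name = sys.argv[-1]
    if step_name not in steps:
        sys.exit(f"Unknown step: {step_name}")
    steps[step_name](builder)  # 'builder' would be constructed earlier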