WIP: Single-frame job compiler #104194

Draft
k8ie wants to merge 30 commits from k8ie/flamenco:single-frame into main

When changing the target branch, be careful to rebase the branch in your fork to match. See documentation.
1 changed file with 350 additions and 0 deletions

View File

@ -0,0 +1,350 @@
// SPDX-License-Identifier: GPL-3.0-or-later
// Job type definition consumed by Flamenco: declares the settings shown at
// submission time and the values auto-evaluated from the Blender scene.
const JOB_TYPE = {
  label: "Single-frame Blender Render",
  settings: [
    // Settings for artists to determine:
    { key: "frame", type: "int32", required: true, eval: "C.scene.frame_current",
      description: "Frame to render" },
    // Each render task covers a square tile of tile_size% of the frame per axis.
    { key: "tile_size", type: "int32", default: 5, propargs: {min: 1, max: 100}, description: "Tile size for each Task (sizes are in % of each dimension)" },

    // render_output_root + add_path_components determine the value of render_output_path.
    { key: "render_output_root", type: "string", subtype: "dir_path", required: true, visible: "submission",
      description: "Base directory of where render output is stored. Will have some job-specific parts appended to it" },
    { key: "add_path_components", type: "int32", required: true, default: 0, propargs: {min: 0, max: 32}, visible: "submission",
      description: "Number of path components of the current blend file to use in the render output path" },
    // Derived, read-only; the {timestamp} placeholder is expanded at compile time.
    { key: "render_output_path", type: "string", subtype: "file_path", editable: false,
      eval: "str(Path(abspath(settings.render_output_root), last_n_dir_parts(settings.add_path_components), jobname, 'frame_' + str(settings.frame), '{timestamp}'))",
      description: "Final file path of where render output will be saved" },

    // Automatically evaluated settings:
    { key: "blendfile", type: "string", required: true, description: "Path of the Blend file to render", visible: "web" },
    { key: "format", type: "string", required: true, eval: "C.scene.render.image_settings.file_format", visible: "web" },
    { key: "use_compositing", type: "bool", required: true, eval: "C.scene.use_nodes and C.scene.render.use_compositing", visible: "web" },
    { key: "use_denoising", type: "bool", required: true, eval: "C.scene.cycles.use_denoising", visible: "web",
      description: "Toggles OpenImageDenoise" },
    { key: "image_file_extension", type: "string", required: true, eval: "C.scene.render.file_extension", visible: "hidden",
      description: "File extension used for the final export" },
    // Resolution values are re-applied in the merge task so the merged image
    // matches the scene's output size.
    { key: "resolution_x", type: "int32", required: true, eval: "C.scene.render.resolution_x", visible: "hidden" },
    { key: "resolution_y", type: "int32", required: true, eval: "C.scene.render.resolution_y", visible: "hidden" },
    { key: "resolution_percentage", type: "int32", required: true, eval: "C.scene.render.resolution_percentage", visible: "hidden" }
  ]
};
// Entry point: compile a single-frame job into per-tile render tasks,
// one merge task that depends on all of them, and a final composite task.
function compileJob(job) {
  print("Single-frame Render job submitted");
  print("job: ", job);

  const settings = job.settings;
  const renderOutput = renderOutputPath(job);

  // Make sure that when the job is investigated later, it shows the
  // actually-used render output:
  settings.render_output_path = renderOutput;

  const renderDir = path.dirname(renderOutput);
  const renderTasks = authorRenderTasks(settings, renderDir, renderOutput);
  const mergeTask = authorCreateMergeTask(settings, renderOutput);
  const compositeTask = authorCreateCompositeTask(settings, renderOutput);

  renderTasks.forEach((renderTask) => job.addTask(renderTask));

  // All render tasks are a dependency of the merge task.
  renderTasks.forEach((renderTask) => mergeTask.addDependency(renderTask));
  job.addTask(mergeTask);

  // Compositing runs last, once the merged image exists.
  compositeTask.addDependency(mergeTask);
  job.addTask(compositeTask);
}
// Do field replacement on the render output path.
//
// Expands the `{timestamp}` placeholder to the job's creation time; any other
// `{...}` placeholder is left untouched. Throws when the setting is missing.
function renderOutputPath(job) {
  // Renamed from `path` to avoid shadowing the `path` module used elsewhere
  // in this file.
  const outputPath = job.settings.render_output_path;
  if (!outputPath) {
    // Throw a real Error (not a bare string) so the failure carries a stack.
    throw new Error("no render_output_path setting!");
  }
  return outputPath.replace(/{([^}]+)}/g, (match, placeholder) => {
    switch (placeholder) {
      case "timestamp":
        return formatTimestampLocal(job.created);
      default:
        return match;
    }
  });
}
// Split the 100% x 100% frame into a square grid of tiles, each covering
// `tile_size` percent per axis. Returns a flat row-major list of
// {row, column} grid coordinates.
function tileChunker(tile_size) {
  // Rows and columns always use the same count, so compute it once.
  const tilesPerAxis = Math.ceil(100 / tile_size);
  const tiles = [];
  for (let rowIndex = 0; rowIndex < tilesPerAxis; rowIndex++) {
    for (let columnIndex = 0; columnIndex < tilesPerAxis; columnIndex++) {
      tiles.push({ row: rowIndex, column: columnIndex });
    }
  }
  return tiles;
}
// Author one Blender render task per tile.
//
// Each task renders the frame with "crop to border" enabled so only its own
// tile region is computed, and writes a multilayer EXR named
// r<row>c<column>_<frame> into the "tiles" directory under the render output.
function authorRenderTasks(settings, renderDir, renderOutput) {
  print("authorRenderTasks(", renderDir, renderOutput, ")");
  let renderTasks = [];
  let tiles = tileChunker(settings.tile_size);
  print(tiles);
  for (let tile of tiles) {
    const task = author.Task(`render-r${tile.row}c${tile.column}`, "blender");
    // Python executed inside Blender: disable compositing/denoising for the
    // tile pass, force multilayer EXR output, and restrict the render border
    // to this tile's region (border coordinates are fractions of the frame).
    let pythonExpression = `
import bpy
render = bpy.context.scene.render
render.use_compositing = False
bpy.context.scene.cycles.use_denoising = False
render.image_settings.file_format = 'OPEN_EXR_MULTILAYER'
render.use_crop_to_border = True
tile_size_decimal = ${settings.tile_size} / 100
render.border_min_x = ${tile.column} * tile_size_decimal
render.border_max_x = tile_size_decimal + ${tile.column} * tile_size_decimal
render.border_min_y = ${tile.row} * tile_size_decimal
render.border_max_y = tile_size_decimal + ${tile.row} * tile_size_decimal`;
    if (settings.use_denoising) {
      // Denoising itself happens later, in the merge task; here we only make
      // sure the denoising data passes are stored per tile.
      pythonExpression += `
for layer in bpy.context.scene.view_layers:
    layer['cycles']['denoising_store_passes'] = 1
    layer.use_pass_vector = True`;
    }
    const command = author.Command("blender-render", {
      exe: "{blender}",
      exeArgs: "{blenderArgs}",
      argsBefore: [],
      blendfile: settings.blendfile,
      args: [
        "--python-exit-code", 1,
        "--python-expr", pythonExpression,
        // The r<row>c<column>_ prefix encodes the tile's grid position; the
        // merge task parses it back out of the file names.
        "--render-output", path.join(renderDir, path.basename(renderOutput), "tiles", "r" + tile.row + "c" + tile.column + "_"),
        "--render-frame", settings.frame
      ]
    });
    task.addCommand(command);
    renderTasks.push(task);
  }
  return renderTasks;
}
// Author the task that assembles all tile renders into one image.
//
// The generated Python runs inside Blender: it loads every tile EXR from
// <renderOutput>/tiles, builds a compositor graph that shifts each tile to
// its grid position with Translate nodes (positions decoded from the
// r<row>c<column>_ file-name prefix), stacks the Combined passes with Alpha
// Over nodes, and — when denoising is enabled — ADD-mixes the Denoising
// Albedo/Normal passes so a single Denoise node can run on the whole frame.
// The result is written to <renderOutput>/MERGED (multilayer EXR). When only
// one tile exists, the file is simply renamed to MERGED.exr.
//
// NOTE(review): if the "tiles" directory is empty, `filenames[0]` raises
// IndexError — presumably the render tasks guarantee at least one file, but
// worth confirming.
function authorCreateMergeTask(settings, renderOutput) {
  const task = author.Task(`merge`, "blender");
  let pythonExpression = `
import pathlib
import bpy
import math
def normalize(number):
    return round(-0.5 + (number - 0) * (0.5 - -0.5) / 1 - 0, 10)
basepath = pathlib.Path("${renderOutput}")
renders = basepath / "tiles"
filenames = list(renders.iterdir())
if len(filenames) <= 1:
    print('This job only has one file, merging not required.')
    print('Moving ' + str(renders / filenames[0].name) + ' to ' + str(basepath / 'MERGED.exr'))
    (renders / filenames[0].name).rename(basepath / 'MERGED.exr')
    exit()
row_max = math.ceil(100 / ${settings.tile_size}) - 1
bpy_scene = bpy.context.scene
bpy_render = bpy_scene.render
node_tree = bpy_scene.node_tree
bpy_render.resolution_x = ${settings.resolution_x}
bpy_render.resolution_y = ${settings.resolution_y}
bpy_render.resolution_percentage = ${settings.resolution_percentage}
bpy_render.use_compositing = True
bpy_scene.use_nodes = True
node_tree.nodes.clear()
bpy_render.image_settings.file_format = 'OPEN_EXR_MULTILAYER'
bpy_render.filepath = str(basepath / "MERGED")
denoising = "${settings.use_denoising}" == "true"
tile_size_decimal = ${settings.tile_size} / 100
# Takes the column and row number and creates a translate node with the right coordinates to align a tile
def create_translate(dimensions):
    translate_node = node_tree.nodes.new(type='CompositorNodeTranslate')
    translate_node.use_relative = True
    for dimension in dimensions:
        if dimensions[dimension] == 0:
            translate_node.inputs[dimension].default_value = normalize(tile_size_decimal / 2)
        else:
            if dimensions[dimension] == row_max and 100 % ${settings.tile_size} != 0:
                half_this_tile = ((100 % ${settings.tile_size}) / 100) / 2
            else:
                half_this_tile = (tile_size_decimal) / 2
            translate_node.inputs[dimension].default_value = normalize((tile_size_decimal + (dimensions[dimension] - 1) * tile_size_decimal) + half_this_tile)
    return translate_node
def align_tiles(input):
    translated_list = []
    for index, image_node in enumerate(image_nodes):
        file_name = image_node.image.name_full
        dimensions = {'X': int(file_name.split('c')[-1].split('_')[0]), 'Y': int(file_name.split('c')[0].replace('r', ''))}
        translated_list.append(create_translate(dimensions))
        node_tree.links.new(image_node.outputs[input], translated_list[index].inputs['Image'])
    return translated_list
# Create a list of image nodes
image_nodes = []
for index, file in enumerate(filenames):
    bpy.ops.image.open(filepath=str(renders / file.name), use_sequence_detection=False)
    image = bpy.data.images[bpy.path.basename(file.name)]
    image_nodes.append(node_tree.nodes.new(type='CompositorNodeImage'))
    image_nodes[index].image = image
# Create translates for Combined, Albedo and Normal and put them in a list
combined_translates = align_tiles("Combined")
if denoising:
    albedo_translates = align_tiles("Denoising Albedo")
    normal_translates = align_tiles("Denoising Normal")
output_node = node_tree.nodes.new(type='CompositorNodeComposite')
alpha_over_nodes = []
albedo_mix_nodes = []
normal_mix_nodes = []
for index, node in enumerate(combined_translates):
    if index == 0:
        # Take the first two image nodes and combine them
        alpha_over_nodes.append(node_tree.nodes.new(type='CompositorNodeAlphaOver'))
        node_tree.links.new(combined_translates[0].outputs['Image'], alpha_over_nodes[index].inputs[1])
        node_tree.links.new(combined_translates[1].outputs['Image'], alpha_over_nodes[index].inputs[2])
        if denoising:
            albedo_mix_nodes.append(node_tree.nodes.new(type='CompositorNodeMixRGB'))
            albedo_mix_nodes[index].blend_type = 'ADD'
            node_tree.links.new(albedo_translates[0].outputs['Image'], albedo_mix_nodes[index].inputs[1])
            node_tree.links.new(albedo_translates[1].outputs['Image'], albedo_mix_nodes[index].inputs[2])
            normal_mix_nodes.append(node_tree.nodes.new(type='CompositorNodeMixRGB'))
            normal_mix_nodes[index].blend_type = 'ADD'
            node_tree.links.new(normal_translates[0].outputs['Image'], normal_mix_nodes[index].inputs[1])
            node_tree.links.new(normal_translates[1].outputs['Image'], normal_mix_nodes[index].inputs[2])
    else:
        # Take one image node and the previous alpha over node
        alpha_over_nodes.append(node_tree.nodes.new(type='CompositorNodeAlphaOver'))
        node_tree.links.new(alpha_over_nodes[index-1].outputs['Image'], alpha_over_nodes[index].inputs[1])
        node_tree.links.new(combined_translates[index+1].outputs['Image'], alpha_over_nodes[index].inputs[2])
        if denoising:
            albedo_mix_nodes.append(node_tree.nodes.new(type='CompositorNodeMixRGB'))
            albedo_mix_nodes[index].blend_type = 'ADD'
            node_tree.links.new(albedo_mix_nodes[index-1].outputs['Image'], albedo_mix_nodes[index].inputs[1])
            node_tree.links.new(albedo_translates[index+1].outputs['Image'], albedo_mix_nodes[index].inputs[2])
            normal_mix_nodes.append(node_tree.nodes.new(type='CompositorNodeMixRGB'))
            normal_mix_nodes[index].blend_type = 'ADD'
            node_tree.links.new(normal_mix_nodes[index-1].outputs['Image'], normal_mix_nodes[index].inputs[1])
            node_tree.links.new(normal_translates[index+1].outputs['Image'], normal_mix_nodes[index].inputs[2])
    if index + 1 == len(combined_translates) - 1:
        if denoising:
            denoise_node = node_tree.nodes.new(type='CompositorNodeDenoise')
            node_tree.links.new(alpha_over_nodes[index].outputs['Image'], denoise_node.inputs['Image'])
            node_tree.links.new(albedo_mix_nodes[index].outputs['Image'], denoise_node.inputs['Albedo'])
            node_tree.links.new(normal_mix_nodes[index].outputs['Image'], denoise_node.inputs['Normal'])
            node_tree.links.new(denoise_node.outputs['Image'], output_node.inputs['Image'])
        else:
            # Link the last image node and feed the output into the composite node
            node_tree.links.new(alpha_over_nodes[index].outputs['Image'], output_node.inputs['Image'])
        break
bpy.ops.render.render(write_still=True)`;
  const command = author.Command("blender-render", {
    exe: "{blender}",
    exeArgs: "{blenderArgs}",
    argsBefore: [],
    blendfile: settings.blendfile,
    args: [
      "--python-exit-code", 1,
      "--python-expr", pythonExpression
    ]
  });
  task.addCommand(command);
  return task;
}
// Author the final task that turns MERGED.exr into the delivered image.
//
// Three cases, chosen from the job settings:
//  * use_compositing: run the blend file's own compositor graph, rewiring its
//    Render Layers nodes to read from the merged image instead, and render to
//    <renderOutput>/FINAL.
//  * no compositing, format is already multilayer EXR: just rename
//    MERGED.exr to FINAL.exr.
//  * no compositing, any other format: export the merged image via
//    save_render() with the scene's file extension.
function authorCreateCompositeTask(settings, renderOutput) {
  // NOTE: removed an unused `let filename;` — the `filename` used below is a
  // Python variable inside the expression, not a JS one.
  let pythonExpression = `
import pathlib
import bpy
C = bpy.context
basepath = pathlib.Path("${renderOutput}")
filename = "MERGED.exr"
`;
  if (settings.use_compositing) {
    // Do the full composite+export pipeline
    // uses snippets from
    // https://github.com/state-of-the-art/BlendNet/blob/master/BlendNet/script-compose.py#L94
    pythonExpression += `
bpy.ops.image.open(filepath=str(basepath / filename), use_sequence_detection=False)
image = bpy.data.images[bpy.path.basename(filename)]
image_node = C.scene.node_tree.nodes.new(type='CompositorNodeImage')
image_node.image = image
nodes_to_remove = []
links_to_create = []
for node in C.scene.node_tree.nodes:
    print('DEBUG: Checking node %s' % (node,))
    if not isinstance(node, bpy.types.CompositorNodeRLayers) or node.scene != C.scene:
        continue
    nodes_to_remove.append(node)
    print('INFO: Reconnecting %s links to render image' % (node,))
    for link in C.scene.node_tree.links:
        print('DEBUG: Checking link %s - %s' % (link.from_node, link.to_node))
        if link.from_node != node:
            continue
        print('DEBUG: Found link %s - %s' % (link.from_socket, link.to_socket))
        link_name = "Combined"
        for output in image_node.outputs:
            print('DEBUG: Checking output:', output.name, link_name)
            if output.name != link_name:
                continue
            links_to_create.append((output, link))
            break
for output, link in links_to_create:
    print('INFO: Connecting "%s" output to %s.%s input' % (
        output, link.to_node, link.to_socket
    ))
    C.scene.node_tree.links.new(output, link.to_socket)
print("Removing the nodes could potentially break the pipeline")
for node in nodes_to_remove:
    print('INFO: Removing %s' % (node,))
    C.scene.node_tree.nodes.remove(node)
C.scene.render.filepath = str(basepath / "FINAL")
bpy.ops.render.render(write_still=True)`;
  } else if (settings.format === "OPEN_EXR_MULTILAYER") {
    // Only rename: the merged file already has the requested format.
    pythonExpression += `
(basepath / filename).rename(basepath / 'FINAL.exr')`;
  } else {
    // Only export. image_file_extension comes from C.scene.render.file_extension,
    // which includes the leading dot.
    pythonExpression += `
bpy.ops.image.open(filepath=str(basepath / filename), use_sequence_detection=False)
image = bpy.data.images[bpy.path.basename(filename)]
image.save_render(str(basepath / "FINAL") + "${settings.image_file_extension}")`;
  }
  const task = author.Task(`composite`, "blender");
  const command = author.Command("blender-render", {
    exe: "{blender}",
    exeArgs: "{blenderArgs}",
    argsBefore: [],
    blendfile: settings.blendfile,
    args: [
      "--python-exit-code", 1,
      "--python-expr", pythonExpression
    ]
  });
  task.addCommand(command);
  return task;
}