WIP: Initial version of a single-frame job compiler #104189
@@ -6,10 +6,9 @@ const JOB_TYPE = {
     // Settings for artists to determine:
     { key: "frame", type: "int32", required: true, eval: "C.scene.frame_current",
       description: "Frame to render"},
-    { key: "tile_size", type: "int32", default: 5, propargs: {min: 1, max: 100}, description: "Tile size for each Task (sizes are in % of the full image)",
+    { key: "tile_size", type: "int32", default: 5, propargs: {min: 1, max: 100}, description: "Tile size for each Task (sizes are in % of each dimension)",
       visible: "submission" },
-

     // render_output_root + add_path_components determine the value of render_output_path.
     { key: "render_output_root", type: "string", subtype: "dir_path", required: true, visible: "submission",
       description: "Base directory of where render output is stored. Will have some job-specific parts appended to it"},
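Note on the reworded description: tile_size is a percentage of each dimension, not of the image area. With the default of 5, a frame is split into floor(100/5) × floor(100/5) = 400 tiles (one render task per tile, per the setting's description), which is what tileChunker below produces.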
@@ -27,8 +26,9 @@ const JOB_TYPE = {
       description: "Toggles OpenImageDenoise" },
     { key: "image_file_extension", type: "string", required: true, eval: "C.scene.render.file_extension", visible: "hidden",
       description: "File extension used for the final export" },
-    { key: "samples", type: "string", required: true, eval: "f'1-{C.scene.cycles.samples}'", visible: "web",
-      description: "Total number of samples in the job" },
+    { key: "resolution_x", type: "int32", required: true, eval: "C.scene.render.resolution_x", visible: "hidden"},
+    { key: "resolution_y", type: "int32", required: true, eval: "C.scene.render.resolution_y", visible: "hidden"},
+    { key: "resolution_percentage", type: "int32", required: true, eval: "C.scene.render.resolution_percentage", visible: "hidden"}
   ]
 };

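The three new hidden settings capture the scene's render resolution at submission time so the merge step can re-apply it on the worker (see the compositing script further down). A minimal sketch of what those eval expressions produce inside Blender, assuming C is bound to bpy.context as for the other settings in this file:

import bpy

C = bpy.context  # name assumed by the eval strings above

# Values baked into the job settings when the artist submits:
resolution_x = C.scene.render.resolution_x                    # e.g. 1920
resolution_y = C.scene.render.resolution_y                    # e.g. 1080
resolution_percentage = C.scene.render.resolution_percentage  # e.g. 100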
@@ -90,8 +90,8 @@ function tileChunker(tile_size) {
     let tiles = [];
     const rows = Math.floor(100 / tile_size);
     const columns = Math.floor(100 / tile_size);
-    for (let row = 1; row <= rows; row++) {
-        for (let column = 1; column <= columns; column++) {
+    for (let row = 0; row < rows; row++) {
+        for (let column = 0; column < columns; column++) {
             tiles.push({"row": row, "column": column});
         }
     }
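Making the row/column loops zero-based lets a tile map directly onto Blender's render border, which expects fractions in the 0.0–1.0 range: tile 0 starts at 0.0 and the last tile ends at rows * tile_size / 100. The render-task side is not part of this hunk, so the following is only a sketch of how a worker script could consume one tile; apply_tile is a hypothetical helper:

import bpy

def apply_tile(scene, tile, tile_size):
    # tile_size is a percentage of each dimension; row/column are zero-based,
    # so the first tile starts exactly at 0.0.
    fraction = tile_size / 100.0
    scene.render.use_border = True
    scene.render.use_crop_to_border = False  # keep the full canvas so tiles line up when merged
    scene.render.border_min_x = tile["column"] * fraction
    scene.render.border_max_x = (tile["column"] + 1) * fraction
    scene.render.border_min_y = tile["row"] * fraction
    scene.render.border_max_y = (tile["row"] + 1) * fraction

# Example: fourth column of the bottom row with the default 5% tile size.
apply_tile(bpy.context.scene, {"row": 0, "column": 3}, tile_size=5)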
@@ -157,8 +157,16 @@ if len(filenames) <= 1:
     pathlib.Path(renders + filenames[0]).rename(basepath + 'MERGED.exr')
     exit()

+bpy.context.scene.render.resolution_x = ${settings.resolution_x}
+bpy.context.scene.render.resolution_y = ${settings.resolution_y}
+bpy.context.scene.render.resolution_percentage = ${settings.resolution_percentage}
+bpy.context.scene.render.use_compositing = True
+bpy.context.scene.use_nodes = True
+bpy.context.scene.view_layers[0].use = False
+bpy.context.scene.render.image_settings.file_format = 'OPEN_EXR_MULTILAYER'
+bpy.context.scene.render.filepath = basepath + "MERGED"
+
 image_nodes = []
 for index, image in enumerate(filenames):
     bpy.ops.image.open(filepath=renders + image, use_sequence_detection=False)
     image = bpy.data.images[bpy.path.basename(image)]
@@ -166,7 +174,6 @@ for index, image in enumerate(filenames):
     image_nodes[index].image = image

 alpha_over_nodes = []
-
 for index, node in enumerate(image_nodes):
     if index == 0:
         # Take the first two image nodes and combine them
@@ -182,7 +189,7 @@ for index, node in enumerate(image_nodes):
         # Link the last image node and feed the output into the composite node
         bpy.context.scene.node_tree.links.new(alpha_over_nodes[index].outputs['Image'], bpy.context.scene.node_tree.nodes['Composite'].inputs['Image'])
         break
-`;
+bpy.ops.render.render(write_still=True)`;
     const command = author.Command("blender-render", {
         exe: "{blender}",
         exeArgs: "{blenderArgs}",
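For readers skimming the Python embedded in the template string: the merge pass opens every tile render, chains the tiles together with Alpha Over nodes in the compositor, feeds the result into the Composite node, and with the new final line renders the merged multilayer EXR to the MERGED path. A condensed, self-contained sketch of that idea (the file names and output path here are hypothetical placeholders; the real script derives them from the task's renders directory):

import bpy

scene = bpy.context.scene
scene.use_nodes = True
scene.render.use_compositing = True
tree = scene.node_tree

filenames = ["tile-0-0.exr", "tile-0-1.exr", "tile-0-2.exr"]  # hypothetical inputs

# One Image node per rendered tile.
image_nodes = []
for name in filenames:
    node = tree.nodes.new("CompositorNodeImage")
    node.image = bpy.data.images.load(name)
    image_nodes.append(node)

# Chain the tiles: each Alpha Over lays the next tile over the running result.
previous = image_nodes[0].outputs["Image"]
for node in image_nodes[1:]:
    alpha_over = tree.nodes.new("CompositorNodeAlphaOver")
    tree.links.new(previous, alpha_over.inputs[1])
    tree.links.new(node.outputs["Image"], alpha_over.inputs[2])
    previous = alpha_over.outputs["Image"]

# Feed the final result into the existing Composite node and write the merged image.
tree.links.new(previous, tree.nodes["Composite"].inputs["Image"])
scene.render.image_settings.file_format = 'OPEN_EXR_MULTILAYER'
scene.render.filepath = "//MERGED"  # placeholder output path
bpy.ops.render.render(write_still=True)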