WIP: Initial version of a single-frame job compiler #104189
@@ -161,8 +161,10 @@ bpy.context.scene.use_nodes = True
 bpy.context.scene.node_tree.nodes.clear()
 bpy.context.scene.render.image_settings.file_format = 'OPEN_EXR_MULTILAYER'
 bpy.context.scene.render.filepath = basepath + "MERGED"
+denoising = "${settings.use_denoising}" == "true"

-def create_translate(index, dimensions):
+# Takes the column and row number and creates a translate node with the right coordinates to align a tile
+def create_translate(dimensions):
     translate_node = bpy.context.scene.node_tree.nodes.new(type='CompositorNodeTranslate')
     translate_node.use_relative = True
     for dimension in dimensions:
@@ -174,53 +176,64 @@ def create_translate(index, dimensions):
         translate_node.inputs[dimension].default_value = normalize((${settings.tile_size} / 100 + (dimensions[dimension] - 1) * ${settings.tile_size} / 100) + (${settings.tile_size} / 100) / 2)
     return translate_node

-image_node_translates = []
+def align_tiles(input):
+    translated_list = []
+    for index, image_node in enumerate(image_nodes):
+        file_name = image_node.image.name_full
+        dimensions = {'X': int(file_name.split('c')[-1].split('_')[0]), 'Y': int(file_name.split('c')[0].replace('r', ''))}
+        translated_list.append(create_translate(dimensions))
+        bpy.context.scene.node_tree.links.new(image_node.outputs[input], translated_list[index].inputs['Image'])
+    return translated_list
+
+# Create a list of image nodes
+image_nodes = []
 for index, image in enumerate(filenames):
-    dimensions = {'X': int(image.split('c')[-1].split('_')[0]), 'Y': int(image.split('c')[0].replace('r', ''))}
     bpy.ops.image.open(filepath=renders + image, use_sequence_detection=False)
     image = bpy.data.images[bpy.path.basename(image)]
-    image_node = bpy.context.scene.node_tree.nodes.new(type='CompositorNodeImage')
-    image_node_translates.append(create_translate(index, dimensions))
-    image_node.image = image
-    bpy.context.scene.node_tree.links.new(image_node.outputs['Combined'], image_node_translates[index].inputs['Image'])
+    image_nodes.append(bpy.context.scene.node_tree.nodes.new(type='CompositorNodeImage'))
+    image_nodes[index].image = image

+# Create translates for Combined, Albedo and Normal and put them in a list
+combined_translates = align_tiles("Combined")
+if denoising:
+    albedo_translates = align_tiles("Denoising Albedo")
+    normal_translates = align_tiles("Denoising Normal")
+
 output_node = bpy.context.scene.node_tree.nodes.new(type='CompositorNodeComposite')
-denoising = "${settings.use_denoising}" == "true"
 alpha_over_nodes = []
 albedo_mix_nodes = []
 normal_mix_nodes = []

-for index, node in enumerate(image_node_translates):
+for index, node in enumerate(combined_translates):
     if index == 0:
         # Take the first two image nodes and combine them
         alpha_over_nodes.append(bpy.context.scene.node_tree.nodes.new(type='CompositorNodeAlphaOver'))
-        bpy.context.scene.node_tree.links.new(image_node_translates[0].outputs['Image'], alpha_over_nodes[index].inputs[1])
-        bpy.context.scene.node_tree.links.new(image_node_translates[1].outputs['Image'], alpha_over_nodes[index].inputs[2])
+        bpy.context.scene.node_tree.links.new(combined_translates[0].outputs['Image'], alpha_over_nodes[index].inputs[1])
+        bpy.context.scene.node_tree.links.new(combined_translates[1].outputs['Image'], alpha_over_nodes[index].inputs[2])
         if denoising:
             albedo_mix_nodes.append(bpy.context.scene.node_tree.nodes.new(type='CompositorNodeMixRGB'))
             albedo_mix_nodes[index].blend_type = 'ADD'
-            bpy.context.scene.node_tree.links.new(image_node_translates[0].outputs['Denoising Albedo'], albedo_mix_nodes[index].inputs[1])
-            bpy.context.scene.node_tree.links.new(image_node_translates[1].outputs['Denoising Albedo'], albedo_mix_nodes[index].inputs[2])
+            bpy.context.scene.node_tree.links.new(combined_translates[0].outputs['Denoising Albedo'], albedo_mix_nodes[index].inputs[1])
+            bpy.context.scene.node_tree.links.new(combined_translates[1].outputs['Denoising Albedo'], albedo_mix_nodes[index].inputs[2])
             normal_mix_nodes.append(bpy.context.scene.node_tree.nodes.new(type='CompositorNodeMixRGB'))
             normal_mix_nodes[index].blend_type = 'ADD'
-            bpy.context.scene.node_tree.links.new(image_node_translates[0].outputs['Denoising Normal'], normal_mix_nodes[index].inputs[1])
-            bpy.context.scene.node_tree.links.new(image_node_translates[1].outputs['Denoising Normal'], normal_mix_nodes[index].inputs[2])
+            bpy.context.scene.node_tree.links.new(combined_translates[0].outputs['Denoising Normal'], normal_mix_nodes[index].inputs[1])
+            bpy.context.scene.node_tree.links.new(combined_translates[1].outputs['Denoising Normal'], normal_mix_nodes[index].inputs[2])
     else:
         # Take one image node and the previous alpha over node
         alpha_over_nodes.append(bpy.context.scene.node_tree.nodes.new(type='CompositorNodeAlphaOver'))
         bpy.context.scene.node_tree.links.new(alpha_over_nodes[index-1].outputs['Image'], alpha_over_nodes[index].inputs[1])
-        bpy.context.scene.node_tree.links.new(image_node_translates[index+1].outputs['Image'], alpha_over_nodes[index].inputs[2])
+        bpy.context.scene.node_tree.links.new(combined_translates[index+1].outputs['Image'], alpha_over_nodes[index].inputs[2])
         if denoising:
             albedo_mix_nodes.append(bpy.context.scene.node_tree.nodes.new(type='CompositorNodeMixRGB'))
             albedo_mix_nodes[index].blend_type = 'ADD'
             bpy.context.scene.node_tree.links.new(albedo_mix_nodes[index-1].outputs['Image'], albedo_mix_nodes[index].inputs[1])
-            bpy.context.scene.node_tree.links.new(image_node_translates[index+1].outputs['Denoising Albedo'], albedo_mix_nodes[index].inputs[2])
+            bpy.context.scene.node_tree.links.new(combined_translates[index+1].outputs['Denoising Albedo'], albedo_mix_nodes[index].inputs[2])
             normal_mix_nodes.append(bpy.context.scene.node_tree.nodes.new(type='CompositorNodeMixRGB'))
             normal_mix_nodes[index].blend_type = 'ADD'
             bpy.context.scene.node_tree.links.new(normal_mix_nodes[index-1].outputs['Image'], normal_mix_nodes[index].inputs[1])
-            bpy.context.scene.node_tree.links.new(image_node_translates[index+1].outputs['Denoising Normal'], normal_mix_nodes[index].inputs[2])
-    if index + 1 == len(image_node_translates) - 1:
+            bpy.context.scene.node_tree.links.new(combined_translates[index+1].outputs['Denoising Normal'], normal_mix_nodes[index].inputs[2])
+    if index + 1 == len(combined_translates) - 1:
         if denoising:
             denoise_node = bpy.context.scene.node_tree.nodes.new(type='CompositorNodeDenoise')
             bpy.context.scene.node_tree.links.new(alpha_over_nodes[index].outputs['Image'], denoise_node.inputs['Image'])
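Note that the ${settings.use_denoising} and ${settings.tile_size} tokens are not Python: they are job-setting placeholders that the job compiler presumably interpolates into this script before it is handed to Blender. As a purely illustrative example with an invented value, the denoising line above would expand to ordinary Python such as:

# Illustration only: hypothetical expansion of the placeholder line; the value "true" is made up.
denoising = "true" == "true"   # the string comparison then yields the Python boolean True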
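The align_tiles() helper also encodes an assumption about how the tile renders are named: the row and column appear to be parsed from a prefix of the form r{row}c{column}_ in each file name, and create_translate() turns those numbers into an offset expressed as a fraction of the full frame. A minimal standalone sketch of that arithmetic (not part of the patch; the normalize() helper the real script feeds this value into is defined elsewhere in the job script and is not reproduced here):

# Sketch only: filename parsing and offset math as inferred from the patch above.
def parse_tile_coords(file_name):
    # "r1c2_tile0001.exr" -> {'X': 2, 'Y': 1}, mirroring the split() calls in align_tiles()
    return {
        'X': int(file_name.split('c')[-1].split('_')[0]),
        'Y': int(file_name.split('c')[0].replace('r', '')),
    }

def raw_offset(coord, tile_size_percent):
    # The expression passed to normalize() in create_translate(): one tile width,
    # plus (coord - 1) further tile widths, plus half a tile to reach the tile centre.
    tile = tile_size_percent / 100
    return tile + (coord - 1) * tile + tile / 2

print(parse_tile_coords("r1c2_tile0001.exr"))   # {'X': 2, 'Y': 1}
print(raw_offset(2, 25), raw_offset(1, 25))     # 0.625 0.375 (with hypothetical 25% tiles)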
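The merge loop is effectively a fold: the first Alpha Over node combines tiles 0 and 1, and each subsequent node stacks one more translated tile on top of the running result; the same pattern is repeated with additive MixRGB nodes for the 'Denoising Albedo' and 'Denoising Normal' passes. A condensed sketch of that chaining, assuming it runs inside Blender where bpy and a compositor node tree are available (the helper name and arguments are illustrative; only the node and socket names come from the patch):

def chain_alpha_over(tree, translate_nodes):
    # Fold the 'Image' outputs of the translated tiles into a single output socket,
    # mirroring the alpha_over_nodes loop in the patch.
    result = translate_nodes[0].outputs['Image']
    for node in translate_nodes[1:]:
        alpha_over = tree.nodes.new(type='CompositorNodeAlphaOver')
        tree.links.new(result, alpha_over.inputs[1])                 # background: merged result so far
        tree.links.new(node.outputs['Image'], alpha_over.inputs[2])  # foreground: next tile on top
        result = alpha_over.outputs['Image']
    return result

# Example use (hypothetical): tree = bpy.context.scene.node_tree
# tree.links.new(chain_alpha_over(tree, combined_translates), output_node.inputs['Image'])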