Asset Pipeline v2 #145

Closed
Nick Alberelli wants to merge 431 commits from (deleted):feature/asset-pipeline-v2 into main

When changing the target branch, be careful to rebase the branch in your fork to match. See documentation.
2 changed files with 73 additions and 6 deletions
Showing only changes of commit dcee6fcba6 - Show all commits

View File

@@ -1,6 +1,7 @@
import bpy import bpy
from . import transfer_functions from . import transfer_functions
from pathlib import Path
def update_transfer_data_ownership(transfer_data_item, target_obj: bpy.types.Object): def update_transfer_data_ownership(transfer_data_item, target_obj: bpy.types.Object):
@@ -106,3 +107,57 @@ def pull_task_layer(
# TODO Move transferrable data onto obj owned by others # TODO Move transferrable data onto obj owned by others
apply_transfer_data(context, transfer_data, target_col, source_tl) apply_transfer_data(context, transfer_data, target_col, source_tl)
def find_published_file_version(file):
    """Return the integer version number parsed from a published file's name.

    Expects names shaped like ``asset.v001.blend``: the second
    dot-separated field (e.g. ``"v001"``) is stripped of ``v`` characters
    and converted to an int. Intended as a sort key for publish files.
    """
    version_field = file.name.split(".")[1]
    return int(version_field.replace("v", ""))
def find_published_file(current_file: Path):
    """Return the latest published .blend file for the current task file.

    Looks in the ``publish`` directory that sits next to *current_file*
    and picks the file with the highest version number, as extracted by
    ``find_published_file_version``.

    Args:
        current_file: Path of the currently open task .blend file.

    Returns:
        The highest-versioned published ``Path``, or ``None`` when the
        publish directory does not exist or contains no .blend files
        (the original raised IndexError on an empty directory).
    """
    publish_dir = current_file.parent.joinpath("publish")
    if not publish_dir.exists():
        return None
    # Reuse publish_dir instead of re-deriving the path (original
    # recomputed current_file.parent.joinpath("publish") here).
    published_files = list(publish_dir.glob("*.blend"))
    if not published_files:
        # Empty publish directory: nothing to pull from.
        return None
    return max(published_files, key=find_published_file_version)
def import_data_from_lib(
    libpath: Path,
    data_category: str,
    data_name: str,
    link: bool = False,
):
    """Append or link one datablock from an external .blend library.

    Args:
        libpath: Path to the source .blend library file.
        data_category: Name of a ``bpy.data`` collection attribute,
            e.g. ``"collections"`` or ``"objects"``.
        data_name: Name of the datablock to import.
        link: Link the datablock instead of appending it when True.

    Returns:
        The imported datablock looked up in ``bpy.data``.

    Note:
        All dynamic lookups use ``getattr``/subscription instead of the
        original ``eval`` on interpolated strings — same behavior, but no
        arbitrary-code-execution hazard and no breakage on names that
        contain quote characters.
    """
    noun = "Linked" if link else "Appended"

    with bpy.data.libraries.load(libpath.as_posix(), relative=True, link=link) as (
        data_from,
        data_to,
    ):
        if data_name not in getattr(data_from, data_category):
            # NOTE(review): original only warned and continued here;
            # behavior kept — the append below is then a no-op upstream.
            print(
                f"Failed to import {data_category} {data_name} from {libpath.as_posix()}. Doesn't exist in file.",
            )

        # Check if datablock with same name already exists in blend file.
        if data_name in getattr(bpy.data, data_category):
            print(
                f"{data_name} already in bpy.data.{data_category} of this blendfile.",
            )

        # Append data block.
        getattr(data_to, data_category).append(data_name)

    print(f"{noun}:{data_name} from library: {libpath.as_posix()}")

    if link:
        # Linked datablocks are keyed by (name, library filepath).
        return getattr(bpy.data, data_category)[
            data_name, bpy.path.relpath(libpath.as_posix())
        ]
    return getattr(bpy.data, data_category)[data_name]

View File

@@ -1,6 +1,8 @@
import bpy import bpy
from . import core from . import core
from pathlib import Path
from . import asset_suffix
class ASSETPIPE_OT_update_ownership(bpy.types.Operator): class ASSETPIPE_OT_update_ownership(bpy.types.Operator):
@@ -48,15 +50,26 @@ class ASSETPIPE_OT_pull_test(bpy.types.Operator):
bl_label = 'Pull from Publish' bl_label = 'Pull from Publish'
def execute(self, context): def execute(self, context):
task_col = bpy.data.collections["CH-chr_test"] # TODO replace hard coded value
asset_suffix.add_suffix_to_hierarchy(task_col, "TASK")
current_file = Path(bpy.data.filepath)
pub_file = core.find_published_file(current_file)
col_name = "CH-chr_test" # TODO replace hard coded value
core.import_data_from_lib(pub_file, "collections", col_name)
appended_col = bpy.data.collections["CH-chr_test"] # TODO find appended data
asset_suffix.add_suffix_to_hierarchy(appended_col, "PUBLISH")
task_layer_col = bpy.data.collections["CH-chr_test.TASK"]
publish_col = bpy.data.collections["CH-chr_test.PUBLISH"]
# TODO fix pull function to work with multiple files
return {'FINISHED'}
# Find current task Layer # Find current task Layer
task_layer_col = context.collection task_layer_col = context.collection
current_task_layer = task_layer_col.name.split('.')[-1] current_task_layer = task_layer_col.name.split('.')[-1]
# Find PUBLISH Collection
for col in context.scene.collection.children_recursive:
if "PUB" in col.name:
publish_col = col
core.pull_task_layer( core.pull_task_layer(
context, context,
source_col=publish_col, source_col=publish_col,
@@ -64,7 +77,6 @@ class ASSETPIPE_OT_pull_test(bpy.types.Operator):
source_tl="PUB", source_tl="PUB",
target_tl=current_task_layer, target_tl=current_task_layer,
) )
return {'FINISHED'}
classes = ( classes = (