rename/reshuffle modules

2014-11-05 14:48:10 +01:00
parent 4ebe917ee1
commit 8b1085edd1
6 changed files with 27 additions and 25 deletions

modules/blendfile.py Normal file

@@ -0,0 +1,785 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
#
# (c) 2009, At Mind B.V. - Jeroen Bakker
# (c) 2014, Blender Foundation - Campbell Barton
import os
import struct
import logging
import gzip
import tempfile
log = logging.getLogger("blendfile")
FILE_BUFFER_SIZE = 1024 * 1024
# -----------------------------------------------------------------------------
# module global routines
#
# read routines
# open a filename,
# determine if the file is compressed,
# and return a handle
def open_blend(filename, access="rb"):
"""Opens a blend file for reading or writing pending on the access
supports 2 kind of blend files. Uncompressed and compressed.
Known issue: does not support packaged blend files
"""
handle = open(filename, access)
magic = handle.read(7)
if magic == b"BLENDER":
log.debug("normal blendfile detected")
handle.seek(0, os.SEEK_SET)
bfile = BlendFile(handle)
bfile.is_compressed = False
bfile.filepath_orig = filename
return bfile
else:
log.debug("gzip blendfile detected?")
handle.close()
log.debug("decompressing started")
fs = gzip.open(filename, "rb")
handle = tempfile.TemporaryFile()
data = fs.read(FILE_BUFFER_SIZE)
while data:
handle.write(data)
data = fs.read(FILE_BUFFER_SIZE)
log.debug("decompressing finished")
fs.close()
log.debug("resetting decompressed file")
handle.seek(0, os.SEEK_SET)
bfile = BlendFile(handle)
bfile.is_compressed = True
bfile.filepath_orig = filename
return bfile
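# A minimal usage sketch ("scene.blend" is a hypothetical path):
#
#   bfile = open_blend("scene.blend")
#   for ob in bfile.find_blocks_from_code(b'OB'):  # object blocks
#       print(ob[b'id.name'])
#   bfile.close()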
def align(offset, by):
n = by - 1
return (offset + n) & ~n
# -----------------------------------------------------------------------------
# module classes
class BlendFile:
"""
Blend file.
"""
__slots__ = (
# file (result of open())
"handle",
# str (original name of the file path)
"filepath_orig",
# BlendFileHeader
"header",
# struct.Struct
"block_header_struct",
# BlendFileBlock
"blocks",
# [DNAStruct, ...]
"structs",
# dict {b'StructName': sdna_index}
# (where the index is an index into 'structs')
"sdna_index_from_id",
# dict {addr_old: block}
"block_from_offset",
# int
"code_index",
# bool (did we make a change)
"is_modified",
# bool (is file gzipped)
"is_compressed",
)
def __init__(self, handle):
log.debug("initializing reading blend-file")
self.handle = handle
self.header = BlendFileHeader(handle)
self.block_header_struct = self.header.create_block_header_struct()
self.blocks = []
self.code_index = {}
block = BlendFileBlock(handle, self)
while block.code != b'ENDB':
if block.code == b'DNA1':
(self.structs,
self.sdna_index_from_id,
) = BlendFile.decode_structs(self.header, block, handle)
else:
handle.seek(block.size, os.SEEK_CUR)
self.blocks.append(block)
self.code_index.setdefault(block.code, []).append(block)
block = BlendFileBlock(handle, self)
self.is_modified = False
self.blocks.append(block)
# cache (could lazy-init, in case we never use it)
self.block_from_offset = {block.addr_old: block for block in self.blocks if block.code != b'ENDB'}
def find_blocks_from_code(self, code):
assert(type(code) == bytes)
if code not in self.code_index:
return []
return self.code_index[code]
def find_block_from_offset(self, offset):
# same as looping over all blocks,
# then checking ``block.addr_old == offset``
assert(type(offset) is int)
return self.block_from_offset.get(offset)
def close(self):
"""
Close the blend file,
writing it back to disk if changes have been made.
"""
if not self.is_modified:
self.handle.close()
else:
handle = self.handle
if self.is_compressed:
log.debug("close compressed blend file")
handle.seek(0, os.SEEK_SET)
log.debug("compressing started")
fs = gzip.open(self.filepath_orig, "wb")
data = handle.read(FILE_BUFFER_SIZE)
while data:
fs.write(data)
data = handle.read(FILE_BUFFER_SIZE)
fs.close()
log.debug("compressing finished")
handle.close()
def ensure_subtype_smaller(self, sdna_index_curr, sdna_index_next):
# never refine to a smaller type
if (self.structs[sdna_index_curr].size >
self.structs[sdna_index_next].size):
raise RuntimeError("cant refine to smaller type (%s -> %s)" %
(self.file.structs[sdna_index_curr].dna_type_id.decode('ascii'),
self.file.structs[sdna_index_next].dna_type_id.decode('ascii')))
@staticmethod
def decode_structs(header, block, handle):
"""
Decode the DNA1 file-block: a catalog of all structs, types and names.
"""
log.debug("building DNA catalog")
shortstruct = DNA_IO.USHORT[header.endian_index]
shortstruct2 = struct.Struct(header.endian_str + b'HH')
intstruct = DNA_IO.UINT[header.endian_index]
data = handle.read(block.size)
types = []
names = []
structs = []
sdna_index_from_id = {}
offset = 8
names_len = intstruct.unpack_from(data, offset)[0]
offset += 4
log.debug("building #%d names" % names_len)
for i in range(names_len):
tName = DNA_IO.read_data0(data, offset)
offset = offset + len(tName) + 1
names.append(DNAName(tName))
del names_len
offset = align(offset, 4)
offset += 4
types_len = intstruct.unpack_from(data, offset)[0]
offset += 4
log.debug("building #%d types" % types_len)
for i in range(types_len):
dna_type_id = DNA_IO.read_data0(data, offset)
# size and fields are filled in below
types.append(DNAStruct(dna_type_id))
offset += len(dna_type_id) + 1
offset = align(offset, 4)
offset += 4
log.debug("building #%d type-lengths" % types_len)
for i in range(types_len):
tLen = shortstruct.unpack_from(data, offset)[0]
offset = offset + 2
types[i].size = tLen
del types_len
offset = align(offset, 4)
offset += 4
structs_len = intstruct.unpack_from(data, offset)[0]
offset += 4
log.debug("building #%d structures" % structs_len)
for sdna_index in range(structs_len):
d = shortstruct2.unpack_from(data, offset)
struct_type_index = d[0]
offset += 4
dna_struct = types[struct_type_index]
sdna_index_from_id[dna_struct.dna_type_id] = sdna_index
structs.append(dna_struct)
fields_len = d[1]
dna_offset = 0
for field_index in range(fields_len):
d2 = shortstruct2.unpack_from(data, offset)
field_type_index = d2[0]
field_name_index = d2[1]
offset += 4
dna_type = types[field_type_index]
dna_name = names[field_name_index]
if dna_name.is_pointer or dna_name.is_method_pointer:
dna_size = header.pointer_size * dna_name.array_size
else:
dna_size = dna_type.size * dna_name.array_size
field = DNAField(dna_type, dna_name, dna_size, dna_offset)
dna_struct.fields.append(field)
dna_struct.field_from_name[dna_name.name_only] = field
dna_offset += dna_size
return structs, sdna_index_from_id
class BlendFileBlock:
"""
Instance of a struct.
"""
__slots__ = (
# BlendFile
"file",
"code",
"size",
"addr_old",
"sdna_index",
"count",
"file_offset",
)
def __str__(self):
return ("<%s.%s (%s), size=%d at %s>" %
# fields=[%s]
(self.__class__.__name__,
self.dna_type.dna_type_id.decode('ascii'),
self.code.decode(),
self.size,
# b", ".join(f.dna_name.name_only for f in self.dna_type.fields).decode('ascii'),
hex(self.addr_old),
))
def __init__(self, handle, bfile):
OLDBLOCK = struct.Struct(b'4sI')
self.file = bfile
data = handle.read(bfile.block_header_struct.size)
# header size can be 8, 20, or 24 bytes long
# 8: old blend files ENDB block (exception)
# 20: normal headers 32 bit platform
# 24: normal headers 64 bit platform
if len(data) > 15:
blockheader = bfile.block_header_struct.unpack(data)
self.code = blockheader[0].partition(b'\0')[0]
if self.code != b'ENDB':
self.size = blockheader[1]
self.addr_old = blockheader[2]
self.sdna_index = blockheader[3]
self.count = blockheader[4]
self.file_offset = handle.tell()
else:
self.size = 0
self.addr_old = 0
self.sdna_index = 0
self.count = 0
self.file_offset = 0
else:
blockheader = OLDBLOCK.unpack(data)
self.code = blockheader[0].partition(b'\0')[0]
self.size = 0
self.addr_old = 0
self.sdna_index = 0
self.count = 0
self.file_offset = 0
@property
def dna_type(self):
return self.file.structs[self.sdna_index]
def refine_type_from_index(self, sdna_index_next):
assert(type(sdna_index_next) is int)
sdna_index_curr = self.sdna_index
self.file.ensure_subtype_smaller(sdna_index_curr, sdna_index_next)
self.sdna_index = sdna_index_next
def refine_type(self, dna_type_id):
assert(type(dna_type_id) is bytes)
self.refine_type_from_index(self.file.sdna_index_from_id[dna_type_id])
def get(self, path,
default=...,
sdna_index_refine=None,
use_nil=True, use_str=True,
):
if sdna_index_refine is None:
sdna_index_refine = self.sdna_index
else:
self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)
dna_struct = self.file.structs[sdna_index_refine]
self.file.handle.seek(self.file_offset, os.SEEK_SET)
return dna_struct.field_get(
self.file.header, self.file.handle, path,
default=default,
use_nil=use_nil, use_str=use_str,
)
def set(self, path, value,
sdna_index_refine=None,
):
if sdna_index_refine is None:
sdna_index_refine = self.sdna_index
else:
self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)
dna_struct = self.file.structs[sdna_index_refine]
self.file.handle.seek(self.file_offset, os.SEEK_SET)
self.file.is_modified = True
return dna_struct.field_set(
self.file.header, self.file.handle, path, value)
# ---------------
# Utility get/set
#
# avoid inline pointer casting
def get_pointer(self, path, sdna_index_refine=None):
if sdna_index_refine is None:
sdna_index_refine = self.sdna_index
result = self.get(path, sdna_index_refine=sdna_index_refine)
assert(self.file.structs[sdna_index_refine].field_from_path(self.file.header, self.file.handle, path).dna_name.is_pointer)
if result != 0:
# possible (but unlikely)
# that this fails and returns None
# maybe we want to raise some exception in this case
return self.file.find_block_from_offset(result)
else:
return None
# ----------------------
# Python convenience API
# dict like access
def __getitem__(self, item):
return self.get(item, use_str=False)
def __setitem__(self, item, value):
self.set(item, value)
def keys(self):
return (f.dna_name.name_only for f in self.dna_type.fields)
def values(self):
return (self[k] for k in self.keys())
def items(self):
return ((k, self[k]) for k in self.keys())
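# A sketch of the dict-like access above (field names depend on the
# block's DNA type; b'id.name' exists on any ID block):
#
#   block[b'id.name']             # read a field, returned as bytes
#   block[b'id.name'] = b'OBNew'  # write a char field, marks file modified
#   dict(block.items())           # all top-level fields of the struct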
# -----------------------------------------------------------------------------
# Read Magic
#
# magic = str
# pointer_size = int
# is_little_endian = bool
# version = int
class BlendFileHeader:
"""
BlendFileHeader parses the first 12 bytes of a blend file,
which contain information about the hardware architecture.
"""
__slots__ = (
# str
"magic",
# int 4/8
"pointer_size",
# bool
"is_little_endian",
# int
"version",
# str, used to pass to 'struct'
"endian_str",
# int, used to index common types
"endian_index",
)
def __init__(self, handle):
FILEHEADER = struct.Struct(b'7s1s1s3s')
log.debug("reading blend-file-header")
values = FILEHEADER.unpack(handle.read(FILEHEADER.size))
self.magic = values[0]
pointer_size_id = values[1]
if pointer_size_id == b'-':
self.pointer_size = 8
elif pointer_size_id == b'_':
self.pointer_size = 4
else:
assert(0)
endian_id = values[2]
if endian_id == b'v':
self.is_little_endian = True
self.endian_str = b'<'
self.endian_index = 0
elif endian_id == b'V':
self.is_little_endian = False
self.endian_index = 1
self.endian_str = b'>'
else:
assert(0)
version_id = values[3]
self.version = int(version_id)
def create_block_header_struct(self):
return struct.Struct(b''.join((
self.endian_str,
b'4sI',
b'I' if self.pointer_size == 4 else b'Q',
b'II',
)))
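# Example: a 64-bit little-endian file of version 2.69 starts with
# b'BLENDER-v269' -> magic b'BLENDER', pointer_size 8 ('-'),
# little-endian ('v'), version 269; create_block_header_struct()
# then returns struct.Struct(b'<4sIQII') for
# (code, size, addr_old, sdna_index, count).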
class DNAName:
"""
DNAName is a C-type name stored in the DNA
"""
__slots__ = (
"name_full",
"name_only",
"is_pointer",
"is_method_pointer",
"array_size",
)
def __init__(self, name_full):
self.name_full = name_full
self.name_only = self.calc_name_only()
self.is_pointer = self.calc_is_pointer()
self.is_method_pointer = self.calc_is_method_pointer()
self.array_size = self.calc_array_size()
def as_reference(self, parent):
if parent is None:
result = b''
else:
result = parent + b'.'
result = result + self.name_only
return result
def calc_name_only(self):
result = self.name_full.strip(b'*()')
index = result.find(b'[')
if index != -1:
result = result[:index]
return result
def calc_is_pointer(self):
return (b'*' in self.name_full)
def calc_is_method_pointer(self):
return (b'(*' in self.name_full)
def calc_array_size(self):
result = 1
temp = self.name_full
index = temp.find(b'[')
while index != -1:
index_2 = temp.find(b']')
result *= int(temp[index + 1:index_2])
temp = temp[index_2 + 1:]
index = temp.find(b'[')
return result
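# A sketch of how a DNA name decomposes:
#
#   n = DNAName(b'*mtex[18]')
#   n.name_only   -> b'mtex'
#   n.is_pointer  -> True
#   n.array_size  -> 18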
class DNAField:
"""
DNAField is a coupled DNAStruct and DNAName
and cache offset for reuse
"""
__slots__ = (
# DNAName
"dna_name",
# tuple of 3 items
# [bytes (struct name), int (struct size), DNAStruct]
"dna_type",
# size on-disk
"dna_size",
# cached info (avoid looping over fields each time)
"dna_offset",
)
def __init__(self, dna_type, dna_name, dna_size, dna_offset):
self.dna_type = dna_type
self.dna_name = dna_name
self.dna_size = dna_size
self.dna_offset = dna_offset
class DNAStruct:
"""
DNAStruct is a C-type structure stored in the DNA
"""
__slots__ = (
"dna_type_id",
"size",
"fields",
"field_from_name",
)
def __init__(self, dna_type_id):
self.dna_type_id = dna_type_id
self.fields = []
self.field_from_name = {}
def field_from_path(self, header, handle, path):
assert(type(path) == bytes)
# support 'id.name'
name, _, name_tail = path.partition(b'.')
# support 'mtex[1].tex'
# note, multi-dimensional arrays not supported
# FIXME: 'mtex[1]' works, but not 'mtex[1].tex', why is this???
if name.endswith(b']'):
name, _, index = name[:-1].partition(b'[')
index = int(index)
else:
index = 0
field = self.field_from_name.get(name)
if field is not None:
handle.seek(field.dna_offset, os.SEEK_CUR)
if index != 0:
if field.dna_name.is_pointer:
index_offset = header.pointer_size * index
else:
index_offset = field.dna_type.size * index
assert(index_offset < field.dna_size)
handle.seek(index_offset, os.SEEK_CUR)
if name_tail == b'':
return field
else:
return field.dna_type.field_from_path(header, handle, name_tail)
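# A sketch of the path forms field_from_path() accepts:
#   b'name'      plain field
#   b'id.name'   nested struct member
#   b'mtex[1]'   array element (multi-dimensional arrays unsupported)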
def field_get(self, header, handle, path,
default=...,
use_nil=True, use_str=True,
):
assert(type(path) == bytes)
field = self.field_from_path(header, handle, path)
if field is None:
if default is not ...:
return default
else:
raise KeyError("%r not found in %r (%r)" % (path, [f.dna_name.name_only for f in self.fields], self.dna_type_id))
dna_type = field.dna_type
dna_name = field.dna_name
if dna_name.is_pointer:
return DNA_IO.read_pointer(handle, header)
elif dna_type.dna_type_id == b'int':
return DNA_IO.read_int(handle, header)
elif dna_type.dna_type_id == b'short':
return DNA_IO.read_short(handle, header)
elif dna_type.dna_type_id == b'float':
return DNA_IO.read_float(handle, header)
elif dna_type.dna_type_id == b'char':
if use_str:
if use_nil:
return DNA_IO.read_string0(handle, dna_name.array_size)
else:
return DNA_IO.read_string(handle, dna_name.array_size)
else:
if use_nil:
return DNA_IO.read_bytes0(handle, dna_name.array_size)
else:
return DNA_IO.read_bytes(handle, dna_name.array_size)
else:
raise NotImplementedError("%r exists but isn't pointer, can't resolve field %r" % (path, dna_name.name))
def field_set(self, header, handle, path, value):
assert(type(path) == bytes)
field = self.field_from_path(header, handle, path)
if field is None:
raise KeyError("%r not found in %r" % (path, [f.dna_name.name_only for f in self.fields]))
dna_type = field.dna_type
dna_name = field.dna_name
if dna_type.dna_type_id == b'char':
if type(value) is str:
return DNA_IO.write_string(handle, value, dna_name.array_size)
else:
return DNA_IO.write_bytes(handle, value, dna_name.array_size)
else:
raise NotImplementedError("Setting %r is not yet supported" % dna_type[0])
class DNA_IO:
"""
Module like class, for read-write utility functions.
Only stores static methods & constants.
"""
__slots__ = ()
# Methods for read/write,
# these are only here to avoid clogging global-namespace
@staticmethod
def write_string(handle, astring, fieldlen):
assert(isinstance(astring, str))
stringw = ""
if len(astring) >= fieldlen:
stringw = astring[0:fieldlen]
else:
stringw = astring + '\0'
handle.write(stringw.encode('utf-8'))
@staticmethod
def write_bytes(handle, astring, fieldlen):
assert(isinstance(astring, (bytes, bytearray)))
stringw = b''
if len(astring) >= fieldlen:
stringw = astring[0:fieldlen]
else:
stringw = astring + b'\0'
handle.write(stringw)
_STRING = [struct.Struct("%ds" % i) for i in range(0, 2048)]
@staticmethod
def _string_struct(length):
if length < len(DNA_IO._STRING):
st = DNA_IO._STRING[length]
else:
st = struct.Struct("%ds" % length)
return st
@staticmethod
def read_bytes(handle, length):
st = DNA_IO._string_struct(length)
data = st.unpack(handle.read(st.size))[0]
return data
@staticmethod
def read_bytes0(handle, length):
st = DNA_IO._string_struct(length)
data = st.unpack(handle.read(st.size))[0]
return DNA_IO.read_data0(data, 0)
@staticmethod
def read_string(handle, length):
return DNA_IO.read_bytes(handle, length).decode('utf-8')
@staticmethod
def read_string0(handle, length):
return DNA_IO.read_bytes0(handle, length).decode('utf-8')
@staticmethod
def read_data0(data, offset):
"""
Reads a zero-terminated string from a data buffer
"""
add = data.find(b'\0', offset) - offset
st = DNA_IO._string_struct(add)
return st.unpack_from(data, offset)[0]
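# A sketch: read_data0 stops at the first NUL, e.g.
#   DNA_IO.read_data0(b'name\x00junk', 0) -> b'name'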
USHORT = struct.Struct(b'<H'), struct.Struct(b'>H')
@staticmethod
def read_ushort(handle, fileheader):
st = DNA_IO.USHORT[fileheader.endian_index]
return st.unpack(handle.read(st.size))[0]
UINT = struct.Struct(b'<I'), struct.Struct(b'>I')
@staticmethod
def read_uint(handle, fileheader):
st = DNA_IO.UINT[fileheader.endian_index]
return st.unpack(handle.read(st.size))[0]
SINT = struct.Struct(b'<i'), struct.Struct(b'>i')
@staticmethod
def read_int(handle, fileheader):
st = DNA_IO.SINT[fileheader.endian_index]
return st.unpack(handle.read(st.size))[0]
@staticmethod
def read_float(handle, fileheader):
return struct.unpack(fileheader.endian_str + b'f', handle.read(4))[0]
SSHORT = struct.Struct(b'<h'), struct.Struct(b'>h')
@staticmethod
def read_short(handle, fileheader):
st = DNA_IO.SSHORT[fileheader.endian_index]
return st.unpack(handle.read(st.size))[0]
ULONG = struct.Struct(b'<Q'), struct.Struct(b'>Q')
@staticmethod
def read_ulong(handle, fileheader):
st = DNA_IO.ULONG[fileheader.endian_index]
return st.unpack(handle.read(st.size))[0]
@staticmethod
def read_pointer(handle, header):
"""
Reads a pointer from a file handle;
the pointer size is given by the header (BlendFileHeader).
"""
if header.pointer_size == 4:
st = DNA_IO.UINT[header.endian_index]
return st.unpack(handle.read(st.size))[0]
if header.pointer_size == 8:
st = DNA_IO.ULONG[header.endian_index]
return st.unpack(handle.read(st.size))[0]

modules/blendfile_pack.py Executable file

@@ -0,0 +1,337 @@
#!/usr/bin/env python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
import blendfile_path_walker
TIMEIT = True
# ------------------
# Ensure module path
import os
import sys
path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "modules"))
if path not in sys.path:
sys.path.append(path)
del os, sys, path
# --------
def pack(blendfile_src, blendfile_dst, mode='FILE',
deps_remap=None, paths_remap=None, paths_uuid=None,
# yield reports
report=None):
"""
:param deps_remap: Store path deps_remap info as follows.
{"file.blend": {"path_new": "path_old", ...}, ...}
:type deps_remap: dict or None
"""
# Internal details:
# - we copy to a temp path before operating on the blend file,
# so we can modify it in-place.
# - temp files are only created once (if we haven't touched them before);
# this way, for linked libraries, a single blend file may be used
# multiple times, each access applying new edits on top of the old ones.
# - we track which libs we have touched (using the 'lib_visit' arg),
# so the same libs won't be visited many times to modify the same data;
# this also prevents cyclic references from crashing the scan.
import os
import shutil
from bam_utils.system import colorize
path_temp_files = set()
path_copy_files = set()
SUBDIR = b'data'
TEMP_SUFFIX = b'@'
# TODO, make configurable
WRITE_JSON_REMAP = True
if report is None:
report = lambda msg: msg
yield report("%s: %r...\n" % (colorize("\nscanning deps", color='bright_green'), blendfile_src))
if TIMEIT:
import time
t = time.time()
def temp_remap_cb(filepath, level):
"""
Create temp files in the destination path.
"""
filepath = blendfile_path_walker.utils.compatpath(filepath)
if level == 0:
filepath_tmp = os.path.join(base_dir_dst, os.path.basename(filepath)) + TEMP_SUFFIX
else:
filepath_tmp = os.path.join(base_dir_dst, SUBDIR, os.path.basename(filepath)) + TEMP_SUFFIX
filepath_tmp = os.path.normpath(filepath_tmp)
# only overwrite once (so we can write into a path already containing files)
if filepath_tmp not in path_temp_files:
shutil.copy(filepath, filepath_tmp)
path_temp_files.add(filepath_tmp)
return filepath_tmp
# base_dir_src = os.path.dirname(blendfile_src)
base_dir_dst = os.path.dirname(blendfile_dst)
base_dir_dst_subdir = os.path.join(base_dir_dst, SUBDIR)
if not os.path.exists(base_dir_dst_subdir):
os.makedirs(base_dir_dst_subdir)
lib_visit = {}
fp_blend_basename_last = b''
for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
blendfile_src,
readonly=False,
temp_remap_cb=temp_remap_cb,
recursive=True,
lib_visit=lib_visit,
):
if fp_blend_basename_last != fp_blend_basename:
yield report(" %s: %s\n" % (colorize("blend", color='blue'), fp.basedir + fp_blend_basename))
fp_blend_basename_last = fp_blend_basename
# assume the path might be relative
path_src_orig = fp.filepath
path_rel = blendfile_path_walker.utils.compatpath(path_src_orig)
path_base = path_rel.split(os.sep.encode('ascii'))[-1]
path_src = blendfile_path_walker.utils.abspath(path_rel, fp.basedir)
# rename in the blend
path_dst = os.path.join(base_dir_dst_subdir, path_base)
if fp.level == 0:
path_dst_final = b"//" + os.path.join(SUBDIR, path_base)
else:
path_dst_final = b'//' + path_base
fp.filepath = path_dst_final
# add to copy-list
# never copy libs (handled separately)
if not isinstance(fp, blendfile_path_walker.FPElem_block_path) or fp.userdata[0].code != b'LI':
path_copy_files.add((path_src, path_dst))
if deps_remap is not None:
# this needs to become JSON later... ugh, need to use strings
deps_remap.setdefault(
fp_blend_basename.decode('utf-8'),
{})[path_dst_final.decode('utf-8')] = path_src_orig.decode('utf-8')
del lib_visit, fp_blend_basename_last
if TIMEIT:
print(" Time: %.4f\n" % (time.time() - t))
yield report(("%s: %d files\n") %
(colorize("\narchiving", color='bright_green'), len(path_copy_files) + 1))
# handle deps_remap and file renaming
if deps_remap is not None:
blendfile_src_basename = os.path.basename(blendfile_src).decode('utf-8')
blendfile_dst_basename = os.path.basename(blendfile_dst).decode('utf-8')
if blendfile_src_basename != blendfile_dst_basename:
if mode != 'ZIP':
deps_remap[blendfile_dst_basename] = deps_remap[blendfile_src_basename]
del deps_remap[blendfile_src_basename]
del blendfile_src_basename, blendfile_dst_basename
# store path mapping {dst: src}
if paths_remap is not None:
for src, dst in path_copy_files:
# TODO. relative to project-basepath
paths_remap[os.path.relpath(dst, base_dir_dst).decode('utf-8')] = src.decode('utf-8')
# main file XXX, should have better way!
paths_remap[os.path.basename(blendfile_src).decode('utf-8')] = blendfile_src.decode('utf-8')
if paths_uuid is not None:
from bam_utils.system import sha1_from_file
for src, dst in path_copy_files:
paths_uuid[os.path.relpath(dst, base_dir_dst).decode('utf-8')] = sha1_from_file(src)
# XXX, better way to store temp target
blendfile_dst_tmp = temp_remap_cb(blendfile_src, 0)
paths_uuid[os.path.basename(blendfile_src).decode('utf-8')] = sha1_from_file(blendfile_dst_tmp)
del blendfile_dst_tmp
del sha1_from_file
# --------------------
# Handle File Copy/Zip
if mode == 'FILE':
blendfile_dst_tmp = temp_remap_cb(blendfile_src, 0)
shutil.move(blendfile_dst_tmp, blendfile_dst)
path_temp_files.remove(blendfile_dst_tmp)
# strip TEMP_SUFFIX
for fn in path_temp_files:
shutil.copyfile(fn, fn[:-1])
for src, dst in path_copy_files:
assert(b'.blend' not in dst)
if not os.path.exists(src):
yield report(" %s: %r\n" % (colorize("source missing", color='red'), src))
else:
yield report(" %s: %r -> %r\n" % (colorize("copying", color='blue'), src, dst))
shutil.copy(src, dst)
yield report(" %s: %r\n" % (colorize("written", color='green'), blendfile_dst))
elif mode == 'ZIP':
import zipfile
with zipfile.ZipFile(blendfile_dst.decode('utf-8'), 'w', zipfile.ZIP_DEFLATED) as zip:
for fn in path_temp_files:
yield report(" %s: %r -> <archive>\n" % (colorize("copying", color='blue'), fn))
zip.write(fn.decode('utf-8'),
arcname=os.path.relpath(fn[:-1], base_dir_dst).decode('utf-8'))
os.remove(fn)
shutil.rmtree(base_dir_dst_subdir)
for src, dst in path_copy_files:
assert(b'.blend' not in dst)
if not os.path.exists(src):
yield report(" %s: %r\n" % (colorize("source missing", color='red'), src))
else:
yield report(" %s: %r -> <archive>\n" % (colorize("copying", color='blue'), src))
zip.write(src.decode('utf-8'),
arcname=os.path.relpath(dst, base_dir_dst).decode('utf-8'))
if WRITE_JSON_REMAP:
import json
def write_dict_as_json(fn, dct):
zip.writestr(
fn,
json.dumps(dct,
check_circular=False,
# optional (pretty)
sort_keys=True, indent=4, separators=(',', ': '),
).encode('utf-8'))
if deps_remap is not None:
write_dict_as_json(".bam_deps_remap.json", deps_remap)
if paths_remap is not None:
write_dict_as_json(".bam_paths_remap.json", paths_remap)
if paths_uuid is not None:
write_dict_as_json(".bam_paths_uuid.json", paths_uuid)
del write_dict_as_json
yield report(" %s: %r\n" % (colorize("written", color='green'), blendfile_dst))
else:
raise Exception("%s not a known mode" % mode)
def create_argparse():
import os
import argparse
usage_text = (
"Run this script to extract blend-file(s) to a destination path: " +
os.path.basename(__file__) +
" --input=FILE --output=FILE [options]")
parser = argparse.ArgumentParser(description=usage_text)
# for main_render() only, but validate args.
parser.add_argument(
"-i", "--input", dest="path_src", metavar='FILE', required=True,
help="Input path(s) or a wildcard to glob many files")
parser.add_argument(
"-o", "--output", dest="path_dst", metavar='DIR', required=True,
help="Output file or a directory when multiple inputs are passed")
parser.add_argument(
"-m", "--mode", dest="mode", metavar='MODE', required=False,
choices=('FILE', 'ZIP'), default='FILE',
help="Output file or a directory when multiple inputs are passed")
parser.add_argument(
"-r", "--deps_remap", dest="deps_remap", metavar='FILE',
help="Write out the path mapping to a JSON file")
parser.add_argument(
"-s", "--paths_remap", dest="paths_remap", metavar='FILE',
help="Write out the original paths to a JSON file")
parser.add_argument(
"-u", "--paths_uuid", dest="paths_uuid", metavar='FILE',
help="Write out the original paths UUID to a JSON file")
return parser
def main():
import sys
parser = create_argparse()
args = parser.parse_args(sys.argv[1:])
encoding = sys.getfilesystemencoding()
deps_remap = {} if args.deps_remap else None
paths_remap = {} if args.paths_remap else None
paths_uuid = {} if args.paths_uuid else None
for msg in pack(
args.path_src.encode(encoding),
args.path_dst.encode(encoding),
args.mode,
deps_remap=deps_remap,
paths_remap=paths_remap,
paths_uuid=paths_uuid,
):
print(msg)
def write_dict_as_json(fn, dct):
with open(fn, 'w', encoding='utf-8') as f:
import json
json.dump(
dct, f, ensure_ascii=False,
check_circular=False,
# optional (pretty)
sort_keys=True, indent=4, separators=(',', ': '),
)
if deps_remap is not None:
write_dict_as_json(args.deps_remap, deps_remap)
if paths_remap is not None:
write_dict_as_json(args.paths_remap, paths_remap)
if paths_uuid is not None:
write_dict_as_json(args.paths_uuid, paths_uuid)
del write_dict_as_json
if __name__ == "__main__":
main()
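# A sketch of a typical invocation (paths are hypothetical):
#
#   python3 blendfile_pack.py --input=/work/scene.blend \
#       --output=/tmp/pack/scene.blend --deps_remap=/tmp/pack/deps.json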

modules/blendfile_pack_restore.py Executable file

@@ -0,0 +1,129 @@
#!/usr/bin/env python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
"""
This script takes blend-file(s) and remaps their paths to the original locations.
(needed for uploading to the server)
"""
VERBOSE = 1
import blendfile_path_walker
def blendfile_remap(blendfile_src, blendpath_dst, deps_remap):
import os
def temp_remap_cb(filepath, level):
"""
Simply point to the output dir.
"""
basename = os.path.basename(blendfile_src)
filepath_tmp = os.path.join(blendpath_dst, basename)
# ideally we could avoid copying _ALL_ blends
# TODO(cam)
import shutil
shutil.copy(filepath, filepath_tmp)
return filepath_tmp
for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
blendfile_src,
readonly=False,
temp_remap_cb=temp_remap_cb,
recursive=False,
):
# path_dst_final - current path in blend.
# path_src_orig - original path from JSON.
path_dst_final = fp.filepath.decode('utf-8')
path_src_orig = deps_remap.get(path_dst_final)
if path_src_orig is not None:
fp.filepath = path_src_orig.encode('utf-8')
if VERBOSE:
print(" Remapping:", path_dst_final, "->", path_src_orig)
def pack_restore(blendfile_dir_src, blendfile_dir_dst, pathmap):
import os
for dirpath, dirnames, filenames in os.walk(blendfile_dir_src):
if dirpath.startswith(b"."):
continue
for filename in filenames:
if os.path.splitext(filename)[1].lower() == b".blend":
remap = pathmap.get(filename.decode('utf-8'))
if remap is not None:
filepath = os.path.join(dirpath, filename)
# main function call
blendfile_remap(filepath, blendfile_dir_dst, remap)
def create_argparse():
import os
import argparse
usage_text = (
"Run this script to remap blend-file(s) paths using a JSON file created by 'packer.py':" +
os.path.basename(__file__) +
"--input=DIR --remap=JSON [options]")
parser = argparse.ArgumentParser(description=usage_text)
# for main_render() only, but validate args.
parser.add_argument(
"-i", "--input", dest="path_src", metavar='DIR', required=True,
help="Input path(s) or a wildcard to glob many files")
parser.add_argument(
"-o", "--output", dest="path_dst", metavar='DIR', required=True,
help="Output directory ")
parser.add_argument(
"-r", "--deps_remap", dest="deps_remap", metavar='JSON', required=True,
help="JSON file containing the path remapping info")
return parser
def main():
import sys
import json
parser = create_argparse()
args = parser.parse_args(sys.argv[1:])
encoding = sys.getfilesystemencoding()
with open(args.deps_remap, 'r', encoding='utf-8') as f:
pathmap = json.load(f)
pack_restore(
args.path_src.encode(encoding),
args.path_dst.encode(encoding),
pathmap,
)
if __name__ == "__main__":
main()
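# A sketch of a typical invocation (paths are hypothetical):
#
#   python3 blendfile_pack_restore.py --input=/tmp/pack \
#       --output=/tmp/restore --deps_remap=/tmp/pack/deps.json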

modules/blendfile_path_walker.py Normal file

@@ -0,0 +1,644 @@
#!/usr/bin/env python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
VERBOSE = True
TIMEIT = True
class C_defs:
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
# DNA_sequence_types.h (Sequence.type)
SEQ_TYPE_IMAGE = 0
SEQ_TYPE_META = 1
SEQ_TYPE_SCENE = 2
SEQ_TYPE_MOVIE = 3
SEQ_TYPE_SOUND_RAM = 4
SEQ_TYPE_SOUND_HD = 5
SEQ_TYPE_MOVIECLIP = 6
SEQ_TYPE_MASK = 7
SEQ_TYPE_EFFECT = 8
IMA_SRC_FILE = 1
IMA_SRC_SEQUENCE = 2
IMA_SRC_MOVIE = 3
if VERBOSE:
_A = open("/tmp/a.log", 'w')
class log_deps:
@staticmethod
def info(msg):
_A.write(msg)
_A.write("\n")
def set_as_str(s):
if s is None:
return "None"
else:
return (", ".join(sorted(i.decode('ascii') for i in sorted(s))))
class FPElem:
"""
Tiny filepath class to hide blendfile.
"""
__slots__ = (
"basedir",
# library link level
"level",
"userdata",
)
def __init__(self, basedir, level,
# subclasses get/set functions should use
userdata):
self.basedir = basedir
self.level = level
# subclass must call
self.userdata = userdata
# --------
# filepath
@property
def filepath(self):
return self._get_cb()
@filepath.setter
def filepath(self, filepath):
self._set_cb(filepath)
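# A sketch of how callers use an FPElem (fp is a hypothetical instance):
#
#   fp.filepath             # read the current path (bytes)
#   fp.filepath = b'//new'  # write it back into the underlying block(s)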
class FPElem_block_path(FPElem):
"""
Simple block-path:
userdata = (block, path)
"""
__slots__ = ()
def _get_cb(self):
block, path = self.userdata
return block[path]
def _set_cb(self, filepath):
block, path = self.userdata
block[path] = filepath
class FPElem_sequence_single(FPElem):
"""
Movie sequence
userdata = (block, path)
"""
__slots__ = ()
def _get_cb(self):
block, path, sub_block, sub_path = self.userdata
return block[path] + sub_block[sub_path]
def _set_cb(self, filepath):
block, path, sub_block, sub_path = self.userdata
# TODO, os.sep
a, b = filepath.rsplit(b'/', 1)
block[path] = a + b'/'
sub_block[sub_path] = b
class FilePath:
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
# ------------------------------------------------------------------------
# Main function to visit paths
@staticmethod
def visit_from_blend(
filepath,
# never modify the blend
readonly=True,
# callback that creates a temp file and returns its path.
temp_remap_cb=None,
# recursive options
recursive=False,
# list of ID block names we want to load, or None to load all
block_codes=None,
# root when we're loading libs indirectly
rootdir=None,
level=0,
# dict of id's used so we don't follow these links again
# prevents cyclic references too!
# {lib_path: set([block id's ...])}
lib_visit=None,
):
# print(level, block_codes)
import os
if VERBOSE:
indent_str = " " * level
# print(indent_str + "Opening:", filepath)
# print(indent_str + "... blocks:", block_codes)
log_deps.info("~")
log_deps.info("%s%s" % (indent_str, filepath.decode('utf-8')))
log_deps.info("%s%s" % (indent_str, set_as_str(block_codes)))
basedir = os.path.dirname(os.path.abspath(filepath))
if rootdir is None:
rootdir = basedir
if recursive and (level > 0) and (block_codes is not None):
# prevent expanding the same datablock more than once
expand_codes = set()
# {lib_id: {block_ids... }}
expand_codes_idlib = {}
# libraries used by this blend
block_codes_idlib = set()
# only for this block
def _expand_codes_add_test(block, code):
# return True, if the ID should be searched further
#
# we could investigate a better way...
# we shouldn't be accessing ID blocks at this point, but it's harmless
if code == b'ID':
assert(code == block.code)
if recursive:
expand_codes_idlib.setdefault(block[b'lib'], set()).add(block[b'name'])
return False
else:
len_prev = len(expand_codes)
expand_codes.add(block[b'id.name'])
return (len_prev != len(expand_codes))
def block_expand(block, code):
assert(block.code == code)
if _expand_codes_add_test(block, code):
yield block
assert(block.code == code)
fn = ExpandID.expand_funcs.get(code)
if fn is not None:
for sub_block in fn(block):
if sub_block is not None:
yield from block_expand(sub_block, sub_block.code)
else:
if code == b'ID':
yield block
else:
expand_codes = None
# set below
expand_codes_idlib = None
# never set
block_codes_idlib = None
def block_expand(block, code):
assert(block.code == code)
yield block
# ------
# Define
#
# - iter_blocks_id(code)
# - iter_blocks_idlib()
if block_codes is None:
def iter_blocks_id(code):
return blend.find_blocks_from_code(code)
def iter_blocks_idlib():
return blend.find_blocks_from_code(b'LI')
else:
def iter_blocks_id(code):
for block in blend.find_blocks_from_code(code):
if block[b'id.name'] in block_codes:
yield from block_expand(block, code)
if block_codes_idlib is not None:
def iter_blocks_idlib():
for block in blend.find_blocks_from_code(b'LI'):
if block[b'name'] in block_codes_idlib:
yield from block_expand(block, b'LI')
else:
def iter_blocks_idlib():
return blend.find_blocks_from_code(b'LI')
if temp_remap_cb is not None:
filepath_tmp = temp_remap_cb(filepath, level)
else:
filepath_tmp = filepath
# store info to pass along with each iteration
extra_info = rootdir, os.path.basename(filepath)
import blendfile
blend = blendfile.open_blend(filepath_tmp, "rb" if readonly else "r+b")
for code in blend.code_index.keys():
# handle library blocks as special case
if ((len(code) != 2) or
(code in {
# libraries handled below
b'LI',
b'ID',
# unneeded
b'WM',
b'SN', # bScreen
})):
continue
# if VERBOSE:
# print(" Scanning", code)
for block in iter_blocks_id(code):
yield from FilePath.from_block(block, basedir, extra_info, level)
# print("A:", expand_codes)
# print("B:", block_codes)
if VERBOSE:
log_deps.info("%s%s" % (indent_str, set_as_str(expand_codes)))
if recursive:
if expand_codes_idlib is None:
expand_codes_idlib = {}
for block in blend.find_blocks_from_code(b'ID'):
expand_codes_idlib.setdefault(block[b'lib'], set()).add(block[b'name'])
# look into libraries
lib_all = []
for lib_id, lib_block_codes in sorted(expand_codes_idlib.items()):
lib = blend.find_block_from_offset(lib_id)
lib_path = lib[b'name']
# get all data needed to read the blend files here (it will be freed!)
# lib is an address at the moment, we only use it as a way to group
lib_all.append((lib_path, lib_block_codes))
# import IPython; IPython.embed()
# ensure we expand indirect linked libs
if block_codes_idlib is not None:
block_codes_idlib.add(lib_path)
# do this afterwards, in case we mangle names above
for block in iter_blocks_idlib():
yield from FilePath.from_block(block, basedir, extra_info, level)
blend.close()
# ----------------
# Handle Recursive
if recursive:
# now we've closed the file, loop on other files
# note: sorting isn't needed, it just gives a predictable load-order.
for lib_path, lib_block_codes in lib_all:
lib_path_abs = os.path.normpath(utils.compatpath(utils.abspath(lib_path, basedir)))
# if we visited this before,
# check we don't follow the same links more than once
lib_block_codes_existing = lib_visit.setdefault(lib_path_abs, set())
lib_block_codes -= lib_block_codes_existing
# don't touch them again
lib_block_codes_existing.update(lib_block_codes)
# print("looking for", lib_block_codes)
# import IPython; IPython.embed()
if VERBOSE:
print((indent_str + " "), "Library: ", filepath, " -> ", lib_path_abs, sep="")
# print((indent_str + " "), lib_block_codes)
yield from FilePath.visit_from_blend(
lib_path_abs,
readonly=readonly,
temp_remap_cb=temp_remap_cb,
recursive=True,
block_codes=lib_block_codes,
rootdir=rootdir,
level=level + 1,
lib_visit=lib_visit,
)
# ------------------------------------------------------------------------
# Direct filepaths from Blocks
#
# (no expanding or following references)
@staticmethod
def from_block(block, basedir, extra_info, level):
assert(block.code != b'DATA')
fn = FilePath._from_block_dict.get(block.code)
if fn is not None:
yield from fn(block, basedir, extra_info, level)
@staticmethod
def _from_block_MC(block, basedir, extra_info, level):
# TODO, image sequence
yield FPElem_block_path(basedir, level, (block, b'name')), extra_info
@staticmethod
def _from_block_IM(block, basedir, extra_info, level):
if block[b'source'] not in {C_defs.IMA_SRC_FILE, C_defs.IMA_SRC_SEQUENCE, C_defs.IMA_SRC_MOVIE}:
return
if block[b'packedfile']:
return
yield FPElem_block_path(basedir, level, (block, b'name')), extra_info
@staticmethod
def _from_block_VF(block, basedir, extra_info, level):
if block[b'packedfile']:
return
yield FPElem_block_path(basedir, level, (block, b'name')), extra_info
@staticmethod
def _from_block_SO(block, basedir, extra_info, level):
if block[b'packedfile']:
return
yield FPElem_block_path(basedir, level, (block, b'name')), extra_info
@staticmethod
def _from_block_ME(block, basedir, extra_info, level):
block_external = block.get_pointer(b'ldata.external')
if block_external is not None:
yield FPElem_block_path(basedir, level, (block_external, b'filename')), extra_info
@staticmethod
def _from_block_SC(block, basedir, extra_info, level):
block_ed = block.get_pointer(b'ed')
if block_ed is not None:
sdna_index_Sequence = block.file.sdna_index_from_id[b'Sequence']
def seqbase(someseq):
for item in someseq:
item_type = item.get(b'type', sdna_index_refine=sdna_index_Sequence)
if item_type >= C_defs.SEQ_TYPE_EFFECT:
pass
elif item_type == C_defs.SEQ_TYPE_META:
yield from seqbase(bf_utils.iter_ListBase(item.get_pointer(b'seqbase.first', sdna_index_refine=sdna_index_Sequence)))
else:
item_strip = item.get_pointer(b'strip', sdna_index_refine=sdna_index_Sequence)
if item_strip is None: # unlikely!
continue
item_stripdata = item_strip.get_pointer(b'stripdata')
if item_type == C_defs.SEQ_TYPE_IMAGE:
# TODO, multiple images
yield FPElem_sequence_single(basedir, level, (item_strip, b'dir', item_stripdata, b'name')), extra_info
elif item_type == C_defs.SEQ_TYPE_MOVIE:
yield FPElem_sequence_single(basedir, level, (item_strip, b'dir', item_stripdata, b'name')), extra_info
elif item_type == C_defs.SEQ_TYPE_SOUND_RAM:
pass
elif item_type == C_defs.SEQ_TYPE_SOUND_HD:
pass
yield from seqbase(bf_utils.iter_ListBase(block_ed.get_pointer(b'seqbase.first')))
@staticmethod
def _from_block_LI(block, basedir, extra_info, level):
if block.get(b'packedfile', None):
return
yield FPElem_block_path(basedir, level, (block, b'name')), extra_info
# _from_block_IM --> {b'IM': _from_block_IM, ...}
_from_block_dict = {
k.rpartition("_")[2].encode('ascii'): s_fn.__func__ for k, s_fn in locals().items()
if isinstance(s_fn, staticmethod)
if k.startswith("_from_block_")
}
class bf_utils:
@staticmethod
def iter_ListBase(block):
while block:
yield block
block = block.file.find_block_from_offset(block[b'next'])
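# A sketch of walking a ListBase (scene_block is a hypothetical b'SC' block):
#
#   sdna_index_Base = scene_block.file.sdna_index_from_id[b'Base']
#   for base in bf_utils.iter_ListBase(scene_block.get_pointer(b'base.first')):
#       ob = base.get_pointer(b'object', sdna_index_refine=sdna_index_Base)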
@staticmethod
def iter_array(block, length=-1):
assert(block.code == b'DATA')
import blendfile
import os
handle = block.file.handle
header = block.file.header
for i in range(length):
block.file.handle.seek(block.file_offset + (header.pointer_size * i), os.SEEK_SET)
offset = blendfile.DNA_IO.read_pointer(handle, header)
sub_block = block.file.find_block_from_offset(offset)
yield sub_block
# -----------------------------------------------------------------------------
# ID Expand
class ExpandID:
# fake module
#
# TODO:
#
# Array lookups here are _WAY_ too complicated,
# we need some nicer way to represent pointer indirection (easy like in C!)
# but for now, use what we have.
#
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
@staticmethod
def _expand_generic_material(block):
array_len = block.get(b'totcol')
if array_len != 0:
array = block.get_pointer(b'mat')
for sub_block in bf_utils.iter_array(array, array_len):
yield sub_block
@staticmethod
def _expand_generic_mtex(block):
field = block.dna_type.field_from_name[b'mtex']
array_len = field.dna_size // block.file.header.pointer_size
for i in range(array_len):
path = ('mtex[%d]' % i).encode('ascii')
item = block.get_pointer(path)
if item:
yield item.get_pointer(b'tex')
yield item.get_pointer(b'object')
@staticmethod
def _expand_generic_nodetree(block):
assert(block.dna_type.dna_type_id == b'bNodeTree')
sdna_index_bNode = block.file.sdna_index_from_id[b'bNode']
for item in bf_utils.iter_ListBase(block.get_pointer(b'nodes.first')):
item_type = item.get(b'type', sdna_index_refine=sdna_index_bNode)
if item_type != 221: # CMP_NODE_R_LAYERS
yield item.get_pointer(b'id', sdna_index_refine=sdna_index_bNode)
@staticmethod
def _expand_generic_nodetree_id(block):
block_ntree = block.get_pointer(b'nodetree')
if block_ntree is not None:
yield from ExpandID._expand_generic_nodetree(block_ntree)
@staticmethod
def _expand_generic_animdata(block):
block_adt = block.get_pointer(b'adt')
if block_adt:
yield block_adt.get_pointer(b'action')
# TODO, NLA
@staticmethod
def expand_OB(block): # 'Object'
yield from ExpandID._expand_generic_animdata(block)
yield block.get_pointer(b'data')
yield block.get_pointer(b'dup_group')
yield block.get_pointer(b'proxy')
yield block.get_pointer(b'proxy_group')
@staticmethod
def expand_ME(block): # 'Mesh'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_material(block)
@staticmethod
def expand_CU(block): # 'Curve'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_material(block)
sub_block = block.get_pointer(b'vfont')
if sub_block is not None:
yield sub_block
yield block.get_pointer(b'vfontb')
yield block.get_pointer(b'vfonti')
yield block.get_pointer(b'vfontbi')
@staticmethod
def expand_MB(block): # 'MBall'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_material(block)
@staticmethod
def expand_LA(block): # 'Lamp'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_nodetree_id(block)
yield from ExpandID._expand_generic_mtex(block)
@staticmethod
def expand_MA(block): # 'Material'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_nodetree_id(block)
yield from ExpandID._expand_generic_mtex(block)
yield block.get_pointer(b'group')
@staticmethod
def expand_TE(block): # 'Tex'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_nodetree_id(block)
yield block.get_pointer(b'ima')
@staticmethod
def expand_WO(block): # 'World'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_nodetree_id(block)
yield from ExpandID._expand_generic_mtex(block)
@staticmethod
def expand_NT(block): # 'bNodeTree'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_nodetree(block)
@staticmethod
def expand_SC(block): # 'Scene'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_nodetree_id(block)
yield block.get_pointer(b'world')
sdna_index_Base = block.file.sdna_index_from_id[b'Base']
for item in bf_utils.iter_ListBase(block.get_pointer(b'base.first')):
yield item.get_pointer(b'object', sdna_index_refine=sdna_index_Base)
@staticmethod
def expand_GR(block): # 'Group'
sdna_index_GroupObject = block.file.sdna_index_from_id[b'GroupObject']
for item in bf_utils.iter_ListBase(block.get_pointer(b'gobject.first')):
yield item.get_pointer(b'ob', sdna_index_refine=sdna_index_GroupObject)
# expand_GR --> {b'GR': expand_GR, ...}
expand_funcs = {
k.rpartition("_")[2].encode('ascii'): s_fn.__func__ for k, s_fn in locals().items()
if isinstance(s_fn, staticmethod)
if k.startswith("expand_")
}
# -----------------------------------------------------------------------------
# Packing Utility
class utils:
# fake module
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
@staticmethod
def abspath(path, start, library=None):
import os
if path.startswith(b'//'):
# if library:
# start = os.path.dirname(abspath(library.filepath))
return os.path.join(start, path[2:])
return path
if __import__("os").sep == '/':
@staticmethod
def compatpath(path):
return path.replace(b'\\', b'/')
else:
@staticmethod
def compatpath(path):
# keep '//'
return path[:2] + path[2:].replace(b'/', b'\\')
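# A sketch (POSIX branch; paths are hypothetical): blend-relative paths
# keep their '//' prefix while separators are normalized:
#
#   utils.compatpath(b'//textures\\wood.png') -> b'//textures/wood.png'
#   utils.abspath(b'//wood.png', b'/work')    -> b'/work/wood.png'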