re-arrange modules, preparing for python-package-index
bam/__init__.py  (new file, 1 line)
@@ -0,0 +1 @@
bam/blend/blendfile.py  (new file, 811 lines)
@@ -0,0 +1,811 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
#
# (c) 2009, At Mind B.V. - Jeroen Bakker
# (c) 2014, Blender Foundation - Campbell Barton

import os
import struct
import logging
import gzip
import tempfile

log = logging.getLogger("blendfile")
log.setLevel(logging.ERROR)

FILE_BUFFER_SIZE = 1024 * 1024


# -----------------------------------------------------------------------------
# module global routines
#
# read routines:
# open a filename,
# determine if the file is compressed,
# and return a handle
def open_blend(filename, access="rb"):
    """Opens a blend file for reading or writing, depending on the access mode.
    Supports two kinds of blend files: uncompressed and compressed.
    Known issue: does not support packed blend files.
    """
    handle = open(filename, access)
    magic_test = b"BLENDER"
    magic = handle.read(len(magic_test))
    if magic == magic_test:
        log.debug("normal blendfile detected")
        handle.seek(0, os.SEEK_SET)
        bfile = BlendFile(handle)
        bfile.is_compressed = False
        bfile.filepath_orig = filename
        return bfile
    elif magic[:2] == b'\x1f\x8b':
        log.debug("gzip blendfile detected")
        handle.close()
        log.debug("decompressing started")
        fs = gzip.open(filename, "rb")
        data = fs.read(FILE_BUFFER_SIZE)
        magic = data[:len(magic_test)]
        if magic == magic_test:
            handle = tempfile.TemporaryFile()
            while data:
                handle.write(data)
                data = fs.read(FILE_BUFFER_SIZE)
            log.debug("decompressing finished")
            fs.close()
            log.debug("resetting decompressed file")
            handle.seek(0, os.SEEK_SET)
            bfile = BlendFile(handle)
            bfile.is_compressed = True
            bfile.filepath_orig = filename
            return bfile
        else:
            raise Exception("filetype inside gzip not a blend")
    else:
        raise Exception("filetype not a blend or a gzip blend")


def align(offset, by):
    n = by - 1
    return (offset + n) & ~n

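As an illustrative sketch (not from the patch itself; the blend path is made up), reading a file with this API looks like:

    from bam.blend import blendfile

    bf = blendfile.open_blend("/path/to/test.blend")  # transparently handles gzipped files
    print("pointer size:", bf.header.pointer_size)    # 4 or 8, from the file header
    print("blocks:", len(bf.blocks))
    bf.close()
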
# -----------------------------------------------------------------------------
# module classes


class BlendFile:
    """
    Blend file.
    """
    __slots__ = (
        # file (result of open())
        "handle",
        # str (original name of the file path)
        "filepath_orig",
        # BlendFileHeader
        "header",
        # struct.Struct
        "block_header_struct",
        # BlendFileBlock
        "blocks",
        # [DNAStruct, ...]
        "structs",
        # dict {b'StructName': sdna_index}
        # (where the index is an index into 'structs')
        "sdna_index_from_id",
        # dict {addr_old: block}
        "block_from_offset",
        # dict {b'code': [block, ...]}
        "code_index",
        # bool (did we make a change)
        "is_modified",
        # bool (is file gzipped)
        "is_compressed",
        )

    def __init__(self, handle):
        log.debug("initializing reading blend-file")
        self.handle = handle
        self.header = BlendFileHeader(handle)
        self.block_header_struct = self.header.create_block_header_struct()
        self.blocks = []
        self.code_index = {}

        block = BlendFileBlock(handle, self)
        while block.code != b'ENDB':
            if block.code == b'DNA1':
                (self.structs,
                 self.sdna_index_from_id,
                 ) = BlendFile.decode_structs(self.header, block, handle)
            else:
                handle.seek(block.size, os.SEEK_CUR)

            self.blocks.append(block)
            self.code_index.setdefault(block.code, []).append(block)

            block = BlendFileBlock(handle, self)
        self.is_modified = False
        self.blocks.append(block)

        # cache (could lazy-init, in case we never use it?)
        self.block_from_offset = {block.addr_old: block for block in self.blocks if block.code != b'ENDB'}

    def find_blocks_from_code(self, code):
        assert(type(code) == bytes)
        if code not in self.code_index:
            return []
        return self.code_index[code]

    def find_block_from_offset(self, offset):
        # same as looping over all blocks,
        # then checking ``block.addr_old == offset``
        assert(type(offset) is int)
        return self.block_from_offset.get(offset)

    def close(self):
        """
        Close the blend file,
        writes the blend file to disk if changes have happened.
        """
        if not self.is_modified:
            self.handle.close()
        else:
            handle = self.handle
            if self.is_compressed:
                log.debug("close compressed blend file")
                handle.seek(0, os.SEEK_SET)
                log.debug("compressing started")
                fs = gzip.open(self.filepath_orig, "wb")
                data = handle.read(FILE_BUFFER_SIZE)
                while data:
                    fs.write(data)
                    data = handle.read(FILE_BUFFER_SIZE)
                fs.close()
                log.debug("compressing finished")

            handle.close()

    def ensure_subtype_smaller(self, sdna_index_curr, sdna_index_next):
        # never refine to a smaller type
        if (self.structs[sdna_index_curr].size >
                self.structs[sdna_index_next].size):

            raise RuntimeError("can't refine to smaller type (%s -> %s)" %
                               (self.structs[sdna_index_curr].dna_type_id.decode('ascii'),
                                self.structs[sdna_index_next].dna_type_id.decode('ascii')))

    @staticmethod
    def decode_structs(header, block, handle):
        """
        DNACatalog is a catalog of all information in the DNA1 file-block
        """
        log.debug("building DNA catalog")
        shortstruct = DNA_IO.USHORT[header.endian_index]
        shortstruct2 = struct.Struct(header.endian_str + b'HH')
        intstruct = DNA_IO.UINT[header.endian_index]

        data = handle.read(block.size)
        types = []
        names = []

        structs = []
        sdna_index_from_id = {}

        offset = 8
        names_len = intstruct.unpack_from(data, offset)[0]
        offset += 4

        log.debug("building #%d names" % names_len)
        for i in range(names_len):
            tName = DNA_IO.read_data0(data, offset)
            offset = offset + len(tName) + 1
            names.append(DNAName(tName))
        del names_len

        offset = align(offset, 4)
        offset += 4
        types_len = intstruct.unpack_from(data, offset)[0]
        offset += 4
        log.debug("building #%d types" % types_len)
        for i in range(types_len):
            dna_type_id = DNA_IO.read_data0(data, offset)
            # None will be replaced by the DNAStruct, below
            types.append(DNAStruct(dna_type_id))
            offset += len(dna_type_id) + 1

        offset = align(offset, 4)
        offset += 4
        log.debug("building #%d type-lengths" % types_len)
        for i in range(types_len):
            tLen = shortstruct.unpack_from(data, offset)[0]
            offset = offset + 2
            types[i].size = tLen
        del types_len

        offset = align(offset, 4)
        offset += 4

        structs_len = intstruct.unpack_from(data, offset)[0]
        offset += 4
        log.debug("building #%d structures" % structs_len)
        for sdna_index in range(structs_len):
            d = shortstruct2.unpack_from(data, offset)
            struct_type_index = d[0]
            offset += 4
            dna_struct = types[struct_type_index]
            sdna_index_from_id[dna_struct.dna_type_id] = sdna_index
            structs.append(dna_struct)

            fields_len = d[1]
            dna_offset = 0

            for field_index in range(fields_len):
                d2 = shortstruct2.unpack_from(data, offset)
                field_type_index = d2[0]
                field_name_index = d2[1]
                offset += 4
                dna_type = types[field_type_index]
                dna_name = names[field_name_index]
                if dna_name.is_pointer or dna_name.is_method_pointer:
                    dna_size = header.pointer_size * dna_name.array_size
                else:
                    dna_size = dna_type.size * dna_name.array_size

                field = DNAField(dna_type, dna_name, dna_size, dna_offset)
                dna_struct.fields.append(field)
                dna_struct.field_from_name[dna_name.name_only] = field
                dna_offset += dna_size

        return structs, sdna_index_from_id

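A sketch of how the decoded DNA catalog is typically used (hypothetical path; b'LI' is the standard library block code and b'Scene' a standard SDNA struct name, assumed here):

    bf = blendfile.open_blend("/path/to/test.blend")
    for lib in bf.find_blocks_from_code(b'LI'):       # all linked-library blocks
        print(lib[b'name'])                           # path of the linked .blend
    sdna_index = bf.sdna_index_from_id[b'Scene']      # index into bf.structs
    print(bf.structs[sdna_index].size)                # on-disk size of the struct
    bf.close()
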
class BlendFileBlock:
    """
    Instance of a struct.
    """
    __slots__ = (
        # BlendFile
        "file",
        "code",
        "size",
        "addr_old",
        "sdna_index",
        "count",
        "file_offset",
        )

    def __str__(self):
        return ("<%s.%s (%s), size=%d at %s>" %
                # fields=[%s]
                (self.__class__.__name__,
                 self.dna_type.dna_type_id.decode('ascii'),
                 self.code.decode(),
                 self.size,
                 # b", ".join(f.dna_name.name_only for f in self.dna_type.fields).decode('ascii'),
                 hex(self.addr_old),
                 ))

    def __init__(self, handle, bfile):
        OLDBLOCK = struct.Struct(b'4sI')

        self.file = bfile

        data = handle.read(bfile.block_header_struct.size)
        # header size can be 8, 20, or 24 bytes long
        # 8: old blend files ENDB block (exception)
        # 20: normal headers 32 bit platform
        # 24: normal headers 64 bit platform
        if len(data) > 15:
            blockheader = bfile.block_header_struct.unpack(data)
            self.code = blockheader[0].partition(b'\0')[0]
            if self.code != b'ENDB':
                self.size = blockheader[1]
                self.addr_old = blockheader[2]
                self.sdna_index = blockheader[3]
                self.count = blockheader[4]
                self.file_offset = handle.tell()
            else:
                self.size = 0
                self.addr_old = 0
                self.sdna_index = 0
                self.count = 0
                self.file_offset = 0
        else:
            blockheader = OLDBLOCK.unpack(data)
            self.code = DNA_IO.read_data0(blockheader[0], 0)
            self.size = 0
            self.addr_old = 0
            self.sdna_index = 0
            self.count = 0
            self.file_offset = 0

    @property
    def dna_type(self):
        return self.file.structs[self.sdna_index]

    def refine_type_from_index(self, sdna_index_next):
        assert(type(sdna_index_next) is int)
        sdna_index_curr = self.sdna_index
        self.file.ensure_subtype_smaller(sdna_index_curr, sdna_index_next)
        self.sdna_index = sdna_index_next

    def refine_type(self, dna_type_id):
        assert(type(dna_type_id) is bytes)
        self.refine_type_from_index(self.file.sdna_index_from_id[dna_type_id])

    def get(self, path,
            default=...,
            sdna_index_refine=None,
            use_nil=True, use_str=True,
            base_index=0,
            ):

        if sdna_index_refine is None:
            sdna_index_refine = self.sdna_index
        else:
            self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)

        dna_struct = self.file.structs[sdna_index_refine]
        ofs = self.file_offset

        if base_index != 0:
            assert(base_index < self.count)
            ofs += (self.size // self.count) * base_index

        self.file.handle.seek(ofs, os.SEEK_SET)
        return dna_struct.field_get(
                self.file.header, self.file.handle, path,
                default=default,
                use_nil=use_nil, use_str=use_str,
                )

    def set(self, path, value,
            sdna_index_refine=None,
            ):

        if sdna_index_refine is None:
            sdna_index_refine = self.sdna_index
        else:
            self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)

        dna_struct = self.file.structs[sdna_index_refine]
        self.file.handle.seek(self.file_offset, os.SEEK_SET)
        self.file.is_modified = True
        return dna_struct.field_set(
                self.file.header, self.file.handle, path, value)

    # ---------------
    # Utility get/set
    #
    # avoid inline pointer casting
    def get_pointer(
            self, path,
            default=...,
            sdna_index_refine=None,
            base_index=0,
            ):
        if sdna_index_refine is None:
            sdna_index_refine = self.sdna_index
        result = self.get(path, default, sdna_index_refine=sdna_index_refine, base_index=base_index)

        # default
        if type(result) is not int:
            return result

        assert(self.file.structs[sdna_index_refine].field_from_path(self.file.header, self.file.handle, path).dna_name.is_pointer)
        if result != 0:
            # possible (but unlikely)
            # that this fails and returns None,
            # maybe we want to raise some exception in this case
            return self.file.find_block_from_offset(result)
        else:
            return None

    # ----------------------
    # Python convenience API

    # dict-like access
    def __getitem__(self, item):
        return self.get(item, use_str=False)

    def __setitem__(self, item, value):
        self.set(item, value)

    def keys(self):
        return (f.dna_name.name_only for f in self.dna_type.fields)

    def values(self):
        return (self[k] for k in self.keys())

    def items(self):
        return ((k, self[k]) for k in self.keys())

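A sketch of the dict-like convenience API above (hypothetical path; b'SC' and b'id.name' are standard block-code/field-path conventions, assumed here); note the "r+b" access, since the default "rb" handle is read-only:

    bf = blendfile.open_blend("/path/to/test.blend", "r+b")
    for scene in bf.find_blocks_from_code(b'SC'):
        print(scene[b'id.name'])                      # e.g. b'SCScene'
        scene[b'id.name'] = b'SCRenamed'              # sets is_modified
    bf.close()                                        # flushes the change to disk
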
# -----------------------------------------------------------------------------
# Read Magic
#
# magic = str
# pointer_size = int
# is_little_endian = bool
# version = int


class BlendFileHeader:
    """
    BlendFileHeader parses the first 12 bytes of a blend file,
    which contain information about the hardware architecture.
    """
    __slots__ = (
        # str
        "magic",
        # int 4/8
        "pointer_size",
        # bool
        "is_little_endian",
        # int
        "version",
        # str, used to pass to 'struct'
        "endian_str",
        # int, used to index common types
        "endian_index",
        )

    def __init__(self, handle):
        FILEHEADER = struct.Struct(b'7s1s1s3s')

        log.debug("reading blend-file-header")
        values = FILEHEADER.unpack(handle.read(FILEHEADER.size))
        self.magic = values[0]
        pointer_size_id = values[1]
        if pointer_size_id == b'-':
            self.pointer_size = 8
        elif pointer_size_id == b'_':
            self.pointer_size = 4
        else:
            assert(0)
        endian_id = values[2]
        if endian_id == b'v':
            self.is_little_endian = True
            self.endian_str = b'<'
            self.endian_index = 0
        elif endian_id == b'V':
            self.is_little_endian = False
            self.endian_index = 1
            self.endian_str = b'>'
        else:
            assert(0)

        version_id = values[3]
        self.version = int(version_id)

    def create_block_header_struct(self):
        return struct.Struct(b''.join((
                self.endian_str,
                b'4sI',
                b'I' if self.pointer_size == 4 else b'Q',
                b'II',
                )))


class DNAName:
    """
    DNAName is a C-type name stored in the DNA
    """
    __slots__ = (
        "name_full",
        "name_only",
        "is_pointer",
        "is_method_pointer",
        "array_size",
        )

    def __init__(self, name_full):
        self.name_full = name_full
        self.name_only = self.calc_name_only()
        self.is_pointer = self.calc_is_pointer()
        self.is_method_pointer = self.calc_is_method_pointer()
        self.array_size = self.calc_array_size()

    def as_reference(self, parent):
        if parent is None:
            result = b''
        else:
            result = parent + b'.'

        result = result + self.name_only
        return result

    def calc_name_only(self):
        result = self.name_full.strip(b'*()')
        index = result.find(b'[')
        if index != -1:
            result = result[:index]
        return result

    def calc_is_pointer(self):
        return (b'*' in self.name_full)

    def calc_is_method_pointer(self):
        return (b'(*' in self.name_full)

    def calc_array_size(self):
        result = 1
        temp = self.name_full
        index = temp.find(b'[')

        while index != -1:
            index_2 = temp.find(b']')
            result *= int(temp[index + 1:index_2])
            temp = temp[index_2 + 1:]
            index = temp.find(b'[')

        return result


class DNAField:
    """
    DNAField is a coupled DNAStruct and DNAName,
    and caches the offset for reuse.
    """
    __slots__ = (
        # DNAName
        "dna_name",
        # DNAStruct
        "dna_type",
        # size on-disk
        "dna_size",
        # cached info (avoid looping over fields each time)
        "dna_offset",
        )

    def __init__(self, dna_type, dna_name, dna_size, dna_offset):
        self.dna_type = dna_type
        self.dna_name = dna_name
        self.dna_size = dna_size
        self.dna_offset = dna_offset


class DNAStruct:
    """
    DNAStruct is a C-type structure stored in the DNA
    """
    __slots__ = (
        "dna_type_id",
        "size",
        "fields",
        "field_from_name",
        )

    def __init__(self, dna_type_id):
        self.dna_type_id = dna_type_id
        self.fields = []
        self.field_from_name = {}

    def field_from_path(self, header, handle, path):
        assert(type(path) == bytes)
        # support 'id.name'
        name, _, name_tail = path.partition(b'.')

        # support 'mtex[1].tex'
        # note, multi-dimensional arrays not supported
        # FIXME: 'mtex[1]' works, but not 'mtex[1].tex', why is this???
        if name.endswith(b']'):
            name, _, index = name[:-1].partition(b'[')
            index = int(index)
        else:
            index = 0

        field = self.field_from_name.get(name)

        if field is not None:
            handle.seek(field.dna_offset, os.SEEK_CUR)
            if index != 0:
                if field.dna_name.is_pointer:
                    index_offset = header.pointer_size * index
                else:
                    index_offset = field.dna_type.size * index
                assert(index_offset < field.dna_size)
                handle.seek(index_offset, os.SEEK_CUR)
            if name_tail == b'':
                return field
            else:
                return field.dna_type.field_from_path(header, handle, name_tail)

    def field_get(self, header, handle, path,
                  default=...,
                  use_nil=True, use_str=True,
                  ):
        assert(type(path) == bytes)

        field = self.field_from_path(header, handle, path)
        if field is None:
            if default is not ...:
                return default
            else:
                raise KeyError("%r not found in %r (%r)" % (path, [f.dna_name.name_only for f in self.fields], self.dna_type_id))

        dna_type = field.dna_type
        dna_name = field.dna_name

        if dna_name.is_pointer:
            return DNA_IO.read_pointer(handle, header)
        elif dna_type.dna_type_id == b'int':
            return DNA_IO.read_int(handle, header)
        elif dna_type.dna_type_id == b'short':
            return DNA_IO.read_short(handle, header)
        elif dna_type.dna_type_id == b'float':
            return DNA_IO.read_float(handle, header)
        elif dna_type.dna_type_id == b'char':
            if use_str:
                if use_nil:
                    return DNA_IO.read_string0(handle, dna_name.array_size)
                else:
                    return DNA_IO.read_string(handle, dna_name.array_size)
            else:
                if use_nil:
                    return DNA_IO.read_bytes0(handle, dna_name.array_size)
                else:
                    return DNA_IO.read_bytes(handle, dna_name.array_size)
        else:
            raise NotImplementedError("%r exists but isn't a pointer, can't resolve field %r" % (path, dna_name.name_only))

    def field_set(self, header, handle, path, value):
        assert(type(path) == bytes)

        field = self.field_from_path(header, handle, path)
        if field is None:
            raise KeyError("%r not found in %r" % (path, [f.dna_name.name_only for f in self.fields]))

        dna_type = field.dna_type
        dna_name = field.dna_name

        if dna_type.dna_type_id == b'char':
            if type(value) is str:
                return DNA_IO.write_string(handle, value, dna_name.array_size)
            else:
                return DNA_IO.write_bytes(handle, value, dna_name.array_size)
        else:
            raise NotImplementedError("Setting %r is not yet supported" % dna_type.dna_type_id)


class DNA_IO:
    """
    Module-like class for read-write utility functions.

    Only stores static methods & constants.
    """

    __slots__ = ()

    # Methods for read/write,
    # these are only here to avoid clogging the global namespace.

    @staticmethod
    def write_string(handle, astring, fieldlen):
        assert(isinstance(astring, str))
        if len(astring) >= fieldlen:
            stringw = astring[0:fieldlen]
        else:
            stringw = astring + '\0'
        handle.write(stringw.encode('utf-8'))

    @staticmethod
    def write_bytes(handle, astring, fieldlen):
        assert(isinstance(astring, (bytes, bytearray)))
        if len(astring) >= fieldlen:
            stringw = astring[0:fieldlen]
        else:
            stringw = astring + b'\0'

        handle.write(stringw)

    _STRING = [struct.Struct("%ds" % i) for i in range(0, 2048)]

    @staticmethod
    def _string_struct(length):
        if length < len(DNA_IO._STRING):
            st = DNA_IO._STRING[length]
        else:
            st = struct.Struct("%ds" % length)
        return st

    @staticmethod
    def read_bytes(handle, length):
        st = DNA_IO._string_struct(length)
        data = st.unpack(handle.read(st.size))[0]
        return data

    @staticmethod
    def read_bytes0(handle, length):
        st = DNA_IO._string_struct(length)
        data = st.unpack(handle.read(st.size))[0]
        return DNA_IO.read_data0(data, 0)

    @staticmethod
    def read_string(handle, length):
        return DNA_IO.read_bytes(handle, length).decode('utf-8')

    @staticmethod
    def read_string0(handle, length):
        return DNA_IO.read_bytes0(handle, length).decode('utf-8')

    @staticmethod
    def read_data0(data, offset):
        """
        Reads a zero-terminated string from a data buffer.
        """
        add = data.find(b'\0', offset) - offset
        st = DNA_IO._string_struct(add)
        return st.unpack_from(data, offset)[0]

    USHORT = struct.Struct(b'<H'), struct.Struct(b'>H')

    @staticmethod
    def read_ushort(handle, fileheader):
        st = DNA_IO.USHORT[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    UINT = struct.Struct(b'<I'), struct.Struct(b'>I')

    @staticmethod
    def read_uint(handle, fileheader):
        st = DNA_IO.UINT[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    SINT = struct.Struct(b'<i'), struct.Struct(b'>i')

    @staticmethod
    def read_int(handle, fileheader):
        st = DNA_IO.SINT[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    @staticmethod
    def read_float(handle, fileheader):
        return struct.unpack(fileheader.endian_str + b'f', handle.read(4))[0]

    SSHORT = struct.Struct(b'<h'), struct.Struct(b'>h')

    @staticmethod
    def read_short(handle, fileheader):
        st = DNA_IO.SSHORT[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    ULONG = struct.Struct(b'<Q'), struct.Struct(b'>Q')

    @staticmethod
    def read_ulong(handle, fileheader):
        st = DNA_IO.ULONG[fileheader.endian_index]
        return st.unpack(handle.read(st.size))[0]

    @staticmethod
    def read_pointer(handle, header):
        """
        Reads a pointer from a file handle;
        the pointer size is given by the header (BlendFileHeader).
        """
        if header.pointer_size == 4:
            st = DNA_IO.UINT[header.endian_index]
            return st.unpack(handle.read(st.size))[0]
        if header.pointer_size == 8:
            st = DNA_IO.ULONG[header.endian_index]
            return st.unpack(handle.read(st.size))[0]
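The paired little/big-endian structs above follow one convention: endian_index 0 selects '<' (little-endian), 1 selects '>' (big-endian). A quick standalone sketch of that behavior, verifiable in any Python shell:

    import struct
    le, be = struct.Struct(b'<H'), struct.Struct(b'>H')   # same shapes as DNA_IO.USHORT
    assert le.unpack(b'\x01\x00')[0] == 1                 # little-endian ushort
    assert be.unpack(b'\x00\x01')[0] == 1                 # big-endian ushort
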
bam/blend/blendfile_pack.py  (new executable file, 427 lines)
@@ -0,0 +1,427 @@
#!/usr/bin/env python3

# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****

from bam.blend import blendfile_path_walker

TIMEIT = False

# ------------------
# Ensure module path
import os
import sys
path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "modules"))
if path not in sys.path:
    sys.path.append(path)
del os, sys, path
# --------


# ----------------------
# debug low level output
#
# ... when internals _really_ fail & we want to know why
def _dbg(text):
    import sys
    from bam_utils.system import colorize
    if type(text) is bytes:
        text = text.decode('utf-8')
    sys.__stdout__.write(colorize(text, color='red') + "\n")
    sys.__stdout__.flush()


def _relpath_remap(
        path_src,
        base_dir_src,
        fp_basedir,
        blendfile_src_dir_fakeroot,
        ):

    import os

    if not os.path.isabs(path_src):
        # Absolute win32 paths on a unix system
        # cause bad issues!
        if len(path_src) >= 2:
            if path_src[0] != b'/'[0] and path_src[1] == b':'[0]:
                pass
            else:
                raise Exception("Internal error 'path_src' -> %r must be absolute" % path_src)

    path_src = os.path.normpath(path_src)
    path_dst = os.path.relpath(path_src, base_dir_src)

    if blendfile_src_dir_fakeroot is None:
        # /foo/../bar.png --> /foo/__/bar.png
        path_dst = path_dst.replace(b'..', b'__')
        path_dst = os.path.normpath(path_dst)
    else:
        if b'..' in path_dst:
            # remap, relative to project root

            # paths
            path_dst = os.path.join(blendfile_src_dir_fakeroot, path_dst)
            path_dst = os.path.normpath(path_dst)
            # if there are paths outside the root still...
            # this means they are outside the project directory; we don't support this,
            # so name accordingly
            if b'..' in path_dst:
                # SHOULD NEVER HAPPEN
                path_dst = path_dst.replace(b'..', b'__nonproject__')
                path_dst = b'_' + path_dst

    # _dbg(b"FINAL A: " + path_dst)
    path_dst_final = os.path.join(os.path.relpath(base_dir_src, fp_basedir), path_dst)
    path_dst_final = os.path.normpath(path_dst_final)
    # _dbg(b"FINAL B: " + path_dst_final)

    return path_dst, path_dst_final

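To illustrate the '..' flattening above, a sketch with made-up byte paths (on a POSIX system):

    # src '/project/textures/wood.png' relative to '/project/shots' is '../textures/wood.png',
    # which gets packed as '__/textures/wood.png' so nothing escapes the session directory.
    path_dst, path_dst_final = _relpath_remap(
            b'/project/textures/wood.png',
            b'/project/shots',
            b'/project/shots',
            None)
    assert path_dst == b'__/textures/wood.png'
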
def pack(
        # store the blendfile relative to this directory, can be:
        #    os.path.dirname(blendfile_src)
        # but in some cases we want to use a path higher up.
        # base_dir_src,
        blendfile_src, blendfile_dst, mode='FILE',
        paths_remap_relbase=None,
        deps_remap=None, paths_remap=None, paths_uuid=None,
        # load every lib's deps, not just used deps.
        all_deps=False,
        # yield reports
        report=None,

        # The project path, eg:
        #    /home/me/myproject/mysession/path/to/blend/file.blend
        # the path would be:         b'path/to/blend'
        #
        # This is needed so we can choose to store paths
        # relative to project or relative to the current file.
        #
        # When None, _all_ paths are mapped relative to the current blend,
        # converting: '../../bar' --> '__/__/bar'
        # so all paths are nested and not moved outside the session path.
        blendfile_src_dir_fakeroot=None,
        ):
    """
    :param deps_remap: Store path deps_remap info as follows.
       {"file.blend": {"path_new": "path_old", ...}, ...}

    :type deps_remap: dict or None
    """

    # Internal details:
    # - we copy to a temp path before operating on the blend file
    #   so we can modify in-place.
    # - temp files are only created once (if we never touched them before);
    #   this way, for linked libraries, a single blend file may be used
    #   multiple times, and each access will apply new edits on top of the old ones.
    # - we track which libs we have touched (using the 'lib_visit' arg),
    #   this means that the same libs won't be touched many times to modify the same data,
    #   and also prevents cyclic loops from crashing.

    import os
    import shutil

    from bam.utils.system import colorize

    # first check args are OK
    # fakeroot _can't_ start with a separator, since we prepend chars to it.
    assert((blendfile_src_dir_fakeroot is None) or
           (not blendfile_src_dir_fakeroot.startswith(os.sep.encode('ascii'))))

    path_temp_files = set()
    path_copy_files = set()

    TEMP_SUFFIX = b'@'

    if report is None:
        report = lambda msg: msg

    yield report("%s: %r...\n" % (colorize("\nscanning deps", color='bright_green'), blendfile_src))

    if TIMEIT:
        import time
        t = time.time()

    base_dir_src = os.path.dirname(blendfile_src)
    base_dir_dst = os.path.dirname(blendfile_dst)
    # _dbg(blendfile_src)
    # _dbg(blendfile_dst)

    if mode == 'ZIP':
        base_dir_dst_temp = os.path.join(base_dir_dst, b'__blendfile_temp__')
    else:
        base_dir_dst_temp = os.path.join(base_dir_dst, b'__blendfile_pack__')

    def temp_remap_cb(filepath, rootdir):
        """
        Create temp files in the destination path.
        """
        filepath = blendfile_path_walker.utils.compatpath(filepath)

        # first remap this blend file to the location it will end up (so we can get images relative to _that_)
        # TODO(cam) cache the results
        fp_basedir_conv = _relpath_remap(os.path.join(rootdir, b'dummy'), base_dir_src, base_dir_src, blendfile_src_dir_fakeroot)[0]
        fp_basedir_conv = os.path.join(base_dir_src, os.path.dirname(fp_basedir_conv))

        # then get the file relative to the new location
        filepath_tmp = _relpath_remap(filepath, base_dir_src, fp_basedir_conv, blendfile_src_dir_fakeroot)[0]
        filepath_tmp = os.path.normpath(os.path.join(base_dir_dst_temp, filepath_tmp)) + TEMP_SUFFIX

        # only overwrite once (so we can write into a path already containing files)
        if filepath_tmp not in path_temp_files:
            os.makedirs(os.path.dirname(filepath_tmp), exist_ok=True)
            shutil.copy(filepath, filepath_tmp)
            path_temp_files.add(filepath_tmp)
        return filepath_tmp

    lib_visit = {}
    fp_blend_basename_last = b''

    for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
            blendfile_src,
            readonly=False,
            temp_remap_cb=temp_remap_cb,
            recursive=True,
            recursive_all=all_deps,
            lib_visit=lib_visit,
            ):

        # we could pass this in!
        fp_blend = os.path.join(fp.basedir, fp_blend_basename)

        if fp_blend_basename_last != fp_blend_basename:
            yield report("  %s: %s\n" % (colorize("blend", color='blue'), fp_blend))
            fp_blend_basename_last = fp_blend_basename

        # assume the path might be relative
        path_src_orig = fp.filepath
        path_rel = blendfile_path_walker.utils.compatpath(path_src_orig)
        path_src = blendfile_path_walker.utils.abspath(path_rel, fp.basedir)
        path_src = os.path.normpath(path_src)

        # destination path relative to the root
        # assert(b'..' not in path_src)
        assert(b'..' not in base_dir_src)

        # first remap this blend file to the location it will end up (so we can get images relative to _that_)
        # TODO(cam) cache the results
        fp_basedir_conv = _relpath_remap(fp_blend, base_dir_src, base_dir_src, blendfile_src_dir_fakeroot)[0]
        fp_basedir_conv = os.path.join(base_dir_src, os.path.dirname(fp_basedir_conv))

        # then get the file relative to the new location
        path_dst, path_dst_final = _relpath_remap(path_src, base_dir_src, fp_basedir_conv, blendfile_src_dir_fakeroot)

        path_dst = os.path.join(base_dir_dst, path_dst)

        path_dst_final = b'//' + path_dst_final
        fp.filepath = path_dst_final

        # add to copy-list
        # never copy libs (handled separately)
        if not isinstance(fp, blendfile_path_walker.FPElem_block_path) or fp.userdata[0].code != b'LI':
            path_copy_files.add((path_src, path_dst))

        for file_list in (
                blendfile_path_walker.utils.find_sequence_paths(path_src) if fp.is_sequence else (),
                fp.files_siblings(),
                ):

            _src_dir = os.path.dirname(path_src)
            _dst_dir = os.path.dirname(path_dst)
            path_copy_files.update(
                    {(os.path.join(_src_dir, f), os.path.join(_dst_dir, f))
                     for f in file_list
                     })
            del _src_dir, _dst_dir

        if deps_remap is not None:
            # this needs to become JSON later... ugh, need to use strings
            deps_remap.setdefault(
                    fp_blend_basename.decode('utf-8'),
                    {})[path_dst_final.decode('utf-8')] = path_src_orig.decode('utf-8')

    del lib_visit, fp_blend_basename_last

    if TIMEIT:
        print("  Time: %.4f\n" % (time.time() - t))

    yield report(("%s: %d files\n") %
                 (colorize("\narchiving", color='bright_green'), len(path_copy_files) + 1))

    # handle deps_remap and file renaming
    if deps_remap is not None:
        blendfile_src_basename = os.path.basename(blendfile_src).decode('utf-8')
        blendfile_dst_basename = os.path.basename(blendfile_dst).decode('utf-8')

        if blendfile_src_basename != blendfile_dst_basename:
            if mode != 'ZIP':
                deps_remap[blendfile_dst_basename] = deps_remap[blendfile_src_basename]
                del deps_remap[blendfile_src_basename]
        del blendfile_src_basename, blendfile_dst_basename

    # store path mapping {dst: src}
    if paths_remap is not None:

        if paths_remap_relbase is not None:
            relbase = lambda fn: os.path.relpath(fn, paths_remap_relbase)
        else:
            relbase = lambda fn: fn

        for src, dst in path_copy_files:
            # TODO. relative to project-basepath
            paths_remap[os.path.relpath(dst, base_dir_dst).decode('utf-8')] = relbase(src).decode('utf-8')
        # main file XXX, should have better way!
        paths_remap[os.path.basename(blendfile_src).decode('utf-8')] = relbase(blendfile_src).decode('utf-8')

        del relbase

    if paths_uuid is not None:
        from bam.utils.system import uuid_from_file

        for src, dst in path_copy_files:
            paths_uuid[os.path.relpath(dst, base_dir_dst).decode('utf-8')] = uuid_from_file(src)
        # XXX, better way to store temp target
        blendfile_dst_tmp = temp_remap_cb(blendfile_src, base_dir_src)
        paths_uuid[os.path.basename(blendfile_src).decode('utf-8')] = uuid_from_file(blendfile_dst_tmp)

        # blend libs
        for dst in path_temp_files:
            k = os.path.relpath(dst[:-len(TEMP_SUFFIX)], base_dir_dst_temp).decode('utf-8')
            if k not in paths_uuid:
                paths_uuid[k] = uuid_from_file(dst)
            del k

        del blendfile_dst_tmp
        del uuid_from_file

    # --------------------
    # Handle File Copy/Zip

    if mode == 'FILE':
        blendfile_dst_tmp = temp_remap_cb(blendfile_src, base_dir_src)

        shutil.move(blendfile_dst_tmp, blendfile_dst)
        path_temp_files.remove(blendfile_dst_tmp)

        # strip TEMP_SUFFIX
        for fn in path_temp_files:
            shutil.move(fn, fn[:-1])

        for src, dst in path_copy_files:
            assert(b'.blend' not in dst)

            if not os.path.exists(src):
                yield report("  %s: %r\n" % (colorize("source missing", color='red'), src))
            else:
                yield report("  %s: %r -> %r\n" % (colorize("copying", color='blue'), src, dst))
                shutil.copy(src, dst)

        yield report("  %s: %r\n" % (colorize("written", color='green'), blendfile_dst))

    elif mode == 'ZIP':
        import zipfile
        with zipfile.ZipFile(blendfile_dst.decode('utf-8'), 'w', zipfile.ZIP_DEFLATED) as zip_handle:
            for fn in path_temp_files:
                yield report("  %s: %r -> <archive>\n" % (colorize("copying", color='blue'), fn))
                zip_handle.write(
                        fn.decode('utf-8'),
                        arcname=os.path.relpath(fn[:-1], base_dir_dst_temp).decode('utf-8'))
                os.remove(fn)

            shutil.rmtree(base_dir_dst_temp)

            for src, dst in path_copy_files:
                assert(not dst.endswith(b'.blend'))

                if not os.path.exists(src):
                    yield report("  %s: %r\n" % (colorize("source missing", color='red'), src))
                else:
                    yield report("  %s: %r -> <archive>\n" % (colorize("copying", color='blue'), src))
                    zip_handle.write(src.decode('utf-8'),
                                     arcname=os.path.relpath(dst, base_dir_dst).decode('utf-8'))

                    """
                    _dbg(b"")
                    _dbg(b"REAL_FILE: " + dst)
                    _dbg(b"RELATIVE_FILE: " + os.path.relpath(dst, base_dir_dst))
                    """

        yield report("  %s: %r\n" % (colorize("written", color='green'), blendfile_dst))
    else:
        raise Exception("%s not a known mode" % mode)

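pack() is a generator that yields progress reports, so callers must iterate it for any work to happen (main() below does exactly this); a minimal sketch with made-up byte paths:

    for msg in pack(
            b'/project/shots/sh01.blend',
            b'/tmp/sh01.zip',
            mode='ZIP',
            ):
        print(msg, end="")
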
def create_argparse():
    import os
    import argparse

    usage_text = (
        "Run this script to extract blend-file(s) to a destination path: " +
        os.path.basename(__file__) +
        " --input=FILE --output=FILE [options]")

    parser = argparse.ArgumentParser(description=usage_text)

    parser.add_argument(
            "-i", "--input", dest="path_src", metavar='FILE', required=True,
            help="Input path(s) or a wildcard to glob many files",
            )
    parser.add_argument(
            "-o", "--output", dest="path_dst", metavar='DIR', required=True,
            help="Output file or a directory when multiple inputs are passed",
            )
    parser.add_argument(
            "-m", "--mode", dest="mode", metavar='MODE', required=False,
            choices=('FILE', 'ZIP'), default='FILE',
            help="Pack as a plain FILE or into a ZIP archive",
            )
    parser.add_argument(
            "-q", "--quiet", dest="use_quiet", action='store_true', required=False,
            help="Suppress status output",
            )

    return parser


def main():
    import sys

    parser = create_argparse()
    args = parser.parse_args(sys.argv[1:])

    encoding = sys.getfilesystemencoding()

    if args.use_quiet:
        report = lambda msg: None
    else:
        report = lambda msg: print(msg, end="")

    for msg in pack(
            args.path_src.encode(encoding),
            args.path_dst.encode(encoding),
            args.mode,
            ):
        report(msg)


if __name__ == "__main__":
    main()
bam/blend/blendfile_pack_restore.py  (new executable file, 143 lines)
@@ -0,0 +1,143 @@
#!/usr/bin/env python3

# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****

"""
This script takes blend-file(s) and remaps their paths to the original locations.

(needed for uploading to the server)
"""

VERBOSE = 1

from bam.blend import blendfile_path_walker


def blendfile_remap(
        blendfile_src, blendpath_dst,
        deps_remap=None, deps_remap_cb=None,
        deps_remap_cb_userdata=None,
        ):
    import os

    def temp_remap_cb(filepath, level):
        """
        Simply point to the output dir.
        """
        basename = os.path.basename(blendfile_src)
        filepath_tmp = os.path.join(blendpath_dst, basename)

        # ideally we could avoid copying _ALL_ blends
        # TODO(cam)
        import shutil
        shutil.copy(filepath, filepath_tmp)

        return filepath_tmp

    for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
            blendfile_src,
            readonly=False,
            temp_remap_cb=temp_remap_cb,
            recursive=False,
            ):

        # path_dst_final - current path in blend.
        # path_src_orig  - original path from JSON.

        path_dst_final_b = fp.filepath

        # support 2 modes, callback or dictionary
        if deps_remap_cb is not None:
            path_src_orig = deps_remap_cb(path_dst_final_b, deps_remap_cb_userdata)
            if path_src_orig is not None:
                fp.filepath = path_src_orig
                if VERBOSE:
                    print("  Remapping:", path_dst_final_b, "->", path_src_orig)
        else:
            path_dst_final = path_dst_final_b.decode('utf-8')
            path_src_orig = deps_remap.get(path_dst_final)
            if path_src_orig is not None:
                fp.filepath = path_src_orig.encode('utf-8')
                if VERBOSE:
                    print("  Remapping:", path_dst_final, "->", path_src_orig)


def pack_restore(blendfile_dir_src, blendfile_dir_dst, pathmap):
    import os

    for dirpath, dirnames, filenames in os.walk(blendfile_dir_src):
        if dirpath.startswith(b"."):
            continue

        for filename in filenames:
            if os.path.splitext(filename)[1].lower() == b".blend":
                remap = pathmap.get(filename.decode('utf-8'))
                if remap is not None:
                    filepath = os.path.join(dirpath, filename)

                    # main function call
                    blendfile_remap(filepath, blendfile_dir_dst, remap)


def create_argparse():
    import os
    import argparse

    usage_text = (
        "Run this script to remap blend-file(s) paths using a JSON file created by 'packer.py': " +
        os.path.basename(__file__) +
        " --input=DIR --remap=JSON [options]")

    parser = argparse.ArgumentParser(description=usage_text)

    parser.add_argument(
            "-i", "--input", dest="path_src", metavar='DIR', required=True,
            help="Input path(s) or a wildcard to glob many files")
    parser.add_argument(
            "-o", "--output", dest="path_dst", metavar='DIR', required=True,
            help="Output directory")
    parser.add_argument(
            "-r", "--deps_remap", dest="deps_remap", metavar='JSON', required=True,
            help="JSON file containing the path remapping info")

    return parser


def main():
    import sys
    import json

    parser = create_argparse()
    args = parser.parse_args(sys.argv[1:])

    encoding = sys.getfilesystemencoding()

    with open(args.deps_remap, 'r', encoding='utf-8') as f:
        pathmap = json.load(f)

    pack_restore(
            args.path_src.encode(encoding),
            args.path_dst.encode(encoding),
            pathmap,
            )


if __name__ == "__main__":
    main()
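A sketch of driving the restore from Python rather than the CLI (the JSON layout is the deps_remap mapping that blendfile_pack.pack() fills in; file names are made up):

    import json

    with open("/tmp/sh01_pack/remap.json", 'r', encoding='utf-8') as f:
        pathmap = json.load(f)     # {"file.blend": {"path_new": "path_old", ...}, ...}

    pack_restore(b'/tmp/sh01_pack', b'/tmp/sh01_restore', pathmap)
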
bam/blend/blendfile_path_remap.py  (new file, 281 lines)
@@ -0,0 +1,281 @@
#!/usr/bin/env python3

# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****

"""
Module for remapping paths from one directory to another.
"""

import os


# ----------------------------------------------------------------------------
# private utility functions

def _is_blend(f):
    return f.lower().endswith(b'.blend')


def _warn__ascii(msg):
    print("  warning: %s" % msg)


def _info__ascii(msg):
    print(msg)


def _warn__json(msg):
    import json
    print(json.dumps(("warning", msg)), end=",\n")


def _info__json(msg):
    import json
    print(json.dumps(("info", msg)), end=",\n")


def _uuid_from_file(fn, block_size=1 << 20):
    with open(fn, 'rb') as f:
        # first get the size
        f.seek(0, os.SEEK_END)
        size = f.tell()
        f.seek(0, os.SEEK_SET)
        # done!

        import hashlib
        sha = hashlib.new('sha512')
        while True:
            data = f.read(block_size)
            if not data:
                break
            sha.update(data)
        return (hex(size)[2:] + sha.hexdigest()).encode()

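The "uuid" above is simply the file size in hex concatenated with a SHA-512 hex digest, so two byte-identical files map to the same key no matter where they live; a one-line sketch (paths made up):

    assert _uuid_from_file(b'/tmp/a.png') == _uuid_from_file(b'/tmp/copy_of_a.png')
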
def _iter_files(paths, check_ext=None):
    # note, sorting isn't needed,
    # it just gives predictable output
    for p in paths:
        p = os.path.abspath(p)
        for dirpath, dirnames, filenames in sorted(os.walk(p)):
            # skip '.svn'
            if dirpath.startswith(b'.') and dirpath != b'.':
                continue

            for filename in sorted(filenames):
                if check_ext is None or check_ext(filename):
                    filepath = os.path.join(dirpath, filename)
                    yield filepath


# ----------------------------------------------------------------------------
# Public Functions

def start(
        paths,
        is_quiet=False,
        dry_run=False,
        use_json=False,
        ):

    if use_json:
        warn = _warn__json
        info = _info__json
    else:
        warn = _warn__ascii
        info = _info__ascii

    if use_json:
        print("[")

    # {uuid: "filepath"}, uuid being size + sha512 (see _uuid_from_file)
    remap_uuid = {}

    # relative paths which don't exist,
    # don't complain when they're missing on remap.
    # {f_src: [relative path deps, ...]}
    remap_lost = {}

    # all files we need to map,
    # absolute paths
    files_to_map = set()

    # TODO, validate paths aren't nested! ["/foo", "/foo/bar"]
    # it will cause problems touching files twice!

    # ------------------------------------------------------------------------
    # First walk over all blends
    from bam.blend import blendfile_path_walker

    for blendfile_src in _iter_files(paths, check_ext=_is_blend):
        if not is_quiet:
            info("blend read: %r" % blendfile_src)

        remap_lost[blendfile_src] = remap_lost_blendfile_src = set()

        for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
                blendfile_src,
                readonly=True,
                recursive=False,
                ):
            # TODO. warn when referencing files outside 'paths'

            # so we can update the reference
            f_abs = fp.filepath_absolute
            f_abs = os.path.normpath(f_abs)
            if os.path.exists(f_abs):
                files_to_map.add(f_abs)
            else:
                if not is_quiet:
                    warn("file %r not found!" % f_abs)

                # don't complain about this file being missing on remap
                remap_lost_blendfile_src.add(fp.filepath)

        # so we can know where it's moved to
        files_to_map.add(blendfile_src)
    del blendfile_path_walker

    # ------------------------------------------------------------------------
    # Store UUID
    #
    # note, sorting is only to give predictable warnings/behavior
    for f in sorted(files_to_map):
        f_uuid = _uuid_from_file(f)

        f_match = remap_uuid.get(f_uuid)
        if f_match is not None:
            if not is_quiet:
                warn("duplicate file found! (%r, %r)" % (f_match, f))

        remap_uuid[f_uuid] = f

    # now find all deps
    remap_data_args = (
        remap_uuid,
        remap_lost,
        )

    if use_json:
        if not remap_uuid:
            print("\"nothing to remap!\"")
        else:
            print("\"complete\"")
        print("]")
    else:
        if not remap_uuid:
            print("Nothing to remap!")

    return remap_data_args


def finish(
        paths, remap_data_args,
        is_quiet=False,
        force_relative=False,
        dry_run=False,
        use_json=False,
        ):

    if use_json:
        warn = _warn__json
        info = _info__json
    else:
        warn = _warn__ascii
        info = _info__ascii

    if use_json:
        print("[")

    (remap_uuid,
     remap_lost,
     ) = remap_data_args

    remap_src_to_dst = {}
    remap_dst_to_src = {}

    for f_dst in _iter_files(paths):
        f_uuid = _uuid_from_file(f_dst)
        f_src = remap_uuid.get(f_uuid)
        if f_src is not None:
            remap_src_to_dst[f_src] = f_dst
            remap_dst_to_src[f_dst] = f_src

    # now the fun begins, remap _all_ paths
    from bam.blend import blendfile_path_walker

    for blendfile_dst in _iter_files(paths, check_ext=_is_blend):
        blendfile_src = remap_dst_to_src.get(blendfile_dst)
        if blendfile_src is None:
            if not is_quiet:
                warn("new blendfile added since beginning 'remap': %r" % blendfile_dst)
            continue

        # not essential, just so we can give more meaningful errors
        remap_lost_blendfile_src = remap_lost[blendfile_src]

        if not is_quiet:
            info("blend write: %r -> %r" % (blendfile_src, blendfile_dst))

        blendfile_src_basedir = os.path.dirname(blendfile_src)
        blendfile_dst_basedir = os.path.dirname(blendfile_dst)
        for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
                blendfile_dst,
                readonly=False,
                recursive=False,
                ):
            # TODO. warn when referencing files outside 'paths'

            # so we can update the reference
            f_src_orig = fp.filepath

            if f_src_orig in remap_lost_blendfile_src:
                # this file never existed, so we can't remap it
                continue

            is_relative = f_src_orig.startswith(b'//')
            if is_relative:
                f_src_abs = fp.filepath_absolute_resolve(basedir=blendfile_src_basedir)
            else:
                f_src_abs = f_src_orig

            f_src_abs = os.path.normpath(f_src_abs)
            f_dst_abs = remap_src_to_dst.get(f_src_abs)

            if f_dst_abs is None:
                if not is_quiet:
                    warn("file %r not found in map!" % f_src_abs)
                continue

            # now remap!
            if is_relative or force_relative:
                f_dst_final = b'//' + os.path.relpath(f_dst_abs, blendfile_dst_basedir)
            else:
                f_dst_final = f_dst_abs

            if f_dst_final != f_src_orig:
                if not dry_run:
                    fp.filepath = f_dst_final
                if not is_quiet:
                    info("remap %r -> %r" % (f_src_abs, f_dst_abs))

    del blendfile_path_walker

    if use_json:
        print("\"complete\"\n]")
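start()/finish() form a two-phase protocol: snapshot content uuids, let the caller move files on disk, then rewrite every blend reference to the new locations; a sketch (paths made up):

    import shutil

    remap_data = start([b'/project'])       # phase 1: record uuid -> path
    shutil.move(b'/project/tex/wood.png',
                b'/project/textures/wood.png')
    finish([b'/project'], remap_data)       # phase 2: update paths in all blends
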
bam/blend/blendfile_path_walker.py  (new file, 792 lines)
@@ -0,0 +1,792 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# ***** BEGIN GPL LICENSE BLOCK *****
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
#
|
||||
# ***** END GPL LICENCE BLOCK *****
|
||||
|
||||
import os
|
||||
VERBOSE = os.environ.get('BAM_VERBOSE', False)
|
||||
TIMEIT = False
|
||||
|
||||
|
||||
class C_defs:
|
||||
__slots__ = ()
|
||||
|
||||
def __new__(cls, *args, **kwargs):
|
||||
raise RuntimeError("%s should not be instantiated" % cls)
|
||||
|
||||
# DNA_sequence_types.h (Sequence.type)
|
||||
SEQ_TYPE_IMAGE = 0
|
||||
SEQ_TYPE_META = 1
|
||||
SEQ_TYPE_SCENE = 2
|
||||
SEQ_TYPE_MOVIE = 3
|
||||
SEQ_TYPE_SOUND_RAM = 4
|
||||
SEQ_TYPE_SOUND_HD = 5
|
||||
SEQ_TYPE_MOVIECLIP = 6
|
||||
SEQ_TYPE_MASK = 7
|
||||
SEQ_TYPE_EFFECT = 8
|
||||
|
||||
IMA_SRC_FILE = 1
|
||||
IMA_SRC_SEQUENCE = 2
|
||||
IMA_SRC_MOVIE = 3
|
||||
|
||||
|
||||
if VERBOSE:
|
||||
import logging
|
||||
log_deps = logging.getLogger("path_walker")
|
||||
del logging
|
||||
|
||||
def set_as_str(s):
|
||||
if s is None:
|
||||
return "None"
|
||||
else:
|
||||
return (", ".join(sorted(i.decode('ascii') for i in sorted(s))))
|
||||
|
||||
|
||||
class FPElem:
    """
    Tiny filepath class to hide blendfile.
    """

    __slots__ = (
        "basedir",

        # library link level
        "level",

        # True when this is a part of a sequence (image or movieclip)
        "is_sequence",

        "userdata",
        )

    def __init__(self, basedir, level,
                 # subclasses get/set functions should use
                 userdata):
        self.basedir = basedir
        self.level = level
        self.is_sequence = False

        # subclass must call
        self.userdata = userdata

    def files_siblings(self):
        return ()

    # --------
    # filepath

    def filepath_absolute_resolve(self, basedir=None):
        """
        Resolve the filepath, with the option to override the basedir.
        """
        filepath = self.filepath
        if filepath.startswith(b'//'):
            if basedir is None:
                basedir = self.basedir
            return os.path.normpath(os.path.join(
                    basedir,
                    utils.compatpath(filepath[2:]),
                    ))
        else:
            return utils.compatpath(filepath)

    @property
    def filepath(self):
        return self._get_cb()

    @filepath.setter
    def filepath(self, filepath):
        self._set_cb(filepath)

    @property
    def filepath_absolute(self):
        return self.filepath_absolute_resolve()


class FPElem_block_path(FPElem):
    """
    Simple block-path:
        userdata = (block, path)
    """
    __slots__ = ()

    def _get_cb(self):
        block, path = self.userdata
        return block[path]

    def _set_cb(self, filepath):
        block, path = self.userdata
        block[path] = filepath


class FPElem_sequence_single(FPElem):
    """
    Movie sequence:
        userdata = (block, path, sub_block, sub_path)
    """
    __slots__ = ()

    def _get_cb(self):
        block, path, sub_block, sub_path = self.userdata
        return block[path] + sub_block[sub_path]

    def _set_cb(self, filepath):
        block, path, sub_block, sub_path = self.userdata

        head, sep, tail = utils.splitpath(filepath)

        block[path] = head + sep
        sub_block[sub_path] = tail


class FPElem_sequence_image_seq(FPElem_sequence_single):
    """
    Image sequence:
        userdata = (block, path, sub_block, sub_path)
    """
    __slots__ = ()

    def files_siblings(self):
        block, path, sub_block, sub_path = self.userdata

        array = block.get_pointer(b'stripdata')
        files = [array.get(b'name', use_str=False, base_index=i) for i in range(array.count)]
        return files

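# Example sketch: the '//' prefix marks a blend-relative path, resolved
# against the blend file's own directory, as FPElem.filepath_absolute_resolve()
# does above.  Standalone illustration (hypothetical values, POSIX separators):
import os

def _resolve(filepath, basedir):
    if filepath.startswith(b'//'):  # blend-relative
        return os.path.normpath(os.path.join(basedir, filepath[2:]))
    return filepath  # already absolute

assert _resolve(b'//textures/wood.png', b'/projects/lib') == b'/projects/lib/textures/wood.png'
assert _resolve(b'/tmp/wood.png', b'/projects/lib') == b'/tmp/wood.png'
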
class FilePath:
    __slots__ = ()

    def __new__(cls, *args, **kwargs):
        raise RuntimeError("%s should not be instantiated" % cls)

    # ------------------------------------------------------------------------
    # Main function to visit paths
    @staticmethod
    def visit_from_blend(
            filepath,

            # never modify the blend
            readonly=True,
            # callback that creates a temp file and returns its path.
            temp_remap_cb=None,

            # recursive options
            recursive=False,
            # recurse all indirectly linked data
            # (not just from the initially referenced blend file)
            recursive_all=False,
            # list of ID block names we want to load, or None to load all
            block_codes=None,
            # root when we're loading libs indirectly
            rootdir=None,
            level=0,
            # dict of id's used so we don't follow these links again
            # prevents cyclic references too!
            # {lib_path: set([block id's ...])}
            lib_visit=None,
            ):

        # print(level, block_codes)
        import os

        if VERBOSE:
            indent_str = " " * level
            # print(indent_str + "Opening:", filepath)
            # print(indent_str + "... blocks:", block_codes)

            log_deps.info("~")
            log_deps.info("%s%s" % (indent_str, filepath.decode('utf-8')))
            log_deps.info("%s%s" % (indent_str, set_as_str(block_codes)))

        basedir = os.path.dirname(os.path.abspath(filepath))
        if rootdir is None:
            rootdir = basedir

        if lib_visit is None:
            lib_visit = {}

        if recursive and (level > 0) and (block_codes is not None) and (recursive_all is False):
            # prevent expanding the
            # same datablock more than once
            expand_codes = set()
            # {lib_id: {block_ids... }}
            expand_codes_idlib = {}

            # libraries used by this blend
            block_codes_idlib = set()

            # only for this block
            def _expand_codes_add_test(block, code):
                # return True, if the ID should be searched further
                #
                # we could investigate a better way...
                # not ideal to be accessing ID blocks at this point, but it's harmless
                if code == b'ID':
                    assert(code == block.code)
                    if recursive:
                        expand_codes_idlib.setdefault(block[b'lib'], set()).add(block[b'name'])
                    return False
                else:
                    id_name = block[b'id.name']

                    # if we touched this already, don't touch again
                    # FIXME, works in some cases but not others
                    '''
                    if id_name not in block_codes:
                        return False
                    '''

                    len_prev = len(expand_codes)
                    expand_codes.add(id_name)
                    return (len_prev != len(expand_codes))

            def block_expand(block, code):
                assert(block.code == code)
                if _expand_codes_add_test(block, code):
                    yield block

                    assert(block.code == code)
                    fn = ExpandID.expand_funcs.get(code)
                    if fn is not None:
                        for sub_block in fn(block):
                            if sub_block is not None:
                                yield from block_expand(sub_block, sub_block.code)
                else:
                    if code == b'ID':
                        yield block
        else:
            expand_codes = None

            # set below
            expand_codes_idlib = None

            # never set
            block_codes_idlib = None

            def block_expand(block, code):
                assert(block.code == code)
                yield block

        # ------
        # Define
        #
        # - iter_blocks_id(code)
        # - iter_blocks_idlib()
        if block_codes is None:
            def iter_blocks_id(code):
                return blend.find_blocks_from_code(code)

            def iter_blocks_idlib():
                return blend.find_blocks_from_code(b'LI')
        else:
            def iter_blocks_id(code):
                for block in blend.find_blocks_from_code(code):
                    if block[b'id.name'] in block_codes:
                        yield from block_expand(block, code)

            if block_codes_idlib is not None:
                def iter_blocks_idlib():
                    for block in blend.find_blocks_from_code(b'LI'):
                        # TODO, this should work but in fact makes some libs not link correctly.
                        if block[b'name'] in block_codes_idlib:
                            yield from block_expand(block, b'LI')
            else:
                def iter_blocks_idlib():
                    return blend.find_blocks_from_code(b'LI')

        if temp_remap_cb is not None:
            filepath_tmp = temp_remap_cb(filepath, rootdir)
        else:
            filepath_tmp = filepath

        # store info to pass along with each iteration
        extra_info = rootdir, os.path.basename(filepath)

        from bam.blend import blendfile
        blend = blendfile.open_blend(filepath_tmp, "rb" if readonly else "r+b")

        for code in blend.code_index.keys():
            # handle library blocks as special case
            if ((len(code) != 2) or
                    (code in {
                        # libraries handled below
                        b'LI',
                        b'ID',
                        # unneeded
                        b'WM',
                        b'SN',  # bScreen
                        })):

                continue

            # if VERBOSE:
            #     print(" Scanning", code)

            for block in iter_blocks_id(code):
                yield from FilePath.from_block(block, basedir, extra_info, level)

        # print("A:", expand_codes)
        # print("B:", block_codes)
        if VERBOSE:
            log_deps.info("%s%s" % (indent_str, set_as_str(expand_codes)))

        if recursive:

            if expand_codes_idlib is None:
                expand_codes_idlib = {}
                for block in blend.find_blocks_from_code(b'ID'):
                    expand_codes_idlib.setdefault(block[b'lib'], set()).add(block[b'name'])

            # look into libraries
            lib_all = []

            for lib_id, lib_block_codes in sorted(expand_codes_idlib.items()):
                lib = blend.find_block_from_offset(lib_id)
                lib_path = lib[b'name']

                # get all data needed to read the blend files here (it will be freed!)
                # lib is an address at the moment, we only use it as a way to group

                lib_all.append((lib_path, lib_block_codes))
                # import IPython; IPython.embed()

                # ensure we expand indirectly linked libs
                if block_codes_idlib is not None:
                    block_codes_idlib.add(lib_path)

        # do this after, in case we mangle names above
        for block in iter_blocks_idlib():
            yield from FilePath.from_block(block, basedir, extra_info, level)

        blend.close()

        # ----------------
        # Handle Recursive
        if recursive:
            # now we've closed the file, loop on other files

            # note, sorting isn't needed, it just gives a predictable load-order.
            for lib_path, lib_block_codes in lib_all:
                lib_path_abs = os.path.normpath(utils.compatpath(utils.abspath(lib_path, basedir)))

                # if we visited this before,
                # check we don't follow the same links more than once
                lib_block_codes_existing = lib_visit.setdefault(lib_path_abs, set())
                lib_block_codes -= lib_block_codes_existing

                # don't touch them again
                lib_block_codes_existing.update(lib_block_codes)

                # print("looking for", lib_block_codes)

                if not lib_block_codes:
                    if VERBOSE:
                        print((indent_str + " "), "Library Skipped (visited): ", filepath, " -> ", lib_path_abs, sep="")
                    continue

                if not os.path.exists(lib_path_abs):
                    if VERBOSE:
                        print((indent_str + " "), "Library Missing: ", filepath, " -> ", lib_path_abs, sep="")
                    continue

                # import IPython; IPython.embed()
                if VERBOSE:
                    print((indent_str + " "), "Library: ", filepath, " -> ", lib_path_abs, sep="")
                    # print((indent_str + " "), lib_block_codes)
                yield from FilePath.visit_from_blend(
                        lib_path_abs,
                        readonly=readonly,
                        temp_remap_cb=temp_remap_cb,
                        recursive=True,
                        block_codes=lib_block_codes,
                        rootdir=rootdir,
                        level=level + 1,
                        lib_visit=lib_visit,
                        )
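    # Example sketch: visit_from_blend() is the public entry point; it yields
    # (fp_elem, (rootdir, blend_basename)) pairs, and with recursive=True it
    # follows library links, using lib_visit to avoid revisiting blocks.
    # A hedged usage sketch (the path is hypothetical):
    #
    #     for fp, (rootdir, basename) in FilePath.visit_from_blend(
    #             b'/projects/shot/scene.blend',
    #             readonly=True,
    #             recursive=True,
    #             ):
    #         print(fp.filepath, b'->', fp.filepath_absolute)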

    # ------------------------------------------------------------------------
    # Direct filepaths from Blocks
    #
    # (no expanding or following references)

    @staticmethod
    def from_block(block, basedir, extra_info, level):
        assert(block.code != b'DATA')
        fn = FilePath._from_block_dict.get(block.code)
        if fn is not None:
            yield from fn(block, basedir, extra_info, level)

    @staticmethod
    def _from_block_MC(block, basedir, extra_info, level):
        # TODO, image sequence
        fp = FPElem_block_path(basedir, level, (block, b'name'))
        fp.is_sequence = True
        yield fp, extra_info

    @staticmethod
    def _from_block_IM(block, basedir, extra_info, level):
        # old files miss this
        image_source = block.get(b'source', C_defs.IMA_SRC_FILE)
        if image_source not in {C_defs.IMA_SRC_FILE, C_defs.IMA_SRC_SEQUENCE, C_defs.IMA_SRC_MOVIE}:
            return
        if block[b'packedfile']:
            return

        fp = FPElem_block_path(basedir, level, (block, b'name'))
        if image_source == C_defs.IMA_SRC_SEQUENCE:
            fp.is_sequence = True
        yield fp, extra_info

    @staticmethod
    def _from_block_VF(block, basedir, extra_info, level):
        if block[b'packedfile']:
            return
        if block[b'name'] != b'<builtin>':  # builtin font
            yield FPElem_block_path(basedir, level, (block, b'name')), extra_info

    @staticmethod
    def _from_block_SO(block, basedir, extra_info, level):
        if block[b'packedfile']:
            return
        yield FPElem_block_path(basedir, level, (block, b'name')), extra_info

    @staticmethod
    def _from_block_ME(block, basedir, extra_info, level):
        block_external = block.get_pointer(b'ldata.external', None)
        if block_external is None:
            block_external = block.get_pointer(b'fdata.external', None)

        if block_external is not None:
            yield FPElem_block_path(basedir, level, (block_external, b'filename')), extra_info

    @staticmethod
    def _from_block_SC(block, basedir, extra_info, level):
        block_ed = block.get_pointer(b'ed')
        if block_ed is not None:
            sdna_index_Sequence = block.file.sdna_index_from_id[b'Sequence']

            def seqbase(someseq):
                for item in someseq:
                    item_type = item.get(b'type', sdna_index_refine=sdna_index_Sequence)

                    if item_type >= C_defs.SEQ_TYPE_EFFECT:
                        pass
                    elif item_type == C_defs.SEQ_TYPE_META:
                        yield from seqbase(bf_utils.iter_ListBase(item.get_pointer(b'seqbase.first', sdna_index_refine=sdna_index_Sequence)))
                    else:
                        item_strip = item.get_pointer(b'strip', sdna_index_refine=sdna_index_Sequence)
                        if item_strip is None:  # unlikely!
                            continue
                        item_stripdata = item_strip.get_pointer(b'stripdata')

                        if item_type == C_defs.SEQ_TYPE_IMAGE:
                            yield FPElem_sequence_image_seq(basedir, level, (item_strip, b'dir', item_stripdata, b'name')), extra_info
                        elif item_type == C_defs.SEQ_TYPE_MOVIE:
                            yield FPElem_sequence_single(basedir, level, (item_strip, b'dir', item_stripdata, b'name')), extra_info
                        elif item_type == C_defs.SEQ_TYPE_SOUND_RAM:
                            pass
                        elif item_type == C_defs.SEQ_TYPE_SOUND_HD:
                            pass

            yield from seqbase(bf_utils.iter_ListBase(block_ed.get_pointer(b'seqbase.first')))

    @staticmethod
    def _from_block_LI(block, basedir, extra_info, level):
        if block.get(b'packedfile', None):
            return

        yield FPElem_block_path(basedir, level, (block, b'name')), extra_info

    # _from_block_IM --> {b'IM': _from_block_IM, ...}
    _from_block_dict = {
        k.rpartition("_")[2].encode('ascii'): s_fn.__func__ for k, s_fn in locals().items()
        if isinstance(s_fn, staticmethod)
        if k.startswith("_from_block_")
        }

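# Example sketch of the dispatch-table idiom used by _from_block_dict above:
# while a class body executes, locals() already holds each staticmethod
# defined so far, so a dict comprehension can key them by their name suffix.
# The class and handlers below are hypothetical, for illustration only.
class _DemoDispatch:
    @staticmethod
    def _handle_IM(block):
        return "image"

    @staticmethod
    def _handle_SO(block):
        return "sound"

    # .__func__ unwraps each staticmethod into a plain function
    _handlers = {
        k.rpartition("_")[2].encode('ascii'): s_fn.__func__
        for k, s_fn in locals().items()
        if isinstance(s_fn, staticmethod)
        if k.startswith("_handle_")
        }

assert _DemoDispatch._handlers[b'IM'](None) == "image"
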
class bf_utils:
    @staticmethod
    def iter_ListBase(block):
        while block:
            yield block
            block = block.file.find_block_from_offset(block[b'next'])

    @staticmethod
    def iter_array(block, length=-1):
        assert(block.code == b'DATA')
        from bam.blend import blendfile
        import os
        handle = block.file.handle
        header = block.file.header

        for i in range(length):
            block.file.handle.seek(block.file_offset + (header.pointer_size * i), os.SEEK_SET)
            offset = blendfile.DNA_IO.read_pointer(handle, header)
            sub_block = block.file.find_block_from_offset(offset)
            yield sub_block

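# Example sketch: iter_ListBase() walks Blender's ListBase convention, where
# each block stores the file offset of the next block and offset 0 ends the
# chain.  The same shape in plain Python, with dicts standing in for blocks
# (illustrative only):
def _iter_chain(node, nodes_by_addr):
    while node:
        yield node
        node = nodes_by_addr.get(node['next'])

_nodes = {1: {'name': 'a', 'next': 2}, 2: {'name': 'b', 'next': 0}}
assert [n['name'] for n in _iter_chain(_nodes[1], _nodes)] == ['a', 'b']
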
# -----------------------------------------------------------------------------
# ID Expand

class ExpandID:
    # fake module
    #
    # TODO:
    #
    # Array lookups here are _WAY_ too complicated,
    # we need some nicer way to represent pointer indirection (easy like in C!)
    # but for now, use what we have.
    #
    __slots__ = ()

    def __new__(cls, *args, **kwargs):
        raise RuntimeError("%s should not be instantiated" % cls)

    @staticmethod
    def _expand_generic_material(block):
        array_len = block.get(b'totcol')
        if array_len != 0:
            array = block.get_pointer(b'mat')
            for sub_block in bf_utils.iter_array(array, array_len):
                yield sub_block

    @staticmethod
    def _expand_generic_mtex(block):
        field = block.dna_type.field_from_name[b'mtex']
        array_len = field.dna_size // block.file.header.pointer_size

        for i in range(array_len):
            path = ('mtex[%d]' % i).encode('ascii')
            item = block.get_pointer(path)
            if item:
                yield item.get_pointer(b'tex')
                yield item.get_pointer(b'object')

    @staticmethod
    def _expand_generic_nodetree(block):
        assert(block.dna_type.dna_type_id == b'bNodeTree')

        sdna_index_bNode = block.file.sdna_index_from_id[b'bNode']
        for item in bf_utils.iter_ListBase(block.get_pointer(b'nodes.first')):
            item_type = item.get(b'type', sdna_index_refine=sdna_index_bNode)

            if item_type != 221:  # CMP_NODE_R_LAYERS
                yield item.get_pointer(b'id', sdna_index_refine=sdna_index_bNode)

    @staticmethod
    def _expand_generic_nodetree_id(block):
        block_ntree = block.get_pointer(b'nodetree', None)
        if block_ntree is not None:
            yield from ExpandID._expand_generic_nodetree(block_ntree)

    @staticmethod
    def _expand_generic_animdata(block):
        block_adt = block.get_pointer(b'adt')
        if block_adt:
            yield block_adt.get_pointer(b'action')
        # TODO, NLA

    @staticmethod
    def expand_OB(block):  # 'Object'
        yield from ExpandID._expand_generic_animdata(block)
        yield block.get_pointer(b'data')
        yield block.get_pointer(b'dup_group')

        yield block.get_pointer(b'proxy')
        yield block.get_pointer(b'proxy_group')

        # 'ob->pose->chanbase[...].custom'
        block_pose = block.get_pointer(b'pose')
        if block_pose is not None:
            assert(block_pose.dna_type.dna_type_id == b'bPose')
            sdna_index_bPoseChannel = block_pose.file.sdna_index_from_id[b'bPoseChannel']
            for item in bf_utils.iter_ListBase(block_pose.get_pointer(b'chanbase.first')):
                item_custom = item.get_pointer(b'custom', sdna_index_refine=sdna_index_bPoseChannel)
                if item_custom is not None:
                    yield item_custom

    @staticmethod
    def expand_ME(block):  # 'Mesh'
        yield from ExpandID._expand_generic_animdata(block)
        yield from ExpandID._expand_generic_material(block)

    @staticmethod
    def expand_CU(block):  # 'Curve'
        yield from ExpandID._expand_generic_animdata(block)
        yield from ExpandID._expand_generic_material(block)

        sub_block = block.get_pointer(b'vfont')
        if sub_block is not None:
            yield sub_block
            yield block.get_pointer(b'vfontb')
            yield block.get_pointer(b'vfonti')
            yield block.get_pointer(b'vfontbi')

    @staticmethod
    def expand_MB(block):  # 'MBall'
        yield from ExpandID._expand_generic_animdata(block)
        yield from ExpandID._expand_generic_material(block)

    @staticmethod
    def expand_LA(block):  # 'Lamp'
        yield from ExpandID._expand_generic_animdata(block)
        yield from ExpandID._expand_generic_nodetree_id(block)
        yield from ExpandID._expand_generic_mtex(block)

    @staticmethod
    def expand_MA(block):  # 'Material'
        yield from ExpandID._expand_generic_animdata(block)
        yield from ExpandID._expand_generic_nodetree_id(block)
        yield from ExpandID._expand_generic_mtex(block)

        yield block.get_pointer(b'group')

    @staticmethod
    def expand_TE(block):  # 'Tex'
        yield from ExpandID._expand_generic_animdata(block)
        yield from ExpandID._expand_generic_nodetree_id(block)
        yield block.get_pointer(b'ima')

    @staticmethod
    def expand_WO(block):  # 'World'
        yield from ExpandID._expand_generic_animdata(block)
        yield from ExpandID._expand_generic_nodetree_id(block)
        yield from ExpandID._expand_generic_mtex(block)

    @staticmethod
    def expand_NT(block):  # 'bNodeTree'
        yield from ExpandID._expand_generic_animdata(block)
        yield from ExpandID._expand_generic_nodetree(block)

    @staticmethod
    def expand_SC(block):  # 'Scene'
        yield from ExpandID._expand_generic_animdata(block)
        yield from ExpandID._expand_generic_nodetree_id(block)
        yield block.get_pointer(b'world')

        sdna_index_Base = block.file.sdna_index_from_id[b'Base']
        for item in bf_utils.iter_ListBase(block.get_pointer(b'base.first')):
            yield item.get_pointer(b'object', sdna_index_refine=sdna_index_Base)

        block_ed = block.get_pointer(b'ed')
        if block_ed is not None:
            sdna_index_Sequence = block.file.sdna_index_from_id[b'Sequence']

            def seqbase(someseq):
                for item in someseq:
                    item_type = item.get(b'type', sdna_index_refine=sdna_index_Sequence)

                    if item_type >= C_defs.SEQ_TYPE_EFFECT:
                        pass
                    elif item_type == C_defs.SEQ_TYPE_META:
                        yield from seqbase(bf_utils.iter_ListBase(item.get_pointer(b'seqbase.first', sdna_index_refine=sdna_index_Sequence)))
                    else:
                        if item_type == C_defs.SEQ_TYPE_SCENE:
                            yield item.get_pointer(b'scene')
                        elif item_type == C_defs.SEQ_TYPE_MOVIECLIP:
                            yield item.get_pointer(b'clip')
                        elif item_type == C_defs.SEQ_TYPE_MASK:
                            yield item.get_pointer(b'mask')

            yield from seqbase(bf_utils.iter_ListBase(block_ed.get_pointer(b'seqbase.first')))

    @staticmethod
    def expand_GR(block):  # 'Group'
        sdna_index_GroupObject = block.file.sdna_index_from_id[b'GroupObject']
        for item in bf_utils.iter_ListBase(block.get_pointer(b'gobject.first')):
            yield item.get_pointer(b'ob', sdna_index_refine=sdna_index_GroupObject)

    # expand_GR --> {b'GR': expand_GR, ...}
    expand_funcs = {
        k.rpartition("_")[2].encode('ascii'): s_fn.__func__ for k, s_fn in locals().items()
        if isinstance(s_fn, staticmethod)
        if k.startswith("expand_")
        }

# -----------------------------------------------------------------------------
# Packing Utility


class utils:
    # fake module
    __slots__ = ()

    def __new__(cls, *args, **kwargs):
        raise RuntimeError("%s should not be instantiated" % cls)

    @staticmethod
    def abspath(path, start, library=None):
        import os
        if path.startswith(b'//'):
            # if library:
            #     start = os.path.dirname(abspath(library.filepath))
            return os.path.join(start, path[2:])
        return path

    if __import__("os").sep == '/':
        @staticmethod
        def compatpath(path):
            return path.replace(b'\\', b'/')
    else:
        @staticmethod
        def compatpath(path):
            # keep '//'
            return path[:2] + path[2:].replace(b'/', b'\\')

    @staticmethod
    def splitpath(path):
        """
        Split the path at the rightmost path separator (slash or backslash),
        returning a (head, sep, tail) triple like bytes.rpartition().
        """
        split1 = path.rpartition(b'/')
        split2 = path.rpartition(b'\\')
        if len(split1[0]) > len(split2[0]):
            return split1
        else:
            return split2
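    # Behavior sketch for splitpath() (illustrative values):
    #   utils.splitpath(b'textures/seq\\0001.png')
    #       -> (b'textures/seq', b'\\', b'0001.png')
    #   utils.splitpath(b'textures/wood.png')
    #       -> (b'textures', b'/', b'wood.png')
    # On a POSIX host, compatpath() normalizes the separators first:
    #   utils.compatpath(b'tex\\wood.png') -> b'tex/wood.png'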
    @staticmethod
    def find_sequence_paths(filepath, use_fullpath=True):
        # supports str, byte paths
        basedir, filename = os.path.split(filepath)
        if not os.path.exists(basedir):
            return []

        filename_noext, ext = os.path.splitext(filename)

        from string import digits
        if isinstance(filepath, bytes):
            digits = digits.encode()
        filename_nodigits = filename_noext.rstrip(digits)

        if len(filename_nodigits) == len(filename_noext):
            # input isn't from a sequence
            return []

        files = os.listdir(basedir)
        files[:] = [
            f for f in files
            if f.startswith(filename_nodigits) and
            f.endswith(ext) and
            f[len(filename_nodigits):-len(ext) if ext else -1].isdigit()
            ]
        if use_fullpath:
            files[:] = [
                os.path.join(basedir, f) for f in files
                ]

        return files
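    # Example sketch: any single frame of an on-disk image sequence recovers
    # the whole set of sibling frames (paths hypothetical):
    #   utils.find_sequence_paths(b'/shots/seq/frame_0001.png')
    #       -> [b'/shots/seq/frame_0001.png', ..., b'/shots/seq/frame_0250.png']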
1260
bam/cli.py
Executable file
File diff suppressed because it is too large
93
bam/utils/system.py
Normal file
@@ -0,0 +1,93 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****

_USE_COLOR = True
if _USE_COLOR:
    color_codes = {
        'black':          '\033[0;30m',
        'bright_gray':    '\033[0;37m',
        'blue':           '\033[0;34m',
        'white':          '\033[1;37m',
        'green':          '\033[0;32m',
        'bright_blue':    '\033[1;34m',
        'cyan':           '\033[0;36m',
        'bright_green':   '\033[1;32m',
        'red':            '\033[0;31m',
        'bright_cyan':    '\033[1;36m',
        'purple':         '\033[0;35m',
        'bright_red':     '\033[1;31m',
        'yellow':         '\033[0;33m',
        'bright_purple':  '\033[1;35m',
        'dark_gray':      '\033[1;30m',
        'bright_yellow':  '\033[1;33m',
        'normal':         '\033[0m',
        }

    def colorize(msg, color=None):
        return (color_codes[color] + msg + color_codes['normal'])
else:
    def colorize(msg, color=None):
        return msg
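# Example: colorize() brackets a message with an ANSI escape pair and always
# resets to 'normal'; with _USE_COLOR = False it degrades to a pass-through.
#   colorize("OK", color='green')  ->  '\033[0;32mOK\033[0m'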

def uuid_from_file(fn, block_size=1 << 20):
    """
    Returns an arbitrary sized unique ASCII string based on the file contents.
    (exact hashing method may change).
    """
    with open(fn, 'rb') as f:
        # first get the size
        import os
        f.seek(0, os.SEEK_END)
        size = f.tell()
        f.seek(0, os.SEEK_SET)
        del os
        # done!

        import hashlib
        sha512 = hashlib.new('sha512')
        while True:
            data = f.read(block_size)
            if not data:
                break
            sha512.update(data)
        # skip the '0x'
        return hex(size)[2:] + sha512.hexdigest()
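# Example: the uuid is '<file-size-in-hex>' + '<sha512 hexdigest>', so equal
# content always maps to the same id regardless of filename (path hypothetical):
#   uuid_from_file('/tmp/example.blend')
#       -> size-in-hex prefix followed by 128 hex digest characters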

def write_json_to_zip(zip_handle, path, data=None):
    import json
    zip_handle.writestr(
        path,
        json.dumps(
            data,
            check_circular=False,
            # optional (pretty)
            sort_keys=True, indent=4, separators=(',', ': '),
            ).encode('utf-8'))


def write_json_to_file(path, data):
    import json
    with open(path, 'w') as file_handle:
        json.dump(
            data, file_handle, ensure_ascii=False,
            check_circular=False,
            # optional (pretty)
            sort_keys=True, indent=4, separators=(',', ': '),
            )
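# Example usage: both helpers emit the same pretty-printed JSON, one into a
# zip entry and one onto disk (paths and data hypothetical):
#
#   import zipfile
#   write_json_to_file('/tmp/deps.json', {"blend": "scene.blend", "deps": 3})
#   with zipfile.ZipFile('/tmp/pack.zip', 'w') as zh:
#       write_json_to_zip(zh, '.bam_paths.json', {"remap": {}})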