Initial revision

This commit is contained in:
Hans Lambermont
2002-10-12 11:37:38 +00:00
commit 12315f4d0e
1699 changed files with 444708 additions and 0 deletions

View File

@@ -0,0 +1,6 @@
#mcf 'vendor' packages
#These packages are free software, provided without warranty or
#guarantee, if you use them, you must agree to use them at your
#own risk. Please see the file license.txt for full license
#details.

View File

@@ -0,0 +1,6 @@
'''
mcf.utils package
'''

View File

@@ -0,0 +1,169 @@
'''
Destructive Functions for "collapsing" Sequences into single levels
>>> from mcf.utils import collapse
>>> collapse.test([[[1],[2,3]],[[]],[4],5,[6]])
[1, 2, 3, 4, 5, 6] # note that this is the same root list
>>> collapse.collapse2([[[1],[2,3]],[[]],(4,()),(5,),[6]])
[1, 2, 3, 4, 5, 6] # note this is the same root list
'''
import copy, types, sys
from types import ListType, TupleType # this now only supports the obsolete stuff...
def hyperCollapse( inlist, allowedmap, type=type, list=list, itype=types.InstanceType, maxint= sys.maxint):
	'''
	Destructively flatten a mixed hierarchy to a single level.
	Non-recursive, many speedups and obfuscations by Tim Peters :)

	inlist -- list flattened in place (also returned for convenience)
	allowedmap -- mapping whose keys are the types/classes whose
		instances should be expanded into their elements
	type, list, itype, maxint -- default-argument speed hacks
		(locals are faster than globals); not part of the public
		interface.  NOTE(review): legacy Python 1.5/2.x code --
		types.InstanceType, sys.maxint, xrange and has_key no
		longer exist in Python 3.
	'''
	try:
		# for every possible index
		for ind in xrange( maxint):
			# while that index currently holds a list
			expandable = 1
			while expandable:
				expandable = 0
				if allowedmap.has_key( type(inlist[ind]) ):
					# expand that list into the index (and subsequent indicies)
					inlist[ind:ind+1] = list( inlist[ind])
					expandable = 1
				# alternately you could iterate through checking for isinstance on all possible
				# classes, but that would be very slow
				elif type( inlist[ind] ) is itype and allowedmap.has_key( inlist[ind].__class__ ):
					# here figure out some way to generically expand that doesn't risk
					# infinite loops...
					templist = []
					for x in inlist[ind]:
						templist.append( x)
					inlist[ind:ind+1] = templist
					expandable = 1
	except IndexError:
		# ran off the end of inlist: flattening is complete
		pass
	return inlist
def collapse(inlist, type=type, ltype=types.ListType, maxint= sys.maxint):
	'''
	Destructively flatten a list hierarchy to a single level.
	Non-recursive, and (as far as I can see, doesn't have any
	glaring loopholes).
	Further speedups and obfuscations by Tim Peters :)

	inlist -- list flattened in place; also returned
	type, ltype, maxint -- default-argument speed hacks; not part
		of the public interface (py2-only names)
	'''
	try:
		# for every possible index
		for ind in xrange( maxint):
			# while that index currently holds a list
			while type(inlist[ind]) is ltype:
				# expand that list into the index (and subsequent indicies)
				inlist[ind:ind+1] = inlist[ind]
			#ind = ind+1
	except IndexError:
		# walked off the end of the (shrinking/growing) list: done
		pass
	return inlist
def collapse_safe(inlist):
	'''
	Non-destructive version of collapse: flattens a shallow
	copy of inlist, leaving the caller's list untouched.
	'''
	duplicate = inlist[:]
	return collapse( duplicate )
def collapse2(inlist, ltype=(types.ListType, types.TupleType), type=type, maxint= sys.maxint ):
	'''
	Destructively flatten a list hierarchy to a single level.
	Will expand tuple children as well, but will fail if the
	top level element is not a list.
	Non-recursive, and (as far as I can see, doesn't have any
	glaring loopholes).

	inlist -- list flattened in place; also returned
	ltype -- types expanded into their elements (lists and tuples)
	'''
	ind = 0
	try:
		while 1:
			while type(inlist[ind]) in ltype:
				try:
					# lists splice in directly...
					inlist[ind:ind+1] = inlist[ind]
				except TypeError:
					# ...tuples must be converted to lists first
					inlist[ind:ind+1] = list(inlist[ind])
			ind = ind+1
	except IndexError:
		# walked off the end of the list: done
		pass
	return inlist
def collapse2_safe(inlist):
	'''
	As collapse2, but works on a copy of the inlist
	'''
	# Bug fix: this previously delegated to collapse(), which does
	# not expand tuple children; collapse2() is the documented
	# behaviour for this wrapper.
	return collapse2( list(inlist) )
def old_buggy_collapse(inlist):
	'''Always return a one-level list of all the non-list elements in listin,
	rewritten to be non-recursive 96-12-28 Note that the new versions work
	on the original list, not a copy of the original.

	Retained for historical reference only -- known buggy; prefer
	collapse() above.'''
	if type(inlist)==TupleType:
		inlist = list(inlist)
	elif type(inlist)!=ListType:
		# non-sequence: wrap it
		return [inlist]
	x = 0
	while 1:
		try:
			y = inlist[x]
			if type(y) == ListType:
				ylen = len(y)
				if ylen == 1:
					# single-element list: replace with the element itself
					inlist[x] = y[0]
					if type(inlist[x]) == ListType:
						x = x - 1 # need to collapse that list...
				elif ylen == 0:
					del(inlist[x])
					x = x-1 # list has been shortened
				else:
					# splice multi-element list in place
					inlist[x:x+1]=y
			x = x+1
		except IndexError:
			break
	return inlist
def old_buggy_collapse2(inlist):
	'''As collapse, but also collapse tuples, rewritten 96-12-28 to be non-recursive

	Retained for historical reference only -- known buggy; prefer
	collapse2() above.'''
	if type(inlist)==TupleType:
		inlist = list(inlist)
	elif type(inlist)!=ListType:
		# non-sequence: wrap it
		return [inlist]
	x = 0
	while 1:
		try:
			y = inlist[x]
			if type(y) in [ListType, TupleType]:
				ylen = len(y)
				if ylen == 1:
					inlist[x] = y[0]
					if type(inlist[x]) in [ListType,TupleType]:
						x = x-1 #(to deal with that element)
				elif ylen == 0:
					del(inlist[x])
					x = x-1 # list has been shortened, will raise exception with tuples...
				else:
					# tuples must be converted before splicing
					inlist[x:x+1]=list(y)
			x = x+1
		except IndexError:
			break
	return inlist
def oldest_buggy_collapse(listin):
	'Always return a one-level list of all the non-list elements in listin'
	# Historical recursive reduce/map version; note it delegates to the
	# newer collapse() for the sub-lists, not to itself.
	if type(listin) == ListType:
		return reduce(lambda x,y: x+y, map(collapse, listin), [])
	else: return [listin]
def oldest_buggy_collapse2(seqin):
	'''As oldest_buggy_collapse, but also expands tuples.  Historical
	recursive reduce/map version; delegates to the newer collapse2.'''
	if type(seqin) in [ListType, TupleType]:
		return reduce(lambda x,y: x+y, map(collapse2, seqin), [])
	else:
		return [seqin]

View File

@@ -0,0 +1,83 @@
'''
Module to allow for "copying" Numeric arrays,
(and thereby also matrices and userarrays)
standard arrays, classes and modules
(last two are not actually copied, but hey :) ).
Could do approximately the same thing with
copy_reg, but would be inefficient because
of passing the data into and out of strings.
To use, just import this module.
'''
# altered 98.11.05, moved copy out of NUMPY test
import copy
try: # in case numpy not installed
	import Numeric
	def _numpyarray_copy(somearray, memo=None):
		'''
		Simple function for getting a copy of a NUMPY array

		memo -- id()->copy mapping used by copy.deepcopy to
			preserve sharing; a fresh dict is used when omitted.
		NOTE(review): "Numeric" is the 1990s predecessor of numpy,
		so this whole branch is a silent no-op on modern systems.
		'''
		if memo == None:
			memo = {} # yeah, I know, not _really_ necessary
		# see if already done this item, return the copy if we have...
		d = id(somearray)
		try:
			return memo[d]
		except KeyError:
			pass
		# copy=1 forces a true copy rather than a view/reference
		temp = Numeric.array(somearray, copy=1)
		memo[d] = temp
		return temp
	# now make it available to the copying functions
	copy._copy_dispatch[Numeric.ArrayType] = _numpyarray_copy
	copy._deepcopy_dispatch[Numeric.ArrayType] = _numpyarray_copy
except ImportError: # Numeric not installed...
	pass
try: # in case array not installed
import array
def _array_copy(somearray, memo = None):
'''
Simple function for getting a copy of a standard array.
'''
if memo == None:
memo = {} # yeah, I know, not _really_ necessary
# see if already done this item, return the copy if we have...
d = id(somearray)
try:
return memo[d]
except KeyError:
pass
newarray = somearay[:]
memo[d] = newarray
return newarray
# now make it available to the copying functions
copy._copy_dispatch[ array.ArrayType ] = _array_copy
copy._deepcopy_dispatch[ array.ArrayType ] = _array_copy
except ImportError:
pass
import types
def _module_copy(somemodule, memo = None):
'''
Modules we will always treat as themselves during copying???
'''
return somemodule
# now make it available to the copying functions
copy._copy_dispatch[ types.ModuleType ] = _module_copy
copy._deepcopy_dispatch[ types.ModuleType ] = _module_copy
def _class_copy(someclass, memo=None):
	'''
	Again, classes are considered immutable, they are
	just returned as themselves, not as new objects.

	memo -- ignored; present only to match the dispatch signature.
	'''
	return someclass
# now make it available to the copying functions
# (shallow-copy registration was deliberately left disabled)
#copy._copy_dispatch[ types.ClassType ] = _class_copy
# NOTE(review): types.ClassType is the Python 2 old-style-class type;
# this registration raises AttributeError on Python 3.
copy._deepcopy_dispatch[ types.ClassType ] = _class_copy

View File

@@ -0,0 +1,190 @@
'''
Extend cpickle storage to include modules, and builtin functions/methods
To use, just import this module.
'''
import copy_reg
### OBJECTS WHICH ARE RESTORED THROUGH IMPORTS
# MODULES
def pickle_module(module):
	'''
	Reduction hook for module objects: pickles an import
	statement plus the module's name, so that unpickling simply
	re-imports the module (which must therefore be importable
	on the unpickling system).
	'''
	importstatement = 'import %s'%module.__name__
	return unpickle_imported_code, (importstatement, module.__name__)
# FUNCTIONS, METHODS (BUILTIN)
def pickle_imported_code(funcmeth):
	'''
	Store a reference to an imported element (such as a function/builtin function,
	Must be available for reimport during unpickling.
	'''
	# Bug fix: _whichmodule returns the module *name* (a string, see its
	# docstring), not a module object, so the old "module.__name__"
	# raised AttributeError; use the returned name directly.
	modulename = _whichmodule(funcmeth)
	return unpickle_imported_code, ('from %s import %s'%(modulename,funcmeth.__name__),funcmeth.__name__)
import types, regex
# "regex" is the ancient pre-"re" pattern-matching module (removed in
# Python 2.5).  These patterns whitelist the import statement and the
# attribute path accepted by unpickle_imported_code below.
import_filter = regex.compile('''\(from [A-Za-z0-9_\.]+ \)?import [A-Za-z0-9_\.]+''') # note the limitations on whitespace
getattr_filter = regex.compile('''[A-Za-z0-9_\.]+''') # note we allow you to use x.y.z here
# MODULES, AND FUNCTIONS
def unpickle_imported_code(impstr,impname):
	'''
	Attempt to load a reference to a module or other imported code (such as functions/builtin functions)

	impstr -- an import statement, e.g. "from os import path"
	impname -- dotted name to evaluate after running the import
	SECURITY NOTE(review): this exec/evals text taken from the pickle
	stream.  The regex whitelist blocks most direct injection, but
	importing an arbitrary installed module still executes that
	module's top-level code -- never unpickle untrusted data with
	this handler installed.
	'''
	# old "regex" API: match() returns the matched length, so a
	# full-length match means the whole string passed the whitelist
	if import_filter.match(impstr) != len(impstr) or getattr_filter.match(impname)!= len(impname):
		import sys
		sys.stderr.write('''Possible attempt to smuggle arbitrary code into pickle file (see module cpickle_extend).\nPassed code was %s\n%s\n'''%(impstr,impname))
		del(sys)
	else:
		ns = {}
		try:
			exec (impstr) in ns # could raise all sorts of errors, of course, and is still dangerous when you have no control over the modules on your system! Do not allow for untrusted code!!!
			return eval(impname, ns)
		except:
			# best-effort: report and return None rather than aborting the unpickle
			import sys
			sys.stderr.write('''Error unpickling module %s\n None returned, will likely raise errors.'''%impstr)
			return None
# Modules
# type(regex) is the module type: register module pickling support
copy_reg.pickle(type(regex),pickle_module,unpickle_imported_code)
# builtin functions/methods
# type(regex.compile) is the builtin-function type
copy_reg.pickle(type(regex.compile),pickle_imported_code, unpickle_imported_code)
del(regex) # to keep the namespace neat as possible
### INSTANCE METHODS
'''
The problem with instance methods is that they are almost always
stored inside a class somewhere. We really need a new type: reference
that lets us just say "y.this"
We also need something that can reliably find burried functions :( not
likely to be easy or clean...
then filter for x is part of the set
'''
import new
def pickle_instance_method(imeth):
	'''
	Use the (rather surprisingly clean) internals of
	the method to store a reference to a method. Might
	be better to use a more general "get the attribute
	'x' of this object" system, but I haven't written that yet :)

	Returns (unpickle_instance_method, (import-spec, instance, class)).
	NOTE(review): im_class/im_self/im_func are Python 2 method
	attributes.
	'''
	klass = imeth.im_class
	funcimp = _imp_meth(imeth)
	self = imeth.im_self # will be None for UnboundMethodType
	return unpickle_instance_method, (funcimp,self,klass)
def unpickle_instance_method(funcimp,self,klass):
	'''
	Attempt to restore a reference to an instance method,
	the instance has already been recreated by the system
	as self, so we just call new.instancemethod

	funcimp -- (import-statement, name) pair produced by _imp_meth,
		resolved back to the function via unpickle_imported_code
	'''
	# Bug fix: the re-imported function is bound to "funcimp", but the
	# original return referenced an undefined name "func", raising
	# NameError on every unpickle.
	funcimp = apply(unpickle_imported_code, funcimp)
	return new.instancemethod(funcimp,self,klass)
copy_reg.pickle(types.MethodType, pickle_instance_method, unpickle_instance_method)
# UnboundMethodType exists only in Python 2 (unbound methods are gone in 3)
copy_reg.pickle(types.UnboundMethodType, pickle_instance_method, unpickle_instance_method)
### Arrays
try:
	import array
	# probe native byte order: a little-endian machine stores int 1
	# with the 0x01 byte first
	LittleEndian = array.array('i',[1]).tostring()[0] == '\001'
	def pickle_array(somearray):
		'''
		Store a standard array object, inefficient because of copying to string
		'''
		return unpickle_array, (somearray.typecode, somearray.tostring(), LittleEndian)
	def unpickle_array(typecode, stringrep, origendian):
		'''
		Restore a standard array object

		origendian -- endianness flag of the pickling machine; the
			data is byteswapped when it differs from ours
		'''
		newarray = array.array(typecode)
		newarray.fromstring(stringrep)
		# floats are always big-endian, single byte elements don't need swapping
		if origendian != LittleEndian and typecode in ('I','i','h','H'):
			newarray.byteswap()
		return newarray
	copy_reg.pickle(array.ArrayType, pickle_array, unpickle_array)
except ImportError: # no arrays
	pass
### NUMPY Arrays
try:
	import Numeric
	# probe native byte order of this (the pickling) machine
	LittleEndian = Numeric.array([1],'i').tostring()[0] == '\001'
	def pickle_numpyarray(somearray):
		'''
		Store a numpy array, inefficient (copies to string),
		but should work with cPickle
		'''
		return unpickle_numpyarray, (somearray.typecode(), somearray.shape, somearray.tostring(), LittleEndian)
	def unpickle_numpyarray(typecode, shape, stringval, origendian):
		'''
		Restore a numpy array

		shape -- original array shape; the flat data read back from
			stringval is reshaped to it
		'''
		newarray = Numeric.fromstring(stringval, typecode)
		# Bug fix: Numeric.reshape returns a *new* array rather than
		# reshaping in place; the result was previously discarded, so
		# multi-dimensional arrays came back flattened.
		newarray = Numeric.reshape(newarray, shape)
		if origendian != LittleEndian and typecode in ('I','i','h','H'):
			# this doesn't seem to work correctly, what's byteswapped doing???
			return newarray.byteswapped()
		else:
			return newarray
	copy_reg.pickle(Numeric.ArrayType, pickle_numpyarray, unpickle_numpyarray)
except ImportError:
	pass
### UTILITY FUNCTIONS
classmap = {}  # cache: object -> module-name string
def _whichmodule(cls):
	"""Figure out the module in which an imported_code object occurs.
	Search sys.modules for the module.
	Cache in classmap.
	Return a module name.
	If the class cannot be found, return __main__.
	Copied here from the standard pickle distribution
	to prevent another import

	NOTE(review): relies on the module-level "import os, string, sys"
	further down this file having executed before the first call.
	"""
	if classmap.has_key(cls):
		return classmap[cls]
	clsname = cls.__name__
	# scan every loaded module for an attribute that *is* this object
	for name, module in sys.modules.items():
		if name != '__main__' and \
			hasattr(module, clsname) and \
			getattr(module, clsname) is cls:
			break
	else:
		name = '__main__'
	classmap[cls] = name
	return name
import os, string, sys
def _imp_meth(im):
	'''
	One-level deep recursion on finding methods, i.e. we can
	find them only if the class is at the top level.

	im -- a bound/unbound method; its defining module is guessed
		from the underlying function's source-file name.
	Returns the (import-statement, name) pair produced by
	_search_modules.  NOTE(review): im_func/func_code and the
	string module functions are Python 2 constructs.
	'''
	# source file name of the function, without path or extension
	fname = im.im_func.func_code.co_filename
	tail = os.path.splitext(os.path.split(fname)[1])[0]
	ourkeys = sys.modules.keys()
	# candidate modules: dotted names whose last component matches tail
	possibles = filter(lambda x,tail=tail: x[-1] == tail, map(string.split, ourkeys, ['.']*len(ourkeys)))
	# now, iterate through possibles to find the correct class/function
	possibles = map(string.join, possibles, ['.']*len(possibles))
	imp_string = _search_modules(possibles, im.im_func)
	return imp_string
def _search_modules(possibles, im_func):
for our_mod_name in possibles:
our_mod = sys.modules[our_mod_name]
if hasattr(our_mod, im_func.__name__) and getattr(our_mod, im_func.__name__).im_func is im_func:
return 'from %s import %s'%(our_mod.__name__, im_func.__name__), im_func.__name__
for key,val in our_mod.__dict__.items():
if hasattr(val, im_func.__name__) and getattr(val, im_func.__name__).im_func is im_func:
return 'from %s import %s'%(our_mod.__name__,key), '%s.%s'%(key,im_func.__name__)
raise '''No import string calculable for %s'''%im_func

View File

@@ -0,0 +1,80 @@
'''
DictBool:
Simplistic (and slow) implementation of Boolean operations for
dictionaries... really these should be implemented in C, but I
can't do that till I have MSVC++, which I don't really want to
buy... this will have to do in the meantime.
>>> from mcf.utils import dictbool
>>> a = {1:2}; b = {2:3}; c={4:5,6:7,8:9,1:5}
>>> dictbool.union(a,b,c) # overwrite a with b and the result with c
{1: 5, 2: 3, 4: 5, 8: 9, 6: 7}
>>> dictbool.collectunion(a,b,c) # collect all possible for each key
{1: [2, 5], 2: [3], 4: [5], 8: [9], 6: [7]}
>>> dictbool.intersect(a,b,c) # no common elements in all three
{}
>>> dictbool.intersect(a,c) # one element is common to both
{1: [2, 5]}
'''
def union(*args):
	'''
	Merge any number of dictionaries into a new dictionary.
	Later arguments win: a key present in several of the
	dictionaries takes its value from the last one containing it.
	'''
	merged = {}
	for mapping in args:
		merged.update(mapping)
	return merged
def collectunion(*args):
	'''
	Merge dictionaries without discarding values: every key maps
	to the list of all values it had across the argument
	dictionaries, in argument order (duplicates preserved).
	'''
	gathered = {}
	for mapping in args:
		for key, value in mapping.items():
			gathered.setdefault(key, []).append(value)
	return gathered
def intersect(*args):
	'''
	Build a new dictionary with those keys common to all args,
	the vals of the new dict are lists of length len(args), where
	list[ind] is the value for that key from the ind'th smallest
	argument dictionary (the smallest dictionary is scanned as
	the "master", matching the original implementation).
	'''
	# Robustness fix: no arguments -> empty intersection instead of
	# IndexError.
	if not args:
		return {}
	# Bug fix: the original sorted (len, dict) tuples, which falls back
	# to comparing the dictionaries themselves when lengths tie --
	# unreliable, and a TypeError in modern Python.  Sort by size only.
	ordered = sorted(args, key=len)
	master = ordered[0]
	rest = ordered[1:]
	temp = {}
	for var, val in master.items():
		tempval = [val]
		for slave in rest:
			try:
				tempval.append(slave[var])
			except KeyError:
				# missing from one dictionary: not in the intersection
				tempval = None
				break
		if tempval:
			temp[var] = tempval
	return temp

View File

@@ -0,0 +1,91 @@
nullval = (1,)  # unique sentinel meaning "depends on nothing"
class DSort:
	'''
	A "dependency" sorting class, used to order elements
	according to declared "dependencies" (many-to-one relationships)
	Is not a beautiful algo, but it works (or seems to)
	Requires hashable values for all elements.
	This is a quick hack, use at your own risk!
	Basic usage:
		Create a DSort mysorter
		for each element q which is part of the set to sort, call:
			mysorter.rule( dsort.nullval, q)
			# this is not strictly necessary for elements which are
			# dependent on other objects, but it is necessary for
			# those which are not. Generally it's easiest to call
			# the null rule for each element.
		for each rule x depends on y, call:
			mysorter.rule( x, y)
		when _all_ rules are entered, call
		try:
			sortedlist = mysorter.sort()
		except ValueError:
			handle recursive dependencies here...
	For an example of real-life use, see the VRML lineariser.

	Implementation notes (review): self.dependon maps each element to
	a list whose first slot is a state/depth marker (-1 = not yet
	computed, -2 = computation in progress, >=0 = dependency depth),
	followed by the elements that depend on it.  NOTE: Python 2 code
	(has_key, print statement, "raise E, arg" syntax).
	'''
	def __init__(self, recurseError=None ):
		# recurseError true -> cycles raise ValueError; otherwise they
		# are reported (in __debug__ mode) and ignored
		self.dependon = {nullval:[0]}
		self.recurseError = recurseError
	def rule( self, depon, deps):
		'''
		Register a "rule". Both elements must be hashable values.
		See the class' documentation for usage.

		depon -- the element depended upon (or nullval)
		deps -- the dependent element
		'''
		# print '''registering rule:''', depon, deps
		if self.dependon.has_key( deps ) and depon is not nullval:
			self.dependon[ deps ].append( depon )
		elif depon is not nullval:
			self.dependon[ deps ] = [-1, depon]
		elif not self.dependon.has_key( deps ):
			self.dependon[ deps ] = [-1 ]
	def sort( self ):
		'''
		Get the sorted results as a list
		(deepest dependencies first, i.e. decreasing depth)
		'''
		# first pass: compute the depth of every registered element
		for key, value in self.dependon.items():
			self._dsort( key, value)
		temp = []
		for key, value in self.dependon.items():
			temp.append( (value[0], key) )
		temp.sort()
		temp.reverse()
		temp2 = []
		for x,y in temp:
			temp2.append( y )
		# following adds the elements with no dependencies
		temp2[len(temp2):] = self.dependon[ nullval ][1:]
		return temp2
	def _dsort( self, key, value ):
		# recursive depth computation; value[0] is the state marker
		if value[0] == -2:
			# already on the current recursion path: dependency cycle
			if self.recurseError:
				raise ValueError, '''Dependencies were recursive!'''
			else:
				if __debug__:
					print '''Recursive dependency discovered and ignored in dsort.Dsort._dsort on %s:%s'''%(key, value)
				return 1 # we know it has at least one reference...
		elif value[0] == -1: # haven't yet calculated this rdepth
			value[0] = -2
			tempval = [0]
			for x in value[1:]:
				try:
					tempval.append( 1 + self._dsort( x, self.dependon[x]) )
				except KeyError:
					self.dependon[ nullval ].append( x ) # is an unreferenced element
					tempval.append( 1 )
			value[0] = max( tempval )
			return value[0]
		else:
			# depth already computed
			return value[0]
'''
from mcf.utils import dsort
>>> x = dsort.DSort()
>>> map( x.rule, [1,2,2,4,5,4], [2,3,4,5,6,3] )
[None, None, None, None, None, None]
>>> x.sort()
'''

View File

@@ -0,0 +1,91 @@
'''
Dummy Class, intended as an abstract class for the creation
of base/builtin classes with slightly altered functionality
uses _base as the name of an instance of the base datatype,
mapping all special functions to that name.
>>> from mcf.utils import dummy
>>> j = dummy.Dummy({})
>>> j['this'] = 23
>>> j
{'this': 23}
>>> class example(dummy.Dummy):
... def __repr__(self):
... return '<example: %s>'%`self._base`
>>> k = example([])
>>> k # uses the __repr__ function
<example: []>
>>> k.append # finds the attribute of the _base
<built-in method append of list object at 501830>
'''
import types, copy
class Dummy:
	'''Abstract class for slightly altering functionality of objects
	(including builtins): wraps a base value in _base and forwards
	all the sequence/mapping special methods to it.  Subclasses
	override individual methods to alter behaviour.'''
	def __init__(self, val=None):
		'Initialisation, should be overridden'
		if val and type(val)== types.InstanceType and hasattr(val, '_base'):
			# Dict is used because subclasses often want to override
			# the setattr function
			self.__dict__['_base']=copy.copy(val.__dict__['_base'])
		else:
			self.__dict__['_base'] = val
	def __repr__(self):
		'Return a string representation'
		return repr(self._base)
	def __str__(self):
		'Convert to a string'
		return str(self._base)
	def __cmp__(self,other):
		'Compare to other value'
		# altered 98.03.17 from if...elif...else statement
		return cmp(self._base, other)
	def __getitem__(self, key):
		'Get an item by index'
		return self._base[key]
	def __setitem__(self, key, val):
		'Set an item by index'
		self._base[key]=val
	def __len__(self):
		'return the length of the self'
		return len(self._base)
	def __delitem__(self, key):
		'remove an item by index'
		del(self._base[key])
	def __getslice__(self, i, j):
		'retrieve a slice by indexes'
		return self._base[i:j]
	def __setslice__(self, i, j, val):
		'set a slice by indexes to values'
		self._base[i:j]=val
	def __delslice__(self, i, j):
		'remove a slice by indexes'
		del(self._base[i:j])
	def __nonzero__(self):
		'truth value follows the truth value of _base'
		if self._base:
			return 1
		else:
			return 0
	def __getattr__(self, attr):
		'find an attribute when normal lookup fails, will raise a KeyError if missing _base attribute'
		# first try the base object's attributes, then treat the base
		# as a mapping and try the name as a key
		try:
			return getattr( self.__dict__['_base'], attr)
		except (AttributeError, KeyError):
			try:
				return self.__dict__['_base'][attr]
			except (KeyError,TypeError):
				pass
		raise AttributeError, attr

View File

@@ -0,0 +1,37 @@
'''
err.py Encapsulated writing to sys.stderr
The idea of this module is that, for a GUI system (or a more advanced UI),
you can just import a different err module (or object) and keep
your code the same. (For instance, you often want a status window
which flashes warnings and info, and have error messages pop up an
alert to get immediate attention.)
'''
import sys
def err(message, Code=0):
	'''
	Write an error line to sys.stderr.

	message -- the error text
	Code -- optional numeric error code; when non-zero it is
		included in the prefix, e.g. "Error #3: ..."
	'''
	if Code:
		prefix = 'Error #%i'%Code
	else:
		prefix = 'Error'
	sys.stderr.write('%s: %s\n'%(prefix, message))
def warn(message, Code=0):
	'''
	Write a warning line to sys.stderr.

	message -- the warning text
	Code -- optional numeric code; when non-zero it is included
		in the prefix, e.g. "Warning #3: ..."
	'''
	if Code:
		prefix = 'Warning #%i'%Code
	else:
		prefix = 'Warning'
	sys.stderr.write('%s: %s\n'%(prefix, message))
def info(message, Code=0):
	'''
	Write an informational/status line to sys.stderr.

	message -- the status text
	Code -- optional numeric code; when non-zero it is included
		in the prefix, e.g. "Info #3: ..."
	'''
	if Code:
		prefix = 'Info #%i'%Code
	else:
		prefix = 'Info'
	sys.stderr.write('%s: %s\n'%(prefix, message))

View File

@@ -0,0 +1,19 @@
'''
Make either cPickle or pickle available as the virtual
module mcf.utils.pickle. This allows you to use a single
import statement:
from mcf.utils import extpkl, pickle
and then use that pickle, knowing that you have the best
available pickling engine.
'''
defaultset = ('import cPickle', 'cPickle')  # (import stmt, name) of the preferred engine
import sys, mcf.utils
from mcf.utils import cpickle_extend
# prefer the fast C implementation, fall back to the pure-Python one
# NOTE(review): the bare except also hides errors other than ImportError
try:
	import cPickle
	pickle = cPickle
except:
	import pickle
# register whichever engine won as the virtual module mcf.utils.pickle
sys.modules['mcf.utils.pickle'] = mcf.utils.pickle = pickle

View File

@@ -0,0 +1,65 @@
### WARNING:
# I don't have a clue what I'm doing here!
import win32api
### Following is the "normal" approach,
### but it requires loading the entire win32con file (which is big)
### for two values...
##import win32con
##HKEY_CLASSES_ROOT = win32con.HKEY_CLASSES_ROOT
##REG_SZ = win32con.REG_SZ
### These are the hard-coded values, should work everywhere as far as I know...
HKEY_CLASSES_ROOT = 0x80000000
REG_SZ= 1
def associate( extension, filetype, description="", commands=(), iconfile="" ):
	'''Warning: I don't have a clue what I'm doing here!
	Register a Windows file-type association under HKEY_CLASSES_ROOT.

	extension -- extension including "." character, e.g. .proc
	filetype -- formal name, no spaces allowed, e.g. SkeletonBuilder.RulesFile
	description -- human-readable description of the file type
	commands -- sequence of (command, commandline), e.g. (("Open", "someexe.exe %1"),)
	iconfile -- optional default icon file for the filetype
	'''
	# map the extension to the filetype name
	win32api.RegSetValue(
		HKEY_CLASSES_ROOT,
		extension,
		REG_SZ,
		filetype
	)
	if description:
		# default value of the filetype key is its display description
		win32api.RegSetValue(
			HKEY_CLASSES_ROOT ,
			filetype,
			REG_SZ,
			description
		)
	if iconfile:
		win32api.RegSetValue(
			HKEY_CLASSES_ROOT ,
			"%(filetype)s\\DefaultIcon" % locals(),
			REG_SZ,
			iconfile
		)
	# one Shell\<verb>\Command subtree per (command, commandline) pair
	for (command, commandline) in commands:
		win32api.RegSetValue(
			HKEY_CLASSES_ROOT ,
			"%(filetype)s\\Shell\\%(command)s" % locals(),
			REG_SZ,
			command,
		)
		win32api.RegSetValue(
			HKEY_CLASSES_ROOT ,
			"%(filetype)s\\Shell\\%(command)s\\Command" % locals(),
			REG_SZ,
			commandline
		)
if __name__ == "__main__":
	# demo/self-install: register the SkeletonBuilder ".proc" file type
	# (note the hard-coded z: paths -- developer-machine specific)
	associate(
		".proc",
		"SkeletonBuilder.Processing",
		"SkeletonBuilder Processing File",
		(("Open", '''z:\\skeletonbuilder\\skeletonbuilder.exe "%1" %*'''),),
		'''z:\\skeletonbuilder\\bitmaps\\skeletonbuildericon.ico''',
	)

View File

@@ -0,0 +1,30 @@
'''
This utility allows a python system to find a file in its
directory. To do this, you need to pass it a function object from
a module in the correct directory. I know there must be a better
way to do this, but I haven't seen it yet. Incidentally, the
current directory should be _different_ from the module in which
the function is contained, otherwise this function will go off into
the root directory.
Currently this has to be called with the current directory a directory
other than the directory we're trying to find... need a better solution
for this kind of thing... a python registry would be great :)
NOTE: as of Python 1.5, this module should be obsolete! As soon as I
have verified that all of my code is fixed, it will be moved to the unused
directories.
'''
import os,sys
def findourfile(function, filename):
	'''
	Locate `filename` in the directory of the module defining
	`function`.

	function -- a function object; its code object's co_filename
		determines the directory searched
	filename -- bare file name to append to that directory
	Returns (exists, fullpath) where exists is true when the file
	is already present on disk.
	'''
	directory = os.path.split(function.func_code.co_filename)[0]
	ourfilename = directory+os.sep+filename
	return (os.path.exists(ourfilename), ourfilename)

View File

@@ -0,0 +1,201 @@
'''
Simple Hierarchic Walking functions for use with hierobj-type objects.
Provide for recurse-safe processing. Currently only provide depth-first
processing, and don't provide means for ignoring branches of the tree
during processing. For an example of breadth-first processing, see
mcf.pars.int.index.indutils. For more complex hierarchic processing,
see the mcf.walker package.
Originally these functions were only methods of the hierobj class (they
still are methods of it). I've split them out to allow them to be
imported selectively by other classes (some classes will only want
the simple walking functions, and not want to be bothered with the
methods which hierobj uses to keep track of its particular internal
structures.
'''
def hier_rapply(self, function,arglist=None,argdict={},moreattr = '__childlist__'):
	'''
	Safely apply a function to self and all children for
	the function's side effects. Discard the return values
	that function returns.  Recursion/DAG-safe: each object is
	processed at most once (tracked by id()).
	function
		function to apply
	arglist
		(self,)+arglist is the set of arguments passed to function
	argdict
		passed as namedargs to the function
	moreattr
		the attribute representing the children of a node
	'''
	alreadydone = {}
	tobedone = [self]
	if arglist or argdict:
		# reserve slot 0 of arglist for the current object
		if not arglist: arglist=[self]
		else:
			arglist.insert(0,self) # we could insert anything... self is convenient
		while tobedone:
			object = tobedone[0]
			try:
				alreadydone[id(object)]
				# We've already processed this object
			except KeyError:
				# We haven't processed this object
				alreadydone[id(object)]=1
				arglist[0]=object
				apply(function,tuple(arglist),argdict)
				try:
					# queue this object's children next (depth-first)
					tobedone[1:1]=getattr(object,moreattr)
				except AttributeError:
					# if the object isn't a hierobj, we don't need to recurse into it.
					pass
			del(tobedone[0])
	else: # no arglist or argdict
		while tobedone:
			object = tobedone[0]
			try:
				alreadydone[id(object)]
				# We've already processed this object
			except KeyError:
				# We haven't processed this object
				alreadydone[id(object)]=1
				function(object)
				try:
					tobedone[1:1]=getattr(object,moreattr)
				except AttributeError:
					# if the object isn't a hierobj, we don't need to recurse into it.
					pass
			del(tobedone[0])
def hier_rreturn(self, function,arglist=None,argdict={},moreattr = '__childlist__'):
	'''
	Safely apply a function to self and all children,
	collect the results in a list and return.  Recursion/DAG-safe:
	each object is processed at most once (tracked by id()).
	function
		function to apply
	arglist
		(self,)+arglist is the set of arguments passed to function
	argdict
		passed as namedargs to the function
	moreattr
		the attribute representing the children of a node
	'''
	alreadydone = {}
	tobedone = [self]
	results = []
	if arglist or argdict:
		# reserve slot 0 of arglist for the current object
		if not arglist: arglist=[self]
		else:
			arglist.insert(0,self) # or anything you feel like
		while tobedone:
			object = tobedone[0]
			try:
				alreadydone[id(object)]
				# We've already processed this object
			except KeyError:
				# We haven't processed this object
				alreadydone[id(object)]=1
				arglist[0]=object
				results.append(apply(function,tuple(arglist),argdict))
				try:
					# queue this object's children next (depth-first)
					tobedone[1:1]=getattr(object,moreattr)
				except AttributeError:
					# if the object isn't a hierobj, we don't need to recurse into it.
					pass
			del(tobedone[0])
	else:
		while tobedone:
			object = tobedone[0]
			try:
				alreadydone[id(object)]
				# We've already processed this object
			except KeyError:
				# We haven't processed this object
				alreadydone[id(object)]=1
				results.append(function(object))
				try:
					tobedone[1:1]=getattr(object,moreattr)
				except AttributeError:
					# if the object isn't a hierobj, we don't need to recurse into it.
					pass
			del(tobedone[0])
	return results
def hier_rgetattr(self, attrname, multiple=1, moreattr = '__childlist__'):
	'''
	Walk self and all (transitive) children depth-first,
	collecting the value of attrname from every node that has it.
	Recursion/DAG-safe: each node is visited at most once
	(tracked by id()).

	attrname
		the attribute to collect
	multiple
		when true (default) return the list of all values found;
		when false return the first value found (or [] if none)
	moreattr
		the attribute representing the children of a node
	'''
	seen = {}
	queue = [self]
	collected = []
	while queue:
		node = queue[0]
		if id(node) not in seen:
			seen[id(node)] = 1
			try:
				value = getattr(node, attrname)
				if not multiple:
					# single-value mode: stop at the first hit
					return value
				collected.append(value)
			except AttributeError:
				# node lacks the attribute: skip it, keep walking
				pass
			try:
				# splice this node's children in right after it
				queue[1:1] = getattr(node, moreattr)
			except AttributeError:
				# not a hierarchy-aware node: nothing to descend into
				pass
		del queue[0]
	return collected
def hier_rmethod(self, methodname,arglist=(),argdict={},moreattr = '__childlist__'):
	'''
	return the result of calling every object's method methodname,
	as for hier_rreturn otherwise.  Objects lacking the method (or
	whose method raises) are silently skipped.
	methodname
		method to call
	arglist
		arguments passed to each method call
	argdict
		passed as namedargs to the method
	moreattr
		the attribute representing the children of a node
	'''
	alreadydone = {}
	tobedone = [self]
	results = []
	while tobedone:
		object = tobedone[0]
		try:
			alreadydone[id(object)]
			# We've already processed this object
		except KeyError:
			# We haven't processed this object
			alreadydone[id(object)]=1
			try:
				results.append(apply(getattr(object,methodname),arglist,argdict))
			except:
				# deliberate best-effort: any failure (missing method,
				# bad signature, method error) just skips this node
				pass
			try:
				tobedone[1:1]=getattr(object,moreattr)
			except AttributeError:
				# if the object isn't a hierobj, we don't need to recurse into it.
				pass
		del(tobedone[0])
	return results

View File

@@ -0,0 +1,16 @@
'''
Hierarchic 'Dummy' objects
'''
import hierobj, dummy
class HierobjDummy(hierobj.Hierobj,dummy.Dummy):
	'''
	An Hierarchic Dummy object, which provides direct access to its
	children through object[x] interfaces, allows "index" "count"
	etceteras by returning the corresponding attributes of the _base.
	'''
	def __init__(self, parent=None, childlist=None):
		'''
		parent -- single parent, parent list, or None (see Hierobj)
		childlist -- optional initial list of children
		'''
		hierobj.Hierobj.__init__(self, parent, childlist)
		# alias _base to the child list so Dummy's sequence protocol
		# (__getitem__, __len__, append, index, ...) acts on the children
		self._base = self.__childlist__ #set by init function above

View File

@@ -0,0 +1,133 @@
'''
Generic Hierarchic Objects Module
Hierobj's store their children (which can be anything) in their
__childlist__ attribute, and provide methods for walking the
hierarchy, either collecting results or not.
The index function returns an index of the objects (effectively a
flattened copy of the hierarchy)
97-03-17 Added ability to pass arguments to hier_rapply and hier_rreturn.
97-10-31 Removed dependencies on mcf.store
'''
import copy,types
import singletonlist, hier_rx
class Hierobj:
	'''
	An abstract class which handles hierarchic functions and information
	# remade as a DAG 97-04-02, also reduced memory overhead for
	hier-r* functions by using while-del-IndexError construct versus
	for loop (probably makes it slower though)
	If you require a true hierarchy, use the TrueHierobj class below...

	Nodes keep a list of parents in __parent__ and a list of
	children in __childlist__ (a DAG, not a strict tree).
	'''
	def __init__(self, parent=None, childlist=None):
		'''
		parent -- None, a single parent, or a list of parents
		childlist -- optional initial children; Hierobj-aware
			children get self appended to their __parent__ list
		'''
		if parent is None: # passed no parents
			self.__dict__['__parent__'] = []
		elif type(parent) == types.ListType: # passed a list of parents
			self.__dict__['__parent__'] = parent
		else: # passed a single parent
			self.__dict__['__parent__'] = [parent]
		self.__dict__['__childlist__'] = childlist or []
		for child in self.__childlist__:
			try:
				child.__parent__.append(self)
			except (TypeError, AttributeError):
				# non-Hierobj-aware children are allowed; narrowed from a
				# bare except for consistency with hier_addchild below
				pass
	# import simple hierarchic processing methods
	hier_rapply = hier_rx.hier_rapply
	hier_rreturn = hier_rx.hier_rreturn
	hier_rgetattr = hier_rx.hier_rgetattr
	hier_rmethod = hier_rx.hier_rmethod
	def hier_addchild(self, child):
		'''
		Add a single child to the childlist
		'''
		self.__childlist__.append(child)
		try:
			# Hierobj-aware child
			child.__parent__.append(self) # raises error if not hier_obj aware
		except (TypeError, AttributeError):
			# Non Hierobj-aware child
			pass
	append = hier_addchild
	def hier_remchild(self, child):
		'''
		Breaks the child relationship with child, including the
		reciprocal parent relationship

		Returns 1 on success, 0 when child was not actually a child.
		'''
		try:
			self.__childlist__.remove(child)
			try:
				child.hier_remparent(self) # if this fails, no problem
			except AttributeError: pass
		except (AttributeError,ValueError):
			return 0 # didn't manage to remove the child
		return 1 # succeeded
	def hier_remparent(self, parent):
		'''
		Normally only called by hier_remchild of the parent,
		just removes the parent from the child's parent list,
		but leaves child in parent's childlist
		'''
		try:
			self.__parent__.remove(parent)
		except (AttributeError,ValueError):
			return 0
		return 1
	def hier_replacewith(self,newel):
		'''
		As far as the hierarchy is concerned, the new element
		is exactly the same as the old element, it has all
		the same children, all the same parents. The old
		element becomes completely disconnected from the hierarchy,
		but it still retains all of its references
		For every parent, replace this as a child
		For every child, replace this as the parent
		'''
		for parent in self.__parent__:
			try:
				parent.hier_replacechild(self, newel)
			except AttributeError:
				pass
		for child in self.__childlist__:
			try:
				# Bug fix: this previously passed "parent" -- the leaked
				# loop variable from the loop above -- so children were
				# re-parented to an arbitrary old parent instead of the
				# replacement element newel.
				child.hier_replaceparent(self, newel)
			except AttributeError:
				pass
	def hier_replaceparent(self, oldparent, newparent):
		'''Swap oldparent for newparent in this node's parent list'''
		ind = self.__parent__.index(oldparent)
		self.__parent__[ind] = newparent
	def hier_replacechild(self, oldchild, newchild):
		'''Swap oldchild for newchild in this node's child list'''
		ind = self.__childlist__.index(oldchild)
		self.__childlist__[ind] = newchild
class TrueHierobj(Hierobj):
	'''
	An inefficient implementation of an Hierobj which limits the
	__parent__ attribute to a single element.  This will likely be
	_slower_ than an equivalent Hierobj.  That will have to be fixed
	eventually.
	'''
	def __init__(self, parent=None, childlist=None):
		'''
		childlist now defaults to None instead of a shared mutable
		[] default (the old default was copied before storage, so
		behaviour is unchanged, but the idiom was unsafe).
		'''
		if parent is None: # passed no parents
			self.__dict__['__parent__'] = singletonlist.SingletonList()
		else: # passed a single parent
			self.__dict__['__parent__'] = singletonlist.SingletonList(parent)
		# copy so the caller's list is never aliased by the hierarchy
		self.__dict__['__childlist__'] = copy.copy(childlist or [])
		for child in self.__childlist__:
			try:
				child.__parent__.append(self)
			except:
				# best-effort: children need not be Hierobj-aware
				pass
def index(grove):
	'''
	Return a flattened version of the grove: collect every node by
	recursively applying the identity function.
	'''
	identity = lambda node: node
	return grove.hier_rreturn(identity)

View File

@@ -0,0 +1,38 @@
class inplace:
	'''
	Wrap a value so that accumulation can be spelled with ordinary
	operators: addition and subtraction mutate the stored value (and
	return the new value), while the remaining operators are
	read-only views onto the current value.
	'''
	def __init__(self, base):
		self.base = base
	def __add__(self, other):
		# accumulate in place, then expose the new value
		self.base = self.base + other
		return self.base
	__radd__ = __add__
	def __sub__(self, other):
		self.base = self.base - other
		return self.base
	def __repr__(self):
		return repr(self.base)
	def __str__(self):
		return str(self.base)
	def __mul__(self, other):
		return self.base * other
	def __div__(self, other):
		return self.base / other
	def __mod__(self, other):
		return self.base % other
	def __neg__(self):
		# NOTE(review): returns the negated *magnitude*, not a plain
		# sign flip -- confirm intended
		return - abs(self.base)
	def __pos__(self):
		# NOTE(review): returns the magnitude, not the value itself
		return abs(self.base)
	def __abs__(self):
		return abs(self.base)
	def __inv__(self):
		# arithmetic negation (not bitwise inversion)
		return -self.base
	def __lshift__(self, other):
		return self.base << other
	def __rshift__(self, other):
		return self.base >> other
	def __and__(self, other):
		# truth-value 'and', not bitwise
		return self.base and other
	def __or__(self, other):
		# truth-value 'or', not bitwise
		return self.base or other
	def value(self):
		'''Return the currently stored value.'''
		return self.base

View File

@@ -0,0 +1,224 @@
'''
NameSpace v0.04:
A "NameSpace" is an object wrapper around a _base dictionary
which allows chaining searches for an 'attribute' within that
dictionary, or any other namespace which is defined as part
of the search path (depending on the downcascade variable, is
either the hier-parents or the hier-children).
You can assign attributes to the namespace normally, and read
them normally. (setattr, getattr, a.this = that, a.this)
I use namespaces for writing parsing systems, where I want to
differentiate between sources (have multiple sources that I can
swap into or out of the namespace), but want to be able to get
at them through a single interface. There is a test function
which gives you an idea how to use the system.
In general, call NameSpace(someobj), where someobj is a dictionary,
a module, or another NameSpace, and it will return a NameSpace which
wraps up the keys of someobj. To add a namespace to the NameSpace,
just call the append (or hier_addchild) method of the parent namespace
with the child as argument.
### NOTE: if you pass a module (or anything else with a dict attribute),
names which start with '__' will be removed. You can avoid this by
pre-copying the dict of the object and passing it as the arg to the
__init__ method.
### NOTE: to properly pickle and/or copy module-based namespaces you
will likely want to do: from mcf.utils import extpkl, copy_extend
### Changes:
97.05.04 -- Altered to use standard hierobj interface, cleaned up
interface by removing the "addparent" function, which is reachable
by simply appending to the __parent__ attribute, though normally
you would want to use the hier_addchild or append functions, since
they let both objects know about the addition (and therefor the
relationship will be restored if the objects are stored and unstored)
97.06.26 -- Altered the getattr function to reduce the number of
situations in which infinite lookup loops could be created
(unfortunately, the cost is rather high). Made the downcascade
variable harden (resolve) at init, instead of checking for every
lookup. (see next note)
97.08.29 -- Discovered some _very_ weird behaviour when storing
namespaces in mcf.store dbases. Resolved it by storing the
__namespace_cascade__ attribute as a normal attribute instead of
using the __unstore__ mechanism... There was really no need to
use the __unstore__, but figuring out how a functions saying
self.__dict__['__namespace_cascade__'] = something
print `self.__dict__['__namespace_cascade__']` can print nothing
is a bit beyond me. (without causing an exception, mind you)
97.11.15 Found yet more errors, decided to make two different
classes of namespace. Those based on modules now act similar
to dummy objects, that is, they let you modify the original
instead of keeping a copy of the original and modifying that.
98.03.15 -- Eliminated custom pickling methods as they are no longer
needed for use with Python 1.5final
98.03.15 -- Fixed bug in items, values, etceteras with module-type
base objects.
'''
import copy, types, string
import hierobj
class NameSpace(hierobj.Hierobj):
'''
An hierarchic NameSpace, allows specification of upward or downward
chaining search for resolving names
'''
def __init__(self, val = None, parents=None, downcascade=1,children=[]):
'''
A NameSpace can be initialised with a dictionary, a dummied
dictionary, another namespace, or something which has a __dict__
attribute.
Note that downcascade is hardened (resolved) at init, not at
lookup time.
'''
hierobj.Hierobj.__init__(self, parents, children)
self.__dict__['__downcascade__'] = downcascade # boolean
if val is None:
self.__dict__['_base'] = {}
else:
if type( val ) == types.StringType:
# this is a reference to a module which has been pickled
val = __import__( val, {},{}, string.split( val, '.') )
try:
# See if val's a dummy-style object which has a _base
self.__dict__['_base']=copy.copy(val._base)
except (AttributeError,KeyError):
# not a dummy-style object... see if it has a dict attribute...
try:
if type(val) != types.ModuleType:
val = copy.copy(val.__dict__)
except (AttributeError, KeyError):
pass
# whatever val is now, it's going to become our _base...
self.__dict__['_base']=val
# harden (resolve) the reference to downcascade to speed attribute lookups
if downcascade: self.__dict__['__namespace_cascade__'] = self.__childlist__
else: self.__dict__['__namespace_cascade__'] = self.__parent__
def __setattr__(self, var, val):
'''
An attempt to set an attribute should place the attribute in the _base
dictionary through a setitem call.
'''
# Note that we use standard attribute access to allow ObStore loading if the
# ._base isn't yet available.
try:
self._base[var] = val
except TypeError:
setattr(self._base, var, val)
def __getattr__(self,var):
## print '__getattr__', var
return self.__safe_getattr__(var, {}) # the {} is a stopdict
def __safe_getattr__(self, var,stopdict):
'''
We have a lot to do in this function, if the attribute is an unloaded
but stored attribute, we need to load it. If it's not in the stored
attributes, then we need to load the _base, then see if it's in the
_base.
If it's not found by then, then we need to check our resource namespaces
and see if it's in them.
'''
# we don't have a __storedattr__ or it doesn't have this key...
if var != '_base':
try:
return self._base[var]
except (KeyError,TypeError), x:
try:
return getattr(self._base, var)
except AttributeError:
pass
try: # with pickle, it tries to get the __setstate__ before restoration is complete
for cas in self.__dict__['__namespace_cascade__']:
try:
stopdict[id(cas)] # if succeeds, we've already tried this child
# no need to do anything, if none of the children succeeds we will
# raise an AttributeError
except KeyError:
stopdict[id(cas)] = None
return cas.__safe_getattr__(var,stopdict)
except (KeyError,AttributeError):
pass
raise AttributeError, var
def items(self):
try:
return self._base.items()
except AttributeError:
pass
try:
return self._base.__dict__.items()
except AttributeError:
pass
def keys(self):
try:
return self._base.keys()
except AttributeError:
pass
try:
return self._base.__dict__.keys()
except AttributeError:
pass
def has_key( self, key ):
try:
return self._base.has_key( key)
except AttributeError:
pass
try:
return self._base.__dict__.has_key( key)
except AttributeError:
pass
def values(self):
try:
return self._base.values()
except AttributeError:
pass
try:
return self._base.__dict__.values()
except AttributeError:
pass
def __getinitargs__(self):
if type( self._base ) is types.ModuleType:
base = self._base.__name__
else:
base = self._base
return (base, self.__parent__, self.__downcascade__, self.__childlist__)
def __getstate__(self):
return None
def __setstate__(self,*args):
pass
def __deepcopy__(self, memo=None):
d = id(self)
if memo is None:
memo = {}
elif memo.has_key(d):
return memo[d]
if type(self._base) == types.ModuleType:
rest = tuple(map( copy.deepcopy, (self.__parent__, self.__downcascade__, self.__childlist__) ))
new = apply(self.__class__, (self._base,)+rest )
else:
new = tuple(map( copy.deepcopy, (self._base, self.__parent__, self.__downcascade__, self.__childlist__) ))
return new
## def __del__( self, id=id ):
## print 'del namespace', id( self )
def test():
	'''
	Demo: build a NameSpace over the string module, then chain in a
	plain-dictionary namespace and the math module as children.
	Returns the root namespace so the caller can poke at it.
	'''
	import string
	a = NameSpace(string)
	del(string)
	a.append(NameSpace({'a':23,'b':42}))
	import math
	a.append(NameSpace(math))
	print 'The returned object should allow access to the attributes of the string,\nand math modules, and two simple variables "a" and "b" (== 23 and42 respectively)'
	return a

View File

@@ -0,0 +1,78 @@
'''
Generic quoting functions (very fast),
generalised to allow use in any number of
situations, but normally you'll want to create
a new function based on these patterns which
has the default args you need. This will
prevent an extra function call.
'''
import string, regex
# create a translator which is fully worked out...
def _quote(somestring,trans,start='"',stop='"'):
	'''
	Return a quoted version of somestring, translating every
	character through the (fully worked out) trans table and
	wrapping the result in start/stop quotes.
	'''
	# translate character-by-character; an unmapped character raises
	# KeyError, exactly as the table lookup always has
	translated = map(lambda char, table=trans: table[char], somestring)
	return '%s%s%s'%(start,string.join(translated, ''),stop)
def compilerex(trans):
	'''
	Compiles a suitable regex from a dictionary
	translation table.  Should be used at design
	time in most cases to improve speed.  Note:
	is not a very intelligent algo.  You could
	do better by creating a character-class []
	for the single-character keys and then the
	groups for the or-ing after it, but I've not
	got the time at the moment.
	NOTE(review): only returns a value -- the tuple
	(1, compiled-regex) -- when at least one key maps to a
	replacement of a different length; otherwise it falls off the
	end and returns None.  Confirm callers expect that.
	NOTE(review): relies on the long-obsolete regex module.
	'''
	keyset = trans.keys()
	multitrans = []
	for x in range(len(keyset)):
		# remember keys whose replacement has a different length
		if len(keyset[x]) != len(trans[keyset[x]]):
			multitrans.append((keyset[x],trans[keyset[x]]))
		# multi-character keys must be grouped in the pattern
		if len(keyset[x])!= 1:
			keyset[x] = '\(%s\)'%keyset[x]
	if multitrans:
		return 1,regex.compile(string.join(keyset,'\|'))
def quote2(somestring,trans,rex,start='',stop=''):
	'''
	Should be a faster version of _quote once
	the regex is built.  Rex should be a simple
	or'ing of all characters requiring substitution,
	use character ranges whereever possible (should
	be in most cases)
	NOTE(review): the output buffer temp is edited while rex keeps
	matching against the unmodified somestring; replacements whose
	length differs from the match will desynchronise the indices --
	confirm against actual translation tables in use.
	'''
	temp = list(somestring)
	curpos = 0
	try:
		while rex.search(somestring,curpos) != -1:
			# old regex API: regs[0] holds the (start, end) of the match
			pos = rex.regs[0]
			# (removed stray debugging 'print pos' which leaked to stdout)
			replacement = list(trans[rex.group(0)])
			temp[pos[0]:pos[1]] = replacement
			curpos = pos[0]+len(replacement)
	except (IndexError,regex.error):
		# no further matches (or regex failure): keep what we have
		pass
	return '%s%s%s'%(start,string.join(temp, ''),stop)
# compatability
_quote2 = quote2
def reprq(obj, qtype):
	'''
	Return representation of a string obj as a string with qtype
	quotes surrounding it.  Usable when linearising Python objects
	to languages which have only a particular type of string.  (Such
	as VRML).  This is not a generalised nor a particularly reliable
	solution.  You should use the _quote2 function instead.
	'''
	# first double every backslash, then backslash-escape the quote
	# character itself, then wrap in the quotes
	backslashed = string.join(string.split(obj, '\\'), '\\\\')
	escaped = string.join(string.split(backslashed, qtype), '\\%s'%qtype)
	return '%s%s%s'%(qtype, escaped, qtype)

View File

@@ -0,0 +1,64 @@
''' Classes which match ranges, sets, or anything at all. '''
import dummy # provides storage functions as well as a few others
class BetwVal(dummy.Dummy):
	'''
	Matches any object greater than or equal to smaller and less
	than or equal to larger (comparison returns 0 inside the range).
	'''
	def __init__(self, first, second):
		# store the pair in sorted order
		if not (first <= second):
			first, second = second, first
		dummy.Dummy.__init__(self, [first, second])
	def __getinitargs__(self):
		return (self._base[0], self._base[1])
	def __cmp__(self, object):
		'''The Guts of the Class, allows standard comparison operators'''
		if self._base[0]<=object:
			if self._base[1] >=object:
				return 0
			return 1
		return -1
	def __repr__(self):
		return '%s(%s,%s)'% (self.__class__.__name__,repr(self._base[0]),repr(self._base[1]))
class WInVal(dummy.Dummy):
	'''
	Matches any value contained in the sequence used as initialiser.
	Doesn't gracefully handle the not-found case: comparison simply
	returns -1 then.
	'''
	def __init__(self,seq):
		self._base = seq
	def __cmp__(self, object):
		''' Standard comparison operators '''
		for candidate in self._base:
			if candidate == object:
				return 0
		return -1
	def __repr__(self):
		return '%s(%s)'% (self.__class__.__name__,repr(self._base))
class ExceptVal(WInVal):
	'''
	Inverse of WInVal: compares equal (0) only when the value is
	absent from the initialiser sequence.
	'''
	def __cmp__(self, object):
		for candidate in self._base:
			if candidate == object:
				return -1
		return 0
class AnyVal:
	'''
	Wildcard value: compares equal to absolutely anything.
	'''
	def __init__(self):
		pass
	def __getinitargs__(self):
		# stored/copied instances are rebuilt with no arguments
		return ()
	def __cmp__(self, object):
		# everything matches
		return 0
	def __repr__(self):
		return 'AnyVal()'

View File

@@ -0,0 +1,158 @@
import win32api, win32con, string, types
def _getDataType( data, coerce = 1 ):
	'''
	Return a tuple of dataType, data for a given object
	automatically converts non-string-or-tuple-data into
	strings by calling pickle.dumps
	(dataType is the win32con.REG_* constant used to store it)
	'''
	if type( data ) is types.StringType:
		return win32con.REG_SZ, data
	elif type( data ) is types.IntType:
		return win32con.REG_DWORD, data
	# what about attempting to convert Longs, floats, etceteras to ints???
	elif coerce:
		import pickle
		# pickled data is stored as a REG_SZ string; the reader must
		# know to unpickle it (see RegQueryValue's pickling flag)
		return win32con.REG_SZ, pickle.dumps( data )
	else:
		raise TypeError, '''Unsupported datatype for registry, use getDataType( data, coerce=1) to store types other than string/int.'''
def _getBaseKey( fullPathSpec ):
'''
Split a "full path specification" registry key
into its root and subpath components
'''
key = ''
subkey = fullPathSpec
# while loop will strip off preceding \\ characters
while subkey and not key:
key, subkey = string.split( fullPathSpec, '\\', 1 )
try:
return getattr( win32con, key ), subkey
except AttributeError:
raise '''Unknown root key %s in registry path %s'''% (key, fullPathSpec)
def RegSetValue( key, valuename='', data='', allowPickling=1 ):
	'''
	Set a registry value by providing a fully-specified
	registry key (and an optional sub-key/value name),
	and a data element.  If allowPickling is true, the
	data element can be any picklable element, otherwise
	data element must be a string or integer.
	'''
	root, subkey = _getBaseKey( key )
	dataType, data = _getDataType( data, allowPickling )
	try:
		hKey = win32api.RegOpenKeyEx( root , subkey, 0, win32con.KEY_ALL_ACCESS) # could we use a lesser access model?
	except:
		# deliberately broad: any failure to open -> create the key
		hKey = win32api.RegCreateKey( root, subkey )
	try:
		if not valuename: # the default value
			win32api.RegSetValue( hKey, valuename, dataType, data )
		else: # named sub-value
			win32api.RegSetValueEx( hKey, valuename, 0, dataType, data )
	finally:
		# always release the handle, even if the write failed
		win32api.RegCloseKey( hKey)
def RegQueryValue( key, valuename='', pickling=0 ):
	'''
	Get a registry value by providing a fully-specified
	registry key (and an optional sub-key/value name)
	If pickling is true, the data element will be
	unpickled before being returned.
	Returns None (and skips unpickling) when a named value
	cannot be read.
	'''
	#print 'key', key
	root, subkey = _getBaseKey( key )
	if not valuename: # the default value
		data, type = win32api.RegQueryValue( root , subkey)
	else:
		try:
			#print root, subkey
			hKey = win32api.RegOpenKeyEx( root, subkey, 0, win32con.KEY_READ)
			#print hKey, valuename
			try:
				data, type = win32api.RegQueryValueEx( hKey, valuename )
			except: #
				# deliberately broad: treat any read failure as "no value"
				data, type = None, 0 # value is not available...
				pickling = None
			finally:
				win32api.RegCloseKey( hKey)
	if pickling:
		import pickle
		data = pickle.loads( data )
	return data
# following constants seem to reflect where path data is stored on NT machines
# no idea if it'll work on a 95 machine
def AddPathEntry( newEntry, user = 1, prepend=0 ):
	'''
	Add or remove path entry on NT, use prepend == -1 for removal,
	use prepend == 0 for append, prepend == 1 for prepending to the
	current path.
	'''
	# pick the per-user or per-machine environment location
	if user:
		scope = 'USER'
	else:
		scope = 'MACHINE'
	key, valuename = COMMON_KEYS[ (scope, 'PATH') ]
	_PathManager( key, valuename, newEntry, prepend )
def PyExecutables( user = 1, prepend=0 ):
	'''
	Register/Deregister Python files as executables
	'''
	# pick the per-user or per-machine environment location
	if user:
		scope = 'USER'
	else:
		scope = 'MACHINE'
	key, valuename = COMMON_KEYS[ (scope, 'PYEXECUTABLES') ]
	# removal (prepend < 0) only needs to target .PY; otherwise
	# (re)register the default executables plus Python scripts
	if prepend < 0:
		extensions = '.PY'
	else:
		extensions = '.PY;.COM;.EXE;.BAT;.CMD'
	_PathManager( key, valuename, extensions, prepend )
def _PathManager( key, valuename, newEntry, prepend=0, eliminate_duplicates=1 ):
	'''
	Create a new Path entry on NT machines (or kill an old one)
	user determines whether to alter the USER or the Machine's path
	prepend
		1 -> add newEntry to start
		0 -> add newEntry to end
		-1 -> don't add newEntry
	eliminate_duplicates determines whether to kill equal paths
	All values are converted to lower case
	'''
	# get current value...
	curval = RegQueryValue( key, valuename ) or ''
	# split into elements
	curval = string.split( string.lower(curval), ';' )
	if type( newEntry ) not in (types.ListType, types.TupleType):
		newEntry = string.split( string.lower(newEntry), ';' )
	# eliminate duplicates of the newEntry
	curval = filter( None, curval) # strip out null entries
	if eliminate_duplicates:
		# rebuild the list without any element of newEntry
		newval = []
		for p in curval:
			if p not in newEntry:
				newval.append( p )
		curval = newval
	if prepend == 1:
		curval = list(newEntry) + curval
	elif prepend == 0:
		curval = curval + list( newEntry )
	elif prepend == -1: # this call is just killing the path entry
		pass
	#now do the recombination
	curval = string.join( curval, ';' )
	RegSetValue( key, valuename, curval )
# map (scope, purpose) -> (full registry key path, value name);
# locations reflect where NT stores per-user and per-machine
# environment data (see the note above about 95 machines)
COMMON_KEYS = {
('USER','PATH') : ('''HKEY_CURRENT_USER\\Environment''', 'path'),
('MACHINE','PATH') : ('''HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment''', 'path'),
('USER','PYEXECUTABLES') : ('''HKEY_CURRENT_USER\\Environment''', 'pathext'),
('MACHINE','PYEXECUTABLES') : ('''HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment''', 'pathext')
}

View File

@@ -0,0 +1,33 @@
import sys, string
class Reloader:
'''
Class allows for reloading all modules imported
after the instance is created. Normally you will
use this by doing:
import <anything you don't want reloaded>
from mcf.utils import reloader
<do testing and rewriting>
reloader.go()
'''
def __init__(self):
self.keys = sys.modules.keys()
def __call__(self, *args, **namedargs):
done = []
for key, val in sys.modules.items():
if key not in self.keys:
try:
reload( val )
done.append( key )
except (ImportError):
print '''Couldn't reload module:''', key
except (TypeError): # for None's
# is a flag to prevent reloading
pass
if done:
print '''Reloaded:''', string.join( done, ', ')
else:
print '''No modules reloaded'''
# the default reloader instance: call reloader.go() to reload every
# module imported after this module was first imported
go = Reloader()

View File

@@ -0,0 +1,104 @@
class SingletonList:
	'''
	A SingletonList always has a length of one or 0,
	appends overwrite the single element, iteration will
	return precisely one element.  Attempts to get any item
	other than 0 will raise an IndexError or return the single
	item depending on whether the 'raiseIndexError' flag is
	true or false (generally it should be true except if the
	for x in SingletonList: construct is known never to be
	used, since this construct will create an infinite loop
	if we never raise an IndexError).
	NOTE(review): __init__ always sets _base (even to the default
	None), so a freshly-constructed list reports length 1; the
	empty state is only reachable via remove/__delitem__ --
	confirm that is intended.
	'''
	def __init__(self,base=None,raiseIndexError=1):
		self._base = base
		self.raiseIndexError = raiseIndexError
	def __len__(self):
		'''
		The length is 0 if no _base, 1 if a base
		(i.e. 0 only after the base has been deleted)
		'''
		if hasattr(self, '_base'):
			return 1
		else:
			return 0
	def __getitem__(self,ind):
		'''
		Get the item if ind == 0, else raise an IndexError or return
		the item, depending on the raiseIndexError flag
		'''
		if ind == 0:
			try:
				return self._base
			except AttributeError:
				raise IndexError, ind
		elif self.raiseIndexError:
			raise IndexError, ind
		else:
			return self._base
	def __setitem__(self,ind, item):
		'''
		The item is to become the base (index is ignored)
		'''
		self._base = item
	def __delitem__(self,ind):
		'''
		Delete the base, regardless of the index used
		'''
		try:
			del(self._base)
		except AttributeError:
			raise IndexError, ind
	def append(self,item):
		'''
		Replace the base with the item
		'''
		self._base = item
	def index(self,item):
		'''
		if the item is the base, return the only valid index (0)
		'''
		try:
			if item == self._base:
				return 0
		except:
			# comparison failure (or no base) == not present
			pass
		raise ValueError, item
	def count(self, item):
		'''
		If the item is the base, we have one, else 0
		'''
		try:
			if item == self._base:
				return 1
		except:
			# comparison failure (or no base) == not present
			pass
		return 0
	# insert ignores the position argument: the item simply replaces the base
	insert = __setitem__
	def remove(self, item):
		'''
		if the item is the base, delete the base, else ValueError
		'''
		try:
			if item == self._base:
				del(self._base)
				return
		except:
			# comparison failure (or no base) == not present
			pass
		raise ValueError, item
	def reverse(self):
		# no-op: a single element is its own reversal
		pass
	def sort(self):
		# no-op: a single element is always sorted
		pass
	def __repr__(self):
		try:
			return '[%s]'%`self._base`
		except AttributeError:
			return '[]'
# store and copy functions
# def __getinitargs__(self):
# return (self._base,self.raiseIndexError)
# def __getstate__(self,*args,**namedargs):
# pass
# def __setstate__(self,*args,**namedargs):
# pass

View File

@@ -0,0 +1,251 @@
'''
Generate module for holding temporary classes which
will be reconstructed into the same module to allow
cPickle and the like to properly import them.
Note: You _must_ pickle a reference to the tempclassmodule
_before_ you pickle any instances which use the classes stored
in the module! Also, the classes cannot reference anything
in their dictionary or bases tuples which are not normally
pickleable (in particular, you can't subclass a class in the
same tempclassmodule or a tempclassmodule which you cannot
guarantee will be loaded before the dependent classes. (i.e.
by guaranteeing they will be pickled first)
'''
import new, time, string, sys, types
def buildModule(packagename, basename, rebuild=None, initialcontents=None):
	'''
	Dynamically build a module or rebuild one, generates
	a persistent ID/name if not rebuilding.  The persistent
	ID is the value of basename+`time.time()` with the decimal
	point removed (i.e. a long string of digits).  Packagename
	must be an importable package!  Will raise an ImportError
	otherwise.  Also, for easy reconstitution, basename must not
	include any decimal points.
	initialcontents is a dictionary (or list) of elements which will be
	added to the new module.
	Returns (fully-qualified module name, module object).
	'''
	if rebuild == None:
		# generate the persistent name: basename + timestamp digits
		timestamp = `time.time()`
		decpos = string.find(timestamp,'.')
		basename = basename+timestamp[:decpos]+timestamp[decpos+1:]
	name = string.join((packagename, basename), '.')
	# a and b are unused leftovers from the commented-out exec approach below
	a = {}
	b = {}
	try: # see if we've already loaded this module...
		mod = __import__( name, {},{}, string.split( name, '.'))
		if initialcontents:
			_updateFrom(mod, initialcontents)
		return mod.__name__, mod
	except ImportError:
		pass
	mod = new.module(name)
	sys.modules[name] = mod
	# following is just to make sure the package is loaded before attempting to alter it...
	__import__( packagename, {}, {}, string.split(packagename) )
##	exec 'import %s'%(packagename) in a, b ### Security Risk!
	# bind the new module as an attribute of its package
	setattr(sys.modules[ packagename ], basename, mod)
	# now do the update if there were initial contents...
	if initialcontents:
		_updateFrom(mod, initialcontents)
	return name, mod
def buildClassIn(module, *classargs, **namedclassargs):
	'''
	Build a new class and register it in the module
	as if it were really defined there.
	classargs are (name, bases); namedclassargs becomes the class
	dictionary, plus the __temporary_class__ marker which tags the
	class as reconstructable by deconstruct_class.
	'''
	# (removed stray debugging print of the raw arguments)
	namedclassargs["__temporary_class__"] = 1
	newclass = new.classobj(classargs[0], classargs[1], namedclassargs)
	newclass.__module__ = module.__name__
	setattr(module, newclass.__name__, newclass)
	return newclass
def addClass(module, classobj):
	'''
	Insert an existing class into the tempclassmodule: point its
	__module__ at the module, mark it as a temporary
	(reconstructable) class and bind it under its own name.
	'''
	marker = "__temporary_class__"
	classobj.__module__ = module.__name__
	setattr(classobj, marker, 1)
	setattr(module, classobj.__name__, classobj)
def delClass(module, classobj):
	'''
	Remove this class from the module.  Note: after running this
	the classobj can no longer be pickled/unpickled unless it is
	subsequently added to another module, because its __module__
	attribute still names a module which will no longer save its
	definition.  A class that is not registered is silently ignored.
	'''
	if hasattr(module, classobj.__name__):
		delattr(module, classobj.__name__)
def _packageName(modulename):
	'''
	Split a dotted module name into (package, basename) around the
	last dot.  (With no dot present, rfind yields -1 and the result
	degenerates to (modulename[:-1], modulename) -- callers always
	pass dotted names.)
	'''
	cut = string.rfind(modulename, '.')
	return modulename[:cut], modulename[cut+1:]
def _updateFrom(module, contentsource):
	'''
	For dealing with unknown datatypes (those passed in by the user),
	we want to check and make sure we're building the classes correctly.
	contentsource may be a dict or instance (its values are used), or
	a list of class objects and/or class-building argument tuples.
	'''
	# often will pass in a protoNamespace from which to update (during cloning)
	if type(contentsource) in ( types.DictType, types.InstanceType):
		contentsource = contentsource.values()
	# contentsource should now be a list of classes or class-building tuples
	for val in contentsource:
		if type(val) is types.ClassType:
			try:
				addClass(module, val)
			except:
				# best effort: skip classes which cannot be registered
				pass
		elif type(val) is types.TupleType:
			try:
				apply(buildClassIn, (module,)+val)
			except:
				# best effort: skip malformed class-building tuples
				pass
def deconstruct(templatemodule):
	'''
	Return a tuple which can be passed to reconstruct
	in order to get a rebuilt version of the module
	after pickling.  i.e. apply(reconstruct, deconstruct(tempmodule))
	is the equivalent of doing a deepcopy on the tempmodule.
	Result is (module name, list of class-builder tuples).
	'''
	classbuilder = []
	for name, classobj in templatemodule.__dict__.items():
		if type(classobj) is types.ClassType: # only copy class objects, could do others, but these are special-purpose modules, not general-purpose ones.
			classbuilder.append( deconstruct_class( classobj) )
	return (templatemodule.__name__, classbuilder)
def deconstruct_class( classobj ):
	'''
	Pull a class apart into the tuple of values buildClassIn needs
	to rebuild it.  Regular classes (no __temporary_class__ marker)
	deconstruct to (modulename, classname) and are simply
	re-imported on load; temporary classes deconstruct recursively
	to (name, bases-tuple, dict).
	'''
	if hasattr( classobj, "__temporary_class__"):
		# temporary class: record enough structure to rebuild it
		rebuilt_bases = map( deconstruct_class, classobj.__bases__ )
		return (classobj.__name__, tuple( rebuilt_bases ), classobj.__dict__)
	# regular class: re-import on load
	return (classobj.__module__, classobj.__name__)
def reconstruct(modulename, classbuilder):
	'''
	Rebuild a temporary module and all of its classes
	from the structure created by deconstruct.
	i.e. apply(reconstruct, deconstruct(tempmodule))
	is the equivalent of doing a deepcopy on the tempmodule.
	'''
	# the trailing 1 is buildModule's rebuild flag: reuse the existing
	# persistent name instead of generating a fresh timestamped one
	mname, newmod = apply(buildModule, _packageName(modulename)+(1,) ) # 1 signals reconstruct
	reconstruct_classes( newmod, classbuilder )
	return newmod
def reconstruct_classes( module, constructors ):
	'''
	Put classes back together from the tuples created by
	deconstruct_class, registering rebuilt temporaries in module.
	Returns the list of class objects in the order given.
	'''
	classes = []
	for constructor in constructors:
		if len (constructor) == 2:
			# a standard class: re-import it.  BUGFIX: bind the module
			# name to a fresh local instead of clobbering the 'module'
			# parameter, which is still needed for any later
			# temporary-class constructors in this list
			modulename, name = constructor
			temporarymodule = __import__(
				modulename,
				{},{},
				string.split(modulename)+[name]
			)
			classobject = getattr (temporarymodule, name)
		else:
			# a temporary class: rebuild it (and, recursively, its bases)
			(name, bases, namedarguments) = constructor
			bases = tuple( reconstruct_classes( module, bases ))
			classobject = apply (
				buildClassIn,
				(module, name, bases), # name and bases are the args to the class constructor along with the dict contents in namedarguments
				namedarguments,
			)
		classes.append (classobject)
	return classes
def destroy(tempmodule):
	'''
	Destroy the module to allow the system to do garbage collection
	on it.  I'm not sure that the system really does do gc on modules,
	but one would hope :)
	'''
	name = tempmodule.__name__
	tempmodule.__dict__.clear() # clears references to the classes
	try:
		del(sys.modules[name])
	except KeyError:
		# never registered in sys.modules (or already removed)
		pass
	packagename, modname = _packageName(name)
	try:
		# NOTE(review): assumes the package is still in sys.modules;
		# a KeyError there would propagate -- confirm acceptable
		delattr(sys.modules[ packagename ], modname)
	except AttributeError:
		# parent package no longer holds a reference
		pass
	del( tempmodule ) # no, I don't see any reason to do it...
	return None
def deepcopy(templatemodule, packagename=None, basename=None):
	'''
	Rebuild the whole Module and it's included classes
	(just the classes).  Note: This will _not_ make instances
	based on the old classes point to the new classes!
	The value of this function is likely to be minimal given
	this restriction.  For pickling use deconstruct/reconstruct
	for simple copying just return the module.
	'''
	name, classbuilder = deconstruct( templatemodule )
	# BUGFIX: fill in whichever of packagename/basename the caller
	# omitted.  Previously tp/tb were only computed when packagename
	# was None, so passing packagename with basename omitted raised
	# a NameError on tb.
	if packagename is None or basename is None:
		tp, tb = _packageName( name )
		if packagename is None:
			packagename = tp
		if basename is None:
			basename = tb
	newmod = buildModule(packagename, basename, initialcontents=classbuilder )
	return newmod
if __name__ == "__main__":
	def testPickle ():
		'''
		Smoke test: build a temp module, add one plain and one
		derived temporary class, deconstruct, destroy and finally
		reconstruct the module.
		'''
		import mcf.vrml.prototype
		name, module = buildModule( 'mcf.vrml.temp', 'scenegraph' )
		buildClassIn( module, 'this', () )
		buildClassIn( module, 'that', (mcf.vrml.prototype.ProtoTypeNode,) )
		import pprint
		pprint.pprint( deconstruct( module ))
		name,builder = deconstruct( module )
		destroy( module)
		return reconstruct(name, builder)
	t = testPickle()
	print t

View File

@@ -0,0 +1,50 @@
'''
Classes of Types
Often you want to be able to say:
if type(obj) in MutableTypes:
yada
This module is intended to make that easier.
Just import and use :)
'''
import types
# groupings of the built-in type objects by capability; regarray()
# and regnumpy() below extend these lists with the optional
# array/Numeric array types
MutableTypes = [ types.ListType, types.DictType, types.InstanceType ]
MutableSequenceTypes = [ types.ListType ]
SequenceTypes = [ types.ListType, types.StringType, types.TupleType ]
NumericTypes = [ types.IntType, types.FloatType, types.LongType, types.ComplexType ]
MappingTypes = [ types.DictType ]
def regarray():
	'''
	Register the array module's type in the sequence/mutable type
	lists (idempotent).  Returns 1 on success, 0 when the array
	module is unavailable.
	'''
	if globals().has_key('array'):
		# already imported and registered
		return 1
	try:
		import array
	except ImportError:
		return 0
	SequenceTypes.append( array.ArrayType )
	MutableTypes.append( array.ArrayType )
	MutableSequenceTypes.append( array.ArrayType )
	return 1
def regnumpy():
	'''
	Call if you want to register numpy arrays
	according to their types.  Idempotent; returns 1 on success,
	0 when the Numeric module is unavailable.
	'''
	if globals().has_key('Numeric'):
		# already imported and registered
		return 1
	try:
		import Numeric
	except ImportError:
		return 0
	SequenceTypes.append( Numeric.ArrayType )
	MutableTypes.append( Numeric.ArrayType )
	MutableSequenceTypes.append( Numeric.ArrayType )
	return 1
# for now, always register the optional array types at import time;
# if this module becomes part of the base distribution it might be
# better to leave this out so numpy isn't always getting loaded...
regarray()
regnumpy()

View File

@@ -0,0 +1,17 @@
import string
def userquery( prompt, choices, contextdata = '', defaultind=0 ):
if contextdata:
print 'Contextual Information:', contextdata
for x in range( len( choices ) ):
print '(%s)'%x, `choices[x]`
choice = raw_input( prompt+( '(%s):'%defaultind ) )
if not choice:
return choices[ defaultind ]
try:
choice = string.atoi( choice )
return choices[ choice]
except IndexError :
return choices[ defaultind ]
except ValueError:
return choice

View File

@@ -0,0 +1,17 @@
'''
Module giving a float representation
of the interpreter major version (1.4, 1.5 etceteras)

ver -- Float representation of the current interpreter version

Note: Since I no longer have any Python 1.4 modules, this module is
no longer in use by me. I intend to leave it here for the next version
jump :) .
'''
import regex, sys, string
# NOTE(review): depends on the long-obsolete "regex" module, whose
# regex.match returns the matched *length* (an int), used here to slice
# the leading "[0-9.]*" run off sys.version before string.atof.
# NOTE(review): on three-part versions (e.g. "1.5.1") the slice is
# "1.5.1", which atof cannot parse -- presumably only exercised on
# two-part version strings; verify before reviving this module.
ver = string.atof(sys.version[:regex.match('[0-9.]*', sys.version)])
### Clean up namespace
del(regex)
del(sys)
del(string)

View File

@@ -0,0 +1,46 @@
'''
Really simplistic walker-processable hierobjects, doesn't
have parent attributes, every element has an __attrDict__
item and a childlist. This is different from the mechanisms
we'll want to use for multi-tree systems, but it's fairly
close. Should be fairly simply worked with.
'''
class WalkerAble:
	'''
	Simple hierarchic node object for walker-style processing.

	__attrDict__ -- app-specific attributes
	__childlist__ -- children of this node
	__gi__ -- "type" or Generic Indicator of this node
	__childlist__append__ -- as you'd expect, method on childlist to add an element

	Ordinary attribute access is redirected into __attrDict__ by
	__getattr__/__setattr__, so the reserved slots above are written
	through self.__dict__ to bypass that redirection.
	'''
	def __init__(self, childlist=None, attrDict=None, gi=None):
		# Assign via __dict__ directly: our __setattr__ would otherwise
		# divert these writes into __attrDict__.
		self.__dict__['__attrDict__'] = attrDict or {}
		self.__dict__['__childlist__'] = childlist or []
		self.__dict__['__gi__'] = gi or ''
		# Bound-method shortcut for appending children.
		self.__dict__['__childlist__append__'] = self.__childlist__.append
	def __getattr__(self, attrName):
		'''
		Look up attributes missing from __dict__ in __attrDict__.

		Note: you can store attributes with the same names as
		the reserved names, but to get them back, you'll need
		to read it directly out of the attrDict
		'''
		# The guard avoids infinite recursion if __attrDict__ itself is
		# absent (e.g. lookups on an instance before __init__ has run).
		if attrName != '__attrDict__':
			try:
				return self.__attrDict__[attrName]
			except KeyError:
				pass
		raise AttributeError, attrName
	def __setattr__(self, attrName, attrVal):
		# Every ordinary attribute write lands in __attrDict__.
		self.__attrDict__[attrName] = attrVal
	def __setGI__(self, gi):
		# Explicit setter for the node "type"; writes through __dict__
		# so the value is not diverted into __attrDict__.
		self.__dict__['__gi__'] = gi
	def __repr__(self):
		return '''<WalkerAble %(__gi__)s %(__attrDict__)s %(__childlist__)s>'''%self.__dict__
# copy functions
#	def __getinitargs__(self):
#		return (self.__childlist__, self.__attrDict__, self.__gi__)