Initial revision

This commit is contained in:
Hans Lambermont
2002-10-12 11:37:38 +00:00
commit 12315f4d0e
1699 changed files with 444708 additions and 0 deletions

349
intern/python/Blender.py Normal file
View File

@@ -0,0 +1,349 @@
#! /usr/bin/env python
#######################
# (c) Jan Walter 2000 #
#######################
# CVS
# $Author$
# $Date$
# $RCSfile$
# $Revision$
"""This is the Python API for Blender"""
def _findNewName(name, names):
import string
words = string.split(name, ".")
basename = words[0]
newname = name
num = 1
while newname in names:
newname = basename + ".%03d" % num
num = num + 1
return newname
###################
# Blender classes #
###################
class Camera:
    """Camera datablock: name, ipo curves, focal length, and clip range."""

    def __init__(self, name, Lens = 35.0, ClipSta = 0.1, ClipEnd = 100.0):
        # Identity and animation curves.
        self.name = name
        self.ipos = {}
        # Optics: focal length plus near/far clipping distances.
        self.Lens, self.ClipSta, self.ClipEnd = Lens, ClipSta, ClipEnd
class Curve:
    """Curve datablock: name, ipo curves, and a material list (stub)."""

    def __init__(self, name):
        # Only the common datablock fields are implemented so far.
        self.name = name
        self.ipos = {}
        self.materials = []
class Ika:
    """Ika (inverse kinematics) datablock -- name only, placeholder."""

    def __init__(self, name):
        self.name = name
class Ipo:
    """Ipo (animation curve) datablock -- name only, placeholder."""

    def __init__(self, name):
        self.name = name
class Lamp:
    """Lamp datablock: energy, colour, spot size, offset, and size."""

    def __init__(self, name, Energ = 1.0, R = 1.0, G = 1.0, B = 1.0,
                 SpoSi = 45.0,
                 OfsX = 0.0, OfsY = 0.0, OfsZ = 0.0,
                 SizeX = 1.0, SizeY = 1.0, SizeZ = 1.0):
        self.name = name
        self.ipos = {}
        # Light intensity and RGB colour.
        self.Energ, self.R, self.G, self.B = Energ, R, G, B
        # Spotlight cone angle (degrees).
        self.SpoSi = SpoSi
        # Placement offset and size along each axis.
        self.OfsX, self.OfsY, self.OfsZ = OfsX, OfsY, OfsZ
        self.SizeX, self.SizeY, self.SizeZ = SizeX, SizeY, SizeZ
class Material:
    """Material datablock: diffuse/specular/mirror colours plus shading
    scalars (reflectivity, alpha, emit, ambient, specularity, hardness)."""

    def __init__(self, name,
                 R = 0.8, G = 0.8, B = 0.8,
                 SpecR = 1.0, SpecG = 1.0, SpecB = 1.0,
                 MirR = 1.0, MirG = 1.0, MirB = 1.0,
                 Ref = 0.8, Alpha = 1.0, Emit = 0.0, Amb = 0.5,
                 Spec = 0.5, Hard = 50):
        self.name = name
        self.ipos = {}
        # Colour triples: diffuse, specular, mirror.
        self.R, self.G, self.B = R, G, B
        self.SpecR, self.SpecG, self.SpecB = SpecR, SpecG, SpecB
        self.MirR, self.MirG, self.MirB = MirR, MirG, MirB
        # Scalar shading parameters.
        self.Ref, self.Alpha, self.Emit, self.Amb = Ref, Alpha, Emit, Amb
        self.Spec, self.Hard = Spec, Hard
class Matrix:
    """A 4x4 transform, initialized to identity, stored as nested row lists."""

    def __init__(self):
        # Row-major 4x4 identity matrix.
        self.elements = [[1, 0, 0, 0],
                         [0, 1, 0, 0],
                         [0, 0, 1, 0],
                         [0, 0, 0, 1]]

    def __repr__(self):
        # Fix: the original bound the formatted text to a local named "str",
        # shadowing the builtin; return the formatted rows directly.
        return "%s" % self.elements
class Mesh:
    """Creates an (empty) instance of a Blender mesh.\n\
E.g.: "m = Blender.Mesh('Plane')"\n\
To create faces first add vertices with \n\
"i1 = m.addVertex(x, y, z, nx, ny, nz, r = -1.0, g = 0.0, b = 0.0)"\n\
then create faces with "index = m.addFace(i1, i2, i3, i4, isSmooth)"."""
    # (Docstring fix: the second colour argument is g, not a duplicate r.)

    # Registry of every mesh created so far, keyed by (unique) name.
    _meshes = {}

    def __init__(self, name):
        self.name = name
        self.ipos = {}
        self.materials = []
        self.vertices = []  # [x, y, z] per vertex
        self.normals = []   # [nx, ny, nz], parallel to vertices
        self.colors = []    # [r, g, b], only for vertices passed r != -1.0
        self.faces = []     # [i1, i2, i3, i4, isSmooth]
        # Keep mesh names unique, like Blender's own datablock naming.
        # print(...) with a single argument works in Python 2 and 3 alike.
        if name in Mesh._meshes.keys():
            print('Mesh "%s" already exists ...' % name)
            self.name = _findNewName(name, Mesh._meshes.keys())
            print('... so it will be called "%s"' % self.name)
        Mesh._meshes[self.name] = self

    def __repr__(self):
        # Renamed local: the original shadowed the builtin str().
        text = 'Mesh(name = "%s",\n' % self.name
        text = text + ' vertices = %s,\n' % len(self.vertices)
        text = text + ' faces = %s)' % len(self.faces)
        return text

    def addFace(self, i1, i2, i3, i4, isSmooth):
        """addFace(self, i1, i2, i3, i4)"""
        self.faces.append([i1, i2, i3, i4, isSmooth])
        return (len(self.faces) - 1)

    def addVertex(self, x, y, z, nx, ny, nz, r = -1.0, g = 0.0, b = 0.0):
        """addVertex(self, x, y, z, nx, ny, nz, r = -1.0, g = 0.0, b = 0.0)"""
        self.vertices.append([x, y, z])
        self.normals.append([nx, ny, nz])
        # r == -1.0 is the sentinel for "no vertex colour".
        if r != -1.0:
            self.colors.append([r, g, b])
        return (len(self.vertices) - 1)
class MetaBall:
    """MetaBall datablock -- name only, placeholder."""

    def __init__(self, name):
        self.name = name
class Object:
    """Creates an instance of a Blender object"""

    # Registry of every object created so far, keyed by (unique) name.
    _objects = {}

    def __init__(self, name):
        self.name = name
        self.ipos = {}
        self.materials = []
        self.matrix = Matrix()
        self.data = None  # name of the linked datablock (set by connect())
        self.type = None  # datablock type string, e.g. "Mesh"
        # Keep object names unique, like Blender's own datablock naming.
        # print(...) with a single argument works in Python 2 and 3 alike.
        if name in Object._objects.keys():
            print('Object "%s" already exists ...' % name)
            self.name = _findNewName(name, Object._objects.keys())
            print('... so it will be called "%s"' % self.name)
        Object._objects[self.name] = self

    def __repr__(self):
        # Renamed local: the original shadowed the builtin str().
        text = 'Object(name = "%s",\n' % self.name
        text = text + ' matrix = %s,\n' % self.matrix
        if self.type:
            text = text + ' data = %s("%s"))' % (self.type, self.data)
        else:
            text = text + ' data = None)'
        return text
class Scene:
    """Creates an instance of a Blender scene"""

    # Registry of every scene created so far, keyed by name.
    _scenes = {}

    def __init__(self, name):
        self.name = name
        self.objects = []  # names of the objects linked to this scene
        Scene._scenes[self.name] = self

    def __repr__(self):
        # Renamed local: the original shadowed the builtin str().
        text = 'Scene(name = "%s", \n' % self.name
        text = text + ' objects = %s)' % len(self.objects)
        return text

    def addObject(self, object):
        """addObject(self, object)"""
        # Scenes store object *names*, not the objects themselves.
        self.objects.append(object.name)
        return (len(self.objects) - 1)
class Surface:
    """Surface datablock: name, ipo curves, and a material list (stub)."""

    def __init__(self, name):
        # Only the common datablock fields are implemented so far.
        self.name = name
        self.ipos = {}
        self.materials = []
class Text(Surface):
    """Text datablock -- a Surface with no extra fields yet."""

    def __init__(self, name):
        # Fix: the original called Surface.__init__(name), dropping self,
        # which raises a TypeError at instantiation time.
        Surface.__init__(self, name)
##############
# primitives #
##############
def addMesh(type, sceneName):
    """Blender.addMesh(type, sceneName)\n\
where type is one of ["Plane"]"""
    # Recognized-but-unimplemented primitive types (silently do nothing).
    _placeholders = ("Cube", "Circle", "UVsphere", "Icosphere",
                     "Cylinder", "Tube", "Cone", "Grid")
    if type == "Plane":
        # Build the object/mesh pair plus a unit quad in the XY plane.
        obj = Object(type)
        mesh = Mesh(type)
        corners = [(+1.0, +1.0), (+1.0, -1.0), (-1.0, -1.0), (-1.0, +1.0)]
        indices = [mesh.addVertex(x, y, 0.0, 0.0, 0.0, 1.0)
                   for (x, y) in corners]
        # Wind the face i1, i4, i3, i2 so the normal points along +Z.
        mesh.addFace(indices[0], indices[3], indices[2], indices[1], 0)
        # Link mesh -> object and object -> scene by prefixed name.
        connect("OB" + obj.name, "ME" + mesh.name)
        connect("SC" + sceneName, "OB" + obj.name)
        return obj.name, mesh.name
    elif type in _placeholders:
        pass
    else:
        raise TypeError
def addCurve(type):
    """Accept a known curve type name; creation is not implemented yet.

    Raises TypeError for unknown type names, mirroring addMesh().
    """
    _known = ("Bezier Curve", "Bezier Circle", "Nurbs Curve",
              "Nurbs Circle", "Path")
    if type not in _known:
        raise TypeError
def addSurface(type):
    """Accept a known surface type name; creation is not implemented yet.

    Raises TypeError for unknown type names, mirroring addMesh().
    """
    _known = ("Curve", "Circle", "Surface", "Tube", "Sphere", "Donut")
    if type not in _known:
        raise TypeError
def connect(objName1, objName2):
    """connect(objName1, objName2)

    Link two datablocks by prefixed name: "OB<name>" + "ME<name>" links a
    mesh into an object; "SC<name>" + "OB<name>" links an object into a
    scene.  Any other prefix pair is reported as an error (best effort).
    """
    if objName1[:2] == "OB" and objName2[:2] == "ME":
        obj1 = getObject(objName1[2:])
        obj1.data = objName2[2:]
        obj1.type = "Mesh"
    elif objName1[:2] == "SC" and objName2[:2] == "OB":
        obj1 = getScene(objName1[2:])
        obj2 = getObject(objName2[2:])
        obj1.addObject(obj2)
    else:
        # print(...) with a single argument works in Python 2 and 3 alike
        # (the original used a Python-2-only print statement).
        print("ERROR: connect(%s, %s)" % (objName1, objName2))
def getCurrentScene():
    """getCurrentScene()"""
    # NOTE(review): Scene._scenes is keyed by scene *name* (a string), so
    # indexing with 0 raises KeyError unless a scene was literally named 0.
    # The C implementation keeps a dedicated current-scene slot instead --
    # confirm the intended lookup key here before relying on this.
    return Scene._scenes[0]
def getMesh(name):
    """getMesh(name)"""
    # dict.get() already yields None for unknown names.
    return Mesh._meshes.get(name)
def getObject(name):
    """getObject(name)"""
    # dict.get() already yields None for unknown names.
    return Object._objects.get(name)
def getScene(name):
    """getScene(name)"""
    # dict.get() already yields None for unknown names.
    return Scene._scenes.get(name)
def testBlender():
    """Smoke test: build a scene, a plane mesh, and a second object sharing
    the same mesh, then dump the module-level registries.

    print(...) with a single argument works in Python 2 and 3 alike
    (the original used Python-2-only print statements).
    """
    scene = Scene("1")
    print(scene)
    objName, meshName = addMesh("Plane", "1")
    print(scene)
    obj = Object("Plane")
    connect("OB" + obj.name, "ME" + meshName)
    connect("SC" + scene.name, "OB" + obj.name)
    print(scene)
    for name in scene.objects:
        obj = getObject(name)
        print(obj)
        if obj.type == "Mesh":
            mesh = getMesh(obj.data)
            print(mesh)
            print(mesh.vertices)
            print(mesh.faces)
    # Creating a second "Plane" exercises the duplicate-name renaming.
    Mesh("Plane")
    # print global data
    print(Scene._scenes)
    print(Object._objects)
    print(Mesh._meshes)

if __name__ == "__main__":
    testBlender()

7
intern/python/README Normal file
View File

@@ -0,0 +1,7 @@
setenv CFLAGS "-g -Wall -I/usr/local/Python-1.6/Include"
setenv LDFLAGS "-L/usr/local/Python-1.6"
aclocal
autoconf
automake -a
./configure
make

View File

@@ -0,0 +1,873 @@
/**
* $Id$
* ***** BEGIN GPL/BL DUAL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version. The Blender
* Foundation also sells licenses for use in proprietary software under
* the Blender License. See http://www.blender.org/BL/ for information
* about this.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The Original Code is Copyright (C) 2001-2002 by NaN Holding BV.
* All rights reserved.
*
* The Original Code is: all of this file.
*
* Contributor(s): none yet.
*
* ***** END GPL/BL DUAL LICENSE BLOCK *****
*/
/***************************************************************************
main.c - description
-------------------
begin : Fri Sep 15 19:19:43 CEST 2000
copyright : (C) 2000 by Jan Walter
email : jan@blender.nl
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
/* CVS */
/* $Author$ */
/* $Date$ */
/* $RCSfile$ */
/* $Revision$ */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include "Python.h"
/* Module-level exception object, exposed as Blender.error. */
static PyObject* ErrorObject;
/* The single scene created at module init; returned by getCurrentScene(). */
static PyObject* _scene;

/* Forward declaration: blend_addMesh() calls connect() defined below. */
static PyObject* blend_connect(PyObject* self, PyObject* args);

/**************/
/* structures */
/**************/

/* Mesh: fixed-size name plus parallel Python lists of vertex data. */
typedef struct {
  PyObject_HEAD
  char name[24];      /* NOTE(review): assumes names fit in 23 chars + NUL */
  PyObject* vertices; /* list of [x, y, z] */
  PyObject* normals;  /* list of [nx, ny, nz], parallel to vertices */
  PyObject* faces;    /* list of [i1, i2, i3, i4, isSmooth] */
} mshobject;

staticforward PyTypeObject Mshtype;

/* Object: 4x4 transform plus an optional linked datablock. */
typedef struct {
  PyObject_HEAD
  char name[24];
  PyObject* matrix;   /* 4x4 nested-list transform */
  PyObject* data;     /* linked datablock (mesh) or Py_None */
  PyObject* type;     /* type-name string ("Mesh") or Py_None */
} objobject;

staticforward PyTypeObject Objtype;

/* Scene: the list of objects linked to it. */
typedef struct {
  PyObject_HEAD
  char name[24];
  PyObject* objects;  /* list of objobject references */
} sceobject;

staticforward PyTypeObject Scetype;
/********/
/* mesh */
/********/
static char msh_addFace__doc__[] =
"addFace(self, i1, i2, i3, i4, isSmooth)"
;

/* Append one quad face [i1, i2, i3, i4, isSmooth]; return its index. */
static PyObject*
msh_addFace(mshobject* self, PyObject* args)
{
  int index;
  int i1, i2, i3, i4;
  int isSmooth;
  PyObject *item = NULL;

  if (!PyArg_ParseTuple(args, "iiiii", &i1, &i2, &i3, &i4, &isSmooth))
  {
    return NULL;
  }
  item = PyList_New(5);
  if (item == NULL)
  {
    return NULL;
  }
  PyList_SetItem(item, 0, PyInt_FromLong(i1));
  PyList_SetItem(item, 1, PyInt_FromLong(i2));
  PyList_SetItem(item, 2, PyInt_FromLong(i3));
  PyList_SetItem(item, 3, PyInt_FromLong(i4));
  PyList_SetItem(item, 4, PyInt_FromLong(isSmooth));
  PyList_Append(self->faces, item);
  /* Fix: PyList_Append() does not steal the reference; drop ours so the
   * item is not leaked (the list now holds its own reference). */
  Py_DECREF(item);
  index = PyList_Size(self->faces) - 1;
  return PyInt_FromLong(index);
}
static char msh_addVertex__doc__[] =
"addVertex(self, x, y, z, nx, ny, nz)"
;

/* Append one vertex position and its normal; return the new vertex index. */
static PyObject*
msh_addVertex(mshobject* self, PyObject* args)
{
  int index;
  float x, y, z, nx, ny, nz;
  PyObject *item1 = NULL;
  PyObject *item2 = NULL;

  if (!PyArg_ParseTuple(args, "ffffff", &x, &y, &z, &nx, &ny, &nz))
  {
    return NULL;
  }
  item1 = PyList_New(3);
  item2 = PyList_New(3);
  if (item1 == NULL || item2 == NULL)
  {
    Py_XDECREF(item1);
    Py_XDECREF(item2);
    return NULL;
  }
  PyList_SetItem(item1, 0, PyFloat_FromDouble(x));
  PyList_SetItem(item1, 1, PyFloat_FromDouble(y));
  PyList_SetItem(item1, 2, PyFloat_FromDouble(z));
  PyList_SetItem(item2, 0, PyFloat_FromDouble(nx));
  PyList_SetItem(item2, 1, PyFloat_FromDouble(ny));
  PyList_SetItem(item2, 2, PyFloat_FromDouble(nz));
  PyList_Append(self->vertices, item1);
  PyList_Append(self->normals, item2);
  /* Fix: PyList_Append() does not steal references; drop ours so the
   * two triples are not leaked (the lists hold their own references). */
  Py_DECREF(item1);
  Py_DECREF(item2);
  index = PyList_Size(self->vertices) - 1;
  return PyInt_FromLong(index);
}
/* Method table for mesh instances. */
static struct PyMethodDef msh_methods[] = {
 {"addFace",   (PyCFunction)msh_addFace,   METH_VARARGS, msh_addFace__doc__},
 {"addVertex", (PyCFunction)msh_addVertex, METH_VARARGS, msh_addVertex__doc__},
 { NULL, NULL }
};

/* Allocate a mesh with the given name and empty vertex/normal/face lists. */
static mshobject*
newmshobject(char* name)
{
  mshobject* self;

  self = PyObject_NEW(mshobject, &Mshtype);
  if (self == NULL)
  {
    return NULL;
  }
  /* NOTE(review): no length check -- name must fit the 24-byte buffer,
   * and the PyList_New() results are not checked for NULL. */
  strcpy(self->name, name);
  self->vertices = PyList_New(0);
  self->normals  = PyList_New(0);
  self->faces    = PyList_New(0);
  return self;
}

/* Release the three member lists, then the mesh object itself. */
static void
msh_dealloc(mshobject* self)
{
  mshobject* msh = (mshobject*) self;

  Py_DECREF(msh->vertices);
  Py_DECREF(msh->normals);
  Py_DECREF(msh->faces);
  PyMem_DEL(self);
}
/* tp_print: human-readable summary written directly to the stream. */
static int
msh_print(mshobject* self, FILE* fp, int flags)
{
  fprintf(fp, "Mesh(name = \"%s\",\n", self->name);
  fprintf(fp, "     vertices = %d,\n", PyList_Size(self->vertices));
  fprintf(fp, "     faces = %d)\n", PyList_Size(self->faces));
  return 0;
}

/* tp_repr: placeholder -- does not include the mesh name or contents. */
static PyObject*
msh_repr(mshobject* self)
{
  PyObject* s;

  s = PyString_FromString("Mesh()\n");
  return s;
}

/* tp_str: same placeholder text as msh_repr(). */
static PyObject*
msh_str(mshobject* self)
{
  PyObject* s;

  s = PyString_FromString("Mesh()\n");
  return s;
}
#include "structmember.h"

/* Read-only attribute table for PyMember_Get()/PyMember_Set(). */
static struct memberlist msh_memberlist[] = {
  /* XXXX Add lines like { "foo", T_INT, OFF(foo), RO } */
  {"vertices", T_OBJECT, offsetof(mshobject, vertices), RO},
  {"normals",  T_OBJECT, offsetof(mshobject, normals),  RO},
  {"faces",    T_OBJECT, offsetof(mshobject, faces),    RO},
  {NULL}
};

/* tp_getattr: try the member table first, then fall back to methods. */
static PyObject*
msh_getattr(mshobject* self, char* name)
{
  PyObject* rv;

  /* XXXX Add your own getattr code here */
  rv = PyMember_Get((char*) self, msh_memberlist, name);
  if (rv)
  {
    return rv;
  }
  PyErr_Clear();
  return Py_FindMethod(msh_methods, (PyObject*)self, name);
}
/* tp_setattr: deletion is refused; assignment goes via the member table
 * (all mesh members are RO, so PyMember_Set() raises for them). */
static int
msh_setattr(mshobject* self, char* name, PyObject* v)
{
  if ( v == NULL )
  {
    PyErr_SetString(PyExc_AttributeError, "Cannot delete attribute");
    return -1;
  }
  /* Fix: pass the real object base address instead of the template's
   * placeholder 0 -- with a NULL base any writable member would be
   * written through a near-NULL pointer. */
  return PyMember_Set((char*)self, msh_memberlist, name, v);
}
static char Mshtype__doc__[] =
""
;

/* Type object for Mesh instances (old-style, pre-2.2 slot layout). */
static PyTypeObject Mshtype = {
  PyObject_HEAD_INIT(&PyType_Type)
  0,                            /*ob_size*/
  "Mesh",                       /*tp_name*/
  sizeof(mshobject),            /*tp_basicsize*/
  0,                            /*tp_itemsize*/
  /* methods */
  (destructor) msh_dealloc,     /*tp_dealloc*/
  (printfunc) msh_print,        /*tp_print*/
  (getattrfunc) msh_getattr,    /*tp_getattr*/
  (setattrfunc) msh_setattr,    /*tp_setattr*/
  (cmpfunc) 0,                  /*tp_compare*/
  (reprfunc) msh_repr,          /*tp_repr*/
  0,                            /*tp_as_number*/
  0,                            /*tp_as_sequence*/
  0,                            /*tp_as_mapping*/
  (hashfunc) 0,                 /*tp_hash*/
  (ternaryfunc) 0,              /*tp_call*/
  (reprfunc) msh_str,           /*tp_str*/
  /* Space for future expansion */
  0L,0L,0L,0L,
  Mshtype__doc__                /* Documentation string */
};
/**********/
/* object */
/**********/
/* Objects expose no methods of their own; attributes only. */
static struct PyMethodDef obj_methods[] = {
  { NULL, NULL }
};

/* Allocate an object with a 4x4 identity matrix and no linked data. */
static objobject*
newobjobject(char* name)
{
  objobject* self = NULL;
  PyObject* row1 = NULL;
  PyObject* row2 = NULL;
  PyObject* row3 = NULL;
  PyObject* row4 = NULL;

  self = PyObject_NEW(objobject, &Objtype);
  if (self == NULL)
  {
    return NULL;
  }
  /* NOTE(review): no length check -- name must fit the 24-byte buffer,
   * and the PyList_New() results are not checked for NULL. */
  strcpy(self->name, name);
  /* Build the identity matrix row by row; PyList_SetItem() steals each
   * reference, so the rows and ints need no explicit DECREF. */
  self->matrix = PyList_New(4);
  row1 = PyList_New(4);
  row2 = PyList_New(4);
  row3 = PyList_New(4);
  row4 = PyList_New(4);
  PyList_SetItem(row1, 0, PyInt_FromLong(1));
  PyList_SetItem(row1, 1, PyInt_FromLong(0));
  PyList_SetItem(row1, 2, PyInt_FromLong(0));
  PyList_SetItem(row1, 3, PyInt_FromLong(0));
  PyList_SetItem(row2, 0, PyInt_FromLong(0));
  PyList_SetItem(row2, 1, PyInt_FromLong(1));
  PyList_SetItem(row2, 2, PyInt_FromLong(0));
  PyList_SetItem(row2, 3, PyInt_FromLong(0));
  PyList_SetItem(row3, 0, PyInt_FromLong(0));
  PyList_SetItem(row3, 1, PyInt_FromLong(0));
  PyList_SetItem(row3, 2, PyInt_FromLong(1));
  PyList_SetItem(row3, 3, PyInt_FromLong(0));
  PyList_SetItem(row4, 0, PyInt_FromLong(0));
  PyList_SetItem(row4, 1, PyInt_FromLong(0));
  PyList_SetItem(row4, 2, PyInt_FromLong(0));
  PyList_SetItem(row4, 3, PyInt_FromLong(1));
  PyList_SetItem(self->matrix, 0, row1);
  PyList_SetItem(self->matrix, 1, row2);
  PyList_SetItem(self->matrix, 2, row3);
  PyList_SetItem(self->matrix, 3, row4);
  /* No datablock linked yet; connect() fills these in later. */
  Py_INCREF(Py_None);
  self->data = Py_None;
  Py_INCREF(Py_None);
  self->type = Py_None;
  return self;
}
/* Release the matrix and linked-data references, then the object. */
static void
obj_dealloc(objobject* self)
{
  objobject* obj = (objobject*) self;

  Py_DECREF(obj->matrix);
  Py_DECREF(obj->data);
  Py_DECREF(obj->type);
  PyMem_DEL(self);
}

/* tp_print: summary including the linked datablock, if any. */
static int
obj_print(objobject* self, FILE* fp, int flags)
{
  fprintf(fp, "Object(name = \"%s\",\n", self->name);
/*    fprintf(fp, "       matrix = %s,\n", */
/*  	  PyString_AsString(mtx_repr((mtxobject*) self->matrix))); */
  if (self->type == Py_None)
  {
    fprintf(fp, "       data = None)\n");
  }
  else
  {
    /* NOTE(review): assumes a non-None data member is a mshobject --
     * confirm if other datablock types are ever linked. */
    fprintf(fp, "       data = %s(\"%s\"))\n",
            PyString_AsString(self->type),
            ((mshobject*) self->data)->name);
  }
  return 0;
}

/* tp_repr: placeholder -- does not include the object name. */
static PyObject*
obj_repr(objobject* self)
{
  PyObject* s;

  s = PyString_FromString("Object()\n");
  return s;
}

/* tp_str: same placeholder text as obj_repr(). */
static PyObject*
obj_str(objobject* self)
{
  PyObject* s;

  s = PyString_FromString("Object()\n");
  return s;
}
#include "structmember.h"

/* Read-only attribute table for PyMember_Get()/PyMember_Set(). */
static struct memberlist obj_memberlist[] = {
  /* XXXX Add lines like { "foo", T_INT, OFF(foo), RO } */
  {"data",   T_OBJECT, offsetof(objobject, data),   RO},
  {"matrix", T_OBJECT, offsetof(objobject, matrix), RO},
  {"type",   T_OBJECT, offsetof(objobject, type),   RO},
  {NULL}
};

/* tp_getattr: try the member table first, then fall back to methods. */
static PyObject*
obj_getattr(objobject* self, char* name)
{
  PyObject* rv;

  /* XXXX Add your own getattr code here */
  rv = PyMember_Get((char*) self, obj_memberlist, name);
  if (rv)
  {
    return rv;
  }
  PyErr_Clear();
  return Py_FindMethod(obj_methods, (PyObject*)self, name);
}
/* tp_setattr: deletion is refused; assignment goes via the member table
 * (all object members are RO, so PyMember_Set() raises for them). */
static int
obj_setattr(objobject* self, char* name, PyObject* v)
{
  if ( v == NULL )
  {
    PyErr_SetString(PyExc_AttributeError, "Cannot delete attribute");
    return -1;
  }
  /* Fix: pass the real object base address instead of the template's
   * placeholder 0 -- with a NULL base any writable member would be
   * written through a near-NULL pointer. */
  return PyMember_Set((char*)self, obj_memberlist, name, v);
}
static char Objtype__doc__[] =
""
;

/* Type object for Object instances (old-style, pre-2.2 slot layout). */
static PyTypeObject Objtype = {
  PyObject_HEAD_INIT(&PyType_Type)
  0,                            /*ob_size*/
  "Object",                     /*tp_name*/
  sizeof(objobject),            /*tp_basicsize*/
  0,                            /*tp_itemsize*/
  /* methods */
  (destructor) obj_dealloc,     /*tp_dealloc*/
  (printfunc) obj_print,        /*tp_print*/
  (getattrfunc) obj_getattr,    /*tp_getattr*/
  (setattrfunc) obj_setattr,    /*tp_setattr*/
  (cmpfunc) 0,                  /*tp_compare*/
  (reprfunc) obj_repr,          /*tp_repr*/
  0,                            /*tp_as_number*/
  0,                            /*tp_as_sequence*/
  0,                            /*tp_as_mapping*/
  (hashfunc) 0,                 /*tp_hash*/
  (ternaryfunc) 0,              /*tp_call*/
  (reprfunc) obj_str,           /*tp_str*/
  /* Space for future expansion */
  0L,0L,0L,0L,
  Objtype__doc__                /* Documentation string */
};
/*********/
/* scene */
/*********/
static char sce_addObject__doc__[] =
"addObject(self, object)"
;

/* Append an object to the scene's object list; return its index.
 * PyList_Append() adds its own reference to the borrowed argument. */
static PyObject*
sce_addObject(sceobject* self, PyObject* args)
{
  int index;
  PyObject* object = NULL;

  if (!PyArg_ParseTuple(args, "O", &object))
  {
    return NULL;
  }
  PyList_Append(self->objects, object);
  index = PyList_Size(self->objects) - 1;
  return PyInt_FromLong(index);
}

/* Method table for scene instances. */
static struct PyMethodDef sce_methods[] = {
 {"addObject", (PyCFunction)sce_addObject, METH_VARARGS, sce_addObject__doc__},
 { NULL, NULL }
};

/* Allocate a scene with the given name and an empty object list. */
static sceobject*
newsceobject(char* name)
{
  sceobject* self;

  self = PyObject_NEW(sceobject, &Scetype);
  if (self == NULL)
  {
    return NULL;
  }
  /* NOTE(review): no length check -- name must fit the 24-byte buffer,
   * and the PyList_New() result is not checked for NULL. */
  strcpy(self->name, name);
  self->objects = PyList_New(0);
  return self;
}
/* Release the object list, then the scene itself. */
static void
sce_dealloc(sceobject* self)
{
  sceobject* sce = (sceobject*) self;

  Py_DECREF(sce->objects);
  PyMem_DEL(self);
}

/* tp_print: human-readable summary written directly to the stream. */
static int
sce_print(sceobject* self, FILE* fp, int flags)
{
  fprintf(fp, "Scene(name = \"%s\",\n", self->name);
  fprintf(fp, "      objects = %d)\n", PyList_Size(self->objects));
  return 0;
}

/* tp_repr: placeholder -- does not include the scene name. */
static PyObject*
sce_repr(sceobject* self)
{
  PyObject* s;

  s = PyString_FromString("Scene()\n");
  return s;
}

/* tp_str: same placeholder text as sce_repr(). */
static PyObject*
sce_str(sceobject* self)
{
  PyObject* s;

  s = PyString_FromString("Scene()\n");
  return s;
}

#include "structmember.h"

/* Read-only attribute table for PyMember_Get()/PyMember_Set(). */
static struct memberlist sce_memberlist[] = {
  /* XXXX Add lines like { "foo", T_INT, OFF(foo), RO } */
  {"objects", T_OBJECT, offsetof(sceobject, objects), RO},
  {NULL}
};
/* tp_getattr: try the member table first, then fall back to methods. */
static PyObject*
sce_getattr(sceobject* self, char* name)
{
  PyObject* rv;

  /* XXXX Add your own getattr code here */
  rv = PyMember_Get((char*) self, sce_memberlist, name);
  if (rv)
  {
    return rv;
  }
  PyErr_Clear();
  return Py_FindMethod(sce_methods, (PyObject*)self, name);
}
/* tp_setattr: deletion is refused; assignment goes via the member table
 * (all scene members are RO, so PyMember_Set() raises for them). */
static int
sce_setattr(sceobject* self, char* name, PyObject* v)
{
  if ( v == NULL )
  {
    PyErr_SetString(PyExc_AttributeError, "Cannot delete attribute");
    return -1;
  }
  /* Fix: pass the real object base address instead of the template's
   * placeholder 0 -- with a NULL base any writable member would be
   * written through a near-NULL pointer. */
  return PyMember_Set((char*)self, sce_memberlist, name, v);
}
static char Scetype__doc__[] =
""
;

/* Type object for Scene instances (old-style, pre-2.2 slot layout). */
static PyTypeObject Scetype = {
  PyObject_HEAD_INIT(&PyType_Type)
  0,                            /*ob_size*/
  "Scene",                      /*tp_name*/
  sizeof(sceobject),            /*tp_basicsize*/
  0,                            /*tp_itemsize*/
  /* methods */
  (destructor) sce_dealloc,     /*tp_dealloc*/
  (printfunc) sce_print,        /*tp_print*/
  (getattrfunc) sce_getattr,    /*tp_getattr*/
  (setattrfunc) sce_setattr,    /*tp_setattr*/
  (cmpfunc) 0,                  /*tp_compare*/
  (reprfunc) sce_repr,          /*tp_repr*/
  0,                            /*tp_as_number*/
  0,                            /*tp_as_sequence*/
  0,                            /*tp_as_mapping*/
  (hashfunc) 0,                 /*tp_hash*/
  (ternaryfunc) 0,              /*tp_call*/
  (reprfunc) sce_str,           /*tp_str*/
  /* Space for future expansion */
  0L,0L,0L,0L,
  Scetype__doc__                /* Documentation string */
};
static char blend_Mesh__doc__[] =
"Creates an (empty) instance of a Blender mesh.\n\
E.g.: \"m = Blender.Mesh('Plane')\"\n\
To create faces first add vertices with \n\
\"i1 = m.addVertex(x, y, z, nx, ny, nz)\"\n\
then create faces with \"index = m.addFace(i1, i2, i3, i4, isSmooth)\".\
"
;

/* Blender.Mesh(): stub -- accepts no arguments and returns None; it does
 * NOT create a mesh yet (unlike the doc string above suggests). */
static PyObject*
blend_Mesh(PyObject* self, PyObject* args)
{
  if (!PyArg_ParseTuple(args, ""))
  {
    return NULL;
  }
  Py_INCREF(Py_None);
  return Py_None;
}

static char blend_Object__doc__[] =
"Creates an instance of a Blender object"
;

/* Blender.Object(name): create and return a new object. */
static PyObject*
blend_Object(PyObject* self, PyObject* args)
{
  char* name = NULL;

  if (!PyArg_ParseTuple(args, "s", &name))
  {
    return NULL;
  }
  return ((PyObject*) newobjobject(name));
}

static char blend_Scene__doc__[] =
"Creates an instance of a Blender scene"
;

/* Blender.Scene(name): create and return a new scene. */
static PyObject*
blend_Scene(PyObject* self, PyObject* args)
{
  char* name = NULL;

  if (!PyArg_ParseTuple(args, "s", &name))
  {
    return NULL;
  }
  return ((PyObject*) newsceobject(name));
}
static char blend_addMesh__doc__[] =
"Blender.addMesh(type, scene)\n\
where type is one of [\"Plane\"]"
;

/* Blender.addMesh(type, scene): for "Plane", build a unit quad mesh, an
 * owning object, link both into the scene, and return (object, mesh).
 * Any other type returns None. */
static PyObject*
blend_addMesh(PyObject* self, PyObject* args)
{
  char* type = NULL;
  PyObject* scene = NULL;
  PyObject* tuple = NULL;
  PyObject* object = NULL;
  PyObject* mesh = NULL;
  PyObject* index = NULL;
  PyObject* indices = NULL;

  if (!PyArg_ParseTuple(args, "sO", &type, &scene))
  {
    return NULL;
  }
  if (strcmp(type, "Plane") == 0)
  {
    object = (PyObject*) newobjobject(type);
    mesh = (PyObject*) newmshobject(type);
    indices = PyList_New(5);
    /* vertices */
    /* NOTE(review): each Py_BuildValue() argument tuple below is never
     * released, nor is `indices` -- small per-call leaks; confirm and
     * clean up if addMesh is ever called repeatedly. */
    index = msh_addVertex((mshobject*) mesh,
                          Py_BuildValue("ffffff",
                                        1.0, 1.0, 0.0, 0.0, 0.0, 1.0));
    PyList_SetItem(indices, 0, index);
    index = msh_addVertex((mshobject*) mesh,
                          Py_BuildValue("ffffff",
                                        1.0, -1.0, 0.0, 0.0, 0.0, 1.0));
    PyList_SetItem(indices, 1, index);
    index = msh_addVertex((mshobject*) mesh,
                          Py_BuildValue("ffffff",
                                        -1.0, -1.0, 0.0, 0.0, 0.0, 1.0));
    PyList_SetItem(indices, 2, index);
    index = msh_addVertex((mshobject*) mesh,
                          Py_BuildValue("ffffff",
                                        -1.0, 1.0, 0.0, 0.0, 0.0, 1.0));
    PyList_SetItem(indices, 3, index);
    PyList_SetItem(indices, 4, PyInt_FromLong(0)); /* smooth flag */
    /* faces */
    msh_addFace((mshobject*) mesh,
                Py_BuildValue("OOOOO",
                              PyList_GetItem(indices, 0),
                              PyList_GetItem(indices, 3),
                              PyList_GetItem(indices, 2),
                              PyList_GetItem(indices, 1),
                              PyList_GetItem(indices, 4)));
    /* connection */
    blend_connect(self, Py_BuildValue("OO", object, mesh));
    blend_connect(self, Py_BuildValue("OO", scene, object));
    /* return value: PyTuple_SetItem steals the object/mesh references. */
    tuple = PyTuple_New(2);
    PyTuple_SetItem(tuple, 0, object);
    PyTuple_SetItem(tuple, 1, mesh);
    return tuple;
  }
  Py_INCREF(Py_None);
  return Py_None;
}
static char blend_connect__doc__[] =
"connect(obj1, obj2)"
;

/* Blender.connect(a, b): link a mesh into an object, or an object into a
 * scene, depending on the argument types.  Unknown pairs are silently
 * ignored.  Always returns None. */
static PyObject*
blend_connect(PyObject* self, PyObject* args)
{
  PyObject* obj1 = NULL;
  PyObject* obj2 = NULL;

  if (!PyArg_ParseTuple(args, "OO", &obj1, &obj2))
  {
    return NULL;
  }
  if (obj1->ob_type == &Objtype)
  {
    if (obj2->ob_type == &Mshtype)
    {
      /* Fix: release the previously held data/type references (Py_None
       * from the constructor, or an earlier mesh) before overwriting
       * them -- the original leaked them on every connect. */
      Py_INCREF(obj2);
      Py_DECREF(((objobject*) obj1)->data);
      ((objobject*) obj1)->data = obj2;
      Py_DECREF(((objobject*) obj1)->type);
      ((objobject*) obj1)->type = PyString_FromString("Mesh");
    }
  }
  else if (obj1->ob_type == &Scetype)
  {
    if (obj2->ob_type == &Objtype)
    {
      sce_addObject((sceobject*) obj1, Py_BuildValue("(O)", obj2));
    }
  }
  Py_INCREF(Py_None);
  return Py_None;
}
static char blend_getCurrentScene__doc__[] =
"getCurrentScene()"
;

/* Blender.getCurrentScene(): return the module's single scene, created
 * in initBlender(), as a new reference. */
static PyObject*
blend_getCurrentScene(PyObject* self, PyObject* args)
{
  if (!PyArg_ParseTuple(args, ""))
  {
    return NULL;
  }
  Py_INCREF(_scene);
  return _scene;
}

/* List of methods defined in the module */
static struct PyMethodDef blend_methods[] = {
 {"Mesh",	(PyCFunction) blend_Mesh,
  METH_VARARGS, blend_Mesh__doc__},
 {"Object",	(PyCFunction) blend_Object,
  METH_VARARGS, blend_Object__doc__},
 {"Scene",	(PyCFunction) blend_Scene,
  METH_VARARGS, blend_Scene__doc__},
 {"addMesh",	(PyCFunction) blend_addMesh,
  METH_VARARGS, blend_addMesh__doc__},
 {"connect",	(PyCFunction) blend_connect,
  METH_VARARGS, blend_connect__doc__},
 {"getCurrentScene",	(PyCFunction) blend_getCurrentScene,
  METH_VARARGS, blend_getCurrentScene__doc__},
 { NULL, (PyCFunction) NULL, 0, NULL }
};
/* Initialization function for the module (*must* be called initBlender) */

static char Blender_module_documentation[] =
"This is the Python API for Blender"
;

/* Register the Blender module, its error object, and the default scene. */
void
initBlender()
{
  PyObject* m;
  PyObject* d;

  /* Create the module and add the functions */
  m = Py_InitModule4("Blender", blend_methods,
                     Blender_module_documentation,
                     (PyObject*)NULL,PYTHON_API_VERSION);

  /* Add some symbolic constants to the module */
  d = PyModule_GetDict(m);
  ErrorObject = PyString_FromString("Blender.error");
  PyDict_SetItemString(d, "error", ErrorObject);

  /* XXXX Add constants here */
  /* The default scene, named "1"; getCurrentScene() returns it. */
  _scene = (PyObject*) newsceobject("1");
  PyDict_SetItemString(d, "_scene", _scene);

  /* Check for errors */
  if (PyErr_Occurred())
  {
    Py_FatalError("can't initialize module Blender");
  }
}
/* Standalone driver: boot Python, register Blender, run test.py. */
int main(int argc, char* argv[])
{
  char filename[] = "test.py";
  FILE* fp = NULL;

  Py_SetProgramName("blender");
  Py_Initialize();
  initBlender();
  fp = fopen(filename, "r");
  if (fp == NULL)
  {
    /* Fix: PyRun_AnyFile() must not receive a NULL stream. */
    fprintf(stderr, "blender: cannot open %s\n", filename);
    Py_Finalize();
    return EXIT_FAILURE;
  }
  PyRun_AnyFile(fp, filename);
  fclose(fp); /* Fix: the stream was previously never closed. */
  Py_Finalize();
  return EXIT_SUCCESS;
}

View File

@@ -0,0 +1,52 @@
import Blender
def printModuleInfo():
    # module information
    # Dumps every attribute of the Blender extension module: strings are
    # printed verbatim, functions and type constructors print their doc
    # strings, anything else prints its type.
    # NOTE(review): Python-2-only code -- builds a small print/if program
    # as a string and runs it with the `exec` statement.
    names = dir(Blender)
    print names
    for name in names:
        execString = ('print "Blender.' + name + ':",\n' +
                      'if type(Blender.' + name + ') == type(""):\n' +
                      '    print Blender.' + name + '\n' +
                      'elif type(Blender.' + name +
                      ') == type(Blender.addMesh) or type(Blender.' + name +
                      ') == type(Blender.Object):\n' +
                      '    print Blender.' + name + '.__doc__\n' +
                      'else:\n' +
                      '    print type(Blender.' + name + ')\n')
        exec execString
    print "#" * 79
def testModule():
    # Exercises the C extension: creates a plane via addMesh(), dumps its
    # vertex and face lists, then links a second object to the same mesh
    # to check datablock sharing.  (Python-2-only print statements.)
    # get current scene
    scene = Blender.getCurrentScene()
    print scene
    # create object and mesh (primitives)
    obj, msh = Blender.addMesh("Plane", scene)
    print "obj ="
    print obj
    print "msh ="
    print msh
    print "vertices:"
    for vertex in msh.vertices:
        print vertex
    print "faces:"
    for face in msh.faces:
        print face
    # create object only and share mesh
    obj2 = Blender.Object("Plane2")
    print obj2
    Blender.connect(obj2, msh)
    Blender.connect(scene, obj2)
    print obj2
    print obj2.data
    print "vertices:"
    for vertex in obj2.data.vertices:
        print vertex
    print "faces:"
    for face in obj2.data.faces:
        print face
    print scene
if __name__ == "__main__":
    # Guard so importing this script does not immediately run the tests.
    printModuleInfo()
    testModule()

View File

@@ -0,0 +1,14 @@
# $Id$
# This is the makefile for the bytecode freezing of all modules which
# the main file depends on (last argument in importer rule)

# Where the Python modules to freeze live, and where the frozen C goes.
SRCDIR = ../modules
TARGETDIR = ../../../source/blender/bpython/frozen

# -S: skip site.py on startup; -O: generate optimized bytecode.
PYFLAGS = -S -O

default: importer

# -d: debug output; -x: exclude modules resolved elsewhere (os, pprint,
# and the Blender module provided by the embedding binary itself).
importer:
	python $(PYFLAGS) freeze.py -d -x os -x pprint -x Blender -I $(SRCDIR) -o $(TARGETDIR) $(SRCDIR)/VRMLmain.py

173
intern/python/freeze/README Normal file
View File

@@ -0,0 +1,173 @@
THE FREEZE SCRIPT
=================
(Directions for Windows are at the end of this file.)
What is Freeze?
---------------
Freeze makes it possible to ship arbitrary Python programs to people
who don't have Python. The shipped file (called a "frozen" version of
your Python program) is an executable, so this only works if your
platform is compatible with that on the receiving end (this is usually
a matter of having the same major operating system revision and CPU
type).
The shipped file contains a Python interpreter and large portions of
the Python run-time. Some measures have been taken to avoid linking
unneeded modules, but the resulting binary is usually not small.
The Python source code of your program (and of the library modules
written in Python that it uses) is not included in the binary --
instead, the compiled byte-code (the instruction stream used
internally by the interpreter) is incorporated. This gives some
protection of your Python source code, though not much -- a
disassembler for Python byte-code is available in the standard Python
library. At least someone running "strings" on your binary won't see
the source.
How does Freeze know which modules to include?
----------------------------------------------
Previous versions of Freeze used a pretty simple-minded algorithm to
find the modules that your program uses, essentially searching for
lines starting with the word "import". It was pretty easy to trick it
into making mistakes, either missing valid import statements, or
mistaking string literals (e.g. doc strings) for import statements.
This has been remedied: Freeze now uses the regular Python parser to
parse the program (and all its modules) and scans the generated byte
code for IMPORT instructions. It may still be confused -- it will not
know about calls to the __import__ built-in function, or about import
statements constructed on the fly and executed using the 'exec'
statement, and it will consider import statements even when they are
unreachable (e.g. "if 0: import foobar").
This new version of Freeze also knows about Python's new package
import mechanism, and uses exactly the same rules to find imported
modules and packages. One exception: if you write 'from package
import *', Python will look into the __all__ variable of the package
to determine which modules are to be imported, while Freeze will do a
directory listing.
One tricky issue: Freeze assumes that the Python interpreter and
environment you're using to run Freeze is the same one that would be
used to run your program, which should also be the same whose sources
and installed files you will learn about in the next section. In
particular, your PYTHONPATH setting should be the same as for running
your program locally. (Tip: if the program doesn't run when you type
"python hello.py" there's little chance of getting the frozen version
to run.)
How do I use Freeze?
--------------------
Normally, you should be able to use it as follows:
python freeze.py hello.py
where hello.py is your program and freeze.py is the main file of
Freeze (in actuality, you'll probably specify an absolute pathname
such as /usr/joe/python/Tools/freeze/freeze.py).
What do I do next?
------------------
Freeze creates a number of files: frozen.c, config.c and Makefile,
plus one file for each Python module that gets included named
M_<module>.c. To produce the frozen version of your program, you can
simply type "make". This should produce a binary file. If the
filename argument to Freeze was "hello.py", the binary will be called
"hello".
Note: you can use the -o option to freeze to specify an alternative
directory where these files are created. This makes it easier to
clean up after you've shipped the frozen binary. You should invoke
"make" in the given directory.
Freezing Tkinter programs
-------------------------
Unfortunately, it is currently not possible to freeze programs that
use Tkinter. It *seems* to work, but when you ship the frozen program
to a site without a Tcl/Tk installation, it will fail with a complaint
about missing Tcl/Tk initialization files.
A workaround would be possible, in which the Tcl/Tk library files are
incorporated in a frozen Python module as string literals and written
to a temporary location when the program runs; this is currently left
as an exercise for the reader. (If you implement this, please post to
the Python newsgroup!)
Of course, you can also simply require that Tcl/Tk is required on the
target installation.
A warning against shared library modules
----------------------------------------
When your Python installation uses shared library modules, these will
not be incorporated in the frozen program. Again, the frozen program
will work when you test it, but it won't work when you ship it to a
site without a Python installation.
Freeze prints a warning when this is the case at the end of the
freezing process:
Warning: unknown modules remain: ...
When this occurs, the best thing to do is usually to rebuild Python
using static linking only.
Troubleshooting
---------------
If you have trouble using Freeze for a large program, it's probably
best to start playing with a really simple program first (like the file
hello.py). If you can't get that to work there's something
fundamentally wrong -- perhaps you haven't installed Python. To do a
proper install, you should do "make install" in the Python root
directory.
Usage under Windows 95 or NT
----------------------------
Under Windows 95 or NT, you *must* use the -p option and point it to
the top of the Python source tree.
WARNING: the resulting executable is not self-contained; it requires
the Python DLL, currently PYTHON20.DLL (it does not require the
standard library of .py files though). It may also require one or
more extension modules loaded from .DLL or .PYD files; the module
names are printed in the warning message about remaining unknown
modules.
The driver script generates a Makefile that works with the Microsoft
command line C compiler (CL). To compile, run "nmake"; this will
build a target "hello.exe" if the source was "hello.py". Only the
files frozenmain.c and frozen.c are used; no config.c is generated or
used, since the standard DLL is used.
In order for this to work, you must have built Python using the VC++
(Developer Studio) 5.0 compiler. The provided project builds
python20.lib in the subdirectory pcbuild\Release of the Python source
tree, and this is where the generated Makefile expects it to be. If
this is not the case, you can edit the Makefile or (probably better)
winmakemakefile.py (e.g., if you are using the 4.2 compiler, the
python20.lib file is generated in the subdirectory vc40 of the Python
source tree).
You can freeze programs that use Tkinter, but Tcl/Tk must be installed
on the target system.
It is possible to create frozen programs that don't have a console
window, by specifying the option '-s windows'.
--Guido van Rossum (home page: http://www.python.org/~guido/)

View File

@@ -0,0 +1,12 @@
$Id$
This is a modification of the freeze.py script used to freeze python
modules as byte code in Blender.
To create this byte code, simply type 'make'. Freeze will then generate
the C source files in the TARGETDIR (specified in the Makefile), provided
that you have a valid python installation.
Be warned: testing of the module dependencies is needed, as these are
resolved AT RUNTIME!

View File

@@ -0,0 +1,47 @@
_orig_open = open
class _BkFile:
    """File-like object that writes *file* but keeps a '~' backup.

    On creation the existing file (if any) is renamed to file + '~'.
    close() compares the newly written file against that backup: if the
    contents are identical, the new file is discarded and the backup is
    renamed back, so an unchanged output keeps its original timestamp
    (useful to avoid needless rebuilds of generated C files).
    """

    def __init__(self, file, mode, bufsize):
        import os
        self.__filename = file
        self.__backup = file + '~'
        # Remove a stale backup from a previous run, if present.
        try:
            os.unlink(self.__backup)
        except os.error:
            pass
        # Move the current file out of the way; if there is none,
        # remember that by setting __backup to None.
        try:
            os.rename(file, self.__backup)
        except os.error:
            self.__backup = None
        self.__file = _orig_open(file, mode, bufsize)
        # Delegate the (Python 2) file API by binding the underlying
        # file's attributes/bound methods directly onto this instance.
        self.closed = self.__file.closed
        self.fileno = self.__file.fileno
        self.flush = self.__file.flush
        self.isatty = self.__file.isatty
        self.mode = self.__file.mode
        self.name = self.__file.name
        self.read = self.__file.read
        self.readinto = self.__file.readinto
        self.readline = self.__file.readline
        self.readlines = self.__file.readlines
        self.seek = self.__file.seek
        self.softspace = self.__file.softspace
        self.tell = self.__file.tell
        self.truncate = self.__file.truncate
        self.write = self.__file.write
        self.writelines = self.__file.writelines

    def close(self):
        """Close the file; restore the backup if nothing changed."""
        self.__file.close()
        if self.__backup is None:
            return
        import filecmp
        # shallow=0 forces a byte-for-byte comparison, not just stat().
        if filecmp.cmp(self.__backup, self.__filename, shallow = 0):
            import os
            os.unlink(self.__filename)
            os.rename(self.__backup, self.__filename)
def open(file, mode = 'r', bufsize = -1):
    """Drop-in replacement for the builtin open().

    Write modes get the _BkFile backup/compare treatment; everything
    else is passed straight to the original builtin.
    """
    wants_backup = 'w' in mode
    if wants_backup:
        return _BkFile(file, mode, bufsize)
    return _orig_open(file, mode, bufsize)

View File

@@ -0,0 +1,91 @@
# Check for a module in a set of extension directories.
# An extension directory should contain a Setup file
# and one or more .o files or a lib.a file.
import os
import string
import parsesetup
def checkextensions(unknown, extensions):
    """Resolve *unknown* module names against extension directories.

    Each directory in *extensions* must contain a Setup file and either
    .o files or a lib.a archive.  Returns (files, modules): the link
    inputs to add, and the names from *unknown* that were resolved.
    """
    files = []
    modules = []
    edict = {}
    # Pre-parse every extension directory's Setup file once.
    for e in extensions:
        setup = os.path.join(e, 'Setup')
        liba = os.path.join(e, 'lib.a')
        if not os.path.isfile(liba):
            liba = None
        edict[e] = parsesetup.getsetupinfo(setup), liba
    for mod in unknown:
        for e in extensions:
            (mods, vars), liba = edict[e]
            if not mods.has_key(mod):
                continue
            modules.append(mod)
            if liba:
                # If we find a lib.a, use it, ignore the
                # .o files, and use *all* libraries for
                # *all* modules in the Setup file
                if liba in files:
                    break
                files.append(liba)
                for m in mods.keys():
                    files = files + select(e, mods, vars,
                                           m, 1)
                break
            files = files + select(e, mods, vars, mod, 0)
            break
    return files, modules
def select(e, mods, vars, mod, skipofiles):
    """Collect the link inputs for *mod* from its Setup file entry.

    Words are normalized by treatword(), $vars expanded, and relative
    .o/.a paths anchored in the extension directory *e*.  When
    *skipofiles* is true, .o files are dropped (a lib.a covers them).
    """
    files = []
    for w in mods[mod]:
        w = treatword(w)
        if not w:
            continue
        w = expandvars(w, vars)
        # A single Setup word may expand to several whitespace-separated ones.
        for w in string.split(w):
            if skipofiles and w[-2:] == '.o':
                continue
            # Assume $var expands to absolute pathname
            if w[0] not in ('-', '$') and w[-2:] in ('.o', '.a'):
                w = os.path.join(e, w)
            if w[:2] in ('-L', '-R') and w[2:3] != '$':
                w = w[:2] + os.path.join(e, w[2:])
            files.append(w)
    return files
# Compiler switches that contribute nothing to the link step, and the
# source-file extensions that map onto a compiled .o file.
cc_flags = ['-I', '-D', '-U']
cc_exts = ['.c', '.C', '.cc', '.c++']

def treatword(w):
    """Normalize one Setup-file word for linking.

    Compiler-only switches yield None, other '-' flags pass through
    (assumed loader flags), and C/C++ source names are rewritten to the
    matching .o file.
    """
    if w[:2] in cc_flags:
        return None
    if w[:1] == '-':
        # Not a compiler switch: assume it is meant for the loader.
        return w
    directory, filename = os.path.split(w)
    stem, extension = os.path.splitext(filename)
    if extension in cc_exts:
        w = os.path.join(directory, stem + '.o')
    return w
def expandvars(str, vars):
    """Expand $v and $(var) references in *str* using the *vars* mapping.

    Unknown variables are left untouched; after a substitution the scan
    resumes at the substitution point so nested results are re-examined.
    """
    i = 0
    while i < len(str):
        # k remembers where the '$' itself is; i walks past it.
        i = k = string.find(str, '$', i)
        if i < 0:
            break
        i = i+1
        var = str[i:i+1]
        i = i+1
        if var == '(':
            # $(name) form: the variable name runs to the closing paren.
            j = string.find(str, ')', i)
            if j < 0:
                break
            var = str[i:j]
            i = j+1
        if vars.has_key(var):
            str = str[:k] + vars[var] + str[i:]
            # Rescan from the start of the replacement text.
            i = k
    return str

View File

@@ -0,0 +1,190 @@
"""Extension management for Windows.
Under Windows it is unlikely the .obj files are of use, as special compiler options
are needed (primarily to toggle the behavior of "public" symbols.
I dont consider it worth parsing the MSVC makefiles for compiler options. Even if
we get it just right, a specific freeze application may have specific compiler
options anyway (eg, to enable or disable specific functionality)
So my basic strategy is:
* Have some Windows INI files which "describe" one or more extension modules.
(Freeze comes with a default one for all known modules - but you can specify
your own).
* This description can include:
- The MSVC .dsp file for the extension. The .c source file names
are extracted from there.
- Specific compiler/linker options
- Flag to indicate if Unicode compilation is expected.
At the moment the name and location of this INI file is hardcoded,
but an obvious enhancement would be to provide command line options.
"""
import os, string, sys
try:
import win32api
except ImportError:
win32api = None # User has already been warned
class CExtension:
    """Description of one extension module implemented in C/C++.

    Gathers everything needed to build the module into the frozen
    target: its source files, extra compiler options, and extra .lib
    files for the final link.
    """

    def __init__(self, name, sourceFiles):
        self.name = name
        self.sourceFiles = sourceFiles   # C/C++ sources for this module
        self.compilerOptions = []        # options applied to every source
        self.linkerLibs = []             # .lib files the final .EXE needs

    def GetSourceFiles(self):
        """Return the list of C/C++ source files."""
        return self.sourceFiles

    def AddCompilerOption(self, option):
        """Record an extra compiler option for all sources of this module."""
        self.compilerOptions.append(option)

    def GetCompilerOptions(self):
        """Return the accumulated compiler options."""
        return self.compilerOptions

    def AddLinkerLib(self, lib):
        """Record an extra library for the link step."""
        self.linkerLibs.append(lib)

    def GetLinkerLibs(self):
        """Return the accumulated linker libraries."""
        return self.linkerLibs
def checkextensions(unknown, extra_inis, prefix):
    """Resolve *unknown* module names against extension INI map files.

    The default extensions_win32.ini (next to the driver script) is
    appended to *extra_inis* so user-supplied maps take precedence.
    Returns a list of CExtension instances for the modules found;
    modules found in no map file are reported on stderr.
    """
    # Create a table of frozen extensions
    defaultMapName = os.path.join( os.path.split(sys.argv[0])[0], "extensions_win32.ini")
    if not os.path.isfile(defaultMapName):
        # Bug fix: this message formerly referenced the undefined name
        # 'mapFileName' (NameError) and lacked a trailing newline.
        sys.stderr.write("WARNING: %s can not be found - standard extensions may not be found\n" % defaultMapName)
    else:
        # must go on end, so other inis can override.
        extra_inis.append(defaultMapName)
    ret = []
    for mod in unknown:
        for ini in extra_inis:
            # print "Looking for", mod, "in", win32api.GetFullPathName(ini),"...",
            defn = get_extension_defn( mod, ini, prefix )
            if defn is not None:
                # print "Yay - found it!"
                ret.append( defn )
                break
            # print "Nope!"
        else: # for-loop not broken out of: no map file defined the module.
            sys.stderr.write("No definition of module %s in any specified map file.\n" % (mod))
    return ret
def get_extension_defn(moduleName, mapFileName, prefix):
    """Build a CExtension for *moduleName* from its INI description.

    Reads the module's section of *mapFileName* (dsp file, compiler
    options, excluded sources, Unicode flag, extra libs).  Returns None
    when win32api is unavailable, no dsp is listed, or the dsp can not
    be parsed.
    """
    if win32api is None: return None
    os.environ['PYTHONPREFIX'] = prefix
    dsp = win32api.GetProfileVal(moduleName, "dsp", "", mapFileName)
    if dsp=="":
        return None
    # We allow environment variables in the file name
    dsp = win32api.ExpandEnvironmentStrings(dsp)
    # If the path to the .DSP file is not absolute, assume it is relative
    # to the description file.
    if not os.path.isabs(dsp):
        dsp = os.path.join( os.path.split(mapFileName)[0], dsp)
    # Parse it to extract the source files.
    sourceFiles = parse_dsp(dsp)
    if sourceFiles is None:
        return None
    module = CExtension(moduleName, sourceFiles)
    # Put the path to the DSP into the environment so entries can reference it.
    os.environ['dsp_path'] = os.path.split(dsp)[0]
    os.environ['ini_path'] = os.path.split(mapFileName)[0]
    cl_options = win32api.GetProfileVal(moduleName, "cl", "", mapFileName)
    if cl_options:
        module.AddCompilerOption(win32api.ExpandEnvironmentStrings(cl_options))
    exclude = win32api.GetProfileVal(moduleName, "exclude", "", mapFileName)
    exclude = string.split(exclude)
    if win32api.GetProfileVal(moduleName, "Unicode", 0, mapFileName):
        module.AddCompilerOption('/D UNICODE /D _UNICODE')
    libs = string.split(win32api.GetProfileVal(moduleName, "libs", "", mapFileName))
    for lib in libs:
        module.AddLinkerLib(win32api.ExpandEnvironmentStrings(lib))
    for exc in exclude:
        if exc in module.sourceFiles:
            # Bug fix: was 'modules.sourceFiles.remove(exc)' - 'modules'
            # is undefined here, so any matching exclude raised NameError.
            module.sourceFiles.remove(exc)
    return module
# Given an MSVC DSP file, locate C source files it uses
# returns a list of source files.
def parse_dsp(dsp):
    """Return the .c/.cpp SOURCE entries of MSVC project *dsp* as
    absolute paths, or None if the project file can not be read."""
    # print "Processing", dsp
    # For now, only support
    ret = []
    dsp_path, dsp_name = os.path.split(dsp)
    try:
        lines = open(dsp, "r").readlines()
    except IOError, msg:
        sys.stderr.write("%s: %s\n" % (dsp, msg))
        return None
    for line in lines:
        # Relevant lines look like: SOURCE=..\path\file.cpp
        fields = string.split(string.strip(line), "=", 2)
        if fields[0]=="SOURCE":
            if string.lower(os.path.splitext(fields[1])[1]) in ['.cpp', '.c']:
                ret.append( win32api.GetFullPathName(os.path.join(dsp_path, fields[1] ) ) )
    return ret
def write_extension_table(fname, modules):
    """Write the C source *fname* declaring the frozen-extension table.

    Emits an init-function prototype and an _inittab entry for every
    CExtension in *modules*, wrapped in the ext_src_*/ext_tab_* C
    boilerplate defined below.
    """
    fp = open(fname, "w")
    try:
        fp.write (ext_src_header)
        # Write fn protos
        for module in modules:
            # bit of a hack for .pyd's as part of packages:
            # only the last component of a dotted name is used.
            name = string.split(module.name,'.')[-1]
            fp.write('extern void init%s(void);\n' % (name) )
        # Write the table
        fp.write (ext_tab_header)
        for module in modules:
            name = string.split(module.name,'.')[-1]
            fp.write('\t{"%s", init%s},\n' % (name, name) )
        fp.write (ext_tab_footer)
        fp.write(ext_src_footer)
    finally:
        fp.close()
ext_src_header = """\
#include "Python.h"
"""
ext_tab_header = """\
static struct _inittab extensions[] = {
"""
ext_tab_footer = """\
/* Sentinel */
{0, 0}
};
"""
ext_src_footer = """\
extern DL_IMPORT(int) PyImport_ExtendInittab(struct _inittab *newtab);
int PyInitFrozenExtensions()
{
return PyImport_ExtendInittab(extensions);
}
"""

461
intern/python/freeze/freeze.py Executable file
View File

@@ -0,0 +1,461 @@
#! /usr/bin/env python
# changes made by strubi@blender.nl
"""Freeze a Python script into a binary.
usage: freeze [options...] script [module]...
Options:
-p prefix: This is the prefix used when you ran ``make install''
in the Python build directory.
(If you never ran this, freeze won't work.)
The default is whatever sys.prefix evaluates to.
It can also be the top directory of the Python source
tree; then -P must point to the build tree.
-P exec_prefix: Like -p but this is the 'exec_prefix', used to
install objects etc. The default is whatever sys.exec_prefix
evaluates to, or the -p argument if given.
If -p points to the Python source tree, -P must point
to the build tree, if different.
-e extension: A directory containing additional .o files that
may be used to resolve modules. This directory
should also have a Setup file describing the .o files.
On Windows, the name of a .INI file describing one
or more extensions is passed.
More than one -e option may be given.
-o dir: Directory where the output files are created; default '.'.
-m: Additional arguments are module names instead of filenames.
-a package=dir: Additional directories to be added to the package's
__path__. Used to simulate directories added by the
package at runtime (eg, by OpenGL and win32com).
More than one -a option may be given for each package.
-l file: Pass the file to the linker (windows only)
-d: Debugging mode for the module finder.
-q: Make the module finder totally quiet.
-h: Print this help message.
-x module Exclude the specified module.
-i filename: Include a file with additional command line options. Used
to prevent command lines growing beyond the capabilities of
the shell/OS. All arguments specified in filename
are read and the -i option replaced with the parsed
params (note - quoting args in this file is NOT supported)
-s subsystem: Specify the subsystem (For Windows only.);
'console' (default), 'windows', 'service' or 'com_dll'
-w: Toggle Windows (NT or 95) behavior.
(For debugging only -- on a win32 platform, win32 behavior
is automatic.)
Arguments:
script: The Python script to be executed by the resulting binary.
module ...: Additional Python modules (referenced by pathname)
that will be included in the resulting binary. These
may be .py or .pyc files. If -m is specified, these are
module names that are search in the path instead.
NOTES:
In order to use freeze successfully, you must have built Python and
installed it ("make install").
The script should not use modules provided only as shared libraries;
if it does, the resulting binary is not self-contained.
"""
# Import standard modules
import getopt
import os
import string
import sys
# Import the freeze-private modules
import checkextensions
import modulefinder
import makeconfig
import makefreeze
import makemakefile
import parsesetup
import bkfile
# Main program
def main():
    """Drive the freeze process.

    Parses the command line, locates the Python installation (or source
    tree), runs the module finder over the script, writes the frozen
    bytecode C files via makefreeze, and generates config.c plus a
    Makefile (or an MSVC Makefile on Windows).
    """
    # overridable context
    prefix = None                       # settable with -p option
    exec_prefix = None                  # settable with -P option
    extensions = []
    exclude = []                        # settable with -x option
    addn_link = []                      # settable with -l, but only honored under Windows.
    path = sys.path[:]
    modargs = 0
    debug = 1
    odir = ''
    win = sys.platform[:3] == 'win'

    # default the exclude list for each platform
    if win: exclude = exclude + [
        'dos', 'dospath', 'mac', 'macpath', 'macfs', 'MACFS', 'posix', 'os2', 'ce']

    # modules that are imported by the Python runtime
    #implicits = ["site", "exceptions"]
    implicits = ["exceptions"]

    # output files
    frozen_c = 'frozen.c'
    config_c = 'config.c'
    target = 'a.out'                    # normally derived from script name
    makefile = 'Makefile.freeze'
    subsystem = 'console'

    # parse command line by first replacing any "-i" options with the file contents.
    pos = 1
    while pos < len(sys.argv)-1: # last option can not be "-i", so this ensures "pos+1" is in range!
        if sys.argv[pos] == '-i':
            try:
                options = string.split(open(sys.argv[pos+1]).read())
            except IOError, why:
                usage("File name '%s' specified with the -i option can not be read - %s" % (sys.argv[pos+1], why) )
            # Replace the '-i' and the filename with the read params.
            sys.argv[pos:pos+2] = options
            pos = pos + len(options) - 1 # Skip the name and the included args.
        pos = pos + 1

    # Now parse the command line with the extras inserted.
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'a:de:hmo:p:P:I:qs:wx:l:')
    except getopt.error, msg:
        usage('getopt error: ' + str(msg))

    # process option arguments
    for o, a in opts:
        if o == '-h':
            print __doc__
            return
        if o == '-d':
            debug = debug + 1
        if o == '-e':
            extensions.append(a)
        if o == '-I': # include path
            path.append(a)
        if o == '-m':
            modargs = 1
        if o == '-o':
            odir = a
        if o == '-p':
            prefix = a
        if o == '-P':
            exec_prefix = a
        if o == '-q':
            debug = 0
        if o == '-w':
            win = not win
        if o == '-s':
            if not win:
                usage("-s subsystem option only on Windows")
            subsystem = a
        if o == '-x':
            exclude.append(a)
        if o == '-l':
            addn_link.append(a)
        if o == '-a':
            apply(modulefinder.AddPackagePath, tuple(string.split(a,"=", 2)))

    # default prefix and exec_prefix
    if not exec_prefix:
        if prefix:
            exec_prefix = prefix
        else:
            exec_prefix = sys.exec_prefix
    if not prefix:
        prefix = sys.prefix

    # determine whether -p points to the Python source tree
    ishome = os.path.exists(os.path.join(prefix, 'Python', 'ceval.c'))

    # locations derived from options
    version = sys.version[:3]
    if win:
        extensions_c = 'frozen_extensions.c'
    if ishome:
        print "(Using Python source directory)"
        binlib = exec_prefix
        incldir = os.path.join(prefix, 'Include')
        config_h_dir = exec_prefix
        config_c_in = os.path.join(prefix, 'Modules', 'config.c.in')
        frozenmain_c = os.path.join(prefix, 'Python', 'frozenmain.c')
        makefile_in = os.path.join(exec_prefix, 'Modules', 'Makefile')
        if win:
            frozendllmain_c = os.path.join(exec_prefix, 'Pc\\frozen_dllmain.c')
    else:
        binlib = os.path.join(exec_prefix,
                              'lib', 'python%s' % version, 'config')
        incldir = os.path.join(prefix, 'include', 'python%s' % version)
        config_h_dir = os.path.join(exec_prefix, 'include',
                                    'python%s' % version)
        config_c_in = os.path.join(binlib, 'config.c.in')
        frozenmain_c = os.path.join(binlib, 'frozenmain.c')
        makefile_in = os.path.join(binlib, 'Makefile')
        frozendllmain_c = os.path.join(binlib, 'frozen_dllmain.c')
    supp_sources = []
    defines = []
    includes = ['-I' + incldir, '-I' + config_h_dir]

    # sanity check of directories and files
    check_dirs = [prefix, exec_prefix, binlib, incldir]
    if not win: check_dirs = check_dirs + extensions # These are not directories on Windows.
    for dir in check_dirs:
        if not os.path.exists(dir):
            usage('needed directory %s not found' % dir)
        if not os.path.isdir(dir):
            usage('%s: not a directory' % dir)
    if win:
        files = supp_sources + extensions # extensions are files on Windows.
    else:
        files = [config_c_in, makefile_in] + supp_sources
    for file in supp_sources:
        if not os.path.exists(file):
            usage('needed file %s not found' % file)
        if not os.path.isfile(file):
            usage('%s: not a plain file' % file)
    if not win:
        for dir in extensions:
            setup = os.path.join(dir, 'Setup')
            if not os.path.exists(setup):
                usage('needed file %s not found' % setup)
            if not os.path.isfile(setup):
                usage('%s: not a plain file' % setup)

    # check that enough arguments are passed
    if not args:
        usage('at least one filename argument required')

    # check that file arguments exist
    for arg in args:
        if arg == '-m':
            break
        # if user specified -m on the command line before _any_
        # file names, then nothing should be checked (as the
        # very first file should be a module name)
        if modargs:
            break
        if not os.path.exists(arg):
            usage('argument %s not found' % arg)
        if not os.path.isfile(arg):
            usage('%s: not a plain file' % arg)

    # process non-option arguments
    scriptfile = args[0]
    modules = args[1:]

    # derive target name from script name
    base = os.path.basename(scriptfile)
    base, ext = os.path.splitext(base)
    if base:
        if base != scriptfile:
            target = base
        else:
            # Script has no extension: avoid clobbering it with the binary.
            target = base + '.bin'

    # handle -o option
    base_frozen_c = frozen_c
    base_config_c = config_c
    base_target = target
    if odir and not os.path.isdir(odir):
        try:
            os.mkdir(odir)
            print "Created output directory", odir
        except os.error, msg:
            usage('%s: mkdir failed (%s)' % (odir, str(msg)))
    base = ''
    if odir:
        base = os.path.join(odir, '')
        frozen_c = os.path.join(odir, frozen_c)
        config_c = os.path.join(odir, config_c)
        target = os.path.join(odir, target)
        makefile = os.path.join(odir, makefile)
        if win: extensions_c = os.path.join(odir, extensions_c)

    # Handle special entry point requirements
    # (on Windows, some frozen programs do not use __main__, but
    # import the module directly.  Eg, DLLs, Services, etc
    custom_entry_point = None  # Currently only used on Windows
    python_entry_is_main = 1   # Is the entry point called __main__?
    # handle -s option on Windows
    if win:
        import winmakemakefile
        try:
            custom_entry_point, python_entry_is_main = \
                winmakemakefile.get_custom_entry_point(subsystem)
        except ValueError, why:
            usage(why)

    # Actual work starts here...

    # collect all modules of the program
    dir = os.path.dirname(scriptfile)
    path[0] = dir
    mf = modulefinder.ModuleFinder(path, debug, exclude)

    if win and subsystem=='service':
        # If a Windows service, then add the "built-in" module.
        mod = mf.add_module("servicemanager")
        mod.__file__="dummy.pyd" # really built-in to the resulting EXE

    for mod in implicits:
        mf.import_hook(mod)
    for mod in modules:
        if mod == '-m':
            modargs = 1
            continue
        if modargs:
            if mod[-2:] == '.*':
                mf.import_hook(mod[:-2], None, ["*"])
            else:
                mf.import_hook(mod)
        else:
            mf.load_file(mod)

    # Add the main script as either __main__, or the actual module name.
    if python_entry_is_main:
        mf.run_script(scriptfile)
    else:
        mf.load_file(scriptfile)

    if debug > 0:
        mf.report()
        print
    dict = mf.modules

    # generate output for frozen modules
    files = makefreeze.makefreeze(base, dict, debug, custom_entry_point, 1)

    # look for unfrozen modules (builtin and of unknown origin)
    builtins = []
    unknown = []
    mods = dict.keys()
    mods.sort()
    for mod in mods:
        if dict[mod].__code__:
            continue
        if not dict[mod].__file__:
            builtins.append(mod)
        else:
            unknown.append(mod)

    # search for unknown modules in extensions directories (not on Windows)
    addfiles = []
    frozen_extensions = [] # Windows list of modules.
    if unknown or (not win and builtins):
        if not win:
            addfiles, addmods = \
                checkextensions.checkextensions(unknown+builtins,
                                                extensions)
            for mod in addmods:
                if mod in unknown:
                    unknown.remove(mod)
                    builtins.append(mod)
        else:
            # Do the windows thang...
            import checkextensions_win32
            # Get a list of CExtension instances, each describing a module
            # (including its source files)
            frozen_extensions = checkextensions_win32.checkextensions(
                unknown, extensions, prefix)
            for mod in frozen_extensions:
                unknown.remove(mod.name)

    # report unknown modules
    if unknown:
        sys.stderr.write('Warning: unknown modules remain: %s\n' %
                         string.join(unknown))

    # windows gets different treatment
    if win:
        # Taking a shortcut here...
        import winmakemakefile, checkextensions_win32
        checkextensions_win32.write_extension_table(extensions_c,
                                                    frozen_extensions)
        # Create a module definition for the bootstrap C code.
        xtras = [frozenmain_c, os.path.basename(frozen_c),
                 frozendllmain_c, os.path.basename(extensions_c)] + files
        maindefn = checkextensions_win32.CExtension( '__main__', xtras )
        frozen_extensions.append( maindefn )
        outfp = open(makefile, 'w')
        try:
            winmakemakefile.makemakefile(outfp,
                                         locals(),
                                         frozen_extensions,
                                         os.path.basename(target))
        finally:
            outfp.close()
        return

    # generate config.c and Makefile
    builtins.sort()
    infp = open(config_c_in)
    outfp = bkfile.open(config_c, 'w')
    try:
        makeconfig.makeconfig(infp, outfp, builtins)
    finally:
        outfp.close()
    infp.close()

    cflags = defines + includes + ['$(OPT)']
    libs = [os.path.join(binlib, 'libpython$(VERSION).a')]

    somevars = {}
    if os.path.exists(makefile_in):
        makevars = parsesetup.getmakevars(makefile_in)
        for key in makevars.keys():
            somevars[key] = makevars[key]

    somevars['CFLAGS'] = string.join(cflags) # override
    files = ['$(OPT)', '$(LDFLAGS)', base_config_c, base_frozen_c] + \
            files + supp_sources + addfiles + libs + \
            ['$(MODLIBS)', '$(LIBS)', '$(SYSLIBS)']

    outfp = bkfile.open(makefile, 'w')
    try:
        makemakefile.makemakefile(outfp, somevars, files, base_target)
    finally:
        outfp.close()

    # Done!
    if odir:
        print 'Now run "make" in', odir,
        print 'to build the target:', base_target
    else:
        print 'Now run "make" to build the target:', base_target
# Print usage message and exit
def usage(msg):
    """Report error *msg* plus a help pointer on stderr and exit(2)."""
    sys.stdout = sys.stderr
    print "Error:", msg
    print "Use ``%s -h'' for help" % sys.argv[0]
    sys.exit(2)
main()

View File

@@ -0,0 +1 @@
print 'Hello world...'

View File

@@ -0,0 +1,61 @@
import regex
# Write the config.c file
# Modules that must never appear in the generated table: they are either
# always built in or handled specially by the frozen bootstrap.
never = ['marshal', '__main__', '__builtin__', 'sys', 'exceptions']

def makeconfig(infp, outfp, modules, with_ifdef=0):
    """Copy config.c.in from *infp* to *outfp*, inserting *modules*.

    After the 'ADDMODULE MARKER 1' line, an init-function declaration is
    written for each module (guarded by #ifndef when *with_ifdef*);
    after 'MARKER 2', the corresponding inittab entries.  Modules listed
    in `never` are skipped.  Missing markers are reported on stderr.
    """
    # Bug fixes: the original used the long-obsolete 'regex' module
    # (its search() returned an offset, hence the '>= 0' tests) and
    # referenced 'sys' without importing it.
    import re
    import sys
    m1 = re.compile('-- ADDMODULE MARKER 1 --')
    m2 = re.compile('-- ADDMODULE MARKER 2 --')
    while 1:
        line = infp.readline()
        if not line: break
        outfp.write(line)
        if m1 and m1.search(line):
            m1 = None   # only expand the first occurrence
            for mod in modules:
                if mod in never:
                    continue
                if with_ifdef:
                    outfp.write("#ifndef init%s\n"%mod)
                outfp.write('extern void init%s();\n' % mod)
                if with_ifdef:
                    outfp.write("#endif\n")
        elif m2 and m2.search(line):
            m2 = None
            for mod in modules:
                if mod in never:
                    continue
                outfp.write('\t{"%s", init%s},\n' %
                            (mod, mod))
    if m1:
        sys.stderr.write('MARKER 1 never found\n')
    elif m2:
        sys.stderr.write('MARKER 2 never found\n')
# Test program.
def test():
    """Command-line driver: makeconfig.py config.c.in output mod...

    '-' selects stdin/stdout for the input/output file respectively.
    """
    import sys
    if not sys.argv[3:]:
        print 'usage: python makeconfig.py config.c.in outputfile',
        print 'modulename ...'
        sys.exit(2)
    if sys.argv[1] == '-':
        infp = sys.stdin
    else:
        infp = open(sys.argv[1])
    if sys.argv[2] == '-':
        outfp = sys.stdout
    else:
        outfp = open(sys.argv[2], 'w')
    makeconfig(infp, outfp, sys.argv[3:])
    # Only close what we opened ourselves.
    if outfp != sys.stdout:
        outfp.close()
    if infp != sys.stdin:
        infp.close()
if __name__ == '__main__':
test()

View File

@@ -0,0 +1,115 @@
##
##
## Customized makefreeze for NaN
##
##
## 1.11.2001, strubi@blender.nl
##
##
import marshal
import string
import bkfile
# Write a file containing frozen code for the modules in the dictionary.
header = """
#include "Python.h"
static struct _frozen _PyImport_FrozenModules[] = {
"""
trailer = """\
{0, 0, 0} /* sentinel */
};
"""
# if __debug__ == 0 (i.e. -O option given), set Py_OptimizeFlag in frozen app.
main_entry_point = """
int
main(argc, argv)
int argc;
char **argv;
{
extern int Py_FrozenMain(int, char **);
init_frozen_modules();
return Py_FrozenMain(argc, argv);
}
"""
default_entry_point = """
void
init_frozenmodules(void)
{
""" + ((not __debug__ and """
Py_OptimizeFlag++;
""") or "") + """
PyImport_FrozenModules = _PyImport_FrozenModules;
}
"""
HEADER = """
/* This is a generated file, containing frozen bytecode.
* Check $(HOME)/develop/intern/python/freeze/README for more information.
*/
"""
def makefreeze(base, dict, debug=0, entry_point = None, exclude_main = 0):
    """Write frozen-bytecode C files for the modules in *dict*.

    *dict* maps module names to modulefinder Module objects; one
    M_<mangled>.c file (via the bkfile backup wrapper) is written per
    module with a code object, plus 'frozen.c' containing the
    _PyImport_FrozenModules table and *entry_point* C code (the NaN
    default registers the table via init_frozenmodules()).  *base* is
    the output directory prefix.  Returns the list of generated
    M_*.c file names.  NOTE: with exclude_main set, '__main__' must be
    present in *dict* or list.remove raises ValueError.
    """
    if entry_point is None: entry_point = default_entry_point
    done = []
    files = []
    mods = dict.keys()
    if exclude_main:
        mods.remove("__main__")
    mods.sort()
    for mod in mods:
        m = dict[mod]
        # Dots are not valid in C identifiers: package.sub -> package__sub.
        mangled = string.join(string.split(mod, "."), "__")
        if m.__code__:
            file = 'M_' + mangled + '.c'
            outfp = bkfile.open(base + file, 'w')
            outfp.write(HEADER)
            files.append(file)
            if debug:
                print "freezing", mod, "..."
            str = marshal.dumps(m.__code__)
            size = len(str)
            if m.__path__:
                # Indicate package by negative size
                size = -size
            done.append((mod, mangled, size))
            writecode(outfp, mangled, str)
            outfp.close()
    if debug:
        print "generating table of frozen modules"
    outfp = bkfile.open(base + 'frozen.c', 'w')
    for mod, mangled, size in done:
        outfp.write('extern unsigned char M_%s[];\n' % mangled)
    outfp.write(header)
    for mod, mangled, size in done:
        outfp.write('\t{"%s", M_%s, %d},\n' % (mod, mangled, size))
    outfp.write(trailer)
    outfp.write(entry_point)
    outfp.close()
    #outfp = bkfile.open(base + 'main.c', 'w')
    #outfp.write(main_entry_point)
    #outfp.close()
    return files
# Write a C initializer for a module containing the frozen python code.
# The array is called M_<mod>.
def writecode(outfp, mod, str):
    """Emit the marshalled code string *str* as C array 'unsigned char M_<mod>[]'."""
    outfp.write('unsigned char M_%s[] = {' % mod)
    # 16 byte values per line keeps the generated source readable.
    per_line = 16
    for start in range(0, len(str), per_line):
        outfp.write('\n\t')
        chunk = str[start:start + per_line]
        outfp.write(''.join(['%d,' % ord(ch) for ch in chunk]))
    outfp.write('\n};\n')
## def writecode(outfp, mod, str):
## outfp.write('unsigned char M_%s[%d] = "%s";\n' % (mod, len(str),
## string.join(map(lambda s: `s`[1:-1], string.split(str, '"')), '\\"')))

View File

@@ -0,0 +1,57 @@
# Write the actual Makefile.
##
##
## Customized makemakefile for NaN
##
##
## 1.11.2001, strubi@blender.nl
##
##
import os
import string
def makemakefile(outfp, makevars, files, target):
    """Write the NaN-customized Makefile to *outfp*.

    *makevars* entries become VAR=value lines; .c entries in *files*
    are rewritten in place to their .o names and collected as OBJS.
    NOTE: the *target* argument is deliberately ignored - this NaN
    variant always builds the static library 'libfrozen.a' (the 'all'
    target) plus a 'frozen' link rule, with M___main__.o kept out of
    the library.
    """
    outfp.write("# Makefile generated by freeze.py script\n\n")

    target = "frozen"
    libtarget = "lib" + target
    targetlib = libtarget + ".a"
    #targetlib = "libpyfrozen.a"

    # list() + sort() works on both Python 2 and 3 dict key objects.
    keys = list(makevars.keys())
    keys.sort()
    for key in keys:
        outfp.write("%s=%s\n" % (key, makevars[key]))
    outfp.write("\nall: %s\n\n" % libtarget)

    deps = []
    for i in range(len(files)):
        file = files[i]
        if file[-2:] == '.c':
            base = os.path.basename(file)
            dest = base[:-2] + '.o'
            # outfp.write("%s: %s\n" % (dest, file))
            # outfp.write("\t$(CC) $(CFLAGS) -c %s\n" % file)
            files[i] = dest     # caller sees the rewritten list
            deps.append(dest)

    # don't put __main__ into the library; it may legitimately be absent.
    mainfile = 'M___main__.o'
    try:
        deps.remove(mainfile)
    except ValueError:   # was a bare 'except:', which hid real errors
        pass

    # Consistency fix: use str.join throughout (the code below already
    # did); string.join(deps) produced the identical space-joined result.
    outfp.write("OBJS = %s\n" % " ".join(deps))
    # libfiles.remove('M___main__.o') # don't link with __main__
    outfp.write("\n%s: $(OBJS)\n" % (libtarget))
    outfp.write("\t$(AR) ruv %s $(OBJS)\n" % (targetlib))
    outfp.write("\n%s: %s $(OBJS)\n" % (target, mainfile))
    outfp.write("\t$(CC) %s %s -o %s $(LDLAST)\n" %
                (mainfile, " ".join(deps), target))
    outfp.write("\nclean:\n\t-rm -f *.o *.a %s\n" % target)

View File

@@ -0,0 +1,444 @@
"""Find modules used by a script, using introspection."""
import dis
import imp
import marshal
import os
import re
import string
import sys
if sys.platform=="win32":
# On Windows, we can locate modules in the registry with
# the help of the win32api package.
try:
import win32api
except ImportError:
print "The win32api module is not available - modules listed"
print "in the registry will not be found."
win32api = None
# Bytecode opcodes the scanner cares about, looked up by name so the
# numeric values always match the running interpreter's opcode table.
IMPORT_NAME = dis.opname.index('IMPORT_NAME')
IMPORT_FROM = dis.opname.index('IMPORT_FROM')
STORE_NAME = dis.opname.index('STORE_NAME')
STORE_FAST = dis.opname.index('STORE_FAST')
STORE_GLOBAL = dis.opname.index('STORE_GLOBAL')
# Any of these opcodes binds a name in the scanned code object.
STORE_OPS = [STORE_NAME, STORE_FAST, STORE_GLOBAL]
# ModuleFinder simulates Python's import machinery well, but it cannot
# see __path__ manipulations a package performs at runtime (OpenGL and
# win32com do this).  As a workaround, extra directories can be
# registered here per package name; the finder honors them.
# The map goes from package name to a list of extra path entries.
packagePathMap = {}

# A Public interface
def AddPackagePath(packagename, path):
    """Register an extra __path__ directory for *packagename*."""
    packagePathMap.setdefault(packagename, []).append(path)
class Module:
    """A module discovered by ModuleFinder.

    Mirrors the dunder attributes of a real module object: __name__,
    __file__ (None for builtins), __path__ (non-None marks a package)
    and __code__ (filled in once the module source is compiled).
    """

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file    # source pathname, or None
        self.__path__ = path    # package search path, or None
        self.__code__ = None    # compiled code object, set later

    def __repr__(self):
        # Bug fix: originally used the backquote repr syntax (`x`),
        # which was deprecated and later removed from the language;
        # repr() produces the identical string.
        s = "Module(%s" % repr(self.__name__)
        if self.__file__ is not None:
            s = s + ", %s" % repr(self.__file__)
        if self.__path__ is not None:
            s = s + ", %s" % repr(self.__path__)
        s = s + ")"
        return s
class ModuleFinder:
    def __init__(self, path=None, debug=0, excludes = []):
        """Create a finder searching *path* (defaults to sys.path).

        *debug* sets msg() verbosity; *excludes* lists module names
        that must never be followed.
        """
        if path is None:
            path = sys.path
        self.path = path
        self.modules = {}       # fully-qualified name -> Module
        self.badmodules = {}    # names that failed to import
        self.debug = debug      # message verbosity threshold
        self.indent = 0         # current nesting depth for msg() output
        self.excludes = excludes
    def msg(self, level, str, *args):
        """Print a debug line (indented to the import nesting depth)
        when *level* does not exceed the configured debug level."""
        if level <= self.debug:
            for i in range(self.indent):
                print " ",
            print str,
            for arg in args:
                print repr(arg),
            print
    def msgin(self, *args):
        """Like msg(), but first increase the indent (call on entry);
        args[0] is the level, the rest are forwarded to msg()."""
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent + 1
            apply(self.msg, args)
    def msgout(self, *args):
        """Counterpart of msgin(): decrease the indent, then msg()."""
        level = args[0]
        if level <= self.debug:
            self.indent = self.indent - 1
            apply(self.msg, args)
def run_script(self, pathname):
self.msg(2, "run_script", pathname)
fp = open(pathname)
stuff = ("", "r", imp.PY_SOURCE)
self.load_module('__main__', fp, pathname, stuff)
def load_file(self, pathname):
dir, name = os.path.split(pathname)
name, ext = os.path.splitext(name)
fp = open(pathname)
stuff = (ext, "r", imp.PY_SOURCE)
self.load_module(name, fp, pathname, stuff)
def import_hook(self, name, caller=None, fromlist=None):
self.msg(3, "import_hook", name, caller, fromlist)
parent = self.determine_parent(caller)
q, tail = self.find_head_package(parent, name)
m = self.load_tail(q, tail)
if not fromlist:
return q
if m.__path__:
self.ensure_fromlist(m, fromlist)
    def determine_parent(self, caller):
        """Return the package Module that relative imports from *caller*
        resolve against, or None for top-level (absolute) imports."""
        self.msgin(4, "determine_parent", caller)
        if not caller:
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.__name__
        if caller.__path__:
            # The caller is itself a package: imports resolve inside it.
            parent = self.modules[pname]
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            # A submodule: its parent is the package before the last dot.
            i = string.rfind(pname, '.')
            pname = pname[:i]
            parent = self.modules[pname]
            assert parent.__name__ == pname
            self.msgout(4, "determine_parent ->", parent)
            return parent
        self.msgout(4, "determine_parent -> None")
        return None
def find_head_package(self, parent, name):
self.msgin(4, "find_head_package", parent, name)
if '.' in name:
i = string.find(name, '.')
head = name[:i]
tail = name[i+1:]
else:
head = name
tail = ""
if parent:
qname = "%s.%s" % (parent.__name__, head)
else:
qname = head
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
if parent:
qname = head
parent = None
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
self.msgout(4, "raise ImportError: No module named", qname)
raise ImportError, "No module named " + qname
def load_tail(self, q, tail):
self.msgin(4, "load_tail", q, tail)
m = q
while tail:
i = string.find(tail, '.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = self.import_module(head, mname, m)
if not m:
self.msgout(4, "raise ImportError: No module named", mname)
raise ImportError, "No module named " + mname
self.msgout(4, "load_tail ->", m)
return m
def ensure_fromlist(self, m, fromlist, recursive=0):
self.msg(4, "ensure_fromlist", m, fromlist, recursive)
for sub in fromlist:
if sub == "*":
if not recursive:
all = self.find_all_submodules(m)
if all:
self.ensure_fromlist(m, all, 1)
elif not hasattr(m, sub):
subname = "%s.%s" % (m.__name__, sub)
submod = self.import_module(sub, subname, m)
if not submod:
raise ImportError, "No module named " + subname
def find_all_submodules(self, m):
if not m.__path__:
return
modules = {}
suffixes = [".py", ".pyc", ".pyo"]
for dir in m.__path__:
try:
names = os.listdir(dir)
except os.error:
self.msg(2, "can't list directory", dir)
continue
for name in names:
mod = None
for suff in suffixes:
n = len(suff)
if name[-n:] == suff:
mod = name[:-n]
break
if mod and mod != "__init__":
modules[mod] = mod
return modules.keys()
def import_module(self, partname, fqname, parent):
self.msgin(3, "import_module", partname, fqname, parent)
try:
m = self.modules[fqname]
except KeyError:
pass
else:
self.msgout(3, "import_module ->", m)
return m
if self.badmodules.has_key(fqname):
self.msgout(3, "import_module -> None")
if parent:
self.badmodules[fqname][parent.__name__] = None
return None
try:
fp, pathname, stuff = self.find_module(partname,
parent and parent.__path__)
except ImportError:
self.msgout(3, "import_module ->", None)
return None
try:
m = self.load_module(fqname, fp, pathname, stuff)
finally:
if fp: fp.close()
if parent:
setattr(parent, partname, m)
self.msgout(3, "import_module ->", m)
return m
def load_module(self, fqname, fp, pathname, (suffix, mode, type)):
self.msgin(2, "load_module", fqname, fp and "fp", pathname)
if type == imp.PKG_DIRECTORY:
m = self.load_package(fqname, pathname)
self.msgout(2, "load_module ->", m)
return m
if type == imp.PY_SOURCE:
co = compile(fp.read()+'\n', pathname, 'exec')
elif type == imp.PY_COMPILED:
if fp.read(4) != imp.get_magic():
self.msgout(2, "raise ImportError: Bad magic number", pathname)
raise ImportError, "Bad magic number in %s" % pathname
fp.read(4)
co = marshal.load(fp)
else:
co = None
m = self.add_module(fqname)
m.__file__ = pathname
if co:
m.__code__ = co
self.scan_code(co, m)
self.msgout(2, "load_module ->", m)
return m
def scan_code(self, co, m):
code = co.co_code
n = len(code)
i = 0
lastname = None
while i < n:
c = code[i]
i = i+1
op = ord(c)
if op >= dis.HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256
i = i+2
if op == IMPORT_NAME:
name = lastname = co.co_names[oparg]
if not self.badmodules.has_key(lastname):
try:
self.import_hook(name, m)
except ImportError, msg:
self.msg(2, "ImportError:", str(msg))
if not self.badmodules.has_key(name):
self.badmodules[name] = {}
self.badmodules[name][m.__name__] = None
elif op == IMPORT_FROM:
name = co.co_names[oparg]
assert lastname is not None
if not self.badmodules.has_key(lastname):
try:
self.import_hook(lastname, m, [name])
except ImportError, msg:
self.msg(2, "ImportError:", str(msg))
fullname = lastname + "." + name
if not self.badmodules.has_key(fullname):
self.badmodules[fullname] = {}
self.badmodules[fullname][m.__name__] = None
elif op in STORE_OPS:
# Skip; each IMPORT_FROM is followed by a STORE_* opcode
pass
else:
lastname = None
for c in co.co_consts:
if isinstance(c, type(co)):
self.scan_code(c, m)
def load_package(self, fqname, pathname):
self.msgin(2, "load_package", fqname, pathname)
m = self.add_module(fqname)
m.__file__ = pathname
m.__path__ = [pathname]
# As per comment at top of file, simulate runtime __path__ additions.
m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
fp, buf, stuff = self.find_module("__init__", m.__path__)
self.load_module(fqname, fp, buf, stuff)
self.msgout(2, "load_package ->", m)
return m
def add_module(self, fqname):
if self.modules.has_key(fqname):
return self.modules[fqname]
self.modules[fqname] = m = Module(fqname)
return m
def find_module(self, name, path):
if name in self.excludes:
self.msgout(3, "find_module -> Excluded")
raise ImportError, name
if path is None:
if name in sys.builtin_module_names:
return (None, None, ("", "", imp.C_BUILTIN))
# Emulate the Registered Module support on Windows.
if sys.platform=="win32" and win32api is not None:
HKEY_LOCAL_MACHINE = 0x80000002
try:
pathname = win32api.RegQueryValue(HKEY_LOCAL_MACHINE, "Software\\Python\\PythonCore\\%s\\Modules\\%s" % (sys.winver, name))
fp = open(pathname, "rb")
# XXX - To do - remove the hard code of C_EXTENSION.
stuff = "", "rb", imp.C_EXTENSION
return fp, pathname, stuff
except win32api.error:
pass
path = self.path
return imp.find_module(name, path)
def report(self):
print
print " %-25s %s" % ("Name", "File")
print " %-25s %s" % ("----", "----")
# Print modules found
keys = self.modules.keys()
keys.sort()
for key in keys:
m = self.modules[key]
if m.__path__:
print "P",
else:
print "m",
print "%-25s" % key, m.__file__ or ""
# Print missing modules
keys = self.badmodules.keys()
keys.sort()
for key in keys:
# ... but not if they were explicitly excluded.
if key not in self.excludes:
mods = self.badmodules[key].keys()
mods.sort()
print "?", key, "from", string.join(mods, ', ')
def test():
    """Command-line driver: parse options, then trace the imports of the
    given script (and any extra module arguments) and print a report.

    Options:
      -d        increase debug verbosity
      -m        remaining arguments are module names, not files
      -p path   extend the module search path
      -q        quiet (debug = 0)
      -x name   exclude module 'name'
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error, msg:
        print msg
        return

    # Process options
    debug = 1
    domods = 0
    addpath = []
    exclude = []
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + string.split(a, os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            exclude.append(a)

    # Provide default arguments
    if not args:
        script = "hello.py"
    else:
        script = args[0]

    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print "path:"
        for item in path:
            print " ", `item`

    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    for arg in args[1:]:
        if arg == '-m':
            # '-m' may also appear among the positional arguments, to
            # switch the remaining arguments into "module" mode.
            domods = 1
            continue
        if domods:
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.load_file(arg)
    mf.run_script(script)
    mf.report()
if __name__ == '__main__':
    # Allow interactive runs to be interrupted cleanly with Ctrl-C.
    try:
        test()
    except KeyboardInterrupt:
        print "\n[interrupt]"

View File

@@ -0,0 +1,98 @@
# Parse Makefiles and Python Setup(.in) files.
import regex
import string
# Extract variable definitions from a Makefile.
# Return a dictionary mapping names to values.
# May raise IOError.

# Matches 'NAME = value' lines (old 'regex' module, Emacs-style groups).
makevardef = regex.compile('^\([a-zA-Z0-9_]+\)[ \t]*=\(.*\)')

def getmakevars(filename):
    """Parse Makefile 'filename' and return {name: value} for every
    'NAME = value' definition, with trailing '#' comments and surrounding
    whitespace stripped from the value."""
    variables = {}
    fp = open(filename)
    try:
        while 1:
            line = fp.readline()
            if not line:
                break
            # regex.match returns the match length, < 0 means no match.
            if makevardef.match(line) < 0:
                continue
            name, value = makevardef.group(1, 2)
            # Strip trailing comment
            i = string.find(value, '#')
            if i >= 0:
                value = value[:i]
            value = string.strip(value)
            variables[name] = value
    finally:
        fp.close()
    return variables
# Parse a Python Setup(.in) file.
# Return two dictionaries, the first mapping modules to their
# definitions, the second mapping variable names to their values.
# May raise IOError.

# Matches 'NAME=value' lines (no whitespace around '=', unlike Makefiles).
setupvardef = regex.compile('^\([a-zA-Z0-9_]+\)=\(.*\)')

def getsetupinfo(filename):
    """Parse Setup file 'filename'; return (modules, variables) where
    modules maps a module name to the rest of its definition line (split
    into words) and variables maps variable names to values."""
    modules = {}
    variables = {}
    fp = open(filename)
    try:
        while 1:
            line = fp.readline()
            if not line:
                break
            # Strip comments
            i = string.find(line, '#')
            if i >= 0:
                line = line[:i]
            if setupvardef.match(line) >= 0:
                name, value = setupvardef.group(1, 2)
                variables[name] = string.strip(value)
            else:
                # A module definition line: first word is the module name.
                words = string.split(line)
                if words:
                    modules[words[0]] = words[1:]
    finally:
        fp.close()
    return modules, variables
# Test the above functions.

def test():
    """Parse each Makefile*/Setup* file given on the command line and
    dump the resulting dictionaries via prdict()."""
    import sys
    import os
    if not sys.argv[1:]:
        print 'usage: python parsesetup.py Makefile*|Setup* ...'
        sys.exit(2)
    for arg in sys.argv[1:]:
        # Dispatch on the basename prefix.
        base = os.path.basename(arg)
        if base[:8] == 'Makefile':
            print 'Make style parsing:', arg
            v = getmakevars(arg)
            prdict(v)
        elif base[:5] == 'Setup':
            print 'Setup style parsing:', arg
            m, v = getsetupinfo(arg)
            prdict(m)
            prdict(v)
        else:
            print arg, 'is neither a Makefile nor a Setup file'
            print '(name must begin with "Makefile" or "Setup")'
def prdict(d):
keys = d.keys()
keys.sort()
for key in keys:
value = d[key]
print "%-15s" % key, str(value)
if __name__ == '__main__':
    # Run the self-test when executed as a script.
    test()

View File

@@ -0,0 +1,146 @@
import sys, os, string
# Template used then the program is a GUI program
WINMAINTEMPLATE = """
#include <windows.h>
int WINAPI WinMain(
HINSTANCE hInstance, // handle to current instance
HINSTANCE hPrevInstance, // handle to previous instance
LPSTR lpCmdLine, // pointer to command line
int nCmdShow // show state of window
)
{
extern int Py_FrozenMain(int, char **);
PyImport_FrozenModules = _PyImport_FrozenModules;
return Py_FrozenMain(__argc, __argv);
}
"""
SERVICETEMPLATE = """
extern int PythonService_main(int, char **);
int main( int argc, char **argv)
{
PyImport_FrozenModules = _PyImport_FrozenModules;
return PythonService_main(argc, argv);
}
"""
subsystem_details = {
# -s flag : (C entry point template), (is it __main__?), (is it a DLL?)
'console' : (None, 1, 0),
'windows' : (WINMAINTEMPLATE, 1, 0),
'service' : (SERVICETEMPLATE, 0, 0),
'com_dll' : ("", 0, 1),
}
def get_custom_entry_point(subsystem):
try:
return subsystem_details[subsystem][:2]
except KeyError:
raise ValueError, "The subsystem %s is not known" % subsystem
def makemakefile(outfp, vars, files, target):
    """Write a Visual C++ makefile for 'target' to the open file 'outfp'.

    Temporarily redirects sys.stdout so realwork() can simply 'print'
    the makefile text; the old stdout is always restored."""
    save = sys.stdout
    try:
        sys.stdout = outfp
        realwork(vars, files, target)
    finally:
        sys.stdout = save
def realwork(vars, moddefns, target):
    """Emit (via print, normally redirected by makemakefile()) an NMAKE
    makefile that compiles the modules in 'moddefns' and links 'target'.

    'vars' supplies at least 'prefix', 'subsystem' and 'addn_link'."""
    # e.g. "20" for Python 2.0 - used in library/DLL names.
    version_suffix = `sys.version_info[0]`+`sys.version_info[1]`
    print "# Makefile for Microsoft Visual C++ generated by freeze.py script"
    print
    print 'target = %s' % target
    print 'pythonhome = %s' % vars['prefix']
    print
    print 'DEBUG=0 # Set to 1 to use the _d versions of Python.'
    print '!IF $(DEBUG)'
    print 'debug_suffix=_d'
    print 'c_debug=/Zi /Od /DDEBUG /D_DEBUG'
    print 'l_debug=/DEBUG'
    print 'temp_dir=Build\\Debug'
    print '!ELSE'
    print 'debug_suffix='
    print 'c_debug=/Ox'
    print 'l_debug='
    print 'temp_dir=Build\\Release'
    print '!ENDIF'
    print
    print '# The following line assumes you have built Python using the standard instructions'
    print '# Otherwise fix the following line to point to the library.'
    print 'pythonlib = "$(pythonhome)/pcbuild/python%s$(debug_suffix).lib"' % version_suffix
    print

    # We only ever write one "entry point" symbol - either
    # "main" or "WinMain". Therefore, there is no need to
    # pass a subsystem switch to the linker as it works it
    # out all by itself. However, the subsystem _does_ determine
    # the file extension and additional linker flags.
    target_link_flags = ""
    target_ext = ".exe"
    if subsystem_details[vars['subsystem']][2]:
        # DLL subsystem (see subsystem_details third tuple element).
        target_link_flags = "-dll"
        target_ext = ".dll"

    print "# As the target uses Python%s.dll, we must use this compiler option!" % version_suffix
    print "cdl = /MD"
    print
    print "all: $(target)$(debug_suffix)%s" % (target_ext)
    print
    print '$(temp_dir):'
    print ' if not exist $(temp_dir)\. mkdir $(temp_dir)'
    print

    objects = []
    libs = ["shell32.lib", "comdlg32.lib", "wsock32.lib", "user32.lib", "oleaut32.lib"]
    for moddefn in moddefns:
        # One compile rule per source file of each module definition.
        print "# Module", moddefn.name
        for file in moddefn.sourceFiles:
            base = os.path.basename(file)
            base, ext = os.path.splitext(base)
            objects.append(base + ".obj")
            print '$(temp_dir)\%s.obj: "%s"' % (base, file)
            print "\t@$(CC) -c -nologo /Fo$* $(cdl) $(c_debug) /D BUILD_FREEZE",
            print '"-I$(pythonhome)/Include" "-I$(pythonhome)/PC" \\'
            print "\t\t$(cflags) $(cdebug) $(cinclude) \\"
            extra = moddefn.GetCompilerOptions()
            if extra:
                print "\t\t%s \\" % (string.join(extra),)
            print '\t\t"%s"' % file
            print

        # Add .lib files this module needs
        for modlib in moddefn.GetLinkerLibs():
            if modlib not in libs:
                libs.append(modlib)

    print "ADDN_LINK_FILES=",
    for addn in vars['addn_link']: print '"%s"' % (addn),
    print ; print

    print "OBJS=",
    for obj in objects: print '"$(temp_dir)\%s"' % (obj),
    print ; print

    print "LIBS=",
    for lib in libs: print '"%s"' % (lib),
    print ; print

    # The link rule, followed by a simple clean target.
    print "$(target)$(debug_suffix)%s: $(temp_dir) $(OBJS)" % (target_ext)
    print "\tlink -out:$(target)$(debug_suffix)%s %s" % (target_ext, target_link_flags),
    print "\t$(OBJS) \\"
    print "\t$(LIBS) \\"
    print "\t$(ADDN_LINK_FILES) \\"
    print "\t$(pythonlib) $(lcustom) $(l_debug)\\"
    print "\t$(resources)"
    print
    print "clean:"
    print "\t-rm -f *.obj"
    print "\t-rm -f $(target).exe"

225
intern/python/ivexport.py Normal file
View File

@@ -0,0 +1,225 @@
#######################
# (c) Jan Walter 2000 #
#######################
# CVS
# $Author$
# $Date$
# $RCSfile$
# $Revision$
import Blender
class InventorExport:
def __init__(self, filename):
self.file = open(filename, "w")
def beginObject(self, object):
self.file.write(" Separator {\n")
def endObject(self, object):
self.file.write(" }\n")
def export(self, scene):
print "exporting ..."
self.writeHeader()
for name in scene.objects:
object = Blender.getObject(name)
self.beginObject(object)
self.writeObject(object)
self.endObject(object)
self.writeEnd()
def writeEnd(self):
self.file.write("}\n")
self.file.close()
print "... finished"
def writeFaces(self, faces, smooth, colors, materials, texture):
self.file.write(" IndexedFaceSet {\n")
# colors
if colors:
self.file.write(" vertexProperty VertexProperty {\n")
self.file.write(" orderedRGBA [\n")
for color in colors[:-1]:
r = hex(int(color[0] * 255))
if len(r) == 3:
r = r + "0"
g = hex(int(color[1] * 255))
if len(g) == 3:
g = g + "0"
b = hex(int(color[2] * 255))
if len(b) == 3:
b = b + "0"
colstr = r + g[2:] + b[2:]
self.file.write(" %sff,\n" % colstr)
color = colors[-1]
r = hex(int(color[0] * 255))
if len(r) == 3:
r = r + "0"
g = hex(int(color[1] * 255))
if len(g) == 3:
g = g + "0"
b = hex(int(color[2] * 255))
if len(b) == 3:
b = b + "0"
colstr = r + g[2:] + b[2:]
self.file.write(" %sff\n" % colstr)
self.file.write(" ]\n")
self.file.write(" materialBinding PER_VERTEX_INDEXED\n")
self.file.write(" }\n")
# coordinates
self.file.write(" coordIndex [\n")
for face in faces[:-1]:
if face[4] != smooth:
pass
elif face[2] == 0 and face[3] == 0:
print "can't export lines at the moment ..."
elif face[3] == 0:
self.file.write(" %s, %s, %s, -1,\n" %
(face[0], face[1], face[2]))
else:
self.file.write(" %s, %s, %s, %s, -1,\n"%
(face[0], face[1], face[2], face[3]))
face = faces[-1]
if face[4] != smooth:
pass
elif face[2] == 0 and face[3] == 0:
print "can't export lines at the moment ..."
elif face[3] == 0:
self.file.write(" %s, %s, %s, -1,\n" %
(face[0], face[1], face[2]))
else:
self.file.write(" %s, %s, %s, %s, -1,\n"%
(face[0], face[1], face[2], face[3]))
self.file.write(" ]\n")
# materials
if not colors and materials:
self.file.write(" materialIndex [\n")
for face in faces[:-1]:
if face[4] != smooth:
pass
else:
self.file.write(" %s,\n" % face[5])
face = faces[-1]
if face[4] != smooth:
pass
else:
self.file.write(" %s\n" % face[5])
self.file.write(" ]\n")
# texture coordinates
if texture:
self.file.write(" textureCoordIndex [\n")
index = 0
for face in faces:
if face[3] == 0:
self.file.write(" " +
"%s, %s, %s, -1,\n" %
(index, index+1, index+2))
else:
self.file.write(" " +
"%s, %s, %s, %s, -1,\n" %
(index, index+1, index+2, index+3))
index = index + 4
self.file.write(" ]\n")
self.file.write(" }\n")
def writeHeader(self):
self.file.write("#Inventor V2.1 ascii\n\n")
self.file.write("Separator {\n")
self.file.write(" ShapeHints {\n")
self.file.write(" vertexOrdering COUNTERCLOCKWISE\n")
self.file.write(" }\n")
def writeMaterials(self, materials):
if materials:
self.file.write(" Material {\n")
self.file.write(" diffuseColor [\n")
for name in materials[:-1]:
material = Blender.getMaterial(name)
self.file.write(" %s %s %s,\n" %
(material.R, material.G, material.B))
name = materials[-1]
material = Blender.getMaterial(name)
self.file.write(" %s %s %s\n" %
(material.R, material.G, material.B))
self.file.write(" ]\n")
self.file.write(" }\n")
self.file.write(" MaterialBinding {\n")
self.file.write(" value PER_FACE_INDEXED\n")
self.file.write(" }\n")
def writeMatrix(self, matrix):
self.file.write(" MatrixTransform {\n")
self.file.write(" matrix %s %s %s %s\n" %
(matrix[0][0], matrix[0][1],
matrix[0][2], matrix[0][3]))
self.file.write(" %s %s %s %s\n" %
(matrix[1][0], matrix[1][1],
matrix[1][2], matrix[1][3]))
self.file.write(" %s %s %s %s\n" %
(matrix[2][0], matrix[2][1],
matrix[2][2], matrix[2][3]))
self.file.write(" %s %s %s %s\n" %
(matrix[3][0], matrix[3][1],
matrix[3][2], matrix[3][3]))
self.file.write(" }\n")
def writeNormals(self, normals):
self.file.write(" Normal {\n")
self.file.write(" vector [\n")
for normal in normals[:-1]:
self.file.write(" %s %s %s,\n" %
(normal[0], normal[1], normal[2]))
normal = normals[-1]
self.file.write(" %s %s %s\n" %
(normal[0], normal[1], normal[2]))
self.file.write(" ]\n")
self.file.write(" }\n")
def writeObject(self, object):
if object.type == "Mesh":
mesh = Blender.getMesh(object.data)
self.writeMatrix(object.matrix)
self.writeMaterials(object.materials)
self.writeTexture(mesh.texture, mesh.texcoords)
self.writeVertices(mesh.vertices)
self.writeFaces(mesh.faces, 0, mesh.colors, object.materials,
mesh.texture)
self.writeNormals(mesh.normals)
self.writeFaces(mesh.faces, 1, mesh.colors, object.materials,
mesh.texture)
else:
print "can't export %s at the moment ..." % object.type
def writeTexture(self, texture, texcoords):
if texture:
self.file.write(" Texture2 {\n")
self.file.write(' filename "%s"\n' % texture)
self.file.write(" }\n")
self.file.write(" TextureCoordinate2 {\n")
self.file.write(" point [\n")
for texcoord in texcoords:
self.file.write(" %s %s,\n" %
(texcoord[0], texcoord[1]))
self.file.write(" ]\n")
self.file.write(" }\n")
self.file.write(" TextureCoordinateBinding {\n")
self.file.write(" value PER_VERTEX_INDEXED\n")
self.file.write(" }\n")
def writeVertices(self, vertices):
self.file.write(" Coordinate3 {\n")
self.file.write(" point [\n")
for vertex in vertices[:-1]:
self.file.write(" %s %s %s,\n" %
(vertex[0], vertex[1], vertex[2]))
vertex = vertices[-1]
self.file.write(" %s %s %s\n" %
(vertex[0], vertex[1], vertex[2]))
self.file.write(" ]\n")
self.file.write(" }\n")
# Module-level test drive: export the current scene to "test.iv"
# as soon as the module is imported/run.
ivexport = InventorExport("test.iv")
scene = Blender.getCurrentScene()
ivexport.export(scene)

View File

@@ -0,0 +1 @@
from _Blender.BGL import *

View File

@@ -0,0 +1,106 @@
"""The Blender Camera module
This module provides access to **Camera** objects in Blender
Example::
from Blender import Camera, Object, Scene
c = Camera.New('ortho') # create new ortho camera data
c.lens = 35.0 # set lens value
cur = Scene.getCurrent() # get current Scene
ob = Object.New('Camera') # make camera object
ob.link(c) # link camera data with this object
cur.link(ob) # link object into scene
cur.setCurrentCamera(ob) # make this camera the active
"""
import shadow
import _Blender.Camera as _Camera
class Camera(shadow.hasIPO):
"""Wrapper for Camera DataBlock
Attributes
lens -- The lens value
clipStart -- The clipping start of the view frustum
clipEnd -- The end clipping plane of the view frustum
type -- The camera type:
0: perspective camera,
1: orthogonal camera - (see Types)
mode -- Drawing mode; see Modes
"""
_emulation = {'Lens' : "lens",
'ClSta' : "clipStart",
'ClEnd' : "clipEnd",
}
Types = {'persp' : 0,
'ortho' : 1,
}
Modes = {'showLimits' : 1,
'showMist' : 2,
}
def __init__(self, object):
self._object = object
def getType(self):
"""Returns camera type: "ortho" or "persp" """
if self.type == self.Types['ortho']:
return 'ortho'
else:
return 'persp'
def setType(self, type):
"""Sets Camera type to 'type' which must be one of ["persp", "ortho"]"""
self._object.type = self.Types[type]
def setMode(self, *modes):
"""Sets Camera modes *the nice way*, instead of direct access
of the 'mode' member.
This function takes a variable number of string arguments of the types
listed in self.Modes.
Example::
c = Camera.New()
c.setMode('showMist', 'showLimits')
"""
flags = 0
try:
for a in modes:
flags |= self.Modes[a]
except:
raise TypeError, "mode must be one of %s" % self.Modes.keys()
self.mode = flags
def __repr__(self):
return "[Camera \"%s\"]" % self.name
def New(type = 'persp'):
    """Creates new camera Object and returns it. 'type', if specified,
    must be one of Types"""
    # Wrap the raw C camera, then apply the requested projection type.
    cam = Camera(_Camera.New())
    cam.setType(type)
    return cam
def get(name = None):
    """Returns the Camera with name 'name', if given. Otherwise, a list
    of all Cameras is returned"""
    if name:
        return Camera(_Camera.get(name))
    else:
        # Wrap every raw camera in the Python Camera class.
        return shadow._List(_Camera.get(), Camera)

Get = get # emulation

View File

@@ -0,0 +1 @@
from _Blender.Draw import *

View File

@@ -0,0 +1,52 @@
"""The Blender Image module
This module provides (yet) basic support for Blender *Image* data blocks
Example::
from Blender import Image
im = Image.Load('dead-parrot.jpg')
"""
import _Blender.Image as _Image
import shadow
class Image(shadow.shadow):
    """Image DataBlock object

    See above example on how to create instances of Image objects.

    Attributes

      xrep -- Texture image tiling factor (subdivision) in X
      yrep -- Texture image tiling factor (subdivision) in Y

    LATER:

      * Image buffer access
      * better loading / saving of images
    """
    # Documentation-only wrapper; behaviour comes from shadow.shadow.
    pass
def get(name):
    """If 'name' given, the Image 'name' is returned if existing, 'None' otherwise.
    If no name is given, a list of all Images is returned"""
    # Documentation stub - replaced by the C implementation below.
    pass
def Load(filename):
    """Returns image from file 'filename' as Image object if found, 'None' else."""
    # Documentation stub - replaced by the C implementation below.
    pass
def New(name):
    """This function is currently not implemented"""
    # Placeholder only; note that no C implementation is assigned below.
    pass
# override all functions again, the above classes are just made
# for documentation
get = _Image.get
Get = get
Load = _Image.Load
# (New is intentionally not overridden - it is not implemented yet.)

View File

@@ -0,0 +1,279 @@
"""The Blender Ipo module
This module provides access to **Ipo** objects in Blender.
An Ipo object is a datablock of IpoCurves which control properties of
an object in time.
Note that IpoCurves assigned to rotation values (which must be specified
in radians) appear scaled in the IpoWindow (which is in fact true, due
to the fact that conversion to an internal unit of 10.0 angles happens).
Example::
from Blender import Ipo, Object
ipo = Ipo.New('Object', 'ObIpo') # Create object ipo with name 'ObIpo'
curve = ipo.addCurve('LocY') # add IpoCurve for LocY
curve.setInterpolation('Bezier') # set interpolation type
curve.setExtrapolation('CyclicLinear') # set extrapolation type
curve.addBezier((0.0, 0.0)) # add automatic handle bezier point
curve.addBezier((20.0, 5.0), 'Free', (10.0, 4.0)) # specify left handle, right auto handle
curve.addBezier((30.0, 1.0), 'Vect') # automatic split handle
curve.addBezier((100.0, 1.0)) # auto handle
curve.update() # recalculate curve handles
curve.eval(35.0) # evaluate curve at 35.0
ob = Object.get('Plane')
ob.setIpo(ipo) # assign ipo to object
"""
import _Blender.Ipo as _Ipo
import shadow
# Rotation channels whose values Blender stores in internal
# 10-degree units and which therefore need conversion.
_RotIpoCurves = ["RotX", "RotY", "RotZ", "dRotX", "dRotY", "dRotZ"]

_radian_factor = 5.72957814 # 18.0 / 3.14159255

def _convertBPoint(b):
    """Return a copy of BezierPoint 'b' with the y coordinate of the
    point and of both handles multiplied by _radian_factor
    (radians -> Blender's internal 10-degree rotation units)."""
    f = _radian_factor
    newb = BezierPoint()
    p = b.pt
    q = newb.pt
    q[0], q[1] = (p[0], f * p[1])
    p = b.h1
    q = newb.h1
    q[0], q[1] = (p[0], f * p[1])
    p = b.h2
    q = newb.h2
    q[0], q[1] = (p[0], f * p[1])
    return newb
class IpoBlock(shadow.shadowEx):
    """Wrapper for Blender Ipo DataBlock

    Attributes

      curves -- list of owned IpoCurves
    """

    def get(self, channel = None):
        """Returns curve with channel identifier 'channel', which is one of the properties
        listed in the Ipo Window, 'None' if not found.
        If 'channel' is not specified, all curves are returned in a list"""
        if channel:
            for c in self._object.curves:
                if c.name == channel:
                    return IpoCurve(c)
            return None
        else:
            return map(lambda x: IpoCurve(x), self._object.curves)

    def __getitem__(self, k):
        """Emulates dictionary syntax, e.g. ipocurve = ipo['LocX']"""
        curve = self.get(k)
        if not curve:
            raise KeyError, "Ipo does not have a curve for channel %s" % k
        return curve

    def __setitem__(self, k, val):
        """Emulates dictionary syntax, e.g. ipo['LocX'] = ipocurve"""
        c = self.addCurve(k, val)

    # Rough dict emulation - note that has_key() returns the curve (or a
    # list) rather than a boolean, and items() does NOT return (key, value)
    # pairs; both are plain aliases of get().
    has_key = get # dict emulation
    items = get # dict emulation

    def keys(self):
        # Channel names of all owned curves.
        return map(lambda x: x.name, self.get())

    def addCurve(self, channel, curve = None):
        """Adds a curve of channel type 'channel' to the Ipo Block. 'channel' must be one of
        the object properties listed in the Ipo Window. If 'curve' is not specified,
        an empty curve is created, otherwise, the existing IpoCurve 'curve' is copied and
        added to the IpoBlock 'self'.
        In any case, the added curve is returned.
        """
        if curve:
            if curve.__class__.__name__ != "IpoCurve":
                raise TypeError, "IpoCurve expected"
            c = self._object.addCurve(channel, curve._object)

            ### RotIpo conversion hack - rotation channels are stored
            ### internally in 10-degree units, see _convertBPoint().
            if channel in _RotIpoCurves:
                print "addCurve, converting", curve.name
                c.points = map(_convertBPoint, curve.bezierPoints)
            else:
                c.points = curve.bezierPoints
        else:
            c = self._object.addCurve(channel)
        return IpoCurve(c)

    # Attribute getter table used by the shadow machinery.
    _getters = { 'curves' : get }
class BezierPoint:
    """BezierPoint object

    Attributes

      pt  -- Coordinates of the Bezier point
      h1  -- Left handle coordinates
      h2  -- Right handle coordinates
      h1t -- Left handle type (see IpoCurve.addBezier(...) )
      h2t -- Right handle type
    """

# The class above is documentation only; the real implementation is the
# C type below.
BezierPoint = _Ipo.BezTriple # override
class IpoCurve(shadow.shadowEx):
"""Wrapper for Blender IpoCurve
Attributes
bezierPoints -- A list of BezierPoints (see class BezierPoint),
defining the curve shape
"""
InterpolationTypes = _Ipo.InterpolationTypes
ExtrapolationTypes = _Ipo.ExtrapolationTypes
def __init__(self, object):
self._object = object
self.__dict__['bezierPoints'] = self._object.points
def __getitem__(self, k):
"""Emulate a sequence of BezierPoints"""
print k, type(k)
return self.bezierPoints[k]
def __repr__(self):
return "[IpoCurve %s]" % self.name
def __len__(self):
return len(self.bezierPoints)
def eval(self, time):
"""Returns float value of curve 'self' evaluated at time 'time' which
must be a float."""
return self._object.eval(time)
def addBezier(self, p, leftType = 'Auto', left = None, rightType = None, right = None):
"""Adds a Bezier triple to the IpoCurve.
The following values are float tuples (x,y), denoting position of a control vertex:
p -- The position of the Bezier point
left -- The position of the leftmost handle
right -- The position of the rightmost handle
'leftType', 'rightType' must be one of:
"Auto" -- automatic handle calculation. In this case, 'left' and 'right' don't need to be specified
"Vect" -- automatic split handle calculation. 'left' and 'right' are disregarded.
"Align" -- Handles are aligned automatically. In this case, 'right' does not need to be specified.
"Free" -- Handles can be set freely - this requires both arguments 'left' and 'right'.
"""
b = _Ipo.BezTriple()
b.pt[0], b.pt[1] = (p[0], p[1])
b.h1t = leftType
if rightType:
b.h2t = rightType
else:
b.h2t = leftType
if left:
b.h1[0], b.h1[1] = (left[0], left[1])
if right:
b.h2[0], b.h2[1] = (right[0], right[1])
self.__dict__['bezierPoints'].append(b)
return b
def update(self, noconvert = 0):
# This is an ugly fix for the 'broken' storage of Rotation
# ipo values. The angles are stored in units of 10.0 degrees,
# which is totally inconsistent with anything I know :-)
# We can't (at the moment) change the internals, so we
# apply a conversion kludge..
if self._object.name in _RotIpoCurves and not noconvert:
points = map(_convertBPoint, self.bezierPoints)
else:
points = self.bezierPoints
self._object.points = points
self._object.update()
def getInterpolationType(self, ipotype):
"Returns the Interpolation type - see also IpoCurve.InterpolationTypes"
return self._object.getInterpolationType()
def setInterpolationType(self, ipotype):
"""Sets the interpolation type which must be one of IpoCurve.InterpolationTypes"""
try:
self._object.setInterpolationType(ipotype)
except:
raise TypeError, "must be one of %s" % self.InterpolationTypes.keys()
def getExtrapolationType(self, ipotype):
"Returns the Extrapolation type - see also IpoCurve.ExtrapolationTypes"
return self._object.getExtrapolationType()
def setExtrapolationType(self, ipotype):
"""Sets the interpolation type which must be one of IpoCurve.ExtrapolationTypes"""
try:
self._object.setInterpolationType(ipotype)
except:
raise TypeError, "must be one of %s" % self.ExtrapolationTypes.keys()
def New(blocktype, name = None):
    """Returns a new IPO block of type 'blocktype' which must be one of:
    ["Object", "Camera", "World", "Material"]
    """
    # The C layer picks a default name when none is given.
    if name:
        i = _Ipo.New(blocktype, name)
    else:
        i = _Ipo.New(blocktype)
    return IpoBlock(i)
def Eval(ipocurve, time): # emulation code
    """This function is just there for compatibility.
    Use IpoCurve.eval(time) instead"""
    return ipocurve.eval(time)
def Recalc(ipocurve): # emulation code
    """This function is just there for compatibility. Note that Ipos
    assigned to rotation values will *not* get converted to the proper
    unit of radians.
    In the new style API, use IpoCurve.update() instead"""
    # noconvert=1: skip the radian -> internal-unit conversion.
    return ipocurve.update(1)
def get(name = None):
    """If 'name' given, the Ipo 'name' is returned if existing, 'None' otherwise.
    If no name is given, a list of all Ipos is returned"""
    if name:
        ipo = _Ipo.get(name)
        if ipo:
            return IpoBlock(ipo)
        else:
            return None
    else:
        # Wrap every raw Ipo in the Python IpoBlock class.
        return shadow._List(_Ipo.get(), IpoBlock)

Get = get # emulation

View File

@@ -0,0 +1,168 @@
"""The Blender Lamp module
This module provides control over **Lamp** objects in Blender.
Example::
from Blender import Lamp
l = Lamp.New('Spot')
l.setMode('square', 'shadow')
ob = Object.New('Lamp')
ob.link(l)
"""
import _Blender.Lamp as _Lamp
import shadow
# The only shadow-buffer sizes Blender accepts.
_validBufferSizes = [512, 768, 1024, 1536, 2560]

def _setBufferSize(self, bufsize):
    """Set the lamp's buffersize. This function makes sure that a valid
    bufferSize value is set (unlike setting lamp.bufferSize directly)"""
    if bufsize not in _validBufferSizes:
        # Warn and fall back to the smallest legal size instead of raising.
        print """Buffer size should be one of:
%s
Setting to default 512""" % _validBufferSizes
        bufsize = 512
    self._object.bufferSize = bufsize
class Lamp(shadow.hasIPO, shadow.hasModes):
"""Wrapper for Blender Lamp DataBlock
Attributes
mode -- Lamp mode value - see EditButtons. Do not access directly
See setMode()
type -- Lamp type value - see EditButtons. No direct access, please.
See setType()
col -- RGB vector (R, G, B) of lamp colour
energy -- Intensity (float)
dist -- clipping distance of a spot lamp or decay range
spotSize -- float angle (in degrees) of spot cone
(between 0.0 and 180.0)
spotBlend -- value defining the blurriness of the spot edge
haloInt -- Halo intensity
clipStart -- shadow buffer clipping start
clipStart -- shadow buffer clipping end
bias -- The bias value for the shadowbuffer routine
softness -- The filter value for the shadow blurring
samples -- Number of samples in shadow calculation - the
larger, the better
bufferSize -- Size of the shadow buffer which should be one of:
[512, 768, 1024, 1536, 2560]
haloStep -- Number of steps in halo calculation - the smaller, the
the better (and slower). A value of 0 disables shadow
halo calculation
"""
_emulation = {'Energ' : "energy",
'SpoSi' : "spotSize",
'SpoBl' : "SpotBlend",
'HaInt' : "haloInt",
'Dist' : "dist",
'Quad1' : "quad1",
'Quad2' : "quad2",
}
_setters = {'bufferSize' : _setBufferSize}
t = _Lamp.Types
Types = {'Lamp' : t.LOCAL,
'Spot' : t.SPOT,
'Sun' : t.SUN,
'Hemi' : t.HEMI,
}
t = _Lamp.Modes
Modes = {'quad' : t.QUAD,
'sphere' : t.SPHERE,
'shadow' : t.SHAD,
'halo' : t.HALO,
'layer' : t.LAYER,
'negative' : t.NEG,
'onlyShadow' : t.ONLYSHADOW,
'square' : t.SQUARE,
}
del t
def __repr__(self):
return "[Lamp \"%s\"]" % self.name
def setType(self, name):
"""Set the Lamp type of Lamp 'self'. 'name' must be a string of:
* 'Lamp': A standard point light source
* 'Spot': A spot light
* 'Sun' : A unidirectional light source, very far away (like a Sun!)
* 'Hemi': A diffuse hemispherical light source (daylight without sun)"""
try:
self._object.type = self.Types[name]
except:
raise TypeError, "type must be one of %s" % self.Types.keys()
def getType(self):
"""Returns the lamp's type as string. See setType()"""
for k in self.Types.keys():
if self.Types[k] == self.type:
return k
def getMode(self):
"""Returns the Lamp modes as a list of strings"""
return shadow._getModeBits(self.Modes, self._object.mode)
def setMode(self, *args):
"""Set the Lamp mode of Lamp 'self'. This function takes a variable number
of string arguments of the types listed in self.Modes.
Example::
l = Lamp.New()
l.setMode('quad', 'shadow')
"""
print args
self._object.mode = shadow._setModeBits(self.Modes, args)
def getBufferSize(self):
return self.bufferSize
def New(type = "Lamp", name = "Lamp"):
    """Returns a new Lamp datablock of type 'type' and optional name 'name'
"""
    # Look up the type first so an invalid 'type' raises before a raw
    # lamp datablock is created.
    ltype = Lamp.Types[type]
    rawlamp = _Lamp.New()
    rawlamp.type = ltype
    rawlamp.name = name
    return Lamp(rawlamp)
def get(name = None):
    """If 'name' given, the Lamp 'name' is returned if existing, 'None' otherwise.
If no name is given, a list of all Lamps is returned"""
    if not name:
        return shadow._List(_Lamp.get(), Lamp)
    return Lamp(_Lamp.get(name))

Types = _Lamp.Types

View File

@@ -0,0 +1,251 @@
"""The Blender Material module
This module provides access to *Material* datablocks
Example::
from Blender import Material, NMesh, Object, Scene
m = Material.New() # create free Material datablock
m.rgbCol = (1.0, 0.0, 0.3) # assign RGB values
mesh = NMesh.GetRaw() # get new mesh
mesh.addMaterial(m) # add material to mesh
ob = Object.New('Mesh') # create new object
ob.link(mesh) # link mesh data to object
Scene.getCurrent().link(ob) # link object to current scene
"""
import _Blender.Material as _Material
import shadow
#import Blender.Curve as Curve
# These are getters and setters needed for emulation
def _getRGB(obj):
return (obj.R, obj.G, obj.B)
def _getSpec(obj):
return (obj.specR, obj.specG, obj.specB)
def _getMir(obj):
return (obj.mirR, obj.mirG, obj.mirB)
def _setRGB(obj, rgb):
obj.R, obj.G, obj.B = rgb
def _setSpec(obj, rgb):
obj.specR, obj.specG, obj.specB = rgb
def _setMir(obj, rgb):
obj.mirR, obj.mirG, obj.mirB = rgb
class Material(shadow.hasIPO, shadow.hasModes):
"""Material DataBlock object
See example in the Material module documentation on how to create
an instance of a Material object.
Attributes
The following attributes are colour vectors (r, g, b)
rgbCol -- The color vector (R, G, B).
The RGB values can be accessed individually as .R, .G and .B
specCol -- Specularity color vector (specR, specG, specG)
mirCol -- Mirror color vector (mirR, mirG, mirB)
The following are float values:
alpha -- The transparency
ref -- Reflectivity float value
emit -- Emit intensity value
amb -- Ambient intensity value
spec -- specularity value
specTransp -- Specular transpareny
haloSize -- Halo size
mode -- The material mode bit vector - see Material.ModeFlags
hard -- The hardness value
"""
_emulation = {'Mode' : "mode",
'Ref' : "ref",
'HaSize' : "haloSize",
'SpTra' : "specTransp",
'Alpha' : "alpha",
'Spec' : "spec",
'Emit' : "emit",
'Hard' : "hard",
'Amb' : "amb",
}
_getters = {'rgbCol' : _getRGB,
'specCol' : _getSpec,
'mirCol' : _getMir,
}
_setters = {'rgbCol' : _setRGB,
'specCol' : _setSpec,
'mirCol' : _setMir,
}
t = _Material.Modes
Modes = {'traceable' : t.TRACEABLE,
'shadow' : t.SHADOW,
'shadeless' : t.SHADELESS,
'wire' : t.WIRE,
'vcolLight' : t.VCOL_LIGHT,
'vcolPaint' : t.VCOL_PAINT,
'zTransp' : t.ZTRANSP,
'zInvert' : t.ZINVERT,
'onlyShadow': t.ONLYSHADOW,
'star' : t.STAR,
'texFace' : t.TEXFACE,
'noMist' : t.NOMIST,
}
t = _Material.HaloModes
HaloModes = { "rings" : t.RINGS,
"lines" : t.LINES,
"tex" : t.TEX,
"haloPuno": t.PUNO,
"shade" : t.SHADE,
"flare" : t.FLARE,
}
del t
def setMode(self, *args):
"""Set the mode of 'self'. This function takes a variable number
of string arguments of the types listed in self.Modes.
Example::
m = Material.New()
m.setMode('shadow', 'wire')
"""
flags = 0
try:
for a in args:
flags |= self.Modes[a]
except:
raise TypeError, "mode must be one of" % self.Modes.keys()
self._object.mode = flags
def setHaloMode(self, *args):
"""Sets the material to Halo mode.
This function takes a variable number of string arguments of the types
listed in self.HaloModes"""
flags = _Material.Modes.HALO
try:
for a in args:
flags |= self.HaloModes[a]
except:
raise TypeError, "mode must be one of" % self.HaloModes.keys()
self._object.mode = flags
class ModeFlags:
    """Readonly dictionary

    ...containing Material mode bitvectors:

    |------------------------------------------|
    | Name       | Description                 |
    |==========================================|
    | TRACEABLE  | visible for shadow lamps    |
    |------------------------------------------|
    | SHADOW     | cast shadow                 |
    |------------------------------------------|
    | SHADELESS  | do not shade                |
    |------------------------------------------|
    | WIRE       | draw in wireframe           |
    |------------------------------------------|
    | VCOL_LIGHT | use vertex colors           |
    |            | with lighting               |
    |------------------------------------------|
    | VCOL_PAINT | vertex colours              |
    |------------------------------------------|
    | HALO       | Halo material               |
    |------------------------------------------|
    | ZTRANSP    | Z transparency              |
    |------------------------------------------|
    | ZINVERT    | invert Z                    |
    |------------------------------------------|
    | ONLYSHADOW | only shadow, but            |
    |            | don't render                |
    |------------------------------------------|
    | STAR       | ?                           |
    |------------------------------------------|
    | TEXFACE    | textured faces              |
    |------------------------------------------|
    | NOMIST     | disable mist                |
    |------------------------------------------|

    These mode flags directly represent the buttons in the Material parameters
    window (EditButtons)

    Example::

      # be 'm' a material
      from Blender.Material.Modes import *
      m.mode |= (TRACEABLE + WIRE)  # set 'wire' and 'traceable' flags
      m.mode &= ~SHADELESS          # clear 'shadeless' flag
    """
    # Plain aliases of the C module's mode constants, for documentation.
    t = _Material.Modes
    TRACEABLE = t.TRACEABLE
    SHADOW = t.SHADOW
    SHADELESS = t.SHADELESS
    WIRE = t.WIRE
    VCOL_LIGHT = t.VCOL_LIGHT
    VCOL_PAINT = t.VCOL_PAINT
    HALO = t.HALO
    ZTRANSP = t.ZTRANSP
    ZINVERT = t.ZINVERT
    ONLYSHADOW = t.ONLYSHADOW
    STAR = t.STAR
    TEXFACE = t.TEXFACE
    NOMIST = t.NOMIST
    del t  # construction shortcut only
# override: replace the documentation class above by the real (read-only)
# mode constant table from the C module
ModeFlags = _Material.Modes
def get(name = None):
    """If 'name' given, the Material 'name' is returned if existing, 'None' otherwise.
If no name is given, a list of all Materials is returned"""
    if not name:
        return shadow._List(_Material.get(), Material)
    return Material(_Material.get(name))

Get = get # emulation
def New(name = None):
    """Creates a new, empty Material and returns it.

Example::

  from Blender import Material
  mat = Material.New()
"""
    material = Material(_Material.New())
    if name:
        material.name = name
    return material

View File

@@ -0,0 +1,250 @@
"""The Blender Mesh module
This module provides routines for more extensive mesh manipulation.
Later, this Mesh type will also allow interactive access (like in
EditMode).
In the Publisher, Ngons will also be supported (and converted to
triangles on mesh.update(). The following code demonstrates
creation of an Ngon.
Example::
from Blender import Mesh, Object, Scene
m = Mesh.New() # new empty mesh
vlist = []
vlist.append(m.addVert((-0.0, -1.0, 0.0)))
vlist.append(m.addVert((1.0, 0.0, 0.0)))
vlist.append(m.addVert((1.0, 1.0, 0.0)))
vlist.append(m.addVert((0.0, 3.0, 0.0)))
vlist.append(m.addVert((-1.0, 2.0, 0.0)))
vlist.append(m.addVert((-3.0, 1.0, 0.0)))
vlist.append(m.addVert((-3.0, 3.0, 0.0)))
vlist.append(m.addVert((-4.0, 3.0, 0.0)))
vlist.append(m.addVert((-4.0, 0.0, 0.0)))
f = m.addFace(vlist)
# do some calculations: top project vertex coordinates to
# UV coordinates and normalize them to the square [0.0, 1.0]*[0.0, 1.0]
uvlist = map(lambda x: (x.co[0], x.co[1]), vlist)
maxx = max(map(lambda x: x[0], uvlist))
maxy = max(map(lambda x: x[1], uvlist))
minx = min(map(lambda x: x[0], uvlist))
miny = min(map(lambda x: x[1], uvlist))
scale = max((maxx - minx), (maxy - miny))
offx = -minx / scale
offy = -miny / scale
f.uv = map(lambda x: (x[0]/scale + offx, x[1]/scale + offy), uvlist) # assign UV coordinates by 'top' projection
m.update() # update and triangulate mesh
ob = Object.New('Mesh') # create new Object
ob.link(m) # link mesh data
sc = Scene.getCurrent() # get current Scene
sc.link(ob) # link Object to scene
"""
from Blender.Types import NMFaceType
import Blender.Material as Material
from _Blender import NMesh as _NMesh
# Face flag constants from the C module (LIGHT, DYNAMIC, ...)
FACEFLAGS = _NMesh.Const
# Flags assigned to newly added faces when none are given explicitly.
DEFAULTFLAGS = FACEFLAGS.LIGHT + FACEFLAGS.DYNAMIC
import shadow
def makeFace(f):
    """Builds an _NMesh.Face from a sequence of vertices carrying 'uvco'"""
    face = _NMesh.Face()
    for vert in f:
        face.v.append(vert)
        face.uv.append((vert.uvco[0], vert.uvco[1]))
    return face
def toTriangles(ngon):
    """Tesselates the Ngon face 'ngon' and returns the resulting triangles"""
    from utils import tesselation
    # This should be a Publisher only feature...once the tesselation
    # is improved. The GLU tesselator of Mesa < 4.0 is crappy...
    if len(ngon.uv) == len(ngon.v):
        # copy the face's UV coordinates onto its vertices first
        for vert, uv in zip(ngon.v, ngon.uv):
            vert.uvco = uv
    return tesselation.NgonAsTriangles(ngon, makeFace) # return triangles
def Color(r, g, b, a = 1.0):
    """Returns an _NMesh.Col built from float channels in [0.0, 1.0],
scaled to the [0, 255] range"""
    return _NMesh.Col(r * 255, g * 255, b * 255, a * 255)
class Vert: # shadow NMVert class for the tesselator
    """Vertex wrapper class

    This class emulates a float coordinate vector triple, backed by the
    wrapped vertex's 'co' attribute.
    """
    def __init__(self):
        self.vert = None   # the wrapped vertex; assigned after construction
        self.uv = []       # optional UV coordinates

    def __len__(self):
        return 3

    def __setitem__(self, i, val):
        # fix: write through to the wrapped vertex's coordinate vector.
        # The original assigned 'self.vert[i]', which is inconsistent with
        # __getitem__ below (reads 'self.vert.co[i]').
        self.vert.co[i] = val

    def __getitem__(self, i):
        return self.vert.co[i]
class Face:
    """Face wrapper class

    Emulates a mutable sequence of vertex references; 'uv' collects the
    face's per-vertex UV coordinates.
    """
    def __init__(self, vlist):
        self.v = vlist
        self.uv = []

    def __len__(self):
        return len(self.v)

    def __getitem__(self, index):
        return self.v[index]

    def __setitem__(self, index, value):
        self.v[index] = value
# override: the shadow classes above are only used by the tesselator;
# export the real C module types instead
Vert = _NMesh.Vert
Face = _NMesh.Face
class rawMesh:
"""Wrapper for raw Mesh data"""
def __init__(self, object = None):
if object:
self._object = object
else:
self._object = _NMesh.GetRaw()
self.flags = DEFAULTFLAGS
self.smooth = 0
self.recalc_normals = 1
self.faces = self._object.faces[:]
def __getattr__(self, name):
if name == 'vertices':
return self._object.verts
elif name == 'has_col':
return self._object.hasVertexColours()
elif name == 'has_uv':
return self._object.hasFaceUV()
else:
return getattr(self._object, name)
def __repr__(self):
return "Mesh: %d faces, %d vertices" % (len(self.faces), len(self.verts))
def hasFaceUV(self, true = None):
"""Sets the per-face UV texture flag, if 'true' specified (either
0 or 1). Returns the texture flag in any case."""
if true == None:
return self._object.hasFaceUV()
return self._object.hasFaceUV(true)
def hasVertexUV(self, true = None):
"""Sets the per-vertex UV texture flag, if 'true' specified (either
0 or 1). Returns the texture flag in any case."""
if true == None:
return self._object.hasVertexUV()
return self._object.hasVertexUV(true)
def hasVertexColours(self, true = None):
"""Sets the per-face UV texture flag, if 'true' specified (either
0 or 1). Returns the texture flag in any case."""
if true == None:
return self._object.hasVertexColours()
return self._object.hasVertexColours(true)
def addVert(self, v):
"""Adds a vertex to the mesh and returns a reference to it. 'v' can
be a float triple or any data type emulating a sequence, containing the
coordinates of the vertex. Note that the returned value references an
*owned* vertex"""
vert = _NMesh.Vert(v[0], v[1], v[2])
self._object.verts.append(vert)
return vert
def addFace(self, vlist, flags = None, makedefaultUV = 0):
"""Adds a face to the mesh and returns a reference to it. 'vlist'
must be a list of vertex references returned by addVert().
Note that the returned value references an *owned* face"""
if type(vlist) == NMFaceType:
face = vlist
else:
n = len(vlist)
face = _NMesh.Face(vlist)
if makedefaultUV:
face.uv = defaultUV[:n]
self.faces.append(face)
# turn on default flags:
if not flags:
face.mode = self.flags
else:
face.mode = flags
return face
def update(self):
"""Updates the mesh datablock in Blender"""
o = self._object
o = self._object
o.faces = []
smooth = self.smooth
for f in self.faces:
if len(f) > 4: #it's a NGON
faces = toTriangles(f)
for nf in faces:
nf.smooth = smooth
o.faces.append(nf)
else:
o.faces.append(f)
o.update()
def link(self, material):
"""Link material 'material' with the mesh. Note that a mesh can
currently have up to 16 materials, which are referenced by
Face().materialIndex"""
mats = self._object.materials
if material in mats:
print "material already assigned to mesh"
return
mats.append(material._object)
def unlink(self, material):
"""Unlink (remove) material 'material' from the mesh. Note
that the material indices per face need to be updated."""
self._object.materials.remove(material._object)
def setMaterials(self, materials = []):
"""Sets materials. 'materials' must be a list of valid material objects
Note that a mesh can currently have up to 16 materials, which are referenced
by Face().materialIndex"""
self._object.materials = (map(lambda x: x._object, materials))
def getMaterials(self, materials = []):
"""Returns materials assigned to the mesh"""
return shadow._List(self._object.materials, Material.Material)
def New():
    """Creates and returns a new, empty rawMesh object"""
    return rawMesh()
def get(name = None):
"""If 'name' given, the Mesh 'name' is returned if existing, 'None' otherwise."""
if name:
ob = _NMesh.GetRaw(name)
if ob:
return rawMesh(ob)
else:
return None
else:
raise SystemError, "get() for Meshes is not yet supported"

View File

@@ -0,0 +1,192 @@
"""The Blender NMesh module
This module provides access to the raw **Mesh** data block.
Examples will not be given, as the life time of this module will be
most probably limited. Use the 'Mesh' module instead.
"""
import _Blender.NMesh as _NMesh
import shadow
class Mesh(shadow.shadow):
    """The NMesh object

    This contains a copy of the raw mesh object data.

    Attributes

    verts -- A list of vertices of type 'Vert'
    faces -- List of faces of type 'Face'
    """
    def update(self):
        """updates the mesh object in Blender with the modified mesh data"""
        # write the (copied) data back through the wrapped raw object
        self._object.update()
class Vert:
    """Vertex object (documentation only -- see override at end of file)

    Attributes

    co    -- The vertex coordinates (x, y, z)
    no    -- Vertex normal vector (nx, ny, nz)
    uvco  -- Vertex texture ("sticky") coordinates
    index -- The vertex index, if owned by a mesh
    """
class Face:
    """Face object (documentation only -- see override at end of file)

    Attributes

    mode          -- Display mode, see NMesh.FaceModes
    flag          -- flag bit vector, specifying selection flags.
                     see NMesh.FaceFlags
    transp        -- transparency mode bit vector; see NMesh.FaceTranspModes
    v             -- List of Face vertices
    col           -- List of Vertex colours
    materialIndex -- Material index (referring to one of the Materials in
                     the Meshes material list, see Mesh documentation)
    smooth        -- Flag whether smooth normals should be calculated (1 = yes)
    image         -- Reference to texture image object
    uv            -- A list of per-face UV coordinates:
                     [(u0, v0), (u1, v1), (u2, v2), .. ]
    """
class Col:
    """Colour object (documentation only -- see override at end of file)

    See NMesh module documentation for an example.

    Attributes

    r, g, b, a -- The RGBA components of the colour.
                  A component must lie in the range of [0, 255]
    """
class FaceModes:
    """Face mode bit flags

    BILLBOARD -- always orient after camera
    DYNAMIC   -- respond to collisions
    INVISIBLE -- invisible face
    HALO      -- halo face, always point to camera
    LIGHT     -- dynamic lighting
    OBCOL     -- use object colour instead of vertex colours
    SHADOW    -- shadow type
    SHAREDCOL -- shared vertex colors (per vertex)
    TEX       -- has texture image
    TILES     -- uses tiled image
    TWOSIDE   -- twosided face
    """
    # plain aliases of the C module's constants
    t = _NMesh.Const
    BILLBOARD = t.BILLBOARD
    DYNAMIC = t.DYNAMIC
    INVISIBLE = t.INVISIBLE
    HALO = t.HALO
    LIGHT = t.LIGHT
    OBCOL = t.OBCOL
    SHADOW = t.SHADOW
    SHAREDCOL = t.SHAREDCOL
    TEX = t.TEX
    TILES = t.TILES
    TWOSIDE = t.TWOSIDE
    del t  # construction shortcut only
class FaceTranspModes:
    """Readonly dictionary

    ...containing Face transparency draw modes. They are of type 'enum', i.e.
    can not be combined like a bit vector.

    SOLID -- draw solid
    ADD   -- add to background(halo)
    ALPHA -- draw with transparency
    SUB   -- subtract from background
    """
    # plain aliases of the C module's constants
    t = _NMesh.Const
    SOLID = t.SOLID
    ADD = t.ADD
    ALPHA = t.ALPHA
    SUB = t.SUB
    del t  # construction shortcut only
class FaceFlags:
    """Readonly dictionary

    ...containing Face flags bitvectors:

    SELECT -- selected
    HIDE   -- hidden
    ACTIVE -- the active face
    """
    # plain aliases of the C module's constants
    t = _NMesh.Const
    SELECT = t.SELECT
    HIDE = t.HIDE
    ACTIVE = t.ACTIVE
    del t  # construction shortcut only
# Documentation stubs -- the real implementations from the C module are
# re-exported at the end of this file.
def New(name = None):
    """Creates a new NMesh mesh object and returns it"""
    pass

def GetRaw(name = None):
    """If 'name' specified, the Mesh object with 'name' is returned, 'None'
if not existent. Otherwise, a new empty Mesh is initialized and returned."""
    pass

def PutRaw(mesh, name = "Mesh"):
    """Creates a Mesh Object instance in Blender, i.e. a Mesh Object in the
current Scene and returns a reference to it. If 'name' specified, the Mesh
'name' is overwritten. In this case, no Object reference is returned."""
    pass

def GetRawFromObject(name):
    """This returns the mesh as used by the object, which
means it contains all deformations and modifications."""
    pass
# override all these functions again, because we only used them for
# documentation -- NMesh will be no longer supported in future
New = _NMesh.New
GetRaw = _NMesh.GetRaw
PutRaw = _NMesh.PutRaw
GetRawFromObject = _NMesh.GetRawFromObject
Const = _NMesh.Const
Vert = _NMesh.Vert
Face = _NMesh.Face
Col = _NMesh.Col

def NMesh(data):
    """Compatibility helper: returns the raw mesh data unchanged"""
    return data

View File

@@ -0,0 +1,391 @@
##
## Blender API mid level layer 01/2002 // strubi@blender.nl
##
## $Id$
##
"""The Blender Object module
This module provides **Object** manipulation routines.
Example::
from Blender import Object
ob = Object.get('Plane')
actobj = Object.getSelected()[0] # get active Object
print actobj.loc # print position
ob.makeParent([actobj]) # make ob the parent of actobj
"""
import _Blender.Object as _Object
import shadow
reload(shadow) # XXX
class _C:
    pass
# Type of an old-style class instance; used by Object.link() to detect
# Python shadow-wrapper objects.
InstanceType = type(_C())
del _C # don't export this

def _Empty_nodata(obj):
    # Empty objects carry no datablock, so their data wrapper yields None.
    return None
class Object(shadow.hasIPO):
"""Blender Object
A Blender Object (note the capital O) is the instance of a 3D structure,
or rather, the Object that is (normally) visible in your Blender Scene.
An instance of a Blender Object object is created by::
from Blender import Object
ob = Object.New(type) # type must be a valid type string,
# see Object.Types
...
Attributes
Note that it is in general not recommended to access the Object's
attributes directly. Please rather use the get-/set- functions instead.
loc -- position vector (LocX, LocY, LocZ)
dloc -- delta position vector (dLocX, dLocY, dLocZ)
rot -- euler rotation vector (RotX, RotY, RotZ).
Warning: this may change in future.
drot -- delta rotation euler vector (dRotX, dRotY, dRotZ)
Warning: this may change in future.
size -- scale vector (SizeX, SizeY, SizeZ)
dsize -- delta scale vector (dSizeX, dSizeY, dSizeZ)
layer -- layer bitvector (20 bit), defining what layers the object is
visible in
The following items are listed here only for compatibility to older
scripts and are READ-ONLY! **USE the get- functions instead!**
data -- reference to the data object (e.g. Mesh, Camera, Lamp, etc.)
parent -- reference to the parent object, if existing, 'None' otherwise.
track -- reference to the tracked object, if existing, 'None' otherwise.
This bit mask can be read and written:
colbits -- the Material usage mask. A set bit #n means:
The Material #n in the *Object's* material list is used.
Otherwise, the Material #n of the Objects *Data* material list
is displayed.
"""
def __init__(self, object = None):
"""Returns an empty shadow Object"""
self._object = object
def __repr__(self):
return "[Object \"%s\"]" % self.name
def link(self, data):
"""Links Object 'self' with data 'data'. The data type must match
the Object's type, so you cannot link a Lamp to a mesh type Object.
'data' can also be an Ipo object (IpoBlock)
"""
from _Blender import Types
# special case for NMesh:
if type(data) == Types.NMeshType:
return self._object.link(data)
elif type(data) == InstanceType:
if data.__class__.__name__ == "rawMesh":
data.update() # update mesh
elif data.__class__.__name__ == "IpoBlock":
self.setIpo(data)
return shadow._link(self, data)
def copy(self):
"""Returns a copy of 'self'.
This is a true, linked copy, i.e. the copy shares the same data as the
original. The returned object is *free*, meaning, not linked to any scene."""
return Object(self._object.copy())
#def clone(self):
#"""Makes a clone of the specified object in the current scene and
##returns its reference"""
#return Object(self._object.clone())
def shareFrom(self, object):
"""Link data of 'self' with data of 'object'. This works only if
'object' has the same type as 'self'."""
return Object(self._object.shareFrom(object._object))
def getMatrix(self):
"""Returns the object matrix"""
return self._object.getMatrix()
def getInverseMatrix(self):
"""Returns the object's inverse matrix"""
return self._object.getInverseMatrix()
def getData(self):
"Returns the Datablock object containing the object's data, e.g. Mesh"
t = self._object.getType()
data = self._object.data
try:
return self._dataWrappers[t][1](data)
except:
raise TypeError, "getData() not yet supported for this object type"
def getDeformData(self):
"""Returns the Datablock object containing the object's deformed data.
Currently, this is only supported for a Mesh"""
import _Blender.NMesh as _NMesh
t = self._object.getType()
if t == self.Types['Mesh']:
data = _NMesh.GetRawFromObject(self.name)
return self._dataWrappers[t][1](data)
else:
raise TypeError, "getDeformData() not yet supported for this object type"
def getType(self):
"Returns type string of Object, which is one of Object.Types.keys()"
t = self._object.getType()
try:
return self._dataWrappers[t][0]
except:
return "<unsupported>"
def getParent(self):
"Returns object's parent object"
if self._object.parent:
return Object(self._object.parent)
return None
def getTracked(self):
"Returns object's tracked object"
if self._object.track:
return Object(self._object.track)
return None
# FUTURE FEATURE :-) :
# def getLocation():
# """Returns the object's location (x, y, z).
#By default, the location vector is always relative to the object's parent.
#If the location of another coordinate system is wanted, specify 'origin' by
#the object whose coordinate system the location should be calculated in.
#If world coordinates are wanted, set 'relative' = "World"."""
def getLocation(self, relative = None):
"""Returns the object's location (x, y, z). For the moment,
'relative' has no effect."""
l = self._object.loc
return (l[0], l[1], l[2])
def setLocation(self, location, relative = None):
"""Sets the object's location. 'location' must be a vector triple.
See 'getLocation()' about relative coordinate systems."""
l = self._object.loc # make sure this is copied
l[0], l[1], l[2] = location
def getDeltaLocation(self):
"""Returns the object's delta location (x, y, z)"""
l = self._object.dloc
return (l[0], l[1], l[2])
def setDeltaLocation(self, delta_location):
"""Sets the object's delta location which must be a vector triple"""
l = self._object.dloc # make sure this is copied
l[0], l[1], l[2] = delta_location
def getEuler(self):
"""Returns the object's rotation as Euler rotation vector
(rotX, rotY, rotZ)"""
e = self._object.rot
return (e[0], e[1], e[2])
def setEuler(self, euler = (0.0, 0.0, 0.0)):
"""Sets the object's rotation according to the specified Euler angles.
'euler' must be a vector triple"""
e = self._object.rot
e[0], e[1], e[2] = euler
def makeParent(self, objlist, mode = 0, fast = 0):
"""Makes 'self' the parent of the objects in 'objlist' which must be
a list of valid Objects.
If specified:
mode -- 0: make parent with inverse
1: without inverse
fast -- 0: update scene hierarchy automatically
1: don't update scene hierarchy (faster). In this case, you
must explicitely update the Scene hierarchy, see:
'Blender.Scene.getCurrent().update()'"""
list = map(lambda x: x._object, objlist)
return Object(self._object.makeParent(list, mode, fast))
def clrParent(self, mode = 0, fast = 0):
"""Clears parent object.
If specified:
mode -- 2: keep object transform
fast > 0 -- don't update scene hierarchy (faster)"""
return Object(self._object.clrParent(mode, fast))
def getMaterials(self):
"""Returns list of materials assigned to the object"""
from Blender import Material
return shadow._List(self._object.getMaterials(), Material.Material)
def setMaterials(self, materials = []):
"""Sets materials. 'materials' must be a list of valid material objects"""
o = self._object
old_mask = o.colbits
o.colbits = -1 # set material->object linking
o.setMaterials(map(lambda x: x._object, materials))
o.colbits = old_mask
def materialUsage(self, flag):
"""Determines the way the material is used and returns status.
'flag' = 'Data' : Materials assigned to the object's data are shown. (default)
'flag' = 'Object' : Materials assigned to the object are shown.
The second case is desired when the object's data wants to be shared among
objects, but not the Materials assigned to their data. See also 'colbits'
attribute for more (and no future compatible) control."""
if flag == "Object":
self._object.colbits = -1
elif flag == "Data":
self._object.colbits = 0
return self._object.colbits
else:
raise TypeError, "unknown mode %s" % flag
_getters = {}
from Blender import Mesh, Camera, Lamp
t = _Object.Types
Types = {"Camera" : t.CAMERA,
"Empty" : t.EMPTY,
"Lamp" : t.LAMP,
"Mesh" : t.MESH,
}
# create lookup table for data wrappers
_dataWrappers = range(max(Types.values()) + 1)
_dataWrappers[t.MESH] = ("Mesh", Mesh.rawMesh)
_dataWrappers[t.CAMERA] = ("Camera", Camera.Camera)
_dataWrappers[t.LAMP] = ("Lamp", Lamp.Lamp)
_dataWrappers[t.EMPTY] = ("Empty", _Empty_nodata)
t = _Object.DrawTypes
DrawTypes = {"Bounds" : t.BOUNDBOX,
"Wire" : t.WIRE,
"Solid" : t.SOLID,
"Shaded" : t.SHADED,
}
t = _Object.DrawModes
DrawModes = {"axis" : t.AXIS,
"boundbox" : t.BOUNDBOX,
"texspace" : t.TEXSPACE,
"name" : t.NAME,
}
del t
del Mesh, Camera, Lamp
def getDrawMode(self):
"""Returns the Object draw modes as a list of strings"""
return shadow._getModeBits(self.DrawModes, self._object.drawMode)
def setDrawMode(self, *args):
"""Sets the Object's drawing modes as a list of strings"""
self._object.drawMode = shadow._setModeBits(self.DrawModes, args)
def getDrawType(self):
"""Returns the Object draw type"""
for k in self.DrawTypes.keys():
if self.DrawTypes[k] == self.drawType:
return k
def setDrawType(self, name):
"""Sets the Object draw type. 'name' must be one of:
* 'Bounds' : Draw bounding box only
* 'Wire' : Draw in wireframe mode
* 'Solid' : Draw solid
* 'Shaded' : Draw solid, shaded and textures
"""
try:
self._object.drawType = self.DrawTypes[name]
except:
raise TypeError, "type must be one of %s" % self.DrawTypes.keys()
##################
# MODULE FUNCTIONS
def New(objtype, name = None):
    """Creates a new, empty object and returns it.
'objtype' is a string and must be one of::

  Camera
  Empty
  Mesh
  Lamp

More object types will be supported in future.
If 'name' is given, the new object is renamed accordingly.

Example::

  ob = Object.New('Camera')
"""
    if type(objtype) == type(0):
        rawob = _Object.New(objtype) # emulate old syntax
    else:
        rawob = _Object.New(Object.Types[objtype])
    if name:
        # fix: 'name' was accepted but silently ignored before
        # (set on the raw object, matching Lamp.New)
        rawob.name = name
    return Object(rawob)
def get(name = None):
    """If 'name' given, the Object 'name' is returned if existing, 'None' otherwise.
If no name is given, a list of all Objects is returned"""
    if not name:
        return shadow._List(_Object.get(), Object)
    rawob = _Object.get(name)
    if rawob:
        return Object(rawob)
    return None

Get = get # emulation
def getSelected():
    """Returns a list of selected Objects in the active layer(s).
The active object is the first in the list, if visible"""
    return shadow._List(_Object.getSelected(), Object)

GetSelected = getSelected # emulation for older scripts

Types = _Object.Types # for compatibility

View File

@@ -0,0 +1,143 @@
"""The Blender Scene module
This module provides *Scene* manipulation routines.
Example::
from Blender import Scene
curscene = Scene.getCurrent()
ob = curscene.getChildren()[0] # first object
newscene = Scene.New('testscene')
cam = curscene.getCurrentCamera() # get current camera object
newscene.link(ob) # link 'ob' to Scene
newscene.link(cam)
newscene.makeCurrent() # make current Scene
"""
import _Blender.Scene as _Scene
from Object import Object
import shadow
class Scene(shadow.shadowEx):
    """Wrapper for Scene DataBlock
    """
    def link(self, object):
        """Links Object 'object' into Scene 'self'."""
        # This is a strange workaround; Python does not release
        # 'self' (and thus self._object) when an exception in the C API occurs.
        # Therefore, we catch that exception and do it ourselves..
        # Maybe Python 2.2 is able to resolve this reference dependency ?
        try:
            return self._object.link(object._object)
        except:
            del self._object
            raise

    def unlink(self, object):
        """Unlinks (deletes) Object 'object' from Scene."""
        ret = self._object.unlink(object._object)
        return ret

    def copy(self, duplicate_objects = 1):
        """Returns a copy of itself.

The optional argument defines, how the Scene's children objects are
duplicated::

  0: Link Objects

  1: Link Object data

  2: Full Copy"""
        return Scene(self._object.copy(duplicate_objects))

    def update(self):
        """Updates scene 'self'.
This function explicitly resorts the base list of a newly created object
hierarchy."""
        return self._object.update()

    def makeCurrent(self):
        """Makes 'self' the current Scene"""
        return self._object.makeCurrent()

    def frameSettings(self, start = None, end = None, current = None):
        """Sets or retrieves the Scene's frame settings.
If the frame arguments are specified, they are set.
A tuple (start, end, current) is returned in any case."""
        # fix: compare against None so that a frame number of 0 is not
        # silently treated as "unspecified"
        if start is not None and end is not None and current is not None:
            return self._object.frameSettings(start, end, current)
        else:
            return self._object.frameSettings()

    def currentFrame(self, frame = None):
        """If 'frame' is given, the current frame is set and returned in any case"""
        # -1 is the C API's "leave unchanged" sentinel
        if frame:
            return self._object.frameSettings(-1, -1, frame)
        return self._object.frameSettings()[2]

    def startFrame(self, frame = None):
        """If 'frame' is given, the start frame is set and returned in any case"""
        if frame:
            return self._object.frameSettings(frame, -1, -1)
        return self._object.frameSettings()[0]

    def endFrame(self, frame = None):
        """If 'frame' is given, the end frame is set and returned in any case"""
        if frame:
            return self._object.frameSettings(-1, frame, -1)
        return self._object.frameSettings()[1]

    def getChildren(self):
        """Returns a list of the Scene's children Objects"""
        return shadow._List(self._object.getChildren(), Object)

    def getCurrentCamera(self):
        """Returns current active camera Object"""
        cam = self._object.getCurrentCamera()
        if cam:
            return Object(cam)

    def setCurrentCamera(self, object):
        """Sets the current active camera Object 'object'"""
        return self._object.setCurrentCamera(object._object)

    def getRenderdir(self):
        """Returns directory where rendered images are saved to"""
        # NOTE(review): passing self._object as an argument to its own
        # method looks suspicious but matches the original code -- confirm
        # against the _Blender.Scene C API.
        return self._object.getRenderdir(self._object)

    def getBackbufdir(self):
        """Returns the Backbuffer images location"""
        return self._object.getBackbufdir(self._object)
# Module methods
def New(name = 'Scene'):
    """Creates and returns new Scene with (optionally given) name"""
    return Scene(_Scene.New(name))
def get(name = None):
    """Returns a Scene object with name 'name' if given, None if not existing,
or a list of all Scenes otherwise."""
    if not name:
        return shadow._List(_Scene.get(), Scene)
    raw = _Scene.get(name)
    if raw:
        return Scene(raw)
    return None

Get = get # emulation
def getCurrent():
    """Returns the currently active Scene"""
    return Scene(_Scene.getCurrent())
def unlink(scene):
"""Removes the Scene 'scene' from Blender"""
if scene._object.name == _Scene.getCurrent().name:
raise SystemError, "current Scene can not be removed!"
for ob in scene.getChildren():
scene.unlink(ob)
return _Scene.unlink(scene._object)

View File

@@ -0,0 +1,57 @@
"""The Blender Text module
This module lets you manipulate the Text buffers inside Blender.
Text objects are currently owned by the Text editor in Blender.
Example::
from Blender import Text
text = Text.New('Text') # create new text buffer
text.write('hello') # write string
Text.unlink(text) # delete
"""
import _Blender.Text as _Text
class Text:
    """Shadow wrapper for a Blender Text DataBlock (API stubs; the module
    functions below are overridden by their native _Text counterparts)."""
    def clear(self):
        """Empty the underlying text buffer."""
        return None
    def write(self, string):
        """Append 'string' to the end of the text buffer."""
        return None
    def asLines(self):
        """Give the buffer contents as a list of line strings."""
        return None
    def set(self, attr, val):
        """Set the Text attribute named 'attr' to value 'val'.
        Currently supported::
          follow_cursor : 1: Text output follows the cursor"""
        return None
# Module methods
def New(name = None):
    """Create a new empty Text block, optionally named 'name', and return it.
    (documentation stub; the native _Text.New is bound over this below)"""
    return None
def get(name = None):
    """Look up a Text object by 'name' ('None' if it does not exist), or
    return the list of all Text objects when no name is given.
    (documentation stub; the native _Text.get is bound over this below)"""
    return None
def unlink(text):
    """Remove the Text block 'text' from the Blender text window.
    (documentation stub; the native _Text.unlink is bound over this below)"""
    return None
# override:
New = _Text.New
get = _Text.get
unlink = _Text.unlink

View File

@@ -0,0 +1 @@
from _Blender.Types import *

View File

@@ -0,0 +1,65 @@
"""The Blender Window module
This module currently only supports redrawing commands of windows.
Later on, it will allow screen manipulations and access to Window
properties"""
import _Blender.Window as _Window
t = _Window.Types
Const = t # emulation
Types = { 'View' : t.VIEW3D,
'Ipo' : t.IPO,
'Oops' : t.OOPS,
'Button' : t.BUTS,
'File' : t.FILE,
'Image' : t.IMAGE,
'Text' : t.TEXT,
'Action' : t.ACTION,
}
del t
def Redraw(t= 'View'):
    """Redraws all windows of the type 't' which must be one of:
    * "View"   - The 3D view
    * "Ipo"    - The Ipo Window
    * "Oops"   - The OOPS (scenegraph) window
    * "Button" - The Button Window
    * "File"   - The File Window
    * "Image"  - The Image Window (UV editor)
    * "Text"   - The Text editor
    * "Action" - The Action Window"""
    # raw integer window-type constants are passed straight through
    if type(t) == type(1):
        return _Window.Redraw(t)
    try:
        wintype = Types[t]
    except KeyError:
        # bug fix: only the failed name lookup raises TypeError; the old
        # bare 'except' also swallowed errors from _Window.Redraw itself.
        # The call form of raise works on Python 2 and 3.
        raise TypeError("type must be one of %s" % Types.keys())
    _Window.Redraw(wintype)
def RedrawAll():
    """Redraw the entire Blender screen (all windows)."""
    _Window.RedrawAll()
def drawProgressBar(val, text):
    """Draws a progress bar behind the Blender version information.
    'val' is a float value <= 1.0, 'text' contains info about what is currently
    being done.
    This function must be called with 'val' = 0.0 at start and end of the executed
    (and probably time consuming) action.
    The user may cancel the progress with the 'Esc' key, in this case, 0 is returned,
    1 else."""
    # thin wrapper around the native progress bar call
    return _Window.draw_progressbar(val, text)
draw_progressbar = _Window.draw_progressbar # emulation
QRedrawAll = _Window.QRedrawAll

View File

@@ -0,0 +1,157 @@
import _Blender.World as _World
import shadow
def _getAmbCol(obj):
return obj.ambR, obj.ambG, obj.ambB
def _setAmbCol(obj, rgb):
obj.ambR, obj.ambG, obj.ambB = rgb
def _getZenCol(obj):
return obj.zenR, obj.zenG, obj.zenB
def _setZenCol(obj, rgb):
obj.zenR, obj.zenG, obj.zenB = rgb
def _getHorCol(obj):
return obj.horR, obj.horG, obj.horB
def _setHorCol(obj, rgb):
obj.horR, obj.horG, obj.horB = rgb
def _setMist(obj, mist):
obj.mistStart = mist.start
obj.mistDepth = mist.depth
obj.mistHeight = mist.height
obj.mistType = mist.type
def _getMist(obj):
    # build a Mist struct from the raw world datablock's mist settings
    m = Mist()
    m.start = obj.mistStart
    m.depth = obj.mistDepth
    m.height = obj.mistHeight
    m.type = obj.mistType
    return m
class World(shadow.hasIPO, shadow.hasModes):
    """Wrapper for Blender World DataBlock
    Attributes
    horCol       -- horizon colour triple '(r, g, b)' where r, g, b must lie
                    in the range of [0.0, 1.0]
    zenCol       -- zenith colour triple
    ambCol       -- ambient colour triple
    exposure     -- exposure value
    mist         -- mist structure, see class Mist
    starDensity  -- star density (the higher, the more stars)
    starMinDist  -- the minimum distance to the camera
    starSize     -- size of the stars
    starColNoise -- star colour noise
    gravity      -- The gravity constant (9.81 for earth gravity)
    """
    SkyTypes = {'blend' : 1,
                'real'  : 2,
                'paper' : 4,
               }
    Modes = {'mist'  : 1,
             'stars' : 2,
            }
    # maps legacy button names to raw attribute names
    _emulation = {'Expos'  : "exposure",
                  'HorR'   : "horR",
                  'HorG'   : "horG",
                  'HorB'   : "horB",
                  'ZenR'   : "zenR",
                  'ZenG'   : "zenG",
                  'ZenB'   : "zenB",
                  'StarDi' : "starDensity",
                  'StarSi' : "starSize",
                  'MisSta' : "mistStart",
                  'MisDi'  : "mistDepth",
                  'MisHi'  : "mistHeight",
                 }
    # bug fix: the read-accessor table below was also assigned to
    # '_setters', so it was immediately overwritten and attribute *reads*
    # of horCol/zenCol/ambCol/mist never worked. It belongs in '_getters'.
    _getters = {'horCol' : _getHorCol,
                'zenCol' : _getZenCol,
                'ambCol' : _getAmbCol,
                'mist'   : _getMist,
               }
    _setters = {'horCol' : _setHorCol,
                'zenCol' : _setZenCol,
                'ambCol' : _setAmbCol,
                'mist'   : _setMist,
               }
    def getSkyType(self):
        """Returns a list of the set Sky properties, see setSkyType()"""
        skytypes = []
        for k in self.SkyTypes.keys():
            if self._object.skyType & self.SkyTypes[k]:
                skytypes.append(k)
        return skytypes
    def setSkyType(self, *args):
        """Set the sky type. This function takes a variable number
        of string arguments of ['blend', 'real', 'paper']"""
        flags = 0
        try:
            for a in args:
                flags |= self.SkyTypes[a]
        except KeyError:
            # bug fix: the message lacked its '%s' placeholder, and the
            # bare 'except' masked unrelated errors
            raise TypeError("mode must be one of %s" % self.SkyTypes.keys())
        self._object.skyType = flags
class Mist:
    """Mist structure
    Attributes
    start  -- start of the mist
    depth  -- depth of the "mist wall"
    height -- height of the mist layer
    """
    # name -> raw mist type constant
    Types = { 'quadratic' : 0,
              'linear'    : 1,
              'sqrt'      : 2,
            }
    def __init__(self):
        self.start = 0.0
        self.depth = 0.0
        self.height = 0.0
        self.type = 0
    def setType(self, name):
        """Set the Mist type (one of ['quadratic', 'linear', 'sqrt'])"""
        # bug fix: the original used an invalid 'try:/else:' construct;
        # a failed lookup must be caught with 'except'. The call form of
        # raise works on Python 2 and 3.
        try:
            t = self.Types[name]
        except KeyError:
            raise TypeError("type must be one of %s" % self.Types.keys())
        self.type = t
    def getType(self):
        """Returns the Mist type as string. See setType()"""
        for k in self.Types.keys():
            if self.Types[k] == self.type:
                return k

View File

@@ -0,0 +1,23 @@
#
# The Blender main module wrapper
# (c) 06/2001, NaN // strubi@blender.nl
__all__ = ["Object", "Image", "NMesh", "Window", "Mesh", "Tools", "sys",
"Lamp", "Scene", "Draw", "Camera", "Material", "Types", "Ipo",
"BGL"]
import _Blender
Get = _Blender.Get
Redraw = _Blender.Redraw
link = _Blender.link
bylink = _Blender.bylink
import Object, Image, Mesh, Window, Tools, sys, Lamp, Scene, Draw, Camera
import Material, NMesh, BGL, Types, Ipo, Text
deg = lambda x: 0.0174532925199 * x # conversion from degrees to radians
import __builtin__
__builtin__.deg = deg

View File

@@ -0,0 +1,195 @@
#
# Blender mid level modules
# author: strubi@blender.nl
#
#
"""Shadow class module
These classes shadow the internal Blender objects
There is no need for you to use the shadow module really - it is
just there for documentation. Blender object classes with a common
subset of function members derive from these sub classes.
"""
def _List(list, Wrapper):
"""This function returns list of wrappers, taking a list of raw objects
and the wrapper method"""
return map(Wrapper, list)
def _getModeBits(dict, attr):
list = []
for k in dict.keys():
i = dict[k]
if attr & i:
list.append(k)
return list
def _setModeBits(dict, args):
flags = 0
try:
for a in args:
flags |= dict[a]
except:
raise TypeError, "mode must be one of %s" % dict.keys()
return flags
def _link(self, data):
    """Links Object 'self' with data 'data'. The data type must match
    the Object's type, so you cannot link a Lamp to a mesh type Object"""
    try:
        self._object.link(data._object)
    except:
        # NOTE(review): a failed link is only reported (user count of the
        # raw object), never re-raised - confirm this is intended
        print "Users:", self._object.users
class shadow:
    """This is the shadow base class"""
    # Class-level lookup tables, overridden by subclasses:
    # _emulation maps legacy attribute names to raw attribute names,
    # _getters/_setters map virtual attribute names to accessor functions.
    _getters = {}
    _setters = {}
    _emulation = {}
    def __init__(self, object):
        # 'object' is the raw _Blender datablock being wrapped
        self._object = object
    def __getattr__(self, a):
        # lookup order: raw object first, then emulation table, then
        # computed getters (shadowEx reverses the first two)
        try:
            return getattr(self._object, a)
        except:
            if self._emulation.has_key(a):
                return getattr(self._object, self._emulation[a])
            elif self._getters.has_key(a):
                return self._getters[a](self)
            else:
                raise AttributeError, a
    def __setattr__(self, a, val):
        # '_object' itself must bypass the delegation below, otherwise
        # __init__ would recurse through __setattr__
        if a == "_object":
            self.__dict__['_object'] = val
            return
        try:
            setattr(self.__dict__['_object'], a, val)
        except:
            if self._emulation.has_key(a):
                setattr(self.__dict__['_object'], self._emulation[a], val)
            elif self._setters.has_key(a):
                self._setters[a](self, val)
            else:
                raise AttributeError, a
    link = _link
    def rename(self, name):
        """Tries to set the name of the object to 'name'. If the name already
        exists, a unique name is created by appending a version number (e.g. '.001')
        to 'name'. The effective name is returned."""
        self._object.name = name
        return self._object.name
def _getattrEx(self, a):
    # Shared attribute resolver: emulation table first, then computed
    # getters, finally the raw wrapped object. Called unqualified from
    # shadowEx and hasIPO, hence a module-level helper taking 'self'
    # explicitly.
    if self._emulation.has_key(a):
        return getattr(self._object, self._emulation[a])
    elif self._getters.has_key(a):
        return self._getters[a](self)
    else:
        return getattr(self._object, a)
class shadowEx:
    """This is the shadow base class with a minor change; check for
    emulation attributes happens before access to the raw object's attributes"""
    _getters = {}
    _setters = {}
    _emulation = {}
    def __del__(self):
        # break reference cycles explicitly on deletion
        self.__dict__.clear()
    def __init__(self, object):
        # 'object' is the raw _Blender datablock being wrapped
        self._object = object
    def __getattr__(self, a):
        # emulation table and getters take precedence; see the
        # module-level _getattrEx helper
        return _getattrEx(self, a)
    def __setattr__(self, a, val):
        # '_object' must bypass delegation to avoid recursion
        if a == "_object":
            self.__dict__['_object'] = val
            return
        if self._emulation.has_key(a):
            setattr(self.__dict__['_object'], self._emulation[a], val)
        elif self._setters.has_key(a):
            self._setters[a](self, val)
        else:
            setattr(self.__dict__['_object'], a, val)
    def __repr__(self):
        return repr(self._object)
    def rename(self, name):
        """Tries to set the name of the object to 'name'. If the name already
        exists, a unique name is created by appending a version number (e.g. '.001')
        to 'name'. The effective name is returned."""
        self._object.name = name
        return self._object.name
    link = _link
class hasIPO(shadowEx):
    """Object class which has Ipo curves assigned"""
    def getIpo(self):
        "Returns the Ipo assigned to 'self'"
        import Ipo  # local import; Ipo in turn imports from this package
        return Ipo.IpoBlock(self._object.ipo)
    def setIpo(self, ipo):
        "Assigns the IpoBlock 'ipo' to 'self'"
        return self._object.assignIpo(ipo._object)
    def __getattr__(self, a):
        # legacy '.ipo' member access is rerouted through getIpo()
        if a == "ipo":
            print "ipo member access deprecated, use self.getIpo() instead!"
            return self.getIpo()
        else:
            return _getattrEx(self, a)
class hasModes(shadowEx):
    """Object class which has different Modes"""
    def getMode(self):
        """Returns a list of the modes which are set for 'self'"""
        modes = []
        for k in self.Modes.keys():
            if self._object.mode & self.Modes[k]:
                modes.append(k)
        return modes
    def setMode(self, *args):
        """Set the mode of 'self'. This function takes a variable number
        of string arguments of the types listed in self.Modes"""
        flags = 0
        try:
            for a in args:
                flags |= self.Modes[a]
        except KeyError:
            # bug fix: the message string lacked its '%s' placeholder
            # (the old '"..." % keys' itself raised the wrong TypeError),
            # and the bare 'except' masked unrelated errors
            raise TypeError("mode must be one of %s" % self.Modes.keys())
        self._object.mode = flags
class dict:
    """readonly dictionary shadow; unknown keys are retried through the
    class's _emulation name-translation table"""
    _emulation = {}
    def __init__(self, dict):
        self._dict = dict
    def __getitem__(self, key):
        try:
            return self._dict[key]
        except KeyError:
            # bug fix: '_emulation' was referenced as an (undefined)
            # global; it is a class attribute and must be looked up on
            # self. Also narrow the bare 'except' to the failed lookup.
            key = self._emulation[key]
            return self._dict[key]
    def __repr__(self):
        return repr(self._dict)

View File

@@ -0,0 +1,20 @@
from _Blender.sys import *
sep = dirsep # path separator ('/' or '\')
class Path:
    """Minimal os.path replacement built on _Blender.sys's dirsep."""
    def dirname(self, name):
        # delegate to the star-imported dirname from _Blender.sys
        return dirname(name)
    def join(self, a, *p):
        """Join path components like os.path.join, using dirsep."""
        result = a
        for part in p:
            if part[:1] == dirsep:
                # absolute component restarts the path
                result = part
            elif result == '' or result[-1:] == dirsep:
                result = result + part
            else:
                result = result + dirsep + part
        return result
path = Path()

View File

@@ -0,0 +1,4 @@
__all__ = ["importer", "importloader"]
import importloader

View File

@@ -0,0 +1,34 @@
class importer:
    # Base class for pluggable file importers (see importloader);
    # subclasses override checkmagic() and parse().
    def __init__(self,writer=None):
        self.writer = writer    # receives converted data
        self.filename = None    # set once readfile() succeeded
        self.file = None
        self.ext = ""
    def readfile(self, name):
        # NOTE(review): open() raises IOError on failure instead of
        # returning a false value, so the 'return 0' path below is
        # effectively dead - callers should expect IOError
        file = open(name, "r")
        if not file:
            return 0
        self.file = file
        self.filename = name
        self.lines = file.readlines()
    def close(self):
        # close the underlying file if readfile() ever succeeded
        if self.filename:
            self.file.close()
    def checkmagic(self, name):
        # return 1 if magic true (format verified), 0 else
        return 0
    def parse(self, data):
        # parse and convert the data here
        pass
class writer:
    """No-op writer target for importers; subclasses receive the
    converted data through the callback methods."""
    def __init__(self, args = None):
        # nothing to configure in the base class
        pass
    def mesh(self, me, name):
        # called with a converted mesh; the base class discards it
        pass
_inst = importer()
readfile = _inst.readfile
close = _inst.close
checkmagic = _inst.checkmagic
parse = _inst.parse

View File

@@ -0,0 +1,988 @@
# VRML import prototype
#
# strubi@blender.nl
#
"""VRML import module
This is a prototype for VRML97 file import
Supported:
- Object hierarchies, transform collapsing (optional)
- Meshes (IndexedFaceSet, no Basic primitives yet)
- Materials
- Textures (jpg, tga), conversion option from alien formats
"""
import Blender.sys as os # Blender os emulation
from beta import Scenegraph
Transform = Scenegraph.Transform
import beta.Objects
_b = beta.Objects
#from Blender import Mesh
Color = _b.Color
DEFAULTFLAGS = _b.DEFAULTFLAGS
FACEFLAGS = _b.FACEFLAGS
shadowNMesh = _b.shadowNMesh
quat = Scenegraph.quat # quaternion math
vect = quat.vect # vector math module
from vrml import loader
#### GLOBALS
OB = Scenegraph.Object.Types # CONST values
LA = Scenegraph.Lamp.Types
g_level = 1
g_supported_fileformats = ["jpg", "jpeg", "tga"]
#### OPTIONS
OPTIONS = {'cylres' : 16, # resolution of cylinder
'flipnormals' : 0, # flip normals (force)
'mat_as_vcol' : 0, # material as vertex color - warning, this increases mem usage drastically on big files
'notextures' : 0, # no textures - saves some memory
'collapseDEFs' : 0, # collapse DEF nodes
'collapseTF' : 0, # collapse Transforms (as far as possible,
# i.e. currently to Object transform level)
}
#### CONSTANTS
LAYER_EMPTY = (1 << 2)
LAYER_LAMP = (1 << 4)
LAYER_CAMERA = 1 + (1 << 4)
CREASE_ANGLE_THRESHOLD = 0.45 # radians
PARSE_TIME = (loader.parser.IMPORT_PARSE_TIME )
PROCESS_TIME = (1.0 - PARSE_TIME )
PROGRESS_DEPTH = loader.parser.PROGRESS_DEPTH
VERBOSE_DEPTH = PROGRESS_DEPTH
#### DEBUG
def warn(text):
    # always-on warning channel
    print "###", text
def debug2(text):
    # debug output indented by the current scenegraph depth (g_level)
    print (g_level - 1) * 4 * " " + text
def verbose(text):
    # informational output channel
    print text
def quiet(text):
    # silent sink for debug output
    pass
debug = quiet # active debug channel; bind debug2 instead for an indented trace
#### ERROR message filtering:
g_error = {} # dictionary for non-fatal errors to mark whether an error
# was already reported
def clrError():
global g_error
g_error['toomanyfaces'] = 0
def isError(name):
return g_error[name]
def setError(name):
global g_error
g_error[name] = 1
#### ERROR handling
class baseError:
    """Base class for importer exceptions; wraps an arbitrary value."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        # repr() replaces the deprecated backquote syntax (removed in
        # Python 3); identical result on Python 2
        return repr(self.value)
class MeshError(baseError):
    # raised when face/vertex data cannot be converted into a Blender mesh
    pass
UnfinishedError = loader.parser.UnfinishedError
##########################################################
# HELPER ROUTINES
def assignImage(f, img):
    # attach the texture image 'img' to face 'f'
    f.image = img
def assignUV(f, uv):
    """Assign the UV coordinate list 'uv' to face 'f', silently truncating
    it to the face's vertex count when the lengths differ."""
    n = len(f.v)
    if len(uv) != n:
        uv = uv[:n]
        #raise MeshError, "Number of UV coordinates does not match number of vertices in face"
    f.uv = [(u[0], u[1]) for u in uv]  # force plain (u, v) tuples
#### VRML STUFF
# this is used for transform collapsing
class TransformStack:
    """Stack of Transform objects used while collapsing nested VRML
    Transform nodes; starts out holding a single identity Transform."""
    def __init__(self):
        self.stack = [Transform()]
    def push(self, t):
        self.stack.append(t)
    def pop(self):
        return self.stack.pop()
    def last(self):
        # top of stack without popping
        return self.stack[-1]
def fromVRMLTransform(tfnode):
    # Convert a VRML Transform node (scale / rotation axis+angle /
    # translation) into a Scenegraph Transform.
    t = Transform()
    s = tfnode.scale
    t.scale = (s[0], s[1], s[2])
    r = tfnode.rotation
    if r[0] == 0.0 and r[1] == 0.0 and r[2] == 0.0:
        # degenerate zero axis: substitute +Z with zero angle
        rotaxis = (0.0, 0.0, 1.0)
        ang = 0.0
    else:
        rotaxis = vect.norm3(r[:3])
        ang = r[3]
    #t.rotation = (rotaxis, ang)
    t.calcRotfromAxis((rotaxis, ang))
    tr = tfnode.translation
    t.translation = (tr[0], tr[1], tr[2])
    # XXX more to come..
    return t
### TODO: enable material later on
#class dummyMaterial:
#def setMode(self, *args):
#pass
def fromVRMLMaterial(mat):
    # Convert a VRML Material node into a Blender Material datablock.
    name = mat.DEF
    from Blender import Material
    m = Material.New(name)
    m.rgbCol = mat.diffuseColor
    m.alpha = 1.0 - mat.transparency
    m.emit = vect.len3(mat.emissiveColor)
    # NOTE(review): 'm.Emit' (capital) relies on the Material wrapper's
    # button-name emulation of the lowercase 'emit' set above - confirm
    # the emulation table contains 'Emit'
    if m.Emit > 0.01:
        if vect.cross(mat.diffuseColor, mat.emissiveColor) > 0.01 * m.Emit:
            # emissive colour clearly deviates from diffuse: prefer it
            m.rgbCol = mat.emissiveColor
    m.ref = 1.0
    m.spec = mat.shininess
    m.specCol = mat.specularColor
    m.amb = mat.ambientIntensity
    return m
# override:
#def fromVRMLMaterial(mat):
# return dummyMaterial()
def buildVRMLTextureMatrix(tr):
    # Build a 3x3 homogeneous 2D texture matrix from a VRML
    # TextureTransform node: scale+rotate about 'center', then translate.
    from math import sin, cos
    newMat = vect.Matrix
    newVec = vect.Vector
    # rotmatrix
    s = tr.scale
    t = tr.translation
    c = tr.center
    phi = tr.rotation
    SR = newMat()
    C = newMat()
    C[2] = newVec(c[0], c[1], 1.0)
    if abs(phi) > 0.00001:
        SR[0] = newVec(s[0] * cos(phi), s[1] * sin(phi), 0.0)
        SR[1] = newVec(-s[0] * sin(phi), s[1] * cos(phi), 0.0)
    else:
        # negligible rotation: plain scaling
        SR[0] = newVec(s[0], 0.0, 0.0)
        SR[1] = newVec(0.0, s[1], 0.0)
    SR = C * SR * C.inverse() # rotate & scale about rotation center
    T = newMat()
    T[2] = newVec(t[0], t[1], 1.0)
    return SR * T # texture transform matrix
def imageConvert(fromfile, tofile):
    """Hook for converting one image file into another; the target type is
    determined automatically from the extension. This default is a stub
    that always reports failure - users can override it with a real
    converter."""
    return 0
def addImage(path, filename):
    "returns a possibly existing image which is imported by Blender"
    # Loads 'filename' (relative to 'path') as a Blender Image. For
    # unsupported formats it first looks for an already-converted .tga
    # or .jpg sibling, then tries imageConvert(). Returns None on failure.
    from Blender import Image
    img = None
    try:
        r = filename.rindex('.')
    except:
        # no extension at all
        return None
    naked = filename[:r]
    ext = filename[r+1:].lower()
    if path:
        name = os.sep.join([path, filename])
        file = os.sep.join([path, naked])
    else:
        name = filename
        file = naked
    if not ext in g_supported_fileformats:
        tgafile = file + '.tga'
        jpgfile = file + '.jpg'
        for f in tgafile, jpgfile: # look for jpg, tga
            try:
                img = Image.Load(f)
                if img:
                    verbose("couldn't load %s (unsupported).\nFound %s instead" % (name, f))
                    return img
            except IOError, msg:
                pass
        # verify the original file exists before attempting conversion
        try:
            imgfile = open(name, "rb")
            imgfile.close()
        except IOError, msg:
            warn("Image %s not found" % name)
            return None
        verbose("Format unsupported, trying to convert to %s" % tgafile)
        if not imageConvert(name, tgafile):
            warn("image conversion failed")
            return None
        else:
            return Image.Load(tgafile)
        return None # failed
    # ok, is supported
    try:
        img = Image.Load(name)
    except IOError, msg:
        warn("Image %s not found" % name)
    return img
# ok, is supported
def callMethod(_class, method, vnode, newnode, warn = 1):
    # Dispatch the handler named 'method' on processor '_class' for VRML
    # node 'vnode'; 'newnode' is the already-created target parent.
    # Returns the handler's (children, newBnode) pair, or (None, None)
    # when the processor has no such handler.
    meth = None
    try:
        meth = getattr(_class, method)
    except AttributeError:
        if warn:
            unknownType(method)
        return None, None
    if meth:
        return meth(vnode, parent = newnode)
def unknownType(type):
    # report a VRML node type that has no handler
    warn("unsupported:" + repr(type))
def getChildren(vnode):
    """Return vnode's children list, or None when the node has none."""
    try:
        return vnode.children
    except:
        return None
def getNodeType(vnode):
    # the VRML node type string lives in the '__gi__' attribute
    return vnode.__gi__
GroupingNodeTypes = ["Group", "Collision", "Anchor", "Billboard", "Inline",
"LOD", "Switch", "Transform"]
################################################################################
#
#### PROCESSING CLASSES
class NullProcessor:
    """Base processor holding the per-conversion state: transform stack,
    current mesh, object-node factory and material/image caches."""
    def __init__(self, tstack = None):
        # bug fix: the old default argument 'tstack = TransformStack()'
        # was evaluated once at import time, so every processor created
        # without an explicit stack shared the same (mutable) instance
        if tstack is None:
            tstack = TransformStack()
        self.stack = tstack
        self.walker = None
        self.mesh = None
        self.ObjectNode = Scenegraph.NodefromData # may be altered...
        self.MaterialCache = {}
        self.ImageCache = {}
# This is currently not used XXX
class DEFcollapser(NullProcessor):
    """This is for collapsing DEF Transform nodes into a single object.
    XXX currently unused."""
    def __init__(self):
        # bug fix: the base initializer was never called, leaving
        # stack/walker/caches undefined on instances
        NullProcessor.__init__(self)
        self.collapsedNodes = []
    def Transform(self, curnode, parent, **kw):
        name = curnode.DEF
        if not name: # not a DEF node, nothing to collapse here
            return None, None
        # bug fix: 'children' was returned without ever being assigned
        children = getChildren(curnode)
        return children, None
class Processor(NullProcessor):
"""The processor class defines the handler for a VRML Scenegraph node.
Definition of a handler method simply happens by use of the VRML Scenegraph
entity name.
A handler usually creates a new Scenegraph node in the target scenegraph,
converting the data from the given VRML node.
A handler takes the arguments:
curnode: the currently visited VRML node
parent: the previously generated target scenegraph parent node
**kw: additional keywords
It MUST return: (children, newBnode) where:
children: the children of the current VRML node. These will be further
processed by the processor. If this is not wanted (because they
might have been processed by the handler), None must be returned.
newBnode: the newly created target node or None.
"""
def _handleProto(self, curnode, parent, **kw):
    # Fallback for PROTO nodes: protos without an expanded scenegraph
    # are reported as unsupported.
    p = curnode.PROTO
    if not p.sceneGraph:
        print curnode.__gi__, "unsupported"
        return None, None
    # NOTE(review): protos WITH a scenegraph fall through and return
    # None (not a tuple) - callers unpack two values; confirm
def _dummy(self, curnode, parent, **kw):
    # debug handler: dump the node's scenegraph and convert nothing
    print curnode.sceneGraph
    return None, None
#def __getattr__(self, name):
#"""If method is not statically defined, look up prototypes"""
#return self._handleProto
def _currentTransform(self):
    # top of the transform stack = composite transform at this depth
    return self.stack.last()
def _parent(self, curnode, parent, trans):
    # Attach curnode's children under 'parent'. When the node holds more
    # than a single leaf object, an Empty is inserted as the carrier of
    # the shared transform 'trans'.
    name = curnode.DEF
    children = getChildren(curnode)
    debug("children: %s" % children)
    objects = []
    transforms = []
    groups = []
    isempty = 0
    # sort children into leaf objects, Transforms and grouping nodes
    for c in children:
        type = getNodeType(c)
        if type == 'Transform':
            transforms.append(c)
        elif type in GroupingNodeTypes:
            groups.append(c)
        #else:
        elif hasattr(self, type):
            # only node types we have a handler for become objects
            objects.append(c)
    if transforms or groups or len(objects) != 1:
        # it's an empty
        if not name:
            name = 'EMPTY'
        Bnode = self.ObjectNode(None, OB.EMPTY, name) # empty Blender Object node
        if options['layers']:
            Bnode.object.Layer = LAYER_EMPTY
        Bnode.transform = trans
        Bnode.update()
        isempty = 1
        parent.insert(Bnode)
    else: # don't insert extra empty if only one object has children
        Bnode = parent
    for node in objects:
        c, new = self.walker.walk(node, Bnode)
        if not isempty: # only apply transform if no extra transform empty in hierarchy
            new.transform = trans
        Bnode.insert(new)
    for node in transforms:
        self.walker.walk(node, Bnode)
    for node in groups:
        self.walker.walk(node, Bnode)
    return None, None
def sceneGraph(self, curnode, parent, **kw):
    # top-level VRML scenegraph: mark the target parent as root and let
    # the walker process all children
    parent.type = 'ROOT'
    return curnode.children, None
def Transform(self, curnode, parent, **kw):
# we support 'center' and 'scaleOrientation' by inserting
# another Empty in between the Transforms
t = fromVRMLTransform(curnode)
cur = self._currentTransform()
chainable = 0
if OPTIONS['collapseTF']:
try:
cur = cur * t # chain transforms
except:
cur = self._currentTransform()
chainable = 1
self.stack.push(cur)
# here comes the tricky hacky transformation conversion
# TODO: SR not supported yet
if chainable == 1: # collapse, but not chainable
# insert extra transform:
Bnode = self.ObjectNode(None, OB.EMPTY, 'Transform') # Empty
Bnode.transform = cur
parent.insert(Bnode)
parent = Bnode
c = curnode.center
if c != [0.0, 0.0, 0.0]:
chainable = 1
trans = Transform()
trans.translation = (-c[0], -c[1], -c[2])
tr = t.translation
t.translation = (tr[0] + c[0], tr[1] + c[1], tr[2] + c[2])
Bnode = self.ObjectNode(None, OB.EMPTY, 'C') # Empty
Bnode.transform = t
parent.insert(Bnode)
parent = Bnode
else:
trans = t
if chainable == 2: # collapse and is chainable
# don't parent, insert into root node:
for c in getChildren(curnode):
dummy, node = self.walker.walk(c, parent) # skip transform node, insert into parent
if node: # a valid Blender node
node.transform = cur
else:
self._parent(curnode, parent, trans)
self.stack.pop()
return None, None
def Switch(self, curnode, parent, **kw):
    # Switch nodes (runtime choice of children) are not converted
    return None, None
def Group(self, curnode, parent, **kw):
    # Grouping node. With transform collapsing the children go straight
    # under 'parent' carrying the accumulated transform; otherwise they
    # are parented normally with an identity transform via _parent().
    if OPTIONS['collapseTF']:
        cur = self._currentTransform()
        # don't parent, insert into root node:
        children = getChildren(curnode)
        for c in children:
            dummy, node = self.walker.walk(c, parent) # skip transform node, insert into parent
            if node: # a valid Blender node
                node.transform = cur
    else:
        t = Transform()
        self._parent(curnode, parent, t)
    return None, None
def Collision(self, curnode, parent, **kw):
    # Collision nodes are converted like plain groups
    return self.Group(curnode, parent)
# def LOD(self, curnode, parent, **kw):
# c, node = self.walker.walk(curnode.level[0], parent)
# parent.insert(node)
# return None, None
def Appearance(self, curnode, parent, **kw):
    # Convert material and texture of an Appearance node. Results are
    # cached on the processor (curColor, curmaterial, curImage,
    # curtexmatrix) for the geometry handler that follows.
    # material colors:
    mat = curnode.material
    self.curColor = mat.diffuseColor
    name = mat.DEF
    if name:
        if self.MaterialCache.has_key(name):
            self.curmaterial = self.MaterialCache[name]
        else:
            m = fromVRMLMaterial(mat)
            self.MaterialCache[name] = m
            self.curmaterial = m
    else:
        # material itself is unnamed; fall back to the Appearance's DEF
        if curnode.DEF:
            name = curnode.DEF
            if self.MaterialCache.has_key(name):
                self.curmaterial = self.MaterialCache[name]
            else:
                m = fromVRMLMaterial(mat)
                self.MaterialCache[name] = m
                self.curmaterial = m
        else:
            # anonymous: convert without caching
            self.curmaterial = fromVRMLMaterial(mat)
    try:
        name = curnode.texture.url[0]
    except:
        name = None
    if name:
        if self.ImageCache.has_key(name):
            self.curImage = self.ImageCache[name]
        else:
            self.ImageCache[name] = self.curImage = addImage(self.curpath, name)
    else:
        self.curImage = None
    tr = curnode.textureTransform
    if tr:
        self.curtexmatrix = buildVRMLTextureMatrix(tr)
    else:
        self.curtexmatrix = None
    return None, None
def Shape(self, curnode, parent, **kw):
    # Convert a Shape (appearance + geometry) into a mesh object node.
    name = curnode.DEF
    debug(name)
    #self.mesh = Mesh.rawMesh()
    self.mesh = shadowNMesh()
    self.mesh.name = name
    # don't mess with the order of these..
    if curnode.appearance:
        self.walker.preprocess(curnode.appearance, self.walker.preprocessor)
    else:
        # no appearance, get colors from shape (vertex colors)
        self.curColor = None
        self.curImage = None
    self.walker.preprocess(curnode.geometry, self.walker.preprocessor)
    if hasattr(self, 'curmaterial'):
        self.mesh.assignMaterial(self.curmaterial)
    meshobj = self.mesh.write() # write mesh
    del self.mesh
    bnode = Scenegraph.ObjectNode(meshobj, OB.MESH, name)
    if name:
        curnode.setTargetnode(bnode) # mark as already processed
    return None, bnode
def Box(self, curnode, parent, **kw):
    # Box primitive: 8 vertices, 6 quads, centred on the origin.
    col = apply(Color, self.curColor)
    faces = []
    x, y, z = curnode.size
    x *= 0.5; y *= 0.5; z *= 0.5   # 'size' is the full extent
    name = curnode.DEF
    m = self.mesh
    v0 = m.addVert((-x, -y, -z))
    v1 = m.addVert(( x, -y, -z))
    v2 = m.addVert(( x,  y, -z))
    v3 = m.addVert((-x,  y, -z))
    v4 = m.addVert((-x, -y,  z))
    v5 = m.addVert(( x, -y,  z))
    v6 = m.addVert(( x,  y,  z))
    v7 = m.addVert((-x,  y,  z))
    flags = DEFAULTFLAGS
    # no texture image -> bake material colour as vertex colours
    if not self.curImage:
        uvflag = 1
    else:
        uvflag = 0
    m.addFace([v3, v2, v1, v0], flags, uvflag)
    m.addFace([v0, v1, v5, v4], flags, uvflag)
    m.addFace([v1, v2, v6, v5], flags, uvflag)
    m.addFace([v2, v3, v7, v6], flags, uvflag)
    m.addFace([v3, v0, v4, v7], flags, uvflag)
    m.addFace([v4, v5, v6, v7], flags, uvflag)
    for f in m.faces:
        f.col = [col, col, col, col]
    return None, None
def Viewpoint(self, curnode, parent, **kw):
    # A VRML Viewpoint becomes a Blender camera object.
    t = Transform()
    r = curnode.orientation
    name = 'View_' + curnode.description
    t.calcRotfromAxis((r[:3], r[3]))
    t.translation = curnode.position
    Bnode = self.ObjectNode(None, OB.CAMERA, name) # camera Object node
    Bnode.object.Layer = LAYER_CAMERA
    Bnode.transform = t
    return None, Bnode
def DirectionalLight(self, curnode, parent, **kw):
    # directional light -> Blender Sun lamp at a fixed location
    loc = (0.0, 10.0, 0.0)
    l = self._lamp(curnode, loc)
    l.object.data.type = LA.SUN
    return None, l
def PointLight(self, curnode, parent, **kw):
    # point light -> Blender local lamp at the node's location
    l = self._lamp(curnode, curnode.location)
    l.object.data.type = LA.LOCAL
    return None, l
def _lamp(self, curnode, location):
    # Shared lamp creation for the light handlers. The intensity is
    # scaled by 5.0 to approximate VRML brightness in Blender units.
    t = Transform()
    name = curnode.DEF
    energy = curnode.intensity
    t.translation = location
    Bnode = self.ObjectNode(None, OB.LAMP, "Lamp")
    Bnode.object.data.energy = energy * 5.0
    if options['layers']:
        Bnode.object.Layer = LAYER_LAMP
    Bnode.transform = t
    return Bnode
def IndexedFaceSet(self, curnode, **kw):
    """Convert an IndexedFaceSet into the current mesh (self.mesh):
    vertices, faces, optional vertex/face colours, normals and UVs."""
    matxvec = vect.matxvec
    mesh = self.mesh
    debug("IFS, read mesh")
    texcoo = curnode.texCoord
    uvflag = 0
    if curnode.color:
        colors = curnode.color.color
        if curnode.colorIndex: # we have color indices
            colindex = curnode.colorIndex
        else:
            colindex = curnode.coordIndex
        if not texcoo:
            uvflag = 1
    else:
        colors = None
    faceflags = DEFAULTFLAGS
    if not texcoo and OPTIONS['mat_as_vcol'] and self.curColor:
        # bake the material colour into vertex colours
        uvflag = 1
        col = apply(Color, self.curColor)
    elif self.curImage:
        faceflags += FACEFLAGS.TEX
    # MAKE VERTICES
    coo = curnode.coord
    ncoo = len(coo.point)
    if curnode.normal: # normals defined
        normals = curnode.normal.vector
        if curnode.normalPerVertex and len(coo.point) == len(normals):
            # usable per-vertex normals: copy them, skip recalculation
            self.mesh.recalc_normals = 0
            normindex = curnode.normalIndex
            i = 0
            for v in coo.point:
                newv = mesh.addVert(v)
                n = newv.no
                n[0], n[1], n[2] = normals[normindex[i]]
                i += 1
        else:
            for v in coo.point:
                mesh.addVert(v)
    else:
        for v in coo.point:
            mesh.addVert(v)
    if curnode.creaseAngle < CREASE_ANGLE_THRESHOLD:
        self.mesh.smooth = 1
    nvertices = len(mesh.vertices)
    if nvertices != ncoo:
        print "todo: %d, done: %d" % (ncoo, nvertices)
        raise RuntimeError, "FATAL: could not create all vertices"
    # MAKE FACES
    # coordIndex is a flat vertex index list, faces separated by -1
    index = curnode.coordIndex
    vlist = []
    flip = OPTIONS['flipnormals']
    facecount = 0
    vertcount = 0
    cols = []
    if curnode.colorPerVertex: # per vertex colors
        for i in index:
            if i == -1:
                # face delimiter: flush collected vertices
                if flip or (curnode.ccw == 0 and not flip): # counterclockwise face def
                    vlist.reverse()
                f = mesh.addFace(vlist, faceflags, uvflag)
                if uvflag or colors:
                    f.col = cols
                cols = []
                vlist = []
            else:
                if colors:
                    col = apply(Color, colors[colindex[vertcount]])
                    cols.append(col)
                vertcount += 1
                v = mesh.vertices[i]
                vlist.append(v)
    else: # per face colors
        for i in index:
            if i == -1:
                if flip or (curnode.ccw == 0 and not flip): # counterclockwise face def
                    vlist.reverse()
                f = mesh.addFace(vlist, faceflags, uvflag)
                # NOTE(review): facecount is incremented *before* being
                # used as colindex subscript, so colour 0 is never used
                # and the last face may over-index - confirm off-by-one
                facecount += 1
                if colors:
                    col = apply(Color, colors[colindex[facecount]])
                    cols = len(f.v) * [col]
                if uvflag or colors:
                    f.col = cols
                vlist = []
            else:
                v = mesh.vertices[i]
                vlist.append(v)
    # TEXTURE COORDINATES
    if not texcoo:
        return None, None
    self.curmaterial.setMode("traceable", "shadow", "texFace")
    m = self.curtexmatrix
    if m: # texture transform exists:
        # apply the 2D texture matrix to every UV coordinate in place
        for uv in texcoo.point:
            v = (uv[0], uv[1], 1.0)
            v1 = matxvec(m, v)
            uv[0], uv[1] = v1[0], v1[1]
    UVindex = curnode.texCoordIndex
    if not UVindex:
        UVindex = curnode.coordIndex
    # go assign UVs
    self.mesh.hasFaceUV(1)
    j = 0
    uv = []
    for i in UVindex:
        if i == -1: # flush
            if not curnode.ccw:
                uv.reverse()
            assignUV(f, uv)
            assignImage(f, self.curImage)
            uv = []
            j +=1
        else:
            f = mesh.faces[j]
            uv.append(texcoo.point[i])
    return None, None
class PostProcessor(NullProcessor):
    """Handlers invoked after a node's children have been processed."""
    def Shape(self, curnode, **kw):
        # nothing to clean up after a Shape
        return None, None
    def Transform(self, curnode, **kw):
        # nothing to clean up after a Transform
        return None, None
class Walker:
    """The node visitor (walker) class for VRML nodes"""
    def __init__(self, pre, post = NullProcessor(), progress = None):
        # NOTE(review): the default 'post' instance is created once at
        # class-definition time and shared by all Walkers constructed
        # without an explicit postprocessor - confirm this is intended
        self.scene = Scenegraph.BScene()
        self.preprocessor = pre
        self.postprocessor = post
        pre.walker = self # processor knows about walker
        post.walker = self
        self.nodes = 1            # total node count (set externally for progress)
        self.depth = 0            # current recursion depth
        self.progress = progress  # optional progress callback
        self.processednodes = 0
    def walk(self, vnode, parent):
        """Essential walker routine. It walks along the scenegraph nodes and
        processes them according to its pre/post processor methods.
        The preprocessor methods return the children of the node remaining
        to be processed or None. Also, a new created target node is returned.
        If the target node is == None, the current node will be skipped in the
        target scenegraph generation. If it is a valid node, the walker routine
        inserts it into the 'parent' node of the target scenegraph, which
        must be a valid root node on first call, leading us to the example usage:
        p = Processor()
        w = Walker(p, PostProcessor())
        root = Scenegraph.RootNode()
        w.walk(SG, root) # SG is a VRML scenegraph
        """
        global g_level #XXX
        self.depth += 1
        g_level = self.depth
        if self.depth < PROGRESS_DEPTH:
            self.processednodes += 1
            if self.progress:
                ret = self.progress(PARSE_TIME + PROCESS_TIME * float(self.processednodes) / self.nodes)
                if not ret:
                    # NOTE(review): calls the module-level progress(),
                    # not self.progress - confirm
                    progress(1.0)
                    raise UnfinishedError, "User cancelled conversion"
        # if vnode has already been processed, call Linker method, Processor method otherwise
        id = vnode.DEF # get name
        if not id:
            id = 'Object'
        processed = vnode.getTargetnode()
        if processed: # has been processed ?
            debug("linked obj: %s" % id)
            children, bnode = self.link(processed, parent)
        else:
            children, bnode = self.preprocess(vnode, parent)
        if not bnode:
            bnode = parent # pass on
        else:
            parent.insert(bnode) # insert into SG
        if children:
            for c in children:
                self.walk(c, bnode)
        if not processed:
            self.postprocess(vnode, bnode)
        self.depth -= 1
        return children, bnode
    def link(self, bnode, parent):
        """Link already processed data"""
        # link data:
        new = bnode.clone()
        if not new:
            raise RuntimeError, "couldn't clone object"
        return None, new
    def preprocess(self, vnode, newnode = None):
        """Processes a VRML node 'vnode' and returns a custom node. The processor must
        be specified in 'p'.
        Optionally, a custom parent node (previously created) is passed as 'newnode'."""
        pre = "pre"
        nodetype = vnode.__gi__
        debug(pre + "process:" + repr(nodetype) + " " + vnode.DEF)
        return callMethod(self.preprocessor, nodetype, vnode, newnode)
    def postprocess(self, vnode, newnode = None):
        """Postprocessing of a VRML node, see Walker.preprocess()"""
        nodetype = vnode.__gi__
        pre = "post"
        debug(pre + "process:" + repr(nodetype) + " " + vnode.DEF)
        return callMethod(self.postprocessor, nodetype, vnode, newnode, 0)
testfile2 = '/home/strubi/exotic/wrl/BrownTrout1.wrl'
testfile = '/home/strubi/exotic/wrl/examples/VRML_Model_HSL.wrl'
def fix_VRMLaxes(root, scale):
    # Insert a rotated parent Empty so VRML's y-up world matches
    # Blender's z-up; optionally scale the whole import down 1:100.
    from Blender import Object, Scene
    q = quat.fromRotAxis((1.0, 0.0, 0.0), 1.57079)  # ~90 deg about X
    empty = Object.New(OB.EMPTY)
    empty.layer = LAYER_EMPTY
    Scene.getCurrent().link(empty)
    node = Scenegraph.ObjectNode(empty, None, "VRMLscene")
    node.transform.rotation = q
    if scale:
        node.transform.scale = (0.01, 0.01, 0.01)
    # reparent all previous top-level nodes under the new Empty
    for c in root.children:
        node.insert(c)
    node.update()
    root.children = [node]
#################################################################
# these are the routines that must be provided for the importer
# interface in blender
def checkmagic(name):
"check for file magic"
f = open(name, "r")
magic = loader.getFileType(f)
f.close()
if magic == 'vrml':
return 1
elif magic == 'gzip':
verbose("gzipped file detected")
try:
import gzip
except ImportError, value:
warn("Importing gzip module: %s" % value)
return 0
f = gzip.open(name, 'rb')
header = f.readline()
f.close()
if header[:10] == "#VRML V2.0":
return 1
else:
return 0
print "unknown file"
return 0
g_infotxt = ""
def progress(done):
from Blender import Window
ret = Window.draw_progressbar(done, g_infotxt)
return ret
class Counter:
    """Counts scenegraph nodes down to PROGRESS_DEPTH levels deep.

    The running total lives on the instance, so calling count() on the
    tree root yields the grand total.
    """

    def __init__(self):
        self._count = 0
        self.depth = 0

    def count(self, node):
        """Recursively count 'node' and its children.

        Returns the accumulated total, or 0 when the depth limit was hit.
        NOTE(review): a leaf node also makes the call return 0 even though
        it was counted -- kept as-is for compatibility.
        """
        if self.depth >= PROGRESS_DEPTH:
            return 0
        self.depth += 1
        self._count += 1
        if not getChildren(node):
            self.depth -= 1
            return 0
        for child in node.children:
            self.count(child)
        self.depth -= 1
        return self._count
################################################################################
# MAIN ROUTINE
def importfile(name):
    """Main import entry point: convert VRML file 'name' into Blender data.

    Reads the user's 'vrmloptions', parses the file, walks the parsed
    scenegraph converting every node, then fixes the axis convention.
    Returns the converted Scenegraph root node (possibly partial if the
    user cancelled via the progress bar).
    """
    global g_infotxt
    global options
    global DEFAULTFLAGS
    from Blender import Get # XXX
    options = Get('vrmloptions')
    DEFAULTFLAGS = FACEFLAGS.LIGHT + FACEFLAGS.DYNAMIC
    if options['twoside']:
        print "TWOSIDE"
        DEFAULTFLAGS |= FACEFLAGS.TWOSIDE
    clrError()
    g_infotxt = "load & parse file..."
    progress(0.0)
    root = Scenegraph.RootNode()
    try:
        l = loader.Loader(name, progress)
        SG = l.load()
        p = Processor()
        w = Walker(p, PostProcessor(), progress)
        g_infotxt = "convert data..."
        p.curpath = os.path.dirname(name)
        # Pre-count the nodes so the walker can report conversion progress.
        print "counting nodes...",
        c = Counter()
        nodes = c.count(SG)
        print "done."
        w.nodes = nodes # let walker know about number of nodes parsed # XXX
        w.walk(SG, root)
    except UnfinishedError, msg:
        # User cancelled via the progress bar -- keep what was converted.
        print msg
    progress(1.0)
    fix_VRMLaxes(root, options['autoscale']) # rotate coordinate system: in VRML, y is up!
    root.update() # update baselist for proper display
    return root

View File

@@ -0,0 +1,17 @@
"""This module contains a list of valid importers in 'importers'. At runtime,
importer modules can be registered by the 'register' function."""
__all__ = ["VRMLimporter"]
importers = __all__
import VRMLimporter
def register(importer):
    """Register a file importer.

    The importer must provide checkmagic() and importfile(); a TypeError
    is raised otherwise.
    """
    for required in ("checkmagic", "importfile"):
        if not hasattr(importer, required):
            raise TypeError("This is not an importer")
    importers.append(importer)

View File

@@ -0,0 +1,23 @@
# this is the importloader which blender calls on unknown
# file types
import importer
# Extension -> importer module map.  NOTE(review): process() below relies on
# magic checks only; this table appears to be informational.
supported= {'wrl': importer.VRMLimporter}
def process(name):
    """Try every registered importer on file 'name'.

    Returns 1 when an importer recognized (via checkmagic) and imported
    the file, 0 when no importer matched.
    """
    for modname in importer.importers:
        candidate = getattr(importer, modname)
        if candidate.checkmagic(name):
            candidate.importfile(name)
            return 1
    return 0

View File

@@ -0,0 +1,39 @@
""" Constants for sets (of characters)
(c) Copyright Marc-Andre Lemburg; All Rights Reserved.
See the documentation for further information on copyrights,
or contact the author (mal@lemburg.com).
"""
import string
# Simple character strings
a2z = 'abcdefghijklmnopqrstuvwxyz'
A2Z = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
umlaute = '<EFBFBD><EFBFBD><EFBFBD><EFBFBD>'
Umlaute = '<EFBFBD><EFBFBD><EFBFBD>'
alpha = A2Z + a2z
german_alpha = A2Z + a2z + umlaute + Umlaute
number = '0123456789'
alphanumeric = alpha + number
white = ' \t\v'
newline = '\r\n'
formfeed = '\f'
whitespace = white + newline + formfeed
any = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
# Precompiled as sets, e.g. a2z_set = set(a2z)
a2z_set = '\000\000\000\000\000\000\000\000\000\000\000\000\376\377\377\007\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000'
A2Z_set = '\000\000\000\000\000\000\000\000\376\377\377\007\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000'
alpha_set = '\000\000\000\000\000\000\000\000\376\377\377\007\376\377\377\007\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000'
german_alpha_set = '\000\000\000\000\000\000\000\000\376\377\377\007\376\377\377\007\000\000\000\000\000\000\000\000\020\000@\220\020\000@\020'
number_set = '\000\000\000\000\000\000\377\003\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000'
alphanumeric_set = '\000\000\000\000\000\000\377\003\376\377\377\007\376\377\377\007\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000'
white_set = '\000\002\000\000\001\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000'
newline_set = '\000$\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000'
whitespace_set = '\000&\000\000\001\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000'
nonwhitespace_set = '\377\301\377\377\376\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377'
any_set = '\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377\377'
# Clean up
del string

View File

@@ -0,0 +1,348 @@
""" Constants for writing tag tables
The documentation in this file is obsoleted by the HTML docs in
the Doc/ subdirectory of the package. Constants defined here must
match those in mxTextTools/mxte.h.
(c) Copyright Marc-Andre Lemburg; All Rights Reserved.
See the documentation for further information on copyrights,
or contact the author (mal@lemburg.com).
"""
#########################################################################
# This file contains the definitions and constants used by the tagging
# engine:
#
# 1. Matching Tables
# 2. Commands & Constants
# 3. Matching Functions
# 4. Callable tagobjects
# 5. Calling the engine & Taglists
#
#########################################################################
# 1. Matching Tables:
#
# these are tuples of tuples, each entry having the following meaning:
#
# syntax: (tag, cmd, chars|table|fct [,jne] [,je=1])
# tag = object used to mark this section, if it matches
# cmd = command (see below)
# chars = match one or more of these characters
# table = table to use for matching characters
# fct = function to call (see below)
# jne = if the current character doesn't match, jump this
# many table entries relative to the current entry
# je = if we have a match make a relative jump of this length
#
# * a table matches a string iff the end of the table is reached
# (that is: an index is requested that is beyond the end-of-table)
# * a table is not matched if a tag is not matched and no jne is given;
# if it is matched then processing simply moves on to the next entry
# * marking is done by adding the matching slice in the string
# together with the marking object to the tag list; if the object is
# None, then it will not be appended to the taglist list
# * if the flag CallTag is set in cmd, then instead of appending
# matches to the taglist, the tagobj will be called (see below)
#
# TIP: if you are getting an error 'call of a non-function' while
# writing a table definition, you probably have a missing ','
# somewhere in the tuple !
#
# For examples see the tag*.py - files that came with this engine.
#
#########################################################################
# 2. Commands & Constants
#
#
#
# some useful constants for writing matching tables
#
# Sentinel values usable in a table entry's match / jne / je slots.
To = None               # good for cmd=Jump
Here = None             # good for cmd=Fail and EOF
MatchOk = 20000         # somewhere beyond the end of the tag table...
MatchFail = -20000      # somewhere beyond the start of the tag table...
ToEOF = -1              # good for cmd=Move
ThisTable = 999         # to recursively match using the current table;
                        # can be passed as argument to Table and SubTable
                        # instead of a tuple
#
# commands and flags passed in cmd (see below)
#
# note: I might add some further commands to this list, if needed
# (the numbers will then probably change, but not the
# names)
#
# convention: a command "matches", if and only if it moves the
# current position at least one character; a command "reads"
# characters the characters, if they match ok
#
# notations:
#
# x refers to the current position in the string
# len refers to the string length or what the function tag() is told to
# believe it to be (i.e. the engine only looks at the slice text[x:len])
# text refers to the text string
# jne is the optional relative jump distance in case the command
# did not match, i.e. x before and after applying the command
# are the same (if not given the current table is considered
# not to match)
# je is the optional relative jump distance in case the command
# did match (it defaults to +1)
#
# commands
Fail = 0        # this will always fail (position remains unchanged)
Jump = 0        # jump to jne (position remains unchanged)
# match & read chars
AllIn = 11      # all chars in match (at least one)
AllNotIn = 12   # all chars not in match (at least one)
Is = 13         # current char must be == match (matches one char)
IsIn = 14       # current char must be in match (matches one char)
IsNot = 15      # current char must be be != match (matches one char)
IsNotIn = 15    # current char must be not be in match (matches one char)
# NOTE: IsNot and IsNotIn share opcode 15, i.e. they are aliases.
# The *Set variants take a precompiled set (see Constants/Sets.py) as match.
AllInSet = 31
IsInSet = 32
# match & read for whole words
Word = 21           # the next chars must be those in match
WordStart = 22      # all chars up to the first occ. of match (at least one)
WordEnd = 23        # same as WordStart, accept that the text pointer
                    # is moved behind the match
NoWord = WordStart  # all chars up to the first occ. of match (at least one)
# match using search objects BMS or FS
# (the s* variants take a precompiled BMS/FS search object as match)
sWordStart = 111    # all chars up to the first occ. of match (may be 0 chars)
sWordEnd = 112      # same as WordStart, accept that the text pointer
                    # is moved behind the match
sFindWord = 113     # find match and process the found slice only (ignoring
                    # the chars that lead up to the match); positions
                    # the text pointer right after the match like WordEnd
# functions & tables
# (commands that delegate matching to a callable or another tag table)
Call = 201          # call match(text,x,len) as function (see above)
CallArg = 202       # match has to be a 2-tuple (fct,arg), then
                    # fct(text,x,len,arg) is called; the return value is taken
                    # as new x; it is considered matching if the new x is
                    # different than the x before the call -- like always
                    # (note: arg has to be *one* object, e.g. a tuple)
Table = 203         # match using table (given in match)
SubTable = 207      # match using sub table (given in match); the sub table
                    # uses the same taglist as the calling table
TableInList = 204   # same as Table, but match is a tuple (list,index)
                    # and the table list[index] is used as matching
                    # table
SubTableInList = 208
                    # same as TableInList, but the sub table
                    # uses the same taglist as the calling table
# specials
EOF = 1     # current position must be EOF, e.g. >= len(string)
# Position-manipulation commands (these always match, per the notes below).
Skip = 2    # skip match (must be an integer) chars; note: this cmd
            # always matches ok, so jne doesn't have any meaning in
            # this context
Move = 3    # move the current text position to match (if negative,
            # the text length + 1 (!) is added, thus -1 moves to the
            # EOF, -2 to the last char and so on); note: this cmd
            # always matches ok, so jne doesn't have any meaning in
            # this context
# loops
Loop = 205  # loop-construct
#
# (tagobj,Loop,Count,jne,je) - sets/decrements the
# loop variable for current table according to the
# following rules:
# 1. the first time the engine passes this entry
# sets the loop variable to Count and continues
# without reading any character, but saving the
# current position in text
# 2. the next time, it decrements the loop variable
# and checks if it is < 0:
# (a) if it is, then the tagobj is added to the
# taglist with the slice (saved position, current
# position) and processing continues at entry
# current + jne
# (b) else, processing continues at entry current + je
# Note: if you jump out of the loop while the loop
# variable is still > 0, then you *must*
# reset the loop mechanism with
# (None,LoopControl,Reset)
# Note: you can skip the remaining loops by calling
# (None,LoopControl,Break) and jumping back
# to the Loop-entry; this sets the loop
# variable to 0
# Note: tables cannot have nested loops within their
# context; you can have nested loops in nested
# tables though (there is one loop var per
# tag()-call which takes place every time
# a table match is done)
#
LoopControl = 206   # controls the loop variable (always succeeds, i.e.
                    # jne has no meaning);
                    # match may be one of the following two constants:
Break = 0           # * sets the loop variable to 0, thereby allowing
                    #   to skip the remaining loops
Reset = -1          # * resets the loop mechanism (see note above)
#
# See tagLoop.py for some examples.
##########################################################################
#
# Flags (to be '+'ed with the above command code)
#
# Flag bits, OR'ed ('+'ed) onto a command code; decoded by divmod(cmd,256).
CallTag = 256   # call tagobj(taglist,text,l,r,subtags)
                # upon successfully matching the slice [l:r] in text
                # * taglist is the current list tags found (may be None)
                # * subtags is a sub-list, passed when a subtable was used
                #   to do the matching -- it is None otherwise !)
                #
                # example entry with CallTag-flag set:
                #
                # (found_a_tag,CallTag+Table,tagtable)
                # -- if tagtable matches the current text position,
                #    found_a_tag(taglist,text,l,r,newtaglist) is called and
                #    the match is *not* appended to the taglist by the tagging
                #    engine (the function would have to do this, in case it is needed)
AppendToTagobj = 512    # this appends the slice found to the tagobj, assuming
                        # that it is a Python list:
                        # does a tagobj.append((None,l,r,subtags)) call
# Alias for b/w comp.
AppendToTag = AppendToTagobj
AppendTagobj = 1024     # don't append (tagobj,l,r,subtags) to the taglist,
                        # but only tagobj itself; the information in l,r,subtags
                        # is lost, yet this can be used to write tag tables
                        # whose output can be used directly by tag.join()
AppendMatch = 2048      # append the match to the taglist instead of
                        # the tag object; this produces non-standard
                        # taglists !
#########################################################################
# 3. Matching Functions
#
# syntax:
#
# fct(s,x,len_s)
# where s = string we are working on
# x = current index in s where we wnat to match something
# len_s = 'length' of s, this is how far the search may be
# conducted in s, not necessarily the true length of s
#
# * the function has to return the index of the char right after
# matched string, e.g.
#
# 'xyzabc' ---> 'xyz' matches ---> return x+3
#
# * if the string doesn't match simply return x; in other words:
# the function has to return the matching slice's right index
# * you can use this to match e.g. 10 characters of a certain kind,
# or any word out of a given list, etc.
# * note: you cannot give the function additional parameters from within
# the matching table, so it has to know everything it needs to
# know a priori; use dynamic programming !
#
# some examples (not needed, since all are implemented by commands)
#
#
#def matchword(x):
# s = """
#def a(s,x,len_text):
# y = x+%i
# if s[x:y] == %s: return y
# return x
#"""
# exec s % (len(x),repr(x))
# return a
#
#def rejectword(x):
# s = """
#def a(s,x,len_text):
# while x < len(s) and s[x:x+%i] != %s:
# x = x + 1
# return x
#"""
# exec s % (len(x),repr(x))
# return a
#
#def HTML_Comment(s,x,len_text):
# while x < len_text and s[x:x+3] != '-->':
# x = x + 1
# return x
#
#
#########################################################################
# 4. Callable tagobjects
#
# a sample callable tagobj:
#
#
#def test(taglist,text,l,r,newtaglist):
#
# print 'found',repr(text[l:r])[:40],(l,r)
#
#
#########################################################################
# 5. Calling the engine & Taglists
#
# The function
# tag(text,table,start=0,len_text=len(text),taglistinit=[])
# found in mxTextTools:
#
# This function does all the matching according to the above rules.
# You give it a text string and a tag table and it will
# start processing the string starting from 'start' (which defaults to 0)
# and continue working until it reaches the 'EOF', i.e. len_text (which
# defaults to the text length). It thus tags the slice text[start:len_text].
#
# The function will create a list of found tags in the following
# format (which I call taglist):
#
# (tagobj,l,r,subtaglist)
#
# where: tagobj = specified tag object taken from the table
# [l:r] = slice that matched the tag in text
# subtaglist = if matching was done using a subtable
# this is the taglist it produced; in all other
# cases this will be None
#
# * if you pass None as taglistinit, then no taglist will be created,
# i.e. only CallTag commands will have any effect. (This saves
# temporary memory for big files)
# * the function returns a tuple:
# (success, taglist, nextindex)
# where: success = 0/1
# taglist = the produced list or None
# nextindex = the index+1 of the last char that matched
# (in case of failure, this points to the beginning
# of the substring that caused the problem)
#
### Module init.

def _module_init():
    """Build the global 'id2cmd' mapping: numeric command code -> name.

    Scans this module's globals for integer values.  Code 0 is shared by
    Fail and Jump, so it maps to the combined label 'Fail/Jump'.  Note
    that *every* integer global (e.g. MatchOk) ends up in the table too.
    """
    global id2cmd
    import types
    id2cmd = {}
    IntType = types.IntType
    for cmd,value in globals().items():
        if type(value) == IntType:
            if value == 0:
                id2cmd[0] = 'Fail/Jump'
            else:
                id2cmd[value] = cmd

_module_init()

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,766 @@
""" mxTextTools - A tools package for fast text processing.
(c) Copyright Marc-Andre Lemburg; All Rights Reserved.
See the documentation for further information on copyrights,
or contact the author (mal@lemburg.com).
"""
import string,types
#
# import the C module and the version number
#
from mxTextTools import *
from mxTextTools import __version__
#
# import the symbols needed to write tag tables
#
from Constants.TagTables import *
#
# import the some handy character sets
#
from Constants.Sets import *
#
# format and print tables, taglists and joinlists:
#
def format_entry(table,i,
                 TupleType=types.TupleType):
    """ Returns a pp-formatted tag table entry as string
    """
    e = table[i]
    # Defaults for the optional jump slots (see Constants/TagTables.py).
    jne = 0
    je = 1
    t,c,m = e[:3]
    if len(e)>3: jne = e[3]
    if len(e)>4: je = e[4]
    # The command value packs flag bits in multiples of 256.
    flags,cmd = divmod(c,256)
    c = id2cmd[cmd]
    if type(m) == TupleType and c in ('Table','SubTable'):
        m = '<table>'
    elif m == None:
        m = 'Here/To'
    else:
        # Truncate long match reprs to keep the line readable.
        m = repr(m)
        if len(m) > 17:
            m = m[:17]+'...'
    return '%-15.15s : %-30s : jne=%+i : je=%+i' % \
           (repr(t),'%-.15s : %s'%(c,m),jne,je)
def format_table(table,i=-1):
    """ Returns a pp-formatted version of the tag table as string.

        The entry at index i (if any) is flagged with an arrow.
    """
    l = []
    for j in range(len(table)):
        if i == j:
            l.append('--> '+format_entry(table,j))
        else:
            l.append(' '+format_entry(table,j))
    return string.join(l,'\n')+'\n'
def print_tagtable(table):
    """ Print the tag table
    """
    print format_table(table)
def print_tags(text,tags,indent=0):
    """ Print the taglist tags for text using the given indent level
    """
    for tag,l,r,subtags in tags:
        # Truncate long reprs so each entry stays on one line.
        tagname = repr(tag)
        if len(tagname) > 20:
            tagname = tagname[:20] + '...'
        target = repr(text[l:r])
        if len(target) > 60:
            target = target[:60] + '...'
        if subtags == None:
            print ' '+indent*' |',tagname,': ',target,(l,r)
        else:
            # Same line format, then recurse one indent level deeper.
            print ' '+indent*' |',tagname,': ',target,(l,r)
            print_tags(text,subtags,indent+1)
def print_joinlist(joins,indent=0,
                   StringType=types.StringType):
    """ Print the joinlist joins using the given indent level
    """
    for j in joins:
        if type(j) == StringType:
            # Literal string entry.
            text = repr(j)
            if len(text) > 40:
                text = text[:40] + '...'
            print ' '+indent*' |',text,' (len = %i)' % len(j)
        else:
            # (text, l, r, ...) tuple entry: show the referenced slice.
            text = j[0]
            l,r = j[1:3]
            text = repr(text[l:r])
            if len(text) > 40:
                text = text[:40] + '...'
            print ' '+indent*' |',text,' (len = %i)' % (r-l),(l,r)
def normlist(jlist,
             StringType=types.StringType):
    """ Return a normalized joinlist.

        All tuples in the joinlist are turned into real strings.  The
        resulting list is an equivalent copy of the joinlist only
        consisting of strings.
    """
    l = [''] * len(jlist)
    for i in range(len(jlist)):
        entry = jlist[i]
        if type(entry) == StringType:
            l[i] = entry
        else:
            # (text, l, r, ...) tuple -> materialize the slice text[l:r].
            l[i] = entry[0][entry[1]:entry[2]]
    return l
#
# aid for matching from a list of words
#
def _lookup_dict(l,index=0):
d = {}
for w in l:
c = w[index]
if d.has_key(c):
d[c].append(w)
else:
d[c] = [w]
return d
def word_in_list(l):
    """ Creates a lookup table that matches the words in l
    """
    t = []
    # Group the words by their first character.
    d = _lookup_dict(l)
    keys = d.keys()
    if len(keys) < 18: # somewhat arbitrary bound
        # fast hint for small sets: fail immediately unless the current
        # char is one of the possible first chars; Skip -1 then rewinds
        # so the per-group tests below see the same char again.
        t.append((None,IsIn,string.join(d.keys(),'')))
        t.append((None,Skip,-1))
    # test groups
    for c, group in d.items():
        t.append(None) # hint will be filled in later
        i = len(t)-1
        for w in group:
            # Word matches the remainder (first char is tested by the hint).
            t.append((None,Word,w[1:],+1,MatchOk))
        t.append((None,Fail,Here))
        # add hint: test the group's first char, jumping past the whole
        # group when it doesn't match.
        t[i] = (None,Is,c,len(t)-i)
    t.append((None,Fail,Here))
    return tuple(t)
#
# Extra stuff useful in combination with the C functions
#
def replace(text,what,with,start=0,stop=None,
            SearchObject=BMS,join=join,joinlist=joinlist,tag=tag,
            string_replace=string.replace,type=type,
            StringType=types.StringType):
    """A fast replacement for string.replace.

       what can be given as string or search object.

       This function is a good example for the AppendTagobj-flag usage
       (the taglist can be used directly as joinlist).
    """
    if type(what) == StringType:
        so = SearchObject(what)
    else:
        # Already a search object: take the literal string from it.
        so = what
        what = so.match
    if stop is None:
        if start == 0 and len(what) < 2:
            # Trivial case: string.replace is faster for 1-char targets.
            return string_replace(text,what,with)
        stop = len(text)
    # Three-entry tag table:
    #  0: scan to the next occurrence, tagging the skipped text so the
    #     final join() keeps it
    #  1: on a hit append the replacement itself (AppendTagobj), skip the
    #     matched chars and loop back to entry 0
    #  2: no further hit -> tag the rest of the text
    t = ((text,sWordStart,so,+2),
         # Found something, replace and continue searching
         (with,Skip+AppendTagobj,len(what),-1,-1),
         # Rest of text
         (text,Move,ToEOF)
         )
    found,taglist,last = tag(text,t,start,stop)
    if not found:
        return text
    return join(taglist)
# Alternative (usually slower) versions using different techniques:
def _replace2(text,what,with,start=0,stop=None,
              join=join,joinlist=joinlist,tag=tag,
              StringType=types.StringType,BMS=BMS):
    """Analogon to string.replace; returns a string with all occurences
       of what in text[start:stop] replaced by with
       - uses a one entry tag-table and a Boyer-Moore-Search-object
       - what can be a string or a BMS/FS search object
       - it's faster than string.replace in those cases, where
         the what-string gets long and/or many replacements are found;
         faster meaning from a few percent up to many times as fast
       - start and stop define the slice of text to work in
       - stop defaults to len(text)
    """
    if stop is None:
        stop = len(text)
    if type(what) == StringType:
        what=BMS(what)
    # Single-entry table: find every occurrence; the replacement string is
    # the tagobj, so joinlist()/join() splice it in for each hit.
    t = ((with,sFindWord,what,+1,+0),)
    found,taglist,last = tag(text,t,start,stop)
    if not found:
        return text
    return join(joinlist(text,taglist))
def _replace3(text,what,with,
              join=string.join,FS=FS,
              StringType=types.StringType):
    # Pure-Python variant: collect all match slices first, then stitch
    # the in-between pieces together with the replacement.
    if type(what) == StringType:
        what=FS(what)
    slices = what.findall(text)
    if not slices:
        return text
    l = []
    x = 0
    for left,right in slices:
        l.append(text[x:left] + with)
        x = right
    l.append(text[x:])
    return join(l,'')
def _replace4(text,what,with,
              join=join,joinlist=joinlist,tag=tag,FS=FS,
              StringType=types.StringType):
    # Variant using findall() plus a manually built joinlist: each entry
    # becomes (replacement, l, r), which joinlist() turns into splices.
    if type(what) == StringType:
        what=FS(what)
    slices = what.findall(text)
    if not slices:
        return text
    repl = [None]*len(slices)
    for i in range(len(slices)):
        repl[i] = (with,)+slices[i]
    return join(joinlist(text,repl))
def find(text,what,start=0,stop=None,
         SearchObject=FS):
    """ A faster replacement for string.find().

        Uses a search object for the task. Returns the position of the
        first occurance of what in text[start:stop]. stop defaults to
        len(text). Returns -1 in case no occurance was found.
    """
    # Test against None explicitly: an explicit stop=0 is a valid (empty)
    # bound and must not fall through to the unbounded search.
    if stop is not None:
        return SearchObject(what).find(text,start,stop)
    else:
        return SearchObject(what).find(text,start)
def findall(text,what,start=0,stop=None,
            SearchObject=FS):
    """ Find all occurances of what in text.

        Uses a search object for the task. Returns a list of slice
        tuples (l,r) marking the all occurances in
        text[start:stop]. stop defaults to len(text). Returns an
        empty list in case no occurance was found.
    """
    # Test against None explicitly so an explicit stop=0 is honoured
    # (the old truth-value test silently searched the whole string).
    if stop is not None:
        return SearchObject(what).findall(text,start,stop)
    else:
        return SearchObject(what).findall(text,start)
def split(text,sep,start=0,stop=None,translate=None,
          SearchObject=FS):
    """ A faster replacement for string.split().

        Uses a search object for the task. Returns the result of
        cutting the text[start:stop] string into snippets at every sep
        occurance in form of a list of substrings. translate is passed
        to the search object as translation string.

        XXX convert to a C function... or even better, add as method
            to search objects.
    """
    if translate:
        so = SearchObject(sep,translate)
    else:
        so = SearchObject(sep)
    # 'is not None' so that an explicit stop=0 is honoured (the old
    # truth-value test silently searched the whole string).
    if stop is not None:
        cuts = so.findall(text,start,stop)
    else:
        cuts = so.findall(text,start)
    # NOTE(review): the first snippet starts at 0, not 'start', so text
    # before 'start' is included -- kept as-is for compatibility.
    l = 0
    result = []          # renamed from 'list' to stop shadowing the builtin
    append = result.append
    for left,right in cuts:
        append(text[l:left])
        l = right
    append(text[l:])
    return result
# helper for tagdict
def _tagdict(text,dict,prefix,taglist):
for o,l,r,s in taglist:
pfx = prefix + str(o)
dict[pfx] = text[l:r]
if s:
_tagdict(text,dict,pfx+'.',s)
def tagdict(text,*args):
    """ Tag a text just like the function tag() and then convert
        its output into a dictionary where the tagobjects reference
        their respective strings

        - this function emulates the interface of tag()
        - in contrast to tag() this function *does* make copies
          of the found strings
        - returns a tuple (rc,tagdict,next) with the same meaning
          of rc and next as tag(); tagdict is the new dictionary -
          None in case rc is 0
    """
    rc,taglist,next = apply(tag,(text,)+args)
    if not rc:
        return (rc,None,next)
    d = {}
    for o,l,r,s in taglist:
        pfx = str(o)
        d[pfx] = text[l:r]
        if s:
            # Bug fix: recurse into the result dict 'd'; the original
            # passed the *builtin* 'dict' here (via a confusing local
            # alias 'tagdict = _tagdict'), which cannot work.
            _tagdict(text,d,pfx+'.',s)
    return (rc,d,next)
def invset(chars):
    """ Return a set with all characters *except* the ones in chars.
    """
    # set() is the mxTextTools bitmap-set constructor (not the builtin);
    # the 0 flag requests the inverted set.
    return set(chars,0)
def is_whitespace(text,start=0,stop=None,
                  nonwhitespace=nonwhitespace_set,setfind=setfind):
    """ Return 1 iff text[start:stop] only contains whitespace
        characters (as defined in Constants/Sets.py), 0 otherwise.
    """
    if stop is None:
        stop = len(text)
    # Look for the first non-whitespace char; -1 means all whitespace.
    i = setfind(text,nonwhitespace,start,stop)
    return (i < 0)
def collapse(text,seperator=' ',
             join=join,setsplit=setsplit,collapse_set=set(newline+whitespace)):
    """ Eliminates newline characters and compresses whitespace
        characters into one space.

        The result is a one line text string. Tim Peters will like
        this function called with '-' seperator ;-)
    """
    # NOTE: parameter name 'seperator' [sic] kept for API compatibility.
    return join(setsplit(text,collapse_set),seperator)
# Tag table for splitlines(): cuts the text at '\r', '\n' or '\r\n'
# boundaries, appending each line (without its line end) to the taglist.
_linesplit_table = (
    (None,Is,'\r',+1),
    (None,Is,'\n',+1),
    ('line',AllInSet+AppendMatch,set('\r\n',0),+1,-2),
    (None,EOF,Here,+1,MatchOk),
    ('empty line',Skip+AppendMatch,0,0,-4),
    )
def splitlines(text,
               tag=tag,linesplit_table=_linesplit_table):
    """ Split text into a list of single lines.

        The following combinations are considered to be line-ends:
        '\r', '\r\n', '\n'; they may be used in any combination.  The
        line-end indicators are removed from the strings prior to
        adding them to the list.

        This function allows dealing with text files from Macs, PCs
        and Unix origins in a portable way.
    """
    # The taglist (tag()'s second result) already holds the line strings,
    # thanks to the AppendMatch entries in the table.
    return tag(text,linesplit_table)[1]
# Tag table for countlines(): same line scanning as _linesplit_table, but
# AppendTagobj only records the tag objects, avoiding string copies.
_linecount_table = (
    (None,Is,'\r',+1),
    (None,Is,'\n',+1),
    ('line',AllInSet+AppendTagobj,set('\r\n',0),+1,-2),
    (None,EOF,Here,+1,MatchOk),
    ('empty line',Skip+AppendTagobj,0,0,-4),
    )
def countlines(text,
               linecount_table=_linecount_table):
    """ Returns the number of lines in text.

        Line ends are treated just like for splitlines() in a
        portable way.
    """
    # Each line contributes one tag object to the taglist, so its length
    # is the line count.
    return len(tag(text,linecount_table)[1])
# Tag table splitting text into whitespace-separated words.
# NOTE(review): appears unused in this module -- splitwords() below calls
# setsplit() directly instead.
_wordsplit_table = (
    (None,AllInSet,whitespace_set,+1),
    ('word',AllInSet+AppendMatch,nonwhitespace_set,+1,-1),
    (None,EOF,Here,+1,MatchOk),
    )
def splitwords(text,
               setsplit=setsplit,whitespace_set=whitespace_set):
    """ Split text into a list of single words.

        Words are separated by whitespace. The whitespace is stripped
        before adding the words to the list.
    """
    # Delegate to the mxTextTools setsplit primitive.
    return setsplit(text,whitespace_set)
#
# Testing and benchmarking
#
# Taken from my hack.py module:
import time
class _timer:
    """ timer class with a quite obvious interface
        - .start() starts a fairly accurate CPU-time timer plus an
          absolute timer
        - .stop() stops the timer and returns a tuple: the CPU-time in seconds
          and the absolute time elapsed since .start() was called
    """
    # Class-level defaults; overwritten per instance by start()/stop().
    utime = 0   # CPU time
    atime = 0   # absolute (wall-clock) time

    def start(self,
              clock=time.clock,time=time.time):
        self.atime = time()
        self.utime = clock()

    def stop(self,
             clock=time.clock,time=time.time):
        self.utime = clock() - self.utime
        self.atime = time() - self.atime
        return self.utime,self.atime

    def usertime(self,
                 clock=time.clock,time=time.time):
        # Stops the timers and returns the CPU-time component only.
        self.utime = clock() - self.utime
        self.atime = time() - self.atime
        return self.utime

    def abstime(self,
                clock=time.clock,time=time.time):
        self.utime = clock() - self.utime
        self.atime = time() - self.atime
        # Bug fix: return the absolute time; the original returned
        # self.utime, duplicating usertime().
        return self.atime

    def __str__(self):
        return '%0.2fu %0.2fa sec.' % (self.utime,self.atime)
def _bench(file='mxTextTools/mxTextTools.c'):
    """Benchmark the TextTools replace/case/join/split functions against
    their string-module counterparts, using 'file' as sample data.

    Each section is currently disabled ('if 0:'); flip to 1 to run it.
    """

    def mismatch(orig,new):
        # Report where two result strings first differ, or a length
        # difference when one is a prefix of the other.
        print
        for i in range(len(orig)):
            if orig[i] != new[i]:
                break
        else:
            print 'Length mismatch: orig=%i new=%i' % (len(orig),len(new))
            if len(orig) > len(new):
                print 'Missing chars:'+repr(orig[len(new):])
            else:
                print 'Excess chars:'+repr(new[len(orig):])
            print
            return
        print 'Mismatch at offset %i:' % i
        print (orig[i-100:i]
               + '<- %s != %s ->' % (repr(orig[i]),repr(new[i]))
               + orig[i+1:i+100])
        print

    text = open(file).read()
    import string
    t = _timer()
    print 'Working on a %i byte string' % len(text)

    # --- string replacement -----------------------------------------
    if 0:
        print
        print 'Replacing strings'
        print '-'*72
        print
        for what,with in (('m','M'),('mx','MX'),('mxText','MXTEXT'),
                          ('hmm','HMM'),('hmmm','HMM'),('hmhmm','HMM')):
            print 'Replace "%s" with "%s"' % (what,with)
            t.start()
            for i in range(100):
                rtext = string.replace(text,what,with)
            print 'with string.replace:',t.stop(),'sec.'
            t.start()
            for i in range(100):
                ttext = replace(text,what,with)
            print 'with tag.replace:',t.stop(),'sec.'
            if ttext != rtext:
                print 'results are NOT ok !'
                print '-'*72
                mismatch(rtext,ttext)
            t.start()
            for i in range(100):
                ttext = _replace2(text,what,with)
            print 'with tag._replace2:',t.stop(),'sec.'
            if ttext != rtext:
                print 'results are NOT ok !'
                print '-'*72
                print rtext
            t.start()
            for i in range(100):
                ttext = _replace3(text,what,with)
            print 'with tag._replace3:',t.stop(),'sec.'
            if ttext != rtext:
                print 'results are NOT ok !'
                print '-'*72
                print rtext
            t.start()
            for i in range(100):
                ttext = _replace4(text,what,with)
            print 'with tag._replace4:',t.stop(),'sec.'
            if ttext != rtext:
                print 'results are NOT ok !'
                print '-'*72
                print rtext
            print

    # --- upper/lower casing -----------------------------------------
    if 0:
        print
        print 'String lower/upper'
        print '-'*72
        print
        op = string.lower
        t.start()
        for i in range(1000):
            op(text)
        t.stop()
        print ' string.lower:',t
        op = string.upper
        t.start()
        for i in range(1000):
            op(text)
        t.stop()
        print ' string.upper:',t
        op = upper
        t.start()
        for i in range(1000):
            op(text)
        t.stop()
        print ' TextTools.upper:',t
        op = lower
        t.start()
        for i in range(1000):
            op(text)
        t.stop()
        print ' TextTools.lower:',t
        print 'Testing...',
        ltext = string.lower(text)
        assert ltext == lower(text)
        utext = string.upper(text)
        assert utext == upper(text)
        print 'ok.'

    # --- joining lists ----------------------------------------------
    if 0:
        print
        print 'Joining lists'
        print '-'*72
        print
        l = setsplit(text,whitespace_set)
        op = string.join
        t.start()
        for i in range(1000):
            op(l)
        t.stop()
        print ' string.join:',t
        op = join
        t.start()
        for i in range(1000):
            op(l)
        t.stop()
        print ' TextTools.join:',t
        op = string.join
        t.start()
        for i in range(1000):
            op(l,' ')
        t.stop()
        print ' string.join with seperator:',t
        op = join
        t.start()
        for i in range(1000):
            op(l,' ')
        t.stop()
        print ' TextTools.join with seperator:',t

    # --- building join lists ----------------------------------------
    if 0:
        print
        print 'Creating join lists'
        print '-'*72
        print
        repl = []
        # NOTE(review): append() is called with three arguments below; this
        # section is dead ('if 0:') and would raise a TypeError if enabled.
        for i in range(0,len(text),10):
            repl.append(str(i),i,i+1)
        op = joinlist
        t.start()
        for i in range(1000):
            op(text,repl)
        t.stop()
        print ' TextTools.joinlist:',t

    # --- splitting text ---------------------------------------------
    if 0:
        print
        print 'Splitting text'
        print '-'*72
        print
        op = string.split
        t.start()
        for i in range(100):
            op(text)
        t.stop()
        print ' string.split whitespace:',t,'(',len(op(text)),'snippets )'
        op = setsplit
        ws = whitespace_set
        t.start()
        for i in range(100):
            op(text,ws)
        t.stop()
        print ' TextTools.setsplit whitespace:',t,'(',len(op(text,ws)),'snippets )'
        assert string.split(text) == setsplit(text,ws)
        op = string.split
        sep = 'a'
        t.start()
        for i in range(100):
            op(text,sep)
        t.stop()
        print ' string.split at "a":',t,'(',len(op(text,sep)),'snippets )'
        op = split
        sep = 'a'
        t.start()
        for i in range(100):
            op(text,sep)
        t.stop()
        print ' TextTools.split at "a":',t,'(',len(op(text,sep)),'snippets )'
        op = charsplit
        sep = 'a'
        t.start()
        for i in range(100):
            op(text,sep)
        t.stop()
        print ' TextTools.charsplit at "a":',t,'(',len(op(text,sep)),'snippets )'
        op = setsplit
        sep = set('a')
        t.start()
        for i in range(100):
            op(text,sep)
        t.stop()
        print ' TextTools.setsplit at "a":',t,'(',len(op(text,sep)),'snippets )'
        # Note: string.split and setsplit don't work identically !
        op = string.split
        sep = 'int'
        t.start()
        for i in range(100):
            op(text,sep)
        t.stop()
        print ' string.split at "int":',t,'(',len(op(text,sep)),'snippets )'
        op = split
        sep = 'int'
        t.start()
        for i in range(100):
            op(text,sep)
        t.stop()
        print ' TextTools.split at "int":',t,'(',len(op(text,sep)),'snippets )'
        op = setsplit
        sep = set('int')
        t.start()
        for i in range(100):
            op(text,sep)
        t.stop()
        print ' TextTools.setsplit at "i", "n", "t":',t,'(',len(op(text,sep)),'snippets )'
        op = string.split
        sep = 'register'
        t.start()
        for i in range(100):
            op(text,sep)
        t.stop()
        print ' string.split at "register":',t,'(',len(op(text,sep)),'snippets )'
        op = split
        sep = 'register'
        t.start()
        for i in range(100):
            op(text,sep)
        t.stop()
        print ' TextTools.split at "register":',t,'(',len(op(text,sep)),'snippets )'
# Run the benchmark suite when this module is executed directly.
if __name__=='__main__':
    _bench()

View File

@@ -0,0 +1,48 @@
""" mxTextTools - A tools package for fast text processing.
(c) Copyright Marc-Andre Lemburg; All Rights Reserved.
See the documentation for further information on copyrights,
or contact the author (mal@lemburg.com).
"""
__package_info__ = """
BEGIN PYTHON-PACKAGE-INFO 1.0
Title: mxTextTools - Tools for fast text processing
Current-Version: 1.1.1
Home-Page: http://starship.skyport.net/~lemburg/mxTextTools.html
Primary-Site: http://starship.skyport.net/~lemburg/mxTextTools-1.1.1.zip
This package provides several different functions and mechanisms
to do fast text text processing. Amongst these are character set
operations, parsing & tagging tools (using a finite state machine
executing byte code) and common things such as Boyer-Moore search
objects. For full documentation see the home page.
END PYTHON-PACKAGE-INFO
"""
from TextTools import *
from TextTools import __version__
### Make the types pickleable:
# Shortcuts for pickle (reduces the pickle's length)
def _BMS(match, translate):
    """Pickle shortcut: rebuild a Boyer-Moore search object from its args."""
    so = BMS(match, translate)
    return so
def _FS(match, translate):
    """Pickle shortcut: rebuild a Fast Search object from its args."""
    so = FS(match, translate)
    return so
# Module init
class modinit:
    # One-shot "namespace" trick: the class body runs at import time to
    # register pickle support for the BMS/FS extension types, then the
    # class itself is deleted below so it never becomes part of the API.
    ### Register the two types
    import copy_reg
    def pickle_BMS(so):
        # reduce to the _BMS shortcut plus the constructor arguments
        return _BMS,(so.match,so.translate)
    def pickle_FS(so):
        # reduce to the _FS shortcut plus the constructor arguments
        return _FS,(so.match,so.translate)
    copy_reg.pickle(BMSType,
                    pickle_BMS,
                    _BMS)
    copy_reg.pickle(FSType,
                    pickle_FS,
                    _FS)

del modinit

View File

@@ -0,0 +1,17 @@
""" mxTextTools - A tools package for fast text processing.
(c) Copyright Marc-Andre Lemburg; All Rights Reserved.
See the documentation for further information on copyrights,
or contact the author (mal@lemburg.com).
"""
from mxTextTools import *
from mxTextTools import __version__
#
# Make BMS take the role of FS in case the Fast Search object was not built
#
# If the optional Fast Search extension was not compiled, alias it to the
# Boyer-Moore search object so client code can use FS unconditionally.
try:
    FS
except NameError:
    FS = BMS
    FSType = BMSType

View File

@@ -0,0 +1,3 @@
# this file is the entry point for freeze.py
from Converter import importloader

View File

@@ -0,0 +1,167 @@
from Blender import Scene
import Blender.NMesh as _NMesh
import Blender.Material as Material
# Unit-square texture coordinates used when a default UV layout is requested.
defaultUV = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
FACEFLAGS = _NMesh.Const
DEFAULTFLAGS = FACEFLAGS.LIGHT + FACEFLAGS.DYNAMIC
# Module-level state shared with the GLU tessellator callbacks below.
curface = None      # face currently being assembled
tessfaces = None    # list of faces produced by the current tessellation
def error():
    """Tessellator error callback: errors are deliberately ignored."""
    return None
def beginPolygon():
    """Tessellator callback: start assembling a new face."""
    global curface, tessfaces
    curface = _NMesh.Face()
def endPolygon():
    """Tessellator callback: file the finished face with the results."""
    global curface, tessfaces
    tessfaces.append(curface)
def addVertex(v):
    """Tessellator callback: append vertex v and its uv to the open face."""
    global curface
    uv = (v.uvco[0], v.uvco[1])
    curface.v.append(v)
    curface.uv.append(uv)
class Face:
    """Lightweight stand-in face used for n-gons (more than 4 vertices)."""
    def __init__(self, vlist):
        self.v = vlist      # vertex list, kept as passed in
        self.uv = []        # per-vertex uv coords, filled by the caller
        self.mode = 0       # face flags, assigned by addFace
class shadow:
    """Delegating proxy around a wrapped object stored in self.data.

    NOTE(review): __setattr__ forwards *every* assignment to self.data, so
    subclasses must install the wrapped object via self.__dict__['data'];
    a plain ``self.data = ...`` would recurse through __getattr__ — confirm
    against the subclasses elsewhere in the project.
    """
    def __setattr__(self, name, val):
        setattr(self.data, name, val)
    def __getattr__(self, name):
        return getattr(self.data, name)
    def __repr__(self):
        return repr(self.data)
##########################################
# replacement xMesh (NMesh shadow class)
class shadowNVert:
    """Shadow NMVert for the tessellator: behaves as a 3-sequence of coords."""
    def __init__(self):
        self.vert = None    # the real NMVert, attached later
        self.uv = []        # uv coordinates for this vertex
    def __len__(self):
        # the tessellator expects 3D points
        return 3
    def __getitem__(self, i):
        coords = self.vert.co
        return coords[i]
def Color(r, g, b, a = 1.0):
    """Build an _NMesh.Col from float channels (scaled by 255)."""
    channels = [255 * c for c in (r, g, b, a)]
    return _NMesh.Col(channels[0], channels[1], channels[2], channels[3])
class shadowNMesh:
    """Replacement Mesh class wrapping Blender's raw NMesh.

    Collects faces (including n-gons, via the optional GLU tessellator)
    and writes them into a real NMesh datablock on update()/write().
    """
    def __init__(self, name = None, default_flags = None):
        self.scene = Scene.getCurrent()
        self.data = _NMesh.GetRaw()   # the underlying raw NMesh datablock
        self.name = name
        if default_flags:
            flags = default_flags
        else:
            flags = DEFAULTFLAGS
        self.flags = flags            # default face mode for addFace
        self.smooth = 0               # smoothing applied to faces on update()
        self.faces = []               # staged faces, flushed by update()
        try:
            import tess
            # callbacks are the module-level tessellator hooks above
            self.tess = tess.Tess(256, beginPolygon, endPolygon, error, addVertex)
        except:
            #print "couldn't import tesselator"
            self.tess = None          # n-gons will simply be dropped
        self.curface = None
        self.tessfaces = []
        self.recalc_normals = 1
    def __del__(self):
        # drop our reference to the datablock when the wrapper dies
        del self.data
    def __getattr__(self, name):
        # expose the raw mesh's attributes; 'vertices' aliases data.verts
        if name == 'vertices':
            return self.data.verts
        else:
            return getattr(self.data, name)
    def __repr__(self):
        return "Mesh: %d faces, %d vertices" % (len(self.faces), len(self.verts))
    def toNMFaces(self, ngon):
        """Tessellate an n-gon shadow Face into real NMesh faces."""
        # This should be a Publisher only feature...once the tesselation
        # is improved. The GLU tesselator of Mesa < 4.0 is crappy...
        if not self.tess:
            return [] # no faces converted
        import tess
        i = 0
        global tessfaces
        tessfaces = []   # filled by the endPolygon callback
        tess.beginPolygon(self.tess)
        for v in ngon.v:
            # only propagate uvs when the n-gon has one uv per vertex
            if len(ngon.uv) == len(ngon.v):
                v.uvco = ngon.uv[i]
            tess.vertex(self.tess, (v.co[0], v.co[1], v.co[2]), v)
            i += 1
        tess.endPolygon(self.tess)
        return tessfaces
    def hasFaceUV(self, true):
        # forward the face-UV toggle to the raw mesh
        self.data.hasFaceUV(true)
    def addVert(self, v):
        """Create an NMesh vertex from a 3-sequence and register it."""
        vert = _NMesh.Vert(v[0], v[1], v[2])
        self.data.verts.append(vert)
        return vert
    def addFace(self, vlist, flags = None, makedefaultUV = 0):
        """Stage a face; quads/tris become NMesh faces, larger = shadow Face."""
        n = len(vlist)
        if n > 4:
            face = Face(vlist)       # n-gon: tessellated later in update()
        else:
            face = _NMesh.Face()
            for v in vlist:
                face.v.append(v)
            if makedefaultUV:
                face.uv = defaultUV[:n]
        self.faces.append(face)
        # turn on default flags:
        if not flags:
            face.mode = self.flags
        else:
            face.mode = flags
        return face
    def write(self):
        """Flush staged faces and link the mesh into the scene as an Object."""
        from Blender import Object
        # new API style:
        self.update()
        ob = Object.New(Object.Types.MESH) # create object
        ob.link(self.data) # link mesh data to it
        self.scene.link(ob)
        return ob
    def update(self):
        """Move staged faces into the raw mesh, tessellating n-gons."""
        from Blender.Types import NMFaceType
        smooth = self.smooth
        for f in self.faces:
            if type(f) == NMFaceType:
                f.smooth = smooth
                self.data.faces.append(f)
                f.materialIndex = 0
            else: #it's a NGON (shadow face)
                faces = self.toNMFaces(f)
                for nf in faces:
                    nf.smooth = smooth
                    nf.materialIndex = 0
                    self.data.faces.append(nf)
        if not self.name:
            self.name = "Mesh"
    def assignMaterial(self, material):
        # replaces any existing material list with this single material
        self.data.materials = [material._object]

# public aliases of the shadow classes
Mesh = shadowNMesh
Vert = shadowNVert

View File

@@ -0,0 +1,182 @@
"""This is a basic scenegraph module for Blender
It contains low level API calls..."""
# (c) 2001, Martin Strubel // onk@section5.de
from utils import quat #quaternions
from Blender import Object, Lamp, Scene
TOLERANCE = 0.01

def uniform_scale(vec):
    """Return the common scale factor when all three components of vec are
    equal within TOLERANCE; return 0 otherwise."""
    first = vec[0]
    for component in (vec[1], vec[2]):
        if abs(component - first) > TOLERANCE:
            return 0
    return first
class Transform:
    """An abstract transform, containing translation, rotation and scale information"""
    def __init__(self):
        # identity transform
        self.scale = (1.0, 1.0, 1.0)
        self.translation = (0.0, 0.0, 0.0)
        self.rotation = quat.Quat()
        self.scaleOrientation = quat.Quat() # axis, angle
        self.parent = None
    def __mul__(self, other):
        """Compose self with other (self applied after other).

        Only uniform scales can be composed this way; raises RuntimeError
        otherwise.
        """
        s = uniform_scale(self.scale)
        if not s:
            raise RuntimeError, "non uniform scale, can't multiply"
        t = Transform()
        sc = other.scale
        t.scale = (s * sc[0], s * sc[1], s * sc[2])
        t.rotation = self.rotation * other.rotation
        # rotate + scale the other translation, then offset by ours
        tr = s * apply(quat.Vector, other.translation)
        t.translation = self.rotation.asMatrix() * tr + self.translation
        return t
    def getLoc(self):
        t = self.translation
        return (t[0], t[1], t[2]) # make sure it's a tuple..silly blender
    def calcRotfromAxis(self, axisrotation):
        # axisrotation is an (axis, angle) sequence passed to quat.fromRotAxis
        self.rotation = apply(quat.fromRotAxis,axisrotation)
    def getRot(self):
        # Euler angles, as Blender's ob.rot expects
        return self.rotation.asEuler()
    def getSize(self):
        s = self.scale
        return (s[0], s[1], s[2])
    def __repr__(self):
        return "Transform: rot: %s loc:%s" % (self.getRot(), self.getLoc())
    def copy(self):
        "returns copy of self"
        t = Transform()
        t.scale = self.scale
        t.translation = self.translation
        t.rotation = self.rotation
        t.scaleOrientation = self.scaleOrientation
        return t
class BID:
    """Blender named-object ID: a name plus a lazily attached datablock."""
    def __init__(self, name):
        self.name = name
        self.data = None   # filled in once the datablock exists
class BScene:
    """Name-keyed registry of scene datablocks, grouped by type
    ('Image', 'Object', 'Mesh', ...)."""
    def __init__(self, name = None):
        # FIX: removed an unused function-local 'from Blender import Scene'
        # that shadowed the module-level import and served no purpose here.
        self.dict = {'Image': {}, 'Object':{}, 'Mesh' : {}}
        self.name = name
    def __getitem__(self, name):
        return self.dict[name]
    def __setitem__(self, name, val):
        self.dict[name] = val
    def has_key(self, name):
        # returns 1/0 (written for pre-bool Pythons)
        if self.dict.has_key(name):
            return 1
        else:
            return 0
    def getnewID(self, templ):
        """Return templ, or the first unused 'templ.NNN' variant."""
        n = 0
        name = templ
        while self.dict.has_key(name):
            n += 1
            name = "%s.%03d" % (templ, n)
        return name
class BSGNode:
    "Blender Scenegraph node"
    isRoot = 0
    def __init__(self, object = None, type = "", name = ""):
        self.type = type          # node type tag (e.g. Blender object type)
        self.name = name
        self.children = []        # child BSGNodes
        self.level = 0            # depth in the tree, maintained by setDepth
        self.object = object      # wrapped Blender object (may be None)
    def addChildren(self, children):
        """Append a sequence of nodes to this node's children."""
        self.children += children
    def traverse(self, visitor):
        """Call visitor() here and on all descendants; returns this node's
        visitor result (children's results are discarded)."""
        ret = visitor()
        for c in self.children:
            c.traverse(visitor)
        return ret
    def setDepth(self, level):
        """Set this node's depth and renumber the whole subtree."""
        self.level = level
        for c in self.children:
            c.setDepth(level + 1)
    def update(self):
        # BUG FIX: original read 'ob.name = self.name' with 'ob' undefined
        # (NameError on every call); push the name onto the wrapped object.
        self.object.name = self.name
    def __repr__(self):
        pre = self.level * ' '    # indent by tree depth
        return "\n%s%s [%s] ->%s" % (pre, self.name, self.type, self.children)
class ObjectNode(BSGNode):
    """Scenegraph node wrapping a Blender Object plus its local Transform."""
    def __init__(self, object = None, type = "", name = ""):
        self.transform = Transform()
        self.scene = Scene.getCurrent()
        BSGNode.__init__(self, object, type, name)
    def makeParent(self, child):
        # BUG FIX: original read 'self.child = parent' with 'parent'
        # undefined (NameError); record the child and point it back at us.
        self.child = child
        child.parent = self
    def clone(self):
        """Return a new node around a linked copy of this node's object."""
        ob = self.object
        newob = ob.copy()
        self.scene.link(newob)
        new = ObjectNode(newob)
        new.transform = self.transform.copy()
        return new
    def insert(self, child):
        """Attach child under this node and parent the Blender objects."""
        self.children.append(child)
        child.level = self.level + 1
        ob = child.object
        self.object.makeParent([ob], 1, 1)
        # first parent, THEN set local transform
        child.update()
    def applyTransform(self, tf):
        # compose tf in front of the current local transform
        self.transform = tf * self.transform
    def update(self):
        """Push the local transform and name onto the Blender object."""
        ob = self.object
        t = self.transform
        ob.loc = t.getLoc()
        ob.size = t.getSize()
        ob.rot = t.getRot()
        ob.name = self.name
def NodefromData(ob, type, name):
    """Build an ObjectNode around ob; when ob is None, create a new Blender
    object of 'type', link it into the current scene, and use that."""
    new = ObjectNode(None, type, name)
    if ob:
        obj = ob
    else:
        obj = Object.New(type)
        Scene.getCurrent().link(obj)
    if not obj:
        raise RuntimeError, "FATAL: could not create object"
    new.object= obj
    new.object.name = name
    #new.fromData(ob)
    return new
class RootNode(ObjectNode):
    """stupid simple scenegraph prototype"""
    # fixed identity of the (single) tree root
    level = 0
    isRoot = 1
    type = 'Root'
    name = 'ROOT'
    def __init__(self, object = None, type = "", name = ""):
        from Blender import Scene
        self.transform = Transform()
        # NOTE: deliberately skips ObjectNode.__init__ (no Blender object
        # backs the root); goes straight to BSGNode.
        BSGNode.__init__(self, object, type, name)
        self.scene = Scene.getCurrent()
    def insert(self, child):
        # no Blender-level parenting at the root: sync the child, adopt it
        child.update()
        self.children.append(child)
    def update(self):
        # refresh the scene to reflect all node changes
        self.scene.update()

View File

@@ -0,0 +1 @@
__all__ = ["Scenegraph", "Objects"]

View File

@@ -0,0 +1,24 @@
# This is the built in Blender emulation module for os.py
# not all features are implemented yet...
import Blender.sys as bsys
sep = bsys.dirsep # path separator ('/' or '\')
class Path:
    """Tiny subset of os.path built on Blender.sys."""
    def dirname(self, name):
        """Directory part of a path, via Blender's own implementation."""
        return bsys.dirname(name)
    def join(self, a, *p):
        """Join path components with the platform separator; an absolute
        component restarts the path (mirrors os.path.join semantics)."""
        dirsep = bsys.dirsep
        path = a
        for piece in p:
            if piece[:1] == dirsep:
                # absolute component: discard what we built so far
                path = piece
            elif path == '' or path[-1:] == dirsep:
                path = path + piece
            else:
                path = path + dirsep + piece
        return path

path = Path()

View File

@@ -0,0 +1,6 @@
#mcf 'vendor' packages
#These packages are free software, provided without warranty or
#guarantee, if you use them, you must agree to use them at your
#own risk. Please see the file license.txt for full license
#details.

View File

@@ -0,0 +1,6 @@
'''
mcf.utils package
'''

View File

@@ -0,0 +1,169 @@
'''
Destructive Functions for "collapsing" Sequences into single levels
>>> from mcf.utils import collapse
>>> collapse.test([[[1],[2,3]],[[]],[4],5,[6]])
[1, 2, 3, 4, 5, 6] # note that is the same root list
>>> collapse.collapse2([[[1],[2,3]],[[]],(4,()),(5,),[6]])
[1, 2, 3, 4, 5, 6] # note is the same root list
'''
import copy, types, sys
from types import ListType, TupleType # this now only supports the obsolete stuff...
def hyperCollapse( inlist, allowedmap, type=type, list=list, itype=types.InstanceType, maxint= sys.maxint):
    '''
    Destructively flatten a mixed hierarchy to a single level.
    Non-recursive, many speedups and obfuscations by Tim Peters :)

    allowedmap: mapping whose keys are the types (or old-style classes)
    that should be expanded in place.  The type/list/itype/maxint defaults
    are a Python-2 speed hack turning globals into fast local lookups.
    '''
    try:
        # for every possible index
        for ind in xrange( maxint):
            # while that index currently holds a list
            expandable = 1
            while expandable:
                expandable = 0
                if allowedmap.has_key( type(inlist[ind]) ):
                    # expand that list into the index (and subsequent indicies)
                    inlist[ind:ind+1] = list( inlist[ind])
                    expandable = 1
                # alternately you could iterate through checking for isinstance on all possible
                # classes, but that would be very slow
                elif type( inlist[ind] ) is itype and allowedmap.has_key( inlist[ind].__class__ ):
                    # here figure out some way to generically expand that doesn't risk
                    # infinite loops...
                    templist = []
                    for x in inlist[ind]:
                        templist.append( x)
                    inlist[ind:ind+1] = templist
                    expandable = 1
    except IndexError:
        # ran off the end of the fully flattened list: done
        pass
    return inlist
def collapse(inlist, type=type, ltype=types.ListType, maxint= sys.maxint):
    '''
    Destructively flatten a list hierarchy to a single level.
    Non-recursive, and (as far as I can see, doesn't have any
    glaring loopholes).
    Further speedups and obfuscations by Tim Peters :)

    Only list children are expanded; tuples and other sequences are left
    alone (see collapse2 for tuple handling).  The keyword defaults are a
    Python-2 speed hack: they become fast local lookups inside the loop.
    '''
    try:
        # for every possible index
        for ind in xrange( maxint):
            # while that index currently holds a list
            while type(inlist[ind]) is ltype:
                # expand that list into the index (and subsequent indicies)
                inlist[ind:ind+1] = inlist[ind]
                #ind = ind+1
    except IndexError:
        # ran off the end of the fully flattened list: done
        pass
    return inlist
def collapse_safe(inlist):
    '''
    Non-destructive variant of collapse: flattens a shallow copy,
    leaving the caller's list untouched.
    '''
    return collapse(inlist[:])
def collapse2(inlist, ltype=(types.ListType, types.TupleType), type=type, maxint= sys.maxint ):
    '''
    Destructively flatten a list hierarchy to a single level.
    Will expand tuple children as well, but will fail if the
    top level element is not a list.
    Non-recursive, and (as far as I can see, doesn't have any
    glaring loopholes).
    '''
    ind = 0
    try:
        while 1:
            while type(inlist[ind]) in ltype:
                try:
                    # lists splice in directly...
                    inlist[ind:ind+1] = inlist[ind]
                except TypeError:
                    # ...tuples must be converted first
                    inlist[ind:ind+1] = list(inlist[ind])
            # current slot is now a non-sequence: advance
            ind = ind+1
    except IndexError:
        # ran off the end of the fully flattened list: done
        pass
    return inlist
def collapse2_safe(inlist):
    '''
    As collapse2, but works on a copy of the inlist
    '''
    # BUG FIX: originally delegated to collapse(), which does not expand
    # tuple children as this function's docstring promises; collapse2 does.
    return collapse2( list(inlist) )
def old_buggy_collapse(inlist):
    '''Always return a one-level list of all the non-list elements in listin,
    rewritten to be non-recursive 96-12-28 Note that the new versions work
    on the original list, not a copy of the original.'''
    # Kept for historical reference only — known buggy (hence the name);
    # use collapse() instead.
    if type(inlist)==TupleType:
        inlist = list(inlist)
    elif type(inlist)!=ListType:
        return [inlist]
    x = 0
    while 1:
        try:
            y = inlist[x]
            if type(y) == ListType:
                ylen = len(y)
                if ylen == 1:
                    # single-element list: splice the element in directly
                    inlist[x] = y[0]
                    if type(inlist[x]) == ListType:
                        x = x - 1 # need to collapse that list...
                elif ylen == 0:
                    del(inlist[x])
                    x = x-1 # list has been shortened
                else:
                    inlist[x:x+1]=y
            x = x+1
        except IndexError:
            break
    return inlist
def old_buggy_collapse2(inlist):
    '''As collapse, but also collapse tuples, rewritten 96-12-28 to be non-recursive'''
    # Kept for historical reference only — known buggy (hence the name);
    # use collapse2() instead.
    if type(inlist)==TupleType:
        inlist = list(inlist)
    elif type(inlist)!=ListType:
        return [inlist]
    x = 0
    while 1:
        try:
            y = inlist[x]
            if type(y) in [ListType, TupleType]:
                ylen = len(y)
                if ylen == 1:
                    inlist[x] = y[0]
                    if type(inlist[x]) in [ListType,TupleType]:
                        x = x-1 #(to deal with that element)
                elif ylen == 0:
                    del(inlist[x])
                    x = x-1 # list has been shortened, will raise exception with tuples...
                else:
                    inlist[x:x+1]=list(y)
            x = x+1
        except IndexError:
            break
    return inlist
def oldest_buggy_collapse(listin):
    'Always return a one-level list of all the non-list elements in listin'
    # Historical recursive version; note it recurses into collapse(), not
    # itself. Kept for reference only.
    if type(listin) == ListType:
        return reduce(lambda x,y: x+y, map(collapse, listin), [])
    else: return [listin]
def oldest_buggy_collapse2(seqin):
    # Historical recursive tuple-expanding version; recurses into
    # collapse2(), not itself. Kept for reference only.
    if type(seqin) in [ListType, TupleType]:
        return reduce(lambda x,y: x+y, map(collapse2, seqin), [])
    else:
        return [seqin]

View File

@@ -0,0 +1,83 @@
'''
Module to allow for "copying" Numeric arrays,
(and thereby also matrices and userarrays)
standard arrays, classes and modules
(last two are not actually copied, but hey :) ).
Could do approximately the same thing with
copy_reg, but would be inefficient because
of passing the data into and out of strings.
To use, just import this module.
'''
# altered 98.11.05, moved copy out of NUMPY test
import copy
try: # in case numpy not installed
    import Numeric
    def _numpyarray_copy(somearray, memo=None):
        '''
        Simple function for getting a copy of a NUMPY array

        memo maps id(original) -> copy, as used by copy.deepcopy, so the
        same array is only copied once per deepcopy run.
        '''
        if memo == None:
            memo = {} # yeah, I know, not _really_ necessary
        # see if already done this item, return the copy if we have...
        d = id(somearray)
        try:
            return memo[d]
        except KeyError:
            pass
        temp = Numeric.array(somearray, copy=1)
        memo[d] = temp
        return temp
    # now make it available to the copying functions
    copy._copy_dispatch[Numeric.ArrayType] = _numpyarray_copy
    copy._deepcopy_dispatch[Numeric.ArrayType] = _numpyarray_copy
except ImportError: # Numeric not installed...
    pass
try: # in case array not installed
    import array
    def _array_copy(somearray, memo = None):
        '''
        Simple function for getting a copy of a standard array.

        memo maps id(original) -> copy, as used by copy.deepcopy, so the
        same array is only copied once per deepcopy run.
        '''
        if memo == None:
            memo = {} # yeah, I know, not _really_ necessary
        # see if already done this item, return the copy if we have...
        d = id(somearray)
        try:
            return memo[d]
        except KeyError:
            pass
        # BUG FIX: original read 'somearay' (typo), raising NameError on
        # every copy attempt; a slice makes a shallow copy of the array.
        newarray = somearray[:]
        memo[d] = newarray
        return newarray
    # now make it available to the copying functions
    copy._copy_dispatch[ array.ArrayType ] = _array_copy
    copy._deepcopy_dispatch[ array.ArrayType ] = _array_copy
except ImportError:
    pass
import types

def _module_copy(somemodule, memo = None):
    '''
    Modules are treated as immutable: "copying" one (shallow or deep)
    simply returns the module itself.
    '''
    return somemodule

# register for both shallow and deep copying
copy._copy_dispatch[ types.ModuleType ] = _module_copy
copy._deepcopy_dispatch[ types.ModuleType ] = _module_copy
def _class_copy(someclass, memo=None):
    '''
    Again, classes are considered immutable, they are
    just returned as themselves, not as new objects.
    '''
    return someclass

# now make it available to the copying functions
# (shallow-copy registration left disabled by the original author;
# types.ClassType is the Python-2 old-style-class type)
#copy._copy_dispatch[ types.ClassType ] = _class_copy
copy._deepcopy_dispatch[ types.ClassType ] = _class_copy

View File

@@ -0,0 +1,190 @@
'''
Extend cpickle storage to include modules, and builtin functions/methods
To use, just import this module.
'''
import copy_reg
### OBJECTS WHICH ARE RESTORED THROUGH IMPORTS
# MODULES
def pickle_module(module):
    '''
    Reduce a module to an import statement for pickling; the module must
    be importable again when the pickle is loaded.
    '''
    name = module.__name__
    return unpickle_imported_code, ('import %s' % name, name)
# FUNCTIONS, METHODS (BUILTIN)
def pickle_imported_code(funcmeth):
    '''
    Store a reference to an imported element (such as a function/builtin function,
    Must be available for reimport during unpickling.
    '''
    # BUG FIX: _whichmodule returns the module *name* (a string; see its
    # docstring below), so the original's module.__name__ raised
    # AttributeError on every use.
    modname = _whichmodule(funcmeth)
    return unpickle_imported_code, (
        'from %s import %s' % (modname, funcmeth.__name__),
        funcmeth.__name__)
import types, regex
# Whitelist patterns used to vet code strings before they are exec'd in
# unpickle_imported_code; regex.match returns the matched length, which is
# compared against len() to require a full match.
import_filter = regex.compile('''\(from [A-Za-z0-9_\.]+ \)?import [A-Za-z0-9_\.]+''') # note the limitations on whitespace
getattr_filter = regex.compile('''[A-Za-z0-9_\.]+''') # note we allow you to use x.y.z here
# MODULES, AND FUNCTIONS
def unpickle_imported_code(impstr,impname):
    '''
    Attempt to load a reference to a module or other imported code (such as functions/builtin functions)
    '''
    # SECURITY: impstr is exec'd below. The regex filters only restrict its
    # *shape*; any importable module's code still runs, so this remains
    # unsafe for untrusted pickles (as the warning text says).
    if import_filter.match(impstr) != len(impstr) or getattr_filter.match(impname)!= len(impname):
        import sys
        sys.stderr.write('''Possible attempt to smuggle arbitrary code into pickle file (see module cpickle_extend).\nPassed code was %s\n%s\n'''%(impstr,impname))
        del(sys)
    else:
        ns = {}
        try:
            exec (impstr) in ns # could raise all sorts of errors, of course, and is still dangerous when you have no control over the modules on your system! Do not allow for untrusted code!!!
            return eval(impname, ns)
        except:
            import sys
            sys.stderr.write('''Error unpickling module %s\n None returned, will likely raise errors.'''%impstr)
            return None
# Modules
# type(regex) is the module type; regex serves only as a sample object here.
copy_reg.pickle(type(regex),pickle_module,unpickle_imported_code)
# builtin functions/methods
# type(regex.compile) is the builtin-function type.
copy_reg.pickle(type(regex.compile),pickle_imported_code, unpickle_imported_code)
del(regex) # to keep the namespace neat as possible
### INSTANCE METHODS
'''
The problem with instance methods is that they are almost always
stored inside a class somewhere. We really need a new type: reference
that lets us just say "y.this"
We also need something that can reliably find burried functions :( not
likely to be easy or clean...
then filter for x is part of the set
'''
import new
def pickle_instance_method(imeth):
    '''
    Use the (rather surprisingly clean) internals of
    the method to store a reference to a method. Might
    be better to use a more general "get the attribute
    'x' of this object" system, but I haven't written that yet :)
    '''
    klass = imeth.im_class          # class the method is bound through
    funcimp = _imp_meth(imeth)      # (import statement, name) recipe
    self = imeth.im_self # will be None for UnboundMethodType
    return unpickle_instance_method, (funcimp,self,klass)
def unpickle_instance_method(funcimp,self,klass):
    '''
    Attempt to restore a reference to an instance method,
    the instance has already been recreated by the system
    as self, so we just call new.instancemethod
    '''
    funcimp = apply(unpickle_imported_code, funcimp)
    # BUG FIX: original returned new.instancemethod(func, ...) with 'func'
    # undefined (NameError); the re-imported function is 'funcimp'.
    return new.instancemethod(funcimp,self,klass)
copy_reg.pickle(types.MethodType, pickle_instance_method, unpickle_instance_method)
copy_reg.pickle(types.UnboundMethodType, pickle_instance_method, unpickle_instance_method)
### Arrays
try:
    import array
    # Detect host byte order once: '\001' as the first byte of a packed int
    # means little-endian.
    LittleEndian = array.array('i',[1]).tostring()[0] == '\001'
    def pickle_array(somearray):
        '''
        Store a standard array object, inefficient because of copying to string
        '''
        # the writer's endianness travels with the data so the reader can
        # swap if it differs
        return unpickle_array, (somearray.typecode, somearray.tostring(), LittleEndian)
    def unpickle_array(typecode, stringrep, origendian):
        '''
        Restore a standard array object
        '''
        newarray = array.array(typecode)
        newarray.fromstring(stringrep)
        # floats are always big-endian, single byte elements don't need swapping
        if origendian != LittleEndian and typecode in ('I','i','h','H'):
            newarray.byteswap()
        return newarray
    copy_reg.pickle(array.ArrayType, pickle_array, unpickle_array)
except ImportError: # no arrays
    pass
### NUMPY Arrays
try:
    import Numeric
    # Detect host byte order once: '\001' first means little-endian.
    LittleEndian = Numeric.array([1],'i').tostring()[0] == '\001'
    def pickle_numpyarray(somearray):
        '''
        Store a numpy array, inefficent, but should work with cPickle
        '''
        return unpickle_numpyarray, (somearray.typecode(), somearray.shape, somearray.tostring(), LittleEndian)
    def unpickle_numpyarray(typecode, shape, stringval, origendian):
        '''
        Restore a numpy array
        '''
        newarray = Numeric.fromstring(stringval, typecode)
        # BUG FIX: Numeric.reshape returns a *new* array rather than
        # reshaping in place; the original discarded the result, so
        # restored arrays came back flat.
        newarray = Numeric.reshape(newarray, shape)
        if origendian != LittleEndian and typecode in ('I','i','h','H'):
            # byteswapped() returns a swapped copy of the array
            return newarray.byteswapped()
        else:
            return newarray
    copy_reg.pickle(Numeric.ArrayType, pickle_numpyarray, unpickle_numpyarray)
except ImportError:
    pass
### UTILITY FUNCTIONS
# cache of object -> module-name lookups performed by _whichmodule
classmap = {}
def _whichmodule(cls):
    """Figure out the module in which an imported_code object occurs.
    Search sys.modules for the module.
    Cache in classmap.
    Return a module name.
    If the class cannot be found, return __main__.
    Copied here from the standard pickle distribution
    to prevent another import
    """
    # NOTE: relies on the module-level 'import os, string, sys' that appears
    # just below this function; it has executed by the time this is called.
    if classmap.has_key(cls):
        return classmap[cls]
    clsname = cls.__name__
    for name, module in sys.modules.items():
        if name != '__main__' and \
           hasattr(module, clsname) and \
           getattr(module, clsname) is cls:
            break
    else:
        name = '__main__'
    classmap[cls] = name
    return name
import os, string, sys
def _imp_meth(im):
    '''
    One-level deep recursion on finding methods, i.e. we can
    find them only if the class is at the top level.

    Returns an (import statement, name) pair suitable for
    unpickle_imported_code.
    '''
    # candidate modules: any sys.modules entry whose last dotted component
    # matches the source file's basename
    fname = im.im_func.func_code.co_filename
    tail = os.path.splitext(os.path.split(fname)[1])[0]
    ourkeys = sys.modules.keys()
    possibles = filter(lambda x,tail=tail: x[-1] == tail, map(string.split, ourkeys, ['.']*len(ourkeys)))
    # now, iterate through possibles to find the correct class/function
    possibles = map(string.join, possibles, ['.']*len(possibles))
    imp_string = _search_modules(possibles, im.im_func)
    return imp_string
def _search_modules(possibles, im_func):
    # Scan candidate modules for im_func, first as a top-level attribute,
    # then one level down inside each module attribute (e.g. a class).
    # Returns ('from M import N', access-path) for unpickle_imported_code.
    for our_mod_name in possibles:
        our_mod = sys.modules[our_mod_name]
        if hasattr(our_mod, im_func.__name__) and getattr(our_mod, im_func.__name__).im_func is im_func:
            return 'from %s import %s'%(our_mod.__name__, im_func.__name__), im_func.__name__
        for key,val in our_mod.__dict__.items():
            if hasattr(val, im_func.__name__) and getattr(val, im_func.__name__).im_func is im_func:
                return 'from %s import %s'%(our_mod.__name__,key), '%s.%s'%(key,im_func.__name__)
    # NOTE(review): Python-2 string exception — illegal in later Pythons.
    raise '''No import string calculable for %s'''%im_func

View File

@@ -0,0 +1,80 @@
'''
DictBool:
Simplistic (and slow) implementation of Boolean operations for
dictionaries... really these should be implemented in C, but I
can't do that till I have MSVC++, which I don't really want to
buy... this will have to do in the meantime.
>>> from mcf.utils import dictbool
>>> a = {1:2}; b = {2:3}; c={4:5,6:7,8:9,1:5}
>>> dictbool.union(a,b,c) # overwrite a with b and the result with c
{1: 5, 2: 3, 4: 5, 8: 9, 6: 7}
>>> dictbool.collectunion(a,b,c) # collect all possible for each key
{1: [2, 5], 2: [3], 4: [5], 8: [9], 6: [7]}
>>> dictbool.intersect(a,b,c) # no common elements in all three
{}
>>> dictbool.intersect(a,c) # one element is common to both
{1: [2, 5]}
'''
def union(*args):
    '''
    Merge all argument dictionaries into a new one; on key clashes the
    value from the later argument wins.
    Rewritten for Python 1.5 on 98.03.31
    '''
    merged = {}
    for mapping in args:
        merged.update(mapping)
    return merged
def collectunion(*args):
    '''
    Like union, but nothing is overwritten: each key maps to the list of
    every value supplied for it, in argument order.
    '''
    collected = {}
    for mapping in args:
        for key, val in mapping.items():
            try:
                collected[key].append(val)
            except KeyError:
                # first sighting of this key
                collected[key] = [val]
    return collected
def intersect(*args):
    '''
    Build a new dictionary with those keys common to all args,
    the vals of the new dict are lists of length len(args), where
    list[ind] is the value of args[ind] for that key.

    NOTE(review): a key whose collected value list is falsy (e.g. [0] is
    truthy, but None from a miss) is dropped; also Python-2 specific —
    map() returns a list that is sorted in place, and equal-length dicts
    are tie-broken by dict comparison, both of which fail on Python 3.
    '''
    # sort dicts smallest-first so we iterate the fewest candidate keys
    args = map(lambda x: (len(x),x), args)
    args.sort()
    temp = {}
    master = args[0][1]
    rest = map(lambda x: x[1], args[1:])
    for var,val in master.items():
        tempval = [val]
        for slave in rest:
            try:
                tempval.append(slave[var])
            except KeyError:
                # missing from one dict: not in the intersection
                tempval = None
                break
        if tempval:
            temp[var] = tempval
    return temp

View File

@@ -0,0 +1,91 @@
# Sentinel for "no dependency" registrations: a unique, hashable object
# that cannot collide with user-supplied elements.
nullval = (1,)
class DSort:
    '''
    A "dependency" sorting class, used to order elements
    according to declared "dependencies" (many-to-one relationships)
    Is not a beautiful algo, but it works (or seems to)
    Requires hashable values for all elements.
    This is a quick hack, use at your own risk!
    Basic usage:
        Create a DSort mysorter
        for each element q which is part of the set to sort, call:
            mysorter.rule( dsort.nullval, q)
            # this is not strictly necessary for elements which are
            # dependent on other objects, but it is necessary for
            # those which are not.  Generally it's easiest to call
            # the null rule for each element.
        for each rule x depends on y, call:
            mysorter.rule( x, y)
        when _all_ rules are entered, call
        try:
            sortedlist = mysorter.sort()
        except ValueError:
            handle recursive dependencies here...
    For an example of real-life use, see the VRML lineariser.
    '''
    def __init__(self, recurseError=None ):
        # dependon maps element -> [depth, dependent, dependent, ...];
        # slot 0 caches the computed depth (-1 unknown, -2 in progress).
        self.dependon = {nullval:[0]}
        self.recurseError = recurseError
    def rule( self, depon, deps):
        '''
        Register a "rule".  Both elements must be hashable values.
        See the class' documentation for usage.
        '''
        # print '''registering rule:''', depon, deps
        if self.dependon.has_key( deps ) and depon is not nullval:
            self.dependon[ deps ].append( depon )
        elif depon is not nullval:
            self.dependon[ deps ] = [-1, depon]
        elif not self.dependon.has_key( deps ):
            self.dependon[ deps ] = [-1 ]
    def sort( self ):
        '''
        Get the sorted results as a list
        '''
        # resolve every element's depth, then order deepest-first
        for key, value in self.dependon.items():
            self._dsort( key, value)
        temp = []
        for key, value in self.dependon.items():
            temp.append( (value[0], key) )
        temp.sort()
        temp.reverse()
        temp2 = []
        for x,y in temp:
            temp2.append( y )
        # following adds the elements with no dependencies
        temp2[len(temp2):] = self.dependon[ nullval ][1:]
        return temp2
    def _dsort( self, key, value ):
        # value[0] == -2 marks a node currently being resolved: seeing it
        # again means the dependency graph has a cycle.
        if value[0] == -2:
            if self.recurseError:
                raise ValueError, '''Dependencies were recursive!'''
            else:
                if __debug__:
                    print '''Recursive dependency discovered and ignored in dsort.Dsort._dsort on %s:%s'''%(key, value)
                return 1 # we know it has at least one reference...
        elif value[0] == -1: # haven't yet calculated this rdepth
            value[0] = -2
            tempval = [0]
            for x in value[1:]:
                try:
                    tempval.append( 1 + self._dsort( x, self.dependon[x]) )
                except KeyError:
                    self.dependon[ nullval ].append( x ) # is an unreferenced element
                    tempval.append( 1 )
            value[0] = max( tempval )
            return value[0]
        else:
            return value[0]
'''
from mcf.utils import dsort
>>> x = dsort.DSort()
>>> map( x.rule, [1,2,2,4,5,4], [2,3,4,5,6,3] )
[None, None, None, None, None, None]
>>> x.sort()
'''

View File

@@ -0,0 +1,91 @@
'''
Dummy Class, intended as an abstract class for the creation
of base/builtin classes with slightly altered functionality
uses _base as the name of an instance of the base datatype,
mapping all special functions to that name.
>>> from mcf.utils import dummy
>>> j = dummy.Dummy({})
>>> j['this'] = 23
>>> j
{'this': 23}
>>> class example(dummy.Dummy):
... def __repr__(self):
... return '<example: %s>'%`self._base`
>>> k = example([])
>>> k # uses the __repr__ function
<example: []>
>>> k.append # finds the attribute of the _base
<built-in method append of list object at 501830>
'''
import types, copy
class Dummy:
    'Abstract class for slightly altering functionality of objects (including builtins)'
    # Wraps a value in self._base and forwards container/operator protocols
    # to it.  NOTE(review): types.InstanceType, __cmp__ and __getslice__ are
    # Python-2-only features.
    def __init__(self, val=None):
        'Initialisation, should be overridden'
        # copying another Dummy? then duplicate its wrapped value instead
        # of nesting wrappers
        if val and type(val)== types.InstanceType and hasattr(val, '_base'):
            # Dict is used because subclasses often want to override
            # the setattr function
            self.__dict__['_base']=copy.copy(val.__dict__['_base'])
        else:
            self.__dict__['_base'] = val
    def __repr__(self):
        'Return a string representation'
        return repr(self._base)
    def __str__(self):
        'Convert to a string'
        return str(self._base)
    def __cmp__(self,other):
        'Compare to other value'
        # altered 98.03.17 from if...elif...else statement
        return cmp(self._base, other)
    def __getitem__(self, key):
        'Get an item by index'
        return self._base[key]
    def __setitem__(self, key, val):
        'Set an item by index'
        self._base[key]=val
    def __len__(self):
        'return the length of the self'
        return len(self._base)
    def __delitem__(self, key):
        'remove an item by index'
        del(self._base[key])
    def __getslice__(self, i, j):
        'retrieve a slice by indexes'
        return self._base[i:j]
    def __setslice__(self, i, j, val):
        'set a slice by indexes to values'
        self._base[i:j]=val
    def __delslice__(self, i, j):
        'remove a slice by indexes'
        del(self._base[i:j])
    def __nonzero__(self):
        'truth value follows the wrapped object'
        if self._base:
            return 1
        else:
            return 0
    def __getattr__(self, attr):
        'find an attribute when normal lookup fails, will raise a KeyError if missing _base attribute'
        # first try attribute access on the wrapped object, then item
        # access (so mapping keys read like attributes)
        try:
            return getattr( self.__dict__['_base'], attr)
        except (AttributeError, KeyError):
            try:
                return self.__dict__['_base'][attr]
            except (KeyError,TypeError):
                pass
        raise AttributeError, attr

View File

@@ -0,0 +1,37 @@
'''
err.py Encapsulated writing to sys.stderr
The idea of this module is that, for a GUI system (or a more advanced UI),
you can just import a different err module (or object) and keep
your code the same. (For instance, you often want a status window
which flashes warnings and info, and have error messages pop up an
alert to get immediate attention.
'''
import sys
def err(message, Code=0):
    '''
    report an error, with an optional error code
    '''
    if Code:
        tag = 'Error #%i' % Code
    else:
        tag = 'Error'
    sys.stderr.write('%s: %s\n' % (tag, message))
def warn(message, Code=0):
    '''
    report a warning, with an optional error code
    '''
    if Code:
        tag = 'Warning #%i' % Code
    else:
        tag = 'Warning'
    sys.stderr.write('%s: %s\n' % (tag, message))
def info(message, Code=0):
    '''
    report information/status, with an optional error code
    '''
    if Code:
        tag = 'Info #%i' % Code
    else:
        tag = 'Info'
    sys.stderr.write('%s: %s\n' % (tag, message))

View File

@@ -0,0 +1,19 @@
'''
Make either cPickle or pickle available as the virtual
module mcf.utils.pickle. This allows you to use a single
import statement:
from mcf.utils import extpkl, pickle
and then use that pickle, knowing that you have the best
available pickling engine.
'''
# NOTE(review): defaultset appears unused in this module — possibly
# consumed by importers; confirm before removing.
defaultset = ('import cPickle', 'cPickle')
import sys, mcf.utils
from mcf.utils import cpickle_extend
# prefer the C implementation, fall back to the pure-Python pickler
try:
    import cPickle
    pickle = cPickle
except:
    import pickle
# publish the chosen engine as the virtual module mcf.utils.pickle
sys.modules['mcf.utils.pickle'] = mcf.utils.pickle = pickle

View File

@@ -0,0 +1,65 @@
### WARNING:
# I don't have a clue what I'm doing here!
import win32api
### Following is the "normal" approach,
### but it requires loading the entire win32con file (which is big)
### for two values...
##import win32con
##HKEY_CLASSES_ROOT = win32con.HKEY_CLASSES_ROOT
##REG_SZ = win32con.REG_SZ
### These are the hard-coded values, should work everywhere as far as I know...
# Hard-coded Win32 registry constants (avoids importing the large win32con
# module just for these two values).
HKEY_CLASSES_ROOT = 0x80000000
REG_SZ= 1
def associate( extension, filetype, description="", commands=(), iconfile="" ):
    '''Warning: I don't have a clue what I'm doing here!
    extension -- extension including "." character, e.g. .proc
    filetype -- formal name, no spaces allowed, e.g. SkeletonBuilder.RulesFile
    description -- human-readable description of the file type
    commands -- sequence of (command, commandline), e.g. (("Open", "someexe.exe %1"),)
    iconfile -- optional default icon file for the filetype
    '''
    # Map the extension to the filetype name (HKCR\.ext -> filetype).
    win32api.RegSetValue(
        HKEY_CLASSES_ROOT,
        extension,
        REG_SZ,
        filetype
    )
    if description:
        # Human-readable name (default value of HKCR\filetype).
        win32api.RegSetValue(
            HKEY_CLASSES_ROOT ,
            filetype,
            REG_SZ,
            description
        )
    if iconfile:
        # Default icon for files of this type.
        win32api.RegSetValue(
            HKEY_CLASSES_ROOT ,
            "%(filetype)s\\DefaultIcon" % locals(),
            REG_SZ,
            iconfile
        )
    # One Shell\<verb> (label) plus Shell\<verb>\Command (command line)
    # pair per entry in commands.
    for (command, commandline) in commands:
        win32api.RegSetValue(
            HKEY_CLASSES_ROOT ,
            "%(filetype)s\\Shell\\%(command)s" % locals(),
            REG_SZ,
            command,
        )
        win32api.RegSetValue(
            HKEY_CLASSES_ROOT ,
            "%(filetype)s\\Shell\\%(command)s\\Command" % locals(),
            REG_SZ,
            commandline
        )
if __name__ == "__main__":
associate(
".proc",
"SkeletonBuilder.Processing",
"SkeletonBuilder Processing File",
(("Open", '''z:\\skeletonbuilder\\skeletonbuilder.exe "%1" %*'''),),
'''z:\\skeletonbuilder\\bitmaps\\skeletonbuildericon.ico''',
)

View File

@@ -0,0 +1,30 @@
'''
This utility allows a python system to find a file in it's
directory. To do this, you need to pass it a function object from
a module in the correct directory. I know there must be a better
way to do this, but I haven't seen it yet. Incidentally, the
current directory should be _different_ from the module in which
the function is contained, otherwise this function will go off into
the root directory.
Currently this has to be called with the current directory a directory
other than the directory we're trying to find... need a better solution
for this kind of thing... a python registry would be great :)
NOTE: as of Python 1.5, this module should be obsolete! As soon as I
have verified that all of my code is fixed, it will be moved to the unused
directories.
'''
import os,sys
def findourfile(function, filename):
    '''
    Given the function, return a path to the a file in the
    same directory with 'filename'. We also let the caller
    know if the file already exists.

    Returns (exists, path) where exists is os.path.exists(path).
    '''
    # The function's code object records the filename of the module that
    # defined it; that module's directory is where we look for 'filename'.
    try:
        code = function.func_code       # Python 1.5/2.x spelling
    except AttributeError:
        code = function.__code__        # modern spelling
    directory = os.path.split(code.co_filename)[0]
    # BUGFIX: the original used directory + os.sep + filename, which
    # produced a bogus absolute path like '/name' whenever the directory
    # part was empty (module loaded via a relative path).
    ourfilename = os.path.join(directory, filename)
    exists = os.path.exists(ourfilename)
    return (exists, ourfilename)

View File

@@ -0,0 +1,201 @@
'''
Simple Hierarchic Walking functions for use with hierobj-type objects.
Provide for recurse-safe processing. Currently only provide depth-first
processing, and don't provide means for ignoring branches of the tree
during processing. For an example of breadth-first processing, see
mcf.pars.int.index.indutils. For more complex hierarchic processing,
see the mcf.walker package.
Originally these functions were only methods of the hierobj class (they
still are methods of it). I've split them out to allow them to be
imported selectively by other classes (some classes will only want
the simple walking functions, and not want to be bothered with the
methods which hierobj uses to keep track of its particular internal
structures.
'''
def hier_rapply(self, function,arglist=None,argdict={},moreattr = '__childlist__'):
    '''
    Safely apply a function to self and all children for
    the function's side effects. Discard the return values
    that function returns.
    function
        function to apply
    arglist
        (self,)+arglist is the set of arguments passed to function
    argdict
        passed as namedargs to the function
    moreattr
        the attribute representing the children of a node
    '''
    # Iterative depth-first walk.  'alreadydone' maps id(node) -> 1 so that
    # shared sub-trees / cycles (this is a DAG) are visited exactly once.
    # NOTE(review): the shared default argdict={} is never mutated here,
    # so the classic mutable-default pitfall does not apply.
    alreadydone = {}
    tobedone = [self]
    if arglist or argdict:
        if not arglist: arglist=[self]
        else:
            arglist.insert(0,self) # we could insert anything... self is convenient
        while tobedone:
            object = tobedone[0]
            try:
                alreadydone[id(object)]
                # We've already processed this object
            except KeyError:
                # We haven't processed this object
                alreadydone[id(object)]=1
                # slot 0 of arglist is recycled as the 'current node' argument
                arglist[0]=object
                apply(function,tuple(arglist),argdict)   # Py2 spelling of function(*args, **kw)
                try:
                    # splice children in at the front => depth-first order
                    tobedone[1:1]=getattr(object,moreattr)
                except AttributeError:
                    # if the object isn't a hierobj, we don't need to recurse into it.
                    pass
            del(tobedone[0])
    else: # no arglist or argdict
        # identical walk, without the per-node argument-list shuffling
        while tobedone:
            object = tobedone[0]
            try:
                alreadydone[id(object)]
                # We've already processed this object
            except KeyError:
                # We haven't processed this object
                alreadydone[id(object)]=1
                function(object)
                try:
                    tobedone[1:1]=getattr(object,moreattr)
                except AttributeError:
                    # if the object isn't a hierobj, we don't need to recurse into it.
                    pass
            del(tobedone[0])
def hier_rreturn(self, function,arglist=None,argdict={},moreattr = '__childlist__'):
    '''
    Safely apply a function to self and all children,
    collect the results in a list and return.
    function
        function to apply
    arglist
        (self,)+arglist is the set of arguments passed to function
    argdict
        passed as namedargs to the function
    moreattr
        the attribute representing the children of a node
    '''
    # Same recurse-safe depth-first walk as hier_rapply, but collecting
    # each call's return value (in visit order) into 'results'.
    alreadydone = {}
    tobedone = [self]
    results = []
    if arglist or argdict:
        if not arglist: arglist=[self]
        else:
            arglist.insert(0,self) # or anything you feel like
        while tobedone:
            object = tobedone[0]
            try:
                alreadydone[id(object)]
                # We've already processed this object
            except KeyError:
                # We haven't processed this object
                alreadydone[id(object)]=1
                arglist[0]=object
                results.append(apply(function,tuple(arglist),argdict))
                try:
                    # splice children in at the front => depth-first order
                    tobedone[1:1]=getattr(object,moreattr)
                except AttributeError:
                    # if the object isn't a hierobj, we don't need to recurse into it.
                    pass
            del(tobedone[0])
    else:
        while tobedone:
            object = tobedone[0]
            try:
                alreadydone[id(object)]
                # We've already processed this object
            except KeyError:
                # We haven't processed this object
                alreadydone[id(object)]=1
                results.append(function(object))
                try:
                    tobedone[1:1]=getattr(object,moreattr)
                except AttributeError:
                    # if the object isn't a hierobj, we don't need to recurse into it.
                    pass
            del(tobedone[0])
    return results
def hier_rgetattr(self, attrname, multiple=1, moreattr = '__childlist__'):
    '''
    Recursively collect the values of attribute attrname over self
    and all descendants (depth-first) and return them as a list.
    When multiple is false, return just the first value found.
    attrname
        attribute to collect
    multiple
        collect every value (true) or only the first (false)
    moreattr
        the attribute representing the children of a node
    '''
    # Nodes are tracked by id() so shared sub-trees of the DAG are
    # visited only once; nodes lacking attrname are silently skipped.
    seen = {}
    queue = [self]
    collected = []
    while queue:
        node = queue[0]
        del queue[0]
        if seen.get(id(node)):
            continue
        seen[id(node)] = 1
        try:
            value = getattr(node, attrname)
        except AttributeError:
            pass
        else:
            if not multiple:
                return value
            collected.append(value)
        try:
            children = getattr(node, moreattr)
        except AttributeError:
            # not a hierobj-aware node, nothing to recurse into
            pass
        else:
            # splice children in at the front => depth-first order
            queue[0:0] = children
    return collected
def hier_rmethod(self, methodname,arglist=(),argdict={},moreattr = '__childlist__'):
    '''
    return the result of calling every object's method methodname,
    as for hier_rreturn otherwise.
    methodname
        method to call
    arglist
        (self,)+arglist is the set of arguments passed to function
    argdict
        passed as namedargs to the function
    moreattr
        the attribute representing the children of a node
    '''
    # Recurse-safe depth-first walk (id()-keyed 'alreadydone' guards
    # against revisiting shared sub-trees of the DAG).
    alreadydone = {}
    tobedone = [self]
    results = []
    while tobedone:
        object = tobedone[0]
        try:
            alreadydone[id(object)]
            # We've already processed this object
        except KeyError:
            # We haven't processed this object
            alreadydone[id(object)]=1
            try:
                results.append(apply(getattr(object,methodname),arglist,argdict))
            except:
                # best-effort: nodes lacking the method (or whose method
                # raises anything) simply contribute no result
                pass
            try:
                # splice children in at the front => depth-first order
                tobedone[1:1]=getattr(object,moreattr)
            except AttributeError:
                # if the object isn't a hierobj, we don't need to recurse into it.
                pass
        del(tobedone[0])
    return results

View File

@@ -0,0 +1,16 @@
'''
Hierarchic 'Dummy' objects
'''
import hierobj, dummy
class HierobjDummy(hierobj.Hierobj,dummy.Dummy):
    '''
    An Hierarchic Dummy object, which provides direct access to its
    children through object[x] interfaces, allows "index" "count"
    etceteras by returning the corresponding attributes of the _base.
    '''
    def __init__(self, parent=None, childlist=None):
        hierobj.Hierobj.__init__(self, parent, childlist)
        # Alias the Dummy wrapper's _base to the hierarchy's child list so
        # the sequence protocol (indexing, count, index, ...) operates
        # directly on the children.
        self._base = self.__childlist__ #set by init function above

View File

@@ -0,0 +1,133 @@
'''
Generic Hierarchic Objects Module
Hierobj's store their children (which can be anything) in their
__childlist__ attribute, and provide methods for walking the
hierarchy, either collecting results or not.
The index function returns an index of the objects (effectively a
flattened copy of the hierarchy)
97-03-17 Added ability to pass arguments to hier_rapply and hier_rreturn.
97-10-31 Removed dependencies on mcf.store
'''
import copy,types
import singletonlist, hier_rx
class Hierobj:
    '''
    An abstract class which handles hierarchic functions and information
    # remade as a DAG 97-04-02, also reduced memory overhead for
    hier-r* functions by using while-del-IndexError construct versus
    for loop (probably makes it slower though)
    If you require a true hierarchy, use the TrueHierobj class below...
    '''
    def __init__(self, parent=None, childlist=None):
        '''
        parent -- None, a single parent, or a list of parents (this is
        a DAG: a node may have any number of parents)
        childlist -- optional initial list of children
        '''
        if parent is None: # passed no parents
            self.__dict__['__parent__'] = []
        elif type(parent) == types.ListType: # passed a list of parents
            self.__dict__['__parent__'] = parent
        else: # passed a single parent
            self.__dict__['__parent__'] = [parent]
        self.__dict__['__childlist__'] = childlist or []
        # register ourself as a parent of every hierobj-aware child
        for child in self.__childlist__:
            try:
                child.__parent__.append(self)
            except:
                # non-hierobj children simply don't track their parents
                pass
    # import simple hierarchic processing methods
    hier_rapply = hier_rx.hier_rapply
    hier_rreturn = hier_rx.hier_rreturn
    hier_rgetattr = hier_rx.hier_rgetattr
    hier_rmethod = hier_rx.hier_rmethod
    def hier_addchild(self, child):
        '''
        Add a single child to the childlist
        '''
        self.__childlist__.append(child)
        try:
            # Hierobj-aware child
            child.__parent__.append(self) # raises error if not hier_obj aware
        except (TypeError, AttributeError):
            # Non Hierobj-aware child
            pass
    append = hier_addchild
    def hier_remchild(self, child):
        '''
        Breaks the child relationship with child, including the
        reciprocal parent relationship.
        Returns 1 on success, 0 on failure.
        '''
        try:
            self.__childlist__.remove(child)
            try:
                child.hier_remparent(self) # if this fails, no problem
            except AttributeError: pass
        except (AttributeError,ValueError):
            return 0 # didn't manage to remove the child
        return 1 # succeeded
    def hier_remparent(self, parent):
        '''
        Normally only called by hier_remchild of the parent,
        just removes the parent from the child's parent list,
        but leaves child in parent's childlist
        '''
        try:
            self.__parent__.remove(parent)
        except (AttributeError,ValueError):
            return 0
        return 1
    def hier_replacewith(self,newel):
        '''
        As far as the hierarchy is concerned, the new element
        is exactly the same as the old element, it has all
        the same children, all the same parents. The old
        element becomes completely disconnected from the hierarchy,
        but it still retains all of its references
        For every parent, replace this as a child
        For every child, replace this as the parent
        '''
        for parent in self.__parent__:
            try:
                parent.hier_replacechild(self, newel)
            except AttributeError:
                pass
        for child in self.__childlist__:
            try:
                # BUGFIX: this used to call hier_replaceparent(self, parent),
                # passing the stale loop variable from the loop above (or an
                # unbound name for a parentless node).  Per the docstring,
                # children must be re-pointed at the replacement element.
                child.hier_replaceparent(self,newel)
            except AttributeError:
                pass
    def hier_replaceparent(self, oldparent, newparent):
        # swap newparent into oldparent's slot in our parent list;
        # raises ValueError if oldparent is not currently a parent
        ind = self.__parent__.index(oldparent)
        self.__parent__[ind] = newparent
    def hier_replacechild(self, oldchild, newchild):
        # swap newchild into oldchild's slot in our child list;
        # raises ValueError if oldchild is not currently a child
        ind = self.__childlist__.index(oldchild)
        self.__childlist__[ind] = newchild
class TrueHierobj(Hierobj):
    '''
    An inefficient implementation of an Hierobj which limits the
    __parent__ attribute to a single element. This will likely be
    _slower_ than an equivalent Hierobj. That will have to be fixed
    eventually.
    '''
    def __init__(self, parent=None, childlist=[]):
        # NOTE: the mutable default childlist=[] is safe here only because
        # it is copied (copy.copy below) before being stored.
        if parent is None: # passed no parents
            self.__dict__['__parent__'] = singletonlist.SingletonList()
        else: # passed a single parent
            self.__dict__['__parent__'] = singletonlist.SingletonList(parent)
        self.__dict__['__childlist__'] = copy.copy(childlist)
        # register ourself as a parent of every hierobj-aware child
        for child in self.__childlist__:
            try:
                child.__parent__.append(self)
            except:
                pass
def index(grove):
    '''
    Return a flattened copy of the grove: every node reachable from
    grove, collected depth-first via hier_rreturn.
    '''
    identity = lambda node: node
    return grove.hier_rreturn(identity)

View File

@@ -0,0 +1,38 @@
class inplace:
    '''
    Wraps a number so that '+' and '-' mutate the wrapped value in
    place and return the new value (C-style accumulator, predating
    Python's augmented assignment).  The remaining operators are
    pure: they read self.base without modifying it.
    '''
    def __add__( self, num ):
        # mutates: base becomes base + num
        self.base = self.base + num
        return self.base
    def __sub__( self, num ):
        # mutates: base becomes base - num
        self.base = self.base - num
        return self.base
    def __init__(self, base ):
        self.base = base
    def __repr__(self ):
        return repr( self.base)
    def __str__(self ):
        return str( self.base)
    __radd__ = __add__
    def __mul__(self, num ):
        return self.base * num
    def __div__(self, num ):
        return self.base / num
    def __mod__(self, num ):
        return self.base % num
    def __neg__(self ):
        # NOTE(review): returns -abs(base), not -base, so negating an
        # already-negative value does NOT flip its sign -- confirm intent.
        return - abs( self.base)
    def __pos__(self ):
        # NOTE(review): unary plus normally returns the value unchanged;
        # this returns abs(base) -- confirm intent.
        return abs( self.base)
    def __abs__(self ):
        return abs( self.base )
    def __inv__(self ):
        # NOTE(review): arithmetic negation, not bitwise inversion
        # (~x is normally -x-1) -- confirm intent.
        return -self.base
    def __lshift__(self, num ):
        return self.base << num
    def __rshift__(self, num ):
        return self.base >> num
    def __and__(self, num ):
        # NOTE(review): logical 'and'/'or', not the bitwise &/| these
        # hooks normally implement -- confirm intent.
        return self.base and num
    def __or__(self, num ):
        return self.base or num
    def value( self ):
        # accessor for the wrapped value
        return self.base

View File

@@ -0,0 +1,224 @@
'''
NameSpace v0.04:
A "NameSpace" is an object wrapper around a _base dictionary
which allows chaining searches for an 'attribute' within that
dictionary, or any other namespace which is defined as part
of the search path (depending on the downcascade variable, is
either the hier-parents or the hier-children).
You can assign attributes to the namespace normally, and read
them normally. (setattr, getattr, a.this = that, a.this)
I use namespaces for writing parsing systems, where I want to
differentiate between sources (have multiple sources that I can
swap into or out of the namespace), but want to be able to get
at them through a single interface. There is a test function
which gives you an idea how to use the system.
In general, call NameSpace(someobj), where someobj is a dictionary,
a module, or another NameSpace, and it will return a NameSpace which
wraps up the keys of someobj. To add a namespace to the NameSpace,
just call the append (or hier_addchild) method of the parent namespace
with the child as argument.
### NOTE: if you pass a module (or anything else with a dict attribute),
names which start with '__' will be removed. You can avoid this by
pre-copying the dict of the object and passing it as the arg to the
__init__ method.
### NOTE: to properly pickle and/or copy module-based namespaces you
will likely want to do: from mcf.utils import extpkl, copy_extend
### Changes:
97.05.04 -- Altered to use standard hierobj interface, cleaned up
interface by removing the "addparent" function, which is reachable
by simply appending to the __parent__ attribute, though normally
you would want to use the hier_addchild or append functions, since
they let both objects know about the addition (and therefor the
relationship will be restored if the objects are stored and unstored)
97.06.26 -- Altered the getattr function to reduce the number of
situations in which infinite lookup loops could be created
(unfortunately, the cost is rather high). Made the downcascade
variable harden (resolve) at init, instead of checking for every
lookup. (see next note)
97.08.29 -- Discovered some _very_ weird behaviour when storing
namespaces in mcf.store dbases. Resolved it by storing the
__namespace_cascade__ attribute as a normal attribute instead of
using the __unstore__ mechanism... There was really no need to
use the __unstore__, but figuring out how a functions saying
self.__dict__['__namespace_cascade__'] = something
print `self.__dict__['__namespace_cascade__']` can print nothing
is a bit beyond me. (without causing an exception, mind you)
97.11.15 Found yet more errors, decided to make two different
classes of namespace. Those based on modules now act similar
to dummy objects, that is, they let you modify the original
instead of keeping a copy of the original and modifying that.
98.03.15 -- Eliminated custom pickling methods as they are no longer
needed for use with Python 1.5final
98.03.15 -- Fixed bug in items, values, etceteras with module-type
base objects.
'''
import copy, types, string
import hierobj
class NameSpace(hierobj.Hierobj):
'''
An hierarchic NameSpace, allows specification of upward or downward
chaining search for resolving names
'''
def __init__(self, val = None, parents=None, downcascade=1,children=[]):
'''
A NameSpace can be initialised with a dictionary, a dummied
dictionary, another namespace, or something which has a __dict__
attribute.
Note that downcascade is hardened (resolved) at init, not at
lookup time.
'''
hierobj.Hierobj.__init__(self, parents, children)
self.__dict__['__downcascade__'] = downcascade # boolean
if val is None:
self.__dict__['_base'] = {}
else:
if type( val ) == types.StringType:
# this is a reference to a module which has been pickled
val = __import__( val, {},{}, string.split( val, '.') )
try:
# See if val's a dummy-style object which has a _base
self.__dict__['_base']=copy.copy(val._base)
except (AttributeError,KeyError):
# not a dummy-style object... see if it has a dict attribute...
try:
if type(val) != types.ModuleType:
val = copy.copy(val.__dict__)
except (AttributeError, KeyError):
pass
# whatever val is now, it's going to become our _base...
self.__dict__['_base']=val
# harden (resolve) the reference to downcascade to speed attribute lookups
if downcascade: self.__dict__['__namespace_cascade__'] = self.__childlist__
else: self.__dict__['__namespace_cascade__'] = self.__parent__
def __setattr__(self, var, val):
'''
An attempt to set an attribute should place the attribute in the _base
dictionary through a setitem call.
'''
# Note that we use standard attribute access to allow ObStore loading if the
# ._base isn't yet available.
try:
self._base[var] = val
except TypeError:
setattr(self._base, var, val)
def __getattr__(self,var):
## print '__getattr__', var
return self.__safe_getattr__(var, {}) # the {} is a stopdict
def __safe_getattr__(self, var,stopdict):
'''
We have a lot to do in this function, if the attribute is an unloaded
but stored attribute, we need to load it. If it's not in the stored
attributes, then we need to load the _base, then see if it's in the
_base.
If it's not found by then, then we need to check our resource namespaces
and see if it's in them.
'''
# we don't have a __storedattr__ or it doesn't have this key...
if var != '_base':
try:
return self._base[var]
except (KeyError,TypeError), x:
try:
return getattr(self._base, var)
except AttributeError:
pass
try: # with pickle, it tries to get the __setstate__ before restoration is complete
for cas in self.__dict__['__namespace_cascade__']:
try:
stopdict[id(cas)] # if succeeds, we've already tried this child
# no need to do anything, if none of the children succeeds we will
# raise an AttributeError
except KeyError:
stopdict[id(cas)] = None
return cas.__safe_getattr__(var,stopdict)
except (KeyError,AttributeError):
pass
raise AttributeError, var
def items(self):
try:
return self._base.items()
except AttributeError:
pass
try:
return self._base.__dict__.items()
except AttributeError:
pass
def keys(self):
try:
return self._base.keys()
except AttributeError:
pass
try:
return self._base.__dict__.keys()
except AttributeError:
pass
def has_key( self, key ):
try:
return self._base.has_key( key)
except AttributeError:
pass
try:
return self._base.__dict__.has_key( key)
except AttributeError:
pass
def values(self):
try:
return self._base.values()
except AttributeError:
pass
try:
return self._base.__dict__.values()
except AttributeError:
pass
def __getinitargs__(self):
if type( self._base ) is types.ModuleType:
base = self._base.__name__
else:
base = self._base
return (base, self.__parent__, self.__downcascade__, self.__childlist__)
def __getstate__(self):
return None
def __setstate__(self,*args):
pass
def __deepcopy__(self, memo=None):
d = id(self)
if memo is None:
memo = {}
elif memo.has_key(d):
return memo[d]
if type(self._base) == types.ModuleType:
rest = tuple(map( copy.deepcopy, (self.__parent__, self.__downcascade__, self.__childlist__) ))
new = apply(self.__class__, (self._base,)+rest )
else:
new = tuple(map( copy.deepcopy, (self._base, self.__parent__, self.__downcascade__, self.__childlist__) ))
return new
## def __del__( self, id=id ):
## print 'del namespace', id( self )
def test():
    # Smoke-test/demo: build a NameSpace over the string module, then
    # chain in a plain-dictionary namespace and the math module, and
    # return the composite for interactive poking.
    import string
    a = NameSpace(string)
    del(string)
    a.append(NameSpace({'a':23,'b':42}))
    import math
    a.append(NameSpace(math))
    print 'The returned object should allow access to the attributes of the string,\nand math modules, and two simple variables "a" and "b" (== 23 and42 respectively)'
    return a

View File

@@ -0,0 +1,78 @@
'''
Generic quoting functions (very fast),
generalised to allow use in any number of
situations, but normally you'll want to create
a new function based on these patterns which
has the default args you need. This will
prevent an extra function call.
'''
import string, regex
# create a translator which is fully worked out...
def _quote(somestring,trans,start='"',stop='"'):
    '''
    Return a quoted version of somestring.

    trans must map every character that can occur in somestring to its
    (escaped) replacement string; the loop is a per-character table lookup.
    '''
    # would be _so_ much better if we could use the
    # getitem, consider...
    # return '%s%s%s'%(start,string.join(map(trans.__getitem__, somestring), ''),stop)
    temp = list(somestring)
    for charno in xrange(len(temp)):
        temp[charno]= trans[temp[charno]]
    return '%s%s%s'%(start,string.join(temp, ''),stop)
def compilerex(trans):
    '''
    Compiles a suitable regex from a dictionary
    translation table. Should be used at design
    time in most cases to improve speed. Note:
    is not a very intelligent algo. You could
    do better by creating a character-class []
    for the single-character keys and then the
    groups for the or-ing after it, but I've not
    got the time at the moment.
    '''
    keyset = trans.keys()
    # multitrans collects keys whose replacement changes the text length
    multitrans = []
    for x in range(len(keyset)):
        if len(keyset[x]) != len(trans[keyset[x]]):
            multitrans.append((keyset[x],trans[keyset[x]]))
        if len(keyset[x])!= 1:
            # wrap multi-character keys in a group so alternation
            # applies to the whole key, not just its last character
            keyset[x] = '\(%s\)'%keyset[x]
    if multitrans:
        return 1,regex.compile(string.join(keyset,'\|'))
    # NOTE(review): implicitly returns None when no replacement changes
    # the length -- callers apparently must handle that case; confirm.
def quote2(somestring,trans,rex,start='',stop=''):
    '''
    Should be a faster version of _quote once
    the regex is built. Rex should be a simple
    or'ing of all characters requiring substitution,
    use character ranges whereever possible (should
    be in most cases)
    '''
    temp = list(somestring)
    curpos = 0
    try:
        # repeatedly find the next match and splice its replacement into
        # the working character list; curpos skips past each replacement
        while rex.search(somestring,curpos) != -1:
            pos = rex.regs[0]
            print pos
            # NOTE(review): debugging print left in above -- consider removing.
            replacement = list(trans[rex.group(0)])
            temp[pos[0]:pos[1]] = replacement
            curpos = pos[0]+len(replacement)
    except (IndexError,regex.error):
        pass
    return '%s%s%s'%(start,string.join(temp, ''),stop)
# compatibility alias (historical name)
_quote2 = quote2
def reprq(obj, qtype):
    '''
    Return representation of a string obj as a string with qtype
    quotes surrounding it. Usable when linearising Python objects
    to languages which have only a particular type of string. (Such
    as VRML). This is not a generalised nor a particularly reliable
    solution. You should use the _quote2 function instead.
    '''
    # Inner split/join doubles every backslash; the outer pair then
    # prefixes each qtype quote character with a backslash.
    return '%s%s%s'%(qtype,string.join(string.split(string.join(string.split(obj, '\\'), '\\\\'), qtype), '\\%s'%qtype),qtype)

View File

@@ -0,0 +1,64 @@
''' Classes which match ranges, sets, or anything at all. '''
import dummy # provides storage functions as well as a few others
class BetwVal(dummy.Dummy):
    '''
    Matches any object greater than smaller and less than larger
    '''
    def __init__(self, first, second):
        # normalise to _base == [smaller, larger] regardless of argument order
        if first <= second:
            dummy.Dummy.__init__(self, [first, second])
        else:
            dummy.Dummy.__init__(self, [second, first])
    def __getinitargs__(self):
        # copy/pickle support
        return (self._base[0], self._base[1])
    def __cmp__(self, object):
        '''The Guts of the Class, allows standard comparison operators'''
        # 0 (equal) when object lies inside [smaller, larger] inclusive.
        # NOTE(review): for values outside the range the sign looks inverted
        # relative to cmp() convention (returns 1 when object > larger,
        # -1 when object < smaller) -- confirm callers expect this.
        if self._base[0]<=object:
            if self._base[1] >=object:
                return 0
            else: return 1
        else: return -1
    def __repr__(self):
        return '%s(%s,%s)'% (self.__class__.__name__,`self._base[0]`,`self._base[1]`)
class WInVal(dummy.Dummy):
    '''
    Matches any value in the sequential object used as initialiser
    Doesn't gracefully handle situations where not found, as it just
    returns a -1
    '''
    def __init__(self,seq):
        self._base = seq
    def __cmp__(self, object):
        ''' Standard comparison operators '''
        # "equal" (0) to any member of the stored sequence, -1 otherwise
        for x in self._base:
            if x == object:
                return 0
        return -1
    def __repr__(self):
        return '%s(%s)'% (self.__class__.__name__,`self._base`)
class ExceptVal(WInVal):
    '''
    A negative Version of WInVal
    '''
    def __cmp__(self, object):
        # inverse of WInVal: "equal" (0) only when object is NOT a member
        for x in self._base:
            if x == object:
                return -1
        return 0
class AnyVal:
    '''
    Wildcard matcher: compares equal to absolutely anything.
    '''
    def __init__(self):
        pass
    def __getinitargs__(self):
        # copy/pickle support: no constructor arguments
        return ()
    def __cmp__(self, other):
        # always "equal"
        return 0
    def __repr__(self):
        return 'AnyVal()'

View File

@@ -0,0 +1,158 @@
import win32api, win32con, string, types
def _getDataType( data, coerce = 1 ):
    '''
    Return a tuple of dataType, data for a given object
    automatically converts non-string-or-tuple-data into
    strings by calling pickle.dumps
    '''
    if type( data ) is types.StringType:
        return win32con.REG_SZ, data
    elif type( data ) is types.IntType:
        return win32con.REG_DWORD, data
    # what about attempting to convert Longs, floats, etceteras to ints???
    elif coerce:
        # arbitrary objects are stored as their pickle text in a REG_SZ value
        import pickle
        return win32con.REG_SZ, pickle.dumps( data )
    else:
        raise TypeError, '''Unsupported datatype for registry, use getDataType( data, coerce=1) to store types other than string/int.'''
def _getBaseKey( fullPathSpec ):
    '''
    Split a "full path specification" registry key
    into its root and subpath components
    '''
    key = ''
    subkey = fullPathSpec
    # while loop will strip off preceding \\ characters
    # NOTE(review): each pass re-splits fullPathSpec (not subkey), so a
    # path that actually starts with '\\' would loop forever -- confirm
    # inputs never have a leading backslash.
    while subkey and not key:
        key, subkey = string.split( fullPathSpec, '\\', 1 )
    try:
        # resolve e.g. 'HKEY_CURRENT_USER' to its win32con handle constant
        return getattr( win32con, key ), subkey
    except AttributeError:
        # NOTE(review): raises a string exception (rejected by modern
        # Python); should become ValueError or similar.
        raise '''Unknown root key %s in registry path %s'''% (key, fullPathSpec)
def RegSetValue( key, valuename='', data='', allowPickling=1 ):
    '''
    Set a registry value by providing a fully-specified
    registry key (and an optional sub-key/value name),
    and a data element. If allowPickling is true, the
    data element can be any picklable element, otherwise
    data element must be a string or integer.
    '''
    root, subkey = _getBaseKey( key )
    dataType, data = _getDataType( data, allowPickling )
    try:
        hKey = win32api.RegOpenKeyEx( root , subkey, 0, win32con.KEY_ALL_ACCESS) # could we use a lesser access model?
    except:
        # open failed (key presumably missing): create it instead
        hKey = win32api.RegCreateKey( root, subkey )
    try:
        if not valuename: # the default value
            win32api.RegSetValue( hKey, valuename, dataType, data )
        else: # named sub-value
            win32api.RegSetValueEx( hKey, valuename, 0, dataType, data )
    finally:
        # always release the handle, even if the write fails
        win32api.RegCloseKey( hKey)
def RegQueryValue( key, valuename='', pickling=0 ):
    '''
    Get a registry value by providing a fully-specified
    registry key (and an optional sub-key/value name)
    If pickling is true, the data element will be
    unpickled before being returned.

    Returns None (and skips unpickling) when a named value is absent.
    '''
    #print 'key', key
    root, subkey = _getBaseKey( key )
    if not valuename: # the default value
        # NOTE(review): local 'type' shadows the builtin within this function
        data, type = win32api.RegQueryValue( root , subkey)
    else:
        try:
            #print root, subkey
            hKey = win32api.RegOpenKeyEx( root, subkey, 0, win32con.KEY_READ)
            #print hKey, valuename
            try:
                data, type = win32api.RegQueryValueEx( hKey, valuename )
            except: #
                data, type = None, 0 # value is not available...
                # suppress unpickling of the missing value below
                pickling = None
        finally:
            win32api.RegCloseKey( hKey)
    if pickling:
        import pickle
        data = pickle.loads( data )
    return data
# following constants seem to reflect where path data is stored on NT machines
# no idea if it'll work on a 95 machine
def AddPathEntry( newEntry, user = 1, prepend=0 ):
    '''
    Add or remove path entry on NT, use prepend == -1 for removal,
    use prepend == 0 for append, prepend= 1 for prepending to the
    current path.
    '''
    # user flag selects the per-user vs. machine-wide environment key
    if user:
        user = 'USER'
    else:
        user = 'MACHINE'
    key, valuename = COMMON_KEYS[ (user, 'PATH') ]
    _PathManager( key, valuename, newEntry, prepend )
def PyExecutables( user = 1, prepend=0 ):
    '''
    Register/Deregister Python files as executables

    Edits the PATHEXT-style value selected from COMMON_KEYS; prepend
    follows the _PathManager convention (1 prepend, 0 append, -1 remove).
    '''
    if user:
        user = 'USER'
    else:
        user = 'MACHINE'
    key, valuename = COMMON_KEYS[ (user, 'PYEXECUTABLES') ]
    # the default executables + Python scripts...
    if prepend < 0: # are to eliminate only .py
        newEntry = '.PY'
    else:
        newEntry = '.PY;.COM;.EXE;.BAT;.CMD'
    _PathManager( key, valuename, newEntry, prepend )
def _PathManager( key, valuename, newEntry, prepend=0, eliminate_duplicates=1 ):
    '''
    Create a new Path entry on NT machines (or kill an old one)
    user determines whether to alter the USER or the Machine's path
    prepend
        1 -> add newEntry to start
        0 -> add newEntry to end
        -1 -> don't add newEntry
    eliminate_duplicates determines whether to kill equal paths
    All values are converted to lower case
    '''
    # get current value...
    curval = RegQueryValue( key, valuename ) or ''
    # split into elements
    curval = string.split( string.lower(curval), ';' )
    # newEntry may be a pre-split sequence or a ';'-separated string
    if type( newEntry ) not in (types.ListType, types.TupleType):
        newEntry = string.split( string.lower(newEntry), ';' )
    # eliminate duplicates of the newEntry
    curval = filter( None, curval) # strip out null entries
    if eliminate_duplicates:
        newval = []
        for p in curval:
            if p not in newEntry:
                newval.append( p )
        curval = newval
    if prepend == 1:
        curval = list(newEntry) + curval
    elif prepend == 0:
        curval = curval + list( newEntry )
    elif prepend == -1: # this call is just killing the path entry
        pass
    #now do the recombination
    curval = string.join( curval, ';' )
    RegSetValue( key, valuename, curval )
# (scope, purpose) -> (full registry key path, value name) pairs used by
# the path-editing helpers above.
COMMON_KEYS = {
    ('USER','PATH') : ('''HKEY_CURRENT_USER\\Environment''', 'path'),
    ('MACHINE','PATH') : ('''HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment''', 'path'),
    ('USER','PYEXECUTABLES') : ('''HKEY_CURRENT_USER\\Environment''', 'pathext'),
    ('MACHINE','PYEXECUTABLES') : ('''HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment''', 'pathext')
}

View File

@@ -0,0 +1,33 @@
import sys, string
class Reloader:
    '''
    Class allows for reloading all modules imported
    after the instance is created. Normally you will
    use this by doing:
    import <anything you don't want reloaded>
    from mcf.utils import reloader
    <do testing and rewriting>
    reloader.go()
    '''
    def __init__(self):
        # snapshot of the already-imported modules; these are never reloaded
        self.keys = sys.modules.keys()
    def __call__(self, *args, **namedargs):
        # Reload every module imported since __init__ ran; prints a summary.
        done = []
        for key, val in sys.modules.items():
            if key not in self.keys:
                try:
                    reload( val )
                    done.append( key )
                except (ImportError):
                    print '''Couldn't reload module:''', key
                except (TypeError): # for None's
                    # sys.modules entries set to None
                    # is a flag to prevent reloading
                    pass
        if done:
            print '''Reloaded:''', string.join( done, ', ')
        else:
            print '''No modules reloaded'''
# The default reloader instance: snapshots sys.modules at this module's
# import time; call reloader.go() later to reload everything newer.
go = Reloader()

View File

@@ -0,0 +1,104 @@
class SingletonList:
    '''
    A SingletonList always has a length of one or 0,
    appends overwrite the single element, iteration will
    return precisely one element. Attempts to get any item
    other than 0 will raise an IndexError or return the single
    item depending on whether the 'raiseIndexError' flag is
    true or false (generally it should be true except if the
    for x in SingletonList: construct is known never to be
    used, since this construct will create an infinite loop
    if we never raise an IndexError).
    '''
    def __init__(self,base=None,raiseIndexError=1):
        self._base = base
        self.raiseIndexError = raiseIndexError
    def __len__(self):
        '''
        The length is 0 if no _base, 1 if a base
        '''
        # Emptiness is represented by DELETING _base (see __delitem__ /
        # remove), hence the hasattr test; a fresh instance has length 1
        # even when constructed with base=None.
        if hasattr(self, '_base'):
            return 1
        else:
            return 0
    def __getitem__(self,ind):
        '''
        Get the item if ind == 0, else raise an IndexError or return
        the item, depending on the raiseIndexError flag
        '''
        if ind == 0:
            try:
                return self._base
            except AttributeError:
                raise IndexError, ind
        elif self.raiseIndexError:
            raise IndexError, ind
        else:
            return self._base
    def __setitem__(self,ind, item):
        '''
        The item is to become the base
        '''
        # the index is ignored: there is only one slot
        self._base = item
    def __delitem__(self,ind):
        '''
        Delete the base, regardless of the index used
        '''
        try:
            del(self._base)
        except AttributeError:
            raise IndexError, ind
    def append(self,item):
        '''
        Replace the base with the item
        '''
        self._base = item
    def index(self,item):
        '''
        if the item is the base, return the only valid index (0)
        '''
        try:
            if item == self._base:
                return 0
        except:
            pass
        raise ValueError, item
    def count(self, item):
        '''
        If the item is the base, we have one, else 0
        '''
        try:
            if item == self._base:
                return 1
        except:
            pass
        return 0
    # list.insert(ind, item) compatibility: also just overwrites the slot
    insert = __setitem__
    def remove(self, item):
        '''
        if the item is the base, delete the base, else ValueError
        '''
        try:
            if item == self._base:
                del(self._base)
                return
        except:
            pass
        raise ValueError, item
    def reverse(self):
        # no-op: a single element is its own reversal
        pass
    def sort(self):
        # no-op: a single element is already sorted
        pass
    def __repr__(self):
        try:
            return '[%s]'%`self._base`
        except AttributeError:
            return '[]'
# store and copy functions
# def __getinitargs__(self):
# return (self._base,self.raiseIndexError)
# def __getstate__(self,*args,**namedargs):
# pass
# def __setstate__(self,*args,**namedargs):
# pass

View File

@@ -0,0 +1,251 @@
'''
Generate module for holding temporary classes which
will be reconstructed into the same module to allow
cPickle and the like to properly import them.
Note: You _must_ pickle a reference to the tempclassmodule
_before_ you pickle any instances which use the classes stored
in the module! Also, the classes cannot reference anything
in their dictionary or bases tuples which are not normally
pickleable (in particular, you can't subclass a class in the
same tempclassmodule or a tempclassmodule which you cannot
guarantee will be loaded before the dependent classes. (i.e.
by guaranteeing they will be pickled first)
'''
import new, time, string, sys, types
def buildModule(packagename, basename, rebuild=None, initialcontents=None):
	'''
	Dynamically build a module or rebuild one, generates
	a persistent ID/name if not rebuilding. The persistent
	ID is the value of basename+`time.time()` with the decimal
	point removed (i.e. a long string of digits). Packagename
	must be an importable package! Will raise an ImportError
	otherwise. Also, for easy reconstitution, basename must not
	include any decimal points.
	initialcontents is a dictionary (or list) of elements which will be
	added to the new module.
	Returns a (name, module) tuple.
	'''
	if rebuild == None:
		# fresh build: append the timestamp's digits to make the name unique
		timestamp = `time.time()`
		decpos = string.find(timestamp,'.')
		basename = basename+timestamp[:decpos]+timestamp[decpos+1:]
	name = string.join((packagename, basename), '.')
	# scratch namespaces for the disabled exec below
	a = {}
	b = {}
	try: # see if we've already loaded this module...
		mod = __import__( name, {},{}, string.split( name, '.'))
		if initialcontents:
			_updateFrom(mod, initialcontents)
		return mod.__name__, mod
	except ImportError:
		pass
	# not loaded yet: create an empty module and register it globally
	mod = new.module(name)
	sys.modules[name] = mod
	# following is just to make sure the package is loaded before attempting to alter it...
	__import__( packagename, {}, {}, string.split(packagename) )
##	exec 'import %s'%(packagename) in a, b ### Security Risk!
	setattr(sys.modules[ packagename ], basename, mod)
	# now do the update if there were initial contents...
	if initialcontents:
		_updateFrom(mod, initialcontents)
	return name, mod
def buildClassIn(module, *classargs, **namedclassargs):
	'''
	Build a new class and register it in the module
	as if it were really defined there.
	classargs is (classname, basestuple); namedclassargs becomes
	the class dictionary, tagged with __temporary_class__ so that
	deconstruct_class knows it can be taken apart for pickling.
	'''
	print module, classargs, namedclassargs # NOTE(review): leftover debug print?
	namedclassargs["__temporary_class__"] = 1
	newclass = new.classobj(classargs[0], classargs[1], namedclassargs)
	newclass.__module__ = module.__name__
	setattr(module, newclass.__name__, newclass)
	return newclass
def addClass(module, classobj):
	'''
	Register classobj in the tempclassmodule as though it had been
	defined there: mark it as temporary, repoint its __module__ at
	the module, and bind it under its own name.
	'''
	setattr( classobj, "__temporary_class__", 1)
	classobj.__module__ = module.__name__
	setattr(module, classobj.__name__, classobj)
def delClass(module, classobj):
	'''
	Remove classobj from the module; silently does nothing when the
	attribute is absent. Note: after running this the classobj can no
	longer be pickled/unpickled unless it is added to another module,
	since its __module__ attribute still points at a module which will
	no longer save its definition.
	'''
	name = classobj.__name__
	try:
		delattr(module, name)
	except AttributeError:
		pass
def _packageName(modulename):
	'''Split a dotted module name into (packagename, basename).'''
	# NOTE(review): when modulename contains no '.', rfind returns -1,
	# giving ('', name-minus-nothing) style results — confirm callers
	# always pass fully dotted names.
	decpos = string.rfind(modulename, '.')
	return modulename[:decpos], modulename[decpos+1:]
def _updateFrom(module, contentsource):
	'''
	For dealing with unknown datatypes (those passed in by the user),
	we want to check and make sure we're building the classes correctly.
	contentsource may be a dict/instance (its values are used) or a
	sequence mixing class objects and buildClassIn argument tuples;
	elements that fail to register are silently skipped.
	'''
	# often will pass in a protoNamespace from which to update (during cloning)
	if type(contentsource) in ( types.DictType, types.InstanceType):
		contentsource = contentsource.values()
	# contentsource should now be a list of classes or class-building tuples
	for val in contentsource:
		if type(val) is types.ClassType:
			try:
				addClass(module, val)
			except:
				pass
		elif type(val) is types.TupleType:
			try:
				apply(buildClassIn, (module,)+val)
			except:
				pass
def deconstruct(templatemodule):
	'''
	Return a tuple which can be passed to reconstruct
	in order to get a rebuilt version of the module
	after pickling. i.e. apply(reconstruct, deconstruct(tempmodule))
	is the equivalent of doing a deepcopy on the tempmodule.
	The result is (modulename, list-of-class-descriptions).
	'''
##	import pdb
##	pdb.set_trace()
	classbuilder = []
	for name, classobj in templatemodule.__dict__.items():
		if type(classobj) is types.ClassType: # only copy class objects, could do others, but these are special-purpose modules, not general-purpose ones.
			classbuilder.append( deconstruct_class( classobj) )
##	import pdb
##	pdb.set_trace()
	return (templatemodule.__name__, classbuilder)
##	except AttributeError:
##		print templatemodule
##		print classbuilder
def deconstruct_class( classobj ):
	'''
	Pull apart a class into a tuple of values which can be used
	to reconstruct it through a call to buildClassIn
	Regular classes yield a 2-tuple (modulename, classname) for
	re-import; temporary classes yield a 3-tuple
	(name, deconstructed-bases, class-dict).
	'''
	if not hasattr( classobj, "__temporary_class__"):
		# this is a regular class, re-import on load...
		return (classobj.__module__, classobj.__name__)
	else:
		# this is a temporary class which can be deconstructed
		bases = []
		for classobject in classobj.__bases__:
			bases.append( deconstruct_class (classobject) )
		return (classobj.__name__, tuple (bases), classobj.__dict__)
def reconstruct(modulename, classbuilder):
	'''
	Rebuild a temporary module and all of its classes
	from the structure created by deconstruct.
	i.e. apply(reconstruct, deconstruct(tempmodule))
	is the equivalent of doing a deepcopy on the tempmodule.
	'''
##	import pdb
##	pdb.set_trace()
	mname, newmod = apply(buildModule, _packageName(modulename)+(1,) ) # 1 signals reconstruct
	reconstruct_classes( newmod, classbuilder )
	return newmod
def reconstruct_classes( module, constructors ):
	'''
	Put a class back together from the tuple of values
	created by deconstruct_class.
	2-tuples (modulename, classname) are re-imported from their
	home module; 3-tuples (name, bases, dict) are rebuilt inside
	the target module via buildClassIn. Returns the list of
	resulting class objects.
	'''
	classes = []
	for constructor in constructors:
		if len (constructor) == 2:
			# a standard class: re-import it from its home module.
			# Use a dedicated local name here — previously this
			# unpacked into "module", clobbering the parameter so any
			# later 3-tuple constructor rebuilt into a string instead
			# of the target module.
			modulename, name = constructor
			temporarymodule = __import__(
				modulename,
				{},{},
				string.split(modulename)+[name]
				)
			classobject = getattr (temporarymodule, name)
		else:
			# this is a class which needs to be re-constructed
			(name, bases,namedarguments) = constructor
			bases = tuple( reconstruct_classes( module, bases ))
			classobject = apply (
				buildClassIn,
				(module, name, bases), # name and bases are the args to the class constructor along with the dict contents in namedarguments
				namedarguments,
				)
		classes.append (classobject)
	return classes
def destroy(tempmodule):
	'''
	Destroy the module to allow the system to do garbage collection
	on it. I'm not sure that the system really does do gc on modules,
	but one would hope :)
	'''
	name = tempmodule.__name__
	tempmodule.__dict__.clear() # clears references to the classes
	# drop the module from the global registry and from its parent package
	try:
		del(sys.modules[name])
	except KeyError:
		pass
	packagename, modname = _packageName(name)
	try:
		delattr(sys.modules[ packagename ], modname)
	except AttributeError:
		pass
	del( tempmodule ) # no, I don't see any reason to do it...
	return None
def deepcopy(templatemodule, packagename=None, basename=None):
	'''
	Rebuild the whole Module and it's included classes
	(just the classes). Note: This will _not_ make instances
	based on the old classes point to the new classes!
	The value of this function is likely to be minimal given
	this restriction. For pickling use deconstruct/reconstruct
	for simple copying just return the module.
	NOTE(review): returns buildModule's (name, module) tuple, not a
	bare module object -- preserved for interface compatibility.
	'''
	name, classbuilder = deconstruct( templatemodule )
	# fill in whichever of packagename/basename was omitted.
	# (previously "tb" was computed only when packagename was None, so
	# passing packagename without basename raised a NameError)
	if packagename is None or basename is None:
		tp, tb = _packageName( name )
		if packagename is None:
			packagename = tp
		if basename is None:
			basename = tb
	newmod = buildModule(packagename, basename, initialcontents=classbuilder )
	return newmod
if __name__ == "__main__":
	def testPickle ():
		# round-trip smoke test: build a temp module holding two classes,
		# deconstruct it, destroy it, then rebuild it from the description
		import mcf.vrml.prototype
		name, module = buildModule( 'mcf.vrml.temp', 'scenegraph' )
		buildClassIn( module, 'this', () )
		buildClassIn( module, 'that', (mcf.vrml.prototype.ProtoTypeNode,) )
##		import pdb
##		pdb.set_trace()
		import pprint
		pprint.pprint( deconstruct( module ))
		name,builder = deconstruct( module )
		destroy( module)
		return reconstruct(name, builder)
	t = testPickle()
	print t

View File

@@ -0,0 +1,50 @@
'''
Classes of Types
Often you want to be able to say:
if type(obj) in MutableTypes:
yada
This module is intended to make that easier.
Just import and use :)
'''
import types
MutableTypes = [ types.ListType, types.DictType, types.InstanceType ]
MutableSequenceTypes = [ types.ListType ]
SequenceTypes = [ types.ListType, types.StringType, types.TupleType ]
NumericTypes = [ types.IntType, types.FloatType, types.LongType, types.ComplexType ]
MappingTypes = [ types.DictType ]
def regarray():
	'''
	Register the array module's array type in the type lists.
	Returns 1 on success (or if already registered), 0 when the
	array module is unavailable. Safe to call repeatedly.
	'''
	try:
		import array
	except ImportError:
		return 0
	# previously this tested globals().has_key('array'), which is never
	# true for a function-local import, so repeated calls appended
	# duplicate entries; check the type lists themselves instead.
	if array.ArrayType in SequenceTypes:
		return 1
	SequenceTypes.append( array.ArrayType )
	MutableTypes.append( array.ArrayType )
	MutableSequenceTypes.append( array.ArrayType )
	return 1
def regnumpy():
	'''
	Call if you want to register numpy arrays
	according to their types.
	Returns 1 on success (or if already registered), 0 when the
	Numeric module is unavailable. Safe to call repeatedly.
	'''
	try:
		import Numeric
	except ImportError:
		return 0
	# previously this tested globals().has_key('Numeric'), which is never
	# true for a function-local import, so repeated calls appended
	# duplicate entries; check the type lists themselves instead.
	if Numeric.ArrayType in SequenceTypes:
		return 1
	SequenceTypes.append( Numeric.ArrayType )
	MutableTypes.append( Numeric.ArrayType )
	MutableSequenceTypes.append( Numeric.ArrayType )
	return 1
# for now, I'm going to always register these, if the module becomes part of the base distribution
# it might be better to leave it out so numpy isn't always getting loaded...
regarray()
regnumpy()

View File

@@ -0,0 +1,17 @@
import string
def userquery( prompt, choices, contextdata = '', defaultind=0 ):
	'''
	Interactively ask the user to pick one of choices.
	An empty reply returns choices[defaultind]; a numeric reply
	indexes into choices (out-of-range falls back to the default);
	any other text is returned verbatim.
	'''
	if contextdata:
		print 'Contextual Information:', contextdata
	# display each choice with the index used to select it
	for x in range( len( choices ) ):
		print '(%s)'%x, `choices[x]`
	choice = raw_input( prompt+( '(%s):'%defaultind ) )
	if not choice:
		return choices[ defaultind ]
	try:
		choice = string.atoi( choice )
		# NOTE(review): a negative number indexes from the end of choices
		# instead of falling back to the default — confirm intended.
		return choices[ choice]
	except IndexError :
		return choices[ defaultind ]
	except ValueError:
		return choice

View File

@@ -0,0 +1,17 @@
'''
Module giving a float representation
of the interpreter major version (1.4, 1.5 etceteras)
ver -- Float representation of the current interpreter version
Note: Since I no longer have any Python 1.4 modules, this module is
no longer in use by me. I intend to leave it here for the next version
jump :) .
'''
import regex, sys, string
# the pre-"re" regex module's match() returns the *length* of the match,
# so this slices the leading run of digits and dots out of sys.version
# and converts it to a float (e.g. '1.5.2...' -> 1.52? no: '1.5' -> 1.5)
ver = string.atof(sys.version[:regex.match('[0-9.]*', sys.version)])
### Clean up namespace
del(regex)
del(sys)
del(string)

View File

@@ -0,0 +1,46 @@
'''
Really simplistic walker-processable hierobjects, doesn't
have parent attributes, every element has an __attrDict__
item and a childlist. This is different from the mechanisms
we'll want to use for multi-tree systems, but it's fairly
close. Should be fairly simply worked with.
'''
class WalkerAble:
	'''
	Simple hierarchic objects with the following elements
	__attrDict__ -- app-specific attributes
	__childlist__ -- childen of this node
	__gi__ -- "type" or Generic Indicator of this node
	__childlist__append__ -- as you'd expect, method on childlist to add an element
	'''
	def __init__(self, childlist=None, attrDict=None, gi=None):
		# write through self.__dict__ to avoid triggering __setattr__,
		# which would divert these reserved names into __attrDict__
		self.__dict__['__attrDict__'] = attrDict or {}
		self.__dict__['__childlist__'] = childlist or []
		self.__dict__['__gi__'] = gi or ''
		self.__dict__['__childlist__append__'] = self.__childlist__.append
	def __getattr__(self, attrName):
		'''
		Note: you can store attributes with the same names as
		the reserved names, but to get them back, you'll need
		to read it directly out of the attrDict
		'''
		# only called for names missing from __dict__; look them up in
		# the app-specific attribute dictionary instead
		if attrName != '__attrDict__':
			try:
				return self.__attrDict__[attrName]
			except KeyError:
				pass
		raise AttributeError, attrName
	def __setattr__(self, attrName, attrVal):
		# every ordinary attribute assignment lands in __attrDict__
		self.__attrDict__[attrName] = attrVal
	def __setGI__(self, gi):
		# bypass __setattr__ so the node type is stored on the instance itself
		self.__dict__['__gi__'] = gi
	def __repr__(self):
		return '''<WalkerAble %(__gi__)s %(__attrDict__)s %(__childlist__)s>'''%self.__dict__
	# copy functions
#	def __getinitargs__(self):
#		return (self.__childlist__, self.__attrDict__, self.__gi__)

View File

@@ -0,0 +1,5 @@
'''
Simple parsing using mxTextTools
tar -cvf simpleparse.tar --exclude-from=exclude.txt
'''

View File

@@ -0,0 +1,279 @@
from TextTools.TextTools import *
#####################################################
# FOLLOWING IS THE BOOTSTRAP PARSER, HAND-CODED!
parsernamelist = [
'declarationset', # 0
'declaration', # 1
'implicit_group', # 2 --> no longer used
'added_token', # 3
'seq_added_token', #4
'fo_added_token', #5
'or_added_token', #6
'and_added_token', #7
'element_token', #8
'group', #9
'negpos_indicator', #10
'occurence_indicator', #11
'unreportedname', #12
'name', #13
'<ts>', # 14
'literal', #15
'range', # 16
'CHARBRACE', #17
'CHARDASH', # 18
'CHARRANGE', # 19
'CHARNOBRACE', # 20
'ESCAPEDCHAR', # 21
'SPECIALESCAPEDCHAR', # 22
'OCTALESCAPEDCHAR' # 23
]
parsertuplelist = range( 24 )
parsertuplelist[0] = ( # declarationset
('declaration', TableInList,(parsertuplelist, 1)), # must be at least one declaration
('declaration', TableInList,(parsertuplelist, 1),1,0)
)
parsertuplelist[1] = ( # declaration
(None, TableInList,(parsertuplelist, 14)), # ts
(None, SubTable, (
('unreportedname', TableInList,(parsertuplelist, 12),1,2),
('name', TableInList,(parsertuplelist, 13)), # name
)
),
(None, TableInList,(parsertuplelist, 14)), # ts
(None, Word, ':='),
(None, TableInList,(parsertuplelist, 14)), # ts
('element_token', TableInList,(parsertuplelist, 8)),
(None, SubTable, ( # added_token
('seq_added_token', TableInList, (parsertuplelist,4), 1, 5 ),
('fo_added_token', TableInList, (parsertuplelist,5), 1, 4 ),
('or_added_token', TableInList, (parsertuplelist,6), 1, 3 ),
('and_added_token', TableInList, (parsertuplelist,7), 1, 2 ),
(None, Fail, Here),
('seq_added_token', TableInList, (parsertuplelist,4), 1, 0 ),
('fo_added_token', TableInList, (parsertuplelist,5), 1, -1 ),
('or_added_token', TableInList, (parsertuplelist,6), 1, -2 ),
('and_added_token', TableInList, (parsertuplelist,7), 1, -3 ),
),1,1),
(None, TableInList,(parsertuplelist, 14)), # ts
)
parsertuplelist[3] = ( # added_token
('seq_added_token', TableInList, (parsertuplelist,4), 1, 5 ),
('fo_added_token', TableInList, (parsertuplelist,5), 1, 4 ),
('or_added_token', TableInList, (parsertuplelist,6), 1, 3 ),
('and_added_token', TableInList, (parsertuplelist,7), 1, 2 ),
(None, Fail, Here),
('seq_added_token', TableInList, (parsertuplelist,4), 1, 0 ),
('fo_added_token', TableInList, (parsertuplelist,5), 1, -1 ),
('or_added_token', TableInList, (parsertuplelist,6), 1, -2 ),
('and_added_token', TableInList, (parsertuplelist,7), 1, -3 ),
)
parsertuplelist[4] = ( # seq_added_token
(None, TableInList,(parsertuplelist, 14)), # ts
(None, Is, ','),
(None, TableInList,(parsertuplelist, 14)), # ts
('element_token', TableInList,(parsertuplelist, 8)),
(None, TableInList,(parsertuplelist, 14),4,1), # ts
(None, Is, ',',3,1),
(None, TableInList,(parsertuplelist, 14),2,1), # ts
('element_token', TableInList,(parsertuplelist, 8),1,-3),
)
parsertuplelist[5] = ( # fo_added_token
(None, TableInList,(parsertuplelist, 14)), # ts
(None, Is, '/'),
(None, TableInList,(parsertuplelist, 14)), # ts
('element_token', TableInList,(parsertuplelist, 8)),
(None, TableInList,(parsertuplelist, 14),4,1), # ts
(None, Is, '/',3,1),
(None, TableInList,(parsertuplelist, 14),2,1), # ts
('element_token', TableInList,(parsertuplelist, 8),1,-3),
)
parsertuplelist[6] = ( # or_added_token
(None, TableInList,(parsertuplelist, 14)), # ts
(None, Is, '|'),
(None, TableInList,(parsertuplelist, 14)), # ts
('element_token', TableInList,(parsertuplelist, 8)),
(None, TableInList,(parsertuplelist, 14),4,1), # ts
(None, Is, '|',3,1),
(None, TableInList,(parsertuplelist, 14),2,1), # ts
('element_token', TableInList,(parsertuplelist, 8),1,-3),
)
parsertuplelist[7] = ( # and_added_token
(None, TableInList,(parsertuplelist, 14)), # ts
(None, Is, '&'),
(None, TableInList,(parsertuplelist, 14)), # ts
('element_token', TableInList,(parsertuplelist, 8)),
(None, TableInList,(parsertuplelist, 14),4,1), # ts
(None, Is, '&',3,1),
(None, TableInList,(parsertuplelist, 14),2,1), # ts
('element_token', TableInList,(parsertuplelist, 8),1,-3),
)
parsertuplelist[8] = ( # element_token
('negpos_indicator', TableInList,(parsertuplelist, 10),1,1),
(None, TableInList,(parsertuplelist, 14),1,1), # ts, very inefficient :(
('literal', TableInList, (parsertuplelist,15),1, 4 ),
('range', TableInList, (parsertuplelist,16),1, 3 ),
('group', TableInList, (parsertuplelist,9),1, 2 ),
('name', TableInList, (parsertuplelist,13) ),
(None, TableInList,(parsertuplelist, 14),1,1), # ts, very inefficient :(
('occurence_indicator', TableInList,(parsertuplelist, 11), 1,1),
)
parsertuplelist[9] = ( # group
(None, Is, '('),
(None, TableInList,(parsertuplelist, 14),1,1), # ts
('element_token', TableInList, (parsertuplelist,8) ),
(None, SubTable, ( # added_token
('seq_added_token', TableInList, (parsertuplelist,4), 1, 5 ),
('fo_added_token', TableInList, (parsertuplelist,5), 1, 4 ),
('or_added_token', TableInList, (parsertuplelist,6), 1, 3 ),
('and_added_token', TableInList, (parsertuplelist,7), 1, 2 ),
(None, Fail, Here),
('seq_added_token', TableInList, (parsertuplelist,4), 1, 0 ),
('fo_added_token', TableInList, (parsertuplelist,5), 1, -1 ),
('or_added_token', TableInList, (parsertuplelist,6), 1, -2 ),
('and_added_token', TableInList, (parsertuplelist,7), 1, -3 ),
),1,1),
(None, TableInList,(parsertuplelist, 14),1,1), # ts
(None, Is, ')'),
)
parsertuplelist[10] = ( # negpos_indicator
(None, Is, "+",1,2),
(None, Is, "-"),
)
parsertuplelist[11] = ( #occurence_indicator
(None, Is, "+",1,3),
(None, Is, "*",1,2),
(None, Is, '?'),
)
parsertuplelist[12] = ( #unreportedname
(None, Is, '<'),
('name', TableInList, (parsertuplelist, 13)), # inefficiency in final system :(
(None, Is, '>'),
)
parsertuplelist[13] = ( # name
(None, IsIn, alpha+'_'),
(None, AllIn, alphanumeric+'_',1,1)
)
parsertuplelist[14] = ( # ts (whitespace)
(None, AllIn, ' \011\012\013\014\015',1,1),
(None, SubTable, (
(None, Is, '#' ),
(None, AllNotIn, '\n',1,1 ) # problem if there's a comment at the end of the file :(
)
,1,-1 ),
)
# this isn't actually used in the bootstrap parser...
_specialescapedchar = parsertuplelist[22] = ( # SPECIALESCAPEDCHAR
('SPECIALESCAPEDCHAR', IsIn, '\\abfnrtv'),
)
_octalescapechar = parsertuplelist[23] = ( # OCTALESCAPEDCHAR
(None, IsIn, '01234567'),
(None, IsIn, '01234567',2),
(None, IsIn, '01234567',1),
)
_escapedchar = parsertuplelist[21] = ( # escapedcharacter
(None, Is, '\\' ),
('SPECIALESCAPEDCHAR', IsIn, '\\abfnrtv',1,4),
('OCTALESCAPEDCHAR', SubTable, _octalescapechar)
)
_charnobrace = parsertuplelist[20] = ( # charnobrace
('ESCAPEDCHAR', Table, _escapedchar, 1,2),
('CHAR', IsNot, ']'),
)
_rangedef = parsertuplelist[19] = ( # charrange
('CHARNOBRACE', Table, _charnobrace ),
(None, Is, '-'),
('CHARNOBRACE', Table, _charnobrace ),
)
parsertuplelist[16] = ( #range
(None, Is, '['),
('CHARBRACE', Is, ']',1,1),
('CHARDASH', Is, '-',1,1),
('CHARRANGE', Table, _rangedef, 1,0),
(None, SubTable, _charnobrace, 1,-1),
(None, Is, ']')
)
_sqstr = (
(None, Is, "'" ),
# (None, Is, "'",1, 5 ), # immediate close
(None, AllNotIn, "\\'",1,1 ), # all not an escape or end
(None, Is, "\\", 2, 1), # is an escaped char
(None, Skip, 1, 1, -2), # consume the escaped char and loop back
(None, Is, "'" ) # in case there was no matching ', which would also cause a fail for allnotin
)
_dblstr = (
(None, Is, '"' ),
# (None, Is, '"',1, 5 ), # immediate close
(None, AllNotIn, '\\"' ,1,1), # not an escaped or end
(None, Is, "\\", 2, 1), # is an escaped char
(None, Skip, 1, 1, -2), # consume the escaped char and loop back
(None, Is, '"' ) # in case there was no matching ", which would also cause a fail for allnotin
)
# literal := ("'",(CHARNOSNGLQUOTE/ESCAPEDCHAR)*,"'") / ('"',(CHARNODBLQUOTE/ESCAPEDCHAR)*,'"')
parsertuplelist[15] = ( # literal
(None, Is, "'", 4, 1 ),
('CHARNOSNGLQUOTE', AllNotIn, "\\'",1,1 ), # all not an escape or end
('ESCAPEDCHAR', Table, _escapedchar, 1, -1),
(None, Is, "'", 1,5 ),
(None, Is, '"' ),
('CHARNODBLQUOTE', AllNotIn, '\\"',1,1 ), # all not an escape or end
('ESCAPEDCHAR', Table, _escapedchar, 1, -1),
(None, Is, '"'),
)
declaration = r'''declarationset := declaration+
declaration := ts , (unreportedname/name) ,ts,':=',ts, element_token, ( seq_added_token / fo_added_token / or_added_token / and_added_token )*, ts
seq_added_token := (ts,',',ts, element_token)+
fo_added_token := (ts,'/',ts, element_token)+
or_added_token := (ts,'|',ts, element_token)+ # not currently supported
and_added_token := (ts,'&',ts, element_token)+ # not currently supported
element_token := negpos_indicator?, ts, (literal/range/group/name),ts, occurence_indicator?
group := '(',ts, element_token, ( seq_added_token / fo_added_token / or_added_token / and_added_token )*, ts, ')'
negpos_indicator := '+'/'-'
occurence_indicator := '+'/'*'/'?'
unreportedname := '<', name, '>'
name := [a-zA-Z_],[a-zA-Z0-9_]*
<ts> := ( [ \011-\015]+ / ('#',-'\n'+,'\n')+ )*
literal := ("'",(CHARNOSNGLQUOTE/ESCAPEDCHAR)*,"'") / ('"',(CHARNODBLQUOTE/ESCAPEDCHAR)*,'"')
range := '[',CHARBRACE?,CHARDASH?, (CHARRANGE/CHARNOBRACE)*, CHARDASH?,']'
CHARBRACE := ']'
CHARDASH := '-'
CHARRANGE := CHARNOBRACE, '-', CHARNOBRACE
CHARNOBRACE := ESCAPEDCHAR/CHAR
CHAR := -[]]
ESCAPEDCHAR := '\\',( SPECIALESCAPEDCHAR / OCTALESCAPEDCHAR )
SPECIALESCAPEDCHAR := [\\abfnrtv]
OCTALESCAPEDCHAR := [0-7],[0-7]?,[0-7]?
CHARNODBLQUOTE := -[\\"]+
CHARNOSNGLQUOTE := -[\\']+
'''
def parse( instr = declaration, parserelement = 'declarationset' ):
	'''
	Parse instr with the hand-coded bootstrap table for the given
	production name, returning the raw mxTextTools tag() result.
	'''
	tbl = (
		(parserelement, Table, parsertuplelist[parsernamelist.index( parserelement )] ),
	)
	return tag( instr, tbl)
if __name__ == '__main__':
	# parse the default grammar (or the string given on the command
	# line) and pretty-print the resulting tag tree
	import sys, pprint
	pprint.pprint( apply( parse, tuple( sys.argv[1:] ) ) )

View File

@@ -0,0 +1,432 @@
from TextTools.TextTools import *
import bootstrap # the hand-coded parser
import operator, strop as string
def err( value ):
	# minimal error reporter for the generator; just echoes to stdout
	print value
class _BaseGenerator:
'''
Class providing the functions required to turn a
parse tree as generated by the bootstrap parser into
a new set of parser tuples. I.e a parser generator :)
Effectively this is the bootstrap generator.
'''
	def __init__( self, syntaxstring = bootstrap.declaration, parserelement = 'declarationset' ):
		'''
		Turn syntaxstring into a parsetree using
		the bootstrap module's parse command
		'''
		# should do some error checking in here :)
		self.syntaxstring = syntaxstring
		self.parsetree = bootstrap.parse( syntaxstring, parserelement )[1][0] # the child list
		self.nameset = []	# production names, parallel to tupleset
		self.tupleset = []	# parsing tables, filled in by build()
	def stringval( self, tuple ):
		'''
		Return the string value for a parse-result tuple
		(slices the syntax string by the tuple's start/stop offsets)
		'''
		return self.syntaxstring[ tuple[1]:tuple[2] ]
	def build( self, prebuiltnodes=() ):
		'''
		Build a new parsing table from the syntax string.
		New parsers may be accessed using the parserbyname method.
		The pre-built nodes are parsing tables for inclusion in the grammar
		Added version 1.0.1 to provide greater extensibility.
		'''
		# first register all declared names to reserve their indicies
		#if self.__class__.__name__ == 'Generator':
		#	import pdb
		#	pdb.set_trace()
		for key, value in prebuiltnodes:
			self.nameset.append( key )
			self.tupleset.append( value )
		for decl in self.parsetree[3]:
			#print decl
			name = self.stringval( decl[3][0] )
			self.nameset.append( name )
			self.tupleset.append( None)
		#print 'Declared names:',self.nameset
		# compile each declaration; names were pre-registered above so
		# forward references resolve by list index.
		# NOTE(review): this loop runs over len(nameset), which includes
		# any prebuilt nodes, yet indexes self.parsetree[3][i] directly —
		# looks misaligned when prebuiltnodes is non-empty; confirm.
		for i in range( len( self.nameset)):
			#print '''Processing declaration %s '''% self.nameset[i]
			dataset = self.group( ('group',1,2, self.parsetree[3][i][3][1:]), self )
			if dataset:
				self.tupleset[i] = tuple( dataset)
	def parserbyname( self, name ):
		'''
		Retrieve a single parsing tuple by its production name
		(prints a message and returns an empty tuple when unknown)
		'''
		try:
			return self.tupleset[ self.nameset.index( name ) ]
		except ValueError:
			print '''Could not find parser tuple of name''', name
			return ()
	def allparsers (self):
		'''
		Return a list of (productionname, parsingtuple) values
		suitable for passing to another generator as its pre-calculated
		set of parsing tuples. (See method build)
		'''
		returnvalue = []
		for i in range(len( self.nameset)):
			returnvalue.append ( (self.nameset[i],self.tupleset[i]) )
		return returnvalue
### Actual processing functions...
	def element_token( self, eltup, genobj, reportname=None ):
		'''
		Compile a single element_token parse node into a list of
		tag-table entries: collect its negative/optional/repeating
		modifiers, then dispatch on the element's type
		(literal/range/group/name) via the same-named handler method.
		'''
		# Determine the type of element
		# Descry the various options for the element
		negative = optional = repeating = element = None
		for data in eltup[3]:
			if data[0] == 'negpos_indicator':
				if genobj.stringval ( data ) == '-':
					negative = 1
			elif data[0] == 'occurence_indicator':
				data = genobj.stringval ( data )
				if data == '*':
					optional = 1
					repeating = 1
				elif data == '+':
					repeating = 1
				elif data == '?':
					optional = 1
				else:
					err( 'Unknown occurence indicator '+ data )
			else:
				element = data
		# call the appropriate handler
		try:
			return getattr( self, element [0])( element, genobj, negative, repeating, optional)
		except AttributeError,x:
			err( '''Didn't find handler for element type %s, parser build aborted'''%element [0])
			raise x
	def group( self, els, genobj, negative= None, repeating=None, optional = None, reportname=None):
		'''
		Determine what type of group we're dealing with and determine what
		function to call, then call it.
		'''
		groupset = els[3]
		# groupset is an element_token followed by a possible added_token
		if groupset:
			els = []
			els.append( groupset[0] )
			if len(groupset) > 1:
				# the added_token's children join the first element, and
				# its type selects the combining rule (sequence vs first-of)
				els[len(els):] = groupset[1][3]
				gtype = groupset[1][0]
				if gtype == 'seq_added_token':
					return self.seq( els, genobj, negative, repeating, optional, reportname )
				elif gtype == 'fo_added_token':
					return self.fo( els, genobj, negative, repeating, optional, reportname )
				else:
					err( '''An as-yet undefined group type was used! %s'''%gtype )
			else: # default "sequence" of one... could do more work and make it process the results specifically, but that's optimisation ;)
				return self.seq( els, genobj, negative, repeating, optional, None )
		else:
			return []
def seq( self, els, genobj, negative= None, repeating=None, optional = None, reportname=None ):
elset = map( self.element_token, els, [genobj]*len( els) )
elset = reduce( operator.add, elset )
if negative:
if repeating:
if optional:
return [(None, SubTable, (( None, SubTable,( (None, SubTable, tuple( elset), 2,1), (None, Fail, Here),(None,Skip,1) ), 2,1 ), ( None, EOF, Here, -1,1 ), ), ), ]
else: # not optional
return [(None, SubTable, (( None, SubTable,( (None, SubTable, tuple( elset), 2,1), (None, Fail, Here),(None,Skip,1) )), ( None, SubTable,( (None, SubTable, tuple( elset), 2,1), (None, Fail, Here),(None,Skip,1) ), 2,1 ), ( None, EOF, Here, -1,1 ), ), ), ]
else: # single
if optional:
return [ (None, SubTable, ( (None, SubTable, tuple( elset), 2,1), (None, Fail, Here), (None, Skip, 1) ),1,1) ]
else: # not optional
return [ (None, SubTable, ( (None, SubTable, tuple( elset), 2,1), (None, Fail, Here), (None, Skip, 1) )) ]
else: # positive
if repeating:
if optional:
return [ (None, SubTable, tuple( elset), 1,0) ]
else: # not optional
return [ (None, SubTable, tuple( elset)), (None, SubTable, tuple( elset), 1,0) ]
else: # single
if optional:
return [ (None, SubTable, tuple( elset), 1,1) ]
else: # not optional
return [ (None, SubTable, tuple( elset)) ]
def fo( self, els, genobj, negative= None, repeating=None, optional = None, reportname=None ):
elset = map( self.element_token, els, [genobj]*len( els) )
elset = reduce( operator.add, elset )
elset = []
for el in els:
dataset = self.element_token( el, genobj )
if len( dataset) == 1 and len(dataset[0]) == 3: # we can alter the jump states with impunity
elset.append( dataset[0] )
else: # for now I'm eating the inefficiency and doing an extra SubTable for all elements to allow for easy calculation of jumps within the FO group
elset.append( (None, SubTable, tuple( dataset )) )
if negative:
# all negative FO's have the meaning "a positive, single, non-optional FO not matching"
# the flags modify how failure and continuation are handled in that case, so they can use
# the same procset.
# Note: Negative FO groups are _very_ heavy, they have normally about 4 subtable calls
# guess we'll find out how well mxTextTools handles recursive tables :)
procset = []
for i in range( len( elset) -1): # note that we have to treat last el specially
ival = elset[i] + (1,len(elset)-i)
procset.append( ival ) # if success, jump past end
procset.append( elset[-1] + (2,1) ) # will cause a failure if last element doesn't match
procset.append( (None, Fail, Here ) )
procset.append( (None, Skip, 1) )
# if the following looks familiar you probably looked at seq above
if repeating:
if optional:
return [ (None, SubTable, ( (None, SubTable, tuple( procset), 2,1), (None, EOF, Here,-1,1) ) ) ]
else: # not optional
return [ (None, SubTable, ( (None, SubTable, tuple( procset)),(None, SubTable, tuple( procset), 2,1), (None, EOF, Here,-1,1) ) ) ]
else: # single
if optional:
return [ (None, SubTable, tuple( procset), 1,1) ]
else: # not optional
return [ (None, SubTable, tuple( procset) ) ]
else: # positive
if repeating:
if optional:
procset = []
for i in range( len( elset)):
procset.append( elset[i] + (1,-i) ) # if success, go back to start which is -i elements back
return procset
else: # not optional
procset = []
for i in range( len( elset)-1):
procset.append( elset[i] + (1, len(elset)-i+1) ) # if success, jump to later section
procset.append( elset[-1] + ( 1, 2) ) # will cause a failure if last element doesn't match using an explicit fail command
procset.append( (None, Fail, Here) ) # will cause a failure if last element doesn't match using an explicit fail command
for i in range( len( elset)-1):
procset.append( elset[i] + (1, -i) ) # if success, go back to start which is -i elements back
procset.append( elset[-1] + ( 1, 1-(len(elset)) ) ) # will cause a failure if last element doesn't match using an explicit fail command
return procset
else: # single
if optional:
procset = []
for i in range( len( elset)):
procset.append( elset[i] + (1,len(elset)-i) ) # if success, jump past end
return procset
else: # not optional
procset = []
for i in range( len( elset) -1): # note that we have to treat last el specially
procset.append( elset[i] + (1,len(elset)-i) ) # if success, jump past end
procset.append( elset[-1] ) # will cause a failure if last element doesn't match
return procset
def name( self, value, genobj, negative = None, repeating = None, optional = None, reportname=None ):
svalue = genobj.stringval( value )
try:
sindex = genobj.nameset.index( svalue )
except ValueError: # eeps, a value not declared
try:
sindex = genobj.nameset.index( '<'+svalue+'>' )
svalue = None
except ValueError:
err( '''The name %s could not be found in the declarationset. The parser will not compile.'''%svalue)
genobj.nameset.append( svalue )
genobj.tupleset.append( None )
sindex = len( genobj.nameset) - 1
if negative:
if repeating:
if optional:
return [ (svalue, SubTable, ( (None, TableInList, (genobj.tupleset, sindex), 1,3), (None, EOF, Here,1,2), (None,Skip,1,-2,-2) ) ) ]
else: # not optional
return [ (svalue, SubTable, ( (None, TableInList, (genobj.tupleset, sindex),2,1),(None, Fail, Here),(None, Skip, 1), (None, TableInList, (genobj.tupleset, sindex), 1,3), (None, EOF, Here,1,2), (None,Skip,1,-2,-2) ) ) ]
else: # single
if optional:
return [ (None, SubTable, ( (None, TableInList, (genobj.tupleset, sindex),2,1),(None, Fail, Here),(svalue, Skip, 1) ),1,1) ]
else: # not optional
return [ (None, SubTable, ( (None, TableInList, (genobj.tupleset, sindex),2,1),(None, Fail, Here),(svalue, Skip, 1) )) ]
else: # positive
if repeating:
if optional:
return [ (svalue, TableInList, (genobj.tupleset, sindex), 1,0) ]
else: # not optional
return [ (svalue, TableInList, (genobj.tupleset, sindex)), (svalue, TableInList, (genobj.tupleset, sindex),1,0) ]
else: # single
if optional:
return [ (svalue, TableInList, (genobj.tupleset, sindex), 1,1) ]
else: # not optional
return [ (svalue, TableInList, (genobj.tupleset, sindex)) ]
specialescapedmap = {
'a':'\a',
'b':'\b',
'f':'\f',
'n':'\n',
'r':'\r',
't':'\t',
'v':'\v',
'\\':'\\',
'"':'"',
"'":"'",
}
def escapedchar( self, el, genobj ):
    """Decode an ESCAPEDCHAR parse element into the character it denotes.

    Named escapes are resolved through self.specialescapedmap; octal
    escapes (\\NNN) are converted digit by digit.  Returns '' when the
    child element is of neither kind.
    """
    child = el[3][0]
    kind = child[0]
    if kind == 'SPECIALESCAPEDCHAR':
        return self.specialescapedmap[ genobj.stringval( child ) ]
    if kind == 'OCTALESCAPEDCHAR':
        digits = genobj.stringval( child )
        code = 0
        # least-significant octal digit first
        for power, digit in enumerate( reversed( digits ) ):
            code = code + int( digit ) * (8 ** power)
        return chr( code )
    return ''
def literal( self, value, genobj, negative = None, repeating=None, optional=None, reportname=None ):
    '''
    Calculate the tag-table for a literal element token.

    The literal's characters are decoded from the parse tree (including
    backslash escapes), then one of eight table shapes is produced
    depending on the negative/repeating/optional flags.  Single-character
    literals use the Is command, multi-character ones the Word command.

    Fix: the final (positive, single, required) branch returned the Word
    form for both lengths; single-character literals now use Is, matching
    every other single-character branch.
    '''
    svalue = ''
    for el in value[3]:
        if el[0] in ('CHARNOSNGLQUOTE', 'CHARNODBLQUOTE'):
            svalue = svalue+genobj.stringval( el )
        elif el[0] == 'ESCAPEDCHAR':
            svalue = svalue + self.escapedchar( el, genobj )
    if negative:
        if repeating: # a repeating negative value, a "search" in effect
            if optional: # if fails, then go to end of file
                return [ (None, sWordStart, BMS( svalue ),1,2), (None, Move, ToEOF ) ]
            else: # must first check to make sure the current position is not the word, then the same
                return [ (None, Word, svalue, 2,1),(None, Fail, Here),(None, sWordStart, BMS( svalue ),1,2), (None, Move, ToEOF ) ]
        else: # a single-character test saying "not a this"
            if optional: # test for a success, move back if success, move one forward if failure
                if len(svalue) > 1:
                    return [ (None, Word, svalue, 2,1),
                        (None, Skip, -len(svalue), 2,2), # backup if this was the word to start of word, succeed
                        (None, Skip, 1 ) ] # else just move one character and succeed
                else: # Uses Is test instead of Word test, should be faster
                    return [ (None, Is, svalue, 2,1),
                        (None, Skip, -1, 2,2), # backtrack
                        (None, Skip, 1 ) ] # else just move one character and succeed
            else: # must find at least one character not part of the word, so
                if len(svalue) > 1:
                    return [ (None, Word, svalue, 2,1),
                        (None, Fail, Here),
                        (None, Skip, 1 ) ] # else just move one character and succeed
                else: # must fail if it finds or move one forward
                    return [ (None, Is, svalue, 2,1),
                        (None, Fail, Here),
                        (None, Skip, 1 ) ] # else just move one character and succeed
    else: # positive
        if repeating:
            if optional:
                if len(svalue) > 1:
                    return [ (None, Word, svalue, 1,0) ]
                else:
                    return [ (None, Is, svalue, 1,0) ]
            else: # not optional: one required match, then loop
                if len(svalue) > 1:
                    return [ (None, Word, svalue),(None, Word, svalue,1,0) ]
                else:
                    return [ (None, Is, svalue),(None, Is, svalue,1,0) ]
        else: # not repeating
            if optional:
                if len(svalue) > 1:
                    return [ (None, Word, svalue, 1,1) ]
                else:
                    return [ (None, Is, svalue, 1,1) ]
            else: # not optional
                if len(svalue) > 1:
                    return [ (None, Word, svalue) ]
                else:
                    # fixed: single-character Is command (was a duplicated Word form)
                    return [ (None, Is, svalue) ]
def charnobrace( self, cval, genobj ):
    """Return the character value of a CHARNOBRACE element.

    Escaped children are decoded via escapedchar(); plain characters are
    taken straight from the source text.
    """
    first_child = cval[3][0]
    if first_child[0] == 'ESCAPEDCHAR':
        return self.escapedchar( first_child, genobj )
    return genobj.stringval( cval )
def range( self, value, genobj, negative = None, repeating=None, optional=None, reportname=None ):
    """Build the tag-table fragment for a character-range ([...]) token.

    The member characters are collected into one string 'svalue'
    (expanding CHARRANGE spans in either direction), then one of eight
    set commands (AllIn/IsIn or the negated AllNotIn/IsNotIn variants)
    is chosen from the negative/repeating/optional flags.
    """
    dataset = []
    for cval in value[3]:
        if cval[0] == 'CHARBRACE':
            dataset.append( ']')
        elif cval[0] == 'CHARDASH':
            dataset.append( '-')
        elif cval[0] == 'CHARNOBRACE':
            dataset.append( self.charnobrace( cval, genobj ) )
        elif cval[0] == 'CHARRANGE':
            # expand "a-z" style spans; accept reversed bounds too
            start = ord( self.charnobrace( cval[3][0], genobj ) )
            end = ord( self.charnobrace( cval[3][1], genobj ) )
            if start < end:
                dataset.append( string.join( map( chr, range( start, end +1 ) ), '' ) )
            else:
                dataset.append( string.join( map( chr, range( end, start +1 ) ), '' ) )
        else:
            dataset.append( genobj.stringval( cval ) )
    if negative:
        #svalue = set( string.join( dataset, '' ), 0 )
        svalue = string.join( dataset, '' )
    else:
        #svalue = set( string.join( dataset, '' ), 1)
        svalue = string.join( dataset, '' )
    if negative:
        if repeating:
            if optional:
                #return [ (None, AllInSet, svalue, 1 ) ]
                return [ (None, AllNotIn, svalue, 1 ) ]
            else: # not optional
                #return [ (None, AllInSet, svalue ) ]
                return [ (None, AllNotIn, svalue ) ]
        else: # not repeating
            if optional:
                #return [ (None, IsInSet, svalue, 1 ) ]
                return [ (None, IsNotIn, svalue, 1 ) ]
            else: # not optional
                #return [ (None, IsInSet, svalue ) ]
                return [ (None, IsNotIn, svalue ) ]
    else:
        if repeating:
            if optional:
                #return [ (None, AllInSet, svalue, 1 ) ]
                return [ (None, AllIn, svalue, 1 ) ]
            else: # not optional
                #return [ (None, AllInSet, svalue ) ]
                return [ (None, AllIn, svalue ) ]
        else: # not repeating
            if optional:
                #return [ (None, IsInSet, svalue, 1 ) ]
                return [ (None, IsIn, svalue, 1 ) ]
            else: # not optional
                #return [ (None, IsInSet, svalue ) ]
                return [ (None, IsIn, svalue ) ]
class Generator( _BaseGenerator ):
    """Generator specialised for user-supplied language declarations.

    'syntaxstring' is parsed with the bootstrap 'parser'; the resulting
    child-element list is stored as the parse tree.  nameset/tupleset
    start empty and are filled in while productions are compiled.
    """
    def __init__( self, syntaxstring , parser ):
        self.nameset = []
        self.tupleset = []
        self.syntaxstring = syntaxstring
        # keep only the element list of the top-level tag() result
        self.parsetree = [0,1,2, tag( syntaxstring, parser )[1] ]
def buildParser( declaration, prebuiltnodes=() ):
    '''
    End-developer entry point: compile a language-definition text into an
    application-specific parser.

    'declaration' is the text of a language definition file.  The parsing
    tuple for each declared production is available on the returned
    object as object.parserbyname( 'declaredname' ).
    '''
    bootstrap = _BaseGenerator( )
    bootstrap.build()
    declarationparser = bootstrap.parserbyname( 'declarationset' )
    newgen = Generator( declaration, declarationparser )
    newgen.build( prebuiltnodes=prebuiltnodes )
    return newgen

View File

@@ -0,0 +1,13 @@
3D utilities
(c) onk, 1998-2001
A few low-level math utilities for 2D/3D computations, such as:
- area.py: solving close packing problems in 2D
- vect.py: low level / OO like matrix and vector calculation module
- vectools.py: more vector tools for intersection calculation, etc.
- tree.py: binary trees (used by the BSPtree module)

View File

@@ -0,0 +1,2 @@
# Public submodules of the util package (what 'from util import *' exposes).
__all__ = ["vect", "vectools", "area", "quat", "blvect", "tree"]

View File

@@ -0,0 +1,109 @@
"""Quaternion module
This module provides conversion routines between Matrices, Quaternions (rotations around
an axis) and Eulers.
(c) 2000, onk@section5.de """
# NON PUBLIC XXX
from math import sin, cos, acos
from util import vect
reload(vect)
Vector = vect.Vector
Matrix = vect.Matrix
class Quat:
    """Simple quaternion class.

    A quaternion is stored as the 4-tuple (w, x, y, z).  Build one from a
    rotation axis (x, y, z) and right-hand angle 'theta' with
    fromRotAxis((x, y, z), theta); multiplying two quaternions chains the
    corresponding rotations efficiently."""

    def __init__(self, w = 1.0, x = 0.0, y = 0.0, z = 0.0):
        self.v = (w, x, y, z)

    def asRotAxis(self):
        """returns rotation axis (x, y, z) and angle phi (right hand rotation)"""
        half_angle = acos(self.v[0])
        if half_angle == 0.0:
            # no rotation: any axis works, pick z by convention
            return Vector(0.0, 0.0, 1.0), 0.0
        s = 1 / (sin(half_angle))
        axis = Vector(s * self.v[1], s * self.v[2], s * self.v[3])
        return axis, 2.0 * half_angle

    def __mul__(self, other):
        "Quaternion product self * other"
        w1, x1, y1, z1 = self.v
        w2, x2, y2, z2 = other.v
        w = w1*w2 - x1*x2 - y1*y2 - z1*z2
        x = w1*x2 + x1*w2 + y1*z2 - z1*y2
        y = w1*y2 - x1*z2 + y1*w2 + z1*x2
        z = w1*z2 + x1*y2 - y1*x2 + z1*w2
        return Quat(w, x, y, z)

    def asMatrix(self):
        "returns the equivalent 3x3 rotation matrix"
        w, x, y, z = self.v
        row1 = Vector(1.0 - 2.0 * (y*y + z*z), 2.0 * (x*y + w*z), 2.0 * (x*z - w*y))
        row2 = Vector(2.0 * (x*y - w*z), 1.0 - 2.0 * (x*x + z*z), 2.0 * (y*z + w*x))
        row3 = Vector(2.0 * (x*z + w*y), 2.0 * (y*z - w*x), 1.0 - 2.0 * (x*x + y*y))
        return Matrix(row1, row2, row3)

    def asEuler(self, transp = 0):
        "returns a (phi_x, phi_y, phi_z) Euler-angle triple for this rotation"
        from math import asin, atan2
        w, x, y, z = self.v
        x2 = x*x
        z2 = z*z
        tmp = x2 - z2
        r = (w*w + tmp - y*y )
        phi_z = atan2(2.0 * (x * y + w * z) , r)
        phi_y = asin(2.0 * (w * y - x * z))
        phi_x = atan2(2.0 * (w * x + y * z) , (r - 2.0*tmp))
        return phi_x, phi_y, phi_z
def fromRotAxis(axis, phi):
    """computes quaternion from (axis, phi)"""
    half = 0.5 * phi
    scale = sin(half)
    return Quat(cos(half), axis[0] * scale, axis[1] * scale, axis[2] * scale)
#def fromEuler1(eul):
#qx = fromRotAxis((1.0, 0.0, 0.0), eul[0])
#qy = fromRotAxis((0.0, 1.0, 0.0), eul[1])
#qz = fromRotAxis((0.0, 0.0, 1.0), eul[2])
#return qz * qy * qx
def fromEuler(eul):
    """Builds a quaternion from a (phi_x, phi_y, phi_z) Euler triple."""
    from math import sin, cos
    half_x = eul[0] / 2.0
    half_y = eul[1] / 2.0
    half_z = eul[2] / 2.0
    cx, sx = cos(half_x), sin(half_x)
    cy, sy = cos(half_y), sin(half_y)
    cz, sz = cos(half_z), sin(half_z)
    w = cx * cy * cz - sx * sy * sz
    x = sx * cy * cz - cx * sy * sz
    y = cx * sy * cz + sx * cy * sz
    z = cx * cy * sz + sx * sy * cz
    return Quat(w, x, y, z)

View File

@@ -0,0 +1,215 @@
# Base class for a tree structure
# Object-oriented programming course, winter term 1997
#
# (c) Martin Strubel, Faculty of Physics, University of Konstanz
# (strubi@gandalf.physik.uni-konstanz.de)
# updated 08.2001
"""Simple binary tree module
This module demonstrates a binary tree class.
Example::
a = [5, 8, 8, 3, 7, 9]
t1 = Tree()
t1.fromList(a)
Operations on tree nodes are done by writing a simple operator class::
class myOp:
def __init__(self):
...
def operate(self, node):
do_something(node)
and calling the recursive application::
op = MyOp()
t1.recurse(op)
Objects inserted into the tree can be of any kind, as long as they define a
comparison operation.
"""
def recurse(node, do):
    """In-order traversal: apply callable 'do' to every node of the
    subtree rooted at 'node' (left subtree, the node itself, right
    subtree).  'node' may be None (empty subtree).
    """
    if node is None:  # identity test (was '== None'); robust against custom __eq__
        return
    recurse(node.left, do)
    do(node)
    recurse(node.right, do)
class Nullnode:
    """Sentinel node type; the shared instance Nil marks empty children."""
    def __init__(self):
        self.left = None
        self.right = None
        self.depth = 0
    def recurse(self, do):
        "in-order traversal, applying callable 'do' to every real node"
        if self == Nil:
            return
        self.left.recurse(do)
        do(self)
        self.right.recurse(do)

# the single shared sentinel instance
Nil = Nullnode()

def nothing(x):
    "identity function; default 'do' callback for Node.find()"
    return x

class Node(Nullnode):
    """Binary-search-tree node holding an arbitrary comparable 'data'."""
    def __init__(self, data = None):
        self.left = Nil
        self.right = Nil
        self.data = data
        self.depth = 0          # distance from the root; set on insert
    def __repr__(self):
        return "Node: %s" % self.data
    def insert(self, node):
        "(node) - insert 'node' into the subtree rooted at self"
        if node.data < self.data:
            if self.left != Nil:
                return self.left.insert(node)
            else:
                node.depth = self.depth + 1
                self.left = node
                return self
        elif node.data > self.data:
            if self.right != Nil:
                return self.right.insert(node)
            else:
                node.depth = self.depth + 1
                self.right = node
                return self
        else:
            return self.insert_equal(node)
    def find(self, node, do = nothing):
        """(node, do) - search by node.data; on an exact match return
        do(matching node), otherwise return the closest node visited"""
        if node.data < self.data:
            if self.left != Nil:
                return self.left.find(node, do)
            else:
                return self
        elif node.data > self.data:
            if self.right != Nil:
                return self.right.find(node, do)
            else:
                return self
        else:
            return do(self)
    def remove(self, node):
        # The original body referenced an undefined name ('newpar') and
        # always died with a NameError; fail loudly and clearly instead.
        raise NotImplementedError("Node.remove is not implemented")
    def insert_equal(self, node):
        "hook invoked when inserting a duplicate value"
        self.equal(node)
        return self
    def found_equal(self, node):
        self.equal(node)
    def equal(self, node):
        # handle special; parenthesised print works under Python 2 and 3
        print("node (%s) is equal self (%s)" % (node, self))
    def copy(self):
        "return a shallow copy (data only, children unset)"
        n = Node(self.data)
        return n
    def recursecopy(self):
        "deep copy of the whole subtree"
        # NOTE(review): copies self.flag, which nothing in this module ever
        # sets -- presumably assigned by external users of this class; verify.
        n = Node()
        n.data = self.data
        n.flag = self.flag
        if self.left != Nil:
            n.left = self.left.recursecopy()
        if self.right != Nil:
            n.right = self.right.recursecopy()
        return n
class NodeOp:
    """Traversal helper: collects the 'data' of every visited node in
    self.list (pass the bound .copy method as a 'do' callback)."""
    def __init__(self):
        self.list = []
    def copy(self, node):
        "append node.data to the collected list"
        self.list.append(node.data)
class Tree:
    """Binary-tree container wrapping a root node.

    Ordering decisions are delegated to the node objects themselves; the
    tree only tracks the root and the number of inserted elements.
    """
    def __init__(self, root = None):
        self.root = root
        self.n = 0          # number of elements added via insert()
    def __radd__(self, other):
        # 'tree1 + tree2' -> merged copy of both trees
        # (parenthesised print works identically under Python 2 and 3)
        print(other)
        t = self.copy()
        t.merge(other)
        return t
    def __repr__(self):
        return "Tree with %d elements" % self.n
    def insert(self, node):
        "(node) - insert 'node', keeping the element count up to date"
        if self.root == None:
            self.root = node
        else:
            self.root.insert(node)
        self.n += 1
    def recurse(self, do):
        "(do) - apply callable 'do' to every node, in-order"
        if self.root == None:
            return
        self.root.recurse(do)
    def find(self, node):
        "(node) - delegate the search to the root node"
        return self.root.find(node)
    def remove(self, node):
        # NOTE(review): does not decrement self.n, and Node.remove itself
        # is unfinished -- confirm before relying on removal.
        self.root.remove(node)
    def copy(self):
        "make true copy of self"
        t = newTree()
        c = NodeOp()
        self.recurse(c.copy)
        t.fromList(c.list)
        return t
    def asList(self):
        "return the stored elements as an in-order list"
        c = NodeOp()
        self.recurse(c.copy)
        return c.list
    def fromList(self, list):
        "(list) - insert every item of 'list' into the tree"
        for item in list:
            n = Node(item)
            self.insert(n)
    def insertcopy(self, node):
        "(node) - insert a copy of 'node'"
        n = node.copy()
        self.insert(n)
    def merge(self, other):
        "(other) - insert copies of all of 'other's nodes into self"
        other.recurse(self.insertcopy)
# EXAMPLE:
newTree = Tree
def printnode(x):
    "Debug helper: print a node's repr together with its stored depth."
    # parenthesised single-argument print is identical under Python 2 and 3
    print("Element: %s, depth: %s" % (x, x.depth))
def test():
    """Module demo: build two trees from lists, print them in-order,
    then show adding (__radd__) and merging."""
    a = [5, 8, 8, 3, 7, 9]
    t1 = Tree()
    t1.fromList(a)
    b = [12, 4, 56, 7, 34]
    t2 = Tree()
    t2.fromList(b)
    # asList() yields the elements sorted (in-order traversal);
    # duplicate inserts were reported by Node.equal() during fromList()
    print "tree1:"
    print t1.asList()
    print "tree2:"
    print t2.asList()
    print '-----'
    print "Trees can be added:"
    t3 = t1 + t2
    print t3.asList()
    print "..or alternatively merged:"
    t1.merge(t2)
    print t1.asList()

View File

@@ -0,0 +1,480 @@
#------------------------------------------------------------------------------
# simple 3D vector / matrix class
#
# (c) 9.1999, Martin Strubel // onk@section5.de
# updated 4.2001
#
# This module consists of a rather low level command oriented
# and a more OO oriented part for 3D vector/matrix manipulation
#
# For documentation, please look at the EXAMPLE code below - execute by:
#
# > python vect.py
#
#
# permission to use in scientific and free programs granted
# In doubt, please contact author.
#
# history:
#
# 1.5: Euler/Rotation matrix support moved here
# 1.4: high level Vector/Matrix classes extended/improved
#
"""Vector and matrix math module
Version 1.5
by onk@section5.de
This is a lightweight 3D matrix and vector module, providing basic vector
and matrix math plus a more object oriented layer.
For examples, look at vect.test()
"""
# module version, mirrored in the docstring above
VERSION = 1.5
# numerical fuzz used by Matrix.asEuler() when validating the matrix
TOLERANCE = 0.0000001
# 'type' tags carried by the high-level Vector/Matrix instances below
VectorType = 'Vector3'
MatrixType = 'Matrix3'
FloatType = type(1.0)  # the builtin float type
def dot(x, y):
    "(x,y) - Returns the dot product of vector 'x' and 'y'"
    return (x[0] * y[0] + x[1] * y[1] + x[2] * y[2])

def cross(x, y):
    "(x,y) - Returns the cross product of vector 'x' and 'y'"
    return (x[1] * y[2] - x[2] * y[1],
            x[2] * y[0] - x[0] * y[2],
            x[0] * y[1] - x[1] * y[0])

def matrix():
    "Returns Unity matrix"
    return ((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0))

def matxvec(m, x):
    "y = matxvec(m,x) - Returns product of Matrix 'm' and vector 'x'"
    vx = m[0][0] * x[0] + m[1][0] * x[1] + m[2][0] * x[2]
    vy = m[0][1] * x[0] + m[1][1] * x[1] + m[2][1] * x[2]
    vz = m[0][2] * x[0] + m[1][2] * x[1] + m[2][2] * x[2]
    return (vx, vy, vz)

def matfromnormal(z, y = (0.0, 1.0, 0.0)):
    """(z, y) - returns transformation matrix for local coordinate system
    where 'z' = local z, with optional *up* axis 'y'"""
    # build a right-handed frame: x from (up x z), then re-derive y
    y = norm3(y)
    x = cross(y, z)
    y = cross(z, x)
    return (x, y, z)

def matxmat(m, n):
    "(m,n) - Returns matrix product of 'm' and 'n'"
    return (matxvec(m, n[0]), matxvec(m, n[1]), matxvec(m, n[2]))

def len(x):
    "(x) - Returns the length of vector 'x'"
    # NOTE: intentionally shadows the builtin len() inside this module;
    # kept for backward compatibility (see the len3 alias below)
    import math
    return math.sqrt(x[0]*x[0] + x[1]*x[1] + x[2]*x[2])

len3 = len # compatibility alias

def norm3(x):
    "(x) - Returns the vector 'x' normed to 1.0"
    import math
    r = math.sqrt(x[0]*x[0] + x[1]*x[1] + x[2]*x[2])
    return (x[0]/r, x[1]/r, x[2]/r)

def add3(x, y):
    "(x,y) - Returns vector ('x' + 'y')"
    return (x[0]+y[0], x[1]+y[1], x[2]+y[2])

def sub3(x, y):
    "(x,y) - Returns vector ('x' - 'y')"
    return ((x[0] - y[0]), (x[1] - y[1]), (x[2] - y[2]))

def dist3(x, y):
    "(x,y) - Returns euclidian distance from Point 'x' to 'y'"
    return len3(sub3(x, y))

def scale3(s, x):
    "(s,x) - Returns the vector 'x' scaled by 's'"
    return (s*x[0], s*x[1], s*x[2])

def scalemat(s, m):
    "(s,m) - Returns the Matrix 'm' scaled by 's'"
    return (scale3(s, m[0]), scale3(s, m[1]), scale3(s, m[2]))

def invmatdet(m):
    """n, det = invmatdet(m) - Inverts matrix without determinant correction.
    The adjugate matrix 'n' and the determinant 'det' are returned."""
    # Matrix: (row vectors)
    # 00 10 20
    # 01 11 21
    # 02 12 22
    wk = [0.0, 0.0, 0.0]
    t = m[1][1] * m[2][2] - m[1][2] * m[2][1]
    wk[0] = t
    det = t * m[0][0]
    t = m[2][1] * m[0][2] - m[0][1] * m[2][2]
    wk[1] = t
    det = det + t * m[1][0]
    t = m[0][1] * m[1][2] - m[1][1] * m[0][2]
    wk[2] = t
    det = det + t * m[2][0]
    v0 = (wk[0], wk[1], wk[2])
    t = m[2][0] * m[1][2] - m[1][0] * m[2][2]
    wk[0] = t
    det = det + t * m[0][1]
    t = m[0][0] * m[2][2] - m[0][2] * m[2][0]
    wk[1] = t
    det = det + t * m[1][1]
    t = m[1][0] * m[0][2] - m[0][0] * m[1][2]
    wk[2] = t
    det = det + t * m[2][1]
    v1 = (wk[0], wk[1], wk[2])
    t = m[1][0] * m[2][1] - m[1][1] * m[2][0]
    wk[0] = t
    det = det + t * m[0][2]
    t = m[2][0] * m[0][1] - m[0][0] * m[2][1]
    wk[1] = t
    det = det + t * m[1][2]
    t = m[0][0] * m[1][1] - m[1][0] * m[0][1]
    wk[2] = t
    det = det + t * m[2][2]
    v2 = (wk[0], wk[1], wk[2])
    # the determinant was accumulated three times over
    return ((v0, v1, v2), det/3.0)

def invmat(m):
    """(m) - Inverts the 3x3 matrix 'm'; raises ZeroDivisionError for a
    (near-)singular matrix."""
    n, det = invmatdet(m)
    # fixed: test |det| -- the original rejected every matrix with a
    # negative determinant (e.g. mirror matrices), which are invertible
    if abs(det) < 0.000001:
        raise ZeroDivisionError("minor rank matrix")
    d = 1.0/det
    return (scale3(d, n[0]),
            scale3(d, n[1]),
            scale3(d, n[2]))

def transmat(m):
    # can be used to invert orthogonal rotation matrices
    "(m) - Returns transposed matrix of 'm'"
    return ((m[0][0], m[1][0], m[2][0]),
            (m[0][1], m[1][1], m[2][1]),
            (m[0][2], m[1][2], m[2][2]))

def coplanar(verts):
    """(verts) - checks whether the list of 4 vertices is coplanar;
    returns 1 if so, 0 otherwise.

    Fixed: the original called sub3(verts[3] - verts[2]), a TypeError
    (tuples don't support '-'); the points must be separate arguments."""
    a = sub3(verts[1], verts[0])
    b = sub3(verts[2], verts[1])
    # NOTE(review): signed (not absolute) test -- strongly negative triple
    # products also count as coplanar here; confirm the intent.
    if dot(cross(a, b), sub3(verts[3], verts[2])) < 0.0001:
        return 1
    return 0
################################################################################
# Matrix / Vector highlevel
# (and slower)
# TODO: include better type checks !
class Vector:
    """Vector class

    Emulates a float triple and provides vector operations (addition,
    scaling, dot and cross products).  Usage::
        v = Vector(x, y, z)
    where 'x', 'y', 'z' are float coordinates."""
    def __init__(self, x = 0.0, y = 0.0, z = 0.0):
        # don't change these to lists, very ugly referencing details...
        self.v = (x, y, z)
        # ... can lead to same data being shared by several matrices..
        # (unless you want this to happen)
        self.type = VectorType
    def __neg__(self):
        return self.new(-self.v[0], -self.v[1], -self.v[2])
    def __getitem__(self, i):
        "Tuple emulation"
        return self.v[i]
    def new(self, *args):
        "Factory hook; Matrix overrides this so arithmetic returns its own type"
        return Vector(args[0], args[1], args[2])
    def __cmp__(self, v):
        """Comparison only supports '==' (0 when equal, 1 otherwise).

        Fixed: the original compared self[1] twice and never checked the
        z component, so vectors differing only in z compared equal."""
        if self[0] == v[0] and self[1] == v[1] and self[2] == v[2]:
            return 0
        return 1
    def __add__(self, v):
        "Addition of 'Vector' objects"
        return self.new(self[0] + v[0],
                        self[1] + v[1],
                        self[2] + v[2])
    def __sub__(self, v):
        "Subtraction of 'Vector' objects"
        return self.new(self[0] - v[0],
                        self[1] - v[1],
                        self[2] - v[2])
    def __rmul__(self, s): # scaling by s
        return self.new(s * self[0], s * self[1], s * self[2])
    def __mul__(self, t):
        """Left multiplication supports:
        - scaling with a float value
        - a *Matrix* operand (each matrix row scaled by the matching component)
        - dot product with another *Vector*"""
        if type(t) == FloatType:
            return self.__rmul__(t)
        elif t.type == MatrixType:
            return Matrix(self[0] * t[0], self[1] * t[1], self[2] * t[2])
        else:
            return dot(self, t)
    def cross(self, v):
        "(Vector v) - returns the cross product of 'self' with 'v'"
        return self.new(self[1] * v[2] - self[2] * v[1],
                        self[2] * v[0] - self[0] * v[2],
                        self[0] * v[1] - self[1] * v[0])
    def __repr__(self):
        return "(%.3f, %.3f, %.3f)" % (self.v[0], self.v[1], self.v[2])
class Matrix(Vector):
    """Matrix class

    A 3x3 matrix stored as a list of three Vector rows, emulating a 3x3
    float array.  Addition, subtraction, negation and indexing are
    inherited from Vector; multiplication and division are overridden.
    Usage::
        M = Matrix(v1, v2, v3)
    where the 'v'n are Vector instances."""
    def __init__(self, v1 = Vector(1.0, 0.0, 0.0),
                       v2 = Vector(0.0, 1.0, 0.0),
                       v3 = Vector(0.0, 0.0, 1.0)):
        self.v = [v1, v2, v3]   # list (not tuple): rows are assignable
        self.type = MatrixType
    def __setitem__(self, i, arg):
        "Row assignment: M[i] = vector"
        self.v[i] = arg
    def new(self, *args):
        "Factory used by the arithmetic methods inherited from Vector"
        return Matrix(args[0], args[1], args[2])
    def __repr__(self):
        return "Matrix:\n %s\n %s\n %s\n" % (self.v[0], self.v[1], self.v[2])
    def __mul__(self, m):
        """Left multiplication supported with:
        - Scalar (float)
        - Matrix
        - Vector: row_vector * matrix; same as self.transposed() * vector
        """
        try:
            if type(m) == FloatType:
                return self.__rmul__(m)
            if m.type == MatrixType:
                M = matxmat(self, m)
                return self.new(Vector(M[0][0], M[0][1], M[0][2]),
                                Vector(M[1][0], M[1][1], M[1][2]),
                                Vector(M[2][0], M[2][1], M[2][2]))
            if m.type == VectorType:
                v = matxvec(self, m)
                return Vector(v[0], v[1], v[2])
        except AttributeError:
            # operand without a .type tag -> unsupported multiplicand
            # (was a bare 'except:', which also hid unrelated errors)
            raise TypeError("bad multiplicator type")
    def inverse(self):
        """returns the matrix inverse"""
        M = invmat(self)
        return self.new(Vector(M[0][0], M[0][1], M[0][2]),
                        Vector(M[1][0], M[1][1], M[1][2]),
                        Vector(M[2][0], M[2][1], M[2][2]))
    def transposed(self):
        """returns the transposed matrix

        Fixed: rows 1 and 2 previously used wrong indices (M[1][0] and
        M[2][0] instead of M[0][1] / M[0][2]), so the result was not the
        transpose."""
        M = self
        return self.new(Vector(M[0][0], M[1][0], M[2][0]),
                        Vector(M[0][1], M[1][1], M[2][1]),
                        Vector(M[0][2], M[1][2], M[2][2]))
    def det(self):
        """returns the determinant"""
        M, det = invmatdet(self)
        return det
    def tr(self):
        """returns trace (sum of diagonal elements) of matrix"""
        return self.v[0][0] + self.v[1][1] + self.v[2][2]
    def __rmul__(self, m):
        "Right multiplication supported with scalar"
        if type(m) == FloatType:
            return self.new(m * self[0],
                            m * self[1],
                            m * self[2])
        else:
            raise TypeError("bad multiplicator type")
    def __div__(self, m):
        """Division supported with:
        - Scalar
        - Matrix
        NOTE(review): the original docstring claimed a / b == b.inverse * a,
        but the code computes self.inverse() * m (== a.inverse * b);
        behaviour kept as-is -- confirm the intended convention.
        """
        if type(m) == FloatType:
            m = 1.0 / m
            return m * self
        elif m.type == MatrixType:
            return self.inverse() * m
        else:
            raise TypeError("bad multiplicator type")
    def __rdiv__(self, m):
        "Right division of matrix equivalent to multiplication with matrix.inverse()"
        return m * self.inverse()
    def asEuler(self):
        """returns Matrix 'self' as Eulers. Note that this not the only result, due to
        the nature of sin() and cos(). The Matrix MUST be a rotation matrix, i.e. orthogonal and
        normalized."""
        from math import cos, asin, atan2, atan
        mat = self.v
        sy = mat[0][2]
        # clamp for numerical stability:
        if sy > 1.0:
            if sy > 1.0 + TOLERANCE:
                raise RuntimeError("FATAL: bad matrix given")
            else:
                sy = 1.0
        phi_y = -asin(sy)
        if abs(sy) > (1.0 - TOLERANCE):
            # degenerate case: phi_x can be arbitrarily chosen, we set it = 0.0
            phi_x = 0.0
            sz = mat[1][0]
            cz = mat[2][0]
            phi_z = atan(sz/cz)
        else:
            cy = cos(phi_y)
            cz = mat[0][0] / cy
            sz = mat[0][1] / cy
            phi_z = atan2(sz, cz)
            sx = mat[1][2] / cy
            cx = mat[2][2] / cy
            phi_x = atan2(sx, cx)
        return phi_x, phi_y, phi_z
# Cartesian unit vectors
Ex = Vector(1.0, 0.0, 0.0)
Ey = Vector(0.0, 1.0, 0.0)
Ez = Vector(0.0, 0.0, 1.0)
# identity matrix and the coordinate origin
One = Matrix(Ex, Ey, Ez)
orig = (0.0, 0.0, 0.0)
def rotmatrix(phi_x, phi_y, phi_z, reverse = 0):
    """Creates rotation matrix from euler angles. Rotations are applied in order
    X, then Y, then Z. If the reverse is desired, you have to transpose the matrix after."""
    from math import sin, cos
    sz, cz = sin(phi_z), cos(phi_z)
    sy, cy = sin(phi_y), cos(phi_y)
    sx, cx = sin(phi_x), cos(phi_x)
    rot_z = Matrix(Vector(cz, sz, 0.0), Vector(-sz, cz, 0.0), Ez)
    rot_y = Matrix(Vector(cy, 0.0, -sy), Ey, Vector(sy, 0.0, cy))
    rot_x = Matrix(Ex, Vector(0.0, cx, sx), Vector(0.0, -sx, cx))
    return rot_z * rot_y * rot_x
def test():
    "The module test: exercises the Vector and Matrix classes interactively"
    print "********************"
    print "VECTOR TEST"
    print "********************"
    a = Vector(1.1, 0.0, 0.0)
    b = Vector(0.0, 2.0, 0.0)
    print "vectors: a = %s, b = %s" % (a, b)
    print "dot:", a * a
    print "scalar:", 4.0 * a
    print "scalar:", a * 4.0
    print "cross:", a.cross(b)
    print "add:", a + b
    print "sub:", a - b
    print "sub:", b - a
    print
    print "********************"
    print "MATRIX TEST"
    print "********************"
    # a, b and a x b form a (non-orthonormal) basis
    c = a.cross(b)
    m = Matrix(a, b, c)
    v = Vector(1.0, 2.0, 3.0)
    E = One
    print "Original", m
    print "det", m.det()
    print "add", m + m
    print "scalar", 0.5 * m
    print "sub", m - 0.5 * m
    print "vec mul", v * m
    print "mul vec", m * v
    n = m * m
    print "mul:", n
    print "matrix div (mul inverse):", n / m
    print "scal div (inverse):", 1.0 / m
    print "mat * inverse", m * m.inverse()
    print "mat * inverse (/-notation):", m * (1.0 / m)
    print "div scal", m / 2.0
    # matrices with rank < dimension have det = 0.0
    m = Matrix(a, 2.0 * a, c)
    print "minor rang", m
    print "det:", m.det()

View File

@@ -0,0 +1,142 @@
"""Vector tools
Various vector tools, basing on vect.py"""
from vect import *
EPSILON = 0.0001
def vecarea(v, w):
    "Computes area of the span of vector 'v' and 'w' in 2D (not regarding z coordinate)"
    # z component of the 2D cross product (signed parallelogram area)
    return v[0] * w[1] - v[1] * w[0]
def intersect(a1, b1, a2, b2):
    """Computes 2D intersection of edges ('a1' -> 'b1') and ('a2' -> 'b2'),
    returning normalized intersection parameter 's' of edge (a1 -> b1).
    If 0.0 < 's' <= 1.0, the two edges intersect at the point::
        v = a1 + s * (b1 - a1)
    A return value of 0.0 signals "no proper intersection".
    """
    dir1 = (b1[0] - a1[0], b1[1] - a1[1])
    dir2 = (b2[0] - a2[0], b2[1] - a2[1])
    off_x = a2[0] - a1[0]
    off_y = a2[1] - a1[1]
    det = dir2[0] * dir1[1] - dir2[1] * dir1[0]
    if det == 0:
        return 0.0          # parallel (or degenerate) edges
    s = (dir2[0] * off_y - dir2[1] * off_x) / det
    t = (dir1[0] * off_y - dir1[1] * off_x) / det
    if s > 1.0 or s < 0.0:
        return 0.0
    if t > 1.0 or t < 0.0:
        return 0.0
    return s
def insidetri(a, b, c, x):
    "Returns 1 if 'x' is inside the 2D triangle ('a' -> 'b' -> 'c'), 0 otherwise"
    # compare signed areas: x must lie on the same side of the tested edge
    # direction as the remaining vertex
    v1 = norm3(sub3(b, a))
    v2 = norm3(sub3(c, a))
    v3 = norm3(sub3(x, a))
    a1 = (vecarea(v1, v2))
    a2 = (vecarea(v1, v3))
    lo = min(0.0, a1)
    hi = max(0.0, a1)
    if a2 < lo or a2 > hi: return 0
    # second test, from vertex b
    # NOTE(review): v1 is reused from above (direction a -> b) while v2/v3
    # are recomputed from b -- possibly intended to be the edge direction
    # b -> c instead; confirm before relying on boundary behaviour.
    v2 = norm3(sub3(b, c))
    v3 = norm3(sub3(b, x))
    a1 = (vecarea(v1, v2))
    a2 = (vecarea(v1, v3))
    lo = min(0.0, a1)
    hi = max(0.0, a1)
    if a2 < lo or a2 > hi: return 0
    return 1
def plane_fromface(v1, v2, v3):
    "Returns plane (normal, point) from 3 vertices 'v1', 'v2', 'v3'"
    edge1 = sub3(v2, v1)
    edge2 = sub3(v3, v1)
    normal = norm3(cross(edge1, edge2))
    return normal, v1
def inside_halfspace(vec, plane):
    "Returns 1 if point 'vec' inside halfspace defined by 'plane', else 0"
    normal, point = plane
    unit = norm3(normal)
    offset = sub3(vec, point)
    if dot(unit, offset) < 0.0:
        return 1
    return 0
def half_space(vec, plane, tol = EPSILON):
    """Classify point 'vec' against 'plane': -1 inside, +1 outside, and 0
    when the point lies within a slab of numerical thickness 'tol'
    (default 'EPSILON') around the plane."""
    normal, point = plane
    offset = sub3(vec, point)
    threshold = len3(normal) * tol
    d = dot(normal, offset)
    if d < -threshold:
        return -1
    if d > threshold:
        return 1
    return 0
def plane_edge_intersect(plane, edge):
    """Returns normalized factor 's' of the intersection of 'edge' with 'plane'.
    The point of intersection on the plane is::
        p = edge[0] + s * (edge[1] - edge[0])
    Returns None when the edge is parallel to the plane.
    """
    n, t = plane # normal, translation
    # transform both edge endpoints into the plane's local frame, where
    # the intersection reduces to a z = 0 crossing
    mat = matfromnormal(n)
    mat = transmat(mat) # inverse (transpose inverts the orthogonal frame)
    v = matxvec(mat, sub3(edge[0], t)) #transformed edge points
    w = matxvec(mat, sub3(edge[1], t))
    w = sub3(w, v)
    if w[2] != 0.0:
        s = -v[2] / w[2]
        return s
    else:
        return None
def insidecube(v):
    "Returns 1 if point 'v' inside normalized cube [0,1]^3, 0 otherwise"
    for coord in v:
        if coord < 0.0 or coord > 1.0:
            return 0
    return 1
def flatproject(verts, up):
    """Projects a 3D set (list of vertices) 'verts' into a 2D set according to
    an 'up'-vector"""
    # the plane of the first three vertices defines the local z axis;
    # verts[0] (= t) becomes the 2D origin
    z, t = plane_fromface(verts[0], verts[1], verts[2])
    y = norm3(up)
    x = cross(y, z)
    uvs = []
    for v in verts:
        w = (v[0] - t[0], v[1] - t[1], v[2] - t[2])
        # this is the transposed 2x2 matrix * the vertex vector
        uv = (dot(x, w), dot(y,w)) # do projection
        uvs.append(uv)
    return uvs

View File

@@ -0,0 +1 @@
"""The VRML import module"""

View File

@@ -0,0 +1,974 @@
from scenegraph import Prototype, NULL, sceneGraph, IS, Script, ExternalPrototype, ROUTE
PROTO = Prototype
EXTERNPROTO = ExternalPrototype
# VRML97 Anchor grouping node. The three dictionaries map:
#   field name -> (name, VRML field type, exposed flag),
#   field name -> default value,
#   event name -> (name, VRML field type, eventOut flag).
Anchor = Prototype( "Anchor",
    {
        'bboxSize':('bboxSize', 'SFVec3f', 0),
        'children':('children', 'MFNode', 1),
        'parameter':('parameter', 'MFString', 1),
        'url':('url', 'MFString', 1),
        'description':('description', 'SFString', 1),
        'bboxCenter':('bboxCenter', 'SFVec3f', 0),
    },
    {
        'bboxSize':[-1.0, -1.0, -1.0],
        'children':[],
        'parameter':[],
        'url':[],
        'description':'',
        'bboxCenter':[0.0, 0.0, 0.0],
    },
    {
        'addChildren':('addChildren', 'MFNode', 0),
        'removeChildren':('removeChildren', 'MFNode', 0),
    },
)
Appearance = Prototype( "Appearance",
{
'material':('material', 'SFNode', 1),
'texture':('texture', 'SFNode', 1),
'textureTransform':('textureTransform', 'SFNode', 1),
},
{
'material':NULL,
'texture':NULL,
'textureTransform':NULL,
},
{
},
)
AudioClip = Prototype( "AudioClip",
{
'pitch':('pitch', 'SFFloat', 1),
'loop':('loop', 'SFBool', 1),
'description':('description', 'SFString', 1),
'stopTime':('stopTime', 'SFTime', 1),
'startTime':('startTime', 'SFTime', 1),
'url':('url', 'MFString', 1),
},
{
'pitch':1.0,
'loop':0,
'description':'',
'stopTime':0.0,
'startTime':0.0,
'url':[],
},
{
'isActive':('isActive', 'SFBool', 1),
'duration_changed':('duration_changed', 'SFTime', 1),
},
)
Background = Prototype( "Background",
{
'groundAngle':('groundAngle', 'MFFloat', 1),
'skyAngle':('skyAngle', 'MFFloat', 1),
'frontUrl':('frontUrl', 'MFString', 1),
'bottomUrl':('bottomUrl', 'MFString', 1),
'groundColor':('groundColor', 'MFColor', 1),
'backUrl':('backUrl', 'MFString', 1),
'skyColor':('skyColor', 'MFColor', 1),
'topUrl':('topUrl', 'MFString', 1),
'rightUrl':('rightUrl', 'MFString', 1),
'leftUrl':('leftUrl', 'MFString', 1),
},
{
'groundAngle':[],
'skyAngle':[],
'frontUrl':[],
'bottomUrl':[],
'groundColor':[],
'backUrl':[],
'skyColor':[[0.0, 0.0, 0.0]],
'topUrl':[],
'rightUrl':[],
'leftUrl':[],
},
{
'isBound':('isBound', 'SFBool', 1),
'set_bind':('set_bind', 'SFBool', 0),
},
)
Billboard = Prototype( "Billboard",
{
'bboxCenter':('bboxCenter', 'SFVec3f', 0),
'bboxSize':('bboxSize', 'SFVec3f', 0),
'children':('children', 'MFNode', 1),
'axisOfRotation':('axisOfRotation', 'SFVec3f', 1),
},
{
'bboxCenter':[0.0, 0.0, 0.0],
'bboxSize':[-1.0, -1.0, -1.0],
'children':[],
'axisOfRotation':[0.0, 1.0, 0.0],
},
{
'addChildren':('addChildren', 'MFNode', 0),
'removeChildren':('removeChildren', 'MFNode', 0),
},
)
Box = Prototype( "Box",
{
'size':('size', 'SFVec3f', 0),
},
{
'size':[2.0, 2.0, 2.0],
},
{
},
)
Collision = Prototype( "Collision",
{
'bboxCenter':('bboxCenter', 'SFVec3f', 0),
'bboxSize':('bboxSize', 'SFVec3f', 0),
'children':('children', 'MFNode', 1),
'collide':('collide', 'SFBool', 1),
'proxy':('proxy', 'SFNode', 0),
},
{
'bboxCenter':[0.0, 0.0, 0.0],
'bboxSize':[-1.0, -1.0, -1.0],
'children':[],
'collide':1,
'proxy':NULL,
},
{
'addChildren':('addChildren', 'MFNode', 0),
'removeChildren':('removeChildren', 'MFNode', 0),
'collideTime':('collideTime', 'SFTime', 1),
},
)
Color = Prototype( "Color",
{
'color':('color', 'MFColor', 1),
},
{
'color':[],
},
{
},
)
ColorInterpolator = Prototype( "ColorInterpolator",
{
'key':('key', 'MFFloat', 1),
'keyValue':('keyValue', 'MFColor', 1),
},
{
'key':[],
'keyValue':[],
},
{
'value_changed':('value_changed', 'SFColor', 1),
'set_fraction':('set_fraction', 'SFFloat', 0),
},
)
Cone = Prototype( "Cone",
{
'bottomRadius':('bottomRadius', 'SFFloat', 0),
'side':('side', 'SFBool', 0),
'bottom':('bottom', 'SFBool', 0),
'height':('height', 'SFFloat', 0),
},
{
'bottomRadius':1.0,
'side':1,
'bottom':1,
'height':2.0,
},
{
},
)
# Built-in VRML97 node definitions, one Prototype instance per standard node.
# Each Prototype call takes the node's VRML97 name plus three dictionaries:
#   1. field declarations:  fieldName -> (fieldName, fieldType, flag)
#   2. default values for those fields
#   3. event declarations:  eventName -> (eventName, fieldType, flag)
# Judging from the parser's fieldDecl/eventDecl handlers later in this commit,
# the flag appears to be 1 for "exposedField"/"eventOut" and 0 for
# "field"/"eventIn" -- TODO confirm against the Prototype class itself.
# NULL (defined elsewhere in this module) marks an SFNode field with no
# default node attached.
Coordinate = Prototype( "Coordinate",
{
'point':('point', 'MFVec3f', 1),
},
{
'point':[],
},
{
},
)
CoordinateInterpolator = Prototype( "CoordinateInterpolator",
{
'key':('key', 'MFFloat', 1),
'keyValue':('keyValue', 'MFVec3f', 1),
},
{
'key':[],
'keyValue':[],
},
{
'value_changed':('value_changed', 'MFVec3f', 1),
'set_fraction':('set_fraction', 'SFFloat', 0),
},
)
Cylinder = Prototype( "Cylinder",
{
'bottom':('bottom', 'SFBool', 0),
'side':('side', 'SFBool', 0),
'radius':('radius', 'SFFloat', 0),
'top':('top', 'SFBool', 0),
'height':('height', 'SFFloat', 0),
},
{
'bottom':1,
'side':1,
'radius':1.0,
'top':1,
'height':2.0,
},
{
},
)
CylinderSensor = Prototype( "CylinderSensor",
{
'maxAngle':('maxAngle', 'SFFloat', 1),
'autoOffset':('autoOffset', 'SFBool', 1),
'minAngle':('minAngle', 'SFFloat', 1),
'enabled':('enabled', 'SFBool', 1),
'offset':('offset', 'SFFloat', 1),
'diskAngle':('diskAngle', 'SFFloat', 1),
},
{
'maxAngle':-1.0,
'autoOffset':1,
'minAngle':0.0,
'enabled':1,
'offset':0.0,
'diskAngle':0.262,
},
{
'rotation_changed':('rotation_changed', 'SFRotation', 1),
'isActive':('isActive', 'SFBool', 1),
'trackPoint_changed':('trackPoint_changed', 'SFVec3f', 1),
},
)
DirectionalLight = Prototype( "DirectionalLight",
{
'color':('color', 'SFColor', 1),
'ambientIntensity':('ambientIntensity', 'SFFloat', 1),
'intensity':('intensity', 'SFFloat', 1),
'on':('on', 'SFBool', 1),
'direction':('direction', 'SFVec3f', 1),
},
{
'color':[1.0, 1.0, 1.0],
'ambientIntensity':0.0,
'intensity':1.0,
'on':1,
'direction':[0.0, 0.0, -1.0],
},
{
},
)
ElevationGrid = Prototype( "ElevationGrid",
{
'xSpacing':('xSpacing', 'SFFloat', 0),
'zSpacing':('zSpacing', 'SFFloat', 0),
'xDimension':('xDimension', 'SFInt32', 0),
'colorPerVertex':('colorPerVertex', 'SFBool', 0),
'height':('height', 'MFFloat', 0),
'texCoord':('texCoord', 'SFNode', 1),
'normalPerVertex':('normalPerVertex', 'SFBool', 0),
'ccw':('ccw', 'SFBool', 0),
'color':('color', 'SFNode', 1),
'normal':('normal', 'SFNode', 1),
'creaseAngle':('creaseAngle', 'SFFloat', 0),
'solid':('solid', 'SFBool', 0),
'zDimension':('zDimension', 'SFInt32', 0),
},
{
'xSpacing':0.0,
'zSpacing':0.0,
'xDimension':0,
'colorPerVertex':1,
'height':[],
'texCoord':NULL,
'normalPerVertex':1,
'ccw':1,
'color':NULL,
'normal':NULL,
'creaseAngle':0.0,
'solid':1,
'zDimension':0,
},
{
'set_height':('set_height', 'MFFloat', 0),
},
)
Extrusion = Prototype( "Extrusion",
{
'endCap':('endCap', 'SFBool', 0),
'scale':('scale', 'MFVec2f', 0),
'ccw':('ccw', 'SFBool', 0),
'crossSection':('crossSection', 'MFVec2f', 0),
'solid':('solid', 'SFBool', 0),
'convex':('convex', 'SFBool', 0),
'creaseAngle':('creaseAngle', 'SFFloat', 0),
'spine':('spine', 'MFVec3f', 0),
'beginCap':('beginCap', 'SFBool', 0),
'orientation':('orientation', 'MFRotation', 0),
},
{
'endCap':1,
'scale':[[1.0, 1.0]],
'ccw':1,
'crossSection':[[1.0, 1.0], [1.0, -1.0], [-1.0, -1.0], [-1.0, 1.0], [1.0, 1.0]],
'solid':1,
'convex':1,
'creaseAngle':0.0,
'spine':[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
'beginCap':1,
'orientation':[[0.0, 0.0, 1.0, 0.0]],
},
{
'set_scale':('set_scale', 'MFVec2f', 0),
'set_spine':('set_spine', 'MFVec3f', 0),
'set_orientation':('set_orientation', 'MFRotation', 0),
'set_crossSection':('set_crossSection', 'MFVec2f', 0),
},
)
Fog = Prototype( "Fog",
{
'fogType':('fogType', 'SFString', 1),
'color':('color', 'SFColor', 1),
'visibilityRange':('visibilityRange', 'SFFloat', 1),
},
{
'fogType':'LINEAR',
'color':[1.0, 1.0, 1.0],
'visibilityRange':0.0,
},
{
'isBound':('isBound', 'SFBool', 1),
'set_bind':('set_bind', 'SFBool', 0),
},
)
FontStyle = Prototype( "FontStyle",
{
'justify':('justify', 'MFString', 0),
'leftToRight':('leftToRight', 'SFBool', 0),
'spacing':('spacing', 'SFFloat', 0),
'horizontal':('horizontal', 'SFBool', 0),
'language':('language', 'SFString', 0),
'topToBottom':('topToBottom', 'SFBool', 0),
'size':('size', 'SFFloat', 0),
'style':('style', 'SFString', 0),
'family':('family', 'SFString', 0),
},
{
'justify':['BEGIN'],
'leftToRight':1,
'spacing':1.0,
'horizontal':1,
'language':'',
'topToBottom':1,
'size':1.0,
'style':'PLAIN',
'family':'SERIF',
},
{
},
)
Group = Prototype( "Group",
{
'bboxSize':('bboxSize', 'SFVec3f', 0),
'children':('children', 'MFNode', 1),
'bboxCenter':('bboxCenter', 'SFVec3f', 0),
},
{
'bboxSize':[-1.0, -1.0, -1.0],
'children':[],
'bboxCenter':[0.0, 0.0, 0.0],
},
{
'addChildren':('addChildren', 'MFNode', 0),
'removeChildren':('removeChildren', 'MFNode', 0),
},
)
ImageTexture = Prototype( "ImageTexture",
{
'repeatS':('repeatS', 'SFBool', 0),
'url':('url', 'MFString', 1),
'repeatT':('repeatT', 'SFBool', 0),
},
{
'repeatS':1,
'url':[],
'repeatT':1,
},
{
},
)
IndexedFaceSet = Prototype( "IndexedFaceSet",
{
'texCoordIndex':('texCoordIndex', 'MFInt32', 0),
'normalIndex':('normalIndex', 'MFInt32', 0),
'coordIndex':('coordIndex', 'MFInt32', 0),
'convex':('convex', 'SFBool', 0),
'texCoord':('texCoord', 'SFNode', 1),
'normalPerVertex':('normalPerVertex', 'SFBool', 0),
'coord':('coord', 'SFNode', 1),
'ccw':('ccw', 'SFBool', 0),
'color':('color', 'SFNode', 1),
'normal':('normal', 'SFNode', 1),
'creaseAngle':('creaseAngle', 'SFFloat', 0),
'solid':('solid', 'SFBool', 0),
'colorPerVertex':('colorPerVertex', 'SFBool', 0),
'colorIndex':('colorIndex', 'MFInt32', 0),
},
{
'texCoordIndex':[],
'normalIndex':[],
'coordIndex':[],
'convex':1,
'texCoord':NULL,
'normalPerVertex':1,
'coord':NULL,
'ccw':1,
'color':NULL,
'normal':NULL,
'creaseAngle':0.0,
'solid':1,
'colorPerVertex':1,
'colorIndex':[],
},
{
'set_normalIndex':('set_normalIndex', 'MFInt32', 0),
'set_colorIndex':('set_colorIndex', 'MFInt32', 0),
'set_texCoordIndex':('set_texCoordIndex', 'MFInt32', 0),
'set_coordIndex':('set_coordIndex', 'MFInt32', 0),
},
)
IndexedLineSet = Prototype( "IndexedLineSet",
{
'coordIndex':('coordIndex', 'MFInt32', 0),
'coord':('coord', 'SFNode', 1),
'colorIndex':('colorIndex', 'MFInt32', 0),
'colorPerVertex':('colorPerVertex', 'SFBool', 0),
'color':('color', 'SFNode', 1),
},
{
'coordIndex':[],
'coord':NULL,
'colorIndex':[],
'colorPerVertex':1,
'color':NULL,
},
{
'set_colorIndex':('set_colorIndex', 'MFInt32', 0),
'set_coordIndex':('set_coordIndex', 'MFInt32', 0),
},
)
Inline = Prototype( "Inline",
{
'url':('url', 'MFString', 1),
'bboxSize':('bboxSize', 'SFVec3f', 0),
'bboxCenter':('bboxCenter', 'SFVec3f', 0),
},
{
'url':[],
'bboxSize':[-1.0, -1.0, -1.0],
'bboxCenter':[0.0, 0.0, 0.0],
},
{
},
)
LOD = Prototype( "LOD",
{
'level':('level', 'MFNode', 1),
'range':('range', 'MFFloat', 0),
'center':('center', 'SFVec3f', 0),
},
{
'level':[],
'range':[],
'center':[0.0, 0.0, 0.0],
},
{
},
)
Material = Prototype( "Material",
{
'emissiveColor':('emissiveColor', 'SFColor', 1),
'transparency':('transparency', 'SFFloat', 1),
'shininess':('shininess', 'SFFloat', 1),
'diffuseColor':('diffuseColor', 'SFColor', 1),
'ambientIntensity':('ambientIntensity', 'SFFloat', 1),
'specularColor':('specularColor', 'SFColor', 1),
},
{
'emissiveColor':[0.0, 0.0, 0.0],
'transparency':0.0,
'shininess':0.2,
'diffuseColor':[0.8, 0.8, 0.8],
'ambientIntensity':0.2,
'specularColor':[0.0, 0.0, 0.0],
},
{
},
)
MovieTexture = Prototype( "MovieTexture",
{
'loop':('loop', 'SFBool', 1),
'speed':('speed', 'SFFloat', 1),
'repeatT':('repeatT', 'SFBool', 0),
'repeatS':('repeatS', 'SFBool', 0),
'url':('url', 'MFString', 1),
'startTime':('startTime', 'SFTime', 1),
'stopTime':('stopTime', 'SFTime', 1),
},
{
'loop':0,
'speed':1.0,
'repeatT':1,
'repeatS':1,
'url':[],
'startTime':0.0,
'stopTime':0.0,
},
{
'isActive':('isActive', 'SFBool', 1),
'duration_changed':('duration_changed', 'SFFloat', 1),
},
)
NavigationInfo = Prototype( "NavigationInfo",
{
'avatarSize':('avatarSize', 'MFFloat', 1),
'speed':('speed', 'SFFloat', 1),
'headlight':('headlight', 'SFBool', 1),
'visibilityLimit':('visibilityLimit', 'SFFloat', 1),
'type':('type', 'MFString', 1),
},
{
'avatarSize':[0.25, 1.6, 0.75],
'speed':1.0,
'headlight':1,
'visibilityLimit':0.0,
'type':['WALK'],
},
{
'isBound':('isBound', 'SFBool', 1),
'set_bind':('set_bind', 'SFBool', 0),
},
)
Normal = Prototype( "Normal",
{
'vector':('vector', 'MFVec3f', 1),
},
{
'vector':[],
},
{
},
)
NormalInterpolator = Prototype( "NormalInterpolator",
{
'key':('key', 'MFFloat', 1),
'keyValue':('keyValue', 'MFVec3f', 1),
},
{
'key':[],
'keyValue':[],
},
{
'value_changed':('value_changed', 'MFVec3f', 1),
'set_fraction':('set_fraction', 'SFFloat', 0),
},
)
OrientationInterpolator = Prototype( "OrientationInterpolator",
{
'key':('key', 'MFFloat', 1),
'keyValue':('keyValue', 'MFRotation', 1),
},
{
'key':[],
'keyValue':[],
},
{
'value_changed':('value_changed', 'SFRotation', 1),
'set_fraction':('set_fraction', 'SFFloat', 0),
},
)
PixelTexture = Prototype( "PixelTexture",
{
'repeatS':('repeatS', 'SFBool', 0),
'image':('image', 'SFImage', 1),
'repeatT':('repeatT', 'SFBool', 0),
},
{
'repeatS':1,
'image':[0, 0, 0],
'repeatT':1,
},
{
},
)
PlaneSensor = Prototype( "PlaneSensor",
{
'offset':('offset', 'SFVec3f', 1),
'autoOffset':('autoOffset', 'SFBool', 1),
'minPosition':('minPosition', 'SFVec2f', 1),
'enabled':('enabled', 'SFBool', 1),
'maxPosition':('maxPosition', 'SFVec2f', 1),
},
{
'offset':[0.0, 0.0, 0.0],
'autoOffset':1,
'minPosition':[0.0, 0.0],
'enabled':1,
'maxPosition':[-1.0, -1.0],
},
{
'translation_changed':('translation_changed', 'SFVec3f', 1),
'isActive':('isActive', 'SFBool', 1),
'trackPoint_changed':('trackPoint_changed', 'SFVec3f', 1),
},
)
PointLight = Prototype( "PointLight",
{
'ambientIntensity':('ambientIntensity', 'SFFloat', 1),
'color':('color', 'SFColor', 1),
'location':('location', 'SFVec3f', 1),
'radius':('radius', 'SFFloat', 1),
'attenuation':('attenuation', 'SFVec3f', 1),
'intensity':('intensity', 'SFFloat', 1),
'on':('on', 'SFBool', 1),
},
{
'ambientIntensity':0.0,
'color':[1.0, 1.0, 1.0],
'location':[0.0, 0.0, 0.0],
'radius':100.0,
'attenuation':[1.0, 0.0, 0.0],
'intensity':1.0,
'on':1,
},
{
},
)
PointSet = Prototype( "PointSet",
{
'coord':('coord', 'SFNode', 1),
'color':('color', 'SFNode', 1),
},
{
'coord':NULL,
'color':NULL,
},
{
},
)
PositionInterpolator = Prototype( "PositionInterpolator",
{
'key':('key', 'MFFloat', 1),
'keyValue':('keyValue', 'MFVec3f', 1),
},
{
'key':[],
'keyValue':[],
},
{
'value_changed':('value_changed', 'SFVec3f', 1),
'set_fraction':('set_fraction', 'SFFloat', 0),
},
)
ProximitySensor = Prototype( "ProximitySensor",
{
'size':('size', 'SFVec3f', 1),
'center':('center', 'SFVec3f', 1),
'enabled':('enabled', 'SFBool', 1),
},
{
'size':[0.0, 0.0, 0.0],
'center':[0.0, 0.0, 0.0],
'enabled':1,
},
{
'enterTime':('enterTime', 'SFTime', 1),
'isActive':('isActive', 'SFBool', 1),
'orientation_changed':('orientation_changed', 'SFRotation', 1),
'exitTime':('exitTime', 'SFTime', 1),
'position_changed':('position_changed', 'SFVec3f', 1),
},
)
ScalarInterpolator = Prototype( "ScalarInterpolator",
{
'key':('key', 'MFFloat', 1),
'keyValue':('keyValue', 'MFFloat', 1),
},
{
'key':[],
'keyValue':[],
},
{
'value_changed':('value_changed', 'SFFloat', 1),
'set_fraction':('set_fraction', 'SFFloat', 0),
},
)
Shape = Prototype( "Shape",
{
'appearance':('appearance', 'SFNode', 1),
'geometry':('geometry', 'SFNode', 1),
},
{
'appearance':NULL,
'geometry':NULL,
},
{
},
)
Sound = Prototype( "Sound",
{
'spatialize':('spatialize', 'SFBool', 0),
'maxFront':('maxFront', 'SFFloat', 1),
'minBack':('minBack', 'SFFloat', 1),
'maxBack':('maxBack', 'SFFloat', 1),
'minFront':('minFront', 'SFFloat', 1),
'location':('location', 'SFVec3f', 1),
'intensity':('intensity', 'SFFloat', 1),
'direction':('direction', 'SFVec3f', 1),
'source':('source', 'SFNode', 1),
'priority':('priority', 'SFFloat', 1),
},
{
'spatialize':1,
'maxFront':10.0,
'minBack':1.0,
'maxBack':10.0,
'minFront':1.0,
'location':[0.0, 0.0, 0.0],
'intensity':1.0,
'direction':[0.0, 0.0, 1.0],
'source':NULL,
'priority':0.0,
},
{
},
)
Sphere = Prototype( "Sphere",
{
'radius':('radius', 'SFFloat', 0),
},
{
'radius':1.0,
},
{
},
)
SphereSensor = Prototype( "SphereSensor",
{
'offset':('offset', 'SFRotation', 1),
'autoOffset':('autoOffset', 'SFBool', 1),
'enabled':('enabled', 'SFBool', 1),
},
{
'offset':[0.0, 1.0, 0.0, 0.0],
'autoOffset':1,
'enabled':1,
},
{
'rotation_changed':('rotation_changed', 'SFRotation', 1),
'isActive':('isActive', 'SFBool', 1),
'trackPoint_changed':('trackPoint_changed', 'SFVec3f', 1),
},
)
SpotLight = Prototype( "SpotLight",
{
'attenuation':('attenuation', 'SFVec3f', 1),
'ambientIntensity':('ambientIntensity', 'SFFloat', 1),
'cutOffAngle':('cutOffAngle', 'SFFloat', 1),
'direction':('direction', 'SFVec3f', 1),
'color':('color', 'SFColor', 1),
'location':('location', 'SFVec3f', 1),
'radius':('radius', 'SFFloat', 1),
'intensity':('intensity', 'SFFloat', 1),
'beamWidth':('beamWidth', 'SFFloat', 1),
'on':('on', 'SFBool', 1),
},
{
'attenuation':[1.0, 0.0, 0.0],
'ambientIntensity':0.0,
'cutOffAngle':0.785398,
'direction':[0.0, 0.0, -1.0],
'color':[1.0, 1.0, 1.0],
'location':[0.0, 0.0, 0.0],
'radius':100.0,
'intensity':1.0,
'beamWidth':1.570796,
'on':1,
},
{
},
)
Switch = Prototype( "Switch",
{
'choice':('choice', 'MFNode', 1),
'whichChoice':('whichChoice', 'SFInt32', 1),
},
{
'choice':[],
'whichChoice':-1,
},
{
},
)
Text = Prototype( "Text",
{
'maxExtent':('maxExtent', 'SFFloat', 1),
'string':('string', 'MFString', 1),
'fontStyle':('fontStyle', 'SFNode', 1),
'length':('length', 'MFFloat', 1),
},
{
'maxExtent':0.0,
'string':[],
'fontStyle':NULL,
'length':[],
},
{
},
)
TextureCoordinate = Prototype( "TextureCoordinate",
{
'point':('point', 'MFVec2f', 1),
},
{
'point':[],
},
{
},
)
TextureTransform = Prototype( "TextureTransform",
{
'center':('center', 'SFVec2f', 1),
'scale':('scale', 'SFVec2f', 1),
'rotation':('rotation', 'SFFloat', 1),
'translation':('translation', 'SFVec2f', 1),
},
{
'center':[0.0, 0.0],
'scale':[1.0, 1.0],
'rotation':0.0,
'translation':[0.0, 0.0],
},
{
},
)
TimeSensor = Prototype( "TimeSensor",
{
'loop':('loop', 'SFBool', 1),
'cycleInterval':('cycleInterval', 'SFTime', 1),
'enabled':('enabled', 'SFBool', 1),
'stopTime':('stopTime', 'SFTime', 1),
'startTime':('startTime', 'SFTime', 1),
},
{
'loop':0,
'cycleInterval':1.0,
'enabled':1,
'stopTime':0.0,
'startTime':0.0,
},
{
'fraction_changed':('fraction_changed', 'SFFloat', 1),
'isActive':('isActive', 'SFBool', 1),
'time':('time', 'SFTime', 1),
'cycleTime':('cycleTime', 'SFTime', 1),
},
)
TouchSensor = Prototype( "TouchSensor",
{
'enabled':('enabled', 'SFBool', 1),
},
{
'enabled':1,
},
{
'hitNormal_changed':('hitNormal_changed', 'SFVec3f', 1),
'hitPoint_changed':('hitPoint_changed', 'SFVec3f', 1),
'touchTime':('touchTime', 'SFTime', 1),
'hitTexCoord_changed':('hitTexCoord_changed', 'SFVec2f', 1),
'isActive':('isActive', 'SFBool', 1),
'isOver':('isOver', 'SFBool', 1),
},
)
Transform = Prototype( "Transform",
{
'bboxSize':('bboxSize', 'SFVec3f', 0),
'children':('children', 'MFNode', 1),
'scaleOrientation':('scaleOrientation', 'SFRotation', 1),
'rotation':('rotation', 'SFRotation', 1),
'translation':('translation', 'SFVec3f', 1),
'bboxCenter':('bboxCenter', 'SFVec3f', 0),
'center':('center', 'SFVec3f', 1),
'scale':('scale', 'SFVec3f', 1),
},
{
'bboxSize':[-1.0, -1.0, -1.0],
'children':[],
'scaleOrientation':[0.0, 0.0, 1.0, 0.0],
'rotation':[0.0, 0.0, 1.0, 0.0],
'translation':[0.0, 0.0, 0.0],
'bboxCenter':[0.0, 0.0, 0.0],
'center':[0.0, 0.0, 0.0],
'scale':[1.0, 1.0, 1.0],
},
{
'addChildren':('addChildren', 'MFNode', 0),
'removeChildren':('removeChildren', 'MFNode', 0),
},
)
Viewpoint = Prototype( "Viewpoint",
{
'jump':('jump', 'SFBool', 1),
'orientation':('orientation', 'SFRotation', 1),
'fieldOfView':('fieldOfView', 'SFFloat', 1),
'position':('position', 'SFVec3f', 1),
'description':('description', 'SFString', 0),
},
{
'jump':1,
'orientation':[0.0, 0.0, 1.0, 0.0],
'fieldOfView':0.785398,
'position':[0.0, 0.0, 10.0],
'description':'',
},
{
'isBound':('isBound', 'SFBool', 1),
'set_bind':('set_bind', 'SFBool', 0),
'bindTime':('bindTime', 'SFTime', 1),
},
)
VisibilitySensor = Prototype( "VisibilitySensor",
{
'size':('size', 'SFVec3f', 1),
'center':('center', 'SFVec3f', 1),
'enabled':('enabled', 'SFBool', 1),
},
{
'size':[0.0, 0.0, 0.0],
'center':[0.0, 0.0, 0.0],
'enabled':1,
},
{
'exitTime':('exitTime', 'SFTime', 1),
'isActive':('isActive', 'SFBool', 1),
'enterTime':('enterTime', 'SFTime', 1),
},
)
WorldInfo = Prototype( "WorldInfo",
{
'title':('title', 'SFString', 0),
'info':('info', 'MFString', 0),
},
{
'title':'',
'info':[],
},
{
},
)

View File

@@ -0,0 +1,310 @@
'''
Field coercion routines.
To replace the field coercion routines, you must edit
basenodes.py and node.py so that they import some other coercion
routines. Basenodes.py is used by the parser; node.py
is used by each node as it checks the validity of its
attributes.
'''
import types, sys, string
from utils import typeclasses, collapse
class FieldCoercian:
'''
A Field Coercian class allows for creating new behaviours
when dealing with the conversion of fields to-and-from
particular field types. This allows the programmer to
use alternate representations of fields (such as matrix arrays)
'''
def SFString( self, someobj, targetType=types.StringType, targetName='SFString', convertfunc=str ):
'''
Allowable types:
simple string -> unchanged
instance ( an IS ) -> unchanged
sequence of length == 1 where first element is a string -> returns first element
sequence of length > 1 where all elements are strings -> returns string.join( someobj, '')
'''
t = type(someobj)
if t is targetType:
return someobj
if t in typeclasses.SequenceTypes:
if len( someobj) == 1 and type( someobj[0] ) is targetType:
return someobj[0] #
elif len(someobj) > 1:
try:
return string.join( someobj, '')
except:
pass # is not a sequence of strings...
### if we get here, then an incorrect value was passed
raise ValueError, """Attempted to set value for an %s field which is not compatible: %s"""%( targetName, `someobj` )
def MFString( self, someobj, targetType=types.StringType, targetName='SFString', convertfunc=str ):
'''
Allowable Types:
simple string -> wrapped in a list
instance (an IS ) -> unchanged
sequence of strings (of any length) -> equivalent list returned
'''
t = type(someobj)
if t is targetType: # a bare string...
return [someobj]
elif t in typeclasses.SequenceTypes: # is a sequence
if not filter( lambda x, t=targetType: x is not t, map( type, someobj) ): # are all strings...
if t is not types.ListType:
return list( someobj )
else:
return someobj
### if we get here, then an incorrect value was passed
raise ValueError, """Attempted to set value for an %s field which is not compatible: %s"""%( targetName, `someobj` )
def SFBool( self, someobj, targetType=types.IntType, targetName='SFBool', convertfunc=int):
'''
Allowable Types:
instance (an IS) -> unchanged
Any object which is testable for truth/falsehood -> 1 or 0 respectively
SFBool should always succeed
'''
if (type(someobj) in typeclasses.SequenceTypes):
try:
if hasattr( someobj[0], '__gi__'):
return someobj[0]
else:
someobj = someobj[0]
except IndexError: # is a null MFNode
pass
if someobj:
return 1
else:
return 0
def SFNode( self, someobj, targetType=types.InstanceType, targetName='SFNode', convertfunc=None):
'''
Allowable Types:
instance of a Node -> unchanged
instance (an IS or USE) -> unchanged
sequence of length == 1 where first element is as above -> return first element
'''
if hasattr( someobj, '__gi__'): # about the only test I have without requiring that elements inherit from Node
return someobj
elif (type(someobj) in typeclasses.SequenceTypes):
try:
if hasattr( someobj[0], '__gi__'):
return someobj[0]
except IndexError: # is a null MFNode
pass
raise ValueError, """Attempted to set value for an %s field which is not compatible: %s"""%( targetName, `someobj` )
def MFNode( self, someobj, targetType=types.InstanceType, targetName='MFNode', convertfunc=None):
'''
Allowable Types:
instance (an IS) -> unchanged
instance of a Node -> wrapped with a list
sequence where all elements are nodes -> returned as list of same
'''
if hasattr( someobj, '__gi__') and someobj.__gi__ != "IS":
# is this a bare SFNode? wrap with a list and return
return [someobj]
elif hasattr( someobj, "__gi__"): # is this an IS node
return someobj
elif type(someobj) in typeclasses.SequenceTypes:
try:
map( getattr, someobj, ['__gi__']*len(someobj) )
# is this an IS node wrapped in a list?
if len(someobj) == 1 and someobj[0].__gi__ == "IS":
return someobj[0]
# okay, assume is really nodes...
if type(someobj) is types.ListType:
return someobj
else:
return list(someobj)
except AttributeError: # something isn't a node
pass
raise ValueError, """Attempted to set value for an %s field which is not compatible: %s"""%( targetName, `someobj` )
def SFNumber( self, someobj, targetType, targetName, convertfunc=int ):
'''
Allowable Types:
bare number -> numerically coerced to correct type
instance ( an IS ) -> unchanged
sequence of length == 1 where first element is a string -> returns first element
'''
t = type(someobj)
if t is targetType or t is types.InstanceType:
return someobj
elif t in typeclasses.NumericTypes:
return convertfunc( someobj)
elif t in typeclasses.SequenceTypes:
if len( someobj) == 1 and type( someobj[0] ):
return convertfunc( someobj[0] ) #
### if we get here, then an incorrect value was passed
raise ValueError, """Attempted to set value for an %s field which is not compatible: %s"""%( targetName, `someobj` )
def MFInt32 ( self, someobject ):
''' Convert value into a MFInt32 field value (preferably an array, otherwise a list of integers) '''
t = type(someobject)
value = None
if t in typeclasses.SequenceTypes: # is a sequence
try:
value = map( int, someobject)
except:
try:
value = map( int, collapse.collapse2_safe( someobject) )
except:
pass
elif t in typeclasses.NumericTypes or t is types.StringType:
value = [int(someobject)]
if value is None:
### if we get here, then an incorrect value was passed
raise ValueError, """Attempted to set value for an %s field which is not compatible: %s"""%( targetName, `someobj` )
return value
SFImage = MFInt32
def MFFloat( self, someobject ):
''' Convert value into a MFFloat field value (preferably an array, otherwise a list of integers) '''
t = type(someobject)
value = None
if t in typeclasses.SequenceTypes: # is a sequence
try:
value = map( float, someobject)
except:
try:
value = map( float, collapse.collapse2_safe( someobject))
except:
pass
elif t in typeclasses.NumericTypes or t is types.StringType:
value = [float(someobj)]
if value is None:
### if we get here, then an incorrect value was passed
raise ValueError, """Attempted to set value for an %s field which is not compatible: %s"""%( targetName, `someobj` )
return value
def SFVec3f (self, value):
''' Create a new SFVec3f value from value '''
t = type(value)
try:
value = x,y,z = map (float, value)
except ValueError:
try:
value = (x,y,z) = map( float, value[0] )
except (IndexError, ValueError):
raise ValueError (''' Invalid value for field type SFVec3f: %s'''%(value))
return value
def SFRotation(self, value):
''' Create a new SFRotation value from value '''
t = type(value)
try:
value = x,y,z, a = map (float, value)
except ValueError:
try:
value = (x,y,z, a) = map( float, value[0] )
except (IndexError, ValueError):
raise ValueError (''' Invalid value for field type SFRotation: %s'''%(value))
# get the normalized vector for x,y,z
## length = (x*x+y*y+z*z)**.5 or 0.0000
## value = (x/length,y/length,z/length, a)
return value
def SFVec2f (self, value):
''' Create a new SFVec3f value from value '''
t = type(value)
try:
value = x,y = map (float, value)
except ValueError:
try:
value = (x,y) = map( float, value[0] )
except (IndexError, ValueError):
raise ValueError (''' Invalid value for field type SFVec3f: %s'''%(value))
return value
def SFColor(self, value):
''' Create a new SFVec3f value from value '''
t = type(value)
try:
r,g,b = map (float, value)
except ValueError:
try:
r,g,b = map( float, value[0] )
except (IndexError, ValueError):
raise ValueError (''' Invalid value for field type SFColor: %s'''%(value))
r = max( (0.0, min((r,1.0))) )
g = max( (0.0, min((g,1.0))) )
b = max( (0.0, min((b,1.0))) )
return value
def MFCompoundNumber( self, someobj, targetName='SFVec3f', convertfunc=float, type=type):
'''
Allowable Types:
instance ( an IS ) -> unchanged
# instance ( a matrix ) -> reshaped (eventually)
list of lists, sub-sequences of proper length -> unchanged
sequence of numeric types of proper length -> converted to list, diced
'''
## if targetName == 'SFColor':
## import pdb
## pdb.set_trace()
converter = getattr( self, targetName )
t = type( someobj)
reporterror = 0
if t is types.InstanceType:
return someobj
elif t in typeclasses.SequenceTypes:
if not someobj:
return []
if type( someobj[0] ) is not types.StringType and type( someobj[0] ) in typeclasses.SequenceTypes:
try:
return map( converter, someobj )
except ValueError:
pass
elif type( someobj[0] ) in typeclasses.NumericTypes or type( someobj[0] ) is types.StringType:
# a single-level list?
base = map( convertfunc, someobj )
# if we get here, someobj is a list
if targetName[-2:] == '2f': # vec2f
tlen = 2
elif targetName[-2:] == 'on': # rotation
tlen = 4
else:
tlen = 3
value = []
while base:
value.append( converter( base[:tlen]) )
del base[:tlen]
return value
raise ValueError, """Attempted to set value for an %s field which is not compatible: %s"""%( targetName, `someobj` )
def __call__( self, someobj, targetName):
func, args = self.algomap[targetName]
## try:
## if targetName == 'SFInt32':
## import pdb
## pdb.set_trace()
if hasattr( someobj, "__gi__") and someobj.__gi__ == "IS":
return someobj
else:
return apply( func, (self, someobj)+args )
## except TypeError:
## print someobj, targetName
## print func, args
## raise
algomap = { \
'SFString': (SFString, (types.StringType, 'SFString', str)), \
'MFString': (MFString, (types.StringType, 'MFString', str)), \
'SFInt32': (SFNumber, (types.IntType, 'SFInt32', int)), \
'SFFloat': (SFNumber, (types.FloatType, 'SFFloat', float)), \
'SFTime': (SFNumber, (types.FloatType, 'SFFloat', float)), \
'SFColor': (SFColor, ()), \
'SFVec2f': (SFVec2f, ()), \
'SFVec3f': (SFVec3f, ()), \
'SFNode': (SFNode, (types.InstanceType, 'SFNode', None)), \
'SFBool': (SFBool, (types.IntType, 'SFBool', int)), \
'SFNode': (SFNode, (types.InstanceType, 'SFNode', None)), \
'MFInt32': (MFInt32, ()), \
'SFImage': (MFInt32, ()), \
'MFTime': (MFFloat, ()), \
'MFFloat': (MFFloat, ()), \
'MFColor': (MFCompoundNumber, ('SFColor', float)), \
'MFVec2f': (MFCompoundNumber, ('SFVec2f', float)), \
'MFVec3f': (MFCompoundNumber, ('SFVec3f', float)), \
'SFRotation': (SFRotation, ()), \
'MFRotation': (MFCompoundNumber, ('SFRotation', float)), \
'MFNode': (MFNode, (types.InstanceType, 'MFNode', None)) \
}
# Shared module-level coercion instance; basenodes.py and node.py import
# this rather than building their own FieldCoercian.
FIELDCOERCE = FieldCoercian ()

View File

@@ -0,0 +1,97 @@
# The VRML loader
# supports gzipped files
#
# TODO: better progress monitoring
import parser
def quiet(txt):
    '''Silently discard a loader debug/progress message (default no-op).'''

# Module-level debug hook; rebind to debug1 for verbose console tracing.
debug = quiet
def debug1(txt):
print "Loader:", txt
g_last = 0
def getFileType(file):
    '''Identify 'file' by its leading bytes.

    Returns "gzip" for gzip-compressed data, "vrml" for a VRML 2.0
    header, or "" when the content is unrecognised.  The stream is
    rewound to the start before returning in every case.
    '''
    file.seek(0)
    magic = file.readline()
    file.seek(0)
    if magic.startswith('\037\213\010'):
        return "gzip"
    if magic.startswith('#VRML V2.0'):
        return "vrml"
    return ""
class Loader:
def __init__(self, url, progress = None):
self.url = url
self.debug = debug
self.fail = debug
self.monitor = debug
self.progress = progress
self.nodes = 0 # number of nodes parsed
def getGzipFile(self, file):
'''Return gzip file (only called when gzip type is recognised)'''
# we now have the local filename and the headers
# read the first few bytes, check for gzip magic number
self.monitor( "gzip-encoded file... loading gzip library")
try:
import gzip
file = gzip.open(file,"rb")
return file
except ImportError, value:
self.fail("Gzip library unavailable, compressed file cannot be read")
except:
self.fail("Failed to open Gzip file")
return None
def load(self):
self.debug("try: load file from %s" % self.url)
url = self.url
# XXX
try:
file = open(url, 'rb')
except IOError, val:
self.debug("couldn't open file %s" % url)
return None
if getFileType(file) == 'gzip':
file.close()
file = self.getGzipFile(url)
try:
data = file.read()
except MemoryError, value:
self.fail("Insufficient memory to load file as string", value)
return None
except IOError, value:
self.fail("I/O Error while reading data from file %s "% url)
p = parser.Parser(data)
if self.progress:
scenegraph = p.parse(self.progress)
print "progress"
else:
scenegraph = p.parse()
self.nodes = p.progresscount # progress
del p
return scenegraph
def load(url, progress = None):
    '''Convenience wrapper: load the file at *url* and return its
    scenegraph (None on failure), forwarding *progress* to the Loader.'''
    return Loader(url, progress).load()
def test(name = None):
    '''Quick manual check: parse *name* (defaults to /tmp/gna.wrl) and
    return the resulting scenegraph.'''
    return load(name or '/tmp/gna.wrl')

View File

@@ -0,0 +1,426 @@
from TextTools import TextTools
from simpleparse import generator
import scenegraph as proto
import strop as string
# Fraction of the overall import attributed to the parse phase; parse()
# scales its progress-callback value by this constant.
IMPORT_PARSE_TIME = 0.4
# Only nodes at nesting depth below this contribute to progresscount
# (see Parser._dispatch).
PROGRESS_DEPTH = 5
class UnfinishedError(Exception):
    '''Raised when parsing stops before the whole file has been consumed
    (user cancellation or unparseable trailing content).'''
class Parser:
def __init__( self, data ):
self.data = data
self.position = 0
self.result = proto.sceneGraph()
self.finalised = None
self.sceneGraphStack = [self.result]
self.prototypeStack = []
self.nodeStack = []
self.fieldTypeStack = []
self.readHeader()
self.depth = 0
self.progresscount = 0
def _lines( self, index=None ):
if index is None:
index = self.position
return TextTools.countlines (self.data[:index])
def parse( self, progressCallback=None ):
datalength = float( len( self.data ))
while self.readNext():
if progressCallback:
if not progressCallback(IMPORT_PARSE_TIME * self.position/datalength ):
raise UnfinishedError(
"Did not complete parsing, cancelled by user. Stopped at line %s" %(self._lines())
)
if self.position < len( self.data ):
raise UnfinishedError(
'''Unable to complete parsing of file, stopped at line %s:\n%s...'''%(self._lines(), self.data[self.position:self.position+120])
)
return self.result
def readHeader( self ):
'''Read the file header'''
success, tags, next = TextTools.tag( self.data, HEADERPARSER, self.position )
if success:
self.datalength = len( self.data )
#print "header ok"
return success
else:
try:
self.decompress()
success, tags, next = TextTools.tag( self.data, HEADERPARSER, self.position )
self.datalength = len( self.data )
return success
except:
raise ValueError( "Could not find VRML97 header in file!" )
def readNext( self):
'''Read the next root-level construct'''
success, tags, next = TextTools.tag( self.data, ROOTITEMPARSER, self.position )
## print 'readnext', success
if self.position >= self.datalength:
print 'reached file end'
return None
if success:
# print ' successful parse'
self.position = next
map (self.rootItem_Item, tags )
return success
else:
return None
def rootItem (self, (type, start, stop, (item,))):
''' Process a single root item '''
self.rootItem_Item( item )
def rootItem_Item( self, item ):
result = self._dispatch(item)
if result is not None:
## print "non-null result"
## print id( self.sceneGraphStack[-1] ), id(self.result )
self.sceneGraphStack[-1].children.append( result )
def _getString (self, (tag, start, stop, sublist)):
''' Return the raw string for a given interval in the data '''
return self.data [start: stop]
def _dispatch (self, (tag, left, right, sublist)):
''' Dispatch to the appropriate processing function based on tag value '''
## print "dispatch", tag
self.depth += 1
if self.depth < PROGRESS_DEPTH:
self.progresscount += 1
try:
meth = getattr (self, tag)
except AttributeError:
raise AttributeError("Unknown parse tag '%s' found! Check the parser definition!" % (tag))
ret = meth( (tag, left, right, sublist) )
self.depth -= 1
return ret
def Proto(self, (tag, start, stop, sublist)):
''' Create a new prototype in the current sceneGraph '''
# first entry is always ID
ID = self._getString ( sublist [0])
print "PROTO",ID
newNode = proto.Prototype (ID)
## print "\t",newNode
setattr ( self.sceneGraphStack [-1].protoTypes, ID, newNode)
self.prototypeStack.append( newNode )
# process the rest of the entries with the given stack
map ( self._dispatch, sublist [1:] )
self.prototypeStack.pop( )
def fieldDecl(self,(tag, left, right, (exposure, datatype, name, field))):
''' Create a new field declaration for the current prototype'''
# get the definition in recognizable format
exposure = self._getString (exposure) == "exposedField"
datatype = self._getString (datatype)
name = self._getString (name)
# get the vrml value for the field
self.fieldTypeStack.append( datatype )
field = self._dispatch (field)
self.fieldTypeStack.pop( )
self.prototypeStack[-1].addField ((name, datatype, exposure), field)
def eventDecl(self,(tag, left, right, (direction, datatype, name))):
# get the definition in recognizable format
direction = self._getString (direction) == "eventOut"
datatype = self._getString (datatype)
name = self._getString (name)
# get the vrml value for the field
self.prototypeStack[-1].addEvent((name, datatype, direction))
def decompress( self ):
pass
def ExternProto( self, (tag, start, stop, sublist)):
''' Create a new external prototype from a tag list'''
# first entry is always ID
ID = self._getString ( sublist [0])
newNode = proto.Prototype (ID)
setattr ( self.sceneGraphStack [-1].protoTypes, ID, newNode)
self.prototypeStack.append( newNode )
# process the rest of the entries with the given stack
map ( self._dispatch, sublist [1:] )
self.prototypeStack.pop( )
def ExtProtoURL( self, (tag, start, stop, sublist)):
''' add the url to the external prototype '''
## print sublist
values = self.MFString( sublist )
self.prototypeStack[-1].url = values
return values
def extFieldDecl(self, (tag, start, stop, (exposure, datatype, name))):
''' An external field declaration, no default value '''
# get the definition in recognizable format
exposure = self._getString (exposure) == "exposedField"
datatype = self._getString (datatype)
name = self._getString (name)
# get the vrml value for the field
self.prototypeStack[-1].addField ((name, datatype, exposure))
def ROUTE(self, (tag, start, stop, names )):
''' Create a new route object, add the current sceneGraph '''
names = map(self._getString, names)
self.sceneGraphStack [-1].addRoute( names )
	def Node (self, (tag, start, stop, sublist)):
		''' Create new node, returning the value to the caller'''
		# an optional leading 'name' production means the node was DEF'd
		if sublist[0][0] == 'name':
			name = self._getString ( sublist [0])
			ID = self._getString ( sublist [1])
			rest = sublist [2:]
		else:
			name = ""
			ID = self._getString ( sublist [0])
			rest = sublist [1:]
		try:
			prototype = getattr ( self.sceneGraphStack [-1].protoTypes, ID)
		except AttributeError:
			# deliberately non-fatal: warn and skip unknown node types
			#raise NameError ('''Prototype %s used without declaration! %s:%s'''%(ID, start, stop) )
			print ('''### Prototype %s used without declaration! %s:%s'''%(ID, start, stop) )
			return None
		# calling the prototype produces a Node instance
		newNode = prototype(name)
		if name:
			self.sceneGraphStack [-1].regDefName( name, newNode )
		# children/attributes dispatch against the node on top of nodeStack
		self.nodeStack.append (newNode)
		map (self._dispatch, rest)
		self.nodeStack.pop ()
		return newNode
	def Attr(self, (tag, start, stop, (name, value))):
		''' An attribute of a node or script '''
		name = self._getString ( name )
		# push the declared field type so the value converter knows what to build
		self.fieldTypeStack.append( self.nodeStack[-1].PROTO.getField( name ).type )
		value = self._dispatch( value )
		self.fieldTypeStack.pop()
		if hasattr( self.nodeStack[-1], "__setattr__" ):
			# raw=1 skips field coercion: the value was already parsed to type
			self.nodeStack[-1].__setattr__( name, value, raw=1 )
		else:
			# use slower coercing versions...
			setattr( self.nodeStack[-1], name, value )
	def Script( self, (tag, start, stop, sublist)):
		''' A script node (can be a root node)'''
		# what's the DEF name...
		if sublist and sublist[0][0] == 'name':
			name = self._getString ( sublist [0])
			rest = sublist [1:]
		else:
			name = ""
			rest = sublist
		# build the script node (a Node with its own per-instance prototype)
		newNode = proto.Script( name )
		# register with sceneGraph
		if name:
			self.sceneGraphStack [-1].regDefName( name, newNode )
		# field/event declarations and attributes dispatch against the node
		self.nodeStack.append (newNode)
		map( self._dispatch, rest )
		self.nodeStack.pop ()
		return newNode
	def ScriptEventDecl( self,(tag, left, right, sublist)):
		'''An eventIn/eventOut declaration inside a Script node'''
		# get the definition in recognizable format
		direction, datatype, name = sublist[:3] # must have at least these...
		direction = self._getString (direction) == "eventOut"
		datatype = self._getString (datatype)
		name = self._getString (name)
		# register the event on the script's own prototype
		self.nodeStack[-1].PROTO.addEvent((name, datatype, direction))
		if sublist[3:]:
			# optional trailing IS mapping; stored as the attribute value
			# NOTE(review): no fieldTypeStack push here, unlike ScriptFieldDecl -- confirm intended
			setattr( self.nodeStack[-1], name, self._dispatch( sublist[3] ) )
	def ScriptFieldDecl(self,(tag, left, right, (exposure, datatype, name, field))):
		''' Create a new field declaration for the current prototype'''
		# get the definition in recognizable format
		exposure = self._getString (exposure) == "exposedField"
		datatype = self._getString (datatype)
		name = self._getString (name)
		# get the vrml value for the field (or an IS mapping)
		self.fieldTypeStack.append( datatype )
		field = self._dispatch (field)
		self.fieldTypeStack.pop( )
		# declaration goes on the script's prototype; the parsed value is
		# stored on the node instance itself rather than as a proto default
		self.nodeStack[-1].PROTO.addField ((name, datatype, exposure))
		setattr( self.nodeStack[-1], name, field )
def SFNull(self, tup):
''' Create a reference to the SFNull node '''
## print 'hi'
return proto.NULL
	def USE( self, (tag, start, stop, (nametuple,) )):
		''' Create a reference to an already defined node'''
		name = self._getString (nametuple)
		# count shallow USEs toward parse-progress reporting
		if self.depth < PROGRESS_DEPTH:
			self.progresscount += 1
		try:
			node = self.sceneGraphStack [-1].defNames [name]
			return node
		except KeyError:
			raise NameError ('''USE without DEF for node %s %s:%s'''%(name, start, stop))
def IS(self, (tag, start, stop, (nametuple,))):
''' Create a field reference '''
name = self._getString (nametuple)
if not self.prototypeStack [-1].getField (name):
raise Exception (''' Attempt to create IS mapping of non-existent field %s %s:%s'''%(name, start, stop))
return proto.IS(name)
	def Field( self, (tag, start, stop, sublist)):
		''' A field value (of any type) '''
		# node-valued content: honour SFNode (single) vs MFNode (list) context
		if sublist and sublist[0][0] in ('USE','Script','Node','SFNull'):
			if self.fieldTypeStack[-1] == 'SFNode':
				return self._dispatch( sublist[0] )
			else:
				return map( self._dispatch, sublist )
		elif self.fieldTypeStack[-1] == 'MFNode':
			# empty MFNode value
			return []
		else:
			# is a simple data type... delegate to the SF*/MF* converter method
			function = getattr( self, self.fieldTypeStack[-1] )
			try:
				return function( sublist )
			except ValueError:
				traceback.print_exc()
				print sublist
				raise
def SFBool( self, (tup,) ):
'''Boolean, in Python tradition is either 0 or 1'''
return self._getString(tup) == 'TRUE'
def SFFloat( self, (x,) ):
return string.atof( self._getString(x) )
SFTime = SFFloat
def SFInt32( self, (x,) ):
return string.atoi( self._getString(x), 0 ) # allow for non-decimal numbers
def SFVec3f( self, (x,y,z) ):
return map( string.atof, map(self._getString, (x,y,z)) )
def SFVec2f( self, (x,y) ):
return map( string.atof, map(self._getString, (x,y)) )
def SFColor( self, (r,g,b) ):
return map( string.atof, map(self._getString, (r,g,b)) )
def SFRotation( self, (x,y,z,a) ):
return map( string.atof, map(self._getString, (x,y,z,a)) )
def MFInt32( self, tuples ):
result = []
# localisation
atoi = string.atoi
append = result.append
data = self.data
for tag, start, stop, children in tuples:
append( atoi( data[start:stop], 0) )
return result
SFImage = MFInt32
def MFFloat( self, tuples ):
result = []
# localisation
atof = string.atof
append = result.append
data = self.data
for tag, start, stop, children in tuples:
append( atof( data[start:stop]) )
return result
MFTime = MFFloat
	def MFVec3f( self, tuples, length=3, typename='MFVec3f'):
		'''Parse a flat token list into a list of `length`-float vectors.
		NOTE: consumes `tuples` destructively (del tuples[:length]).
		Raises ValueError when the token count is not a multiple of `length`.'''
		result = []
		# localisation
		atof = string.atof
		data = self.data
		while tuples:
			newobj = []
			for tag, start, stop, children in tuples[:length]:
				newobj.append( atof(data[start:stop] ))
			if len(newobj) != length:
				raise ValueError(
					'''Incorrect number of elements in %s field at line %s'''%(typename, self._lines(stop))
				)
			result.append( newobj )
			del tuples[:length]
		return result
def MFVec2f( self, tuples):
return self.MFVec3f( tuples, length=2, typename='MFVec2f')
def MFRotation( self, tuples ):
return self.MFVec3f( tuples, length=4, typename='MFRotation')
def MFColor( self, tuples ):
return self.MFVec3f( tuples, length=3, typename='MFColor')
	def MFString( self, tuples ):
		'''List of strings: unescape each quoted string from the raw data'''
		bigresult = []
		for (tag, start, stop, sublist) in tuples:
			result = []
			for element in sublist:
				if element[0] == 'CHARNODBLQUOTE':
					# plain run of characters
					result.append( self.data[element[1]:element[2]] )
				elif element[0] == 'ESCAPEDCHAR':
					# drop the leading backslash of \" or \\ sequences
					result.append( self.data[element[1]+1:element[2]] )
				elif element[0] == 'SIMPLEBACKSLASH':
					result.append( '\\' )
			bigresult.append( string.join( result, "") )
		return bigresult
##		result = []
##		for tuple in tuples:
##			result.append( self.SFString( tuple) )
##		return result
	def SFString( self, tuples ):
		'''Return the (escaped) string as a simple Python string'''
		if tuples:
			(tag, start, stop, sublist) = tuples[0]
			if len( tuples ) > 1:
				# extra strings are ignored; only the first is returned
				print '''Warning: SFString field has more than one string value''', self.data[tuples[0][1]:tuples[-1][2]]
			result = []
			for element in sublist:
				if element[0] == 'CHARNODBLQUOTE':
					result.append( self.data[element[1]:element[2]] )
				elif element[0] == 'ESCAPEDCHAR':
					# drop the leading backslash of \" or \\ sequences
					result.append( self.data[element[1]+1:element[2]] )
				elif element[0] == 'SIMPLEBACKSLASH':
					result.append( '\\' )
			return string.join( result, "")
		else:
			raise ValueError( "NULL SFString parsed???!!!" )
	def vrmlScene( self, (tag, start, stop, sublist)):
		'''A (prototype's) vrml sceneGraph'''
		# new graph chains its prototype namespace up to the enclosing graph
		newNode = proto.sceneGraph (root=self.sceneGraphStack [-1])
		self.sceneGraphStack.append (newNode)
		#print 'setting proto sceneGraph', `newNode`
		self.prototypeStack[-1].sceneGraph = newNode
		results = filter (None, map (self._dispatch, sublist))
		if results:
			# items which are not auto-magically inserted into their parent
			for result in results:
				newNode.children.append( result)
		self.sceneGraphStack.pop()
PARSERDECLARATION = r'''header := -[\n]*
rootItem := ts,(Proto/ExternProto/ROUTE/('USE',ts,USE,ts)/Script/Node),ts
vrmlScene := rootItem*
Proto := 'PROTO',ts,nodegi,ts,'[',ts,(fieldDecl/eventDecl)*,']', ts, '{', ts, vrmlScene,ts, '}', ts
fieldDecl := fieldExposure,ts,dataType,ts,name,ts,Field,ts
fieldExposure := 'field'/'exposedField'
dataType := 'SFBool'/'SFString'/'SFFloat'/'SFTime'/'SFVec3f'/'SFVec2f'/'SFRotation'/'SFInt32'/'SFImage'/'SFColor'/'SFNode'/'MFBool'/'MFString'/'MFFloat'/'MFTime'/'MFVec3f'/'MFVec2f'/'MFRotation'/'MFInt32'/'MFColor'/'MFNode'
eventDecl := eventDirection, ts, dataType, ts, name, ts
eventDirection := 'eventIn'/'eventOut'
ExternProto := 'EXTERNPROTO',ts,nodegi,ts,'[',ts,(extFieldDecl/eventDecl)*,']', ts, ExtProtoURL
extFieldDecl := fieldExposure,ts,dataType,ts,name,ts
ExtProtoURL := '['?,(ts,SFString)*, ts, ']'?, ts # just an MFString by another name :)
ROUTE := 'ROUTE',ts, name,'.',name, ts, 'TO', ts, name,'.',name, ts
Node := ('DEF',ts,name,ts)?,nodegi,ts,'{',ts,(Proto/ExternProto/ROUTE/Attr)*,ts,'}', ts
Script := ('DEF',ts,name,ts)?,'Script',ts,'{',ts,(ScriptFieldDecl/ScriptEventDecl/Proto/ExternProto/ROUTE/Attr)*,ts,'}', ts
ScriptEventDecl := eventDirection, ts, dataType, ts, name, ts, ('IS', ts, IS,ts)?
ScriptFieldDecl := fieldExposure,ts,dataType,ts,name,ts,(('IS', ts,IS,ts)/Field),ts
SFNull := 'NULL', ts
# should really have an optimised way of declaring a different reporting name for the same production...
USE := name
IS := name
nodegi := name
Attr := name, ts, (('IS', ts,IS,ts)/Field), ts
Field := ( '[',ts,((SFNumber/SFBool/SFString/('USE',ts,USE,ts)/Script/Node),ts)*, ']', ts )/((SFNumber/SFBool/SFNull/SFString/('USE',ts,USE,ts)/Script/Node),ts)+
name := -[][0-9{}\000-\020"'#,.\\ ], -[][{}\000-\020"'#,.\\ ]*
SFNumber := [-+]*, ( ('0',[xX],[0-9]+) / ([0-9.]+,([eE],[-+0-9.]+)?))
SFBool := 'TRUE'/'FALSE'
SFString := '"',(CHARNODBLQUOTE/ESCAPEDCHAR/SIMPLEBACKSLASH)*,'"'
CHARNODBLQUOTE := -[\134"]+
SIMPLEBACKSLASH := '\134'
ESCAPEDCHAR := '\\"'/'\134\134'
<ts> := ( [ \011-\015,]+ / ('#',-'\012'*,'\n')+ )*
'''
PARSERTABLE = generator.buildParser( PARSERDECLARATION )
HEADERPARSER = PARSERTABLE.parserbyname( "header" )
ROOTITEMPARSER = PARSERTABLE.parserbyname( "rootItem" )

View File

@@ -0,0 +1,833 @@
# VRML node prototype class (SGbuilder)
# Wed Oct 31 16:18:35 CET 2001
'''Prototype2 -- VRML 97 sceneGraph/Node/Script/ROUTE/IS implementations'''
import copy, types # extern
import strop as string # builtin
from utils import typeclasses, err, namespace # XXX
## TODO: namespace must go
class baseProto:
	'''Shared behaviour for Prototype/Node/sceneGraph objects:
	VRML 97 linearisation plus optional "target node" linking support.'''
	def __vrmlStr__( self, **namedargs ):
		'''Generate a VRML 97-syntax string representing this Prototype
		**namedargs -- key:value
			passed arguments for the linearisation object
			see lineariser4.Lineariser
		'''
		import lineariser4
		lineariser = apply( lineariser4.Lineariser, (), namedargs )
		return apply( lineariser.linear, ( self, ), namedargs )
	toString = __vrmlStr__
	# added stuff for linking support for target scenegraph
	def setTargetnode(self, node):
		'''Attach a translated/target node.
		Stored directly in __dict__ so Node.__setattr__ hooks are bypassed.'''
		self.__dict__['_targetnode'] = node
	def getTargetnode(self):
		'''Return the node set by setTargetnode, or None when never set.'''
		try:
			return self.__dict__['_targetnode']
		except KeyError:
			# narrowed from a bare ``except:`` -- only "key not present"
			# should be interpreted as "no target node"
			return None
class Prototype(baseProto):
''' A VRML 97 Prototype object
A Prototype is a callable object which produces Node instances
the Node uses a pointer to its Prototype to provide much of the
Node's standard functionality.
Prototype's are often stored in a sceneGraph's protoTypes namespace,
where you can access them as sceneGraph.protoTypes.nodeGI . They are
also commonly found in Nodes' PROTO attributes.
Attributes:
__gi__ -- constant string "PROTO"
nodeGI -- string gi
The "generic identifier" of the node type, i.e. the name of the node
fieldDictionary -- string name: (string name, string dataType, boolean exposed)
defaultDictionary -- string name: object defaultValue
Will be blank for EXTERNPROTO's and Script prototypes
eventDictionary -- string name: (string name, string dataType, boolean eventOut)
sceneGraph -- object sceneGraph
MFNodeNames -- list of field name strings
Allows for easy calculation of "children" nodes
SFNodeNames -- list of field name strings
Allows for easy calculation of "children" nodes
'''
__gi__ = "PROTO"
def __init__(self, gi, fieldDict=None, defaultDict=None, eventDict=None, sGraph=None):
'''
gi -- string gi
see attribute nodeGI
fieldDict -- string name: (string name, string dataType, boolean exposed)
see attribute fieldDictionary
defaultDict -- string name: object defaultValue
see attribute defaultDictionary
eventDict -- string name: (string name, string dataType, boolean eventOut)
see attribute eventDictionary
sceneGraph -- object sceneGraph
see attribute sceneGraph
'''
self.nodeGI = checkName( gi )
self.fieldDictionary = {}
self.defaultDictionary = {}
self.eventDictionary = {}
self.SFNodeNames = []
self.MFNodeNames = []
self.sceneGraph = sGraph
# setup the fields/events
for definition in (fieldDict or {}).values():
self.addField( definition, (defaultDict or {}).get( definition[0]))
for definition in (eventDict or {}).values():
self.addEvent( definition )
def getSceneGraph( self ):
''' Retrieve the sceneGraph object (may be None object)
see attribute sceneGraph'''
return self.sceneGraph
def setSceneGraph( self, sceneGraph ):
''' Set the sceneGraph object (may be None object)
see attribute sceneGraph'''
self.sceneGraph = sceneGraph
def getChildren(self, includeSceneGraph=None, includeDefaults=1, *args, **namedargs):
''' Calculate the current children of the PROTO and return as a list of nodes
if includeDefaults:
include those default values which are node values
if includeSceneGraph:
include the sceneGraph object if it is not None
see attribute MFNodeNames
see attribute SFNodeNames
see attribute sceneGraph
'''
temp = []
if includeDefaults:
for attrname in self.SFNodeNames:
try:
temp.append( self.defaultDictionary[attrname] )
except KeyError: # sceneGraph object is not copied...
pass
for attrname in self.MFNodeNames:
try:
temp[len(temp):] = self.defaultDictionary[attrname]
except KeyError:
pass
if includeSceneGraph and self.sceneGraph:
temp.append( self.getSceneGraph() )
return temp
def addField (self, definition, default = None):
''' Add a single field definition to the Prototype
definition -- (string name, string dataType, boolean exposed)
default -- object defaultValue
see attribute fieldDictionary
see attribute defaultDictionary
'''
if type (definition) == types.InstanceType:
definition = definition.getDefinition()
default = definition.getDefault ()
self.removeField( definition[0] )
self.fieldDictionary[definition [0]] = definition
if default is not None:
default = fieldcoercian.FieldCoercian()( default, definition[1] )
self.defaultDictionary [definition [0]] = default
if definition[1] == 'SFNode':
self.SFNodeNames.append(definition[0])
elif definition[1] == 'MFNode':
self.MFNodeNames.append(definition[0])
def removeField (self, key):
''' Remove a single field from the Prototype
key -- string fieldName
The name of the field to remove
'''
if self.fieldDictionary.has_key (key):
del self.fieldDictionary [key]
if self.defaultDictionary.has_key (key):
del self.defaultDictionary [key]
for attribute in (self.SFNodeNames, self.MFNodeNames):
while key in attribute:
attribute.remove(key)
def addEvent(self, definition):
''' Add a single event definition to the Prototype
definition -- (string name, string dataType, boolean eventOut)
see attribute eventDictionary
'''
if type (definition) == types.InstanceType:
definition = definition.getDefinition()
self.eventDictionary[definition [0]] = definition
def removeEvent(self, key):
''' Remove a single event from the Prototype
key -- string eventName
The name of the event to remove
'''
if self.eventDictionary.has_key (key):
del self.eventDictionary [key]
def getField( self, key ):
'''Return a Field or Event object representing a given name
key -- string name
The name of the field or event to retrieve
will attempt to match key, key[4:], and key [:-8]
corresponding to key, set_key and key_changed
see class Field
see class Event
'''
# print self.fieldDictionary, self.eventDictionary
for tempkey in (key, key[4:], key[:-8]):
if self.fieldDictionary.has_key( tempkey ):
return Field( self.fieldDictionary[tempkey], self.defaultDictionary.get(tempkey) )
elif self.eventDictionary.has_key( tempkey ):
return Event( self.eventDictionary[tempkey] )
raise AttributeError, key
def getDefault( self, key ):
'''Return the default value for the given field
key -- string name
The name of the field
Will attempt to match key, key[4:], and key [:-8]
corresponding to key, set_key and key_changed
see attribute defaultDictionary
'''
for key in (key, key[4:], key[:-8]):
if self.defaultDictionary.has_key( key ):
val = self.defaultDictionary[key]
if type(val) in typeclasses.MutableTypes:
val = copy.deepcopy( val )
return val
elif self.fieldDictionary.has_key( key ):
'''We have the field, but we don't have a default, we are likely an EXTERNPROTO'''
return None
raise AttributeError, key
def setDefault (self, key, value):
'''Set the default value for the given field
key -- string name
The name of the field to set
value -- object defaultValue
The default value, will be checked for type and coerced if necessary
'''
field = self.getField (key)
self.defaultDictionary [field.name]= field.coerce (value)
def clone( self, children = 1, sceneGraph = 1 ):
'''Return a copy of this Prototype
children -- boolean
if true, copy the children of the Prototype, otherwise include them
sceneGraph -- boolean
if true, copy the sceneGraph of the Prototype
'''
if sceneGraph:
sceneGraph = self.sceneGraph
else:
sceneGraph = None
# defaults should always be copied before modification, but this is still dangerous...
defaultDictionary = self.defaultDictionary.copy()
if not children:
for attrname in self.SFNodeNames+self.MFNodeNames:
try:
del defaultDictionary[attrname]
except KeyError: # sceneGraph object is not copied...
pass
# now make a copy
if self.__gi__ == "PROTO":
newNode = self.__class__(
self.nodeGI,
self.fieldDictionary,
defaultDictionary,
self.eventDictionary,
sceneGraph,
)
else:
newNode = self.__class__(
self.nodeGI,
self.url,
self.fieldDictionary,
self.eventDictionary,
)
return newNode
def __call__(self, *args, **namedargs):
'''Create a new Node instance associated with this Prototype
*args, **namedargs -- passed to the Node.__init__
see class Node
'''
node = apply( Node, (self, )+args, namedargs )
return node
def __repr__ ( self ):
'''Create a simple Python representation'''
return '''%s( %s )'''%( self.__class__.__name__, self.nodeGI )
class ExternalPrototype( Prototype ):
	'''Sub-class of Prototype
	The ExternalPrototype is a minor sub-classing of the Prototype
	it does not have any defaults, nor a sceneGraph
	Attributes:
		__gi__ -- constant string "EXTERNPROTO"
		url -- string list urls
			implementation source for the ExternalPrototype
	'''
	__gi__ = "EXTERNPROTO"
	def __init__(self, gi, url=None, fieldDict=None, eventDict=None):
		'''
		gi -- string gi
			see attribute nodeGI
		url -- string list url
			MFString-compatible list of url's for EXTERNPROTO
		fieldDict -- string name: (string name, string dataType, boolean exposed)
			see attribute fieldDictionary
		eventDict -- string name: (string name, string dataType, boolean eventOut)
			see attribute eventDictionary
		'''
		# avoid a shared mutable default: each instance gets a fresh list
		if url is None:
			url = []
		self.url = url
		# note: no defaultDict/sGraph -- EXTERNPROTOs carry neither
		Prototype.__init__( self, gi, fieldDict=fieldDict, eventDict=eventDict)
from vrml import fieldcoercian # XXX
class Field:
	''' Representation of a Prototype Field
	The Field object is a simple wrapper to provide convenient
	access to field coercian and meta- information
	'''
	def __init__( self, specification, default=None ):
		'''specification -- (string name, string dataType, boolean exposed)'''
		self.name, self.type, self.exposure = specification
		self.default = default
	def getDefinition (self):
		'''Return the (name, dataType, exposed) specification tuple'''
		return self.name, self.type, self.exposure
	def getDefault (self):
		'''Return the default value (may be None)'''
		return self.default
	def coerce( self, value ):
		''' Coerce value to the appropriate dataType for this Field '''
		return fieldcoercian.FieldCoercian()( value,self.type, )
	def __repr__( self ):
		if hasattr (self, "default"):
			return '%s( (%s,%s,%s), %s)'%( self.__class__.__name__, self.name, self.type, self.exposure, self.default)
		else:
			return '%s( (%s,%s,%s),)'%( self.__class__.__name__, self.name, self.type, self.exposure)
	def __str__( self ):
		'''Return a VRML 97 field-declaration string for this field'''
		if self.exposure:
			exposed = "exposedField"
		else:
			# bugfix: was the bare name ``field`` (a NameError at runtime);
			# the intended value is the literal keyword string
			exposed = "field"
		if hasattr (self, "default"):
			default = ' ' + str( self.default)
		else:
			default = ""
		return '%s %s %s%s'%(exposed, self.type, self.name, default)
class Event (Field):
	'''Field subclass representing an eventIn/eventOut declaration'''
	def __str__( self ):
		# the exposure flag doubles as the event-direction marker
		if self.exposure:
			direction = "eventOut"
		else:
			direction = "eventIn"
		return '%s %s %s'%(direction, self.type, self.name)
### Translation strings for VRML node names...
# Characters illegal anywhere in a VRML id (and digits, which are illegal
# only in the first position) are mapped to '_' by these two tables.
translationstring = '''][0123456789{}"'#,.\\ \000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023'''
NAMEFIRSTCHARTRANSLATOR = string.maketrans( translationstring, '_'*len(translationstring) )
translationstring = '''][{}"'#,.\\ \000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023'''
NAMERESTCHARTRANSLATOR = string.maketrans( translationstring, '_'*len(translationstring) )
del translationstring
def checkName( name ):
	'''Convert arbitrary string to a valid VRML id
	name -- string candidate identifier (empty string passes unchanged)
	Raises TypeError for non-string input.'''
	if type(name) is types.StringType:
		if not name:
			return name
		# first character has stricter rules (no digits) than the rest
		return string.translate( name[:1], NAMEFIRSTCHARTRANSLATOR) + string.translate( name[1:], NAMERESTCHARTRANSLATOR)
	else:
		raise TypeError, "VRML Node Name must be a string, was a %s: %s"%(type(name), name)
class Node(baseProto):
''' A VRML 97 Node object
A Node object represents a VRML 97 node. Attributes of the Node
can be set/retrieved with standard python setattr/getattr syntax.
VRML 97 attributes may be passed to the constructor as named
arguments.
Attributes:
__gi__ -- string PROTOname
DEF -- string DEFName
The DEF name of the node, will be coerced to be a valid
identifier (with "" being considered valid)
PROTO -- Prototype PROTO
The node's Prototype object
attributeDictionary -- string name: object value
Dictionary in which VRML 97 attributes are stored
'''
DEF = '' # the default name for all nodes (arbitrary)
def __init__(self, PROTO, name='', attrDict=None, *args, **namedargs):
'''Normally this method is only called indirectly via the Prototype() interface
PROTO -- Prototype PROTO
see attribute PROTO
name -- string DEFName
see attribute DEF
attrDict -- string name: object value
see attribute attributeDictionary
**namedargs -- string name: object value
added to attrDict to create attributeDictionary
'''
self.__dict__["PROTO"] = PROTO
self.DEF = name
self.__dict__["attributeDictionary"] = {}
## print attrDict, namedargs
for dict in (attrDict or {}), namedargs:
if dict:
for key, value in dict.items ():
self.__setattr__( key, value, check=1 )
def __setattr__( self, key, value, check=1, raw=0 ):
'''Set attribute on Node
key -- string attributeName
value -- object attributeValue
check -- boolean check
if false, put values for unrecognized keys into __dict__
otherwise, raise an AttributeError
'''
if key == "DEF":
self.__dict__["DEF"] = checkName( value )
return None
elif key == "PROTO":
self.__dict__["PROTO"] = value
try:
field = self.PROTO.getField( key )
if (hasattr( value, "__gi__") and value.__gi__ == "IS") or raw:
self.attributeDictionary[ field.name] = value
else:
self.attributeDictionary[ field.name] = field.coerce( value )
except ValueError, x:
raise ValueError( "Could not coerce value %s into value of VRML type %s for %s node %s's field %s"%( value, field.type, self.__gi__, self.DEF, key), x.args)
except (AttributeError), x:
if check:
raise AttributeError("%s is not a known field for node %s"%(key, repr(self)))
else:
self.__dict__[key] = value
def __getattr__( self, key, default = 1 ):
''' Retrieve an attribute when standard lookup fails
key -- string attributeName
default -- boolean default
if true, return the default value if the node does not have local value
otherwise, raise AttributeError
'''
if key != "attributeDictionary":
if self.__dict__.has_key( key):
return self.__dict__[ key ]
elif self.attributeDictionary.has_key( key):
return self.attributeDictionary[key]
if key != "PROTO":
if key == "__gi__":
return self.PROTO.nodeGI
elif default:
try:
default = self.PROTO.getDefault( key )
if type( default ) in typeclasses.MutableTypes:
# we need a copy, not the original
default = copy.deepcopy( default )
self.__setattr__( key, default, check=0, raw=1 )
return default
except AttributeError:
pass
raise AttributeError, key
def __delattr__( self, key ):
''' Delete an attribute from the Node
key -- string attributeName
'''
if key != "attributeDictionary":
if self.attributeDictionary.has_key( key):
del self.attributeDictionary[key]
elif self.__dict__.has_key( key):
del self.__dict__[ key ]
raise AttributeError, key
def __repr__(self):
''' Create simple python representation '''
return '<%s(%s): %s>'%(self.__gi__, `self.DEF`, self.attributeDictionary.keys() )
def getChildrenNames( self, current = 1, *args, **namedargs ):
''' Get the (current) children of Node
returns two lists: MFNode children, SFNode children
current -- boolean currentOnly
if true, only return current children
otherwise, include all potential children
'''
MFNODES, SFNODES = self.PROTO.MFNodeNames, self.PROTO.SFNodeNames
mns, sns = [],[]
for key in MFNODES:
if current and self.attributeDictionary.has_key(key):
mns.append(key)
elif not current:
mns.append(key)
for key in SFNODES:
if self.attributeDictionary.has_key(key):
sns.append(key)
elif not current:
sns.append(key)
return mns,sns
def calculateChildren(self, *args, **namedargs):
'''Calculate the current children of the Node as list of Nodes
'''
MFNODES, SFNODES = self.getChildrenNames( )
temp = []
for key in MFNODES:
try:
temp.extend( self.__getattr__( key, default=0 ) )
except AttributeError:
pass
for key in SFNODES:
try:
temp.append( self.__getattr__(key, default = 0 ) )
except AttributeError:
pass
return temp
def clone(self, newclass=None, name=None, children=None, attrDeepCopy=1, *args, **namedargs):
'''Return a copy of this Node
newclass -- object newClass or None
optionally use a different Prototype as base
name -- string DEFName or None or 1
if 1, copy from current
elif None, set to ""
else, set to passed value
children -- boolean copyChildren
if true, copy the children of this node
otherwise, skip children
attrDeepCopy -- boolean deepCopy
if true, use deepcopy
otherwise, use copy
'''
if attrDeepCopy:
cpy = copy.deepcopy
else:
cpy = copy.copy
newattrs = self.attributeDictionary.copy()
if not children:
mnames,snames = self.getChildrenNames( )
for key in mnames+snames:
try:
del(newattrs[key])
except KeyError:
pass
for key, val in newattrs.items():
if type(val) in typeclasses.MutableTypes:
newattrs[key] = cpy(val)
# following is Node specific, won't work for sceneGraphs, scripts, etceteras
if name == 1: # asked to copy the name
name = self.DEF
elif name is None: # asked to clear the name
name = ''
if not newclass:
newclass = self.PROTO
return newclass( name, newattrs )
def __cmp__( self, other, stop=None ):
''' Compare this node to another object/node
other -- object otherNode
stop -- boolean stopIfFailure
if true, failure to find comparison causes match failure (i.e. considered unequal)
'''
if hasattr( other, '__gi__') and other.__gi__ == self.__gi__:
try:
return cmp( self.DEF, other.DEF) or cmp( self.attributeDictionary, other.attributeDictionary )
except:
if not stop:
try:
return other.__cmp__( self , 1) # 1 being stop...
except:
pass
return -1 # could be one, doesn't really matter
def Script( name="", attrDict=None, fieldDict=None, defaultDict=None, eventDict=None, **namedarguments):
	'''Build a Script node together with its implicit per-instance prototype
	name -- string DEFName
	attrDict -- string name: object value
		see class Node.attributeDictionary
	fieldDict -- string name: (string name, string dataType, boolean exposure)
		see class Prototype.fieldDictionary
	defaultDict -- string name: object value
		see class Prototype.defaultDictionary
	eventDict -- string name: (string name, string dataType, boolean eventOut)
	'''
	# built-in Script fields; caller-supplied declarations override them
	fields = {
		'directOutput': ('directOutput', 'SFBool', 0),
		'url': ('url', "MFString", 0),
		'mustEvaluate': ('mustEvaluate', 'SFBool', 0),
	}
	if fieldDict:
		fields.update(fieldDict)
	defaults = {
		"directOutput": 0,
		"url": [],
		"mustEvaluate": 0,
	}
	if defaultDict:
		defaults.update(defaultDict)
	scriptProto = Prototype(
		"Script",
		fields,
		defaults,
		eventDict = eventDict,
	)
	# merge keyword attributes into the attribute dictionary
	if attrDict is None:
		attrDict = namedarguments
	else:
		attrDict.update( namedarguments )
	return scriptProto( name, attrDict )
class NullNode:
	'''NULL SFNode value
	A singleton standing in for VRML's NULL: inert under calling,
	cloning and linearisation.  Use the module-level NULL instance
	rather than constructing new ones.
	'''
	__gi__ = 'NULL'
	DEF = ''
	__walker_is_temporary_item__ = 1 # hacky signal to walking engine not to reject this node as already processed
	def __repr__(self):
		return '<NULL vrml SFNode>'
	def __vrmlStr__(self, *args, **namedargs):
		return ' NULL '
	toString = __vrmlStr__
	def __nonzero__(self):
		# NULL is always false
		return 0
	def __call__(self, *args, **namedargs):
		# "instantiating" NULL just yields NULL again
		return self
	def __cmp__(self, other):
		# equal to anything claiming to be NULL, unequal to everything else
		if hasattr(other, '__gi__') and other.__gi__ == self.__gi__:
			return 0
		return -1 # could be one, doesn't really matter
	def clone(self):
		# there is only one NULL; cloning returns the same object
		return self
NULL = NullNode()
class fieldRef:
	'''IS Prototype field reference
	Lightweight marker recording the prototype field name to which a
	node attribute is bound via the VRML ``IS`` syntax.
	'''
	__gi__ = 'IS'
	DEF = ''
	def __init__(self, declaredName):
		# declaredName -- string: the prototype field being referenced
		self.declaredName = declaredName
	def __repr__(self):
		return 'IS %s'%self.declaredName
	def __vrmlStr__(self, *args, **namedargs):
		return 'IS %s'%self.declaredName
	toString = __vrmlStr__
	def __cmp__(self, other):
		# IS references compare by their declared field name
		if hasattr(other, '__gi__') and other.__gi__ == self.__gi__:
			return cmp(self.declaredName, other.declaredName)
		return -1 # could be one, doesn't really matter
	def clone(self):
		return self.__class__(self.declaredName)
IS = fieldRef
class ROUTE:
	''' VRML 97 ROUTE object
	The ROUTE object keeps track of its source and destination nodes and attributes
	It generally lives in a sceneGraph's "routes" collection
	Supports tuple-style indexing: (fromNode, fromField, toNode, toField)
	'''
	__gi__ = 'ROUTE'
	def __init__( self, fromNode, fromField, toNode, toField ):
		# endpoints must already be resolved to node objects, not DEF names
		if type(fromNode) is types.StringType:
			raise TypeError( "String value for ROUTE fromNode",fromNode)
		if type(toNode) is types.StringType:
			raise TypeError( "String value for ROUTE toNode",toNode)
		self.fromNode = fromNode
		self.fromField = fromField
		self.toNode = toNode
		self.toField = toField
	def __getitem__( self, index ):
		'''Index the route as the 4-tuple (fromNode, fromField, toNode, toField)'''
		return (self.fromNode, self.fromField, self.toNode, self.toField)[index]
	def __setitem__( self, index, value ):
		'''Assign one of the four endpoints by tuple position'''
		attribute = ("fromNode","fromField","toNode", "toField")[index]
		setattr( self, attribute, value )
	def __repr__( self ):
		return 'ROUTE %s.%s TO %s.%s'%( self.fromNode.DEF, self.fromField, self.toNode.DEF, self.toField )
	def clone( self ):
		'''Return a shallow copy sharing the same endpoint nodes'''
		return self.__class__(
			self.fromNode,
			self.fromField,
			self.toNode,
			self.toField,
		)
class sceneGraph(baseProto):
''' A VRML 97 sceneGraph
Attributes:
__gi__ -- constant string "sceneGraph"
DEF -- constant string ""
children -- Node list
List of the root children of the sceneGraph, nodes/scripts only
routes -- ROUTE list
List of the routes within the sceneGraph
defNames -- string DEFName: Node node
Mapping of DEF names to their respective nodes
protoTypes -- Namespace prototypes
Namespace (with chaining lookup) collection of prototypes
getattr( sceneGraph.protoTypes, 'nodeGI' ) retrieves a prototype
'''
__gi__ = 'sceneGraph'
DEF = ''
	def __init__(self, root=None, protoTypes=None, routes=None, defNames=None, children=None, *args, **namedargs):
		'''
		root -- sceneGraph root or Dictionary root or Module root or None
			Base object for root of protoType namespace hierarchy
		protoTypes -- string nodeGI: Prototype PROTO
			Dictionary of prototype definitions
		routes -- ROUTE list or (string sourcenode, string sourceeventOut, string destinationnode, string destinationeventOut) list
			List of route objects or tuples to be added to the sceneGraph
			see attribute routes
		defNames -- string DEFName: Node node
			see attribute defNames
		children -- Node list
			see attribute children
		'''
		# fresh mutable containers per instance (avoids shared defaults)
		if children is None:
			self.children = []
		else:
			self.children = children
		if routes is None:
			self.routes = [] # how will we efficiently handle routes?
		else:
			self.routes = routes
		if defNames == None:
			self.defNames = {} # maps 'defName':Node
		else:
			self.defNames = defNames
		if protoTypes is None:
			protoTypes = {}
		if root is None:
			# no parent graph: chain prototype lookup up to the standard nodes
			from vrml import basenodes # XXX
			self.protoTypes = namespace.NameSpace(
				protoTypes,
				children = [namespace.NameSpace(basenodes)]
			)
		else: # there is a root file, so need to use it as the children instead of basenodes...
			if hasattr( root, "protoTypes"):
				self.protoTypes = namespace.NameSpace(
					protoTypes,
					children = [root.protoTypes]
				)
			else:
				self.protoTypes = namespace.NameSpace(
					protoTypes,
					children = [ namespace.NameSpace(root) ]
				)
def __getinitargs__( self ):
# we only copy our explicit protos, our routes, our defNames, and our children
# inherited protos will be pulled along by their nodes...
return None, self.protoTypes._base, self.routes, self.defNames, self.children
def __getstate__( self ):
return {}
def __setstate__( self, dict ):
pass
def __del__( self, id=id ):
'''
Need to clean up the namespace's mutual references,
this can be done without affecting the cascade by just
eliminating the key/value pairs. The namespaces will
no longer contain the prototypes, but they will still
chain up to the higher-level namespaces, and the nodes
will have those prototypes still in use.
'''
## print 'del sceneGraph', id(self )
try:
## import pdb
## pdb.set_trace()
## self.protoTypes.__dict__.clear()
self.protoTypes._base.clear()
del self.protoTypes.__namespace_cascade__[:]
except:
print 'unable to free references'
def addRoute(self, routeTuple, getNewNodes=0):
''' Add a single route to the sceneGraph
routeTuple -- ROUTE route or (string sourcenode, string sourceeventOut, string destinationnode, string destinationeventOut)
getNewNodes -- boolean getNewNodes
if true, look up sourcenode and destinationnode within the current defNames to determine source/destination nodes
otherwise, just use current if available
'''
# create and wire together the Routes here,
# should just be a matter of pulling the events and passing the nodes...
## import pdb
## pdb.set_trace()
if type( routeTuple) in ( types.TupleType, types.ListType):
(fromNode, fromField, toNode, toField ) = routeTuple
if type(fromNode) is types.StringType:
# get the node instead of the string...
if self.defNames.has_key( fromNode ):
fromNode = self.defNames[fromNode]
else:
err.err( "ROUTE from an unknown node %s "%(routeTuple) )
return 0
if type(toNode) is types.StringType:
# get the node instead of the string...
if self.defNames.has_key( toNode ):
toNode = self.defNames[toNode]
else:
err.err( "ROUTE to an unknown node %s "%(routeTuple) )
return 0
routeTuple = ROUTE( fromNode, fromField, toNode, toField)
elif getNewNodes:
# get the nodes with the same names...
if self.defNames.has_key( routeTuple[0].DEF ):
routeTuple[0] = self.defNames[routeTuple[0].DEF]
else:
err.err( "ROUTE from an unknown node %s "%(routeTuple) )
return 0
if self.defNames.has_key( routeTuple[2].DEF ):
routeTuple[2] = self.defNames[routeTuple[2].DEF]
else:
err.err( "ROUTE to an unknown node %s "%(routeTuple) )
return 0
# should be a Route node now, append to our ROUTE list...
self.routes.append(routeTuple)
return 1
def regDefName(self, defName, object):
''' Register a DEF name for a particular object
defName -- string DEFName
object -- Node node
'''
object.DEF = defName
self.defNames[defName] = object
def addProto(self, proto):
'''Register a Prototype for this sceneGraph
proto -- Prototype PROTO
'''
setattr( self.protoTypes, proto.__gi__, proto )
#toString = __vrmlStr__
#__vrmlStr__ = toString
## def __setattr__( self, key, value ):
## if key == 'protoTypes' and type( value) is types.ListType:
## import pdb
## pdb.set_trace()
## raise TypeError( "Invalid type for protoTypes attribute of sceneGraph %s"%(`value`) )
## else:
## self.__dict__[key] = value
# Default value for each VRML 97 field type, used when a field is
# declared without an explicit value.  NULL is the module's null-node
# singleton (defined elsewhere in this file).
# NOTE(review): the MF* defaults all reference empty list objects created
# once here; callers presumably copy before mutating -- confirm.
DEFAULTFIELDVALUES ={
    "SFBool": 0,
    "SFString": "",
    "SFFloat": 0,
    "SFTime": 0,
    "SFVec3f": (0, 0,0),
    "SFVec2f": (0,0),
    "SFRotation": (0, 1,0, 0),
    "SFInt32": 0,
    "SFImage": (0,0,0),
    "SFColor": (0,0, 0),
    "SFNode": NULL,
    "MFString": [],
    "MFFloat": [],
    "MFTime": [],
    "MFVec3f": [],
    "MFVec2f": [],
    "MFRotation": [],
    "MFInt32": [],
    "MFColor": [],
    "MFNode": [],
}

View File

@@ -0,0 +1 @@
"""utilities"""

View File

@@ -0,0 +1,169 @@
'''
Destructive Functions for "collapsing" Sequences into single levels
>>> from mcf.utils import collapse
>>> collapse.test([[[1],[2,3]],[[]],[4],5,[6]])
[1, 2, 3, 4, 5, 6] # note that is the same root list
>>> collapse.collapse2([[[1],[2,3]],[[]],(4,()),(5,),[6]])
[1, 2, 3, 4, 5, 6] # note is the same root list
'''
import copy, types, sys
from types import ListType, TupleType # this now only supports the obsolete stuff...
def hyperCollapse( inlist, allowedmap, type=type, list=list, itype=types.InstanceType, maxint= sys.maxint):
    '''
    Destructively flatten a mixed hierarchy to a single level.
    Non-recursive, many speedups and obfuscations by Tim Peters :)

    inlist -- list, flattened in place (also returned for convenience)
    allowedmap -- mapping whose keys are the types (or, for instances,
        the classes) whose occurrences should be expanded in place
    type, list, itype, maxint -- default-argument bindings used as
        local-name speed hacks; not intended to be overridden
    '''
    try:
        # for every possible index
        for ind in xrange( maxint):
            # while that index currently holds a list
            expandable = 1
            while expandable:
                expandable = 0
                if allowedmap.has_key( type(inlist[ind]) ):
                    # expand that list into the index (and subsequent indicies)
                    inlist[ind:ind+1] = list( inlist[ind])
                    expandable = 1
                # alternately you could iterate through checking for isinstance on all possible
                # classes, but that would be very slow
                elif type( inlist[ind] ) is itype and allowedmap.has_key( inlist[ind].__class__ ):
                    # here figure out some way to generically expand that doesn't risk
                    # infinite loops...
                    templist = []
                    for x in inlist[ind]:
                        templist.append( x)
                    inlist[ind:ind+1] = templist
                    expandable = 1
    except IndexError:
        # walked off the end of inlist -- flattening is complete
        pass
    return inlist
def collapse(inlist, type=type, ltype=types.ListType, maxint= sys.maxint):
    '''
    Destructively flatten a list hierarchy to a single level.
    Non-recursive, and (as far as I can see, doesn't have any
    glaring loopholes).
    Further speedups and obfuscations by Tim Peters :)

    inlist -- list, flattened in place (also returned for convenience)
    type, ltype, maxint -- default-argument bindings used as local-name
        speed hacks; not intended to be overridden
    '''
    try:
        # for every possible index
        for ind in xrange( maxint):
            # while that index currently holds a list
            while type(inlist[ind]) is ltype:
                # expand that list into the index (and subsequent indicies)
                inlist[ind:ind+1] = inlist[ind]
            #ind = ind+1
    except IndexError:
        # indexing past the (current) end of inlist terminates the scan
        pass
    return inlist
def collapse_safe(inlist):
    '''
    Non-destructive variant of collapse: flattens a shallow copy,
    so the caller's top-level list is left untouched.
    '''
    copied = inlist[:]
    return collapse( copied )
def collapse2(inlist, ltype=(types.ListType, types.TupleType), type=type, maxint= sys.maxint ):
    '''
    Destructively flatten a list hierarchy to a single level.
    Will expand tuple children as well, but will fail if the
    top level element is not a list.
    Non-recursive, and (as far as I can see, doesn't have any
    glaring loopholes).

    maxint -- accepted for call-compatibility with collapse() but not
        used by this implementation
    '''
    ind = 0
    try:
        while 1:
            # expand lists/tuples in place until a scalar sits at ind
            while type(inlist[ind]) in ltype:
                try:
                    inlist[ind:ind+1] = inlist[ind]
                except TypeError:
                    # slice-assignment rejects the child; copy to a list first
                    inlist[ind:ind+1] = list(inlist[ind])
            ind = ind+1
    except IndexError:
        # indexing past the (current) end of inlist terminates the scan
        pass
    return inlist
def collapse2_safe(inlist):
    '''
    As collapse2, but works on a copy of the inlist.
    (Only the top level is copied; nested sublists shared with the
    caller are still flattened destructively.)
    '''
    # Bug fix: this previously delegated to collapse(), which does not
    # expand tuple children as collapse2 -- and this function's name and
    # docstring -- promise.
    return collapse2( list(inlist) )
def old_buggy_collapse(inlist):
    '''Always return a one-level list of all the non-list elements in listin,
    rewritten to be non-recursive 96-12-28 Note that the new versions work
    on the original list, not a copy of the original.'''
    # Historical implementation, superseded by collapse(); retained for
    # reference only (the name records that it is known to be buggy).
    if type(inlist)==TupleType:
        inlist = list(inlist)
    elif type(inlist)!=ListType:
        # scalar input: wrap it rather than flattening
        return [inlist]
    x = 0
    while 1:
        try:
            y = inlist[x]
            if type(y) == ListType:
                ylen = len(y)
                if ylen == 1:
                    inlist[x] = y[0]
                    if type(inlist[x]) == ListType:
                        x = x - 1 # need to collapse that list...
                elif ylen == 0:
                    del(inlist[x])
                    x = x-1 # list has been shortened
                else:
                    inlist[x:x+1]=y
            x = x+1
        except IndexError:
            break
    return inlist
def old_buggy_collapse2(inlist):
    '''As collapse, but also collapse tuples, rewritten 96-12-28 to be non-recursive'''
    # Historical implementation, superseded by collapse2(); retained for
    # reference only (the name records that it is known to be buggy).
    if type(inlist)==TupleType:
        inlist = list(inlist)
    elif type(inlist)!=ListType:
        # scalar input: wrap it rather than flattening
        return [inlist]
    x = 0
    while 1:
        try:
            y = inlist[x]
            if type(y) in [ListType, TupleType]:
                ylen = len(y)
                if ylen == 1:
                    inlist[x] = y[0]
                    if type(inlist[x]) in [ListType,TupleType]:
                        x = x-1 #(to deal with that element)
                elif ylen == 0:
                    del(inlist[x])
                    x = x-1 # list has been shortened, will raise exception with tuples...
                else:
                    inlist[x:x+1]=list(y)
            x = x+1
        except IndexError:
            break
    return inlist
def oldest_buggy_collapse(listin):
    '''Always return a one-level list of all the non-list elements in listin'''
    # Oldest (recursive reduce/map) implementation; retained for reference
    # only.  Note it delegates recursion to the newer, destructive
    # collapse() defined above.
    if type(listin) == ListType:
        return reduce(lambda x,y: x+y, map(collapse, listin), [])
    else: return [listin]
def oldest_buggy_collapse2(seqin):
    '''As oldest_buggy_collapse, but also expands tuples; retained for
    reference only.  Delegates recursion to the destructive collapse2().'''
    if type(seqin) in [ListType, TupleType]:
        return reduce(lambda x,y: x+y, map(collapse2, seqin), [])
    else:
        return [seqin]

View File

@@ -0,0 +1,37 @@
'''
err.py Encapsulated writing to sys.stderr
The idea of this module is that, for a GUI system (or a more advanced UI),
you can just import a different err module (or object) and keep
your code the same. (For instance, you often want a status window
which flashes warnings and info, and have error messages pop up an
alert to get immediate attention.)
'''
import sys
def err(message, Code=0):
    '''
    Report an error, with an optional error code.
    '''
    if Code:
        text = 'Error #%i: %s\n' % (Code, message)
    else:
        text = 'Error: %s\n' % message
    sys.stderr.write(text)
def warn(message, Code=0):
    '''
    Report a warning, with an optional error code.
    '''
    if Code:
        text = 'Warning #%i: %s\n' % (Code, message)
    else:
        text = 'Warning: %s\n' % message
    sys.stderr.write(text)
def info(message, Code=0):
    '''
    Report information/status, with an optional error code.
    '''
    if Code:
        text = 'Info #%i: %s\n' % (Code, message)
    else:
        text = 'Info: %s\n' % message
    sys.stderr.write(text)

View File

@@ -0,0 +1,225 @@
'''
NameSpace v0.04:
A "NameSpace" is an object wrapper around a _base dictionary
which allows chaining searches for an 'attribute' within that
dictionary, or any other namespace which is defined as part
of the search path (depending on the downcascade variable, is
either the hier-parents or the hier-children).
You can assign attributes to the namespace normally, and read
them normally. (setattr, getattr, a.this = that, a.this)
I use namespaces for writing parsing systems, where I want to
differentiate between sources (have multiple sources that I can
swap into or out of the namespace), but want to be able to get
at them through a single interface. There is a test function
which gives you an idea how to use the system.
In general, call NameSpace(someobj), where someobj is a dictionary,
a module, or another NameSpace, and it will return a NameSpace which
wraps up the keys of someobj. To add a namespace to the NameSpace,
just call the append (or hier_addchild) method of the parent namespace
with the child as argument.
### NOTE: if you pass a module (or anything else with a dict attribute),
names which start with '__' will be removed. You can avoid this by
pre-copying the dict of the object and passing it as the arg to the
__init__ method.
### NOTE: to properly pickle and/or copy module-based namespaces you
will likely want to do: from mcf.utils import extpkl, copy_extend
### Changes:
97.05.04 -- Altered to use standard hierobj interface, cleaned up
interface by removing the "addparent" function, which is reachable
by simply appending to the __parent__ attribute, though normally
you would want to use the hier_addchild or append functions, since
they let both objects know about the addition (and therefor the
relationship will be restored if the objects are stored and unstored)
97.06.26 -- Altered the getattr function to reduce the number of
situations in which infinite lookup loops could be created
(unfortunately, the cost is rather high). Made the downcascade
variable harden (resolve) at init, instead of checking for every
lookup. (see next note)
97.08.29 -- Discovered some _very_ weird behaviour when storing
namespaces in mcf.store dbases. Resolved it by storing the
__namespace_cascade__ attribute as a normal attribute instead of
using the __unstore__ mechanism... There was really no need to
use the __unstore__, but figuring out how a function saying
self.__dict__['__namespace_cascade__'] = something
print `self.__dict__['__namespace_cascade__']` can print nothing
is a bit beyond me. (without causing an exception, mind you)
97.11.15 Found yet more errors, decided to make two different
classes of namespace. Those based on modules now act similar
to dummy objects, that is, they let you modify the original
instead of keeping a copy of the original and modifying that.
98.03.15 -- Eliminated custom pickling methods as they are no longer
needed for use with Python 1.5final
98.03.15 -- Fixed bug in items, values, etceteras with module-type
base objects.
'''
import copy, types, string
from mcf.utils import hierobj
class NameSpace(hierobj.Hierobj):
    '''
    An hierarchic NameSpace, allows specification of upward or downward
    chaining search for resolving names
    '''
    def __init__(self, val = None, parents=None, downcascade=1,children=None):
        '''
        A NameSpace can be initialised with a dictionary, a dummied
        dictionary, another namespace, or something which has a __dict__
        attribute.
        Note that downcascade is hardened (resolved) at init, not at
        lookup time.
        '''
        # Bug fix: children previously defaulted to a shared mutable list
        # ([]).  Every namespace built without explicit children then shared
        # one __childlist__ object, so appending a child to (or clearing the
        # cascade of) one namespace silently affected all the others.
        if children is None:
            children = []
        hierobj.Hierobj.__init__(self, parents, children)
        self.__dict__['__downcascade__'] = downcascade # boolean
        if val is None:
            self.__dict__['_base'] = {}
        else:
            if type( val ) == types.StringType:
                # this is a reference to a module which has been pickled
                val = __import__( val, {},{}, string.split( val, '.') )
            try:
                # See if val's a dummy-style object which has a _base
                self.__dict__['_base']=copy.copy(val._base)
            except (AttributeError,KeyError):
                # not a dummy-style object... see if it has a dict attribute...
                try:
                    if type(val) != types.ModuleType:
                        val = copy.copy(val.__dict__)
                except (AttributeError, KeyError):
                    pass
                # whatever val is now, it's going to become our _base...
                self.__dict__['_base']=val
        # harden (resolve) the reference to downcascade to speed attribute lookups
        if downcascade: self.__dict__['__namespace_cascade__'] = self.__childlist__
        else: self.__dict__['__namespace_cascade__'] = self.__parent__
    def __setattr__(self, var, val):
        '''
        An attempt to set an attribute should place the attribute in the _base
        dictionary through a setitem call.
        '''
        # Note that we use standard attribute access to allow ObStore loading if the
        # ._base isn't yet available.
        try:
            self._base[var] = val
        except TypeError:
            # _base is not a mapping (e.g. a module); set a real attribute
            setattr(self._base, var, val)
    def __getattr__(self,var):
        # the {} is a fresh stopdict recording namespaces already visited
        return self.__safe_getattr__(var, {})
    def __safe_getattr__(self, var,stopdict):
        '''
        We have a lot to do in this function, if the attribute is an unloaded
        but stored attribute, we need to load it. If it's not in the stored
        attributes, then we need to load the _base, then see if it's in the
        _base.
        If it's not found by then, then we need to check our resource namespaces
        and see if it's in them.
        '''
        # we don't have a __storedattr__ or it doesn't have this key...
        if var != '_base':
            try:
                return self._base[var]
            except (KeyError,TypeError):
                try:
                    return getattr(self._base, var)
                except AttributeError:
                    pass
            try: # with pickle, it tries to get the __setstate__ before restoration is complete
                for cas in self.__dict__['__namespace_cascade__']:
                    try:
                        stopdict[id(cas)] # if succeeds, we've already tried this child
                        # no need to do anything, if none of the children succeeds we will
                        # raise an AttributeError
                    except KeyError:
                        stopdict[id(cas)] = None
                        # NOTE(review): this returns on the FIRST untried
                        # namespace; if it raises AttributeError the outer
                        # except swallows it and the whole lookup fails, so
                        # later siblings are never consulted.  Appears to be
                        # the deliberate loop-avoidance trade-off described in
                        # the 97.06.26 changelog note -- confirm before relying
                        # on sibling fallthrough.
                        return cas.__safe_getattr__(var,stopdict)
            except (KeyError,AttributeError):
                pass
        raise AttributeError(var)
    def items(self):
        '''Items of the base mapping/object dict; None if neither protocol works.'''
        try:
            return self._base.items()
        except AttributeError:
            pass
        try:
            return self._base.__dict__.items()
        except AttributeError:
            pass
    def keys(self):
        '''Keys of the base mapping/object dict; None if neither protocol works.'''
        try:
            return self._base.keys()
        except AttributeError:
            pass
        try:
            return self._base.__dict__.keys()
        except AttributeError:
            pass
    def has_key( self, key ):
        '''Membership test against the base mapping/object dict.'''
        try:
            return self._base.has_key( key)
        except AttributeError:
            pass
        try:
            return self._base.__dict__.has_key( key)
        except AttributeError:
            pass
    def values(self):
        '''Values of the base mapping/object dict; None if neither protocol works.'''
        try:
            return self._base.values()
        except AttributeError:
            pass
        try:
            return self._base.__dict__.values()
        except AttributeError:
            pass
    def __getinitargs__(self):
        '''Constructor args for copy/pickle; modules are stored by dotted name.'''
        if type( self._base ) is types.ModuleType:
            base = self._base.__name__
        else:
            base = self._base
        return (base, self.__parent__, self.__downcascade__, self.__childlist__)
    def __getstate__(self):
        # all interesting state travels through __getinitargs__
        return None
    def __setstate__(self,*args):
        pass
    def __deepcopy__(self, memo=None):
        '''Deep-copy this namespace; modules in _base are shared, not copied.'''
        d = id(self)
        if memo is None:
            memo = {}
        elif memo.has_key(d):
            return memo[d]
        if type(self._base) == types.ModuleType:
            rest = tuple(map( copy.deepcopy, (self.__parent__, self.__downcascade__, self.__childlist__) ))
            new = apply(self.__class__, (self._base,)+rest )
        else:
            # Bug fix: this branch previously returned the bare tuple of
            # deep-copied init args instead of constructing a new NameSpace.
            initargs = tuple(map( copy.deepcopy, (self._base, self.__parent__, self.__downcascade__, self.__childlist__) ))
            new = apply(self.__class__, initargs )
        # record the copy so repeated references resolve to the same object
        memo[d] = new
        return new
## def __del__( self, id=id ):
## print 'del namespace', id( self )
def test():
import string
a = NameSpace(string)
del(string)
a.append(NameSpace({'a':23,'b':42}))
import math
a.append(NameSpace(math))
print 'The returned object should allow access to the attributes of the string,\nand math modules, and two simple variables "a" and "b" (== 23 and42 respectively)'
return a

View File

@@ -0,0 +1,50 @@
'''
Classes of Types
Often you want to be able to say:
if type(obj) in MutableTypes:
yada
This module is intended to make that easier.
Just import and use :)
'''
import types
# Registries of built-in types grouped by capability.  These are plain
# lists (not tuples) so that regarray()/regnumpy() below can extend them
# at runtime with the optional array types.
MutableTypes = [ types.ListType, types.DictType, types.InstanceType ]
MutableSequenceTypes = [ types.ListType ]
SequenceTypes = [ types.ListType, types.StringType, types.TupleType ]
NumericTypes = [ types.IntType, types.FloatType, types.LongType, types.ComplexType ]
MappingTypes = [ types.DictType ]
def regarray():
    '''
    Register the standard-library array type in the sequence/mutable
    registries.  Returns 1 if registered (now or previously), 0 if the
    array module is unavailable.
    '''
    if globals().has_key('array'):
        # already registered by a previous call
        return 1
    try:
        import array
        SequenceTypes.append( array.ArrayType )
        MutableTypes.append( array.ArrayType )
        MutableSequenceTypes.append( array.ArrayType )
        # Bug fix: publish the module at module level.  The function-local
        # import above never reaches globals(), so the guard at the top
        # could never fire and repeated calls appended duplicate entries.
        globals()['array'] = array
        return 1
    except ImportError:
        return 0
def regnumpy():
    '''
    Call if you want to register numpy arrays
    according to their types.
    Returns 1 if registered (now or previously), 0 if the Numeric
    module is unavailable.
    '''
    if globals().has_key('Numeric'):
        # already registered by a previous call
        return 1
    try:
        import Numeric
        SequenceTypes.append( Numeric.ArrayType )
        MutableTypes.append( Numeric.ArrayType )
        MutableSequenceTypes.append( Numeric.ArrayType )
        # Bug fix: publish the module at module level.  The function-local
        # import above never reaches globals(), so the guard at the top
        # could never fire and repeated calls appended duplicate entries.
        globals()['Numeric'] = Numeric
        return 1
    except ImportError:
        return 0
# For now, always attempt registration at import time.  If this module
# becomes part of the base distribution it might be better to leave this
# out so numpy isn't always getting loaded...
regarray()
regnumpy()

Some files were not shown because too many files have changed in this diff Show More