Text plugin script updates: better error handling, variable parsing, and token caching for repeated parsing of the same document. Fixed joining of multi-line statements and context detection.

2008-07-15 12:55:20 +00:00
parent aeb4d0c631
commit 9037159d7a
4 changed files with 159 additions and 68 deletions
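As a rough illustration of the caching change (a sketch only: txt stands for any Blender Text object, and the helpers are the ones defined in the diff below):

    # Both helpers tokenize the same document. With cached_generate_tokens the
    # first call fills the module-level cache and the second, arriving within
    # the one-second default window, reuses it instead of re-parsing.
    imports = get_imports(txt)
    defs = get_defs(txt)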


@@ -1,6 +1,7 @@
import bpy, sys
import bpy
import __builtin__, tokenize
from tokenize import generate_tokens
from Blender.sys import time
from tokenize import generate_tokens, TokenError
# TODO: Remove the dependency for a full Python installation. Currently only the
# tokenize module is required
@@ -17,15 +18,33 @@ KEYWORDS = ['and', 'del', 'from', 'not', 'while', 'as', 'elif', 'global',
'raise', 'continue', 'finally', 'is', 'return', 'def', 'for',
'lambda', 'try' ]
# Used to cache the return value of generate_tokens
_token_cache = None
_cache_update = 0
def suggest_cmp(x, y):
"""Use this method when sorting a list for suggestions"""
"""Use this method when sorting a list of suggestions.
"""
return cmp(x[0], y[0])
def cached_generate_tokens(txt, since=1):
"""A caching version of generate tokens for multiple parsing of the same
document within a given timescale.
"""
global _token_cache, _cache_update
if _cache_update < time() - since:
txt.reset()
_token_cache = [g for g in generate_tokens(txt.readline)]
_cache_update = time()
return _token_cache
def get_module(name):
"""Returns the module specified by its name. This module is imported and as
such will run any initialization code specified within the module."""
"""Returns the module specified by its name. The module itself is imported
by this method and, as such, any initialization code will be executed.
"""
mod = __import__(name)
components = name.split('.')
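A minimal sketch of the invalidation rule in cached_generate_tokens above (again assuming a Blender Text object txt). The cached token list is reused only while it is younger than 'since' seconds, and the cache is global rather than keyed to the Text object, so back-to-back parses are expected to hit the same document:

    tokens = cached_generate_tokens(txt)           # parses txt and stores the result
    tokens = cached_generate_tokens(txt)           # within ~1s: served from _token_cache
    tokens = cached_generate_tokens(txt, since=0)  # effectively always stale, so txt is re-tokenized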
@@ -34,11 +53,21 @@ def get_module(name):
return mod
def is_module(m):
"""Taken from the inspect module of the standard Python installation"""
"""Taken from the inspect module of the standard Python installation.
"""
return isinstance(m, type(bpy))
def type_char(v):
"""Returns the character used to signify the type of a variable. Use this
method to identify the type character for an item in a suggestion list.
The following values are returned:
'm' if the parameter is a module
'f' if the parameter is callable
'v' if the parameter is a variable or otherwise indeterminable
"""
if is_module(v):
return 'm'
elif callable(v):
@@ -46,8 +75,8 @@ def type_char(v):
else:
return 'v'
def get_context(line, cursor):
"""Establishes the context of the cursor in the given line
def get_context(txt):
"""Establishes the context of the cursor in the given Blender Text object
Returns one of:
NORMAL - Cursor is in a normal context
@@ -57,28 +86,43 @@ def get_context(line, cursor):
"""
l, cursor = txt.getCursorPos()
lines = txt.asLines()[:l+1]
# Detect context (in string or comment)
in_str = 0 # 1-single quotes, 2-double quotes
for i in range(cursor):
if not in_str:
if line[i] == "'": in_str = 1
elif line[i] == '"': in_str = 2
elif line[i] == '#': return 3 # In a comment so quit
for line in lines:
if l == 0:
end = cursor
else:
if in_str == 1:
if line[i] == "'":
in_str = 0
# In again if ' escaped, out again if \ escaped, and so on
for a in range(1, i+1):
if line[i-a] == '\\': in_str = 1-in_str
else: break
elif in_str == 2:
if line[i] == '"':
in_str = 0
# In again if " escaped, out again if \ escaped, and so on
for a in range(1, i+1):
if line[i-a] == '\\': in_str = 2-in_str
else: break
end = len(line)
l -= 1
# Comments end at new lines
if in_str == 3:
in_str = 0
for i in range(end):
if in_str == 0:
if line[i] == "'": in_str = 1
elif line[i] == '"': in_str = 2
elif line[i] == '#': in_str = 3
else:
if in_str == 1:
if line[i] == "'":
in_str = 0
# In again if ' escaped, out again if \ escaped, and so on
for a in range(i-1, -1, -1):
if line[a] == '\\': in_str = 1-in_str
else: break
elif in_str == 2:
if line[i] == '"':
in_str = 0
# In again if " escaped, out again if \ escaped, and so on
for a in range(i-1, -1, -1):
if line[a] == '\\': in_str = 2-in_str
else: break
return in_str
def current_line(txt):
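A hedged example of the reworked, multi-line aware detection (Text.New, write and setCursorPos are assumed from the Blender 2.4x Text API and must run inside Blender; the numeric result follows the in_str codes above):

    from Blender import Text
    txt = Text.New('ctx_demo')
    txt.write('s = "abc\n')    # line 0 opens a double-quoted string and never closes it
    txt.write('t')
    txt.setCursorPos(1, 1)     # cursor sits on line 1, after the 't'
    print get_context(txt)     # expected 2: still inside the string opened on line 0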
@@ -101,9 +145,10 @@ def current_line(txt):
# Join later lines while there is an explicit joining character
i = lineindex
while i < len(lines)-1 and line[i].rstrip().endswith('\\'):
while i < len(lines)-1 and lines[i].rstrip().endswith('\\'):
later = lines[i+1].strip()
line = line.rstrip()[:-1] + ' ' + later # drop the trailing '\' before joining
i += 1
return line, cursor
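The corrected condition indexes the list of physical lines rather than a single character of the current line, so explicit '\' continuations are actually followed now. A rough example (same Blender Text API assumptions as above):

    from Blender import Text
    txt = Text.New('join_demo')
    txt.write('total = 1 + \\\n        2 + \\\n        3\n')
    txt.setCursorPos(0, 0)
    line, cursor = current_line(txt)   # line now spans the whole continued
                                       # statement, roughly 'total = 1 + 2 + 3'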
@@ -134,9 +179,8 @@ def get_imports(txt):
# strings open or there are other syntax errors. For now we return an empty
# dictionary until an alternative parse method is implemented.
try:
txt.reset()
tokens = generate_tokens(txt.readline)
except:
tokens = cached_generate_tokens(txt)
except TokenError:
return dict()
imports = dict()
@@ -191,8 +235,7 @@ def get_imports(txt):
# Handle special case of 'import *'
if impname == '*':
parent = get_module(fromname)
for symbol, attr in parent.__dict__.items():
imports[symbol] = attr
imports.update(parent.__dict__)
else:
# Try importing the name as a module
@@ -202,12 +245,12 @@ def get_imports(txt):
else:
module = get_module(impname)
imports[symbol] = module
except:
except (ImportError, ValueError, AttributeError, TypeError):
# Try importing name as an attribute of the parent
try:
module = __import__(fromname, globals(), locals(), [impname])
imports[symbol] = getattr(module, impname)
except:
except (ImportError, ValueError, AttributeError, TypeError):
pass
# More to import from the same module?
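The bare excepts around the import logic are narrowed to the errors that __import__ and getattr can reasonably raise. A small, hedged illustration of the behavioural difference ('no_such_module' is a made-up name):

    try:
        module = __import__('no_such_module')
    except (ImportError, ValueError, AttributeError, TypeError):
        module = None    # a bad import name is still swallowed, as before
    # Anything outside that tuple (e.g. KeyboardInterrupt) now propagates
    # instead of being hidden by a bare 'except:'.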
@@ -219,7 +262,6 @@ def get_imports(txt):
return imports
def get_builtins():
"""Returns a dictionary of built-in modules, functions and variables."""
@@ -235,9 +277,8 @@ def get_defs(txt):
# See above for problems with generate_tokens
try:
txt.reset()
tokens = generate_tokens(txt.readline)
except:
tokens = cached_generate_tokens(txt)
except TokenError:
return dict()
defs = dict()
@@ -269,3 +310,37 @@ def get_defs(txt):
step = 0
return defs
def get_vars(txt):
"""Returns a dictionary of variable names found in the specified Text
object. This method locates all names followed directly by an equal sign:
'a = ???' or indirectly as part of a tuple/list assignment or inside a
'for ??? in ???:' block.
"""
# See above for problems with generate_tokens
try:
tokens = cached_generate_tokens(txt)
except TokenError:
return []
vars = []
accum = [] # Used for tuple/list assignment
foring = False
for type, string, start, end, line in tokens:
# Look for names
if string == 'for':
foring = True
elif string == '=' or (foring and string == 'in'):
vars.extend(accum)
accum = []
foring = False
elif type == tokenize.NAME:
accum.append(string)
elif string not in [',', '(', ')', '[', ']']:
accum = []
foring = False
return vars
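Finally, a hedged sketch of what get_vars is intended to report (Text.New and write as assumed above; compute and data are placeholder names that only need to tokenize, not run):

    from Blender import Text
    txt = Text.New('vars_demo')
    txt.write('a = 1\n')
    txt.write('x, y = compute()\n')
    txt.write('for item in data:\n\tpass\n')
    print get_vars(txt)   # expected to include 'a', 'x', 'y' and 'item'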