--- Lib/tokenize.py	2009-02-26 09:05:17.000000000 -0500
+++ /home/virtualmachine/Documents/Winter2009/inforet/project/tokenize.py	2009-04-27 02:15:39.000000000 -0400
@@ -26,7 +26,7 @@
 __credits__ = \
     'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro, Raymond Hettinger'
 
-import string, re
+import string, re, collections, functools
 from token import *
 
 import token
@@ -142,10 +142,36 @@
 
 tabsize = 8
 
+RowCol = collections.namedtuple("RowCol", "row col")
+NamedToken = collections.namedtuple("Token", "type string start end line")
+
 class TokenError(Exception): pass
 
 class StopTokenizing(Exception): pass
 
+def yieldNamedToken(fn):
+    """
+    Decorator for generate_tokens.  Wraps the generator so that each
+    plain 5-tuple token it produces is converted to a NamedToken
+    namedtuple before being yielded.
+
+    Each yielded token has the fields:
+        token.type
+        token.string
+        token.start.row, token.start.col
+        token.end.row, token.end.col
+        token.line
+    """
+    # functools.wraps preserves generate_tokens' name and docstring,
+    # which a bare inner function would otherwise clobber.
+    @functools.wraps(fn)
+    def wrapper(*args, **kwargs):
+        for t in fn(*args, **kwargs):
+            yield NamedToken(type=t[0],
+                             string=t[1],
+                             start=RowCol._make(t[2]),
+                             end=RowCol._make(t[3]),
+                             line=t[4])
+    return wrapper
+
 def printtoken(type, token, srow_scol, erow_ecol, line): # for testing
     srow, scol = srow_scol
     erow, ecol = erow_ecol
@@ -261,6 +287,7 @@
     ut = Untokenizer()
     return ut.untokenize(iterable)
 
+@yieldNamedToken
 def generate_tokens(readline):
     """
     The generate_tokens() generator requires one argment, readline, which