From a6bbcd39b09e07e34995776119c76c08c5d295b2 Mon Sep 17 00:00:00 2001
From: Malthe Borch
Date: Wed, 14 Sep 2016 10:44:27 +0200
Subject: [PATCH] Allow 'make' to be run under Python 3

---
 Parser/asdl.py   | 78 +++++++++++++++++++++++++++++---------------------------
 Parser/asdl_c.py |  8 +++---
 Parser/spark.py  | 77 +++++++++++++++++++++++++++++-------------------------
 3 files changed, 85 insertions(+), 78 deletions(-)

diff --git a/Parser/asdl.py b/Parser/asdl.py
index 7f5856b..083b45f 100644
--- a/Parser/asdl.py
+++ b/Parser/asdl.py
@@ -10,6 +10,8 @@ browser.
 
 Changes for Python: Add support for module versions
 """
+from __future__ import print_function
+
 import os
 import traceback
 
@@ -96,7 +98,7 @@ class ASDLScanner(spark.GenericScanner, object):
 
     def t_default(self, s):
         r" . +"
-        raise ValueError, "unmatched input: %s" % `s`
+        raise ValueError("unmatched input: %r" % s)
 
 class ASDLParser(spark.GenericParser, object):
     def __init__(self):
@@ -108,48 +110,48 @@ class ASDLParser(spark.GenericParser, object):
     def error(self, tok):
         raise ASDLSyntaxError(tok.lineno, tok)
 
-    def p_module_0(self, (module, name, version, _0, _1)):
+    def p_module_0(self, module, name, version, _0, _1):
         " module ::= Id Id version { } "
         if module.value != "module":
             raise ASDLSyntaxError(module.lineno,
                                   msg="expected 'module', found %s" % module)
         return Module(name, None, version)
 
-    def p_module(self, (module, name, version, _0, definitions, _1)):
+    def p_module(self, module, name, version, _0, definitions, _1):
         " module ::= Id Id version { definitions } "
         if module.value != "module":
             raise ASDLSyntaxError(module.lineno,
                                   msg="expected 'module', found %s" % module)
         return Module(name, definitions, version)
 
-    def p_version(self, (version, V)):
+    def p_version(self, version, V):
         "version ::= Id String"
         if version.value != "version":
             raise ASDLSyntaxError(version.lineno,
                                   msg="expected 'version', found %" % version)
         return V
 
-    def p_definition_0(self, (definition,)):
+    def p_definition_0(self, definition):
         " definitions ::= definition "
         return definition
 
-    def p_definition_1(self, (definitions, definition)):
+    def p_definition_1(self, definitions, definition):
         " definitions ::= definition definitions "
         return definitions + definition
 
-    def p_definition(self, (id, _, type)):
+    def p_definition(self, id, _, type):
         " definition ::= Id = type "
         return [Type(id, type)]
 
-    def p_type_0(self, (product,)):
+    def p_type_0(self, product):
         " type ::= product "
         return product
 
-    def p_type_1(self, (sum,)):
+    def p_type_1(self, sum):
         " type ::= sum "
         return Sum(sum)
 
-    def p_type_2(self, (sum, id, _0, attributes, _1)):
+    def p_type_2(self, sum, id, _0, attributes, _1):
         " type ::= sum Id ( fields ) "
         if id.value != "attributes":
             raise ASDLSyntaxError(id.lineno,
@@ -158,63 +160,63 @@ class ASDLParser(spark.GenericParser, object):
         attributes.reverse()
         return Sum(sum, attributes)
 
-    def p_product(self, (_0, fields, _1)):
+    def p_product(self, _0, fields, _1):
         " product ::= ( fields ) "
         # XXX can't I just construct things in the right order?
         fields.reverse()
         return Product(fields)
 
-    def p_sum_0(self, (constructor,)):
+    def p_sum_0(self, constructor):
         " sum ::= constructor "
         return [constructor]
 
-    def p_sum_1(self, (constructor, _, sum)):
+    def p_sum_1(self, constructor, _, sum):
         " sum ::= constructor | sum "
         return [constructor] + sum
 
-    def p_sum_2(self, (constructor, _, sum)):
+    def p_sum_2(self, constructor, _, sum):
         " sum ::= constructor | sum "
         return [constructor] + sum
 
-    def p_constructor_0(self, (id,)):
+    def p_constructor_0(self, id):
         " constructor ::= Id "
         return Constructor(id)
 
-    def p_constructor_1(self, (id, _0, fields, _1)):
+    def p_constructor_1(self, id, _0, fields, _1):
         " constructor ::= Id ( fields ) "
         # XXX can't I just construct things in the right order?
         fields.reverse()
         return Constructor(id, fields)
 
-    def p_fields_0(self, (field,)):
+    def p_fields_0(self, field):
         " fields ::= field "
         return [field]
 
-    def p_fields_1(self, (field, _, fields)):
+    def p_fields_1(self, field, _, fields):
         " fields ::= field , fields "
         return fields + [field]
 
-    def p_field_0(self, (type,)):
+    def p_field_0(self, type):
         " field ::= Id "
         return Field(type)
 
-    def p_field_1(self, (type, name)):
+    def p_field_1(self, type, name):
         " field ::= Id Id "
         return Field(type, name)
 
-    def p_field_2(self, (type, _, name)):
+    def p_field_2(self, type, _, name):
         " field ::= Id * Id "
         return Field(type, name, seq=True)
 
-    def p_field_3(self, (type, _, name)):
+    def p_field_3(self, type, _, name):
         " field ::= Id ? Id "
         return Field(type, name, opt=True)
 
-    def p_field_4(self, (type, _)):
+    def p_field_4(self, type, _):
         " field ::= Id * "
         return Field(type, seq=True)
 
-    def p_field_5(self, (type, _)):
+    def p_field_5(self, type, _):
         " field ::= Id ? "
         return Field(type, opt=True)
 
@@ -304,9 +306,9 @@ class VisitorBase(object):
             return
         try:
             meth(object, *args)
-        except Exception, err:
-            print "Error visiting", repr(object)
-            print err
+        except Exception as err:
+            print("Error visiting", repr(object))
+            print(err)
             traceback.print_exc()
             # XXX hack
             if hasattr(self, 'file'):
@@ -351,8 +353,8 @@ class Check(VisitorBase):
         if conflict is None:
             self.cons[key] = name
         else:
-            print "Redefinition of constructor %s" % key
-            print "Defined in %s and %s" % (conflict, name)
+            print("Redefinition of constructor %s" % key)
+            print("Defined in %s and %s" % (conflict, name))
             self.errors += 1
         for f in cons.fields:
             self.visit(f, key)
@@ -374,7 +376,7 @@ def check(mod):
         if t not in mod.types and not t in builtin_types:
             v.errors += 1
             uses = ", ".join(v.types[t])
-            print "Undefined type %s, used in %s" % (t, uses)
+            print("Undefined type %s, used in %s" % (t, uses))
 
     return not v.errors
 
@@ -386,10 +388,10 @@ def parse(file):
     tokens = scanner.tokenize(buf)
     try:
         return parser.parse(tokens)
-    except ASDLSyntaxError, err:
-        print err
+    except ASDLSyntaxError as err:
+        print(err)
         lines = buf.split("\n")
-        print lines[err.lineno - 1] # lines starts at 0, files at 1
+        print(lines[err.lineno - 1]) # lines starts at 0, files at 1
 
 if __name__ == "__main__":
     import glob
@@ -402,12 +404,12 @@ if __name__ == "__main__":
         files = glob.glob(testdir + "/*.asdl")
 
     for file in files:
-        print file
+        print(file)
         mod = parse(file)
-        print "module", mod.name
-        print len(mod.dfns), "definitions"
+        print("module", mod.name)
+        print(len(mod.dfns), "definitions")
         if not check(mod):
-            print "Check failed"
+            print("Check failed")
         else:
             for dfn in mod.dfns:
-                print dfn.type
+                print(dfn.type)
diff --git a/Parser/asdl_c.py b/Parser/asdl_c.py
index 3d46220..8c6adca 100755
--- a/Parser/asdl_c.py
+++ b/Parser/asdl_c.py
@@ -1187,7 +1187,7 @@ def main(srcfile):
         sys.exit(1)
     if INC_DIR:
         p = "%s/%s-ast.h" % (INC_DIR, mod.name)
-        f = open(p, "wb")
+        f = open(p, "w")
         f.write(auto_gen_msg)
         f.write('#include "asdl.h"\n\n')
         c = ChainOfVisitors(TypeDefVisitor(f),
@@ -1202,7 +1202,7 @@ def main(srcfile):
 
     if SRC_DIR:
         p = os.path.join(SRC_DIR, str(mod.name) + "-ast.c")
-        f = open(p, "wb")
+        f = open(p, "w")
         f.write(auto_gen_msg)
         f.write(c_file_msg % mod.version)
         f.write('#include "Python.h"\n')
@@ -1230,7 +1230,7 @@ if __name__ == "__main__":
     SRC_DIR = ''
     opts, args = getopt.getopt(sys.argv[1:], "h:c:")
     if len(opts) != 1:
-        print "Must specify exactly one output file"
+        print("Must specify exactly one output file")
         sys.exit(1)
     for o, v in opts:
         if o == '-h':
@@ -1238,6 +1238,6 @@ if __name__ == "__main__":
         if o == '-c':
             SRC_DIR = v
     if len(args) != 1:
-        print "Must specify single input file"
+        print("Must specify single input file")
         sys.exit(1)
     main(args[0])
diff --git a/Parser/spark.py b/Parser/spark.py
index b064d62..76b9b48 100644
--- a/Parser/spark.py
+++ b/Parser/spark.py
@@ -19,6 +19,8 @@
 # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
+from __future__ import print_function
+
 __version__ = 'SPARK-0.7 (pre-alpha-5)'
 
 import re
@@ -30,7 +32,7 @@ def _namelist(instance):
         for b in c.__bases__:
             classlist.append(b)
         for name in c.__dict__.keys():
-            if not namedict.has_key(name):
+            if name not in namedict:
                 namelist.append(name)
                 namedict[name] = 1
     return namelist
@@ -56,10 +58,10 @@ class GenericScanner:
             rv.append(self.makeRE(name))
 
         rv.append(self.makeRE('t_default'))
-        return string.join(rv, '|')
+        return "|".join(rv)
 
     def error(self, s, pos):
-        print "Lexical error at position %s" % pos
+        print("Lexical error at position %s" % pos)
         raise SystemExit
 
     def tokenize(self, s):
@@ -72,13 +74,13 @@ class GenericScanner:
 
             groups = m.groups()
             for i in range(len(groups)):
-                if groups[i] and self.index2func.has_key(i):
+                if groups[i] and i in self.index2func:
                     self.index2func[i](groups[i])
             pos = m.end()
 
     def t_default(self, s):
         r'( . | \n )+'
-        print "Specification error: unmatched input"
+        print("Specification error: unmatched input")
         raise SystemExit
 
 #
@@ -140,7 +142,7 @@ class GenericParser:
             for k, v in self.edges.items():
                 if v is None:
                     state, sym = k
-                    if self.states.has_key(state):
+                    if state in self.states:
                         self.goto(state, sym)
                         changes = 1
         rv = self.__dict__.copy()
@@ -171,7 +173,7 @@ class GenericParser:
 
     def addRule(self, doc, func, _preprocess=1):
         fn = func
-        rules = string.split(doc)
+        rules = doc.split()
 
         index = []
         for i in range(len(rules)):
@@ -187,7 +189,7 @@ class GenericParser:
             if _preprocess:
                 rule, fn = self.preprocess(rule, func)
 
-            if self.rules.has_key(lhs):
+            if lhs in self.rules:
                 self.rules[lhs].append(rule)
             else:
                 self.rules[lhs] = [ rule ]
@@ -204,7 +206,7 @@ class GenericParser:
 
     def augment(self, start):
         rule = '%s ::= %s %s' % (self._START, self._BOF, start)
-        self.addRule(rule, lambda args: args[1], 0)
+        self.addRule(rule, lambda *args: args[1], 0)
 
     def computeNull(self):
         self.nullable = {}
@@ -225,7 +227,7 @@
                 # grammars.
                 #
                 for sym in rhs:
-                    if not self.rules.has_key(sym):
+                    if sym not in self.rules:
                         break
                 else:
                     tbd.append(rule)
@@ -268,8 +270,7 @@ class GenericParser:
             n = len(rhs)
             while i < n:
                 sym = rhs[i]
-                if not self.rules.has_key(sym) or \
-                   not self.nullable[sym]:
+                if sym not in self.rules or not self.nullable[sym]:
                     candidate = 0
                     i = i + 1
                     continue
@@ -285,7 +286,7 @@
             if candidate:
                 lhs = self._NULLABLE+lhs
                 rule = (lhs, rhs)
-                if self.newrules.has_key(lhs):
+                if lhs in self.newrules:
                     self.newrules[lhs].append(rule)
                 else:
                     self.newrules[lhs] = [ rule ]
@@ -295,7 +296,7 @@
         return None
 
     def error(self, token):
-        print "Syntax error at or near `%s' token" % token
+        print("Syntax error at or near `%s' token" % token)
         raise SystemExit
 
     def parse(self, tokens):
@@ -312,12 +313,15 @@
         self.states = { 0: self.makeState0() }
         self.makeState(0, self._BOF)
 
-        for i in xrange(len(tokens)):
+        i = 0
+        while i < len(tokens):
             sets.append([])
 
             if sets[i] == []:
                 break
+
             self.makeSet(tokens[i], sets, i)
+            i += 1
         else:
             sets.append([])
             self.makeSet(None, sets, len(tokens))
@@ -341,7 +345,8 @@
         #
        return self._NULLABLE == sym[0:len(self._NULLABLE)]
 
-    def skip(self, (lhs, rhs), pos=0):
+    def skip(self, rule, pos=0):
+        lhs, rhs = rule
         n = len(rhs)
         while pos < n:
             if not self.isnullable(rhs[pos]):
@@ -364,7 +369,7 @@
 
         core.sort()
         tcore = tuple(core)
-        if self.cores.has_key(tcore):
+        if tcore in self.cores:
             return self.cores[tcore]
         #
         #  Nope, doesn't exist. Compute it and the associated
@@ -388,13 +393,13 @@
 
             nextSym = rhs[pos]
             key = (X.stateno, nextSym)
-            if not rules.has_key(nextSym):
-                if not edges.has_key(key):
+            if nextSym not in rules:
+                if key not in edges:
                     edges[key] = None
                     X.T.append(nextSym)
             else:
                 edges[key] = None
-                if not predicted.has_key(nextSym):
+                if nextSym not in predicted:
                     predicted[nextSym] = 1
                     for prule in rules[nextSym]:
                         ppos = self.skip(prule)
@@ -418,10 +423,10 @@
         #  need to know the entire set of predicted nonterminals
         #  to do this without accidentally duplicating states.
         #
-        core = predicted.keys()
+        core = list(predicted.keys())
         core.sort()
         tcore = tuple(core)
-        if self.cores.has_key(tcore):
+        if tcore in self.cores:
             self.edges[(k, None)] = self.cores[tcore]
             return k
 
@@ -432,7 +437,7 @@
 
     def goto(self, state, sym):
         key = (state, sym)
-        if not self.edges.has_key(key):
+        if key not in self.edges:
            #
            #  No transitions from state on sym.
            #
@@ -630,7 +635,7 @@ class GenericParser:
 
        for i in range(len(rhs)-1, -1, -1):
            sym = rhs[i]
-            if not self.newrules.has_key(sym):
+            if sym not in self.newrules:
                if sym != self._BOF:
                    attr[i] = tokens[k-1]
                    key = (item, k)
@@ -644,7 +649,7 @@
                attr[i] = self.buildTree(sym, why[0],
                                         tokens, why[1])
                item, k = self.predecessor(key, why)
-        return self.rule2func[self.new2old[rule]](attr)
+        return self.rule2func[self.new2old[rule]](*attr)
 
    def ambiguity(self, rules):
        #
@@ -660,8 +665,8 @@
            sortlist.append((len(rhs), name))
            name2index[name] = i
        sortlist.sort()
-        list = map(lambda (a,b): b, sortlist)
-        return rules[name2index[self.resolve(list)]]
+        mapped = list(map(lambda ab: ab[1], sortlist))
+        return rules[name2index[self.resolve(mapped)]]
 
    def resolve(self, list):
        #
@@ -825,15 +830,15 @@ class GenericASTMatcher(GenericParser):
 
 def _dump(tokens, sets, states):
    for i in range(len(sets)):
-        print 'set', i
+        print('set', i)
        for item in sets[i]:
-            print '\t', item
+            print('\t', item)
            for (lhs, rhs), pos in states[item[0]].items:
-                print '\t\t', lhs, '::=',
-                print string.join(rhs[:pos]),
-                print '.',
-                print string.join(rhs[pos:])
+                print('\t\t', lhs, '::=', end=' ')
+                print(' '.join(rhs[:pos]), end=' ')
+                print('.', end=' ')
+                print(' '.join(rhs[pos:]))
        if i < len(tokens):
-            print
-            print 'token', str(tokens[i])
-            print
+            print()
+            print('token', str(tokens[i]))
+            print()
-- 
2.9.3
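
Notes on the porting idioms used above.

The signature rewrites in Parser/asdl.py, together with the buildTree()
change from fn(attr) to fn(*attr) in Parser/spark.py, follow PEP 3113:
Python 3 removed tuple parameter unpacking, so the caller now unpacks the
attribute list instead of the function signature. A minimal sketch of the
idiom (p_pair and attr below are illustrative names, not lines from the
patch):

    # Python 2 only -- a syntax error under Python 3:
    #     def p_pair((left, right)):
    #         return (left, right)

    # Portable form: flat parameters, unpacked by the caller.
    def p_pair(left, right):
        return (left, right)

    attr = ["Id", "x"]        # attribute list collected by the parser
    print(p_pair(*attr))      # caller unpacks, as buildTree() now does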
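The print statements become print() calls and "except E, err" becomes
"except E as err" (PEP 3110); with the __future__ import added at the top
of each module, both spellings behave identically on Python 2.6+ and
Python 3. A small self-contained sketch of the combination:

    from __future__ import print_function  # no-op on Python 3

    try:
        raise ValueError("unmatched input: %r" % "@")
    except ValueError as err:   # "except ValueError, err:" fails on Python 3
        print("Error:", err)    # function-call form works on both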
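The remaining mechanical substitutions replace APIs that Python 3 dropped:
dict.has_key(k) becomes "k in d", string.join()/string.split() become str
methods, and keys() is wrapped in list() before an in-place sort, since
Python 3 returns a view rather than a list. Illustrative equivalents (the
names below are examples, not patch lines):

    d = {"state": 1, "sym": 2}

    assert "state" in d                     # was: d.has_key("state")
    assert "|".join(["a", "b"]) == "a|b"    # was: string.join(rv, '|')

    core = list(d.keys())   # keys() is a view on Python 3,
    core.sort()             # so materialize it before sorting in place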