# HG changeset patch
# User maru.newby
# Date 1238609440 14400
# Branch py3k
# Node ID 6915f67dd9add8028b8bd013a9c4dbcb25c7c868
# Parent a1572d5cac5e92961a2c1d6c12bf762b4ac9ae35
Fixing encoding handling for the trace module - it was breaking coverage
execution for files not encoded in utf-8.

diff -r a1572d5cac5e -r 6915f67dd9ad Lib/trace.py
--- a/Lib/trace.py	Wed Apr 01 01:50:31 2009 +0200
+++ b/Lib/trace.py	Wed Apr 01 14:10:40 2009 -0400
@@ -48,6 +48,7 @@
   r.write_results(show_missing=True, coverdir="/tmp")
 """
 
+import io
 import linecache
 import os
 import re
@@ -391,7 +392,7 @@
             linenos.update(find_lines(c, strs))
     return linenos
 
-def find_strings(filename):
+def find_strings(filename, encoding):
     """Return a dict of possible docstring positions.
 
     The dict maps line numbers to strings.  There is an entry for
@@ -402,7 +403,7 @@
     # If the first token is a string, then it's the module docstring.
     # Add this special case so that the test in the loop passes.
     prev_ttype = token.INDENT
-    f = open(filename)
+    f = open(filename, encoding=encoding)
     for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
         if ttype == token.STRING:
             if prev_ttype == token.INDENT:
@@ -417,13 +418,15 @@
 def find_executable_linenos(filename):
     """Return dict where keys are line numbers in the line number table."""
     try:
-        prog = open(filename, "rU").read()
+        with io.FileIO(filename, 'r') as file:
+            encoding, lines = tokenize.detect_encoding(file.readline)
+        prog = open(filename, "rU", encoding=encoding).read()
     except IOError as err:
         print(("Not printing coverage data for %r: %s"
                % (filename, err)), file=sys.stderr)
         return {}
     code = compile(prog, filename, "exec")
-    strs = find_strings(filename)
+    strs = find_strings(filename, encoding)
     return find_lines(code, strs)
 
 class Trace:
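
For context, the core technique the patch applies is: read the source file's
declared encoding (PEP 263 coding cookie or BOM, defaulting to utf-8) from the
raw bytes with tokenize.detect_encoding(), then reopen the file as text with
that encoding. The following standalone sketch illustrates that pattern; it is
not part of the patch, and the read_source helper and the 'spam.py' path are
illustrative placeholders.

    # Minimal sketch of the detect-then-reopen pattern used by the patch.
    import io
    import tokenize

    def read_source(filename):
        # detect_encoding() takes a readline callable that returns bytes;
        # io.FileIO provides one, mirroring what the patch does.
        with io.FileIO(filename, 'r') as raw:
            encoding, first_lines = tokenize.detect_encoding(raw.readline)
        # Reopen as text so sources declared as, e.g., latin-1 decode cleanly.
        with open(filename, encoding=encoding) as f:
            return encoding, f.read()

    if __name__ == '__main__':
        enc, source = read_source('spam.py')   # placeholder file
        print(enc, len(source.splitlines()), 'lines')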