Index: Include/parsetok.h =================================================================== --- Include/parsetok.h (révision 68984) +++ Include/parsetok.h (copie de travail) @@ -29,6 +29,8 @@ #define PyPARSE_UNICODE_LITERALS 0x0008 #endif +#define PyPARSE_IGNORE_COOKIE 0x0010 + PyAPI_FUNC(node *) PyParser_ParseString(const char *, grammar *, int, perrdetail *); PyAPI_FUNC(node *) PyParser_ParseFile (FILE *, const char *, grammar *, int, Index: Include/pythonrun.h =================================================================== --- Include/pythonrun.h (révision 68984) +++ Include/pythonrun.h (copie de travail) @@ -12,6 +12,7 @@ #define PyCF_SOURCE_IS_UTF8 0x0100 #define PyCF_DONT_IMPLY_DEDENT 0x0200 #define PyCF_ONLY_AST 0x0400 +#define PyCF_IGNORE_COOKIE 0x0800 typedef struct { int cf_flags; /* bitmask of CO_xxx flags relevant to future */ Index: Python/pythonrun.c =================================================================== --- Python/pythonrun.c (révision 68984) +++ Python/pythonrun.c (copie de travail) @@ -1002,9 +1002,17 @@ } /* compute parser flags based on compiler flags */ -#define PARSER_FLAGS(flags) \ - ((flags) ? ((((flags)->cf_flags & PyCF_DONT_IMPLY_DEDENT) ? \ - PyPARSE_DONT_IMPLY_DEDENT : 0)) : 0) +static int PARSER_FLAGS(PyCompilerFlags *flags) +{ + int parser_flags = 0; + if (!flags) + return 0; + if (flags->cf_flags & PyCF_DONT_IMPLY_DEDENT) + parser_flags |= PyPARSE_DONT_IMPLY_DEDENT; + if (flags->cf_flags & PyCF_IGNORE_COOKIE) + parser_flags |= PyPARSE_IGNORE_COOKIE; + return parser_flags; +} #if 0 /* Keep an example of flags with future keyword support. 
*/ Index: Python/bltinmodule.c =================================================================== --- Python/bltinmodule.c (révision 68984) +++ Python/bltinmodule.c (copie de travail) @@ -513,12 +513,13 @@ static char * -source_as_string(PyObject *cmd, char *funcname, char *what) +source_as_string(PyObject *cmd, char *funcname, char *what, PyCompilerFlags *cf) { char *str; Py_ssize_t size; if (PyUnicode_Check(cmd)) { + cf->cf_flags |= PyCF_IGNORE_COOKIE; cmd = _PyUnicode_AsDefaultEncodedString(cmd, NULL); if (cmd == NULL) return NULL; @@ -610,7 +611,8 @@ return result; } - str = source_as_string(cmd, "compile", "string, bytes, AST or code"); + cf.cf_flags = supplied_flags | PyCF_SOURCE_IS_UTF8; + str = source_as_string(cmd, "compile", "string, bytes, AST or code", &cf); if (str == NULL) return NULL; @@ -722,14 +724,14 @@ return PyEval_EvalCode((PyCodeObject *) cmd, globals, locals); } - str = source_as_string(cmd, "eval", "string, bytes or code"); + cf.cf_flags = PyCF_SOURCE_IS_UTF8; + str = source_as_string(cmd, "eval", "string, bytes or code", &cf); if (str == NULL) return NULL; while (*str == ' ' || *str == '\t') str++; - cf.cf_flags = PyCF_SOURCE_IS_UTF8; (void)PyEval_MergeCompilerFlags(&cf); result = PyRun_StringFlags(str, Py_eval_input, globals, locals, &cf); Py_XDECREF(tmp); @@ -798,12 +800,13 @@ v = PyEval_EvalCode((PyCodeObject *) prog, globals, locals); } else { - char *str = source_as_string(prog, "exec", - "string, bytes or code"); + char *str; PyCompilerFlags cf; + cf.cf_flags = PyCF_SOURCE_IS_UTF8; + str = source_as_string(prog, "exec", + "string, bytes or code", &cf); if (str == NULL) return NULL; - cf.cf_flags = PyCF_SOURCE_IS_UTF8; if (PyEval_MergeCompilerFlags(&cf)) v = PyRun_StringFlags(str, Py_file_input, globals, locals, &cf); Index: Parser/tokenizer.c =================================================================== --- Parser/tokenizer.c (révision 68984) +++ Parser/tokenizer.c (copie de travail) @@ -704,7 +704,7 @@ struct tok_state *tok = 
tok_new(); if (tok == NULL) return NULL; - str = (char *)decode_str(str, tok); + str = decode_str(str, tok); if (str == NULL) { PyTokenizer_Free(tok); return NULL; @@ -715,6 +715,28 @@ return tok; } +struct tok_state * +PyTokenizer_FromUnicode(const char *str) +{ + struct tok_state *tok = tok_new(); + if (tok == NULL) + return NULL; + tok->decoding_state = STATE_RAW; + tok->read_coding_spec = 1; + tok->enc = NULL; + tok->str = str; + tok->encoding = (char *)PyMem_MALLOC(6); + if (!tok->encoding) { + PyTokenizer_Free(tok); + return NULL; + } + strcpy(tok->encoding, "utf-8"); + + /* XXX: constify members. */ + tok->buf = tok->cur = tok->end = tok->inp = (char*)str; + return tok; +} + /* Set up tokenizer for file */ Index: Parser/tokenizer.h =================================================================== --- Parser/tokenizer.h (révision 68984) +++ Parser/tokenizer.h (copie de travail) @@ -61,6 +61,7 @@ }; extern struct tok_state *PyTokenizer_FromString(const char *); +extern struct tok_state *PyTokenizer_FromUnicode(const char *); extern struct tok_state *PyTokenizer_FromFile(FILE *, char*, char *, char *); extern void PyTokenizer_Free(struct tok_state *); Index: Parser/parsetok.c =================================================================== --- Parser/parsetok.c (révision 68984) +++ Parser/parsetok.c (copie de travail) @@ -49,7 +49,11 @@ initerr(err_ret, filename); - if ((tok = PyTokenizer_FromString(s)) == NULL) { + if (*flags & PyPARSE_IGNORE_COOKIE) + tok = PyTokenizer_FromUnicode(s); + else + tok = PyTokenizer_FromString(s); + if (tok == NULL) { err_ret->error = PyErr_Occurred() ? 
E_DECODE : E_NOMEM; return NULL; } --- Lib/test/test_pep263.py.orig 2009-01-30 00:06:17.000000000 +0100 +++ Lib/test/test_pep263.py 2009-01-30 00:06:13.000000000 +0100 @@ -31,6 +31,12 @@ else: self.fail() + def test_issue4626(self): + c = compile("# coding=latin-1\n\u00c6 = '\u00c6'", "dummy", "exec") + d = {} + exec(c, d) + self.assertEqual(d['\xc6'], '\xc6') + def test_main(): support.run_unittest(PEP263Test)