Index: Doc/library/urllib.parse.rst
===================================================================
--- Doc/library/urllib.parse.rst (revision 64891)
+++ Doc/library/urllib.parse.rst (working copy)
@@ -182,17 +182,22 @@
    string.  If there is no fragment identifier in *url*, return *url* unmodified
    and an empty string.
 
-.. function:: quote(string[, safe])
+.. function:: quote(string[, safe[, encoding[, errors]]])
 
    Replace special characters in *string* using the ``%xx`` escape. Letters,
   digits, and the characters ``'_.-'`` are never quoted. The optional *safe*
   parameter specifies additional characters that should not be quoted --- its
   default value is ``'/'``.
+   The optional *encoding* and *errors* parameters specify how to deal with
+   non-ASCII characters, as accepted by the :meth:`str.encode` method.
+   *encoding* defaults to ``'utf-8'``.
+   *errors* defaults to ``'replace'``, meaning unsupported characters are
+   replaced by a placeholder character.
 
-   Example: ``quote('/~connolly/')`` yields ``'/%7econnolly/'``.
+   Example: ``quote('/~connolly/')`` yields ``'/%7Econnolly/'``.
 
-.. function:: quote_plus(string[, safe])
+.. function:: quote_plus(string[, safe[, encoding[, errors]]])
 
    Like :func:`quote`, but also replace spaces by plus signs, as required for
   quoting HTML form values.  Plus signs in the original string are escaped
@@ -200,14 +205,20 @@
   ``'/'``.
 
-.. function:: unquote(string)
+.. function:: unquote(string[, encoding[, errors]])
 
   Replace ``%xx`` escapes by their single-character equivalent.
+   The optional *encoding* and *errors* parameters specify how to decode
+   percent-encoded sequences into Unicode characters, as accepted by the
+   :meth:`bytes.decode` method.
+   *encoding* defaults to ``'utf-8'``.
+   *errors* defaults to ``'replace'``, meaning invalid sequences are
+   replaced by a placeholder character.
 
   Example: ``unquote('/%7Econnolly/')`` yields ``'/~connolly/'``.
 
-.. function:: unquote_plus(string)
+.. function:: unquote_plus(string[, encoding[, errors]])
 
   Like :func:`unquote`, but also replace plus signs by spaces, as required for
   unquoting HTML form values.
Index: Lib/urllib/parse.py
===================================================================
--- Lib/urllib/parse.py (revision 64891)
+++ Lib/urllib/parse.py (working copy)
@@ -260,50 +260,87 @@
     else:
         return url, ''
 
+# _hextochr maps 2-hex-digit strings onto single bytes,
+# e.g. _hextochr['2f'] = b'\x2f'
+# Maps lowercase and uppercase variants (but not mixed case).
+_hextochr = dict(('%02x' % i, bytes([i])) for i in range(256))
+_hextochr.update(('%02X' % i, bytes([i])) for i in range(256))
 
-_hextochr = dict(('%02x' % i, chr(i)) for i in range(256))
-_hextochr.update(('%02X' % i, chr(i)) for i in range(256))
+def unquote(s, encoding = "utf-8", errors = "replace"):
+    """Replace %xx escapes by their single-character equivalent. The optional
+    encoding and errors parameters specify how to decode percent-encoded
+    sequences into Unicode characters, as accepted by the bytes.decode()
+    method.
+    By default, percent-encoded sequences are decoded with UTF-8, and invalid
+    sequences are replaced by a placeholder character.
 
-def unquote(s):
-    """unquote('abc%20def') -> 'abc def'."""
+    unquote('abc%20def') -> 'abc def'.
+    """
+    # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
+    # (list of single-byte bytes objects)
+    pct_sequence = []
     res = s.split('%')
     for i in range(1, len(res)):
         item = res[i]
         try:
-            res[i] = _hextochr[item[:2]] + item[2:]
+            pct_sequence.append(_hextochr[item[:2]])
+            rest = item[2:]
         except KeyError:
-            res[i] = '%' + item
-        except UnicodeDecodeError:
-            res[i] = chr(int(item[:2], 16)) + item[2:]
+            rest = '%' + item
+        if len(rest) == 0:
+            # This segment was just a single percent-encoded character.
+            # May be part of a sequence of code units, so delay decoding.
+            # (Stored in pct_sequence).
+            res[i] = ''
+        else:
+            # Encountered non-percent-encoded characters. Flush the current
+            # pct_sequence.
+            res[i] = b''.join(pct_sequence).decode(encoding, errors) + rest
+            pct_sequence = []
+    if len(pct_sequence) > 0:
+        # Flush the final pct_sequence
+        # res[-1] will always be empty if pct_sequence != []
+        res[-1] = b''.join(pct_sequence).decode(encoding, errors)
     return "".join(res)
 
-def unquote_plus(s):
-    """unquote('%7e/abc+def') -> '~/abc def'"""
+def unquote_plus(s, encoding = "utf-8", errors = "replace"):
+    """Like unquote(), but also replace plus signs by spaces, as required for
+    unquoting HTML form values.
+
+    unquote_plus('%7e/abc+def') -> '~/abc def'
+    """
     s = s.replace('+', ' ')
-    return unquote(s)
+    return unquote(s, encoding, errors)
 
-always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-               'abcdefghijklmnopqrstuvwxyz'
-               '0123456789' '_.-')
+always_safe = frozenset(
+    b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+    b'abcdefghijklmnopqrstuvwxyz'
+    b'0123456789' b'_.-')
 _safe_quoters= {}
 
 class Quoter:
     def __init__(self, safe):
+        """safe: May be either a string or bytes object."""
         self.cache = {}
-        self.safe = safe + always_safe
+        try:
+            self.safe = always_safe.union(map(ord, safe))
+        except TypeError:
+            # safe may be a bytes object
+            self.safe = always_safe.union(safe)
 
     def __call__(self, c):
+        """
+        c: An int, representing a byte to be encoded.  Must be in range(0, 256).
+        Returns a str.
+        """
         try:
             return self.cache[c]
         except KeyError:
-            if ord(c) < 256:
-                res = (c in self.safe) and c or ('%%%02X' % ord(c))
-                self.cache[c] = res
-                return res
-            else:
-                return "".join(['%%%02X' % i for i in c.encode("utf-8")])
+            res = (c in self.safe and c < 128) and chr(c) or ('%%%02X' % c)
+            self.cache[c] = res
+            return res
 
-def quote(s, safe = '/'):
+def quote(s, safe = '/', encoding = "utf-8", errors = "replace"):
     """quote('abc def') -> 'abc%20def'
 
     Each part of a URL, e.g. the path info, the query, etc., has a
@@ -323,6 +360,11 @@
     is reserved, but in typical usage the quote function is being
     called on a path where the existing slash characters are used as
     reserved characters.
+
+    The optional encoding and errors parameters specify how to deal with
+    non-ASCII characters, as accepted by the str.encode method.
+    By default, characters are encoded with UTF-8, and unsupported characters
+    are replaced by a placeholder character.
     """
     cachekey = (safe, always_safe)
     try:
@@ -330,15 +372,18 @@
     except KeyError:
         quoter = Quoter(safe)
         _safe_quoters[cachekey] = quoter
-    res = map(quoter, s)
+    res = map(quoter, s.encode(encoding, errors))
     return ''.join(res)
 
-def quote_plus(s, safe = ''):
-    """Quote the query fragment of a URL; replacing ' ' with '+'"""
+def quote_plus(s, safe = '', encoding = "utf-8", errors = "replace"):
+    """Like quote(), but also replace ' ' with '+', as required for quoting
+    HTML form values.  Plus signs in the original string are escaped unless
+    they are included in safe.  It also does not have safe default to '/'.
+    """
     if ' ' in s:
-        s = quote(s, safe + ' ')
+        s = quote(s, safe + ' ', encoding, errors)
         return s.replace(' ', '+')
-    return quote(s, safe)
+    return quote(s, safe, encoding, errors)
 
 def urlencode(query,doseq=0):
     """Encode a sequence of two-element tuples or dictionary into a URL query string.
Index: Lib/email/utils.py
===================================================================
--- Lib/email/utils.py (revision 64891)
+++ Lib/email/utils.py (working copy)
@@ -219,7 +219,7 @@
     charset is given but not language, the string is encoded using the empty
     string for language.
     """
-    s = urllib.parse.quote(s, safe='')
+    s = urllib.parse.quote(s, safe='', encoding=charset or 'ascii')
     if charset is None and language is None:
         return s
     if language is None:
@@ -271,7 +271,10 @@
             # language specifiers at the beginning of the string.
             for num, s, encoded in continuations:
                 if encoded:
-                    s = urllib.parse.unquote(s)
+                    # Decode as "latin-1", so the characters in s directly
+                    # represent the percent-encoded octet values.
+                    # collapse_rfc2231_value treats this as an octet sequence.
+                    s = urllib.parse.unquote(s, encoding="latin-1")
                     extended = True
                 value.append(s)
             value = quote(EMPTYSTRING.join(value))
Index: Lib/test/test_http_cookiejar.py
===================================================================
--- Lib/test/test_http_cookiejar.py (revision 64891)
+++ Lib/test/test_http_cookiejar.py (working copy)
@@ -1444,7 +1444,8 @@
         # Try some URL encodings of the PATHs.
         # (the behaviour here has changed from libwww-perl)
         c = CookieJar(DefaultCookiePolicy(rfc2965=True))
-        interact_2965(c, "http://www.acme.com/foo%2f%25/%3c%3c%0Anew%E5/%E5",
+        interact_2965(c, "http://www.acme.com/foo%2f%25/"
+                         "%3c%3c%0Anew%C3%A5/%C3%A5",
                      "foo = bar; version = 1")
 
         cookie = interact_2965(
Index: Lib/test/test_urllib.py
===================================================================
--- Lib/test/test_urllib.py (revision 64891)
+++ Lib/test/test_urllib.py (working copy)
@@ -355,6 +355,13 @@
         self.assertEqual(quote_by_default, result,
                          "using quote_plus(): %s != %s" % (quote_by_default,
                                                            result))
+        # "Safe" non-ASCII characters should have no effect
+        # (Since URIs are not allowed to have non-ASCII characters)
+        result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
+        expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
+        self.assertEqual(expect, result,
+                         "using quote(): %r != %r" %
+                         (expect, result))
 
     def test_default_quoting(self):
         # Make sure all characters that should be quoted are by default sans
@@ -407,6 +414,39 @@
         self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
                          'alpha+beta+gamma')
 
+    def test_quote_with_unicode(self):
+        # Characters in Latin-1 range, encoded by default in UTF-8
+        given = "\xa2\xd8ab\xff"
+        expect = "%C2%A2%C3%98ab%C3%BF"
+        result = urllib.parse.quote(given)
+        self.assertEqual(expect, result,
+                         "using quote(): %r != %r" % (expect, result))
+        # Characters in Latin-1 range, encoded with Latin-1
+        given = "\xa2\xd8ab\xff"
+        expect = "%A2%D8ab%FF"
+        result = urllib.parse.quote(given, encoding="latin-1")
+        self.assertEqual(expect, result,
+                         "using quote(): %r != %r" % (expect, result))
+        # Characters in BMP, encoded by default in UTF-8
+        given = "\u6f22\u5b57"  # "Kanji"
+        expect = "%E6%BC%A2%E5%AD%97"
+        result = urllib.parse.quote(given)
+        self.assertEqual(expect, result,
+                         "using quote(): %r != %r" % (expect, result))
+        # Characters in BMP, encoded with Latin-1
+        given = "\u6f22\u5b57"
+        expect = "%3F%3F"  # "??"
+        result = urllib.parse.quote(given, encoding="latin-1")
+        self.assertEqual(expect, result,
+                         "using quote(): %r != %r" % (expect, result))
+        # Characters in BMP, Latin-1, with xmlcharref error handling
+        given = "\u6f22\u5b57"
+        expect = "%26%2328450%3B%26%2323383%3B"  # "&#28450;&#23383;"
+        result = urllib.parse.quote(given, encoding="latin-1",
+                                    errors="xmlcharrefreplace")
+        self.assertEqual(expect, result,
+                         "using quote(): %r != %r" % (expect, result))
+
 
 class UnquotingTests(unittest.TestCase):
     """Tests for unquote() and unquote_plus()
@@ -464,9 +504,54 @@
                          "using unquote_plus(): %s != %s" % (expect, result))
 
     def test_unquote_with_unicode(self):
-        r = urllib.parse.unquote('br%C3%BCckner_sapporo_20050930.doc')
-        self.assertEqual(r, 'br\xc3\xbcckner_sapporo_20050930.doc')
+        # Characters in the Latin-1 range, encoded with UTF-8
+        given = 'br%C3%BCckner_sapporo_20050930.doc'
+        expect = 'br\u00fcckner_sapporo_20050930.doc'
+        result = urllib.parse.unquote(given)
+        self.assertEqual(expect, result,
+                         "using unquote(): %r != %r" % (expect, result))
+        # Characters in the Latin-1 range, encoded with Latin-1
+        result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
+                                      encoding="latin-1")
+        expect = 'br\u00fcckner_sapporo_20050930.doc'
+        self.assertEqual(expect, result,
+                         "using unquote(): %r != %r" % (expect, result))
+
+        # Characters in BMP, encoded with UTF-8
+        given = "%E6%BC%A2%E5%AD%97"
+        expect = "\u6f22\u5b57"  # "Kanji"
+        result = urllib.parse.unquote(given)
+        self.assertEqual(expect, result,
+                         "using unquote(): %r != %r" % (expect, result))
+
+        # Decode with UTF-8, invalid sequence
+        given = "%F3%B1"
+        expect = "\ufffd"  # Replacement character
+        result = urllib.parse.unquote(given)
+        self.assertEqual(expect, result,
+                         "using unquote(): %r != %r" % (expect, result))
+
+        # Decode with UTF-8, invalid sequence, ignoring errors
+        given = "%F3%B1"
+        expect = ""
+        result = urllib.parse.unquote(given, errors="ignore")
+        self.assertEqual(expect, result,
+                         "using unquote(): %r != %r" % (expect, result))
+
+        # A mix of non-ASCII and percent-encoded characters, UTF-8
+        result = urllib.parse.unquote("\u6f22%C3%BC")
+        expect = '\u6f22\xfc'
+        self.assertEqual(expect, result,
+                         "using unquote(): %r != %r" % (expect, result))
+
+        # A mix of non-ASCII and percent-encoded characters, Latin-1
+        # (Note, the string contains non-Latin-1-representable characters)
+        result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
+        expect = '\u6f22\xfc'
+        self.assertEqual(expect, result,
+                         "using unquote(): %r != %r" % (expect, result))
+
 
 class urlencode_Tests(unittest.TestCase):
     """Tests for urlencode()"""
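
For reviewers, a minimal usage sketch of the behaviour this patch aims for. The expected
values are copied from (or derived from) the documentation examples and the new test cases
above, including one latin-1 round-trip illustrating the email.utils change; they naturally
only hold with the patch applied:

    from urllib.parse import quote, unquote, unquote_plus

    assert quote('/~connolly/') == '/%7Econnolly/'           # '/' stays unquoted by default
    assert quote('\u6f22\u5b57') == '%E6%BC%A2%E5%AD%97'     # text is encoded as UTF-8 first
    assert quote('a\xfcb', encoding='latin-1') == 'a%FCb'    # or with an explicit codec
    assert unquote('%E6%BC%A2%E5%AD%97') == '\u6f22\u5b57'   # percent-decoded, then UTF-8-decoded
    assert unquote('br%FCckner_sapporo_20050930.doc',
                   encoding='latin-1') == 'br\xfcckner_sapporo_20050930.doc'
    assert unquote('%F3%B1') == '\ufffd'                     # invalid UTF-8; errors='replace'
    assert unquote('%E2%82%AC', encoding='latin-1') == '\xe2\x82\xac'  # latin-1 maps each octet
                                                                       # to the same code point,
                                                                       # as relied on in email.utils
    assert unquote_plus('%7e/abc+def') == '~/abc def'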