Index: Objects/unicodeobject.c
===================================================================
--- Objects/unicodeobject.c	(revision 79542)
+++ Objects/unicodeobject.c	(working copy)
@@ -1862,24 +1862,24 @@
 
 static char utf8_code_length[256] = {
-    /* Map UTF-8 encoded prefix byte to sequence length. zero means
-       illegal prefix. see RFC 2279 for details */
+    /* Map UTF-8 encoded prefix byte to sequence length. Zero means
+       illegal prefix. See RFC 3629 for details */
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 00-0F */
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 70-7F */
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 80-8F */
     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
-    4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 0, 0
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B0-BF */
+    0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* C0-C1 + C2-CF */
+    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* D0-DF */
+    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, /* E0-EF */
+    4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0  /* F0-F4 + F5-FF */
 };
 
 PyObject *PyUnicode_DecodeUTF8(const char *s,
@@ -1896,6 +1896,7 @@
 {
     const char *starts = s;
     int n;
+    int k;
     Py_ssize_t startinpos;
     Py_ssize_t endinpos;
     Py_ssize_t outpos;
@@ -1961,68 +1962,59 @@
             if ((s[1] & 0xc0) != 0x80) {
                 errmsg = "invalid data";
                 startinpos = s-starts;
-                endinpos = startinpos+2;
+                endinpos = startinpos + 1;
                 goto utf8Error;
             }
             ch = ((s[0] & 0x1f) << 6) + (s[1] & 0x3f);
-            if (ch < 0x80) {
-                startinpos = s-starts;
-                endinpos = startinpos+2;
-                errmsg = "illegal encoding";
-                goto utf8Error;
-            }
-            else
-                *p++ = (Py_UNICODE)ch;
+            assert ((ch > 0x007F) && (ch <= 0x07FF));
+            *p++ = (Py_UNICODE)ch;
             break;
 
         case 3:
+            /* XXX: surrogates shouldn't be valid UTF-8!
+               see http://www.unicode.org/versions/Unicode5.2.0/ch03.pdf
+               (table 3-7) and http://www.rfc-editor.org/rfc/rfc3629.txt
+               Uncomment the 2 lines below to make them invalid,
+               codepoints: d800-dfff; UTF-8: \xed\xa0\x80-\xed\xbf\xbf. */
             if ((s[1] & 0xc0) != 0x80 ||
-                (s[2] & 0xc0) != 0x80) {
+                (s[2] & 0xc0) != 0x80 ||
+                ((unsigned char)s[0] == 0xE0 &&
+                 (unsigned char)s[1] < 0xA0)/* ||
+                ((unsigned char)s[0] == 0xED &&
+                 (unsigned char)s[1] > 0x9F)*/) {
                 errmsg = "invalid data";
                 startinpos = s-starts;
-                endinpos = startinpos+3;
-                goto utf8Error;
-            }
-            ch = ((s[0] & 0x0f) << 12) + ((s[1] & 0x3f) << 6) + (s[2] & 0x3f);
-            if (ch < 0x0800) {
-                /* Note: UTF-8 encodings of surrogates are considered
-                   legal UTF-8 sequences;
+                endinpos = startinpos + 1;
 
-                   XXX For wide builds (UCS-4) we should probably try
-                   to recombine the surrogates into a single code
-                   unit.
-                */
-                errmsg = "illegal encoding";
-                startinpos = s-starts;
-                endinpos = startinpos+3;
+                for (k = 1; (k < 3) && ((s[k] & 0xC0) == 0x80); k++)
+                    endinpos++;
                 goto utf8Error;
             }
-            else
-                *p++ = (Py_UNICODE)ch;
+            ch = ((s[0] & 0x0f) << 12) + ((s[1] & 0x3f) << 6) + (s[2] & 0x3f);
+            assert ((ch > 0x07FF) && (ch <= 0xFFFF));
+            *p++ = (Py_UNICODE)ch;
             break;
 
         case 4:
             if ((s[1] & 0xc0) != 0x80 ||
                 (s[2] & 0xc0) != 0x80 ||
-                (s[3] & 0xc0) != 0x80) {
+                (s[3] & 0xc0) != 0x80 ||
+                ((unsigned char)s[0] == 0xF0 &&
+                 (unsigned char)s[1] < 0x90) ||
+                ((unsigned char)s[0] == 0xF4 &&
+                 (unsigned char)s[1] > 0x8F)) {
                 errmsg = "invalid data";
                 startinpos = s-starts;
-                endinpos = startinpos+4;
+                endinpos = startinpos + 1;
+                for (k = 1; (k < 4) && ((s[k] & 0xC0) == 0x80); k++)
+                    endinpos++;
                 goto utf8Error;
             }
             ch = ((s[0] & 0x7) << 18) + ((s[1] & 0x3f) << 12) +
                  ((s[2] & 0x3f) << 6) + (s[3] & 0x3f);
             /* validate and convert to UTF-16 */
-            if ((ch < 0x10000)        /* minimum value allowed for 4
-                                         byte encoding */
-                || (ch > 0x10ffff))   /* maximum value allowed for
-                                         UTF-16 */
-            {
-                errmsg = "illegal encoding";
-                startinpos = s-starts;
-                endinpos = startinpos+4;
-                goto utf8Error;
-            }
+            assert ((ch > 0xFFFF) && (ch <= 0x10ffff));
+
 #ifdef Py_UNICODE_WIDE
             *p++ = (Py_UNICODE)ch;
 #else
@@ -2038,13 +2030,6 @@
             *p++ = (Py_UNICODE)(0xDC00 + (ch & 0x03FF));
 #endif
             break;
-
-        default:
-            /* Other sizes are only needed for UCS-4 */
-            errmsg = "unsupported Unicode code range";
-            startinpos = s-starts;
-            endinpos = startinpos+n;
-            goto utf8Error;
         }
         s += n;
         continue;
Index: Lib/test/test_unicode.py
===================================================================
--- Lib/test/test_unicode.py	(revision 79542)
+++ Lib/test/test_unicode.py	(working copy)
@@ -600,6 +600,114 @@
         # * strict decoding testing for all of the
         #   UTF8_ERROR cases in PyUnicode_DecodeUTF8
 
+    def test_utf8_decode_valid_sequences(self):
+        sequences = [
+            # single byte
+            ('\x00', u'\x00'), ('a', u'a'), ('\x7f', u'\x7f'),
+            # 2 bytes
+            ('\xc2\x80', u'\x80'), ('\xdf\xbf', u'\u07ff'),
+            # 3 bytes
+            ('\xe0\xa0\x80', u'\u0800'), ('\xed\x9f\xbf', u'\ud7ff'),
+            ('\xee\x80\x80', u'\uE000'), ('\xef\xbf\xbf', u'\uffff'),
+            # 4 bytes
+            ('\xF0\x90\x80\x80', u'\U00010000'),
+            ('\xf4\x8f\xbf\xbf', u'\U0010FFFF')
+        ]
+        for seq, res in sequences:
+            self.assertEqual(seq.decode('utf-8'), res)
+
+        for ch in map(unichr, range(0, sys.maxunicode)):
+            self.assertEqual(ch, ch.encode('utf-8').decode('utf-8'))
+
+    def test_utf8_decode_invalid_sequences(self):
+        # continuation bytes in a sequence of 2, 3, or 4 bytes
+        continuation_bytes = map(chr, range(0x80, 0xC0))
+        # start bytes of a 2-byte sequence equivalent to codepoints <= 0x7F
+        invalid_2B_seq_start_bytes = map(chr, range(0xC0, 0xC2))
+        # start bytes of a 4-byte sequence equivalent to codepoints > 0x10FFFF
+        invalid_4B_seq_start_bytes = map(chr, range(0xF5, 0xF8))
+        invalid_start_bytes = (
+            continuation_bytes + invalid_2B_seq_start_bytes +
+            invalid_4B_seq_start_bytes + map(chr, range(0xF7, 0x100))
+        )
+
+        for byte in invalid_start_bytes:
+            self.assertRaises(UnicodeDecodeError, byte.decode, 'utf-8')
+
+        for sb in invalid_2B_seq_start_bytes:
+            for cb in continuation_bytes:
+                self.assertRaises(UnicodeDecodeError, (sb+cb).decode, 'utf-8')
+
+        for sb in invalid_4B_seq_start_bytes:
+            for cb1 in continuation_bytes[:3]:
+                for cb3 in continuation_bytes[:3]:
+                    self.assertRaises(UnicodeDecodeError,
+                                      (sb+cb1+'\x80'+cb3).decode, 'utf-8')
+
+        for cb in map(chr, range(0x80, 0xA0)):
+            self.assertRaises(UnicodeDecodeError,
+                              ('\xE0'+cb+'\x80').decode, 'utf-8')
+            self.assertRaises(UnicodeDecodeError,
+                              ('\xE0'+cb+'\xBF').decode, 'utf-8')
+        # XXX: surrogates shouldn't be valid UTF-8!
+        # see http://www.unicode.org/versions/Unicode5.2.0/ch03.pdf
+        # (table 3-7) and http://www.rfc-editor.org/rfc/rfc3629.txt
+        #for cb in map(chr, range(0xA0, 0xC0)):
+            #sys.__stdout__.write('\\xED\\x%02x\\x80\n' % ord(cb))
+            #self.assertRaises(UnicodeDecodeError,
+                              #('\xED'+cb+'\x80').decode, 'utf-8')
+            #self.assertRaises(UnicodeDecodeError,
+                              #('\xED'+cb+'\xBF').decode, 'utf-8')
+        for cb in map(chr, range(0x80, 0x90)):
+            self.assertRaises(UnicodeDecodeError,
+                              ('\xF0'+cb+'\x80\x80').decode, 'utf-8')
+            self.assertRaises(UnicodeDecodeError,
+                              ('\xF0'+cb+'\xBF\xBF').decode, 'utf-8')
+        for cb in map(chr, range(0x90, 0xC0)):
+            self.assertRaises(UnicodeDecodeError,
+                              ('\xF4'+cb+'\x80\x80').decode, 'utf-8')
+            self.assertRaises(UnicodeDecodeError,
+                              ('\xF4'+cb+'\xBF\xBF').decode, 'utf-8')
+
+    def test_issue8271(self):
+        #8271
+        sequences = [
+            ('\x80', u'\ufffd'),               # continuation byte
+            ('\xc0', u'\ufffd'),               # start byte of an overlong encoding
+            ('\xc2', u'\ufffd'),               # 2-byte seq with only 1 byte
+            ('\xc2\x41', u'\ufffdA'),          # 2-byte seq without continuation byte
+            ('\xe0', u'\ufffd'),               # 3-byte seq with only 1 byte
+            ('\xe0\x80', u'\ufffd'),           # 3-byte seq with only 2 bytes
+            ('\xe0\x81\x41', u'\ufffdA'),      # 3-byte seq with only 2 valid bytes
+            ('\xf0', u'\ufffd'),               # 4-byte seq with only 1 byte
+            ('\xf0\x82', u'\ufffd'),           # 4-byte seq with only 2 bytes
+            ('\xf0\x83\x84', u'\ufffd'),       # 4-byte seq with only 3 bytes
+            ('\xf0\x83\x84\x41', u'\ufffdA'),  # 4-byte seq with only 3 valid bytes
+            ('\xf5', u'\ufffd'),               # invalid 4-byte seq with only 1 byte
+            ('\xf5\x85', u'\ufffd\ufffd'),     # invalid 4-byte seq with only 2 bytes
+            ('\xf5\x80\x41', u'\ufffd\ufffdA'),  # invalid 4-byte seq with only 2 bytes
+            ('\xf5\x86\x87\x88', u'\ufffd'*4),   # invalid 4-byte seq with 4 bytes
+            ('\xf8', u'\ufffd'),               # invalid 5-byte seq with only 1 byte
+            ('\xf8\x89', u'\ufffd\ufffd'),     # invalid 5-byte seq with only 2 bytes
+            ('\xf8\x80\x41', u'\ufffd\ufffdA'),  # invalid 5-byte seq with only 2 bytes
+            ('\xf8\x90\x91\x92\x93', u'\ufffd'*5),  # invalid 5-byte seq with 5 bytes
+            ('\xfc', u'\ufffd'),               # invalid 6-byte seq with only 1 byte
+            ('\xfc\x94\x95', u'\ufffd'*3),     # invalid 6-byte seq with only 3 bytes
+            ('\xfc\x96\x97\x98\x99\xa0', u'\ufffd'*6),  # invalid 6-byte seq with 6 bytes
+            ('\xfe', u'\ufffd'),               # invalid start byte
+            ('\xfe\xa1\xa2', u'\ufffd'*3),     # invalid start byte followed by 2 bytes
+            # other sequences
+            ('\xf1\x80\x41\x42\x43', u'\ufffd\x41\x42\x43'),
+            ('\xf1\x80\xff\x42\x43', u'\ufffd\ufffd\x42\x43'),
+            ('\xf1\x80\xc2\x81\x43', u'\ufffd\x81\x43'),
+            ('\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64',
+             u'\x61\uFFFD\uFFFD\uFFFD\x62\uFFFD\x63\uFFFD\uFFFD\x64'),
+        ]
+        for n, (seq, res) in enumerate(sequences):
+            #sys.__stdout__.write('%d %r %r %r\n' % (n, seq, res, seq.decode('utf-8', 'replace')))
+            self.assertRaises(UnicodeDecodeError, seq.decode, 'utf-8')
+            self.assertEqual(seq.decode('utf-8', 'replace'), res)
+
     def test_codecs_idna(self):
         # Test whether trailing dot is preserved
         self.assertEqual(u"www.python.org.".encode("idna"), "www.python.org.")
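Note (illustration only, not part of the patch): the sketch below mirrors the lead-byte classification that the patched utf8_code_length table encodes, using the RFC 3629 ranges shown above. The helper name utf8_lead_byte_length is hypothetical and exists only for this example.

    # Illustration only: same semantics as the patched utf8_code_length table.
    # Returns the expected sequence length for a UTF-8 lead byte, or 0 if the
    # byte can never start a valid sequence (RFC 3629).
    def utf8_lead_byte_length(b):
        if b <= 0x7F:            # 00-7F: 1-byte (ASCII) sequences
            return 1
        if 0xC2 <= b <= 0xDF:    # C2-DF: 2-byte sequences (C0-C1 only start overlong forms)
            return 2
        if 0xE0 <= b <= 0xEF:    # E0-EF: 3-byte sequences
            return 3
        if 0xF0 <= b <= 0xF4:    # F0-F4: 4-byte sequences (F5-FF would exceed U+10FFFF)
            return 4
        return 0                 # 80-BF continuation bytes, C0-C1, F5-FF

    assert ([utf8_lead_byte_length(b) for b in (0x41, 0xC1, 0xC2, 0xED, 0xF4, 0xF5)]
            == [1, 0, 2, 3, 4, 0])

The table alone cannot express the restrictions on the second byte (E0 requires A0-BF, F0 requires 90-BF, F4 requires 80-8F, and ED would require 80-9F if the commented-out surrogate check were enabled); those are handled by the extra conditions added in cases 3 and 4. With the new endinpos/for-k logic, the 'replace' handler substitutes one U+FFFD per maximal invalid chunk instead of also consuming the valid bytes that follow, e.g. (Python 2 syntax, matching the tests above) '\xf1\x80\x41\x42\x43'.decode('utf-8', 'replace') == u'\ufffdABC'.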