diff -r 26c435168228 -r 2241c4aa43a0 Lib/test/test_zlib.py
--- a/Lib/test/test_zlib.py	Mon Dec 24 12:41:45 2012 +0100
+++ b/Lib/test/test_zlib.py	Mon Dec 24 13:22:12 2012 +0100
@@ -2,7 +2,6 @@
 from test import support
 import binascii
 import random
-import sys
 from test.support import bigmemtest, _1G, _4G
 
 zlib = support.import_module('zlib')
@@ -51,14 +50,14 @@
         self.assertEqual32(zlib.adler32(b"penguin", 1), 0x0bd602f7)
 
         self.assertEqual(zlib.crc32(b"penguin"), zlib.crc32(b"penguin", 0))
-        self.assertEqual(zlib.adler32(b"penguin"),zlib.adler32(b"penguin",1))
+        self.assertEqual(zlib.adler32(b"penguin"), zlib.adler32(b"penguin", 1))
 
     def test_crc32_adler32_unsigned(self):
         foo = b'abcdefghijklmnop'
         # explicitly test signed behavior
         self.assertEqual(zlib.crc32(foo), 2486878355)
         self.assertEqual(zlib.crc32(b'spam'), 1138425661)
-        self.assertEqual(zlib.adler32(foo+foo), 3573550353)
+        self.assertEqual(zlib.adler32(foo + foo), 3573550353)
         self.assertEqual(zlib.adler32(b'spam'), 72286642)
 
     def test_same_as_binascii_crc32(self):
@@ -103,7 +102,8 @@
         self.assertRaises(ValueError, zlib.compressobj, 1, zlib.DEFLATED, 0)
         # specifying total bits too large causes an error
         self.assertRaises(ValueError,
-                zlib.compressobj, 1, zlib.DEFLATED, zlib.MAX_WBITS + 1)
+                          zlib.compressobj, 1, zlib.DEFLATED,
+                          zlib.MAX_WBITS + 1)
 
     def test_baddecompressobj(self):
         # verify failure on building decompress object with bad params
@@ -118,7 +118,6 @@
 class BaseCompressTestCase(object):
     def check_big_compress_buffer(self, size, compress_func):
         _1M = 1024 * 1024
-        fmt = "%%0%dx" % (2 * _1M)
         # Generate 10MB worth of random, and expand it by repeating it.
         # The assumption is that zlib's memory is not big enough to exploit
         # such spread out redundancy.
@@ -170,8 +169,9 @@
         # An useful error message is given
         x = zlib.compress(HAMLET_SCENE)
         self.assertRaisesRegex(zlib.error,
-            "Error -5 while decompressing data: incomplete or truncated stream",
-            zlib.decompress, x[:-1])
+                               "Error -5 while decompressing data: incomplete"
+                               " or truncated stream",
+                               zlib.decompress, x[:-1])
 
 
 # Memory use of the following functions takes into account overallocation
@@ -205,7 +205,7 @@
         co = zlib.compressobj()
         x1 = co.compress(data)
         x2 = co.flush()
-        self.assertRaises(zlib.error, co.flush) # second flush should not work
+        self.assertRaises(zlib.error, co.flush)  # second flush should not work
         self.assertEqual(x1 + x2, datazip)
         for v1, v2 in ((x1, x2), (bytearray(x1), bytearray(x2))):
             dco = zlib.decompressobj()
@@ -246,9 +246,8 @@
         co = zlib.compressobj()
         bufs = []
         for i in range(0, len(data), 256):
-            bufs.append(co.compress(data[i:i+256]))
+            bufs.append(co.compress(data[i:i + 256]))
         bufs.append(co.flush())
-        combuf = b''.join(bufs)
 
         dco = zlib.decompressobj()
         y1 = dco.decompress(b''.join(bufs))
@@ -262,7 +261,7 @@
         co = zlib.compressobj()
         bufs = []
         for i in range(0, len(data), cx):
-            bufs.append(co.compress(data[i:i+cx]))
+            bufs.append(co.compress(data[i:i + cx]))
         bufs.append(co.flush())
         combuf = b''.join(bufs)
 
@@ -275,10 +274,10 @@
         dco = zlib.decompressobj()
         bufs = []
         for i in range(0, len(combuf), dcx):
-            bufs.append(dco.decompress(combuf[i:i+dcx]))
-            self.assertEqual(b'', dco.unconsumed_tail, ########
+            bufs.append(dco.decompress(combuf[i:i + dcx]))
+            self.assertEqual(b'', dco.unconsumed_tail,
                              "(A) uct should be b'': not %d long" %
-                                       len(dco.unconsumed_tail))
+                             (len(dco.unconsumed_tail), ))
             self.assertEqual(b'', dco.unused_data)
         if flush:
             bufs.append(dco.flush())
@@ -289,9 +288,9 @@
                     bufs.append(chunk)
                 else:
                     break
-        self.assertEqual(b'', dco.unconsumed_tail, ########
+        self.assertEqual(b'', dco.unconsumed_tail,
                          "(B) uct should be b'': not %d long" %
-                                  len(dco.unconsumed_tail))
+                         (len(dco.unconsumed_tail), ))
         self.assertEqual(b'', dco.unused_data)
         self.assertEqual(data, b''.join(bufs))
         # Failure means: "decompressobj with init options failed"
@@ -307,7 +306,7 @@
         co = zlib.compressobj()
         bufs = []
         for i in range(0, len(data), cx):
-            bufs.append(co.compress(data[i:i+cx]))
+            bufs.append(co.compress(data[i:i + cx]))
         bufs.append(co.flush())
         combuf = b''.join(bufs)
         self.assertEqual(data, zlib.decompress(combuf),
@@ -320,7 +319,7 @@
             #max_length = 1 + len(cb)//10
             chunk = dco.decompress(cb, dcx)
             self.assertFalse(len(chunk) > dcx,
-                    'chunk too big (%d>%d)' % (len(chunk), dcx))
+                             'chunk too big (%d>%d)' % (len(chunk), dcx))
             bufs.append(chunk)
             cb = dco.unconsumed_tail
         bufs.append(dco.flush())
@@ -332,7 +331,7 @@
         co = zlib.compressobj()
         bufs = []
         for i in range(0, len(data), 256):
-            bufs.append(co.compress(data[i:i+256]))
+            bufs.append(co.compress(data[i:i + 256]))
         bufs.append(co.flush())
         combuf = b''.join(bufs)
         self.assertEqual(data, zlib.decompress(combuf),
@@ -342,10 +341,10 @@
         bufs = []
         cb = combuf
         while cb:
-            max_length = 1 + len(cb)//10
+            max_length = 1 + len(cb) // 10
             chunk = dco.decompress(cb, max_length)
             self.assertFalse(len(chunk) > max_length,
-                    'chunk too big (%d>%d)' % (len(chunk),max_length))
+                             'chunk too big (%d>%d)' % (len(chunk), max_length))
             bufs.append(chunk)
             cb = dco.unconsumed_tail
         if flush:
@@ -354,7 +353,8 @@
             while chunk:
                 chunk = dco.decompress(b'', max_length)
                 self.assertFalse(len(chunk) > max_length,
-                        'chunk too big (%d>%d)' % (len(chunk),max_length))
+                                 'chunk too big (%d>%d)' % (len(chunk),
+                                                            max_length))
                 bufs.append(chunk)
 
         self.assertEqual(data, b''.join(bufs), 'Wrong data retrieved')
@@ -386,12 +386,12 @@
 
         for sync in sync_opt:
             for level in range(10):
-                obj = zlib.compressobj( level )
-                a = obj.compress( data[:3000] )
-                b = obj.flush( sync )
-                c = obj.compress( data[3000:] )
+                obj = zlib.compressobj(level)
+                a = obj.compress(data[:3000])
+                b = obj.flush(sync)
+                c = obj.compress(data[3000:])
                 d = obj.flush()
-                self.assertEqual(zlib.decompress(b''.join([a,b,c,d])),
+                self.assertEqual(zlib.decompress(b''.join([a, b, c, d])),
                                  data, ("Decompress failed: flush "
                                         "mode=%i, level=%i") % (sync, level))
                 del obj
@@ -438,7 +438,7 @@
         co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
         self.assertTrue(co.flush())  # Returns a zlib header
         dco = zlib.decompressobj()
-        self.assertEqual(dco.flush(), b"") # Returns nothing
+        self.assertEqual(dco.flush(), b"")  # Returns nothing
 
     def test_dictionary(self):
         h = HAMLET_SCENE
@@ -517,11 +517,11 @@
                 if i < len(y):
                     self.assertEqual(dco.unused_data, b'')
                 if maxlen == 0:
-                    data += dco.decompress(x[i : i + step])
+                    data += dco.decompress(x[i: i + step])
                     self.assertEqual(dco.unconsumed_tail, b'')
                 else:
-                    data += dco.decompress(
-                            dco.unconsumed_tail + x[i : i + step], maxlen)
+                    data += dco.decompress(dco.unconsumed_tail +
+                                           x[i: i + step], maxlen)
             data += dco.flush()
             self.assertTrue(dco.eof)
             self.assertEqual(data, source)
@@ -537,7 +537,7 @@
         dco = zlib.decompressobj()
         dco.decompress(data, 1)
         del data
-        data = zlib.compress(input2)
+        zlib.compress(input2)
         self.assertEqual(dco.flush(), input1[1:])
 
     if hasattr(zlib.compressobj(), "copy"):
@@ -560,8 +560,8 @@
             bufs1.append(c1.flush())
             s1 = b''.join(bufs1)
 
-            self.assertEqual(zlib.decompress(s0),data0+data0)
-            self.assertEqual(zlib.decompress(s1),data0+data1)
+            self.assertEqual(zlib.decompress(s0), data0 + data0)
+            self.assertEqual(zlib.decompress(s1), data0 + data1)
 
         def test_badcompresscopy(self):
             # Test copying a compression object in an inconsistent state
@@ -591,8 +591,8 @@
             bufs1.append(d1.decompress(comp[32:]))
             s1 = b''.join(bufs1)
 
-            self.assertEqual(s0,s1)
-            self.assertEqual(s0,data)
+            self.assertEqual(s0, s1)
+            self.assertEqual(s0, data)
 
         def test_baddecompresscopy(self):
             # Test copying a compression object in an inconsistent state
@@ -641,7 +641,6 @@
     return blocks
 
 
-
 def choose_lines(source, number, seed=None, generator=random):
     """Return a list of number lines randomly chosen from the source"""
     if seed is not None:
@@ -650,7 +649,6 @@
    return [generator.choice(sources) for n in range(number)]
 
 
-
 HAMLET_SCENE = b"""
 LAERTES
 
@@ -727,5 +725,4 @@
 )
 
 if __name__ == "__main__":
-    unittest.main() # XXX
-    ###test_main()
+    unittest.main()
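
(Reviewer note, not part of the patch: several hunks above reindent the
max_length/unconsumed_tail assertions in the test_decompressmaxlen tests.
For anyone reviewing who is unfamiliar with that protocol, here is a
minimal sketch of the documented decompressobj API that those tests
exercise; the helper name decompress_in_chunks is made up for
illustration and does not appear in the test suite.)

import zlib

def decompress_in_chunks(compressed, max_length=64):
    """Drain a decompressobj in bounded steps via unconsumed_tail."""
    dco = zlib.decompressobj()
    out = []
    buf = compressed
    while buf:
        # decompress() returns at most max_length bytes of output; input
        # it could not consume yet is stashed in dco.unconsumed_tail and
        # must be fed back in on the next call.
        chunk = dco.decompress(buf, max_length)
        assert len(chunk) <= max_length
        out.append(chunk)
        buf = dco.unconsumed_tail
    out.append(dco.flush())  # flush() returns any remaining buffered output
    return b''.join(out)

data = b'spam and eggs ' * 1000
assert decompress_in_chunks(zlib.compress(data)) == data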