Index: Objects/dictobject.c
===================================================================
--- Objects/dictobject.c (revision 85803)
+++ Objects/dictobject.c (working copy)
@@ -124,15 +124,6 @@
 polynomial. In Tim's experiments the current scheme ran faster, produced
 equally good collision statistics, needed less code & used less memory.
 
-Theoretical Python 2.5 headache: hash codes are only C "long", but
-sizeof(Py_ssize_t) > sizeof(long) may be possible. In that case, and if a
-dict is genuinely huge, then only the slots directly reachable via indexing
-by a C long can be the first slot in a probe sequence. The probe sequence
-will still eventually reach every slot in the table, but the collision rate
-on initial probes may be much higher than this scheme was designed for.
-Getting a hash code as fat as Py_ssize_t is the only real cure. But in
-practice, this probably won't make a lick of difference for many years (at
-which point everyone will have terabytes of RAM on 64-bit boxes).
 */
 
 /* Object used as dummy key to fill deleted entries */
@@ -531,7 +522,7 @@
 {
     PyObject *old_value;
     register PyDictEntry *ep;
-    typedef PyDictEntry *(*lookupfunc)(PyDictObject *, PyObject *, long);
+    typedef PyDictEntry *(*lookupfunc)(PyDictObject *, PyObject *, Py_hash_t);
 
     assert(mp->ma_lookup != NULL);
     ep = mp->ma_lookup(mp, key, hash);
Index: Objects/setobject.c
===================================================================
--- Objects/setobject.c (revision 85803)
+++ Objects/setobject.c (working copy)
@@ -214,7 +214,7 @@
 set_insert_key(register PySetObject *so, PyObject *key, Py_hash_t hash)
 {
     register setentry *entry;
-    typedef setentry *(*lookupfunc)(PySetObject *, PyObject *, long);
+    typedef setentry *(*lookupfunc)(PySetObject *, PyObject *, Py_hash_t);
 
     assert(so->lookup != NULL);
     entry = so->lookup(so, key, hash);
@@ -663,7 +663,7 @@
         if (key != NULL &&
             key != dummy) {
             Py_INCREF(key);
-            if (set_insert_key(so, key, (long) entry->hash) == -1) {
+            if (set_insert_key(so, key, entry->hash) == -1) {
                 Py_DECREF(key);
                 return -1;
             }
@@ -772,14 +772,14 @@
     if (so->hash != -1)
         return so->hash;
 
-    hash *= (long) PySet_GET_SIZE(self) + 1;
+    hash *= PySet_GET_SIZE(self) + 1;
     while (set_next(so, &pos, &entry)) {
         /* Work to increase the bit dispersion for closely spaced hash
            values. The is important because some use cases have many
            combinations of a small number of elements with nearby
            hashes so that many distinct combinations collapse to only
            a handful of distinct hash values. */
-        h = (long) entry->hash;
+        h = entry->hash;
         hash ^= (h ^ (h << 16) ^ 89869747L) * 3644798167u;
     }
     hash = hash * 69069L + 907133923L;
@@ -1116,7 +1116,7 @@
     setentry *u;
     setentry *(*f)(PySetObject *so, PyObject *key, Py_ssize_t hash);
     setentry tab[PySet_MINSIZE];
-    long h;
+    Py_hash_t h;
 
     t = a->fill; a->fill = b->fill; b->fill = t;
     t = a->used; a->used = b->used; b->used = t;
@@ -1550,7 +1550,7 @@
         setentry entrycopy;
         entrycopy.hash = entry->hash;
         entrycopy.key = entry->key;
-        if (!_PyDict_Contains(other, entry->key, (long) entry->hash)) {
+        if (!_PyDict_Contains(other, entry->key, entry->hash)) {
             if (set_add_entry((PySetObject *)result, &entrycopy) == -1) {
                 Py_DECREF(result);
                 return NULL;
Index: PC/winreg.c
===================================================================
--- PC/winreg.c (revision 85803)
+++ PC/winreg.c (working copy)
@@ -452,7 +452,7 @@
          (pyhkey1 < pyhkey2 ? -1 : 1);
 }
 
-static long
+static Py_hash_t
 PyHKEY_hashFunc(PyObject *ob)
 {
     /* Just use the address.
Index: Lib/test/test_sys.py
===================================================================
--- Lib/test/test_sys.py (revision 85803)
+++ Lib/test/test_sys.py (working copy)
@@ -827,7 +827,7 @@
         # we need to test for both sizes, because we don't know if the string
         # has been cached
         for s in samples:
-            basicsize = size(h + 'PPliP') + usize * (len(s) + 1)
+            basicsize = size(h + 'PPPiP') + usize * (len(s) + 1)
             check(s, basicsize)
         # weakref
         import weakref
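
Context for the patch above: it assumes a Py_hash_t type that is as wide as Py_ssize_t (presumably defined alongside Py_ssize_t, e.g. in Include/pyport.h), which is why the (long) casts around entry->hash can simply be dropped and why the 'l' (C long) code in test_sys.py's struct-format string becomes a pointer-sized 'P'. The standalone sketch below only illustrates that width assumption; the local typedefs and the toy_hash helper are stand-ins, not CPython code.

/* hash_width_sketch.c -- minimal, self-contained illustration (not CPython).
 * Assumption being demonstrated: Py_hash_t has the same width as Py_ssize_t,
 * so a hash value no longer has to be squeezed through a C "long", which is
 * only 32 bits on LLP64 platforms such as 64-bit Windows.
 */
#include <stdio.h>
#include <stddef.h>

typedef ptrdiff_t Py_ssize_t;   /* stand-in: signed, pointer-sized        */
typedef Py_ssize_t Py_hash_t;   /* assumed relationship used by the patch */

/* Hypothetical helper mirroring PyHKEY_hashFunc's "just use the address"
 * idea: with Py_hash_t, no high bits of the pointer are discarded. */
static Py_hash_t toy_hash(const void *key)
{
    return (Py_hash_t)(size_t)key;
}

int main(void)
{
    int probe;
    printf("sizeof(long)       = %zu\n", sizeof(long));
    printf("sizeof(Py_ssize_t) = %zu\n", sizeof(Py_ssize_t));
    printf("sizeof(Py_hash_t)  = %zu\n", sizeof(Py_hash_t));
    printf("toy_hash(&probe)   = %td\n", (ptrdiff_t)toy_hash(&probe));
    return 0;
}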