diff -r 090502a0c69c Objects/obmalloc.c
--- a/Objects/obmalloc.c	Tue Apr 26 12:35:13 2016 +0200
+++ b/Objects/obmalloc.c	Tue Apr 26 13:32:39 2016 +0200
@@ -70,22 +70,46 @@ static void _PyObject_Free(void *ctx, vo
 static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
 #endif
 
+/* Counter for the number of currently allocated memory blocks */
+#ifdef Py_DEBUG
+/* Note: _Py_atomic_address is an uintptr_t type in practice */
+static _Py_atomic_address _Py_AllocatedBlocks = {0};
+#define ALLOCATED_BLOCKS_INC() (_Py_AllocatedBlocks._value++)
+#define ALLOCATED_BLOCKS_DEC() (_Py_AllocatedBlocks._value--)
+#define ALLOCATED_BLOCKS_GET() ((Py_ssize_t)_Py_atomic_load_relaxed(&_Py_AllocatedBlocks))
+#else
+static Py_ssize_t _Py_AllocatedBlocks = 0;
+#define ALLOCATED_BLOCKS_INC() (_Py_AllocatedBlocks++)
+#define ALLOCATED_BLOCKS_DEC() (_Py_AllocatedBlocks--)
+#define ALLOCATED_BLOCKS_GET() (_Py_AllocatedBlocks)
+#endif
+
 static void *
 _PyMem_RawMalloc(void *ctx, size_t size)
 {
+    void *ptr;
+
     /* PyMem_RawMalloc(0) means malloc(1). Some systems would return NULL
        for malloc(0), which would be treated as an error. Some platforms would
        return a pointer with no memory behind it, which would break pymalloc.
        To solve these problems, allocate an extra byte. */
     if (size == 0)
         size = 1;
-    return malloc(size);
+    ptr = malloc(size);
+#ifdef Py_DEBUG
+    if (ptr != NULL) {
+        ALLOCATED_BLOCKS_INC();
+    }
+#endif
+    return ptr;
 }
 
 static void *
 _PyMem_RawCalloc(void *ctx, size_t nelem, size_t elsize)
 {
+    void *ptr;
+
     /* PyMem_RawCalloc(0, 0) means calloc(1, 1). Some systems would return NULL
        for calloc(0, 0), which would be treated as an error. Some platforms
        would return a pointer with no memory behind it, which would break
@@ -94,7 +118,13 @@ static void *
         nelem = 1;
         elsize = 1;
     }
-    return calloc(nelem, elsize);
+    ptr = calloc(nelem, elsize);
+#ifdef Py_DEBUG
+    if (ptr != NULL) {
+        ALLOCATED_BLOCKS_INC();
+    }
+#endif
+    return ptr;
 }
 
 static void *
@@ -108,6 +138,11 @@ static void *
 static void
 _PyMem_RawFree(void *ctx, void *ptr)
 {
+#ifdef Py_DEBUG
+    if (ptr != NULL) {
+        ALLOCATED_BLOCKS_DEC();
+    }
+#endif
     free(ptr);
 }
 
@@ -1016,12 +1051,10 @@ static size_t ntimes_arena_allocated = 0
 /* High water mark (max value ever seen) for narenas_currently_allocated. */
 static size_t narenas_highwater = 0;
 
-static Py_ssize_t _Py_AllocatedBlocks = 0;
-
 Py_ssize_t
 _Py_GetAllocatedBlocks(void)
 {
-    return _Py_AllocatedBlocks;
+    return ALLOCATED_BLOCKS_GET();
 }
 
 
@@ -1264,8 +1297,7 @@ static void *
     poolp pool;
     poolp next;
     uint size;
-
-    _Py_AllocatedBlocks++;
+    void *result;
 
     assert(nelem <= PY_SSIZE_T_MAX / elsize);
     nbytes = nelem * elsize;
@@ -1282,6 +1314,7 @@ static void *
 
     if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
         LOCK();
+
         /*
          * Most frequent paths first
          */
@@ -1299,8 +1332,10 @@ static void *
                 UNLOCK();
                 if (use_calloc)
                     memset(bp, 0, nbytes);
+                ALLOCATED_BLOCKS_INC();
                 return (void *)bp;
             }
+
             /*
              * Reached the end of the free list, try to extend it.
              */
@@ -1313,8 +1348,10 @@ static void *
                 UNLOCK();
                 if (use_calloc)
                     memset(bp, 0, nbytes);
+                ALLOCATED_BLOCKS_INC();
                 return (void *)bp;
             }
+
             /* Pool is full, unlink from used pools. */
             next = pool->nextpool;
             pool = pool->prevpool;
@@ -1323,6 +1360,7 @@ static void *
             UNLOCK();
             if (use_calloc)
                 memset(bp, 0, nbytes);
+            ALLOCATED_BLOCKS_INC();
             return (void *)bp;
         }
 
@@ -1404,8 +1442,10 @@ static void *
             UNLOCK();
             if (use_calloc)
                 memset(bp, 0, nbytes);
+            ALLOCATED_BLOCKS_INC();
             return (void *)bp;
         }
+
         /*
          * Initialize the pool header, set up the free list to
          * contain just the second block, and return the first
@@ -1421,6 +1461,7 @@ static void *
         UNLOCK();
         if (use_calloc)
             memset(bp, 0, nbytes);
+        ALLOCATED_BLOCKS_INC();
        return (void *)bp;
     }
 
@@ -1459,16 +1500,19 @@ redirect:
      * last chance to serve the request) or when the max memory limit
      * has been reached.
      */
-    {
-        void *result;
-        if (use_calloc)
-            result = PyMem_RawCalloc(nelem, elsize);
-        else
-            result = PyMem_RawMalloc(nbytes);
-        if (!result)
-            _Py_AllocatedBlocks--;
-        return result;
+    if (use_calloc)
+        result = PyMem_RawCalloc(nelem, elsize);
+    else
+        result = PyMem_RawMalloc(nbytes);
+#ifndef Py_DEBUG
+    /* In a debug build PyMem_RawMalloc()/PyMem_RawCalloc() already counted
+       the block; in a release build they do not, so count it here.  The
+       plain counter is safe because _PyObject_Alloc() holds the GIL. */
+    if (result != NULL) {
+        ALLOCATED_BLOCKS_INC();
     }
+#endif
+    return result;
 }
 
 static void *
@@ -1500,8 +1544,6 @@ static void
     if (p == NULL)      /* free(NULL) has no effect */
         return;
 
-    _Py_AllocatedBlocks--;
-
 #ifdef WITH_VALGRIND
     if (UNLIKELY(running_on_valgrind > 0))
         goto redirect;
@@ -1511,6 +1553,9 @@ static void
     if (Py_ADDRESS_IN_RANGE(p, pool)) {
         /* We allocated this address. */
         LOCK();
+
+        ALLOCATED_BLOCKS_DEC();
+
         /* Link p to the start of the pool's freeblock list.  Since
          * the pool had at least the p block outstanding, the pool
          * wasn't empty (so it's already in a usedpools[] list, or
@@ -1705,6 +1750,16 @@ static void
 #ifdef WITH_VALGRIND
 redirect:
 #endif
+
+#ifndef Py_DEBUG
+    /* In a debug build PyMem_RawFree() will decrement the counter itself;
+       in a release build it does not, so decrement it here.  The plain
+       counter is safe because _PyObject_Free() holds the GIL. */
+    if (p != NULL) {
+        ALLOCATED_BLOCKS_DEC();
+    }
+#endif
+
     /* We didn't allocate this address. */
     PyMem_RawFree(p);
 }
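
Side note for reviewers (not part of the patch): below is a minimal standalone
sketch of the counting scheme used here. The raw_malloc()/raw_free() wrappers
and the PY_DEBUG_SKETCH macro are illustrative stand-ins for
_PyMem_RawMalloc()/_PyMem_RawFree() and Py_DEBUG, and CPython's pyatomic.h
primitives (_Py_atomic_address, _Py_atomic_load_relaxed) are replaced with C11
<stdatomic.h> so the file compiles on its own. The debug flavor updates the
counter atomically because the raw allocators may be called without the GIL
held; the release flavor uses a plain integer on the assumption that it is
only ever touched with the GIL held.

/* counter_sketch.c -- build the "debug" flavor with
 *     cc -std=c11 -DPY_DEBUG_SKETCH counter_sketch.c
 * and the "release" flavor by omitting -DPY_DEBUG_SKETCH. */
#include <stdio.h>
#include <stdlib.h>

#ifdef PY_DEBUG_SKETCH
/* Debug flavor: the raw allocators may run without the GIL, so the
   counter must be updated atomically. */
#include <stdatomic.h>
static atomic_size_t allocated_blocks;
#define ALLOCATED_BLOCKS_INC() \
    atomic_fetch_add_explicit(&allocated_blocks, 1, memory_order_relaxed)
#define ALLOCATED_BLOCKS_DEC() \
    atomic_fetch_sub_explicit(&allocated_blocks, 1, memory_order_relaxed)
#define ALLOCATED_BLOCKS_GET() \
    atomic_load_explicit(&allocated_blocks, memory_order_relaxed)
#else
/* Release flavor: every caller holds the GIL, so a plain integer is
   enough and the hot paths pay no atomic-operation cost. */
static size_t allocated_blocks;
#define ALLOCATED_BLOCKS_INC() (allocated_blocks++)
#define ALLOCATED_BLOCKS_DEC() (allocated_blocks--)
#define ALLOCATED_BLOCKS_GET() (allocated_blocks)
#endif

/* Counted wrappers mirroring _PyMem_RawMalloc()/_PyMem_RawFree(). */
static void *
raw_malloc(size_t size)
{
    void *ptr = malloc(size ? size : 1);  /* malloc(0) -> malloc(1), as in the patch */
    if (ptr != NULL)
        ALLOCATED_BLOCKS_INC();           /* count only successful allocations */
    return ptr;
}

static void
raw_free(void *ptr)
{
    if (ptr != NULL)
        ALLOCATED_BLOCKS_DEC();           /* free(NULL) must not move the counter */
    free(ptr);
}

int
main(void)
{
    void *a = raw_malloc(16);
    void *b = raw_malloc(0);              /* one byte was allocated, so it counts */
    printf("allocated blocks: %zu\n", (size_t)ALLOCATED_BLOCKS_GET());  /* 2 */
    raw_free(a);
    raw_free(b);
    raw_free(NULL);                       /* no effect, like free(NULL) */
    printf("allocated blocks: %zu\n", (size_t)ALLOCATED_BLOCKS_GET());  /* 0 */
    return 0;
}

At the Python level the counter is exposed through sys.getallocatedblocks(),
which calls _Py_GetAllocatedBlocks(), i.e. the new ALLOCATED_BLOCKS_GET().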