Rietveld Code Review Tool

Diff: Objects/obmalloc.c

Issue 13390: Hunt memory allocations in addition to reference leaks
Patch Set: Created 7 years, 10 months ago
 #include "Python.h"
 
 #ifdef WITH_PYMALLOC
 
 #ifdef HAVE_MALLOPT_MMAP_THRESHOLD
 #include <malloc.h>
 #endif
 
 #ifdef WITH_VALGRIND
 #include <valgrind/valgrind.h>
(...skipping 508 matching lines...)
 
 /* Number of arenas allocated that haven't been free()'d. */
 static size_t narenas_currently_allocated = 0;
 
 #ifdef PYMALLOC_DEBUG
 /* Total number of times malloc() called to allocate an arena. */
 static size_t ntimes_arena_allocated = 0;
 /* High water mark (max value ever seen) for narenas_currently_allocated. */
 static size_t narenas_highwater = 0;
 #endif
+
+Py_ssize_t _Py_AllocedBlocks = 0;
 
 /* Allocate a new arena. If we run out of memory, return NULL. Else
  * allocate a new arena, and return the address of an arena_object
  * describing the new arena. It's expected that the caller will set
  * `usable_arenas` to the return value.
  */
 static struct arena_object*
 new_arena(void)
 {
     struct arena_object* arenaobj;
(...skipping 237 matching lines...)
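The counter added above tracks every live block handed out by the allocator. To be useful to a test harness, something has to expose it; a minimal sketch, assuming a hypothetical `_Py_GetAllocedBlocks()` name (the hunk shown here only declares the counter):

    /* Hypothetical accessor -- name and placement are illustrative,
     * not part of the diff shown here. */
    Py_ssize_t
    _Py_GetAllocedBlocks(void)
    {
        return _Py_AllocedBlocks;
    }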
 #endif
 
     /*
      * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
      * Most python internals blindly use a signed Py_ssize_t to track
      * things without checking for overflows or negatives.
      * As size_t is unsigned, checking for nbytes < 0 is not required.
      */
     if (nbytes > PY_SSIZE_T_MAX)
         return NULL;
+
+    _Py_AllocedBlocks++;
 
     /*
      * This implicitly redirects malloc(0).
      */
     if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
         LOCK();
         /*
          * Most frequent paths first
          */
         size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
(...skipping 99 matching lines...)
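The `(nbytes - 1) < SMALL_REQUEST_THRESHOLD` test above relies on unsigned wraparound: for `nbytes == 0` the subtraction yields `SIZE_MAX`, the comparison fails, and the request falls through to the redirect path, which is exactly what the "implicitly redirects malloc(0)" comment means. A standalone sketch, assuming the 256-byte threshold pymalloc used at the time:

    #include <stdio.h>
    #include <stddef.h>

    #define SMALL_REQUEST_THRESHOLD 256   /* assumed pymalloc default */

    int main(void)
    {
        size_t tests[] = {0, 1, 256, 257};
        for (int i = 0; i < 4; i++) {
            size_t nbytes = tests[i];
            /* nbytes == 0 wraps to SIZE_MAX and is sent to malloc() */
            printf("nbytes=%zu -> %s\n", nbytes,
                   (nbytes - 1) < SMALL_REQUEST_THRESHOLD ? "pool" : "redirect");
        }
        return 0;
    }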
             next->prevpool = pool;
             pool->ref.count = 1;
             if (pool->szidx == size) {
                 /* Luckily, this pool last contained blocks
                  * of the same size class, so its header
                  * and free list are already initialized.
                  */
                 bp = pool->freeblock;
                 pool->freeblock = *(block **)bp;
                 UNLOCK();
                 return (void *)bp;
Nick Coghlan 2011/11/13 01:52:44: To guarantee correct updating of the number of all…
             }
             /*
              * Initialize the pool header, set up the free list to
              * contain just the second block, and return the first
              * block.
              */
             pool->szidx = size;
             size = INDEX2SIZE(size);
             bp = (block *)pool + POOL_OVERHEAD;
             pool->nextoffset = POOL_OVERHEAD + (size << 1);
(...skipping 34 matching lines...)
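The fast path above pops from an intrusive free list: each free block stores the address of the next free block in its own first bytes, so `pool->freeblock = *(block **)bp` advances the list with no extra bookkeeping. A self-contained sketch of the same idiom (toy sizes, not obmalloc's real pool layout):

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned char block;

    int main(void)
    {
        enum { BLKSIZE = 16, NBLOCKS = 4 };
        block *pool = malloc((size_t)BLKSIZE * NBLOCKS);
        block *freeblock = NULL;
        if (pool == NULL)
            return 1;

        /* Push: thread every block onto the list through its first bytes. */
        for (int i = NBLOCKS - 1; i >= 0; i--) {
            block *b = pool + i * BLKSIZE;
            *(block **)b = freeblock;
            freeblock = b;
        }

        /* Pop, mirroring `bp = pool->freeblock; pool->freeblock = *(block **)bp;` */
        while (freeblock != NULL) {
            block *bp = freeblock;
            freeblock = *(block **)bp;
            printf("handing out block at offset %td\n", bp - pool);
        }
        free(pool);
        return 0;
    }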
     /* The small block allocator ends here. */
 
 redirect:
     /* Redirect the original request to the underlying (libc) allocator.
      * We jump here on bigger requests, on error in the code above (as a
      * last chance to serve the request) or when the max memory limit
      * has been reached.
      */
     if (nbytes == 0)
         nbytes = 1;
-    return (void *)malloc(nbytes);
+    {
+        void *result = malloc(nbytes);
+        if (!result)
+            _Py_AllocedBlocks--;
+        return result;
+    }
 }
 
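The redirect hunk keeps the counter honest on the one path that can still fail: the increment happened optimistically near the top of PyObject_Malloc, so a NULL from the system malloc() has to be paid back with a decrement. The discipline, distilled into an illustrative wrapper (not CPython code):

    #include "Python.h"   /* Py_ssize_t; also pulls in <stdlib.h> */

    extern Py_ssize_t _Py_AllocedBlocks;

    /* Increment before any path that can succeed, roll back on the only
     * path that can fail -- the counter then always equals the number of
     * live blocks. */
    static void *
    counted_malloc(size_t nbytes)
    {
        _Py_AllocedBlocks++;              /* assume success */
        void *result = malloc(nbytes ? nbytes : 1);
        if (result == NULL)
            _Py_AllocedBlocks--;          /* undo on failure */
        return result;
    }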
 /* free */
 
 #undef PyObject_Free
 void
 PyObject_Free(void *p)
 {
     poolp pool;
     block *lastfree;
     poolp next, prev;
     uint size;
 #ifndef Py_USING_MEMORY_DEBUGGER
     uint arenaindex_temp;
 #endif
 
     if (p == NULL) /* free(NULL) has no effect */
         return;
+
+    _Py_AllocedBlocks--;
 
 #ifdef WITH_VALGRIND
     if (UNLIKELY(running_on_valgrind > 0))
         goto redirect;
 #endif
 
     pool = POOL_ADDR(p);
     if (Py_ADDRESS_IN_RANGE(p, pool)) {
         /* We allocated this address. */
         LOCK();
(...skipping 907 matching lines...)
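With the decrement above, every PyObject_Free() balances a PyObject_Malloc(), so the counter works as a direct leak probe. A sketch of how an embedding test might consume it (the patch's real consumer is the leak-hunting machinery in Lib/test/support.py; internal free lists can make a single-run delta nonzero, which is why a real harness repeats the workload and only checks later runs):

    #include <stdio.h>
    #include "Python.h"

    extern Py_ssize_t _Py_AllocedBlocks;

    static void
    run_workload(void)           /* stands in for any code under test */
    {
        PyObject *o = PyList_New(0);
        Py_XDECREF(o);
    }

    int main(void)
    {
        Py_Initialize();
        Py_ssize_t before = _Py_AllocedBlocks;
        run_workload();
        printf("block delta: %zd\n", _Py_AllocedBlocks - before);
        Py_Finalize();
        return 0;
    }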
 int
 Py_ADDRESS_IN_RANGE(void *P, poolp pool)
 {
     uint arenaindex_temp = pool->arenaindex;
 
     return arenaindex_temp < maxarenas &&
            (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE &&
            arenas[arenaindex_temp].address != 0;
 }
 #endif
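Py_ADDRESS_IN_RANGE packs both bounds checks into one unsigned comparison: if P lies below the arena base, `(uptr)P - address` wraps to a huge value and the single `< ARENA_SIZE` test rejects it. The trick in isolation (ARENA_SIZE of 256 KiB, matching obmalloc.c):

    #include <stdio.h>
    #include <stdint.h>

    #define ARENA_SIZE (256 << 10)   /* 256 KiB, as in obmalloc.c */

    /* One compare covers both "p >= base" and "p < base + ARENA_SIZE". */
    static int
    in_arena(uintptr_t p, uintptr_t base)
    {
        return p - base < (uintptr_t)ARENA_SIZE;
    }

    int main(void)
    {
        uintptr_t base = 0x100000;
        printf("%d\n", in_arena(base + 10, base));          /* 1: inside */
        printf("%d\n", in_arena(base - 1, base));           /* 0: below base */
        printf("%d\n", in_arena(base + ARENA_SIZE, base));  /* 0: past end */
        return 0;
    }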