Rietveld Code Review Tool
Help | Bug tracker | Discussion group | Source code | Sign in
(10)

Side by Side Diff: Objects/obmalloc.c

Issue 13390: Hunt memory allocations in addition to reference leaks
Patch Set: Created 8 years, 3 months ago
Left:
Right:
Use n/p to move between diff chunks; N/P to move between comments. Please Sign in to add in-line comments.
Jump to:
View unified diff | Download patch
« no previous file with comments | « Lib/test/test_sys.py ('k') | Python/pythonrun.c » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 #include "Python.h" 1 #include "Python.h"
2 2
3 #ifdef WITH_PYMALLOC 3 #ifdef WITH_PYMALLOC
4 4
5 #ifdef HAVE_MALLOPT_MMAP_THRESHOLD 5 #ifdef HAVE_MALLOPT_MMAP_THRESHOLD
6 #include <malloc.h> 6 #include <malloc.h>
7 #endif 7 #endif
8 8
9 #ifdef WITH_VALGRIND 9 #ifdef WITH_VALGRIND
10 #include <valgrind/valgrind.h> 10 #include <valgrind/valgrind.h>
(...skipping 508 matching lines...) Expand 10 before | Expand all | Expand 10 after
519 519
520 /* Number of arenas allocated that haven't been free()'d. */ 520 /* Number of arenas allocated that haven't been free()'d. */
521 static size_t narenas_currently_allocated = 0; 521 static size_t narenas_currently_allocated = 0;
522 522
523 #ifdef PYMALLOC_DEBUG 523 #ifdef PYMALLOC_DEBUG
524 /* Total number of times malloc() called to allocate an arena. */ 524 /* Total number of times malloc() called to allocate an arena. */
525 static size_t ntimes_arena_allocated = 0; 525 static size_t ntimes_arena_allocated = 0;
526 /* High water mark (max value ever seen) for narenas_currently_allocated. */ 526 /* High water mark (max value ever seen) for narenas_currently_allocated. */
527 static size_t narenas_highwater = 0; 527 static size_t narenas_highwater = 0;
528 #endif 528 #endif
529
530 static Py_ssize_t _Py_AllocedBlocks = 0;
531
532 Py_ssize_t
533 _Py_GetAllocedBlocks(void)
534 {
535 return _Py_AllocedBlocks;
536 }
537
529 538
530 /* Allocate a new arena. If we run out of memory, return NULL. Else 539 /* Allocate a new arena. If we run out of memory, return NULL. Else
531 * allocate a new arena, and return the address of an arena_object 540 * allocate a new arena, and return the address of an arena_object
532 * describing the new arena. It's expected that the caller will set 541 * describing the new arena. It's expected that the caller will set
533 * `usable_arenas` to the return value. 542 * `usable_arenas` to the return value.
534 */ 543 */
535 static struct arena_object* 544 static struct arena_object*
536 new_arena(void) 545 new_arena(void)
537 { 546 {
538 struct arena_object* arenaobj; 547 struct arena_object* arenaobj;
(...skipping 237 matching lines...) Expand 10 before | Expand all | Expand 10 after
776 #endif 785 #endif
777 786
778 /* 787 /*
779 * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes. 788 * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
780 * Most python internals blindly use a signed Py_ssize_t to track 789 * Most python internals blindly use a signed Py_ssize_t to track
781 * things without checking for overflows or negatives. 790 * things without checking for overflows or negatives.
782 * As size_t is unsigned, checking for nbytes < 0 is not required. 791 * As size_t is unsigned, checking for nbytes < 0 is not required.
783 */ 792 */
784 if (nbytes > PY_SSIZE_T_MAX) 793 if (nbytes > PY_SSIZE_T_MAX)
785 return NULL; 794 return NULL;
795
796 _Py_AllocedBlocks++;
786 797
787 /* 798 /*
788 * This implicitly redirects malloc(0). 799 * This implicitly redirects malloc(0).
789 */ 800 */
790 if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) { 801 if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
791 LOCK(); 802 LOCK();
792 /* 803 /*
793 * Most frequent paths first 804 * Most frequent paths first
794 */ 805 */
795 size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT; 806 size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
(...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after
893 pool->prevpool = next; 904 pool->prevpool = next;
894 next->nextpool = pool; 905 next->nextpool = pool;
895 next->prevpool = pool; 906 next->prevpool = pool;
896 pool->ref.count = 1; 907 pool->ref.count = 1;
897 if (pool->szidx == size) { 908 if (pool->szidx == size) {
898 /* Luckily, this pool last contained blocks 909 /* Luckily, this pool last contained blocks
899 * of the same size class, so its header 910 * of the same size class, so its header
900 * and free list are already initialized. 911 * and free list are already initialized.
901 */ 912 */
902 bp = pool->freeblock; 913 bp = pool->freeblock;
914 assert(bp != NULL);
903 pool->freeblock = *(block **)bp; 915 pool->freeblock = *(block **)bp;
904 UNLOCK(); 916 UNLOCK();
905 return (void *)bp; 917 return (void *)bp;
906 } 918 }
907 /* 919 /*
908 * Initialize the pool header, set up the free list to 920 * Initialize the pool header, set up the free list to
909 * contain just the second block, and return the first 921 * contain just the second block, and return the first
910 * block. 922 * block.
911 */ 923 */
912 pool->szidx = size; 924 pool->szidx = size;
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
950 /* The small block allocator ends here. */ 962 /* The small block allocator ends here. */
951 963
952 redirect: 964 redirect:
953 /* Redirect the original request to the underlying (libc) allocator. 965 /* Redirect the original request to the underlying (libc) allocator.
954 * We jump here on bigger requests, on error in the code above (as a 966 * We jump here on bigger requests, on error in the code above (as a
955 * last chance to serve the request) or when the max memory limit 967 * last chance to serve the request) or when the max memory limit
956 * has been reached. 968 * has been reached.
957 */ 969 */
958 if (nbytes == 0) 970 if (nbytes == 0)
959 nbytes = 1; 971 nbytes = 1;
960 return (void *)malloc(nbytes); 972 {
973 void *result = malloc(nbytes);
974 if (!result)
975 _Py_AllocedBlocks--;
976 return result;
977 }
961 } 978 }
962 979
963 /* free */ 980 /* free */
964 981
965 #undef PyObject_Free 982 #undef PyObject_Free
966 void 983 void
967 PyObject_Free(void *p) 984 PyObject_Free(void *p)
968 { 985 {
969 poolp pool; 986 poolp pool;
970 block *lastfree; 987 block *lastfree;
971 poolp next, prev; 988 poolp next, prev;
972 uint size; 989 uint size;
973 #ifndef Py_USING_MEMORY_DEBUGGER 990 #ifndef Py_USING_MEMORY_DEBUGGER
974 uint arenaindex_temp; 991 uint arenaindex_temp;
975 #endif 992 #endif
976 993
977 if (p == NULL) /* free(NULL) has no effect */ 994 if (p == NULL) /* free(NULL) has no effect */
978 return; 995 return;
996
997 _Py_AllocedBlocks--;
979 998
980 #ifdef WITH_VALGRIND 999 #ifdef WITH_VALGRIND
981 if (UNLIKELY(running_on_valgrind > 0)) 1000 if (UNLIKELY(running_on_valgrind > 0))
982 goto redirect; 1001 goto redirect;
983 #endif 1002 #endif
984 1003
985 pool = POOL_ADDR(p); 1004 pool = POOL_ADDR(p);
986 if (Py_ADDRESS_IN_RANGE(p, pool)) { 1005 if (Py_ADDRESS_IN_RANGE(p, pool)) {
987 /* We allocated this address. */ 1006 /* We allocated this address. */
988 LOCK(); 1007 LOCK();
(...skipping 907 matching lines...) Expand 10 before | Expand all | Expand 10 after
1896 int 1915 int
1897 Py_ADDRESS_IN_RANGE(void *P, poolp pool) 1916 Py_ADDRESS_IN_RANGE(void *P, poolp pool)
1898 { 1917 {
1899 uint arenaindex_temp = pool->arenaindex; 1918 uint arenaindex_temp = pool->arenaindex;
1900 1919
1901 return arenaindex_temp < maxarenas && 1920 return arenaindex_temp < maxarenas &&
1902 (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE && 1921 (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE &&
1903 arenas[arenaindex_temp].address != 0; 1922 arenas[arenaindex_temp].address != 0;
1904 } 1923 }
1905 #endif 1924 #endif
OLDNEW
« no previous file with comments | « Lib/test/test_sys.py ('k') | Python/pythonrun.c » ('j') | no next file with comments »

RSS Feeds Recent Issues | This issue
This is Rietveld 894c83f36cb7+