Rietveld Code Review Tool
Help | Bug tracker | Discussion group | Source code | Sign in
(41885)

Side by Side Diff: Objects/obmalloc.c

Issue 13390: Hunt memory allocations in addition to reference leaks
Patch Set: Created 6 years, 9 months ago
Left:
Right:
Use n/p to move between diff chunks; N/P to move between comments. Please Sign in to add in-line comments.
Jump to:
View unified diff | Download patch
« no previous file with comments | « Lib/test/test_sys.py ('k') | Python/pythonrun.c » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLD | NEW
1 #include "Python.h" 1 #include "Python.h"
2 2
3 #ifdef WITH_PYMALLOC 3 #ifdef WITH_PYMALLOC
4 4
5 #ifdef HAVE_MMAP 5 #ifdef HAVE_MMAP
6 #include <sys/mman.h> 6 #include <sys/mman.h>
7 #ifdef MAP_ANONYMOUS 7 #ifdef MAP_ANONYMOUS
8 #define ARENAS_USE_MMAP 8 #define ARENAS_USE_MMAP
9 #endif 9 #endif
10 #endif 10 #endif
(...skipping 506 matching lines...) Expand 10 before | Expand all | Expand 10 after
517 */ 517 */
518 #define INITIAL_ARENA_OBJECTS 16 518 #define INITIAL_ARENA_OBJECTS 16
519 519
520 /* Number of arenas allocated that haven't been free()'d. */ 520 /* Number of arenas allocated that haven't been free()'d. */
521 static size_t narenas_currently_allocated = 0; 521 static size_t narenas_currently_allocated = 0;
522 522
523 /* Total number of times malloc() called to allocate an arena. */ 523 /* Total number of times malloc() called to allocate an arena. */
524 static size_t ntimes_arena_allocated = 0; 524 static size_t ntimes_arena_allocated = 0;
525 /* High water mark (max value ever seen) for narenas_currently_allocated. */ 525 /* High water mark (max value ever seen) for narenas_currently_allocated. */
526 static size_t narenas_highwater = 0; 526 static size_t narenas_highwater = 0;
527
528 static Py_ssize_t _Py_AllocedBlocks = 0;
529
/* New in this patch: read-only accessor for the running count of
 * pymalloc blocks allocated but not yet freed (_Py_AllocedBlocks,
 * maintained in PyObject_Malloc/PyObject_Free below).  Presumably
 * surfaced to Python for the test_sys.py changes in this issue —
 * verify against the sys-module side of the patch. */
530 Py_ssize_t
531 _Py_GetAllocedBlocks(void)
532 {
533 return _Py_AllocedBlocks;
534 }
535
527 536
528 /* Allocate a new arena. If we run out of memory, return NULL. Else 537 /* Allocate a new arena. If we run out of memory, return NULL. Else
529 * allocate a new arena, and return the address of an arena_object 538 * allocate a new arena, and return the address of an arena_object
530 * describing the new arena. It's expected that the caller will set 539 * describing the new arena. It's expected that the caller will set
531 * `usable_arenas` to the return value. 540 * `usable_arenas` to the return value.
532 */ 541 */
533 static struct arena_object* 542 static struct arena_object*
534 new_arena(void) 543 new_arena(void)
535 { 544 {
536 struct arena_object* arenaobj; 545 struct arena_object* arenaobj;
(...skipping 240 matching lines...) Expand 10 before | Expand all | Expand 10 after
777 #endif 786 #endif
778 787
779 /* 788 /*
780 * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes. 789 * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
781 * Most python internals blindly use a signed Py_ssize_t to track 790 * Most python internals blindly use a signed Py_ssize_t to track
782 * things without checking for overflows or negatives. 791 * things without checking for overflows or negatives.
783 * As size_t is unsigned, checking for nbytes < 0 is not required. 792 * As size_t is unsigned, checking for nbytes < 0 is not required.
784 */ 793 */
785 if (nbytes > PY_SSIZE_T_MAX) 794 if (nbytes > PY_SSIZE_T_MAX)
786 return NULL; 795 return NULL;
796
797 _Py_AllocedBlocks++;
787 798
788 /* 799 /*
789 * This implicitly redirects malloc(0). 800 * This implicitly redirects malloc(0).
790 */ 801 */
791 if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) { 802 if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
792 LOCK(); 803 LOCK();
793 /* 804 /*
794 * Most frequent paths first 805 * Most frequent paths first
795 */ 806 */
796 size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT; 807 size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
(...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after
894 pool->prevpool = next; 905 pool->prevpool = next;
895 next->nextpool = pool; 906 next->nextpool = pool;
896 next->prevpool = pool; 907 next->prevpool = pool;
897 pool->ref.count = 1; 908 pool->ref.count = 1;
898 if (pool->szidx == size) { 909 if (pool->szidx == size) {
899 /* Luckily, this pool last contained blocks 910 /* Luckily, this pool last contained blocks
900 * of the same size class, so its header 911 * of the same size class, so its header
901 * and free list are already initialized. 912 * and free list are already initialized.
902 */ 913 */
903 bp = pool->freeblock; 914 bp = pool->freeblock;
915 assert(bp != NULL);
904 pool->freeblock = *(block **)bp; 916 pool->freeblock = *(block **)bp;
905 UNLOCK(); 917 UNLOCK();
906 return (void *)bp; 918 return (void *)bp;
907 } 919 }
908 /* 920 /*
909 * Initialize the pool header, set up the free list to 921 * Initialize the pool header, set up the free list to
910 * contain just the second block, and return the first 922 * contain just the second block, and return the first
911 * block. 923 * block.
912 */ 924 */
913 pool->szidx = size; 925 pool->szidx = size;
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
951 /* The small block allocator ends here. */ 963 /* The small block allocator ends here. */
952 964
953 redirect: 965 redirect:
954 /* Redirect the original request to the underlying (libc) allocator. 966 /* Redirect the original request to the underlying (libc) allocator.
955 * We jump here on bigger requests, on error in the code above (as a 967 * We jump here on bigger requests, on error in the code above (as a
956 * last chance to serve the request) or when the max memory limit 968 * last chance to serve the request) or when the max memory limit
957 * has been reached. 969 * has been reached.
958 */ 970 */
959 if (nbytes == 0) 971 if (nbytes == 0)
960 nbytes = 1; 972 nbytes = 1;
961 return (void *)malloc(nbytes); 973 {
974 void *result = malloc(nbytes);
975 if (!result)
976 _Py_AllocedBlocks--;
977 return result;
978 }
962 } 979 }
963 980
964 /* free */ 981 /* free */
965 982
966 #undef PyObject_Free 983 #undef PyObject_Free
967 void 984 void
968 PyObject_Free(void *p) 985 PyObject_Free(void *p)
969 { 986 {
970 poolp pool; 987 poolp pool;
971 block *lastfree; 988 block *lastfree;
972 poolp next, prev; 989 poolp next, prev;
973 uint size; 990 uint size;
974 #ifndef Py_USING_MEMORY_DEBUGGER 991 #ifndef Py_USING_MEMORY_DEBUGGER
975 uint arenaindex_temp; 992 uint arenaindex_temp;
976 #endif 993 #endif
977 994
978 if (p == NULL) /* free(NULL) has no effect */ 995 if (p == NULL) /* free(NULL) has no effect */
979 return; 996 return;
997
998 _Py_AllocedBlocks--;
980 999
981 #ifdef WITH_VALGRIND 1000 #ifdef WITH_VALGRIND
982 if (UNLIKELY(running_on_valgrind > 0)) 1001 if (UNLIKELY(running_on_valgrind > 0))
983 goto redirect; 1002 goto redirect;
984 #endif 1003 #endif
985 1004
986 pool = POOL_ADDR(p); 1005 pool = POOL_ADDR(p);
987 if (Py_ADDRESS_IN_RANGE(p, pool)) { 1006 if (Py_ADDRESS_IN_RANGE(p, pool)) {
988 /* We allocated this address. */ 1007 /* We allocated this address. */
989 LOCK(); 1008 LOCK();
(...skipping 930 matching lines...) Expand 10 before | Expand all | Expand 10 after
/* Unchanged by this patch: returns nonzero iff address P lies within an
 * arena that pymalloc controls.  pool->arenaindex is copied into a local
 * before the bounds/address checks — presumably so all three tests use
 * one consistent snapshot of the index; confirm against the function's
 * full header comment (elided above). */
1920 int 1939 int
1921 Py_ADDRESS_IN_RANGE(void *P, poolp pool) 1940 Py_ADDRESS_IN_RANGE(void *P, poolp pool)
1922 { 1941 {
1923 uint arenaindex_temp = pool->arenaindex; 1942 uint arenaindex_temp = pool->arenaindex;
1924 1943
1925 return arenaindex_temp < maxarenas && 1944 return arenaindex_temp < maxarenas &&
1926 (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE && 1945 (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE &&
1927 arenas[arenaindex_temp].address != 0; 1946 arenas[arenaindex_temp].address != 0;
1928 } 1947 }
1929 #endif 1948 #endif
OLD | NEW
« no previous file with comments | « Lib/test/test_sys.py ('k') | Python/pythonrun.c » ('j') | no next file with comments »

RSS Feeds Recent Issues | This issue
This is Rietveld 894c83f36cb7+