Rietveld Code Review Tool

Delta Between Two Patch Sets: Objects/obmalloc.c

Issue 13390: Hunt memory allocations in addition to reference leaks
Left Patch Set: Created 7 years, 10 months ago
Right Patch Set: Created 6 years, 9 months ago
LEFT | RIGHT
1 #include "Python.h" 1 #include "Python.h"
2 2
3 #ifdef WITH_PYMALLOC 3 #ifdef WITH_PYMALLOC
4 4
5 #ifdef HAVE_MALLOPT_MMAP_THRESHOLD 5 #ifdef HAVE_MMAP
6 #include <malloc.h> 6 #include <sys/mman.h>
7 #ifdef MAP_ANONYMOUS
8 #define ARENAS_USE_MMAP
9 #endif
7 #endif 10 #endif
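The right-hand side replaces the old mallopt()-based approach with a direct probe for anonymous memory mappings: if <sys/mman.h> defines MAP_ANONYMOUS, arenas are carved out with mmap() instead of malloc(). A minimal standalone sketch of the mmap()/munmap() pairing this enables (illustrative only, not part of the patch; note some older BSDs spell the flag MAP_ANON, which this patch does not probe for):

    #include <stdio.h>
    #include <sys/mman.h>

    #define ARENA_SIZE (256 << 10)   /* matches obmalloc.c */

    int main(void)
    {
    #ifdef MAP_ANONYMOUS
        /* Reserve an arena-sized anonymous mapping, then release it. */
        void *p = mmap(NULL, ARENA_SIZE, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        printf("arena at %p\n", p);
        munmap(p, ARENA_SIZE);
    #endif
        return 0;
    }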
8 11
9 #ifdef WITH_VALGRIND 12 #ifdef WITH_VALGRIND
10 #include <valgrind/valgrind.h> 13 #include <valgrind/valgrind.h>
11 14
12 /* If we're using GCC, use __builtin_expect() to reduce overhead of 15 /* If we're using GCC, use __builtin_expect() to reduce overhead of
13 the valgrind checks */ 16 the valgrind checks */
14 #if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__) 17 #if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
15 # define UNLIKELY(value) __builtin_expect((value), 0) 18 # define UNLIKELY(value) __builtin_expect((value), 0)
16 #else 19 #else
(...skipping 111 matching lines...)
128 /* 131 /*
129 * Alignment of addresses returned to the user. 8-bytes alignment works 132 * Alignment of addresses returned to the user. 8-bytes alignment works
130 * on most current architectures (with 32-bit or 64-bit address busses). 133 * on most current architectures (with 32-bit or 64-bit address busses).
131 * The alignment value is also used for grouping small requests in size 134 * The alignment value is also used for grouping small requests in size
132 * classes spaced ALIGNMENT bytes apart. 135 * classes spaced ALIGNMENT bytes apart.
133 * 136 *
134 * You shouldn't change this unless you know what you are doing. 137 * You shouldn't change this unless you know what you are doing.
135 */ 138 */
136 #define ALIGNMENT 8 /* must be 2^N */ 139 #define ALIGNMENT 8 /* must be 2^N */
137 #define ALIGNMENT_SHIFT 3 140 #define ALIGNMENT_SHIFT 3
138 #define ALIGNMENT_MASK (ALIGNMENT - 1)
139 141
140 /* Return the number of bytes in size class I, as a uint. */ 142 /* Return the number of bytes in size class I, as a uint. */
141 #define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT) 143 #define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)
142 144
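ALIGNMENT_MASK disappears on the right-hand side; the patch can drop it because its user, the old file-local ROUNDUP macro, is likewise removed further down in favor of the shared _Py_SIZE_ROUND_UP helper. For orientation, here is how a request size maps to a size class index and back under ALIGNMENT == 8 (a standalone illustration; the index formula is the one PyObject_Malloc uses):

    #include <stddef.h>
    #include <stdio.h>

    #define ALIGNMENT        8
    #define ALIGNMENT_SHIFT  3
    #define INDEX2SIZE(I)    (((unsigned)(I) + 1) << ALIGNMENT_SHIFT)

    int main(void)
    {
        size_t nbytes = 21;  /* example request */
        unsigned size = (unsigned)(nbytes - 1) >> ALIGNMENT_SHIFT;
        /* 21 -> class 2 -> 24-byte blocks */
        printf("request %zu -> class %u -> %u bytes\n",
               nbytes, size, INDEX2SIZE(size));
        return 0;
    }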
143 /* 145 /*
144 * Max size threshold below which malloc requests are considered to be 146 * Max size threshold below which malloc requests are considered to be
145 * small enough in order to use preallocated memory pools. You can tune 147 * small enough in order to use preallocated memory pools. You can tune
146 * this value according to your application behaviour and memory needs. 148 * this value according to your application behaviour and memory needs.
147 * 149 *
148 * Note: a size threshold of 512 guarantees that newly created dictionaries 150 * Note: a size threshold of 512 guarantees that newly created dictionaries
(...skipping 27 matching lines...)
176 */ 178 */
177 #ifdef WITH_MEMORY_LIMITS 179 #ifdef WITH_MEMORY_LIMITS
178 #ifndef SMALL_MEMORY_LIMIT 180 #ifndef SMALL_MEMORY_LIMIT
179 #define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */ 181 #define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */
180 #endif 182 #endif
181 #endif 183 #endif
182 184
183 /* 185 /*
184 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned 186 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
185 * on a page boundary. This is a reserved virtual address space for the 187 * on a page boundary. This is a reserved virtual address space for the
186 * current process (obtained through a malloc call). In no way this means 188 * current process (obtained through a malloc()/mmap() call). In no way this
187 * that the memory arenas will be used entirely. A malloc(<Big>) is usually 189 * means that the memory arenas will be used entirely. A malloc(<Big>) is
188 * an address range reservation for <Big> bytes, unless all pages within this 190 * usually an address range reservation for <Big> bytes, unless all pages within
189 * space are referenced subsequently. So malloc'ing big blocks and not using 191 * this space are referenced subsequently. So malloc'ing big blocks and not
190 * them does not mean "wasting memory". It's an addressable range wastage... 192 * using them does not mean "wasting memory". It's an addressable range
193 * wastage...
191 * 194 *
192 * Therefore, allocating arenas with malloc is not optimal, because there is 195 * Arenas are allocated with mmap() on systems supporting anonymous memory
193 * some address space wastage, but this is the most portable way to request 196 * mappings to reduce heap fragmentation.
194 * memory from the system across various platforms.
195 */ 197 */
196 #define ARENA_SIZE (256 << 10) /* 256KB */ 198 #define ARENA_SIZE (256 << 10) /* 256KB */
197 199
198 #ifdef WITH_MEMORY_LIMITS 200 #ifdef WITH_MEMORY_LIMITS
199 #define MAX_ARENAS (SMALL_MEMORY_LIMIT / ARENA_SIZE) 201 #define MAX_ARENAS (SMALL_MEMORY_LIMIT / ARENA_SIZE)
200 #endif 202 #endif
201 203
202 /* 204 /*
203 * Size of the pools used for small blocks. Should be a power of 2, 205 * Size of the pools used for small blocks. Should be a power of 2,
204 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k. 206 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
(...skipping 99 matching lines...)
304 * increasing order of `nfreepools` values. 306 * increasing order of `nfreepools` values.
305 * 307 *
306 * Else this arena_object is associated with an allocated arena 308 * Else this arena_object is associated with an allocated arena
307 * all of whose pools are in use. `nextarena` and `prevarena` 309 * all of whose pools are in use. `nextarena` and `prevarena`
308 * are both meaningless in this case. 310 * are both meaningless in this case.
309 */ 311 */
310 struct arena_object* nextarena; 312 struct arena_object* nextarena;
311 struct arena_object* prevarena; 313 struct arena_object* prevarena;
312 }; 314 };
313 315
314 #undef ROUNDUP 316 #define POOL_OVERHEAD _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT)
315 #define ROUNDUP(x) (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
316 #define POOL_OVERHEAD ROUNDUP(sizeof(struct pool_header))
317 317
318 #define DUMMY_SIZE_IDX 0xffff /* size class of newly cached pools */ 318 #define DUMMY_SIZE_IDX 0xffff /* size class of newly cached pools */
319 319
320 /* Round pointer P down to the closest pool-aligned address <= P, as a poolp */ 320 /* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
321 #define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK)) 321 #define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE))
322 322
323 /* Return total number of blocks in pool of size index I, as a uint. */ 323 /* Return total number of blocks in pool of size index I, as a uint. */
324 #define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I)) 324 #define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
325 325
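POOL_OVERHEAD and POOL_ADDR now lean on shared helpers instead of the file-local ROUNDUP and mask arithmetic. The helpers live in Include/pymacro.h; sketched here from memory (treat as an approximation, not verbatim — CPython uses Py_uintptr_t rather than uintptr_t). Both require a power-of-two alignment:

    #include <stddef.h>
    #include <stdint.h>

    /* Round n up to the nearest multiple of a (a must be a power of two). */
    #define _Py_SIZE_ROUND_UP(n, a) \
        (((size_t)(n) + (size_t)((a) - 1)) & ~(size_t)((a) - 1))

    /* Round pointer p down to the nearest a-byte boundary. */
    #define _Py_ALIGN_DOWN(p, a) \
        ((void *)((uintptr_t)(p) & ~(uintptr_t)((a) - 1)))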
326 /*==========================================================================*/ 326 /*==========================================================================*/
327 327
328 /* 328 /*
329 * This malloc lock 329 * This malloc lock
330 */ 330 */
331 SIMPLELOCK_DECL(_malloc_lock) 331 SIMPLELOCK_DECL(_malloc_lock)
(...skipping 181 matching lines...)
513 513
514 /* How many arena_objects do we initially allocate? 514 /* How many arena_objects do we initially allocate?
515 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the 515 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
516 * `arenas` vector. 516 * `arenas` vector.
517 */ 517 */
518 #define INITIAL_ARENA_OBJECTS 16 518 #define INITIAL_ARENA_OBJECTS 16
519 519
520 /* Number of arenas allocated that haven't been free()'d. */ 520 /* Number of arenas allocated that haven't been free()'d. */
521 static size_t narenas_currently_allocated = 0; 521 static size_t narenas_currently_allocated = 0;
522 522
523 #ifdef PYMALLOC_DEBUG
524 /* Total number of times malloc() called to allocate an arena. */ 523 /* Total number of times malloc() called to allocate an arena. */
525 static size_t ntimes_arena_allocated = 0; 524 static size_t ntimes_arena_allocated = 0;
526 /* High water mark (max value ever seen) for narenas_currently_allocated. */ 525 /* High water mark (max value ever seen) for narenas_currently_allocated. */
527 static size_t narenas_highwater = 0; 526 static size_t narenas_highwater = 0;
528 #endif 527
529 528 static Py_ssize_t _Py_AllocedBlocks = 0;
530 Py_ssize_t _Py_AllocedBlocks = 0; 529
530 Py_ssize_t
531 _Py_GetAllocedBlocks(void)
532 {
533 return _Py_AllocedBlocks;
534 }
535
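_Py_AllocedBlocks is the core of this issue: a process-wide count of live pymalloc blocks, now compiled in unconditionally (the PYMALLOC_DEBUG guard around the arena counters is dropped too) and read through the new _Py_GetAllocedBlocks() accessor. A sketch of how the allocation paths are expected to maintain the counter — the real increments sit in PyObject_Malloc/PyObject_Free elsewhere in this file; the wrapper names below are hypothetical:

    #include "Python.h"

    static Py_ssize_t _Py_AllocedBlocks = 0;

    static void *
    counted_malloc(size_t nbytes)        /* hypothetical illustration */
    {
        void *p = malloc(nbytes);
        if (p != NULL)
            _Py_AllocedBlocks++;         /* one more live block */
        return p;
    }

    static void
    counted_free(void *p)                /* hypothetical illustration */
    {
        if (p != NULL) {
            _Py_AllocedBlocks--;         /* block released */
            free(p);
        }
    }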
531 536
532 /* Allocate a new arena. If we run out of memory, return NULL. Else 537 /* Allocate a new arena. If we run out of memory, return NULL. Else
533 * allocate a new arena, and return the address of an arena_object 538 * allocate a new arena, and return the address of an arena_object
534 * describing the new arena. It's expected that the caller will set 539 * describing the new arena. It's expected that the caller will set
535 * `usable_arenas` to the return value. 540 * `usable_arenas` to the return value.
536 */ 541 */
537 static struct arena_object* 542 static struct arena_object*
538 new_arena(void) 543 new_arena(void)
539 { 544 {
540 struct arena_object* arenaobj; 545 struct arena_object* arenaobj;
541 uint excess; /* number of bytes above pool alignment */ 546 uint excess; /* number of bytes above pool alignment */
547 void *address;
548 int err;
542 549
543 #ifdef PYMALLOC_DEBUG 550 #ifdef PYMALLOC_DEBUG
544 if (Py_GETENV("PYTHONMALLOCSTATS")) 551 if (Py_GETENV("PYTHONMALLOCSTATS"))
545 _PyObject_DebugMallocStats(); 552 _PyObject_DebugMallocStats(stderr);
546 #endif 553 #endif
547 if (unused_arena_objects == NULL) { 554 if (unused_arena_objects == NULL) {
548 uint i; 555 uint i;
549 uint numarenas; 556 uint numarenas;
550 size_t nbytes; 557 size_t nbytes;
551 558
552 /* Double the number of arena objects on each allocation. 559 /* Double the number of arena objects on each allocation.
553 * Note that it's possible for `numarenas` to overflow. 560 * Note that it's possible for `numarenas` to overflow.
554 */ 561 */
555 numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS; 562 numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
556 if (numarenas <= maxarenas) 563 if (numarenas <= maxarenas)
557 return NULL; /* overflow */ 564 return NULL; /* overflow */
558 #if SIZEOF_SIZE_T <= SIZEOF_INT 565 #if SIZEOF_SIZE_T <= SIZEOF_INT
559 if (numarenas > PY_SIZE_MAX / sizeof(*arenas)) 566 if (numarenas > PY_SIZE_MAX / sizeof(*arenas))
560 return NULL; /* overflow */ 567 return NULL; /* overflow */
561 #endif 568 #endif
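Two separate overflow guards protect the doubling step above: the first catches the uint left-shift wrapping around, the second catches numarenas * sizeof(*arenas) exceeding the size_t range (the real code compiles that check only when size_t is no wider than int). A standalone restatement of the logic, with a hypothetical helper name:

    #include <stddef.h>

    static int
    grow_ok(unsigned maxarenas, size_t elem_size, unsigned *numarenas)
    {
        unsigned n = maxarenas ? maxarenas << 1 : 16;  /* INITIAL_ARENA_OBJECTS */
        if (n <= maxarenas)
            return 0;                    /* uint shift wrapped around */
        if (n > (size_t)-1 / elem_size)
            return 0;                    /* n * elem_size would overflow */
        *numarenas = n;
        return 1;
    }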
562 #ifdef HAVE_MALLOPT_MMAP_THRESHOLD
563 /* Ensure arenas are allocated by mmap to avoid heap fragmentation. */
564 if (numarenas == INITIAL_ARENA_OBJECTS)
565 mallopt(M_MMAP_THRESHOLD, ARENA_SIZE);
566 #endif
567 nbytes = numarenas * sizeof(*arenas); 569 nbytes = numarenas * sizeof(*arenas);
568 arenaobj = (struct arena_object *)realloc(arenas, nbytes); 570 arenaobj = (struct arena_object *)realloc(arenas, nbytes);
569 if (arenaobj == NULL) 571 if (arenaobj == NULL)
570 return NULL; 572 return NULL;
571 arenas = arenaobj; 573 arenas = arenaobj;
572 574
573 /* We might need to fix pointers that were copied. However, 575 /* We might need to fix pointers that were copied. However,
574 * new_arena only gets called when all the pages in the 576 * new_arena only gets called when all the pages in the
575 * previous arenas are full. Thus, there are *no* pointers 577 * previous arenas are full. Thus, there are *no* pointers
576 * into the old array. Thus, we don't have to worry about 578 * into the old array. Thus, we don't have to worry about
(...skipping 12 matching lines...)
589 /* Update globals. */ 591 /* Update globals. */
590 unused_arena_objects = &arenas[maxarenas]; 592 unused_arena_objects = &arenas[maxarenas];
591 maxarenas = numarenas; 593 maxarenas = numarenas;
592 } 594 }
593 595
594 /* Take the next available arena object off the head of the list. */ 596 /* Take the next available arena object off the head of the list. */
595 assert(unused_arena_objects != NULL); 597 assert(unused_arena_objects != NULL);
596 arenaobj = unused_arena_objects; 598 arenaobj = unused_arena_objects;
597 unused_arena_objects = arenaobj->nextarena; 599 unused_arena_objects = arenaobj->nextarena;
598 assert(arenaobj->address == 0); 600 assert(arenaobj->address == 0);
599 arenaobj->address = (uptr)malloc(ARENA_SIZE); 601 #ifdef ARENAS_USE_MMAP
600 if (arenaobj->address == 0) { 602 address = mmap(NULL, ARENA_SIZE, PROT_READ|PROT_WRITE,
603 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
604 err = (address == MAP_FAILED);
605 #else
606 address = malloc(ARENA_SIZE);
607 err = (address == 0);
608 #endif
609 if (err) {
601 /* The allocation failed: return NULL after putting the 610 /* The allocation failed: return NULL after putting the
602 * arenaobj back. 611 * arenaobj back.
603 */ 612 */
604 arenaobj->nextarena = unused_arena_objects; 613 arenaobj->nextarena = unused_arena_objects;
605 unused_arena_objects = arenaobj; 614 unused_arena_objects = arenaobj;
606 return NULL; 615 return NULL;
607 } 616 }
617 arenaobj->address = (uptr)address;
608 618
609 ++narenas_currently_allocated; 619 ++narenas_currently_allocated;
610 #ifdef PYMALLOC_DEBUG
611 ++ntimes_arena_allocated; 620 ++ntimes_arena_allocated;
612 if (narenas_currently_allocated > narenas_highwater) 621 if (narenas_currently_allocated > narenas_highwater)
613 narenas_highwater = narenas_currently_allocated; 622 narenas_highwater = narenas_currently_allocated;
614 #endif
615 arenaobj->freepools = NULL; 623 arenaobj->freepools = NULL;
616 /* pool_address <- first pool-aligned address in the arena 624 /* pool_address <- first pool-aligned address in the arena
617 nfreepools <- number of whole pools that fit after alignment */ 625 nfreepools <- number of whole pools that fit after alignment */
618 arenaobj->pool_address = (block*)arenaobj->address; 626 arenaobj->pool_address = (block*)arenaobj->address;
619 arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE; 627 arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
620 assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE); 628 assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
621 excess = (uint)(arenaobj->address & POOL_SIZE_MASK); 629 excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
622 if (excess != 0) { 630 if (excess != 0) {
623 --arenaobj->nfreepools; 631 --arenaobj->nfreepools;
624 arenaobj->pool_address += POOL_SIZE - excess; 632 arenaobj->pool_address += POOL_SIZE - excess;
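The tail of new_arena() pool-aligns the arena: with ARENA_SIZE = 256 KiB and 4 KiB pools there are 64 pools per arena, and a misaligned base costs one pool. A worked example with an illustrative base address (mmap()ed arenas come back page-aligned, so a nonzero excess is typical only on the malloc() fallback path):

    #include <stdio.h>

    #define ARENA_SIZE      (256 << 10)
    #define POOL_SIZE       (4 << 10)        /* assumes 4 KiB pools */
    #define POOL_SIZE_MASK  (POOL_SIZE - 1)

    int main(void)
    {
        unsigned long address = 0x1000a123UL;  /* hypothetical arena base */
        unsigned nfreepools = ARENA_SIZE / POOL_SIZE;            /* 64 */
        unsigned excess = (unsigned)(address & POOL_SIZE_MASK);  /* 0x123 */

        if (excess != 0) {
            --nfreepools;                      /* sacrifice the partial pool */
            address += POOL_SIZE - excess;     /* next pool boundary */
        }
        printf("first pool at %#lx, %u usable pools\n", address, nfreepools);
        return 0;                              /* -> 0x1000b000, 63 pools */
    }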
(...skipping 272 matching lines...)
897 pool->prevpool = next; 905 pool->prevpool = next;
898 next->nextpool = pool; 906 next->nextpool = pool;
899 next->prevpool = pool; 907 next->prevpool = pool;
900 pool->ref.count = 1; 908 pool->ref.count = 1;
901 if (pool->szidx == size) { 909 if (pool->szidx == size) {
902 /* Luckily, this pool last contained blocks 910 /* Luckily, this pool last contained blocks
903 * of the same size class, so its header 911 * of the same size class, so its header
904 * and free list are already initialized. 912 * and free list are already initialized.
905 */ 913 */
906 bp = pool->freeblock; 914 bp = pool->freeblock;
915 assert(bp != NULL);
907 pool->freeblock = *(block **)bp; 916 pool->freeblock = *(block **)bp;
908 UNLOCK(); 917 UNLOCK();
909 return (void *)bp; 918 return (void *)bp;
Nick Coghlan 2011/11/13 01:52:44 To guarantee correct updating of the number of all
910 } 919 }
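The fast path above pops a block off the pool's free list, which is intrusive: each free block's first word stores a pointer to the next free block, so the new assert documents that a pool on the used list always has at least one free block. A standalone sketch of the pop, with a hypothetical function name:

    #include <stddef.h>

    typedef unsigned char block;

    /* Pop the head of an intrusive free list; the next-pointer lives in
     * the first bytes of each free block. */
    static block *
    pop_free_block(block **freeblock)
    {
        block *bp = *freeblock;
        if (bp != NULL)
            *freeblock = *(block **)bp;  /* advance to the next free block */
        return bp;
    }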
911 /* 920 /*
912 * Initialize the pool header, set up the free list to 921 * Initialize the pool header, set up the free list to
913 * contain just the second block, and return the first 922 * contain just the second block, and return the first
914 * block. 923 * block.
915 */ 924 */
916 pool->szidx = size; 925 pool->szidx = size;
917 size = INDEX2SIZE(size); 926 size = INDEX2SIZE(size);
918 bp = (block *)pool + POOL_OVERHEAD; 927 bp = (block *)pool + POOL_OVERHEAD;
919 pool->nextoffset = POOL_OVERHEAD + (size << 1); 928 pool->nextoffset = POOL_OVERHEAD + (size << 1);
(...skipping 155 matching lines...)
1075 ao->nextarena->prevarena = 1084 ao->nextarena->prevarena =
1076 ao->prevarena; 1085 ao->prevarena;
1077 } 1086 }
1078 /* Record that this arena_object slot is 1087 /* Record that this arena_object slot is
1079 * available to be reused. 1088 * available to be reused.
1080 */ 1089 */
1081 ao->nextarena = unused_arena_objects; 1090 ao->nextarena = unused_arena_objects;
1082 unused_arena_objects = ao; 1091 unused_arena_objects = ao;
1083 1092
1084 /* Free the entire arena. */ 1093 /* Free the entire arena. */
1094 #ifdef ARENAS_USE_MMAP
1095 munmap((void *)ao->address, ARENA_SIZE);
1096 #else
1085 free((void *)ao->address); 1097 free((void *)ao->address);
1098 #endif
1086 ao->address = 0; /* mark unassociated */ 1099 ao->address = 0; /* mark unassociated */
1087 --narenas_currently_allocated; 1100 --narenas_currently_allocated;
1088 1101
1089 UNLOCK(); 1102 UNLOCK();
1090 return; 1103 return;
1091 } 1104 }
1092 if (nf == 1) { 1105 if (nf == 1) {
1093 /* Case 2. Put ao at the head of 1106 /* Case 2. Put ao at the head of
1094 * usable_arenas. Note that because 1107 * usable_arenas. Note that because
1095 * ao->nfreepools was 0 before, ao isn't 1108 * ao->nfreepools was 0 before, ao isn't
(...skipping 619 matching lines...)
1715 } 1728 }
1716 while (q < tail) { 1729 while (q < tail) {
1717 fprintf(stderr, " %02x", *q); 1730 fprintf(stderr, " %02x", *q);
1718 ++q; 1731 ++q;
1719 } 1732 }
1720 } 1733 }
1721 fputc('\n', stderr); 1734 fputc('\n', stderr);
1722 } 1735 }
1723 } 1736 }
1724 1737
1738 #endif /* PYMALLOC_DEBUG */
1739
1725 static size_t 1740 static size_t
1726 printone(const char* msg, size_t value) 1741 printone(FILE *out, const char* msg, size_t value)
1727 { 1742 {
1728 int i, k; 1743 int i, k;
1729 char buf[100]; 1744 char buf[100];
1730 size_t origvalue = value; 1745 size_t origvalue = value;
1731 1746
1732 fputs(msg, stderr); 1747 fputs(msg, out);
1733 for (i = (int)strlen(msg); i < 35; ++i) 1748 for (i = (int)strlen(msg); i < 35; ++i)
1734 fputc(' ', stderr); 1749 fputc(' ', out);
1735 fputc('=', stderr); 1750 fputc('=', out);
1736 1751
1737 /* Write the value with commas. */ 1752 /* Write the value with commas. */
1738 i = 22; 1753 i = 22;
1739 buf[i--] = '\0'; 1754 buf[i--] = '\0';
1740 buf[i--] = '\n'; 1755 buf[i--] = '\n';
1741 k = 3; 1756 k = 3;
1742 do { 1757 do {
1743 size_t nextvalue = value / 10; 1758 size_t nextvalue = value / 10;
1744 uint digit = (uint)(value - nextvalue * 10); 1759 uint digit = (uint)(value - nextvalue * 10);
1745 value = nextvalue; 1760 value = nextvalue;
1746 buf[i--] = (char)(digit + '0'); 1761 buf[i--] = (char)(digit + '0');
1747 --k; 1762 --k;
1748 if (k == 0 && value && i >= 0) { 1763 if (k == 0 && value && i >= 0) {
1749 k = 3; 1764 k = 3;
1750 buf[i--] = ','; 1765 buf[i--] = ',';
1751 } 1766 }
1752 } while (value && i >= 0); 1767 } while (value && i >= 0);
1753 1768
1754 while (i >= 0) 1769 while (i >= 0)
1755 buf[i--] = ' '; 1770 buf[i--] = ' ';
1756 fputs(buf, stderr); 1771 fputs(buf, out);
1757 1772
1758 return origvalue; 1773 return origvalue;
1759 } 1774 }
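printone() now writes to a caller-supplied stream instead of hard-wired stderr; the loop builds the digits right-to-left with comma grouping in a 22-character field, after padding the label to 35 columns. An illustrative output line (the value is made up):

    # times object malloc called       =            3,223,197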
1760 1775
1761 /* Print summary info to stderr about the state of pymalloc's structures. 1776 void
1777 _PyDebugAllocatorStats(FILE *out,
1778                        const char *block_name, int num_blocks, size_t sizeof_block)
1779 {
1780 char buf1[128];
1781 char buf2[128];
1782 PyOS_snprintf(buf1, sizeof(buf1),
1783 "%d %ss * %zd bytes each",
1784 num_blocks, block_name, sizeof_block);
1785 PyOS_snprintf(buf2, sizeof(buf2),
1786 "%48s ", buf1);
1787 (void)printone(out, buf2, num_blocks * sizeof_block);
1788 }
1789
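_PyDebugAllocatorStats() is new: it lets individual object implementations report their free lists in the same format as the pymalloc dump. A hypothetical caller, modeled on the free-list dumps in the object files (a sketch, not part of this patch; the stand-in types and counts are illustrative):

    #include <stdio.h>

    /* Stand-ins so the sketch is self-contained; in CPython these come
     * from the list object implementation. */
    typedef struct { void *dummy[4]; } PyListObject;
    static int numfree = 80;

    extern void _PyDebugAllocatorStats(FILE *out, const char *block_name,
                                       int num_blocks, size_t sizeof_block);

    void
    _PyList_DebugMallocStats(FILE *out)
    {
        _PyDebugAllocatorStats(out, "free PyListObject",
                               numfree, sizeof(PyListObject));
    }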
1790 #ifdef WITH_PYMALLOC
1791
1792 /* Print summary info to "out" about the state of pymalloc's structures.
1762 * In Py_DEBUG mode, also perform some expensive internal consistency 1793 * In Py_DEBUG mode, also perform some expensive internal consistency
1763 * checks. 1794 * checks.
1764 */ 1795 */
1765 void 1796 void
1766 _PyObject_DebugMallocStats(void) 1797 _PyObject_DebugMallocStats(FILE *out)
1767 { 1798 {
1768 uint i; 1799 uint i;
1769 const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT; 1800 const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
1770 /* # of pools, allocated blocks, and free blocks per class index */ 1801 /* # of pools, allocated blocks, and free blocks per class index */
1771 size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT]; 1802 size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1772 size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT]; 1803 size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1773 size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT]; 1804 size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1774 /* total # of allocated bytes in used and full pools */ 1805 /* total # of allocated bytes in used and full pools */
1775 size_t allocated_bytes = 0; 1806 size_t allocated_bytes = 0;
1776 /* total # of available bytes in used pools */ 1807 /* total # of available bytes in used pools */
1777 size_t available_bytes = 0; 1808 size_t available_bytes = 0;
1778 /* # of free pools + pools not yet carved out of current arena */ 1809 /* # of free pools + pools not yet carved out of current arena */
1779 uint numfreepools = 0; 1810 uint numfreepools = 0;
1780 /* # of bytes for arena alignment padding */ 1811 /* # of bytes for arena alignment padding */
1781 size_t arena_alignment = 0; 1812 size_t arena_alignment = 0;
1782 /* # of bytes in used and full pools used for pool_headers */ 1813 /* # of bytes in used and full pools used for pool_headers */
1783 size_t pool_header_bytes = 0; 1814 size_t pool_header_bytes = 0;
1784 /* # of bytes in used and full pools wasted due to quantization, 1815 /* # of bytes in used and full pools wasted due to quantization,
1785 * i.e. the necessarily leftover space at the ends of used and 1816 * i.e. the necessarily leftover space at the ends of used and
1786 * full pools. 1817 * full pools.
1787 */ 1818 */
1788 size_t quantization = 0; 1819 size_t quantization = 0;
1789 /* # of arenas actually allocated. */ 1820 /* # of arenas actually allocated. */
1790 size_t narenas = 0; 1821 size_t narenas = 0;
1791 /* running total -- should equal narenas * ARENA_SIZE */ 1822 /* running total -- should equal narenas * ARENA_SIZE */
1792 size_t total; 1823 size_t total;
1793 char buf[128]; 1824 char buf[128];
1794 1825
1795 fprintf(stderr, "Small block threshold = %d, in %u size classes.\n", 1826 fprintf(out, "Small block threshold = %d, in %u size classes.\n",
1796 SMALL_REQUEST_THRESHOLD, numclasses); 1827 SMALL_REQUEST_THRESHOLD, numclasses);
1797 1828
1798 for (i = 0; i < numclasses; ++i) 1829 for (i = 0; i < numclasses; ++i)
1799 numpools[i] = numblocks[i] = numfreeblocks[i] = 0; 1830 numpools[i] = numblocks[i] = numfreeblocks[i] = 0;
1800 1831
1801 /* Because full pools aren't linked to from anything, it's easiest 1832 /* Because full pools aren't linked to from anything, it's easiest
1802 * to march over all the arenas. If we're lucky, most of the memory 1833 * to march over all the arenas. If we're lucky, most of the memory
1803 * will be living in full pools -- would be a shame to miss them. 1834 * will be living in full pools -- would be a shame to miss them.
1804 */ 1835 */
1805 for (i = 0; i < maxarenas; ++i) { 1836 for (i = 0; i < maxarenas; ++i) {
(...skipping 33 matching lines...)
1839 freeblocks = NUMBLOCKS(sz) - p->ref.count; 1870 freeblocks = NUMBLOCKS(sz) - p->ref.count;
1840 numfreeblocks[sz] += freeblocks; 1871 numfreeblocks[sz] += freeblocks;
1841 #ifdef Py_DEBUG 1872 #ifdef Py_DEBUG
1842 if (freeblocks > 0) 1873 if (freeblocks > 0)
1843 assert(pool_is_in_list(p, usedpools[sz + sz])); 1874 assert(pool_is_in_list(p, usedpools[sz + sz]));
1844 #endif 1875 #endif
1845 } 1876 }
1846 } 1877 }
1847 assert(narenas == narenas_currently_allocated); 1878 assert(narenas == narenas_currently_allocated);
1848 1879
1849 fputc('\n', stderr); 1880 fputc('\n', out);
1850 fputs("class size num pools blocks in use avail blocks\n" 1881 fputs("class size num pools blocks in use avail blocks\n"
1851 "----- ---- --------- ------------- ------------\n", 1882 "----- ---- --------- ------------- ------------\n",
1852 stderr); 1883 out);
1853 1884
1854 for (i = 0; i < numclasses; ++i) { 1885 for (i = 0; i < numclasses; ++i) {
1855 size_t p = numpools[i]; 1886 size_t p = numpools[i];
1856 size_t b = numblocks[i]; 1887 size_t b = numblocks[i];
1857 size_t f = numfreeblocks[i]; 1888 size_t f = numfreeblocks[i];
1858 uint size = INDEX2SIZE(i); 1889 uint size = INDEX2SIZE(i);
1859 if (p == 0) { 1890 if (p == 0) {
1860 assert(b == 0 && f == 0); 1891 assert(b == 0 && f == 0);
1861 continue; 1892 continue;
1862 } 1893 }
1863 fprintf(stderr, "%5u %6u " 1894 fprintf(out, "%5u %6u "
1864 "%11" PY_FORMAT_SIZE_T "u " 1895 "%11" PY_FORMAT_SIZE_T "u "
1865 "%15" PY_FORMAT_SIZE_T "u " 1896 "%15" PY_FORMAT_SIZE_T "u "
1866 "%13" PY_FORMAT_SIZE_T "u\n", 1897 "%13" PY_FORMAT_SIZE_T "u\n",
1867 i, size, p, b, f); 1898 i, size, p, b, f);
1868 allocated_bytes += b * size; 1899 allocated_bytes += b * size;
1869 available_bytes += f * size; 1900 available_bytes += f * size;
1870 pool_header_bytes += p * POOL_OVERHEAD; 1901 pool_header_bytes += p * POOL_OVERHEAD;
1871 quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size); 1902 quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
1872 } 1903 }
1873 fputc('\n', stderr); 1904 fputc('\n', out);
1874 (void)printone("# times object malloc called", serialno); 1905 #ifdef PYMALLOC_DEBUG
1875 1906 (void)printone(out, "# times object malloc called", serialno);
1876 (void)printone("# arenas allocated total", ntimes_arena_allocated); 1907 #endif
1877 (void)printone("# arenas reclaimed", ntimes_arena_allocated - narenas); 1908 (void)printone(out, "# arenas allocated total", ntimes_arena_allocated);
1878 (void)printone("# arenas highwater mark", narenas_highwater); 1909 (void)printone(out, "# arenas reclaimed", ntimes_arena_allocated - narenas);
1879 (void)printone("# arenas allocated current", narenas); 1910 (void)printone(out, "# arenas highwater mark", narenas_highwater);
1911 (void)printone(out, "# arenas allocated current", narenas);
1880 1912
1881 PyOS_snprintf(buf, sizeof(buf), 1913 PyOS_snprintf(buf, sizeof(buf),
1882 "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena", 1914 "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
1883 narenas, ARENA_SIZE); 1915 narenas, ARENA_SIZE);
1884 (void)printone(buf, narenas * ARENA_SIZE); 1916 (void)printone(out, buf, narenas * ARENA_SIZE);
1885 1917
1886 fputc('\n', stderr); 1918 fputc('\n', out);
1887 1919
1888 total = printone("# bytes in allocated blocks", allocated_bytes); 1920 total = printone(out, "# bytes in allocated blocks", allocated_bytes);
1889 total += printone("# bytes in available blocks", available_bytes); 1921 total += printone(out, "# bytes in available blocks", available_bytes);
1890 1922
1891 PyOS_snprintf(buf, sizeof(buf), 1923 PyOS_snprintf(buf, sizeof(buf),
1892 "%u unused pools * %d bytes", numfreepools, POOL_SIZE); 1924 "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
1893 total += printone(buf, (size_t)numfreepools * POOL_SIZE); 1925 total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);
1894 1926
1895 total += printone("# bytes lost to pool headers", pool_header_bytes); 1927 total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
1896 total += printone("# bytes lost to quantization", quantization); 1928 total += printone(out, "# bytes lost to quantization", quantization);
1897 total += printone("# bytes lost to arena alignment", arena_alignment); 1929 total += printone(out, "# bytes lost to arena alignment", arena_alignment);
1898 (void)printone("Total", total); 1930 (void)printone(out, "Total", total);
1899 } 1931 }
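With the FILE *out parameter threaded through, the stats dump is no longer tied to stderr. A hypothetical embedding snippet exercising the new signature (assumes a WITH_PYMALLOC build where _PyObject_DebugMallocStats is available):

    #include <Python.h>

    int main(void)
    {
        Py_Initialize();
        PyRun_SimpleString("x = [object() for _ in range(1000)]");
        _PyObject_DebugMallocStats(stdout);  /* previously always stderr */
        Py_Finalize();
        return 0;
    }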
1900 1932
1901 #endif /* PYMALLOC_DEBUG */ 1933 #endif /* #ifdef WITH_PYMALLOC */
1902 1934
1903 #ifdef Py_USING_MEMORY_DEBUGGER 1935 #ifdef Py_USING_MEMORY_DEBUGGER
1904 /* Make this function last so gcc won't inline it since the definition is 1936 /* Make this function last so gcc won't inline it since the definition is
1905 * after the reference. 1937 * after the reference.
1906 */ 1938 */
1907 int 1939 int
1908 Py_ADDRESS_IN_RANGE(void *P, poolp pool) 1940 Py_ADDRESS_IN_RANGE(void *P, poolp pool)
1909 { 1941 {
1910 uint arenaindex_temp = pool->arenaindex; 1942 uint arenaindex_temp = pool->arenaindex;
1911 1943
1912 return arenaindex_temp < maxarenas && 1944 return arenaindex_temp < maxarenas &&
1913 (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE && 1945 (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE &&
1914 arenas[arenaindex_temp].address != 0; 1946 arenas[arenaindex_temp].address != 0;
1915 } 1947 }
1916 #endif 1948 #endif
