diff -crN CVS/Include/pydebug.h dev/Include/pydebug.h *** CVS/Include/pydebug.h Tue Aug 15 07:23:56 2000 --- dev/Include/pydebug.h Sat Aug 19 06:28:27 2000 *************** *** 18,23 **** --- 18,24 ---- extern DL_IMPORT(int) Py_VerboseFlag; extern DL_IMPORT(int) Py_InteractiveFlag; extern DL_IMPORT(int) Py_OptimizeFlag; + extern DL_IMPORT(int) Py_ProfileFlag; extern DL_IMPORT(int) Py_NoSiteFlag; extern DL_IMPORT(int) Py_UseClassExceptionsFlag; extern DL_IMPORT(int) Py_FrozenFlag; diff -crN CVS/Modules/Setup.config.in dev/Modules/Setup.config.in *** CVS/Modules/Setup.config.in Tue Aug 15 07:24:09 2000 --- dev/Modules/Setup.config.in Sat Aug 19 06:27:53 2000 *************** *** 12,14 **** --- 12,17 ---- # Garbage collection enabled with --with-cycle-gc @USE_GC_MODULE@gc gcmodule.c + + # Memory profiler enabled with --with-memprof + @USE_MEMPROF_MODULE@memprof memprof.c diff -crN CVS/Modules/main.c dev/Modules/main.c *** CVS/Modules/main.c Tue Aug 15 07:24:11 2000 --- dev/Modules/main.c Sat Aug 19 06:41:32 2000 *************** *** 49,67 **** and force prompts, even if stdin does not appear to be a terminal\n\ -O : optimize generated bytecode (a tad; also PYTHONOPTIMIZE=x)\n\ -OO : remove doc-strings in addition to the -O optimizations\n\ -S : don't imply 'import site' on initialization\n\ - -t : issue warnings about inconsistent tab usage (-tt: issue errors)\n\ "; static char *usage_mid = "\ -u : unbuffered binary stdout and stderr (also PYTHONUNBUFFERED=x)\n\ -U : Unicode literals: treats '...' literals like u'...'\n\ -v : verbose (trace import statements) (also PYTHONVERBOSE=x)\n\ -x : skip first line of source, allowing use of non-Unix forms of #!cmd\n\ -c cmd : program passed in as string (terminates option list)\n\ file : program read from script file\n\ - - : program read from stdin (default; interactive mode if a tty)\n\ "; static char *usage_bot = "\ arg ...: arguments passed to program in sys.argv[1:]\n\ Other environment variables:\n\ PYTHONSTARTUP: file executed on interactive startup (no default)\n\ --- 49,68 ---- and force prompts, even if stdin does not appear to be a terminal\n\ -O : optimize generated bytecode (a tad; also PYTHONOPTIMIZE=x)\n\ -OO : remove doc-strings in addition to the -O optimizations\n\ + -p : start any available profilers (also PYTHONPROFILE=x)\n\ -S : don't imply 'import site' on initialization\n\ "; static char *usage_mid = "\ + -t : issue warnings about inconsistent tab usage (-tt: issue errors)\n\ -u : unbuffered binary stdout and stderr (also PYTHONUNBUFFERED=x)\n\ -U : Unicode literals: treats '...' literals like u'...'\n\ -v : verbose (trace import statements) (also PYTHONVERBOSE=x)\n\ -x : skip first line of source, allowing use of non-Unix forms of #!cmd\n\ -c cmd : program passed in as string (terminates option list)\n\ file : program read from script file\n\ "; static char *usage_bot = "\ + - : program read from stdin (default; interactive mode if a tty)\n\ arg ...: arguments passed to program in sys.argv[1:]\n\ Other environment variables:\n\ PYTHONSTARTUP: file executed on interactive startup (no default)\n\ *************** *** 96,102 **** if ((p = getenv("PYTHONUNBUFFERED")) && *p != '\0') unbuffered = 1; ! while ((c = getopt(argc, argv, "c:diOStuUvxX")) != EOF) { if (c == 'c') { /* -c is the last option; following arguments that look like options are left for the --- 97,103 ---- if ((p = getenv("PYTHONUNBUFFERED")) && *p != '\0') unbuffered = 1; ! 
while ((c = getopt(argc, argv, "c:diOpStuUvxX")) != EOF) { if (c == 'c') { /* -c is the last option; following arguments that look like options are left for the *************** *** 123,128 **** --- 124,133 ---- case 'O': Py_OptimizeFlag++; + break; + + case 'p': + Py_ProfileFlag++; break; case 'S': diff -crN CVS/Modules/memprof.c dev/Modules/memprof.c *** CVS/Modules/memprof.c Thu Jan 1 01:00:00 1970 --- dev/Modules/memprof.c Sat Aug 19 08:40:50 2000 *************** *** 0 **** --- 1,1607 ---- + /* + * malloc profiler + */ + + /* Vladimir Marangozov -- August 2000 */ + + /* XXX: only memory layer 2, because it's the only allocator that + we know how to hook in order to get called and collect stats. + Fortunately, it's the most useful one. */ + + /* TODO: + - make per mem layer (per malloc) profiles + - see whether registering size classes dynamically makes sense + - ditto for altering the alignment (!), the threshold and the pbo + - make a logarithmic distribution of the size classes (for > 256) + - compute and report *real* values, not only simulation + (memo: most legacy mallocs don't even know what they do; + when asked "how much?", they can't answer -- ours can!!). + - compute fragmentation, think about mem compaction, etc. + */ + + #include "Python.h" + #include "structmember.h" + + #ifndef WITH_PYMALLOC + #warning "Warning! The rest of Python is not compiled with" + #warning "its memory allocators. Consequently, no profiling" + #warning "can be performed. To enable Python's allocators," + #warning "rerun configure, adding a --with-pymalloc option." + #warning "Then run `make clean' followed by `make'." + #endif + + /* forward */ + static void *wrapper_malloc(size_t); + static void *wrapper_calloc(size_t, size_t); + static void *wrapper_realloc(void *, size_t); + static void wrapper_free(void *); + static void mprof_hooks_set(void); + static void mprof_hooks_clear(void); + + #ifdef WITH_PYMALLOC + extern void _PyCore_ObjectMalloc_SetHooks(void *(*m)(size_t), + void *(*c)(size_t, size_t), + void *(*r)(void *, size_t), + void (*f)(void *)); + + /* XXX need a mechanism to register and set the profiled malloc */ + + #define target_malloc_func PyCore_OBJECT_MALLOC_FUNC + #define target_realloc_func PyCore_OBJECT_REALLOC_FUNC + #define target_free_func PyCore_OBJECT_FREE_FUNC + + #define target_hooks_set_func(m,c,r,f) \ + _PyCore_ObjectMalloc_SetHooks(m,c,r,f) + + #else /* !WITH_PYMALLOC */ + + #define target_malloc_func malloc + #define target_realloc_func realloc + #define target_free_func free + + #define target_hooks_set_func(m,c,r,f) /* pass */ + + #endif /* !WITH_PYMALLOC */ + + /* ----------------------------------------------------------------------- */ + + /* Stat structures & helpers */ + + typedef struct { + long cur; + long max; + } watermark_t; + + #define watermark_up(w, n) do { (w).cur += (n); \ + if ((w).cur > (w).max) \ + (w).max = (w).cur; \ + } while(0) + #define watermark_down(w, n) (w).cur -= (n) + + typedef struct { + watermark_t requested; + watermark_t allocated; + watermark_t estimated; + } mem_stat; + + #define mem_stat_up(m, slot, n) watermark_up( (m).slot, (n) ) + #define mem_stat_down(m, slot, n) watermark_down( (m).slot, (n) ) + + typedef struct { + watermark_t requested; + watermark_t allocated; + } block_stat; + + #define block_stat_up(b, slot, n) watermark_up( (b).slot, (n) ) + #define block_stat_down(b, slot, n) watermark_down( (b).slot, (n) ) + + typedef struct { + long calls; + int errors; + int warnings; + } call_stat; + + #define call_stat_up(c, slot, n) 
(c).slot += (n) + #define call_stat_down(c, slot, n) (c).slot -= (n) + + typedef struct { + call_stat malloc; + call_stat calloc; + call_stat realloc; + call_stat free; + } func_stat; + + #define func_stat_up(f, c, slot, n) call_stat_up( (f).c, slot, (n) ) + #define func_stat_down(f, c, slot, n) call_stat_down( (f).c, slot, (n) ) + + typedef struct { + mem_stat mem; + func_stat funcs; + block_stat blocks; + } malloc_profile; + + /* helpers */ + #define new_call(mp, f) func_stat_up(mp->funcs, f, calls, 1) + #define del_call(mp, f) func_stat_down(mp->funcs, f, calls, 1) + #define new_error(mp, f) func_stat_up(mp->funcs, f, errors, 1) + #define del_error(mp, f) func_stat_down(mp->funcs, f, errors, 1) + #define new_warning(mp, f) func_stat_up(mp->funcs, f, warnings, 1) + #define del_warning(mp, f) func_stat_down(mp->funcs, f, warnings, 1) + + #define new_request(mp, n) do { \ + mem_stat_up(mp->mem, requested, n); \ + block_stat_up(mp->blocks, requested, 1); \ + } while (0) + + #define del_request(mp, n) do { \ + mem_stat_down(mp->mem, requested, n); \ + block_stat_down(mp->blocks, requested, 1); \ + } while (0) + + #define new_allocation(mp, n) do { \ + mem_stat_up(mp->mem, allocated, n); \ + mem_stat_up(mp->mem, estimated, n+pbo); \ + block_stat_up(mp->blocks, allocated, 1); \ + } while (0) + + #define del_allocation(mp, n) do { \ + mem_stat_down(mp->mem, allocated, n); \ + mem_stat_down(mp->mem, estimated, n+pbo); \ + block_stat_down(mp->blocks, allocated, 1); \ + } while (0) + + /* ----------------------------------------------------------------------- */ + + /* mprof variables & helpers */ + + #ifndef ALIGNMENT + #define ALIGNMENT 8 + #define ALIGNMENT_MASK (ALIGNMENT - 1) + #endif + + #ifndef MAX_SMALL_THRESHOLD + #define MAX_SMALL_THRESHOLD 256 + #endif + + #undef MAX_SMALL_SIZE_CLASSES + #define MAX_SMALL_SIZE_CLASSES (MAX_SMALL_THRESHOLD / ALIGNMENT) + + /* error levels */ + #define ERROR_IGNORE (0) + #define ERROR_REPORT (1 << 0) + #define ERROR_STOP (1 << 1) + #define ERROR_RAISE (1 << 2) + #define ERROR_ABORT (1 << 3) + + static int error_level = ERROR_REPORT; + + /* profiles and size classes + + gmp = global memory profile + gmp + (i+1) = per size class profile 'i' for small blocks + gmp + (MAX_SMALL_SIZE_CLASSES+1) = profile detail for big blocks + + XXX: need per malloc profiles and dynamic size class registration. + */ + static malloc_profile *gmp; + + /* small block threshold; used to feed gmp + i; a user setting */ + static size_t small_block_threshold = MAX_SMALL_THRESHOLD; + + /* per block overhead (pbo): fixed overhead used for estimations. + Doug Lea's malloc adds 2 words per chunk (GNU libc's case). 
*/
+ static size_t pbo = (2 * sizeof(size_t));
+
+ /* global flags */
+ static int profiling = 0;
+ static int initialized = 0;
+
+ /* mprof opcodes */
+ #define ABORT 0
+ #define REQUEST 1
+ #define COMMIT 2
+
+ /* mprof helpers */
+ #define mprof_malloc_request(n) mprof_malloc(REQUEST, NULL, (n))
+ #define mprof_calloc_request(n) mprof_calloc(REQUEST, NULL, (n))
+ #define mprof_realloc_request(p, n) mprof_realloc(REQUEST, (p), (n))
+ #define mprof_free_request(p) mprof_free(REQUEST, (p))
+
+ #define mprof_malloc_abort(n) mprof_malloc(ABORT, NULL, (n))
+ #define mprof_calloc_abort(n) mprof_calloc(ABORT, NULL, (n))
+ #define mprof_realloc_abort(p, n) mprof_realloc(ABORT, (p), (n))
+ #define mprof_free_abort(p) mprof_free(ABORT, (p))
+
+ #define mprof_malloc_commit(p, n) mprof_malloc(COMMIT, (p), (n))
+ #define mprof_calloc_commit(p, n) mprof_calloc(COMMIT, (p), (n))
+ #define mprof_realloc_commit(p, n) mprof_realloc(COMMIT, (p), (n))
+ #define mprof_free_commit(p) mprof_free(COMMIT, (p))
+
+ #undef roundup
+ #define roundup(x, y) (((x)==0) ? (y) : ((((x)+((y)-1))/(y))*(y)))
+
+ #undef get_size_class
+ #define get_size_class(x) ((roundup((x),(ALIGNMENT))/(ALIGNMENT))-1)
+
+ #undef get_class_size
+ #define get_class_size(x) (((x)+1)*(ALIGNMENT))
+
+ /* XXX take care of thread safety */
+ #define SIMPLELOCK_DECL(lock) /* simple lock declaration */
+ #define SIMPLELOCK_INIT(lock) /* allocate (if needed) and initialize */
+ #define SIMPLELOCK_FINI(lock) /* free/destroy an existing lock */
+ #define SIMPLELOCK_LOCK(lock) /* acquire released lock */
+ #define SIMPLELOCK_UNLOCK(lock) /* release acquired lock */
+
+ /* wrapper lock */
+ SIMPLELOCK_DECL(wrapper_malloc_lock);
+ #define LOCK() SIMPLELOCK_LOCK(wrapper_malloc_lock)
+ #define UNLOCK() SIMPLELOCK_UNLOCK(wrapper_malloc_lock)
+ #define LOCK_INIT() SIMPLELOCK_INIT(wrapper_malloc_lock)
+ #define LOCK_FINI() SIMPLELOCK_FINI(wrapper_malloc_lock)
+
+ /* ----------------------------------------------------------------------- */
+
+ /* block table */
+
+ /* This is a stripped & adapted dictobject. Sadly, we can't use
+ a dict because its allocator may be the one being profiled.
+
+ So here it is, using malloc/free calls "isolated" from the world
+ and with specialized [void *, size_t] entries. */
+
+ static long
+ _hash_ptr(void *p)
+ {
+ #if SIZEOF_LONG >= SIZEOF_VOID_P
+ return (long)p;
+ #else
+ #ifndef HAVE_LONG_LONG
+ #error "Sorry! undefined HAVE_LONG_LONG while SIZEOF_LONG < SIZEOF_VOID_P"
+ #endif
+ long long lh = (long long)p;
+ long h = (long)lh;
+ /* fold the high-order pointer bits into the long hash */
+ return h ^ (long)(lh >> (8 * SIZEOF_LONG));
+ #endif
+ }
+
+ #define MINSIZE 4
+ static long polys[] = {
+ 4 + 3,
+ 8 + 3,
+ 16 + 3,
+ 32 + 5,
+ 64 + 3,
+ 128 + 3,
+ 256 + 29,
+ 512 + 17,
+ 1024 + 9,
+ 2048 + 5,
+ 4096 + 83,
+ 8192 + 27,
+ 16384 + 43,
+ 32768 + 3,
+ 65536 + 45,
+ 131072 + 9,
+ 262144 + 39,
+ 524288 + 39,
+ 1048576 + 9,
+ 2097152 + 5,
+ 4194304 + 3,
+ 8388608 + 33,
+ 16777216 + 27,
+ 33554432 + 9,
+ 67108864 + 71,
+ 134217728 + 39,
+ 268435456 + 9,
+ 536870912 + 5,
+ 1073741824 + 83,
+ 0
+ };
+
+ #define dummy ((void *)(-1))
+
+ typedef struct {
+ void *addr;
+ size_t size;
+ } mem_block;
+
+ typedef struct {
+ int ma_fill;
+ int ma_used;
+ int ma_size;
+ int ma_poly;
+ mem_block *ma_table;
+ } block_table;
+
+ static block_table *
+ block_table_init(void)
+ {
+ block_table *bt;
+ bt = (block_table *) malloc(sizeof(block_table));
+ if (bt == NULL)
+ return NULL;
+ bt->ma_fill = 0;
+ bt->ma_used = 0;
+ bt->ma_size = 0;
+ bt->ma_poly = 0;
+ bt->ma_table = NULL;
+ return bt;
+ }
+
+ static void
+ block_table_fini(block_table *bt)
+ {
+ if (bt == NULL)
+ return;
+ if (bt->ma_table != NULL)
+ free(bt->ma_table);
+ free(bt);
+ }
+
+ static mem_block *
+ block_table_lookup(block_table *bt, void *addr)
+ {
+ int i;
+ unsigned int incr;
+ mem_block *mb, *freeslot;
+ unsigned int mask = bt->ma_size-1;
+ mem_block *mb0 = bt->ma_table;
+ long hash = _hash_ptr(addr);
+
+ i = (~hash) & mask;
+ mb = &mb0[i];
+ if (mb->addr == NULL || mb->addr == addr)
+ return mb;
+ if (mb->addr == dummy)
+ freeslot = mb;
+ else
+ freeslot = NULL;
+ incr = (hash ^ ((unsigned long)hash >> 3)) & mask;
+ if (!incr)
+ incr = mask;
+ for (;;) {
+ mb = &mb0[(i+incr)&mask];
+ if (mb->addr == NULL) {
+ if (freeslot != NULL)
+ return freeslot;
+ else
+ return mb;
+ }
+ if (mb->addr == dummy) {
+ if (freeslot == NULL)
+ freeslot = mb;
+ }
+ else if (mb->addr == addr) {
+ return mb;
+ }
+ incr = incr << 1;
+ if (incr > mask)
+ incr ^= bt->ma_poly;
+ }
+ }
+
+ static void
+ block_table_insert(block_table *bt, void *addr, size_t size)
+ {
+ mem_block *mb;
+ mb = block_table_lookup(bt, addr);
+ if (mb->size != 0) {
+ mb->size = size;
+ }
+ else {
+ if (mb->addr == NULL)
+ bt->ma_fill++;
+ mb->addr = addr;
+ mb->size = size;
+ bt->ma_used++;
+ }
+ }
+
+ static int
+ block_table_resize(block_table *bt, int minused)
+ {
+ int oldsize = bt->ma_size;
+ int newsize, newpoly;
+ mem_block *oldtable = bt->ma_table;
+ mem_block *newtable;
+ mem_block *mb;
+ int i;
+ for (i = 0, newsize = MINSIZE; ; i++, newsize <<= 1) {
+ if (polys[i] == 0) {
+ /* ran out of polynomials */
+ return -1;
+ }
+ if (newsize > minused) {
+ newpoly = polys[i];
+ break;
+ }
+ }
+ newtable = (mem_block *) malloc(sizeof(mem_block) * newsize);
+ if (newtable == NULL)
+ return -1;
+ memset(newtable, 0, sizeof(mem_block) * newsize);
+ bt->ma_size = newsize;
+ bt->ma_poly = newpoly;
+ bt->ma_table = newtable;
+ bt->ma_fill = 0;
+ bt->ma_used = 0;
+
+ for (i = 0, mb = oldtable; i < oldsize; i++, mb++) {
+ if (mb->size != 0)
+ block_table_insert(bt, mb->addr, mb->size);
+ }
+
+ if (oldtable != NULL)
+ free(oldtable);
+ return 0;
+ }
+
+ static mem_block *
+ block_table_get(block_table *bt, void *addr)
+ {
+ if (bt == NULL || bt->ma_table == NULL)
+ return NULL;
+ return block_table_lookup(bt, addr);
+ }
+
+ static int
+ block_table_set(block_table *bt, void *addr, size_t size)
+ {
+ if (bt == NULL)
+ return -1;
+ /* if fill >= 2/3 size, double in size */
+ if (bt->ma_fill*3 >= bt->ma_size*2) {
+ if (block_table_resize(bt, bt->ma_used*2) != 0) {
+ if (bt->ma_fill+1 > bt->ma_size)
+ return -1;
+ }
+ }
+ block_table_insert(bt, addr, size);
+ return 0;
+ }
+
+ static int
+ block_table_del(block_table *bt, void *addr)
+ {
+ mem_block *mb;
+ if (bt == NULL || bt->ma_table == NULL)
+ return -1;
+ mb = block_table_lookup(bt, addr);
+ if (mb->size == 0)
+ return -1;
+ mb->addr = dummy;
+ mb->size = 0;
+ bt->ma_used--;
+ return 0;
+ }
+
+ /* ----------------------------------------------------------------------- */
+
+ /* block log interface */
+
+ /* the block log is a global dict with items: { mem_addr : requested_size } */
+ static block_table *mprof_log = NULL;
+
+ /* if the size == 0, it is stored as -1, because 0 has a special meaning. */
+ static int
+ block_login(void *p, size_t n)
+ {
+ return block_table_set(mprof_log, p, (n == 0) ? -1 : n);
+ }
+
+ static int
+ block_logout(void *p)
+ {
+ return block_table_del(mprof_log, p);
+ }
+
+ static int
+ block_exists(void *p)
+ {
+ mem_block *mb = block_table_get(mprof_log, p);
+ if (mb == NULL || mb->size == 0)
+ return 0;
+ return 1;
+ }
+
+ /* block_getsize(p) should be called only for existing blocks --
+ that is, only for p such that block_exists(p) is true. */
+ static size_t
+ block_getsize(void *p)
+ {
+ mem_block *mb = block_table_get(mprof_log, p);
+ if (mb == NULL) {
+ /* better than a segfault anyway */
+ Py_FatalError("memprof: block_getsize() on unknown block");
+ }
+ return (mb->size == -1) ? 0 : mb->size;
+ }
+
+
+ /* ----------------------------------------------------------------------- */
+
+ /* mprof functions */
+
+ static malloc_profile *
+ mprof_new(void)
+ {
+ return PyMem_New(malloc_profile, MAX_SMALL_SIZE_CLASSES+2);
+ }
+
+ static void
+ mprof_del(void *mp)
+ {
+ if (mp == NULL)
+ return;
+ PyMem_Del(mp);
+ }
+
+ static void
+ mprof_zero(void)
+ {
+ if (profiling)
+ return;
+ memset(gmp, 0, sizeof(malloc_profile) * (MAX_SMALL_SIZE_CLASSES+2));
+ }
+
+ static int
+ mprof_init(void)
+ {
+ if (initialized)
+ return 0;
+ gmp = mprof_new();
+ if (gmp == NULL)
+ return -1;
+ mprof_log = block_table_init();
+ if (mprof_log == NULL) {
+ mprof_del(gmp);
+ return -1;
+ }
+ mprof_zero();
+ initialized = 1;
+ return 0;
+ }
+
+ static void
+ mprof_fini(void)
+ {
+ if (!initialized)
+ return;
+ mprof_del(gmp);
+ block_table_fini(mprof_log);
+ mprof_log = NULL;
+ initialized = 0;
+ }
+
+ static int
+ mprof_start(void)
+ {
+ if (profiling)
+ return 0;
+ if (!initialized && mprof_init() != 0)
+ return -1;
+ mprof_hooks_set();
+ profiling = 1;
+ return 0;
+ }
+
+ static void
+ mprof_stop(void)
+ {
+ if (!profiling)
+ return;
+ mprof_hooks_clear();
+ profiling = 0;
+ }
+
+ static int
+ mprof_error(char *msg)
+ {
+ int res = 0;
+ int err = error_level;
+ if (err == ERROR_IGNORE)
+ return 0;
+ if (err & ERROR_REPORT)
+ fprintf(stderr, "%s\n", msg);
+ if (err & ERROR_STOP) {
+ mprof_stop();
+ if (err & ERROR_REPORT)
+ fprintf(stderr, "memprof: stopped.\n");
+ }
+ if (err & ERROR_RAISE) {
+ /* raise an exception only in initialized state
+ and if MemoryError is not already current.
*/
+ if (Py_IsInitialized() &&
+ !PyErr_ExceptionMatches(PyExc_MemoryError))
+ {
+ PyErr_SetString(PyExc_MemoryError, msg);
+ if (err & ERROR_REPORT)
+ fprintf(stderr,
+ "memprof: raised MemoryError.\n");
+ }
+ res = -1; /* causes an allocation failure */
+ }
+ if (err & ERROR_ABORT)
+ Py_FatalError(msg);
+ return res;
+ }
+
+ /* the following functions are at the heart of the profiler */
+
+ #define _mp_fromsize(s) (gmp + ((((s) > small_block_threshold) ? \
+ MAX_SMALL_SIZE_CLASSES : \
+ get_size_class(s)) + 1) )
+
+ static int
+ mprof_malloc(int opcode, void *p, size_t n)
+ {
+ static malloc_profile *mp = NULL;
+ int res = 0;
+
+ switch (opcode) {
+
+ case ABORT:
+ new_error(gmp, malloc);
+ new_error(mp, malloc);
+ break;
+
+ case REQUEST:
+ mp = _mp_fromsize(n);
+ if (n == 0) {
+ /* warn about malloc(0) */
+ new_warning(gmp, malloc);
+ new_warning(mp, malloc);
+ }
+ new_call(gmp, malloc);
+ new_call(mp, malloc);
+ new_request(gmp, n);
+ new_request(mp, n);
+ break;
+
+ case COMMIT:
+ res = block_login(p, n);
+ if (res == -1) {
+ /* lose track of this block on error tolerance */
+ res = mprof_error("memprof: malloc commit failed");
+ }
+ n = roundup(n, ALIGNMENT);
+ new_allocation(gmp, n);
+ new_allocation(mp, n);
+ break;
+
+ default:
+ res = mprof_error("memprof: bad opcode in mprof_malloc()");
+ }
+ return res;
+ }
+
+ static int
+ mprof_calloc(int opcode, void *p, size_t n)
+ {
+ static malloc_profile *mp = NULL;
+ int res = 0;
+
+ switch (opcode) {
+
+ case ABORT:
+ new_error(gmp, calloc);
+ new_error(mp, calloc);
+ break;
+
+ case REQUEST:
+ mp = _mp_fromsize(n);
+ if (n == 0) {
+ /* warn about calloc(0) */
+ new_warning(gmp, calloc);
+ new_warning(mp, calloc);
+ }
+ new_call(gmp, calloc);
+ new_call(mp, calloc);
+ new_request(gmp, n);
+ new_request(mp, n);
+ break;
+
+ case COMMIT:
+ res = block_login(p, n);
+ if (res == -1) {
+ /* lose track of this block on error tolerance */
+ res = mprof_error("memprof: calloc commit failed");
+ }
+ n = roundup(n, ALIGNMENT);
+ new_allocation(gmp, n);
+ new_allocation(mp, n);
+ break;
+
+ default:
+ res = mprof_error("memprof: bad opcode in mprof_calloc()");
+ }
+ return res;
+ }
+
+ static int
+ mprof_realloc(int opcode, void *p, size_t n)
+ {
+ static malloc_profile *mp = NULL;
+ static malloc_profile *old_mp = NULL;
+ static void *old_p = NULL;
+ static size_t old_n = 0;
+ int res = 0;
+
+ switch (opcode) {
+
+ case ABORT:
+ new_error(gmp, realloc);
+ new_error(mp, realloc);
+ break;
+
+ case REQUEST:
+ mp = _mp_fromsize(n);
+ new_call(gmp, realloc);
+ new_call(mp, realloc);
+ if (p == NULL) {
+ /* realloc(NULL, n) == malloc(n) --
+ warn about it and return */
+ new_warning(gmp, realloc);
+ new_warning(mp, realloc);
+ break;
+ }
+ if (n == 0) {
+ /* warn about realloc(p, 0) == free(p) */
+ new_warning(gmp, realloc);
+ new_warning(mp, realloc);
+ }
+ old_p = p;
+ if (!block_exists(p)) {
+ char buf[64];
+ /* too bad -- an unusual emergency situation;
+ maybe a programming error or mixed mallocs. */
+ new_error(gmp, realloc);
+ new_error(mp, realloc);
+ sprintf(buf,
+ "memprof: resizing unknown block (%p)", p);
+ res = mprof_error(buf);
+ old_mp = NULL; /* signal the error to commit */
+ }
+ else {
+ old_n = block_getsize(p);
+ old_mp = _mp_fromsize(old_n);
+ /* if the new request is greater than the old one,
+ take care to compute the requested extension
+ according to malloc-copy-free -- this may change
+ the peak values; shrinking is not a problem.
*/
+ if (n > old_n) {
+ new_request(gmp, n);
+ new_request(mp, n);
+ del_request(gmp, old_n);
+ del_request(old_mp, old_n);
+ }
+ else {
+ del_request(gmp, old_n);
+ del_request(old_mp, old_n);
+ new_request(gmp, n);
+ new_request(mp, n);
+ }
+ }
+ break;
+
+ case COMMIT:
+ if (old_mp == NULL) {
+ /* error case in the realloc request above -- pass */
+ break;
+ }
+ block_logout(old_p);
+ if (p == NULL && n == 0) {
+ /* this was realloc(p, 0) == free(p) + ret NULL;
+ the NULL indicates that the block is freed. */
+ old_n = roundup(old_n, ALIGNMENT);
+ del_allocation(gmp, old_n);
+ del_allocation(old_mp, old_n);
+ break;
+ }
+ res = block_login(p, n);
+ if (res == -1) {
+ /* lose track of this block on error tolerance */
+ res = mprof_error("memprof: realloc commit failed");
+ }
+ n = roundup(n, ALIGNMENT);
+ old_n = roundup(old_n, ALIGNMENT);
+ if (n > old_n) {
+ new_allocation(gmp, n);
+ new_allocation(mp, n);
+ del_allocation(gmp, old_n);
+ del_allocation(old_mp, old_n);
+ }
+ else {
+ del_allocation(gmp, old_n);
+ del_allocation(old_mp, old_n);
+ new_allocation(gmp, n);
+ new_allocation(mp, n);
+ }
+ break;
+
+ default:
+ res = mprof_error("memprof: bad opcode in mprof_realloc()");
+ }
+ return res;
+ }
+
+ static int
+ mprof_free(int opcode, void *p)
+ {
+ static malloc_profile *mp = NULL;
+ size_t n = 0;
+ int res = 0;
+
+ switch (opcode) {
+
+ case ABORT:
+ /* unreachable */
+ break;
+
+ case REQUEST:
+ new_call(gmp, free);
+ if (p == NULL) {
+ /* free(NULL) has no effect; account the call
+ in the gmp, though, and warn about it */
+ new_warning(gmp, free);
+ break;
+ }
+ if (!block_exists(p)) {
+ char buf[64];
+ /* too bad -- an unusual emergency situation:
+ maybe a programming error or mixed mallocs. */
+ new_error(gmp, free);
+ sprintf(buf,
+ "memprof: freeing unknown block (%p)", p);
+ res = mprof_error(buf);
+ mp = NULL; /* signal the error to commit */
+ }
+ else {
+ n = block_getsize(p);
+ mp = _mp_fromsize(n);
+ new_call(mp, free);
+ del_request(gmp, n);
+ del_request(mp, n);
+ }
+ break;
+
+ case COMMIT:
+ if (mp == NULL) {
+ /* error case in the free request above -- pass */
+ break;
+ }
+ n = block_getsize(p);
+ block_logout(p);
+ n = roundup(n, ALIGNMENT);
+ del_allocation(gmp, n);
+ del_allocation(mp, n);
+ break;
+
+ default:
+ res = mprof_error("memprof: bad opcode in mprof_free()");
+ }
+ return res;
+ }
+
+ /* ----------------------------------------------------------------------- */
+
+ /* hooks & callbacks */
+
+ /* hooks */
+ static void
+ mprof_hooks_set(void)
+ {
+ target_hooks_set_func(wrapper_malloc,
+ wrapper_calloc,
+ wrapper_realloc,
+ wrapper_free);
+ }
+
+ static void
+ mprof_hooks_clear(void)
+ {
+ target_hooks_set_func(NULL, NULL, NULL, NULL);
+ }
+
+ static void *
+ target_malloc(size_t n)
+ {
+ void *p;
+ mprof_hooks_clear();
+ p = target_malloc_func(n);
+ if (profiling)
+ mprof_hooks_set();
+ return p;
+ }
+
+ static void *
+ target_realloc(void *p, size_t n)
+ {
+ mprof_hooks_clear();
+ p = target_realloc_func(p, n);
+ if (profiling)
+ mprof_hooks_set();
+ return p;
+ }
+
+ static void
+ target_free(void *p)
+ {
+ mprof_hooks_clear();
+ target_free_func(p);
+ if (profiling)
+ mprof_hooks_set();
+ }
+
+
+ /* wrapper callbacks -- these are called from the profiled malloc */
+ static void *
+ wrapper_malloc(size_t n)
+ {
+ void *p;
+ LOCK();
+ if (!profiling && mprof_start() != 0)
+ goto fail;
+ if (mprof_malloc_request(n) == -1)
+ goto fail;
+
+ p = target_malloc(n);
+
+ if (p == NULL) {
+ mprof_malloc_abort(n);
+ goto fail;
+ }
+ if (mprof_malloc_commit(p, n) == -1) {
+ target_free(p);
+ goto fail;
+ }
+ UNLOCK();
+ return p;
+ fail:
+ UNLOCK();
+ return NULL;
+ }
+
+ static void *
+ wrapper_calloc(size_t nbel, size_t elsz)
+ {
+ void *p;
+ size_t n;
+ LOCK();
+ if (!profiling && mprof_start() != 0)
+ goto fail;
+ n = nbel * elsz;
+ if (elsz != 0 && n / elsz != nbel)
+ /* multiplication overflow -- refuse the request */
+ goto fail;
+ if (mprof_calloc_request(n) == -1)
+ goto fail;
+
+ p = target_malloc(n);
+
+ if (p == NULL) {
+ mprof_calloc_abort(n);
+ goto fail;
+ }
+ if (mprof_calloc_commit(p, n) == -1) {
+ target_free(p);
+ goto fail;
+ }
+ memset(p, 0, n);
+ UNLOCK();
+ return p;
+ fail:
+ UNLOCK();
+ return NULL;
+ }
+
+ static void *
+ wrapper_realloc(void *p, size_t n)
+ {
+ void *new_p;
+ LOCK();
+ if (!profiling && mprof_start() != 0)
+ goto fail;
+ if (mprof_realloc_request(p, n) == -1)
+ goto fail;
+ if (p == NULL) {
+ /* realloc(NULL, n) == malloc(n) -- go through the
+ malloc wrapper so that the new block gets logged */
+ UNLOCK();
+ return wrapper_malloc(n);
+ }
+
+ new_p = target_realloc(p, n);
+
+ if (new_p == NULL && n != 0) {
+ mprof_realloc_abort(p, n);
+ goto fail;
+ }
+ if (mprof_realloc_commit(new_p, n) == -1)
+ goto fail;
+ UNLOCK();
+ return new_p;
+ fail:
+ UNLOCK();
+ return NULL;
+ }
+
+ static void
+ wrapper_free(void *p)
+ {
+ LOCK();
+ if (!profiling && mprof_start() != 0)
+ goto finally;
+ if (mprof_free_request(p) == -1)
+ goto finally;
+ if (p == NULL)
+ goto finally;
+
+ target_free(p);
+
+ mprof_free_commit(p);
+ finally:
+ UNLOCK();
+ }
+
+ /* ----------------------------------------------------------------------- */
+
+ /* Python interface */
+
+ /* Someday, this may evolve into a memory-oriented miniclass system.
+ For now, let's just limit ourselves to simple memprof objects... -- VM */
+
+ /* helpers */
+ #undef bitset
+ #define bitset(x, y) ((x) | (y))
+
+ #undef bitclear
+ #define bitclear(x, y) ((x) & (~(y)))
+
+ #undef bittest
+ #define bittest(x, y) ((x) & (y))
+
+ #undef bitchange
+ #define bitchange(x, y) ((x) ^ (y))
+
+ static int
+ bitcount(int n)
+ {
+ int nbits = 0;
+ while (n) {
+ if (n & 1)
+ nbits++;
+ n >>= 1;
+ }
+ return nbits;
+ }
+
+ /* profile flags */
+ #define MEM_CORE (1 << 1) /* layer 1: Python core memory */
+ #define MEM_OBJECT (1 << 2) /* layer 2: Python object memory */
+ #define MEM_OBCLASS (1 << 3) /* layer 3: Python object-specific memory */
+
+ #define MEMTYPE_MASK (MEM_CORE | MEM_OBJECT | MEM_OBCLASS)
+
+ #define MEMPROF_GLOBAL (1 << 4)
+ #define MEMPROF_DETAIL (1 << 5)
+
+ #define MEMPROF_MASK (MEMPROF_GLOBAL | MEMPROF_DETAIL)
+
+ #define mptype_global(t) bitset(bitclear((t),MEMPROF_MASK),MEMPROF_GLOBAL)
+ #define mptype_detail(t) bitset(bitclear((t),MEMPROF_MASK),MEMPROF_DETAIL)
+ #define mptype_isglobal(t) (bittest((t), MEMPROF_GLOBAL))
+ #define mptype_check(t) (bitcount((t) & MEMPROF_MASK) == 1)
+
+ static int default_profile_type = MEM_OBJECT;
+
+ /* memprof object */
+ typedef struct {
+ PyObject_HEAD
+ int mp_type; /* type flags */
+ PyObject *mp_size; /* size class or list of memprof objects */
+ malloc_profile *mp_data; /* a snapshot of the profile data */
+ } MemProfObject;
+
+ staticforward PyTypeObject MemProf_Type;
+ #define MemProf_Check(v) ((v)->ob_type == &MemProf_Type)
+
+ static MemProfObject*
+ MemProf_new(int type, int size)
+ {
+ MemProfObject *mpo;
+ MemProfObject *x;
+ int i;
+
+ /* a bit of sanity */
+ if (!mptype_check(type))
+ return NULL;
+
+ mpo = PyObject_New(MemProfObject, &MemProf_Type);
+ if (mpo == NULL)
+ return NULL;
+ /* set the type */
+ mpo->mp_type = type;
+
+ if (!mptype_isglobal(type))
+ /* for size class profiles, return here */
+ return mpo;
+
+ /* the global profile allocates storage and creates a
+ list of
profiles per size class in the size slot by + taking care to update the mem pointers of its kids. */ + mpo->mp_data = mprof_new(); + if (mpo->mp_data == NULL) { + PyObject_Del(mpo); + PyErr_NoMemory(); + return NULL; + } + memcpy(mpo->mp_data, gmp, + sizeof(malloc_profile) * (MAX_SMALL_SIZE_CLASSES+2)); + + mpo->mp_size = PyTuple_New(MAX_SMALL_SIZE_CLASSES+1); + if (mpo->mp_size == NULL) { + mprof_del(mpo->mp_data); + PyObject_Del(mpo); + return NULL; + } + type = mptype_detail(type); /* set to size class */ + for (i = 0; i <= MAX_SMALL_SIZE_CLASSES; i++) { + x = MemProf_new(type, i); + if (x == NULL || + (x->mp_size = PyInt_FromLong((long)i)) == NULL) { + mprof_del(mpo->mp_data); + Py_DECREF(mpo->mp_size); + PyObject_Del(mpo); + return NULL; + } + PyTuple_SET_ITEM(mpo->mp_size, i, (PyObject *)x); + x->mp_data = mpo->mp_data + i+1; + } + return mpo; + } + + static void + MemProf_dealloc(MemProfObject *mpo) + { + if (mptype_isglobal(mpo->mp_type)) + mprof_del(mpo->mp_data); + Py_DECREF(mpo->mp_size); + PyObject_Del(mpo); + } + + /* members */ + static struct memberlist memprof_memberlist[] = { + /* No members are implemented with this table! */ + {"malloc", T_INT, 0, RO}, + {"calloc", T_INT, 0, RO}, + {"realloc", T_INT, 0, RO}, + {"free", T_INT, 0, RO}, + {"memory", T_INT, 0, RO}, + {"peakmemory", T_INT, 0, RO}, + {"blocks", T_INT, 0, RO}, + {"peakblocks", T_INT, 0, RO}, + {"sizerange", T_INT, 0, RO}, + {"sizeclass", T_INT, 0, RO}, + {"memlayer", T_INT, 0, RO}, + {"isglobal", T_INT, 0, RO}, + {NULL} /* sentinel */ + }; + + /* helpers */ + static int + getmemlayer(MemProfObject *mpo) + { + int i = mpo->mp_type & MEMTYPE_MASK; + int j = 0; + /* bit position */ + while (i >>= 1) + j++; + return j; + } + + static void + getsizerange(MemProfObject *mpo, int *min, int *max) + { + int size; + if (mpo->mp_type & MEMPROF_GLOBAL) { + *min = 0; + *max = -1; + return; + } + /* size class */ + size = (int)PyInt_AsLong(mpo->mp_size); + *min = get_class_size(size) - ALIGNMENT + 1; + *max = (size < MAX_SMALL_SIZE_CLASSES) ? 
get_class_size(size) : -1;
+ }
+
+ static PyObject *
+ MemProf_getattr(MemProfObject *mpo, char *name)
+ {
+ PyObject *x;
+ int m, n;
+
+ #define _mpo_member(a,b,c) ((((mpo->mp_data)->a).b).c)
+
+ if (strcmp(name, "memory") == 0) {
+ return Py_BuildValue("(lll)",
+ _mpo_member(mem, requested, cur),
+ _mpo_member(mem, allocated, cur),
+ _mpo_member(mem, estimated, cur));
+ }
+ if (strcmp(name, "peakmemory") == 0) {
+ return Py_BuildValue("(lll)",
+ _mpo_member(mem, requested, max),
+ _mpo_member(mem, allocated, max),
+ _mpo_member(mem, estimated, max));
+ }
+ if (strcmp(name, "blocks") == 0) {
+ return Py_BuildValue("(ll)",
+ _mpo_member(blocks, requested, cur),
+ _mpo_member(blocks, allocated, cur));
+ }
+ if (strcmp(name, "peakblocks") == 0) {
+ return Py_BuildValue("(ll)",
+ _mpo_member(blocks, requested, max),
+ _mpo_member(blocks, allocated, max));
+ }
+ if (strcmp(name, "malloc") == 0) {
+ return Py_BuildValue("(lii)",
+ _mpo_member(funcs, malloc, calls),
+ _mpo_member(funcs, malloc, errors),
+ _mpo_member(funcs, malloc, warnings));
+ }
+ if (strcmp(name, "calloc") == 0) {
+ return Py_BuildValue("(lii)",
+ _mpo_member(funcs, calloc, calls),
+ _mpo_member(funcs, calloc, errors),
+ _mpo_member(funcs, calloc, warnings));
+ }
+ if (strcmp(name, "realloc") == 0) {
+ return Py_BuildValue("(lii)",
+ _mpo_member(funcs, realloc, calls),
+ _mpo_member(funcs, realloc, errors),
+ _mpo_member(funcs, realloc, warnings));
+ }
+ if (strcmp(name, "free") == 0) {
+ return Py_BuildValue("(lii)",
+ _mpo_member(funcs, free, calls),
+ _mpo_member(funcs, free, errors),
+ _mpo_member(funcs, free, warnings));
+ }
+ if (strcmp(name, "sizerange") == 0) {
+ getsizerange(mpo, &m, &n);
+ return Py_BuildValue("(ii)", m, n);
+ }
+ if (strcmp(name, "sizeclass") == 0) {
+ x = mpo->mp_size;
+ Py_INCREF(x);
+ return x;
+ }
+ if (strcmp(name, "memlayer") == 0) {
+ return PyInt_FromLong((long)getmemlayer(mpo));
+ }
+ if (strcmp(name, "isglobal") == 0) {
+ x = mptype_isglobal(mpo->mp_type) ? Py_True : Py_False;
+ Py_INCREF(x);
+ return x;
+ }
+ /* just for fun */
+ return PyMember_Get((char *)mpo, memprof_memberlist, name);
+ }
+
+ static PyObject *
+ MemProf_repr(MemProfObject *mpo)
+ {
+ char buf[100];
+ int m, n;
+
+ if (mptype_isglobal(mpo->mp_type)) {
+ sprintf(buf,
+ "<memory profile, layer %d, %d size classes>",
+ getmemlayer(mpo), PyTuple_Size(mpo->mp_size));
+ }
+ else {
+ getsizerange(mpo, &m, &n);
+ sprintf(buf,
+ "<memory profile, layer %d, size range [%d, %d]>",
+ getmemlayer(mpo), m, n);
+ }
+ return PyString_FromString(buf);
+ }
+
+ static PyTypeObject MemProf_Type = {
+ PyObject_HEAD_INIT(&PyType_Type)
+ 0, /*ob_size*/
+ "memory profile", /*tp_name*/
+ sizeof(MemProfObject), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ /* methods */
+ (destructor)MemProf_dealloc, /*tp_dealloc*/
+ 0, /*tp_print*/
+ (getattrfunc)MemProf_getattr, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ (reprfunc)MemProf_repr, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ };
+
+ /* ----------------------------------------------------------------------- */
+
+ /* module interface */
+
+ static char mp_getprofile__doc__[] =
+ "getprofile([type]) -> object\n\n\
+ Return a snapshot of the current memory profile of the interpreter.\n\
+ An optional type argument may be provided to request the profile of\n\
+ a specific memory layer. It must be one of the following constants:\n\
+ \n\
+ MEM_CORE - layer 1: Python core memory\n\
+ MEM_OBJECT - layer 2: Python object memory\n\
+ MEM_OBCLASS - layer 3: Python object-specific memory\n\
+ \n\
+ If a type argument is not specified, the default profile is returned.\n\
+ The default profile type can be set with the setproftype() function.";
+ /* ... and if you don't understand the above, don't use this module :-) */
+
+ static PyObject *
+ mp_getprofile(PyObject *self, PyObject *args)
+ {
+ int type = default_profile_type;
+ if (!PyArg_ParseTuple(args, "|i:getprofile", &type))
+ return NULL;
+ if (bitcount(type & MEMTYPE_MASK) != 1) {
+ PyErr_SetString(PyExc_ValueError, "bad profile type");
+ return NULL;
+ }
+ type = mptype_global(type);
+ return (PyObject *)MemProf_new(type, -1);
+ }
+
+ static char mp_setproftype__doc__[] =
+ "setproftype(type) -> None\n\n\
+ Set the default profile type returned by getprofile() without arguments.";
+
+ static PyObject *
+ mp_setproftype(PyObject *self, PyObject *args)
+ {
+ int type = 0;
+ if (!PyArg_ParseTuple(args, "i:setproftype", &type))
+ return NULL;
+ if (bitcount(type & MEMTYPE_MASK) != 1) {
+ PyErr_SetString(PyExc_ValueError, "bad profile type");
+ return NULL;
+ }
+ /* temporary */
+ if (bittest(type, MEM_CORE | MEM_OBCLASS)) {
+ /* for now, nothing else but MEM_OBJECT */
+ PyErr_SetString(PyExc_NotImplementedError,
+ "this memory type is not supported yet");
+ return NULL;
+ }
+ type = mptype_global(type);
+ default_profile_type = type;
+ Py_INCREF(Py_None);
+ return Py_None;
+ }
+
+ /* changing the threshold & pbo doesn't really make sense for now :-( */
+
+ static char mp_getthreshold__doc__[] =
+ "getthreshold() -> int\n\n\
+ Return the size threshold (in bytes) between small and big blocks.";
+
+ static PyObject *
+ mp_getthreshold(PyObject *self, PyObject *args)
+ {
+ if (!PyArg_ParseTuple(args, ":getthreshold"))
+ return NULL;
+ return PyInt_FromLong((long)small_block_threshold);
+ }
+
+ /* XXX beginning of insane function */
+ static char mp_setthreshold__doc__[] =
+ "setthreshold(int) -> None\n\n\
+ Set the size threshold (in bytes) between small and big blocks.\n\
+ The maximum is 256. The argument is rounded up to a multiple of ALIGNMENT.";
+
+ static PyObject *
+ mp_setthreshold(PyObject *self, PyObject *args)
+ {
+ int threshold = 0;
+ if (!PyArg_ParseTuple(args, "i:setthreshold", &threshold))
+ return NULL;
+ if (threshold <= 0 || threshold > 256) {
+ PyErr_SetString(PyExc_ValueError,
+ "threshold out of range");
+ return NULL;
+ }
+ small_block_threshold = roundup(threshold, ALIGNMENT);
+ Py_INCREF(Py_None);
+ return Py_None;
+ }
+ /* XXX end of insane function */
+
+ static char mp_getpbo__doc__[] =
+ "getpbo() -> int\n\n\
+ Return the fixed per block overhead (pbo) used for estimations.";
+
+ static PyObject *
+ mp_getpbo(PyObject *self, PyObject *args)
+ {
+ if (!PyArg_ParseTuple(args, ":getpbo"))
+ return NULL;
+ return PyInt_FromLong((long)pbo);
+ }
+
+ static char mp_setpbo__doc__[] =
+ "setpbo(int) -> None\n\n\
+ Set the fixed per block overhead (pbo) used for estimations.";
+
+ /* XXX beginning of insane function */
+ static PyObject *
+ mp_setpbo(PyObject *self, PyObject *args)
+ {
+ int n = 0;
+ if (!PyArg_ParseTuple(args, "i:setpbo", &n))
+ return NULL;
+ if (n < 0) {
+ PyErr_SetString(PyExc_ValueError, "arg must be non-negative");
+ return NULL;
+ }
+ pbo = n;
+ Py_INCREF(Py_None);
+ return Py_None;
+ }
+ /* XXX end of insane function */
+
+ static char mp_start__doc__[] =
+ "start() -> int\n\n\
+ Start the profiler.
Returns 1 if it is currently running, 0 otherwise."; + + static PyObject * + mp_start(PyObject *self, PyObject *args) + { + if (!PyArg_ParseTuple(args, ":start")) + return NULL; + #ifdef WITH_PYMALLOC + return PyInt_FromLong((long)(mprof_start() + 1)); + #else + Py_INCREF(Py_False); + return Py_False; + #endif + } + + static char mp_stop__doc__[] = + "stop() -> None\n\n\ + Stop the profiler. If it has been stopped, this function has no effect."; + + static PyObject * + mp_stop(PyObject *self, PyObject *args) + { + if (!PyArg_ParseTuple(args, ":stop")) + return NULL; + mprof_stop(); + Py_INCREF(Py_None); + return Py_None; + } + + static char mp_isprofiling__doc__[] = + "isprofiling() -> 1 if profiling is currently in progress, 0 otherwise."; + + static PyObject * + mp_isprofiling(PyObject *self, PyObject *args) + { + if (!PyArg_ParseTuple(args, ":isprofiling")) + return NULL; + return PyInt_FromLong((long)profiling); + } + + static char mp_seterrlevel__doc__[] = + "seterrlevel(flags) -> None\n\n\ + Set the error level of the profiler. The provided argument instructs the\n\ + profiler on how tolerant it should be against any detected simple errors\n\ + or memory corruption. The following non-exclusive values are recognized:\n\ + \n\ + ERROR_IGNORE - ignore silently any detected errors\n\ + ERROR_REPORT - report all detected errors to stderr\n\ + ERROR_STOP - stop the profiler on the first detected error\n\ + ERROR_RAISE - raise a MemoryError exception for all detected errors\n\ + ERROR_ABORT - report the first error as fatal and abort immediately\n\ + \n\ + The default error level is ERROR_REPORT."; + + static PyObject * + mp_seterrlevel(PyObject *self, PyObject *args) + { + if (!PyArg_ParseTuple(args, "i:seterrlevel", &error_level)) + return NULL; + Py_INCREF(Py_None); + return Py_None; + } + + static char mp_geterrlevel__doc__[] = + "geterrlevel() -> errflags\n\n\ + Get the current error level of the profiler."; + + static PyObject * + mp_geterrlevel(PyObject *self, PyObject *args) + { + if (!PyArg_ParseTuple(args, ":geterrlevel")) + return NULL; + return PyInt_FromLong((long)error_level); + } + + static char memprof__doc__[] = + "This module provides access to the Python memory profiler."; + + static PyMethodDef MemProfMethods[] = { + {"getprofile", mp_getprofile, METH_VARARGS, mp_getprofile__doc__}, + {"setproftype", mp_setproftype, METH_VARARGS, mp_setproftype__doc__}, + {"getthreshold", mp_getthreshold, METH_VARARGS, mp_getthreshold__doc__}, + {"setthreshold", mp_setthreshold, METH_VARARGS, mp_setthreshold__doc__}, + {"geterrlevel", mp_geterrlevel, METH_VARARGS, mp_geterrlevel__doc__}, + {"seterrlevel", mp_seterrlevel, METH_VARARGS, mp_seterrlevel__doc__}, + {"getpbo", mp_getpbo, METH_VARARGS, mp_getpbo__doc__}, + {"setpbo", mp_setpbo, METH_VARARGS, mp_setpbo__doc__}, + {"isprofiling", mp_isprofiling, METH_VARARGS, mp_isprofiling__doc__}, + {"stop", mp_stop, METH_VARARGS, mp_stop__doc__}, + {"start", mp_start, METH_VARARGS, mp_start__doc__}, + {NULL, NULL} /* Sentinel */ + }; + + static void + insint(PyObject *d, char*name, int value) + { + PyObject *v = PyInt_FromLong((long)value); + if (!v || PyDict_SetItemString(d, name, v)) + Py_FatalError("can't add constant to memprof module"); + Py_DECREF(v); + } + + void + initmemprof(void) + { + PyObject *d, *m; + + if (mprof_init() != 0) + Py_FatalError("can't initialize memprof module"); + + m = Py_InitModule4("memprof", + MemProfMethods, + memprof__doc__, + NULL, + PYTHON_API_VERSION); + d = PyModule_GetDict(m); + + insint(d, "ALIGNMENT", 
ALIGNMENT); + + insint(d, "MEM_CORE", MEM_CORE); + insint(d, "MEM_OBJECT", MEM_OBJECT); + insint(d, "MEM_OBCLASS", MEM_OBCLASS); + + insint(d, "ERROR_IGNORE", ERROR_IGNORE); + insint(d, "ERROR_REPORT", ERROR_REPORT); + insint(d, "ERROR_STOP", ERROR_STOP); + insint(d, "ERROR_RAISE", ERROR_RAISE); + insint(d, "ERROR_ABORT", ERROR_ABORT); + } + + /* ----------------------------------------------------------------------- */ + + /* Public C interface */ + + int + _PyMem_ProfilerInit(void) + { + return mprof_start(); + } + + void + _PyMem_ProfilerFini(void) + { + mprof_stop(); + mprof_fini(); + } + diff -crN CVS/Python/pythonrun.c dev/Python/pythonrun.c *** CVS/Python/pythonrun.c Sat Aug 19 05:42:33 2000 --- dev/Python/pythonrun.c Sat Aug 19 06:36:45 2000 *************** *** 72,77 **** --- 72,78 ---- int Py_UseClassExceptionsFlag = 1; /* Needed by bltinmodule.c: deprecated */ int Py_FrozenFlag; /* Needed by getpath.c */ int Py_UnicodeFlag = 0; /* Needed by compile.c */ + int Py_ProfileFlag = 0; /* Needed by profilers */ static int initialized = 0; *************** *** 113,118 **** --- 114,129 ---- Py_VerboseFlag = 1; if ((p = getenv("PYTHONOPTIMIZE")) && *p != '\0') Py_OptimizeFlag = 1; + if ((p = getenv("PYTHONPROFILE")) && *p != '\0') + Py_ProfileFlag = 1; + + if (Py_ProfileFlag) { + #ifdef WITH_MEMPROF + #ifdef WITH_PYMALLOC + _PyMem_ProfilerInit(); + #endif + #endif + } interp = PyInterpreterState_New(); if (interp == NULL) *************** *** 263,268 **** --- 274,288 ---- PyGrammar_RemoveAccelerators(&_PyParser_Grammar); call_ll_exitfuncs(); + + if (Py_ProfileFlag) { + #ifdef WITH_MEMPROF + #ifdef WITH_PYMALLOC + _PyMem_ProfilerFini(); + #endif + #endif + } + #ifdef Py_TRACE_REFS _Py_ResetReferences(); diff -crN CVS/acconfig.h dev/acconfig.h *** CVS/acconfig.h Sat Aug 19 05:42:08 2000 --- dev/acconfig.h Sat Aug 19 06:44:00 2000 *************** *** 144,149 **** --- 144,152 ---- /* Define if you want to compile in rudimentary thread support */ #undef WITH_THREAD + /* Define if you want to compile in memory profiling support */ + #undef WITH_MEMPROF + /* Define if you want to compile in cycle garbage collection */ #undef WITH_CYCLE_GC diff -crN CVS/configure.in dev/configure.in *** CVS/configure.in Sat Aug 19 05:42:08 2000 --- dev/configure.in Sat Aug 19 07:46:48 2000 *************** *** 736,741 **** --- 736,757 ---- LIBOBJS="$LIBOBJS thread.o"]) fi + # Check for memory profiling support + AC_SUBST(USE_MEMPROF_MODULE) + AC_MSG_CHECKING(for --with-memprof) + AC_ARG_WITH(memprof, + [ --with(out)-memprof disable/enable the memory profiler], [ + if test "$withval" != no + then + USE_MEMPROF_MODULE="" + AC_DEFINE(WITH_MEMPROF) AC_MSG_RESULT(yes) + else + USE_MEMPROF_MODULE="# " + AC_MSG_RESULT(no) + fi], + [USE_MEMPROF_MODULE="# " + AC_MSG_RESULT(no)]) + # Check for GC support AC_SUBST(USE_GC_MODULE) USE_GC_MODULE=""
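A few standalone sketches follow for reviewers; they are notes about the patch, not part of it. First, the size-class arithmetic: the roundup/get_size_class/get_class_size macros map a request of n bytes to the profile slot gmp + (class + 1). This little program copies the macros verbatim; the driver around them is mine:

#include <stdio.h>
#include <stddef.h>

#define ALIGNMENT 8
#define roundup(x, y) (((x)==0) ? (y) : ((((x)+((y)-1))/(y))*(y)))
#define get_size_class(x) ((roundup((x),(ALIGNMENT))/(ALIGNMENT))-1)
#define get_class_size(x) (((x)+1)*(ALIGNMENT))

int main(void)
{
	size_t sizes[] = {0, 1, 7, 8, 9, 100, 256};
	int i;

	for (i = 0; i < (int)(sizeof(sizes)/sizeof(sizes[0])); i++) {
		int c = get_size_class(sizes[i]);
		/* e.g. "request 9 -> class 1 (16-byte blocks)" */
		printf("request %3lu -> class %2d (%d-byte blocks)\n",
		       (unsigned long)sizes[i], c, get_class_size(c));
	}
	return 0;
}

Note that a request of 0 bytes lands in class 0 like a request of 8 bytes does, which is why the profiler also counts an explicit warning for malloc(0)/calloc(0) calls.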
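Second, the watermark bookkeeping. Every counter pair in the stat structures follows the same cur/max pattern; the type and macros below are copied from the patch, and the sample allocation events are hypothetical:

#include <stdio.h>

typedef struct {
	long cur;
	long max;
} watermark_t;

#define watermark_up(w, n) do { (w).cur += (n); \
                                if ((w).cur > (w).max) \
                                    (w).max = (w).cur; \
                           } while(0)
#define watermark_down(w, n) (w).cur -= (n)

int main(void)
{
	watermark_t mem = {0, 0};

	watermark_up(mem, 100);		/* say, malloc(100) */
	watermark_up(mem, 50);		/* malloc(50) -- peak is now 150 */
	watermark_down(mem, 100);	/* the first block is freed */
	watermark_up(mem, 20);		/* malloc(20) -- peak is unaffected */

	printf("cur=%ld max=%ld\n", mem.cur, mem.max);	/* cur=70 max=150 */
	return 0;
}

The "memory" attribute of a profile object reports the cur fields of the requested/allocated/estimated watermarks, and "peakmemory" reports the corresponding max fields.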
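Third, the re-entrancy discipline of target_malloc() and friends. The wrappers are installed as hooks inside the profiled allocator, so they must take the hooks down around the real call, or the profiler would recurse into itself. A reduced sketch of the same idea; the names here (malloc_hook, hooked_malloc, profiling_malloc) are illustrative, not the patch's:

#include <stdio.h>
#include <stdlib.h>

static void *(*malloc_hook)(size_t) = NULL;

/* stands in for the profiled allocator: it calls the hook if one is set */
static void *hooked_malloc(size_t n)
{
	return malloc_hook ? malloc_hook(n) : malloc(n);
}

/* stands in for wrapper_malloc(): collect stats, then call back down */
static void *profiling_malloc(size_t n)
{
	void *p;

	malloc_hook = NULL;		/* clear, as mprof_hooks_clear() does */
	printf("profiled malloc(%lu)\n", (unsigned long)n);
	p = hooked_malloc(n);		/* cannot re-enter profiling_malloc */
	malloc_hook = profiling_malloc;	/* re-arm, as mprof_hooks_set() does */
	return p;
}

int main(void)
{
	malloc_hook = profiling_malloc;
	free(hooked_malloc(32));	/* prints "profiled malloc(32)" once */
	return 0;
}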
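Finally, what using the module looks like. Assuming Python is configured with --with-memprof and --with-pymalloc, an embedding application (or, equivalently, a few lines at the interactive prompt) can drive the profiler with the functions and attributes defined above; the script between the quotes is the interesting part:

#include "Python.h"

int main(void)
{
	Py_Initialize();
	PyRun_SimpleString(
		"import memprof\n"
		"memprof.seterrlevel(memprof.ERROR_REPORT)\n"
		"memprof.start()\n"
		"junk = ['x' * n for n in range(1, 100)]\n"
		"p = memprof.getprofile()\n"
		"print 'memory (requested, allocated, estimated):', p.memory\n"
		"print 'peak blocks (requested, allocated):', p.peakblocks\n"
		"memprof.stop()\n");
	Py_Finalize();
	return 0;
}

The same script run under `python -p` (or with PYTHONPROFILE set) would profile from interpreter startup, since the flag makes Py_Initialize() call _PyMem_ProfilerInit() early on.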