Index: MemPool.c =================================================================== RCS file: /cvsroot/squid/squid3/lib/MemPool.c,v retrieving revision 1.6 diff -u -r1.6 MemPool.c --- MemPool.c 8 Sep 2003 02:12:43 -0000 1.6 +++ MemPool.c 22 Dec 2003 15:50:07 -0000 @@ -85,6 +85,30 @@ #define FLUSH_LIMIT 1000 /* Flush memPool counters to memMeters after flush limit calls */ #define MEM_MAX_MMAP_CHUNKS 2048 +#if !DISABLE_POOLS +// #define DEBUG_MEMPOOLS 4 /* do not enable debug when pools disabled */ +#endif +/* + * Debug: OR'ed bitmask: + * 1 - check for right pool during free, + * 2 - check whole free list/chain, detects freelist corruption + * 4 - mprotect pages on freelist + * For this to work, we switch to mmap for all chunks applicable, + * ie. objects of page size multiples so that each object starts + * and ends at page boundary. + * Your OS must support mprotect, mmap, and have enough kernel resources + * Use only to catch hard to find buffer overrun bugs. + * + * NB! 1 and 2 can consume 99% of your cpu! 
+ */ + +#if (DEBUG_MEMPOOLS & 4) +#include <sys/mman.h> +#define DEBUG_MPAGE(x) x +#else +#define DEBUG_MPAGE(x) +#endif + #include #if HAVE_STRING_H @@ -232,7 +256,12 @@ MemChunk *chunk; chunk = xcalloc(1, sizeof(MemChunk)); /* should have a pool for this too */ - chunk->objCache = xcalloc(1, pool->chunk_size); +#if (DEBUG_MEMPOOLS & 4) + if ( (pool->obj_size & (MEM_PAGE_SIZE-1)) == 0) { + chunk->objCache = mmap(0, pool->chunk_size, (PROT_WRITE | PROT_READ), (MAP_SHARED | MAP_ANON), -1, 0); + } else +#endif + chunk->objCache = xcalloc(1, pool->chunk_size); Free = chunk->freeList = chunk->objCache; for (i = 1; i < pool->chunk_capacity; i++) { @@ -262,7 +291,12 @@ pool->chunkCount--; lastPool = pool; pool->allChunks = splay_delete(chunk, pool->allChunks, memCompChunks); - xfree(chunk->objCache); +#if (DEBUG_MEMPOOLS & 4) + if ( (pool->obj_size & (MEM_PAGE_SIZE-1)) == 0) + munmap(chunk->objCache, pool->chunk_size); + else +#endif + xfree(chunk->objCache); xfree(chunk); } @@ -272,6 +306,28 @@ memPoolPush(MemPool * pool, void *obj) { void **Free; +#if (DEBUG_MEMPOOLS & 1) + /* We walk whole FreeCache for early corruption detection: SLOW */ + Free = pool->freeCache; + while (Free) { + assert(Free != obj && "memPoolPush: item already in freeCache! Duplicate Free"); +#if (DEBUG_MEMPOOLS & 2) + /* First item can't be wrong, Checked during Free of obj below. + * So we check pointer *Free, to have corrupt item in Free if assert triggers + * Would be nice to have file:line of who freed this item somewhere.. 
+ */ + if (*Free) { + lastPool = pool; + pool->allChunks = splay_splay((const void **) &*Free, pool->allChunks, memCompObjChunks); + assert(splayLastResult == 0 && "FreeCache chain points outside this POOL: Use after Free corruption"); + } +#endif + Free = *Free; + } + lastPool = pool; + pool->allChunks = splay_splay((const void **) &obj, pool->allChunks, memCompObjChunks); + assert(splayLastResult == 0 && "memPoolPush: item doesnt belong to this Pool or late duplicate Free"); +#endif /* XXX We should figure out a sane way of avoiding having to clear * all buffers. For example data buffers such as used by MemBuf do * not really need to be cleared.. There was a condition based on @@ -281,6 +337,7 @@ Free = obj; *Free = pool->freeCache; pool->freeCache = obj; + DEBUG_MPAGE(if ((pool->obj_size & (MEM_PAGE_SIZE-1)) == 0) mprotect(obj, pool->obj_size, PROT_READ)); return; } @@ -298,6 +355,7 @@ /* first, try cache */ if (pool->freeCache) { + DEBUG_MPAGE(if ((pool->obj_size & (MEM_PAGE_SIZE-1)) == 0) assert(0 == mprotect(pool->freeCache, pool->obj_size, PROT_READ | PROT_WRITE))); Free = pool->freeCache; pool->freeCache = *Free; *Free = NULL; @@ -312,7 +370,13 @@ chunk = pool->nextFreeChunk; Free = chunk->freeList; +#if (DEBUG_MEMPOOLS & 2) + lastPool = pool; + pool->allChunks = splay_splay((const void **) &Free, pool->allChunks, memCompObjChunks); + assert(splayLastResult == 0 && "memPoolGet: freeList item not of this Pool or use after Free corruption"); +#endif chunk->freeList = *Free; + DEBUG_MPAGE(if ((pool->obj_size & (MEM_PAGE_SIZE-1)) == 0) assert(0 == mprotect(*(void **) &Free, pool->obj_size, PROT_READ | PROT_WRITE))); *Free = NULL; chunk->inuse_count++; chunk->lastref = squid_curtime; @@ -580,12 +644,14 @@ MemChunk *chunk; lastPool = pool; pool->allChunks = splay_splay((const void **)&Free, pool->allChunks, memCompObjChunks); - assert(splayLastResult == 0); + assert(splayLastResult == 0 && "FreeCache item not of this POOL or use after Free corruption"); chunk = 
pool->allChunks->data; assert(chunk->inuse_count > 0); chunk->inuse_count--; + DEBUG_MPAGE(if ((pool->obj_size & (MEM_PAGE_SIZE-1)) == 0) assert(0 == mprotect(Free, pool->obj_size, PROT_READ | PROT_WRITE))); pool->freeCache = *(void **)Free; /* remove from global cache */ *(void **)Free = chunk->freeList; /* stuff into chunks freelist */ + DEBUG_MPAGE(if ((pool->obj_size & (MEM_PAGE_SIZE-1)) == 0) mprotect(Free, pool->obj_size, PROT_READ)); chunk->freeList = Free; chunk->lastref = squid_curtime; }