/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */

/* HACK... */
#ifdef MAP_ANON

#include "sm/generic.h"
SM_RCSID("@(#)$Id: malloc.c,v 1.7 2002/06/01 15:31:23 ca Exp $")

/*
 * use this for malloc()/free() with a max. memory size
 * and some minimum size allocated at the start.
 * this can be used as "backup" memory that is there in an emergency
 * situation.
 *
 * todo: put the global variables into a struct
 *	write open(), close() functions, change all function names,
 *	check locking.
 *	check constants (page size etc)
 */

/*
 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are
 * related to internal conditions and consistency in malloc.c. This has
 * a noticeable runtime performance hit, and generally will not do you
 * any good unless you fiddle with the internals of malloc or want
 * to catch random pointer corruption as early as possible.
 */
#ifndef MALLOC_EXTRA_SANITY
#define MALLOC_EXTRA_SANITY 0
#endif

/*
 * Defining MALLOC_STATS will enable you to call malloc_dump() and set
 * the [dD] options in the MALLOC_OPTIONS environment variable.
 * It has no run-time performance hit, but does pull in stdio...
 */
#ifndef MALLOC_STATS
#define MALLOC_STATS 0
#endif

/*
 * What to use for Junk.  This is the byte value we use to fill with
 * when the 'J' option is enabled.
 */
#define SOME_JUNK	0xd0	/* as in "Duh" :-) */

#include "sm/types.h"
#include "sm/param.h"
#include "sm/mman.h"
#include "sm/io.h"
#include "sm/string.h"
#include "sm/fcntl.h"
#include "sm/error.h"

#define iovec sm_iov
/*
 * NOTE(review): the target of this #include was lost in an extraction
 * that ate every <...> span; <sys/uio.h> (struct iovec, writev()) is
 * what the error-reporting code below needs -- verify against upstream.
 */
#include <sys/uio.h>

/*
 * The basic parameters you can tweak.
 *
 * malloc_pageshift	pagesize = 1 << malloc_pageshift
 *			It's probably best if this is the native
 *			page size, but it shouldn't have to be.
 *
 * malloc_minsize	minimum size of an allocation in bytes.
 *			If this is too small it's too much work
 *			to manage them.  This is also the smallest
 *			unit of alignment used for the storage
 *			returned by malloc/realloc.
 *
 */

#if defined(__i386__) && defined(__FreeBSD__)
#   define malloc_pageshift		12U
#   define malloc_minsize		16U
#endif /* __i386__ && __FreeBSD__ */

#if defined(__sparc__) && !defined(__OpenBSD__)
#   define malloc_pageshift		12U
#   define malloc_minsize		16U
#   define MAP_ANON			(0)
#   define USE_DEV_ZERO
#   define MADV_FREE			MADV_DONTNEED
#endif /* __sparc__ */

/* Insert your combination here... */
#if defined(__FOOCPU__) && defined(__BAROS__)
#   define malloc_pageshift		12U
#   define malloc_minsize		16U
#endif /* __FOOCPU__ && __BAROS__ */

#if defined(__OpenBSD__) && !defined(__sparc__)
#   define malloc_pageshift		(PGSHIFT)
#   define malloc_minsize		16U
#endif /* __OpenBSD__ */

#ifdef _THREAD_SAFE
# include "thread_private.h"
# if 0 /* kernel threads */
/* NOTE(review): this #include was also eaten by the <...>-stripping. */
#  include <pthread.h>
static pthread_mutex_t malloc_lock;
#  define THREAD_LOCK()		pthread_mutex_lock(&malloc_lock)
#  define THREAD_UNLOCK()	pthread_mutex_unlock(&malloc_lock)
#  define THREAD_LOCK_INIT()	pthread_mutex_init(&malloc_lock, 0);
# else /* user threads */
#  include "spinlock.h"
static spinlock_t malloc_lock = _SPINLOCK_INITIALIZER;
#  define THREAD_LOCK()		if (__isthreaded) _SPINLOCK(&malloc_lock)
#  define THREAD_UNLOCK()	if (__isthreaded) _SPINUNLOCK(&malloc_lock)
#  define THREAD_LOCK_INIT()
/*
 * Malloc can't use the wrapped write() if it fails very early, so
 * we use the unwrapped syscall _thread_sys_write()
 */
#  define write _thread_sys_write
ssize_t write(int, const void *, size_t);
#  undef malloc
#  undef realloc
#  undef free
# endif
#else /* no threads */
# define THREAD_LOCK()
# define THREAD_UNLOCK()
# define THREAD_LOCK_INIT()
#endif

/*
 * No user serviceable parts behind this point.
* * This structure describes a page worth of chunks. */ struct pginfo { struct pginfo *next; /* next on the free list */ void *page; /* Pointer to the page */ u_short size; /* size of this page's chunks */ u_short shift; /* How far to shift for this size chunks */ u_short free; /* How many free chunks */ u_short total; /* How many chunk */ u_long bits[1]; /* Which chunks are free */ }; /* * This structure describes a number of free pages. */ struct pgfree { struct pgfree *next; /* next run of free pages */ struct pgfree *prev; /* prev run of free pages */ void *page; /* pointer to free pages */ void *end; /* pointer to end of free pages */ u_long size; /* number of bytes free */ }; /* * How many bits per u_long in the bitmap. * Change only if not 8 bits/byte */ #define MALLOC_BITS (8*sizeof(u_long)) /* * Magic values to put in the page_directory */ #define MALLOC_NOT_MINE ((struct pginfo*) 0) #define MALLOC_FREE ((struct pginfo*) 1) #define MALLOC_FIRST ((struct pginfo*) 2) #define MALLOC_FOLLOW ((struct pginfo*) 3) #define MALLOC_MAGIC ((struct pginfo*) 4) #ifndef malloc_pageshift #define malloc_pageshift 12U #endif #ifndef malloc_minsize #define malloc_minsize 16U #endif #ifndef malloc_pageshift #error "malloc_pageshift undefined" #endif #if !defined(malloc_pagesize) #define malloc_pagesize (1UL<>1) #endif /* A mask for the offset inside a page. */ #define malloc_pagemask ((malloc_pagesize)-1) #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask))) #define ptr2index(fxm, foo) (((u_long)(foo) >> malloc_pageshift)-fxm->malloc_origo) /* fd of /dev/zero */ #ifdef USE_DEV_ZERO static int fdzero; #define MMAP_FD fdzero #define INIT_MMAP() \ { if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \ wrterror(fxm, "open of /dev/zero"); } #else #define MMAP_FD (-1) #define INIT_MMAP() #endif #ifdef __FreeBSD__ /* utrace ? 
*/ struct ut { void *p; size_t s; void *r; }; void utrace(struct ut *, int); #define UTRACE(fxm, a, b, c) \ if ((fxm)->malloc_utrace) \ {struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);} #else /* !__FreeBSD__ */ #define UTRACE(s,a,b,c) #endif struct fxmalloc_S { /* Set when initialization has been done */ unsigned malloc_started; /* Number of free pages we cache */ unsigned malloc_cache /* = 16 */; /* The offset from pagenumber to index into the page directory */ u_long malloc_origo; /* The last index in the page directory we care about */ u_long last_index; /* Pointer to page directory. Allocated "as if with" malloc */ struct pginfo **page_dir; /* How many slots in the page directory */ size_t malloc_ninfo; /* Free pages line up here */ struct pgfree free_list; /* Abort(), user doesn't handle problems. */ int malloc_abort; /* Are we trying to die ? */ int suicide; #if MALLOC_STATS /* dump statistics */ int malloc_stats; #endif /* MALLOC_STATS */ /* avoid outputting warnings? */ int malloc_silent; /* always realloc ? */ int malloc_realloc; #ifdef __FreeBSD__ /* pass the kernel a hint on free pages ? */ int malloc_hint; #endif /* xmalloc behaviour ? */ int malloc_xmalloc; /* zero fill ? */ int malloc_zero; /* junk fill ? */ int malloc_junk; #ifdef __FreeBSD__ /* utrace ? */ int malloc_utrace; #endif /* my last break. 
*/ void *malloc_brk; /* one location cache for free-list holders */ struct pgfree *px; /* compile-time options */ char *malloc_options; /* Name of the current public function */ char *malloc_func; int malloc_active; }; typedef struct fxmalloc_S fxmalloc_T; typedef struct fxmalloc_S *fxmalloc_P; /* Macro for mmap */ #define MMAP(size) \ mmap((void *)0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \ MMAP_FD, (off_t)0); /* * Necessary function declarations */ static int extend_pgdir(fxmalloc_P fxm, u_long index); static void *imalloc(fxmalloc_P fxm, size_t size); static void ifree(fxmalloc_P fxm, void *ptr); static void *irealloc(fxmalloc_P fxm, void *ptr, size_t size); static void *malloc_bytes(fxmalloc_P fxm, size_t size); #if MALLOC_STATS void malloc_dump(fxmalloc_P fxm, FILE *fd) { struct pginfo **pd; struct pgfree *pf; int j; pd = fxm->page_dir; /* print out all the pages */ for(j=0;j<=last_index;j++) { fprintf(fd, "%08lx %5d ", (j+malloc_origo) << malloc_pageshift, j); if (pd[j] == MALLOC_NOT_MINE) { for(j++;j<=last_index && pd[j] == MALLOC_NOT_MINE;j++) ; j--; fprintf(fd, ".. %5d not mine\n", j); } else if (pd[j] == MALLOC_FREE) { for(j++;j<=last_index && pd[j] == MALLOC_FREE;j++) ; j--; fprintf(fd, ".. %5d free\n", j); } else if (pd[j] == MALLOC_FIRST) { for(j++;j<=last_index && pd[j] == MALLOC_FOLLOW;j++) ; j--; fprintf(fd, ".. 
%5d in use\n", j); } else if (pd[j] < MALLOC_MAGIC) { fprintf(fd, "(%p)\n", pd[j]); } else { fprintf(fd, "%p %d (of %d) x %d @ %p --> %p\n", pd[j], pd[j]->free, pd[j]->total, pd[j]->size, pd[j]->page, pd[j]->next); } } for(pf=free_list.next; pf; pf=pf->next) { fprintf(fd, "Free: @%p [%p...%p[ %ld ->%p <-%p\n", pf, pf->page, pf->end, pf->size, pf->prev, pf->next); if (pf == pf->next) { fprintf(fd, "Free_list loops.\n"); break; } } /* print out various info */ fprintf(fd, "Minsize\t%d\n", malloc_minsize); fprintf(fd, "Maxsize\t%d\n", malloc_maxsize); fprintf(fd, "Pagesize\t%lu\n", (u_long)malloc_pagesize); fprintf(fd, "Pageshift\t%d\n", malloc_pageshift); fprintf(fd, "FirstPage\t%ld\n", malloc_origo); fprintf(fd, "LastPage\t%ld %lx\n", last_index+malloc_pageshift, (last_index + malloc_pageshift) << malloc_pageshift); fprintf(fd, "Break\t%ld\n", (u_long)sbrk(0) >> malloc_pageshift); } #endif /* MALLOC_STATS */ extern char *__progname; static void wrterror(fxmalloc_P fxm, char *p) { char *q = " error: "; struct iovec iov[4]; iov[0].iov_base = __progname; iov[0].iov_len = strlen(__progname); iov[1].iov_base = fxm->malloc_func; iov[1].iov_len = strlen(fxm->malloc_func); iov[2].iov_base = q; iov[2].iov_len = strlen(q); iov[3].iov_base = p; iov[3].iov_len = strlen(p); writev(STDERR_FILENO, iov, 4); fxm->suicide = 1; #if MALLOC_STATS if (malloc_stats) malloc_dump(fxm, stderr); #endif /* MALLOC_STATS */ abort(); } static void wrtwarning(fxmalloc_P fxm, char *p) { char *q = " warning: "; struct iovec iov[4]; if (fxm->malloc_abort) wrterror(fxm, p); else if (fxm->malloc_silent) return; iov[0].iov_base = __progname; iov[0].iov_len = strlen(__progname); iov[1].iov_base = fxm->malloc_func; iov[1].iov_len = strlen(fxm->malloc_func); iov[2].iov_base = q; iov[2].iov_len = strlen(q); iov[3].iov_base = p; iov[3].iov_len = strlen(p); writev(STDERR_FILENO, iov, 4); } #if MALLOC_STATS static void malloc_exit() { FILE *fd = fopen("malloc.out", "a"); char *q = "malloc() warning: Couldn't 
dump stats.\n"; if (fd) { malloc_dump(fd); fclose(fd); } else write(2, q, strlen(q)); } #endif /* MALLOC_STATS */ /* * Allocate a number of pages from the OS */ static void * map_pages(fxmalloc_P fxm, int pages) { caddr_t result, tail; result = (caddr_t)pageround((u_long)sbrk(0)); tail = result + (pages << malloc_pageshift); if (brk(tail)) { #if MALLOC_EXTRA_SANITY wrterror("(ES): map_pages fails\n"); #endif /* MALLOC_EXTRA_SANITY */ return 0; } fxm->last_index = ptr2index(fxm, tail) - 1; fxm->malloc_brk = tail; if ((fxm->last_index+1) >= fxm->malloc_ninfo && !extend_pgdir(fxm, fxm->last_index)) return 0; return result; } /* * Extend page directory */ static int extend_pgdir(fxmalloc_P fxm, u_long index) { struct pginfo **new, **old; size_t i, oldlen; /* Make it this many pages */ i = index * sizeof(*(fxm->page_dir));; i /= malloc_pagesize; i += 2; /* remember the old mapping size */ oldlen = fxm->malloc_ninfo * sizeof(*(fxm->page_dir)); /* * NOTE: we allocate new pages and copy the directory rather than tempt * fate by trying to "grow" the region.. There is nothing to prevent * us from accidently re-mapping space that's been allocated by our caller * via dlopen() or other mmap(). * * The copy problem is not too bad, as there is 4K of page index per * 4MB of malloc arena. * * We can totally avoid the copy if we open a file descriptor to associate * the anon mappings with. Then, when we remap the pages at the new * address, the old pages will be "magically" remapped.. But this means * keeping open a "secret" file descriptor..... 
*/ /* Get new pages */ new = (struct pginfo**) MMAP(i * malloc_pagesize); if (new == (struct pginfo **)-1) return 0; /* Copy the old stuff */ memcpy(new, fxm->page_dir, fxm->malloc_ninfo * sizeof(*(fxm->page_dir))); /* register the new size */ fxm->malloc_ninfo = i * malloc_pagesize / sizeof(*(fxm->page_dir)); /* swap the pointers */ old = fxm->page_dir; fxm->page_dir = new; /* Now free the old stuff */ munmap((void *) old, oldlen); return 1; } /* * Initialize the world */ static void malloc_init(fxmalloc_P fxm) { char *p, b[64]; int i, j; int save_errno = errno; THREAD_LOCK_INIT(); INIT_MMAP(); #if MALLOC_EXTRA_SANITY fxm->malloc_junk = 1; #endif /* MALLOC_EXTRA_SANITY */ for (i = 0; i < 3; i++) { if (i == 0) { j = readlink("/etc/malloc.conf", b, sizeof b - 1); if (j <= 0) continue; b[j] = '\0'; p = b; } else if (i == 1) { if (issetugid() == 0) p = getenv("MALLOC_OPTIONS"); else continue; } else if (i == 2) { p = fxm->malloc_options; } for (; p && *p; p++) { switch (*p) { case '>': fxm->malloc_cache <<= 1; break; case '<': fxm->malloc_cache >>= 1; break; case 'a': fxm->malloc_abort = 0; break; case 'A': fxm->malloc_abort = 1; break; #if MALLOC_STATS case 'd': fxm->malloc_stats = 0; break; case 'D': fxm->malloc_stats = 1; break; #endif /* MALLOC_STATS */ #ifdef __FreeBSD__ case 'h': fxm->malloc_hint = 0; break; case 'H': fxm->malloc_hint = 1; break; #endif /* __FreeBSD__ */ case 'r': fxm->malloc_realloc = 0; break; case 'R': fxm->malloc_realloc = 1; break; case 'j': fxm->malloc_junk = 0; break; case 'J': fxm->malloc_junk = 1; break; case 'n': fxm->malloc_silent = 0; break; case 'N': fxm->malloc_silent = 1; break; #ifdef __FreeBSD__ case 'u': fxm->malloc_utrace = 0; break; case 'U': fxm->malloc_utrace = 1; break; #endif /* __FreeBSD__ */ case 'x': fxm->malloc_xmalloc = 0; break; case 'X': fxm->malloc_xmalloc = 1; break; case 'z': fxm->malloc_zero = 0; break; case 'Z': fxm->malloc_zero = 1; break; default: j = fxm->malloc_abort; fxm->malloc_abort = 0; wrtwarning(fxm, 
"unknown char in MALLOC_OPTIONS\n"); fxm->malloc_abort = j; break; } } } UTRACE(fxm, 0, 0, 0); /* * We want junk in the entire allocation, and zero only in the part * the user asked for. */ if (fxm->malloc_zero) fxm->malloc_junk=1; #if MALLOC_STATS if (fxm->malloc_stats) atexit(malloc_exit); #endif /* MALLOC_STATS */ /* Allocate one page for the page directory */ fxm->page_dir = (struct pginfo **) MMAP(malloc_pagesize); if (fxm->page_dir == (struct pginfo **) -1) wrterror(fxm, "mmap(2) failed, check limits.\n"); /* * We need a maximum of malloc_pageshift buckets, steal these from the * front of the page_directory; */ fxm->malloc_origo = ((u_long)pageround((u_long)sbrk(0))) >> malloc_pageshift; fxm->malloc_origo -= malloc_pageshift; fxm->malloc_ninfo = malloc_pagesize / sizeof(*(fxm->page_dir)); /* Been here, done that */ fxm->malloc_started++; /* Recalculate the cache size in bytes, and make sure it's nonzero */ if (!fxm->malloc_cache) fxm->malloc_cache++; fxm->malloc_cache <<= malloc_pageshift; /* * This is a nice hack from Kaleb Keithly (kaleb@x.org). * We can sbrk(2) further back when we keep this on a low address. 
*/ fxm->px = (struct pgfree *) imalloc (fxm, sizeof(*(fxm->px))); errno = save_errno; } /* * Allocate a number of complete pages */ static void * malloc_pages(fxmalloc_P fxm, size_t size) { void *p, *delay_free; int i; struct pgfree *pf; u_long index; size = pageround(size); p = 0; delay_free = NULL; /* Look for free pages before asking for more */ for(pf = fxm->free_list.next; pf != NULL; pf = pf->next) { #if MALLOC_EXTRA_SANITY if (pf->size & malloc_pagemask) wrterror(fxm, "(ES): junk length entry on free_list\n"); if (!pf->size) wrterror(fxm, "(ES): zero length entry on free_list\n"); if (pf->page == pf->end) wrterror(fxm, "(ES): zero entry on free_list\n"); if (pf->page > pf->end) wrterror(fxm, "(ES): sick entry on free_list\n"); if ((void*)pf->page >= (void*)sbrk(0)) wrterror(fxm, "(ES): entry on free_list past brk\n"); if (page_dir[ptr2index(pf->page)] != MALLOC_FREE) wrterror(fxm, "(ES): non-free first page on free-list\n"); if (page_dir[ptr2index(pf->end)-1] != MALLOC_FREE) wrterror(fxm, "(ES): non-free last page on free-list\n"); #endif /* MALLOC_EXTRA_SANITY */ if (pf->size < size) continue; if (pf->size == size) { p = pf->page; if (pf->next) pf->next->prev = pf->prev; pf->prev->next = pf->next; delay_free = pf; break; } p = pf->page; pf->page = (char *)pf->page + size; pf->size -= size; break; } #if MALLOC_EXTRA_SANITY if (p && page_dir[ptr2index(p)] != MALLOC_FREE) wrterror("(ES): allocated non-free page on free-list\n"); #endif /* MALLOC_EXTRA_SANITY */ size >>= malloc_pageshift; /* Map new pages */ if (!p) p = map_pages(fxm, size); if (p) { index = ptr2index(fxm, p); fxm->page_dir[index] = MALLOC_FIRST; for (i=1;ipage_dir[index+i] = MALLOC_FOLLOW; if (fxm->malloc_junk) memset(p, SOME_JUNK, size << malloc_pageshift); } if (delay_free) { if (!fxm->px) fxm->px = delay_free; else ifree(fxm, delay_free); } return p; } /* * Allocate a page of fragments */ static __inline__ int malloc_make_chunks(fxmalloc_P fxm, int bits) { struct pginfo *bp; void *pp; int 
i, k, l; /* Allocate a new bucket */ pp = malloc_pages(fxm, (size_t)malloc_pagesize); if (!pp) return 0; /* Find length of admin structure */ l = sizeof *bp - sizeof(u_long); l += sizeof(u_long) * (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS); /* Don't waste more than two chunks on this */ if ((1UL<<(bits)) <= l+l) { bp = (struct pginfo *)pp; } else { bp = (struct pginfo *)imalloc(fxm, l); if (!bp) { ifree(fxm, pp); return 0; } } bp->size = (1UL<shift = bits; bp->total = bp->free = malloc_pagesize >> bits; bp->page = pp; /* set all valid bits in the bitmap */ k = bp->total; i = 0; /* Do a bunch at a time */ for(;k-i >= MALLOC_BITS; i += MALLOC_BITS) bp->bits[i / MALLOC_BITS] = ~0UL; for(; i < k; i++) bp->bits[i/MALLOC_BITS] |= 1UL<<(i%MALLOC_BITS); if (bp == bp->page) { /* Mark the ones we stole for ourselves */ for(i=0;l > 0;i++) { bp->bits[i/MALLOC_BITS] &= ~(1UL<<(i%MALLOC_BITS)); bp->free--; bp->total--; l -= (1 << bits); } } /* MALLOC_LOCK */ fxm->page_dir[ptr2index(fxm, pp)] = bp; bp->next = fxm->page_dir[bits]; fxm->page_dir[bits] = bp; /* MALLOC_UNLOCK */ return 1; } /* * Allocate a fragment */ static void * malloc_bytes(fxmalloc_P fxm, size_t size) { int i,j; u_long u; struct pginfo *bp; int k; u_long *lp; /* Don't bother with anything less than this */ if (size < malloc_minsize) size = malloc_minsize; /* Find the right bucket */ j = 1; i = size-1; while (i >>= 1) j++; /* If it's empty, make a page more of that size chunks */ if (!fxm->page_dir[j] && !malloc_make_chunks(fxm, j)) return 0; bp = fxm->page_dir[j]; /* Find first word of bitmap which isn't empty */ for (lp = bp->bits; !*lp; lp++) ; /* Find that bit, and tweak it */ u = 1; k = 0; while (!(*lp & u)) { u += u; k++; } *lp ^= u; /* If there are no more free, remove from free-list */ if (!--bp->free) { fxm->page_dir[j] = bp->next; bp->next = 0; } /* Adjust to the real offset of that chunk */ k += (lp-bp->bits)*MALLOC_BITS; k <<= bp->shift; if (fxm->malloc_junk) memset((char *)bp->page + 
k, SOME_JUNK, bp->size); return (u_char *)bp->page + k; } /* * Allocate a piece of memory */ static void * imalloc(fxmalloc_P fxm, size_t size) { void *result; if (!fxm->malloc_started) malloc_init(fxm); if (fxm->suicide) abort(); if ((size + malloc_pagesize) < size) /* Check for overflow */ result = 0; else if (size <= malloc_maxsize) result = malloc_bytes(fxm, size); else result = malloc_pages(fxm, size); if (fxm->malloc_abort && !result) wrterror(fxm, "allocation failed.\n"); if (fxm->malloc_zero && result) memset(result, 0, size); return result; } /* * Change the size of an allocation. */ static void * irealloc(fxmalloc_P fxm, void *ptr, size_t size) { void *p; u_long osize, index; struct pginfo **mp; int i; if (fxm->suicide) abort(); if (!fxm->malloc_started) { wrtwarning(fxm, "malloc() has never been called.\n"); return 0; } index = ptr2index(fxm, ptr); if (index < malloc_pageshift) { wrtwarning(fxm, "junk pointer, too low to make sense.\n"); return 0; } if (index > fxm->last_index) { wrtwarning(fxm, "junk pointer, too high to make sense.\n"); return 0; } mp = &fxm->page_dir[index]; if (*mp == MALLOC_FIRST) { /* Page allocation */ /* Check the pointer */ if ((u_long)ptr & malloc_pagemask) { wrtwarning(fxm, "modified (page-) pointer.\n"); return 0; } /* Find the size in bytes */ for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;) osize += malloc_pagesize; if (!fxm->malloc_realloc && /* unless we have to, */ size <= osize && /* .. or are too small, */ size > (osize - malloc_pagesize)) { /* .. or can free a page, */ return ptr; /* don't do anything. 
*/ } } else if (*mp >= MALLOC_MAGIC) { /* Chunk allocation */ /* Check the pointer for sane values */ if (((u_long)ptr & ((*mp)->size-1))) { wrtwarning(fxm, "modified (chunk-) pointer.\n"); return 0; } /* Find the chunk index in the page */ i = ((u_long)ptr & malloc_pagemask) >> (*mp)->shift; /* Verify that it isn't a free chunk already */ if ((*mp)->bits[i/MALLOC_BITS] & (1UL<<(i%MALLOC_BITS))) { wrtwarning(fxm, "chunk is already free.\n"); return 0; } osize = (*mp)->size; if (!fxm->malloc_realloc && /* Unless we have to, */ size < osize && /* ..or are too small, */ (size > osize/2 || /* ..or could use a smaller size, */ osize == malloc_minsize)) { /* ..(if there is one) */ return ptr; /* ..Don't do anything */ } } else { wrtwarning(fxm, "pointer to wrong page.\n"); return 0; } p = imalloc(fxm, size); if (p) { /* copy the lesser of the two sizes, and free the old one */ if (osize < size) memcpy(p, ptr, osize); else memcpy(p, ptr, size); ifree(fxm, ptr); } return p; } /* * Free a sequence of pages */ static __inline__ void free_pages(fxmalloc_P fxm, void *ptr, int index, struct pginfo *info) { int i; struct pgfree *pf, *pt=0; u_long l; void *tail; if (info == MALLOC_FREE) { wrtwarning(fxm, "page is already free.\n"); return; } if (info != MALLOC_FIRST) { wrtwarning(fxm, "pointer to wrong page.\n"); return; } if ((u_long)ptr & malloc_pagemask) { wrtwarning(fxm, "modified (page-) pointer.\n"); return; } /* Count how many pages and mark them free at the same time */ fxm->page_dir[index] = MALLOC_FREE; for (i = 1; fxm->page_dir[index+i] == MALLOC_FOLLOW; i++) fxm->page_dir[index + i] = MALLOC_FREE; l = i << malloc_pageshift; if (fxm->malloc_junk) memset(ptr, SOME_JUNK, l); #ifdef __FreeBSD__ if (fxm->malloc_hint) madvise(ptr, l, MADV_FREE); #endif tail = (char *)ptr+l; /* add to free-list */ if (!fxm->px) fxm->px = imalloc(fxm, sizeof(*(fxm->px))); /* This cannot fail... 
*/ fxm->px->page = ptr; fxm->px->end = tail; fxm->px->size = l; if (!fxm->free_list.next) { /* Nothing on free list, put this at head */ fxm->px->next = fxm->free_list.next; fxm->px->prev = &fxm->free_list; fxm->free_list.next = fxm->px; pf = fxm->px; fxm->px = 0; } else { /* Find the right spot, leave pf pointing to the modified entry. */ tail = (char *)ptr+l; for(pf = fxm->free_list.next; pf->end < ptr && pf->next; pf = pf->next) ; /* Race ahead here */ if (pf->page > tail) { /* Insert before entry */ fxm->px->next = pf; fxm->px->prev = pf->prev; pf->prev = fxm->px; fxm->px->prev->next = fxm->px; pf = fxm->px; fxm->px = 0; } else if (pf->end == ptr ) { /* Append to the previous entry */ pf->end = (char *)pf->end + l; pf->size += l; if (pf->next && pf->end == pf->next->page ) { /* And collapse the next too. */ pt = pf->next; pf->end = pt->end; pf->size += pt->size; pf->next = pt->next; if (pf->next) pf->next->prev = pf; } } else if (pf->page == tail) { /* Prepend to entry */ pf->size += l; pf->page = ptr; } else if (!pf->next) { /* Append at tail of chain */ fxm->px->next = 0; fxm->px->prev = pf; pf->next = fxm->px; pf = fxm->px; fxm->px = 0; } else { wrterror(fxm, "freelist is destroyed.\n"); } } /* Return something to OS ? */ if (!pf->next && /* If we're the last one, */ pf->size > fxm->malloc_cache && /* ..and the cache is full, */ pf->end == fxm->malloc_brk && /* ..and none behind us, */ fxm->malloc_brk == sbrk(0)) { /* ..and it's OK to do... */ /* * Keep the cache intact. Notice that the '>' above guarantees that * the pf will always have at least one page afterwards. */ pf->end = (char *)pf->page + fxm->malloc_cache; pf->size = fxm->malloc_cache; brk(pf->end); fxm->malloc_brk = pf->end; index = ptr2index(fxm, pf->end); fxm->last_index = index - 1; for(i=index;i <= fxm->last_index;) fxm->page_dir[i++] = MALLOC_NOT_MINE; /* XXX: We could realloc/shrink the pagedir here I guess. 
*/ } if (pt) ifree(fxm, pt); } /* * Free a chunk, and possibly the page it's on, if the page becomes empty. */ /* ARGSUSED */ static __inline__ void free_bytes(fxmalloc_P fxm, void *ptr, int index, struct pginfo *info) { int i; struct pginfo **mp; void *vp; /* Find the chunk number on the page */ i = ((u_long)ptr & malloc_pagemask) >> info->shift; if (((u_long)ptr & (info->size-1))) { wrtwarning(fxm, "modified (chunk-) pointer.\n"); return; } if (info->bits[i/MALLOC_BITS] & (1UL<<(i%MALLOC_BITS))) { wrtwarning(fxm, "chunk is already free.\n"); return; } if (fxm->malloc_junk) memset(ptr, SOME_JUNK, info->size); info->bits[i/MALLOC_BITS] |= 1UL<<(i%MALLOC_BITS); info->free++; mp = fxm->page_dir + info->shift; if (info->free == 1) { /* Page became non-full */ mp = fxm->page_dir + info->shift; /* Insert in address order */ while (*mp && (*mp)->next && (*mp)->next->page < info->page) mp = &(*mp)->next; info->next = *mp; *mp = info; return; } if (info->free != info->total) return; /* Find & remove this page in the queue */ while (*mp != info) { mp = &((*mp)->next); #if MALLOC_EXTRA_SANITY if (!*mp) wrterror(fxm, "(ES): Not on queue\n"); #endif /* MALLOC_EXTRA_SANITY */ } *mp = info->next; /* Free the page & the info structure if need be */ fxm->page_dir[ptr2index(fxm, info->page)] = MALLOC_FIRST; vp = info->page; /* Order is important ! */ if(vp != (void*)info) ifree(fxm, info); ifree(fxm, vp); } static void ifree(fxmalloc_P fxm, void *ptr) { struct pginfo *info; int index; /* This is legal */ if (!ptr) return; if (!fxm->malloc_started) { wrtwarning(fxm, "malloc() has never been called.\n"); return; } /* If we're already sinking, don't make matters any worse. 
*/ if (fxm->suicide) return; index = ptr2index(fxm, ptr); if (index < malloc_pageshift) { wrtwarning(fxm, "junk pointer, too low to make sense.\n"); return; } if (index > fxm->last_index) { wrtwarning(fxm, "junk pointer, too high to make sense.\n"); return; } info = fxm->page_dir[index]; if (info < MALLOC_MAGIC) free_pages(fxm, ptr, index, info); else free_bytes(fxm, ptr, index, info); return; } /* * These are the public exported interface routines. */ void * fxmalloc(fxmalloc_P fxm, size_t size) { void *r; fxm->malloc_func = " in malloc():"; THREAD_LOCK(); if (fxm->malloc_active++) { wrtwarning(fxm, "recursive call.\n"); fxm->malloc_active--; return (0); } r = imalloc(fxm, size); UTRACE(fxm, 0, size, r); fxm->malloc_active--; THREAD_UNLOCK(); if (fxm->malloc_xmalloc && !r) wrterror(fxm, "out of memory.\n"); return (r); } void fxfree(fxmalloc_P fxm, void *ptr) { fxm->malloc_func = " in free():"; THREAD_LOCK(); if (fxm->malloc_active++) { wrtwarning(fxm, "recursive call.\n"); fxm->malloc_active--; THREAD_UNLOCK(); return; } ifree(fxm, ptr); UTRACE(fxm, ptr, 0, 0); fxm->malloc_active--; THREAD_UNLOCK(); return; } void * fxrealloc(fxmalloc_P fxm, void *ptr, size_t size) { void *r; fxm->malloc_func = " in realloc():"; THREAD_LOCK(); if (fxm->malloc_active++) { wrtwarning(fxm, "recursive call.\n"); fxm->malloc_active--; return (0); } if (!ptr) { r = imalloc(fxm, size); } else { r = irealloc(fxm, ptr, size); } UTRACE(fxm, ptr, size, r); fxm->malloc_active--; THREAD_UNLOCK(); if (fxm->malloc_xmalloc && !r) wrterror(fxm, "out of memory.\n"); return (r); } #endif /* MAP_ANON */