xref: /minix/lib/libc/stdlib/malloc.c (revision dda632a2)
1 /*	$NetBSD: malloc.c,v 1.54 2011/05/18 01:59:39 christos Exp $	*/
2 
3 /*
4  * ----------------------------------------------------------------------------
5  * "THE BEER-WARE LICENSE" (Revision 42):
6  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
7  * can do whatever you want with this stuff. If we meet some day, and you think
8  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
9  * ----------------------------------------------------------------------------
10  *
11  * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
12  *
13  */
14 
15 #ifdef __minix
16 #ifdef _LIBSYS
17 #include <minix/sysutil.h>
18 #include <machine/vmparam.h>
19 #define MALLOC_NO_SYSCALLS
20 #define wrtwarning(w) printf("libminc malloc warning: %s\n", w)
21 #define wrterror(w) panic("libminc malloc error: %s\n", w)
22 #endif
23 #endif
24 
25 /*
26  * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
27  * to internal conditions and consistency in malloc.c. This has a
28  * noticeable runtime performance hit, and generally will not do you
29  * any good unless you fiddle with the internals of malloc or want
30  * to catch random pointer corruption as early as possible.
31  */
32 #ifndef MALLOC_EXTRA_SANITY
33 #undef MALLOC_EXTRA_SANITY
34 #endif
35 
36 /*
37  * What to use for Junk.  This is the byte value we use to fill with
38  * when the 'J' option is enabled.
39  */
40 #define SOME_JUNK	0xd0		/* as in "Duh" :-) */
41 
42 /*
43  * The basic parameters you can tweak.
44  *
45  * malloc_minsize	minimum size of an allocation in bytes.
46  *			If this is too small it's too much work
47  *			to manage them.  This is also the smallest
48  *			unit of alignment used for the storage
49  *			returned by malloc/realloc.
50  *
51  */
52 
53 #include "namespace.h"
54 #if defined(__FreeBSD__)
55 #   if defined(__i386__)
56 #       define malloc_minsize		16U
57 #   endif
58 #   if defined(__ia64__)
59 #	define malloc_pageshift		13U
60 #	define malloc_minsize		16U
61 #   endif
62 #   if defined(__alpha__)
63 #       define malloc_pageshift		13U
64 #       define malloc_minsize		16U
65 #   endif
66 #   if defined(__sparc64__)
67 #       define malloc_pageshift		13U
68 #       define malloc_minsize		16U
69 #   endif
70 #   if defined(__amd64__)
71 #       define malloc_pageshift		12U
72 #       define malloc_minsize		16U
73 #   endif
74 #   if defined(__arm__)
75 #       define malloc_pageshift         12U
76 #       define malloc_minsize           16U
77 #   endif
78 #   define HAS_UTRACE
79 #   define UTRACE_LABEL
80 
81 #include <sys/cdefs.h>
82 void utrace(struct ut *, int);
83 
84     /*
85      * Make malloc/free/realloc thread-safe in libc for use with
86      * kernel threads.
87      */
88 #   include "libc_private.h"
89 #   include "spinlock.h"
90     static spinlock_t thread_lock	= _SPINLOCK_INITIALIZER;
91 #   define _MALLOC_LOCK()		if (__isthreaded) _SPINLOCK(&thread_lock);
92 #   define _MALLOC_UNLOCK()		if (__isthreaded) _SPINUNLOCK(&thread_lock);
93 #endif /* __FreeBSD__ */
94 
95 /* #undef these things so that malloc uses the non-internal symbols.
96  * This is necessary for VM to be able to define its own versions, and
97  * use this malloc.
98  */
99 #undef mmap
100 #undef munmap
101 
102 #include <sys/types.h>
103 #if defined(__NetBSD__)
104 #   define malloc_minsize               16U
105 #   define HAS_UTRACE
106 #   define UTRACE_LABEL "malloc",
107 #include <sys/cdefs.h>
108 #include "extern.h"
109 #if defined(LIBC_SCCS) && !defined(lint)
110 __RCSID("$NetBSD: malloc.c,v 1.54 2011/05/18 01:59:39 christos Exp $");
111 #endif /* LIBC_SCCS and not lint */
112 int utrace(const char *, void *, size_t);
113 
114 #include <reentrant.h>
115 extern int __isthreaded;
116 static mutex_t thread_lock = MUTEX_INITIALIZER;
117 #define _MALLOC_LOCK()	if (__isthreaded) mutex_lock(&thread_lock);
118 #define _MALLOC_UNLOCK()	if (__isthreaded) mutex_unlock(&thread_lock);
119 #endif /* __NetBSD__ */
120 
121 #if defined(__sparc__) && defined(sun)
122 #   define malloc_minsize		16U
123 #   define MAP_ANON			(0)
124     static int fdzero;
125 #   define MMAP_FD	fdzero
126 #   define INIT_MMAP() \
127 	{ if ((fdzero = open(_PATH_DEVZERO, O_RDWR, 0000)) == -1) \
128 	    wrterror("open of /dev/zero"); }
129 #endif /* __sparc__ */
130 
131 /* Insert your combination here... */
132 #if defined(__FOOCPU__) && defined(__BAROS__)
133 #   define malloc_minsize		16U
134 #endif /* __FOOCPU__ && __BAROS__ */
135 
136 #ifndef ZEROSIZEPTR
137 #define ZEROSIZEPTR	((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
138 #endif
139 
140 /*
141  * No user serviceable parts behind this point.
142  */
143 #include <sys/types.h>
144 #include <sys/mman.h>
145 #include <errno.h>
146 #include <fcntl.h>
147 #include <paths.h>
148 #include <stddef.h>
149 #include <stdio.h>
150 #include <stdlib.h>
151 #include <string.h>
152 #include <unistd.h>
153 
154 /*
155  * This structure describes a page worth of chunks.
156  */
157 
struct pginfo {
    struct pginfo	*next;	/* next on the free list */
    void		*page;	/* Pointer to the page */
    u_short		size;	/* size of this page's chunks, in bytes */
    u_short		shift;	/* How far to shift for this size chunks */
    u_short		free;	/* How many free chunks */
    u_short		total;	/* How many chunks in total */
    u_int		bits[1]; /* Which chunks are free (bitmap, 1 = free;
				  * extends past the declared [1] size) */
};
167 
168 /*
169  * This structure describes a number of free pages.
170  */
171 
struct pgfree {
    struct pgfree	*next;	/* next run of free pages (kept in address order) */
    struct pgfree	*prev;	/* prev run of free pages */
    void		*page;	/* pointer to free pages */
    void		*end;	/* pointer to end of free pages */
    size_t		size;	/* number of bytes free */
};
179 
180 /*
181  * How many bits per u_int in the bitmap.
182  * Change only if not 8 bits/byte
183  */
184 #define	MALLOC_BITS	((int)(8*sizeof(u_int)))
185 
186 /*
187  * Magic values to put in the page_directory
188  */
189 #define MALLOC_NOT_MINE	((struct pginfo*) 0)
190 #define MALLOC_FREE 	((struct pginfo*) 1)
191 #define MALLOC_FIRST	((struct pginfo*) 2)
192 #define MALLOC_FOLLOW	((struct pginfo*) 3)
193 #define MALLOC_MAGIC	((struct pginfo*) 4)
194 
195 /*
196  * Page size related parameters, computed at run-time.
197  */
198 static size_t malloc_pagesize;
199 static size_t malloc_pageshift;
200 static size_t malloc_pagemask;
201 
202 #ifndef malloc_minsize
203 #define malloc_minsize			16U
204 #endif
205 
206 #ifndef malloc_maxsize
207 #define malloc_maxsize			((malloc_pagesize)>>1)
208 #endif
209 
210 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
211 #define ptr2idx(foo) \
212     (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
213 
214 #ifndef _MALLOC_LOCK
215 #define _MALLOC_LOCK()
216 #endif
217 
218 #ifndef _MALLOC_UNLOCK
219 #define _MALLOC_UNLOCK()
220 #endif
221 
222 #ifndef MMAP_FD
223 #define MMAP_FD (-1)
224 #endif
225 
226 #ifndef INIT_MMAP
227 #define INIT_MMAP()
228 #endif
229 
230 #ifndef __minix
231 #ifndef MADV_FREE
232 #define MADV_FREE MADV_DONTNEED
233 #endif
234 #endif /* !__minix */
235 
236 /* Number of free pages we cache */
237 static size_t malloc_cache = 16;
238 
239 /* The offset from pagenumber to index into the page directory */
240 static size_t malloc_origo;
241 
242 /* The last index in the page directory we care about */
243 static size_t last_idx;
244 
245 /* Pointer to page directory. Allocated "as if with" malloc */
246 static struct	pginfo **page_dir;
247 
248 /* How many slots in the page directory */
249 static size_t	malloc_ninfo;
250 
251 /* Free pages line up here */
252 static struct pgfree free_list;
253 
254 /* Abort(), user doesn't handle problems.  */
255 static int malloc_abort;
256 
257 /* Are we trying to die ?  */
258 static int suicide;
259 
260 /* always realloc ?  */
261 static int malloc_realloc;
262 
263 /* pass the kernel a hint on free pages ?  */
264 #if defined(MADV_FREE)
265 static int malloc_hint = 0;
266 #endif
267 
268 /* xmalloc behaviour ?  */
269 static int malloc_xmalloc;
270 
271 /* sysv behaviour for malloc(0) ?  */
272 static int malloc_sysv;
273 
274 /* zero fill ?  */
275 static int malloc_zero;
276 
277 /* junk fill ?  */
278 static int malloc_junk;
279 
280 #ifdef HAS_UTRACE
281 
282 /* utrace ?  */
283 static int malloc_utrace;
284 
285 struct ut { void *p; size_t s; void *r; };
286 
287 #define UTRACE(a, b, c) \
288 	if (malloc_utrace) {			\
289 		struct ut u;			\
290 		u.p=a; u.s = b; u.r=c;		\
291 		utrace(UTRACE_LABEL (void *) &u, sizeof u);	\
292 	}
293 #else /* !HAS_UTRACE */
294 #define UTRACE(a,b,c)
295 #endif /* HAS_UTRACE */
296 
297 /* my last break. */
298 static void *malloc_brk;
299 
300 /* one location cache for free-list holders */
301 static struct pgfree *px;
302 
303 /* compile-time options */
304 const char *_malloc_options;
305 
306 /* Name of the current public function */
307 static const char *malloc_func;
308 
309 /* Macro for mmap */
310 #define MMAP(size) \
311 	mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
312 	    MMAP_FD, (off_t)0);
313 
314 /*
315  * Necessary function declarations
316  */
317 static int extend_pgdir(size_t idx);
318 static void *imalloc(size_t size);
319 static void ifree(void *ptr);
320 static void *irealloc(void *ptr, size_t size);
321 
322 #ifndef MALLOC_NO_SYSCALLS
/*
 * Write the four message fragments straight to stderr, in order,
 * bypassing stdio so this works even when the heap is corrupt.
 */
static void
wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
{
    const char *frag[4];
    int n;

    frag[0] = p1;
    frag[1] = p2;
    frag[2] = p3;
    frag[3] = p4;
    for (n = 0; n < 4; n++)
	write(STDERR_FILENO, frag[n], strlen(frag[n]));
}
332 
333 void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
334 	    const char *p4) = wrtmessage;
/*
 * Report a fatal malloc error through the user-settable message hook
 * and abort.  "suicide" is set first so that allocator calls made while
 * dying (e.g. by atexit/abort handlers) bail out early instead of
 * touching the damaged heap again.
 */
static void
wrterror(const char *p)
{

    suicide = 1;
    _malloc_message(getprogname(), malloc_func, " error: ", p);
    abort();
}
343 
/*
 * Report a non-fatal diagnostic.  Escalates to wrterror() — and thus
 * abort() — when the 'A' option is set or the process is deemed
 * security-sensitive.
 */
static void
wrtwarning(const char *p)
{

    /*
     * Sensitive processes, somewhat arbitrarily defined here as setuid,
     * setgid, root and wheel cannot afford to have malloc mistakes.
     */
    if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
	wrterror(p);
}
355 #endif
356 
357 /*
358  * Allocate a number of pages from the OS
359  */
360 static void *
361 map_pages(size_t pages)
362 {
363     caddr_t result, rresult, tail;
364     intptr_t bytes = pages << malloc_pageshift;
365 
366     if (bytes < 0 || (size_t)bytes < pages) {
367 	errno = ENOMEM;
368 	return NULL;
369     }
370 
371     if ((result = sbrk(bytes)) == (void *)-1)
372 	return NULL;
373 
374     /*
375      * Round to a page, in case sbrk(2) did not do this for us
376      */
377     rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
378     if (result < rresult) {
379 	/* make sure we have enough space to fit bytes */
380 	if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
381 	    /* we failed, put everything back */
382 	    if (brk(result)) {
383 		wrterror("brk(2) failed [internal error]\n");
384 	    }
385 	}
386     }
387     tail = rresult + (size_t)bytes;
388 
389     last_idx = ptr2idx(tail) - 1;
390     malloc_brk = tail;
391 
392     if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
393 	malloc_brk = result;
394 	last_idx = ptr2idx(malloc_brk) - 1;
395 	/* Put back break point since we failed. */
396 	if (brk(malloc_brk))
397 	    wrterror("brk(2) failed [internal error]\n");
398 	return 0;
399     }
400 
401     return rresult;
402 }
403 
/*
 * Extend page directory so it can describe pages up to and including
 * index 'idx'.  Returns 1 on success, 0 (with errno = ENOMEM) on
 * failure.
 */
static int
extend_pgdir(size_t idx)
{
    struct  pginfo **new, **old;
    size_t newlen, oldlen;

    /*
     * check for overflow: refuse an idx so large that the rounded-up
     * byte size computed below would wrap around.
     */
    if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
	+ (malloc_pagesize / sizeof *page_dir)) < idx) {
	errno = ENOMEM;
	return 0;
    }

    /* Make it this many pages */
    newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region.. There is nothing to prevent
     * us from accidentally re-mapping space that's been allocated by our caller
     * via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with.  Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped..  But this means
     * keeping open a "secret" file descriptor.....
     */

    /* Get new pages */
    new = MMAP(newlen);
    if (new == MAP_FAILED)
	return 0;

    /* Copy the old stuff */
    memcpy(new, page_dir, oldlen);

    /* register the new size */
    malloc_ninfo = newlen / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return 1;
}
460 
/*
 * Initialize the world: compute page-size parameters, parse the malloc
 * options, and map the initial page directory.  Runs once, from
 * pubrealloc()/posix_memalign() on first use.
 */
static void
malloc_init(void)
{
#ifndef MALLOC_NO_SYSCALLS
    const char *p;
    char b[64];
    size_t i;
    ssize_t j;
#endif
    int serrno = errno;		/* preserve caller's errno across init */
#ifndef MALLOC_NO_SYSCALLS

    /*
     * Compute page-size related variables.
     */
    malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
#else
    malloc_pagesize = PAGE_SIZE;
#endif
    malloc_pagemask = malloc_pagesize - 1;
    /* Derive log2(pagesize); assumes the page size is a power of two. */
    for (malloc_pageshift = 0;
	 (1UL << malloc_pageshift) != malloc_pagesize;
	 malloc_pageshift++)
	/* nothing */ ;

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

#ifndef MALLOC_NO_SYSCALLS
    /*
     * Option sources, in increasing priority: the /etc/malloc.conf
     * symlink target, the MALLOC_OPTIONS environment variable (skipped
     * for set-id programs), and the compiled-in _malloc_options string.
     */
    for (i = 0; i < 3; i++) {
	if (i == 0) {
	    j = readlink("/etc/malloc.conf", b, sizeof b - 1);
	    if (j == -1)
		continue;
	    b[j] = '\0';
	    p = b;
	} else if (i == 1 && issetugid() == 0) {
	    p = getenv("MALLOC_OPTIONS");
	} else if (i == 1) {
	    continue;
	} else {
	    p = _malloc_options;
	}
	for (; p != NULL && *p != '\0'; p++) {
	    switch (*p) {
		case '>': malloc_cache   <<= 1; break;
		case '<': malloc_cache   >>= 1; break;
		case 'a': malloc_abort   = 0; break;
		case 'A': malloc_abort   = 1; break;
#ifndef __minix
		case 'h': malloc_hint    = 0; break;
		case 'H': malloc_hint    = 1; break;
#endif /* !__minix */
		case 'r': malloc_realloc = 0; break;
		case 'R': malloc_realloc = 1; break;
		case 'j': malloc_junk    = 0; break;
		case 'J': malloc_junk    = 1; break;
#ifdef HAS_UTRACE
		case 'u': malloc_utrace  = 0; break;
		case 'U': malloc_utrace  = 1; break;
#endif
		case 'v': malloc_sysv    = 0; break;
		case 'V': malloc_sysv    = 1; break;
		case 'x': malloc_xmalloc = 0; break;
		case 'X': malloc_xmalloc = 1; break;
		case 'z': malloc_zero    = 0; break;
		case 'Z': malloc_zero    = 1; break;
		default:
		    _malloc_message(getprogname(), malloc_func,
			 " warning: ", "unknown char in MALLOC_OPTIONS\n");
		    break;
	    }
	}
    }
#endif

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
	malloc_junk = 1;

    /* Allocate one page for the page directory */
    page_dir = MMAP(malloc_pagesize);

    if (page_dir == MAP_FAILED)
	wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory;
     */
    malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
	>> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
	malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithly (kaleb@x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = imalloc(sizeof *px);

    errno = serrno;
}
583 
/*
 * Allocate a number of complete pages: satisfy the request from the
 * cached free-page list if possible, otherwise extend the break via
 * map_pages().  Returns NULL on failure.
 */
static void *
malloc_pages(size_t size)
{
    void *p, *delay_free = NULL;
    size_t i;
    struct pgfree *pf;
    size_t idx;

    /* Round up to whole pages; a wrap here means the size overflowed. */
    idx = pageround(size);
    if (idx < size) {
	errno = ENOMEM;
	return NULL;
    } else
	size = idx;

    p = NULL;

    /* Look for free pages before asking for more */
    for(pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
	if (pf->size & malloc_pagemask)
	    wrterror("(ES): junk length entry on free_list.\n");
	if (!pf->size)
	    wrterror("(ES): zero length entry on free_list.\n");
	if (pf->page == pf->end)
	    wrterror("(ES): zero entry on free_list.\n");
	if (pf->page > pf->end)
	    wrterror("(ES): sick entry on free_list.\n");
	if ((void*)pf->page >= (void*)sbrk(0))
	    wrterror("(ES): entry on free_list past brk.\n");
	if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
	    wrterror("(ES): non-free first page on free-list.\n");
	if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
	    wrterror("(ES): non-free last page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

	if (pf->size < size)
	    continue;

	if (pf->size == size) {
	    /* Exact fit: unlink the run; recycle its holder below. */
	    p = pf->page;
	    if (pf->next != NULL)
		    pf->next->prev = pf->prev;
	    pf->prev->next = pf->next;
	    delay_free = pf;
	    break;
	}

	/* Larger run: carve our pages off the front of it. */
	p = pf->page;
	pf->page = (char *)pf->page + size;
	pf->size -= size;
	break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
	wrterror("(ES): allocated non-free page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

    size >>= malloc_pageshift;		/* bytes -> page count */

    /* Map new pages */
    if (p == NULL)
	p = map_pages(size);

    if (p != NULL) {

	/* Mark the run: first page MALLOC_FIRST, rest MALLOC_FOLLOW. */
	idx = ptr2idx(p);
	page_dir[idx] = MALLOC_FIRST;
	for (i=1;i<size;i++)
	    page_dir[idx+i] = MALLOC_FOLLOW;

	if (malloc_junk)
	    memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    /*
     * Dispose of the unlinked holder only after the free list is
     * consistent again; keep it as the cached "px" holder if empty.
     */
    if (delay_free) {
	if (px == NULL)
	    px = delay_free;
	else
	    ifree(delay_free);
    }

    return p;
}
673 
/*
 * Allocate a page of fragments
 */

/*
 * Carve one fresh page into (1<<bits)-byte chunks and push its pginfo
 * onto the head of the page_dir[bits] bucket list.  Returns 1 on
 * success, 0 if no memory was available.
 */
static inline int
malloc_make_chunks(int bits)
{
    struct  pginfo *bp;
    void *pp;
    int i, k;
    long l;

    /* Allocate a new bucket */
    pp = malloc_pages(malloc_pagesize);
    if (pp == NULL)
	return 0;

    /* Find length of admin structure */
    l = (long)offsetof(struct pginfo, bits[0]);
    l += (long)sizeof bp->bits[0] *
	(((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /* Don't waste more than two chunks on this */
    if ((1<<(bits)) <= l+l) {
	/* Chunks are small: keep the pginfo inside the page itself. */
	bp = (struct  pginfo *)pp;
    } else {
	bp = imalloc((size_t)l);
	if (bp == NULL) {
	    ifree(pp);
	    return 0;
	}
    }

    bp->size = (1<<bits);
    bp->shift = bits;
    bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
    bp->page = pp;

    /* set all valid bits in the bitmap */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
	bp->bits[i / MALLOC_BITS] = ~0U;

    for(; i < k; i++)
        bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

    if (bp == bp->page) {
	/* Mark the ones we stole for ourselves */
	for(i = 0; l > 0; i++) {
	    bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
	    bp->free--;
	    bp->total--;
	    l -= (long)(1 << bits);
	}
    }

    /* MALLOC_LOCK */

    page_dir[ptr2idx(pp)] = bp;

    /*
     * Slots page_dir[1 .. malloc_pageshift-1] double as the per-size
     * bucket heads; real page indices never fall that low (see the
     * malloc_origo adjustment in malloc_init()).
     */
    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}
744 
/*
 * Allocate a fragment: a sub-page chunk of the power-of-two bucket
 * that fits 'size'.  Returns NULL if a new bucket page could not be
 * obtained.
 */
static void *
malloc_bytes(size_t size)
{
    size_t i;
    int j;
    u_int u;
    struct  pginfo *bp;
    size_t k;
    u_int *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
	size = malloc_minsize;


    /* Find the right bucket: j = ceil(log2(size)) */
    j = 1;
    i = size-1;
    while (i >>= 1)
	j++;

    /* If it's empty, make a page more of that size chunks */
    if (page_dir[j] == NULL && !malloc_make_chunks(j))
	return NULL;

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    for (lp = bp->bits; !*lp; lp++)
	;

    /* Find that bit, and tweak it */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
	u += u;
	k++;
    }
    *lp ^= u;		/* claim the chunk: clear its free bit */

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
	page_dir[j] = bp->next;
	bp->next = NULL;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk)
	memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);

    return (u_char *)bp->page + k;
}
803 
804 /*
805  * Allocate a piece of memory
806  */
807 static void *
808 imalloc(size_t size)
809 {
810     void *result;
811 
812     if (suicide)
813 	abort();
814 
815     if ((size + malloc_pagesize) < size)	/* Check for overflow */
816 	result = NULL;
817     else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
818 	result = NULL;
819     else if (size <= malloc_maxsize)
820 	result = malloc_bytes(size);
821     else
822 	result = malloc_pages(size);
823 
824     if (malloc_abort && result == NULL)
825 	wrterror("allocation failed.\n");
826 
827     if (malloc_zero && result != NULL)
828 	memset(result, 0, size);
829 
830     return result;
831 }
832 
/*
 * Change the size of an allocation.  Tries to keep the block in place
 * when the new size still fits its current page run or chunk;
 * otherwise allocates, copies, and frees the old block.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    size_t osize, idx;
    struct pginfo **mp;
    size_t i;

    if (suicide)
	abort();

    idx = ptr2idx(ptr);

    /* Indices below malloc_pageshift are bucket heads, never pages. */
    if (idx < malloc_pageshift) {
	wrtwarning("junk pointer, too low to make sense.\n");
	return 0;
    }

    if (idx > last_idx) {
	wrtwarning("junk pointer, too high to make sense.\n");
	return 0;
    }

    mp = &page_dir[idx];

    if (*mp == MALLOC_FIRST) {			/* Page allocation */

	/* Check the pointer */
	if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
	    wrtwarning("modified (page-) pointer.\n");
	    return NULL;
	}

	/* Find the size in bytes */
	for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
	    osize += malloc_pagesize;

        if (!malloc_realloc && 			/* unless we have to, */
	  size <= osize && 			/* .. or are too small, */
	  size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
	    if (malloc_junk)
		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
	    return ptr;				/* don't do anything. */
	}

    } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */

	/* Check the pointer for sane values */
	if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
	    wrtwarning("modified (chunk-) pointer.\n");
	    return NULL;
	}

	/* Find the chunk index in the page */
	i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;

	/* Verify that it isn't a free chunk already */
        if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
	    wrtwarning("chunk is already free.\n");
	    return NULL;
	}

	osize = (*mp)->size;

	if (!malloc_realloc &&		/* Unless we have to, */
	  size <= osize && 		/* ..or are too small, */
	  (size > osize / 2 ||	 	/* ..or could use a smaller size, */
	  osize == malloc_minsize)) {	/* ..(if there is one) */
	    if (malloc_junk)
		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
	    return ptr;			/* ..Don't do anything */
	}

    } else {
	wrtwarning("pointer to wrong page.\n");
	return NULL;
    }

    /* In-place resize was not possible: allocate, copy, free. */
    p = imalloc(size);

    if (p != NULL) {
	/* copy the lesser of the two sizes, and free the old one */
	if (!size || !osize)
	    ;
	else if (osize < size)
	    memcpy(p, ptr, osize);
	else
	    memcpy(p, ptr, size);
	ifree(ptr);
    }
    return p;
}
928 
/*
 * Free a sequence of pages
 */

/*
 * Return a MALLOC_FIRST page run to the free-page list, coalescing
 * with adjacent runs, and possibly shrink the break to hand excess
 * pages back to the kernel.  'idx' is ptr2idx(ptr); 'info' is the
 * page_dir entry for it.
 */
static inline void
free_pages(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pgfree *pf, *pt=NULL;
    size_t l;
    void *tail;

    if (info == MALLOC_FREE) {
	wrtwarning("page is already free.\n");
	return;
    }

    if (info != MALLOC_FIRST) {
	wrtwarning("pointer to wrong page.\n");
	return;
    }

    if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
	wrtwarning("modified (page-) pointer.\n");
	return;
    }

    /* Count how many pages and mark them free at the same time */
    page_dir[idx] = MALLOC_FREE;
    for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
	page_dir[idx + i] = MALLOC_FREE;

    l = i << malloc_pageshift;		/* length of the run, in bytes */

    if (malloc_junk)
	memset(ptr, SOME_JUNK, l);

#ifndef __minix
    if (malloc_hint)
	madvise(ptr, l, MADV_FREE);
#endif /* !__minix */

    tail = (char *)ptr+l;

    /* add to free-list */
    if (px == NULL)
	px = imalloc(sizeof *px);	/* This cannot fail... */
    px->page = ptr;
    px->end =  tail;
    px->size = l;
    if (free_list.next == NULL) {

	/* Nothing on free list, put this at head */
	px->next = free_list.next;
	px->prev = &free_list;
	free_list.next = px;
	pf = px;
	px = NULL;

    } else {

	/* Find the right spot, leave pf pointing to the modified entry. */
	tail = (char *)ptr+l;

	for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
	    pf = pf->next)
	    ; /* Race ahead here */

	if (pf->page > tail) {
	    /* Insert before entry */
	    px->next = pf;
	    px->prev = pf->prev;
	    pf->prev = px;
	    px->prev->next = px;
	    pf = px;
	    px = NULL;
	} else if (pf->end == ptr ) {
	    /* Append to the previous entry */
	    pf->end = (char *)pf->end + l;
	    pf->size += l;
	    if (pf->next != NULL && pf->end == pf->next->page ) {
		/* And collapse the next too. */
		pt = pf->next;
		pf->end = pt->end;
		pf->size += pt->size;
		pf->next = pt->next;
		if (pf->next != NULL)
		    pf->next->prev = pf;
	    }
	} else if (pf->page == tail) {
	    /* Prepend to entry */
	    pf->size += l;
	    pf->page = ptr;
	} else if (pf->next == NULL) {
	    /* Append at tail of chain */
	    px->next = NULL;
	    px->prev = pf;
	    pf->next = px;
	    pf = px;
	    px = NULL;
	} else {
	    wrterror("freelist is destroyed.\n");
	}
    }

    /* Return something to OS ? */
    if (pf->next == NULL &&			/* If we're the last one, */
      pf->size > malloc_cache &&		/* ..and the cache is full, */
      pf->end == malloc_brk &&			/* ..and none behind us, */
      malloc_brk == sbrk((intptr_t)0)) {	/* ..and it's OK to do... */

	/*
	 * Keep the cache intact.  Notice that the '>' above guarantees that
	 * the pf will always have at least one page afterwards.
	 */
	pf->end = (char *)pf->page + malloc_cache;
	pf->size = malloc_cache;

	brk(pf->end);
	malloc_brk = pf->end;

	idx = ptr2idx(pf->end);

	/* Pages beyond the new break are no longer ours. */
	for(i=idx;i <= last_idx;)
	    page_dir[i++] = MALLOC_NOT_MINE;

	last_idx = idx - 1;

	/* XXX: We could realloc/shrink the pagedir here I guess. */
    }
    /* Free the holder of the run we coalesced away, if any. */
    if (pt != NULL)
	ifree(pt);
}
1062 
/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */

static inline void
free_bytes(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;

    /* The pointer must sit exactly on a chunk boundary. */
    if (((size_t)(uintptr_t)ptr & (info->size-1))) {
	wrtwarning("modified (chunk-) pointer.\n");
	return;
    }

    if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
	wrtwarning("chunk is already free.\n");
	return;
    }

    if (malloc_junk)
	memset(ptr, SOME_JUNK, (size_t)info->size);

    /* Mark the chunk free in the bitmap. */
    info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
    info->free++;

    mp = page_dir + info->shift;	/* bucket head for this chunk size */

    if (info->free == 1) {

	/* Page became non-full */

	mp = page_dir + info->shift;
	/* Insert in address order */
	while (*mp && (*mp)->next && (*mp)->next->page < info->page)
	    mp = &(*mp)->next;
	info->next = *mp;
	*mp = info;
	return;
    }

    if (info->free != info->total)
	return;

    /* Find & remove this page in the queue */
    while (*mp != info) {
	mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
	if (!*mp)
		wrterror("(ES): Not on queue.\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[idx] = MALLOC_FIRST;
    /* If the pginfo lives inside the page itself, free the page last. */
    vp = info->page;		/* Order is important ! */
    if(vp != (void*)info)
	ifree(info);
    ifree(vp);
}
1128 
1129 static void
1130 ifree(void *ptr)
1131 {
1132     struct pginfo *info;
1133     size_t idx;
1134 
1135     /* This is legal */
1136     if (ptr == NULL)
1137 	return;
1138 
1139     /* If we're already sinking, don't make matters any worse. */
1140     if (suicide)
1141 	return;
1142 
1143     idx = ptr2idx(ptr);
1144 
1145     if (idx < malloc_pageshift) {
1146 	wrtwarning("junk pointer, too low to make sense.\n");
1147 	return;
1148     }
1149 
1150     if (idx > last_idx) {
1151 	wrtwarning("junk pointer, too high to make sense.\n");
1152 	return;
1153     }
1154 
1155     info = page_dir[idx];
1156 
1157     if (info < MALLOC_MAGIC)
1158         free_pages(ptr, idx, info);
1159     else
1160 	free_bytes(ptr, idx, info);
1161     return;
1162 }
1163 
static int malloc_active; /* Recursion flag for public interface. */
static unsigned malloc_started; /* Set when initialization has been done */
1166 
/*
 * Common entry point for malloc/free/realloc: serializes access,
 * performs one-time initialization, detects recursive (signal-handler)
 * calls, maps size-0 requests onto ZEROSIZEPTR (or NULL under 'V'),
 * and dispatches to imalloc/irealloc/ifree.
 */
static void *
pubrealloc(void *ptr, size_t size, const char *func)
{
    void *r;
    int err = 0;

    /*
     * If a thread is inside our code with a functional lock held, and then
     * catches a signal which calls us again, we would get a deadlock if the
     * lock is not of a recursive type.
     */
    _MALLOC_LOCK();
    malloc_func = func;
    if (malloc_active > 0) {
	if (malloc_active == 1) {
	    wrtwarning("recursive call\n");
	    malloc_active = 2;
	}
        _MALLOC_UNLOCK();
	errno = EINVAL;
	return (NULL);
    }
    malloc_active = 1;

    if (!malloc_started) {
        if (ptr != NULL) {
	    /* Can't free/realloc what was never allocated by us. */
	    wrtwarning("malloc() has never been called\n");
	    malloc_active = 0;
            _MALLOC_UNLOCK();
	    errno = EINVAL;
	    return (NULL);
	}
	malloc_init();
	malloc_started = 1;
    }

    /* ZEROSIZEPTR is the token previously handed out for size 0. */
    if (ptr == ZEROSIZEPTR)
	ptr = NULL;
    if (malloc_sysv && !size) {
	/* SysV semantics ('V' option): size 0 yields NULL. */
	if (ptr != NULL)
	    ifree(ptr);
	r = NULL;
    } else if (!size) {
	if (ptr != NULL)
	    ifree(ptr);
	r = ZEROSIZEPTR;
    } else if (ptr == NULL) {
	r = imalloc(size);
	err = (r == NULL);
    } else {
        r = irealloc(ptr, size);
	err = (r == NULL);
    }
    UTRACE(ptr, size, r);
    malloc_active = 0;
    _MALLOC_UNLOCK();
    if (malloc_xmalloc && err)
	wrterror("out of memory\n");
    if (err)
	errno = ENOMEM;
    return (r);
}
1229 
1230 /*
1231  * These are the public exported interface routines.
1232  */
1233 
/*
 * Public malloc(3): delegate to pubrealloc() with a NULL old pointer.
 */
void *
malloc(size_t size)
{
    void *p;

    p = pubrealloc(NULL, size, " in malloc():");
    return p;
}
1240 
/*
 * Public posix_memalign(3): returns 0 and stores the block in *memptr,
 * or EINVAL/ENOMEM without touching errno.  Only alignments that are
 * powers of two between sizeof(void *) and the page size are accepted.
 * NOTE(review): the malloc_started check here runs without _MALLOC_LOCK,
 * unlike pubrealloc() — presumably matching upstream; verify for
 * threaded first use.
 */
int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
    int err;
    void *result;

    if (!malloc_started) {
	    malloc_init();
	    malloc_started = 1;
    }
    /* Make sure that alignment is a large enough power of 2. */
    if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
	alignment > malloc_pagesize)
	    return EINVAL;

    /*
     * (size | alignment) is enough to assure the requested alignment, since
     * the allocator always allocates power-of-two blocks.
     */
    err = errno; /* Protect errno against changes in pubrealloc(). */
    result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
    errno = err;

    if (result == NULL)
	return ENOMEM;

    *memptr = result;
    return 0;
}
1270 
/*
 * Public calloc(3): allocate num*size bytes, zeroed, guarding the
 * multiplication against size_t overflow.
 */
void *
calloc(size_t num, size_t size)
{
    void *p = NULL;

    /* Detect size_t overflow in num * size before allocating. */
    if (size != 0 && (num * size) / size != num) {
	errno = ENOMEM;
    } else {
	p = pubrealloc(NULL, num * size, " in calloc():");
	if (p != NULL)
	    memset(p, 0, num * size);
    }

    return p;
}
1289 
/*
 * Public free(3): size 0 through pubrealloc() releases the block.
 */
void
free(void *ptr)
{
    (void)pubrealloc(ptr, 0, " in free():");
}
1296 
/*
 * Public realloc(3): delegate straight to pubrealloc().
 */
void *
realloc(void *ptr, size_t size)
{
    void *p;

    p = pubrealloc(ptr, size, " in realloc():");
    return p;
}
1303 
1304 /*
1305  * Begin library-private functions, used by threading libraries for protection
1306  * of malloc during fork().  These functions are only called if the program is
1307  * running in threaded mode, so there is no need to check whether the program
1308  * is threaded here.
1309  */
1310 
void
_malloc_prefork(void)
{

	/* Hold the allocator lock across fork() so no thread forks
	 * mid-allocation, leaving the child's heap lock stuck. */
	_MALLOC_LOCK();
}
1317 
void
_malloc_postfork(void)
{

	/* Release the lock taken by _malloc_prefork() after fork(). */
	_MALLOC_UNLOCK();
}
1324