/*	$OpenBSD: malloc.c,v 1.197 2016/09/21 04:38:56 guenther Exp $	*/
/*
 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return. Poul-Henning Kamp
 */

/* #define MALLOC_STATS */

#include <sys/types.h>
#include <sys/param.h>	/* PAGE_SHIFT ALIGN */
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <sys/tree.h>
#include <fcntl.h>
#endif

#include "thread_private.h"
#include <tib.h>

#if defined(__mips64__)
#define MALLOC_PAGESHIFT	(14U)
#else
#define MALLOC_PAGESHIFT	(PAGE_SHIFT)
#endif

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNK_MASK	15
#define MALLOC_INITIAL_REGIONS	512
#define MALLOC_DEFAULT_CACHE	64
#define	MALLOC_CHUNK_LISTS	4

/*
 * When the P option is active, we move allocations whose size is
 * between half a page and a whole page towards the end of the page,
 * subject to alignment constraints. This is the extra headroom we
 * allow. Set to zero to be the most strict.
 */
#define MALLOC_LEEWAY		0

#define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
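
/*
 * A worked example (assuming 4k pages): PAGEROUND(1) and
 * PAGEROUND(4096) both yield 4096, while PAGEROUND(4097) yields 8192;
 * adding MALLOC_PAGEMASK and masking rounds up to a page boundary.
 */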

/*
 * What to use for junk: the byte value we fill with when the 'J'
 * option is enabled. SOME_JUNK is written right after allocation,
 * SOME_FREEJUNK right before free.
 */
#define SOME_JUNK		0xd0	/* as in "Duh" :-) */
#define SOME_FREEJUNK		0xdf

#define MMAP(sz)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, 0)

#define MMAPA(a,sz)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, 0)

#define MQUERY(a, sz)	mquery((a), (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0)

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
#ifdef MALLOC_STATS
	void *f;		/* where allocated from */
#endif
};

LIST_HEAD(chunk_head, chunk_info);

struct dir_info {
	u_int32_t canary1;
	int active;			/* status of malloc */
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
					/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
					/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
	size_t free_regions_size;	/* free pages cached */
					/* free pages cache */
	struct region_info free_regions[MALLOC_MAXCACHE];
					/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
	size_t rbytesused;		/* random bytes used */
	char *func;			/* current function */
	int mutex;
	u_char rbytes[32];		/* random bytes */
	u_short chunk_start;
#ifdef MALLOC_STATS
	size_t inserts;
	size_t insert_collisions;
	size_t finds;
	size_t find_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
	size_t malloc_used;		/* bytes allocated */
	size_t malloc_guarded;		/* bytes used for guards */
#define STATS_ADD(x,y)	((x) += (y))
#define STATS_SUB(x,y)	((x) -= (y))
#define STATS_INC(x)	((x)++)
#define STATS_ZERO(x)	((x) = 0)
#define STATS_SETF(x,y)	((x)->f = (y))
#else
#define STATS_ADD(x,y)	/* nothing */
#define STATS_SUB(x,y)	/* nothing */
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#define STATS_SETF(x,y)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
			~MALLOC_PAGEMASK)

/*
 * This structure describes a page worth of chunks.
 *
 * How many bits per u_short in the bitmap.
 */
#define MALLOC_BITS		(NBBY * sizeof(u_short))
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;			/* pointer to the page */
	u_int32_t canary;
	u_short size;			/* size of this page's chunks */
	u_short shift;			/* how far to shift for this size */
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks */
					/* which chunks are free */
	u_short bits[1];
};

struct malloc_readonly {
	struct dir_info *malloc_pool[_MALLOC_MUTEXES];	/* Main bookkeeping information */
	int	malloc_mt;		/* multi-threaded mode? */
	int	malloc_freenow;		/* Free quickly - disable chunk randomization */
	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
	int	malloc_hint;		/* call madvise on free pages? */
	int	malloc_junk;		/* junk fill? */
	int	malloc_move;		/* move allocations to end of page? */
	int	malloc_realloc;		/* always realloc? */
	int	malloc_xmalloc;		/* xmalloc behaviour? */
	size_t	malloc_canaries;	/* use canaries after chunks? */
	size_t	malloc_guard;		/* use guard pages after allocations? */
	u_int	malloc_cache;		/* free pages we cache */
#ifdef MALLOC_STATS
	int	malloc_stats;		/* dump statistics at end */
#endif
	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
	uintptr_t malloc_chunk_canary;
};

/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
#define mopts	malloc_readonly.mopts

char		*malloc_options;	/* compile-time options */

static u_char getrbyte(struct dir_info *d);

#ifdef MALLOC_STATS
void malloc_dump(int, struct dir_info *);
PROTO_NORMAL(malloc_dump);
static void malloc_exit(void);
#define CALLER	__builtin_return_address(0)
#else
#define CALLER	NULL
#endif

/*
 * Low bits of r->p determine size: 0 means >= page size and r->size
 * holds the real size, otherwise the low bits are a shift count, or 1
 * for malloc(0).
 */
#define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
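
/*
 * Decoding examples (illustrative): low bits 0 mean r->size holds the
 * byte count of a page-sized region; low bits 1 mark a malloc(0)
 * region, which decodes to size 0; low bits 7 mark a page of chunks
 * of 1 << (7 - 1) == 64 bytes each.
 */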

static inline void
_MALLOC_LEAVE(struct dir_info *d)
{
	if (mopts.malloc_mt) {
		d->active--;
		_MALLOC_UNLOCK(d->mutex);
	}
}

static inline void
_MALLOC_ENTER(struct dir_info *d)
{
	if (mopts.malloc_mt) {
		_MALLOC_LOCK(d->mutex);
		d->active++;
	}
}

static inline size_t
hash(void *p)
{
	size_t sum;
	uintptr_t u;

	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u;
	sum = (sum << 7) - sum + (u >> 16);
#ifdef __LP64__
	sum = (sum << 7) - sum + (u >> 32);
	sum = (sum << 7) - sum + (u >> 48);
#endif
	return sum;
}
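
/*
 * Note that (sum << 7) - sum is sum * 127, so this is a multiplicative
 * hash that folds in the page number shifted by 16, 32 and 48 bits.
 */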

static inline
struct dir_info *getpool(void)
{
	if (!mopts.malloc_mt)
		return mopts.malloc_pool[0];
	else
		return mopts.malloc_pool[TIB_GET()->tib_tid &
		    (_MALLOC_MUTEXES - 1)];
}

static __dead void
wrterror(struct dir_info *d, char *msg, void *p)
{
	char		*q = " error: ";
	struct iovec	iov[7];
	char		pidbuf[20];
	char		buf[20];
	int		saved_errno = errno, i;

	iov[0].iov_base = __progname;
	iov[0].iov_len = strlen(__progname);
	iov[1].iov_base = pidbuf;
	snprintf(pidbuf, sizeof(pidbuf), "(%d) in ", getpid());
	iov[1].iov_len = strlen(pidbuf);
	if (d != NULL) {
		iov[2].iov_base = d->func;
		iov[2].iov_len = strlen(d->func);
	} else {
		iov[2].iov_base = "unknown";
		iov[2].iov_len = 7;
	}
	iov[3].iov_base = q;
	iov[3].iov_len = strlen(q);
	iov[4].iov_base = msg;
	iov[4].iov_len = strlen(msg);
	iov[5].iov_base = buf;
	if (p == NULL)
		iov[5].iov_len = 0;
	else {
		snprintf(buf, sizeof(buf), " %p", p);
		iov[5].iov_len = strlen(buf);
	}
	iov[6].iov_base = "\n";
	iov[6].iov_len = 1;
	writev(STDERR_FILENO, iov, 7);

#ifdef MALLOC_STATS
	if (mopts.malloc_stats)
		for (i = 0; i < _MALLOC_MUTEXES; i++)
			malloc_dump(STDERR_FILENO, mopts.malloc_pool[i]);
#endif /* MALLOC_STATS */

	errno = saved_errno;

	abort();
}

static void
rbytes_init(struct dir_info *d)
{
	arc4random_buf(d->rbytes, sizeof(d->rbytes));
	/* add 1 to account for using d->rbytes[0] */
	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
}

static inline u_char
getrbyte(struct dir_info *d)
{
	u_char x;

	if (d->rbytesused >= sizeof(d->rbytes))
		rbytes_init(d);
	x = d->rbytes[d->rbytesused++];
	return x;
}

/*
 * Cache maintenance. We keep at most malloc_cache pages cached.
 * If the cache is becoming full, unmap pages in the cache for real,
 * and then add the region to the cache. Unlike the regular region
 * data structure, the sizes in the cache are in MALLOC_PAGESIZE units.
 */
static void
unmap(struct dir_info *d, void *p, size_t sz)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	size_t rsz, tounmap;
	struct region_info *r;
	u_int i, offset;

	if (sz != PAGEROUND(sz))
		wrterror(d, "munmap round", NULL);

	if (psz > mopts.malloc_cache) {
		i = munmap(p, sz);
		if (i)
			wrterror(d, "munmap", p);
		STATS_SUB(d->malloc_used, sz);
		return;
	}
	tounmap = 0;
	rsz = mopts.malloc_cache - d->free_regions_size;
	if (psz > rsz)
		tounmap = psz - rsz;
	offset = getrbyte(d);
	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror(d, "munmap", r->p);
			r->p = NULL;
			if (tounmap > r->size)
				tounmap -= r->size;
			else
				tounmap = 0;
			d->free_regions_size -= r->size;
			r->size = 0;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
	if (tounmap > 0)
		wrterror(d, "malloc cache underflow", NULL);
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p == NULL) {
			if (mopts.malloc_junk && !mopts.malloc_freeunmap) {
				size_t amt = mopts.malloc_junk == 1 ?
				    MALLOC_MAXCHUNK : sz;
				memset(p, SOME_FREEJUNK, amt);
			}
			if (mopts.malloc_hint)
				madvise(p, sz, MADV_FREE);
			if (mopts.malloc_freeunmap)
				mprotect(p, sz, PROT_NONE);
			r->p = p;
			r->size = psz;
			d->free_regions_size += psz;
			break;
		}
	}
	if (i == mopts.malloc_cache)
		wrterror(d, "malloc free slot lost", NULL);
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror(d, "malloc cache overflow", NULL);
}

static void
zapcacheregion(struct dir_info *d, void *p, size_t len)
{
	u_int i;
	struct region_info *r;
	size_t rsz;

	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[i];
		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror(d, "munmap", r->p);
			r->p = NULL;
			d->free_regions_size -= r->size;
			r->size = 0;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
}

static void *
map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	struct region_info *r, *big = NULL;
	u_int i, offset;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt", NULL);
	if (sz != PAGEROUND(sz))
		wrterror(d, "map round", NULL);

	if (!hint && psz > d->free_regions_size) {
		_MALLOC_LEAVE(d);
		p = MMAP(sz);
		_MALLOC_ENTER(d);
		if (p != MAP_FAILED)
			STATS_ADD(d->malloc_used, sz);
		/* zero fill not needed */
		return p;
	}
	offset = getrbyte(d);
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			if (hint && r->p != hint)
				continue;
			if (r->size == psz) {
				p = r->p;
				r->p = NULL;
				r->size = 0;
				d->free_regions_size -= psz;
				if (mopts.malloc_freeunmap)
					mprotect(p, sz, PROT_READ | PROT_WRITE);
				if (mopts.malloc_hint)
					madvise(p, sz, MADV_NORMAL);
				if (zero_fill)
					memset(p, 0, sz);
				else if (mopts.malloc_junk == 2 &&
				    mopts.malloc_freeunmap)
					memset(p, SOME_FREEJUNK, sz);
				return p;
			} else if (r->size > psz)
				big = r;
		}
	}
	if (big != NULL) {
		r = big;
		p = r->p;
		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
		if (mopts.malloc_freeunmap)
			mprotect(p, sz, PROT_READ | PROT_WRITE);
		if (mopts.malloc_hint)
			madvise(p, sz, MADV_NORMAL);
		r->size -= psz;
		d->free_regions_size -= psz;
		if (zero_fill)
			memset(p, 0, sz);
		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
			memset(p, SOME_FREEJUNK, sz);
		return p;
	}
	if (hint)
		return MAP_FAILED;
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror(d, "malloc cache", NULL);
	_MALLOC_LEAVE(d);
	p = MMAP(sz);
	_MALLOC_ENTER(d);
	if (p != MAP_FAILED)
		STATS_ADD(d->malloc_used, sz);
	/* zero fill not needed */
	return p;
}

static void
omalloc_parseopt(char opt)
{
	switch (opt) {
	case '>':
		mopts.malloc_cache <<= 1;
		if (mopts.malloc_cache > MALLOC_MAXCACHE)
			mopts.malloc_cache = MALLOC_MAXCACHE;
		break;
	case '<':
		mopts.malloc_cache >>= 1;
		break;
	case 'a':
	case 'A':
		/* ignored */
		break;
	case 'c':
		mopts.malloc_canaries = 0;
		break;
	case 'C':
		mopts.malloc_canaries = sizeof(void *);
		break;
#ifdef MALLOC_STATS
	case 'd':
		mopts.malloc_stats = 0;
		break;
	case 'D':
		mopts.malloc_stats = 1;
		break;
#endif /* MALLOC_STATS */
	case 'f':
		mopts.malloc_freenow = 0;
		mopts.malloc_freeunmap = 0;
		break;
	case 'F':
		mopts.malloc_freenow = 1;
		mopts.malloc_freeunmap = 1;
		break;
	case 'g':
		mopts.malloc_guard = 0;
		break;
	case 'G':
		mopts.malloc_guard = MALLOC_PAGESIZE;
		break;
	case 'h':
		mopts.malloc_hint = 0;
		break;
	case 'H':
		mopts.malloc_hint = 1;
		break;
	case 'j':
		if (mopts.malloc_junk > 0)
			mopts.malloc_junk--;
		break;
	case 'J':
		if (mopts.malloc_junk < 2)
			mopts.malloc_junk++;
		break;
	case 'n':
	case 'N':
		break;
	case 'p':
		mopts.malloc_move = 0;
		break;
	case 'P':
		mopts.malloc_move = 1;
		break;
	case 'r':
		mopts.malloc_realloc = 0;
		break;
	case 'R':
		mopts.malloc_realloc = 1;
		break;
	case 'u':
		mopts.malloc_freeunmap = 0;
		break;
	case 'U':
		mopts.malloc_freeunmap = 1;
		break;
	case 'x':
		mopts.malloc_xmalloc = 0;
		break;
	case 'X':
		mopts.malloc_xmalloc = 1;
		break;
	default: {
		static const char q[] = "malloc() warning: "
		    "unknown char in MALLOC_OPTIONS\n";
		write(STDERR_FILENO, q, sizeof(q) - 1);
		break;
	}
	}
}

static void
omalloc_init(void)
{
	char *p, *q, b[64];
	int i, j;

	/*
	 * Default options
	 */
	mopts.malloc_junk = 1;
	mopts.malloc_move = 1;
	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;

	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j <= 0)
				continue;
			b[j] = '\0';
			p = b;
			break;
		case 1:
			if (issetugid() == 0)
				p = getenv("MALLOC_OPTIONS");
			else
				continue;
			break;
		case 2:
			p = malloc_options;
			break;
		default:
			p = NULL;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case 'S':
				for (q = "CGJ"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.malloc_cache = 0;
				break;
			case 's':
				for (q = "cgj"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
				break;
			default:
				omalloc_parseopt(*p);
				break;
			}
		}
	}

#ifdef MALLOC_STATS
	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
		static const char q[] = "malloc() warning: atexit(2) failed."
		    " Will not be able to dump stats on exit\n";
		write(STDERR_FILENO, q, sizeof(q) - 1);
	}
#endif /* MALLOC_STATS */

	while ((mopts.malloc_canary = arc4random()) == 0)
		;

	arc4random_buf(&mopts.malloc_chunk_canary,
	    sizeof(mopts.malloc_chunk_canary));
}

/*
 * Initialize a dir_info, which should have been cleared by caller
 */
static void
omalloc_poolinit(struct dir_info **dp)
{
	void *p;
	size_t d_avail, regioninfo_size;
	struct dir_info *d;
	int i, j;

	/*
	 * Allocate dir_info with a guard page on either side. Also
	 * randomise offset inside the page at which the dir_info
	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
	 */
	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
		wrterror(NULL, "malloc init mmap failed", NULL);
	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
	mprotect((char *)p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
	    MALLOC_PAGESIZE, PROT_NONE);
	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
	d = (struct dir_info *)((char *)p + MALLOC_PAGESIZE +
	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));

	rbytes_init(d);
	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
	regioninfo_size = d->regions_total * sizeof(struct region_info);
	d->r = MMAP(regioninfo_size);
	if (d->r == MAP_FAILED) {
		d->regions_total = 0;
		wrterror(NULL, "malloc init mmap failed", NULL);
	}
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		LIST_INIT(&d->chunk_info_list[i]);
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
			LIST_INIT(&d->chunk_dir[i][j]);
	}
	STATS_ADD(d->malloc_used, regioninfo_size);
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;

	*dp = d;
}

static int
omalloc_grow(struct dir_info *d)
{
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
		return 1;

	newtotal = d->regions_total * 2;
	newsize = newtotal * sizeof(struct region_info);
	mask = newtotal - 1;

	p = MMAP(newsize);
	if (p == MAP_FAILED)
		return 1;

	STATS_ADD(d->malloc_used, newsize);
	STATS_ZERO(d->inserts);
	STATS_ZERO(d->insert_collisions);
	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			STATS_INC(d->inserts);
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
				STATS_INC(d->insert_collisions);
			}
			p[index] = d->r[i];
		}
	}
	/* avoid pages containing meta info ending up in the cache */
	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
		wrterror(d, "munmap", d->r);
	else
		STATS_SUB(d->malloc_used,
		    d->regions_total * sizeof(struct region_info));
	d->regions_free = d->regions_free + d->regions_total;
	d->regions_total = newtotal;
	d->r = p;
	return 0;
}

static struct chunk_info *
alloc_chunk_info(struct dir_info *d, int bits)
{
	struct chunk_info *p;
	size_t size, count;

	if (bits == 0)
		count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
	else
		count = MALLOC_PAGESIZE >> bits;

	size = howmany(count, MALLOC_BITS);
	size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
	size = ALIGN(size);

	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
		char *q;
		int i;

		q = MMAP(MALLOC_PAGESIZE);
		if (q == MAP_FAILED)
			return NULL;
		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
		count = MALLOC_PAGESIZE / size;
		for (i = 0; i < count; i++, q += size)
			LIST_INSERT_HEAD(&d->chunk_info_list[bits],
			    (struct chunk_info *)q, entries);
	}
	p = LIST_FIRST(&d->chunk_info_list[bits]);
	LIST_REMOVE(p, entries);
	memset(p, 0, size);
	p->canary = d->canary1;
	return p;
}


/*
 * The hashtable uses the assumption that p is never NULL. This holds since
 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
 */
static int
insert(struct dir_info *d, void *p, size_t sz, void *f)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	STATS_INC(d->inserts);
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
		STATS_INC(d->insert_collisions);
	}
	d->r[index].p = p;
	d->r[index].size = sz;
#ifdef MALLOC_STATS
	d->r[index].f = f;
#endif
	d->regions_free--;
	return 0;
}

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt", NULL);
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	STATS_INC(d->finds);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
		STATS_INC(d->find_collisions);
	}
	return (q == p && r != NULL) ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror(d, "regions_total not 2^x", NULL);
	d->regions_free++;
	STATS_INC(d->deletes);

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			STATS_INC(d->delete_moves);
			break;
		}
	}
}
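
/*
 * The cyclic test above preserves the probe invariant: the entry at
 * slot i, whose home slot is r, may be moved into the emptied slot j
 * only if r does not lie cyclically in [i, j); otherwise the entry
 * would sit before its home slot on the backwards probe path and
 * become unreachable.
 */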

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
{
	struct chunk_info *bp;
	void		*pp;
	int		i, k;

	/* Allocate a new bucket */
	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	bp = alloc_chunk_info(d, bits);
	if (bp == NULL) {
		unmap(d, pp, MALLOC_PAGESIZE);
		return NULL;
	}

	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0) {
		bp->size = 0;
		bp->shift = 1;
		i = MALLOC_MINSIZE - 1;
		while (i >>= 1)
			bp->shift++;
		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
		bp->page = pp;

		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
		if (k < 0) {
			unmap(d, pp, MALLOC_PAGESIZE);
			LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
			return NULL;
		}
	} else {
		bp->size = 1U << bits;
		bp->shift = bits;
		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
		bp->page = pp;
	}

	/* set all valid bits in the bitmap */
	k = bp->total;
	i = 0;

	/* Do a bunch at a time */
	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = (u_short)~0U;

	for (; i < k; i++)
		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);

	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);

	bits++;
	if ((uintptr_t)pp & bits)
		wrterror(d, "pp & bits", pp);

	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
	return bp;
}


/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size, void *f)
{
	int		i, j, listnum;
	size_t		k;
	u_short		u, *lp;
	struct chunk_info *bp;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt", NULL);
	/*
	 * Don't bother with anything less than this,
	 * unless we have a malloc(0) request.
	 */
	if (size != 0 && size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;

	/* Find the right bucket */
	if (size == 0)
		j = 0;
	else {
		j = MALLOC_MINSHIFT;
		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
		while (i >>= 1)
			j++;
	}
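
	/*
	 * Illustrative mapping (assuming MALLOC_MINSHIFT == 4): requests
	 * of 1..16 bytes land in bucket 4, 17..32 in bucket 5, and so
	 * on; bucket j serves chunks of 1 << j bytes.
	 */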

	listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
	/* If it's empty, make a page more of that size chunks */
	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
		bp = omalloc_make_chunks(d, j, listnum);
		if (bp == NULL)
			return NULL;
	}

	if (bp->canary != d->canary1)
		wrterror(d, "chunk info corrupted", NULL);

	i = d->chunk_start;
	if (bp->free > 1)
		i += getrbyte(d);
	if (i >= bp->total)
		i &= bp->total - 1;
	for (;;) {
		for (;;) {
			lp = &bp->bits[i / MALLOC_BITS];
			if (!*lp) {
				i += MALLOC_BITS;
				i &= ~(MALLOC_BITS - 1);
				if (i >= bp->total)
					i = 0;
			} else
				break;
		}
		k = i % MALLOC_BITS;
		u = 1 << k;
		if (*lp & u)
			break;
		if (++i >= bp->total)
			i = 0;
	}
	d->chunk_start += i + 1;
#ifdef MALLOC_STATS
	if (i == 0) {
		struct region_info *r = find(d, bp->page);
		r->f = f;
	}
#endif

	*lp ^= u;

	/* If there are no more free, remove from free-list */
	if (!--bp->free)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;
	k <<= bp->shift;

	if (mopts.malloc_canaries && bp->size > 0) {
		char *end = (char *)bp->page + k + bp->size;
		uintptr_t *canary = (uintptr_t *)(end - mopts.malloc_canaries);
		*canary = mopts.malloc_chunk_canary ^ hash(canary);
	}

	if (mopts.malloc_junk == 2 && bp->size > 0)
		memset((char *)bp->page + k, SOME_JUNK,
		    bp->size - mopts.malloc_canaries);
	return ((char *)bp->page + k);
}

static uint32_t
find_chunknum(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_info *info;
	uint32_t chunknum;

	info = (struct chunk_info *)r->size;
	if (info->canary != d->canary1)
		wrterror(d, "chunk info corrupted", NULL);

	if (mopts.malloc_canaries && info->size > 0) {
		char *end = (char *)ptr + info->size;
		uintptr_t *canary = (uintptr_t *)(end - mopts.malloc_canaries);
		if (*canary != (mopts.malloc_chunk_canary ^ hash(canary)))
			wrterror(d, "chunk canary corrupted", ptr);
	}

	/* Find the chunk number on the page */
	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;

	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
		wrterror(d, "modified chunk-pointer", ptr);
	if (info->bits[chunknum / MALLOC_BITS] &
	    (1U << (chunknum % MALLOC_BITS)))
		wrterror(d, "chunk is already free", ptr);
	return chunknum;
}
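
/*
 * The canary verified above is the one malloc_bytes() wrote at the end
 * of the chunk: the per-process secret xor-ed with the hash of the
 * canary's own address, so a canary copied verbatim into another chunk
 * no longer verifies.
 */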

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	uint32_t chunknum;
	int listnum;

	info = (struct chunk_info *)r->size;
	if ((chunknum = find_chunknum(d, r, ptr)) == -1)
		return;

	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
	info->free++;

	if (info->free == 1) {
		/* Page became non-full */
		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
		if (info->size != 0)
			mp = &d->chunk_dir[info->shift][listnum];
		else
			mp = &d->chunk_dir[0][listnum];

		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}

	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->size == 0 && !mopts.malloc_freeunmap)
		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE);

	delete(d, r);
	if (info->size != 0)
		mp = &d->chunk_info_list[info->shift];
	else
		mp = &d->chunk_info_list[0];
	LIST_INSERT_HEAD(mp, info, entries);
}


static void *
omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
{
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			errno = ENOMEM;
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(pool, NULL, psz, zero_fill);
		if (p == MAP_FAILED) {
			errno = ENOMEM;
			return NULL;
		}
		if (insert(pool, p, sz, f)) {
			unmap(pool, p, psz);
			errno = ENOMEM;
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror(pool, "mprotect", NULL);
			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
		}

		if (mopts.malloc_move &&
		    sz - mopts.malloc_guard < MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			/* fill whole allocation */
			if (mopts.malloc_junk == 2)
				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
			/* shift towards the end */
			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
			/*
			 * Zero-fill if requested; the junk fill above
			 * may have overwritten the zeros.
			 */
			if (zero_fill && mopts.malloc_junk == 2)
				memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (mopts.malloc_junk == 2) {
				if (zero_fill)
					memset((char *)p + sz - mopts.malloc_guard,
					    SOME_JUNK, psz - sz);
				else
					memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			}
		}

	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(pool, sz, f);
		if (zero_fill && p != NULL && sz > 0)
			memset(p, 0, sz - mopts.malloc_canaries);
	}

	return p;
}

/*
 * Common function for handling recursion.  Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(struct dir_info *d)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror(d, "recursive call", NULL);
	}
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = EDEADLK;
}

void
_malloc_init(int from_rthreads)
{
	int i, max;
	struct dir_info *d;

	_MALLOC_LOCK(0);
	if (!from_rthreads && mopts.malloc_pool[0]) {
		_MALLOC_UNLOCK(0);
		return;
	}
	if (!mopts.malloc_canary)
		omalloc_init();

	max = from_rthreads ? _MALLOC_MUTEXES : 1;
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly),
		    PROT_READ | PROT_WRITE);
	for (i = 0; i < max; i++) {
		if (mopts.malloc_pool[i])
			continue;
		omalloc_poolinit(&d);
		d->mutex = i;
		mopts.malloc_pool[i] = d;
	}

	if (from_rthreads)
		mopts.malloc_mt = 1;

	/*
	 * Options have been set and will never be reset.
	 * Prevent further tampering with them.
	 */
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
	_MALLOC_UNLOCK(0);
}

void *
malloc(size_t size)
{
	void *r;
	struct dir_info *d;
	int saved_errno = errno;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "malloc():";

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}
	if (size > 0 && size <= MALLOC_MAXCHUNK)
		size += mopts.malloc_canaries;
	r = omalloc(d, size, 0, CALLER);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory", NULL);
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(malloc);*/

static void
validate_junk(struct dir_info *pool, void *p)
{
	struct region_info *r;
	size_t byte, sz;

	if (p == NULL)
		return;
	r = find(pool, p);
	if (r == NULL)
		wrterror(pool, "bogus pointer in validate_junk", p);
	REALSIZE(sz, r);
	if (sz > 0 && sz <= MALLOC_MAXCHUNK)
		sz -= mopts.malloc_canaries;
	if (sz > 32)
		sz = 32;
	for (byte = 0; byte < sz; byte++) {
		if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
			wrterror(pool, "use after free", p);
	}
}

static void
ofree(struct dir_info *argpool, void *p)
{
	struct dir_info *pool;
	struct region_info *r;
	size_t sz;
	int i;

	pool = argpool;
	r = find(pool, p);
	if (r == NULL) {
		if (mopts.malloc_mt) {
			for (i = 0; i < _MALLOC_MUTEXES; i++) {
				if (i == argpool->mutex)
					continue;
				pool->active--;
				_MALLOC_UNLOCK(pool->mutex);
				pool = mopts.malloc_pool[i];
				_MALLOC_LOCK(pool->mutex);
				pool->active++;
				r = find(pool, p);
				if (r != NULL)
					break;
			}
		}
		if (r == NULL)
			wrterror(pool, "bogus pointer (double free?)", p);
	}

	REALSIZE(sz, r);
	if (sz > MALLOC_MAXCHUNK) {
		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
		    MALLOC_LEEWAY) {
			if (r->p != p)
				wrterror(pool, "bogus pointer", p);
		} else {
#if notyetbecause_of_realloc
			/* shifted towards the end */
			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
			    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
			    ~(MALLOC_MINSIZE-1))) {
			}
#endif
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror(pool, "guard size", NULL);
			if (!mopts.malloc_freeunmap) {
				if (mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror(pool, "mprotect", NULL);
			}
			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
		}
		unmap(pool, p, PAGEROUND(sz));
		delete(pool, r);
	} else {
		void *tmp;
		int i;

		if (mopts.malloc_junk && sz > 0)
			memset(p, SOME_FREEJUNK, sz - mopts.malloc_canaries);
		if (!mopts.malloc_freenow) {
			if (find_chunknum(pool, r, p) == -1)
				goto done;
			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
			tmp = p;
			p = pool->delayed_chunks[i];
			if (tmp == p)
				wrterror(pool, "double free", p);
			if (mopts.malloc_junk)
				validate_junk(pool, p);
			pool->delayed_chunks[i] = tmp;
		}
		if (p != NULL) {
			r = find(pool, p);
			if (r == NULL)
				wrterror(pool, "bogus pointer (double free?)", p);
			free_bytes(pool, r, p);
		}
	}
done:
	if (argpool != pool) {
		pool->active--;
		_MALLOC_UNLOCK(pool->mutex);
		_MALLOC_LOCK(argpool->mutex);
		argpool->active++;
	}
}

void
free(void *ptr)
{
	struct dir_info *d;
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	d = getpool();
	if (d == NULL)
		wrterror(d, "free() called before allocation", NULL);
	_MALLOC_LOCK(d->mutex);
	d->func = "free():";
	if (d->active++) {
		malloc_recurse(d);
		return;
	}
	ofree(d, ptr);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = saved_errno;
}
/*DEF_STRONG(free);*/


static void *
orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
{
	struct dir_info *pool;
	struct region_info *r;
	size_t oldsz, goldsz, gnewsz;
	void *q, *ret;
	int i;

	pool = argpool;

	if (p == NULL)
		return omalloc(pool, newsz, 0, f);

	r = find(pool, p);
	if (r == NULL) {
		if (mopts.malloc_mt) {
			for (i = 0; i < _MALLOC_MUTEXES; i++) {
				if (i == argpool->mutex)
					continue;
				pool->active--;
				_MALLOC_UNLOCK(pool->mutex);
				pool = mopts.malloc_pool[i];
				_MALLOC_LOCK(pool->mutex);
				pool->active++;
				r = find(pool, p);
				if (r != NULL)
					break;
			}
		}
		if (r == NULL)
			wrterror(pool, "bogus pointer (double free?)", p);
	}
	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		ret = NULL;
		goto done;
	}

	REALSIZE(oldsz, r);
	goldsz = oldsz;
	if (oldsz > MALLOC_MAXCHUNK) {
		if (oldsz < mopts.malloc_guard)
			wrterror(pool, "guard size", NULL);
		oldsz -= mopts.malloc_guard;
	}

	gnewsz = newsz;
	if (gnewsz > MALLOC_MAXCHUNK)
		gnewsz += mopts.malloc_guard;

	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
	    !mopts.malloc_realloc) {
		size_t roldsz = PAGEROUND(goldsz);
		size_t rnewsz = PAGEROUND(gnewsz);

		if (rnewsz > roldsz) {
			if (!mopts.malloc_guard) {
				void *hint = (char *)p + roldsz;
				size_t needed = rnewsz - roldsz;

				STATS_INC(pool->cheap_realloc_tries);
				q = map(pool, hint, needed, 0);
				if (q == hint)
					goto gotit;
				zapcacheregion(pool, hint, needed);
				q = MQUERY(hint, needed);
				if (q == hint)
					q = MMAPA(hint, needed);
				else
					q = MAP_FAILED;
				if (q == hint) {
gotit:
					STATS_ADD(pool->malloc_used, needed);
					if (mopts.malloc_junk == 2)
						memset(q, SOME_JUNK, needed);
					r->size = newsz;
					STATS_SETF(r, f);
					STATS_INC(pool->cheap_reallocs);
					ret = p;
					goto done;
				} else if (q != MAP_FAILED) {
					if (munmap(q, needed))
						wrterror(pool, "munmap", q);
				}
			}
		} else if (rnewsz < roldsz) {
			if (mopts.malloc_guard) {
				if (mprotect((char *)p + roldsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror(pool, "mprotect", NULL);
				if (mprotect((char *)p + rnewsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_NONE))
					wrterror(pool, "mprotect", NULL);
			}
			unmap(pool, (char *)p + rnewsz, roldsz - rnewsz);
			r->size = gnewsz;
			STATS_SETF(r, f);
			ret = p;
			goto done;
		} else {
			if (newsz > oldsz && mopts.malloc_junk == 2)
				memset((char *)p + newsz, SOME_JUNK,
				    rnewsz - mopts.malloc_guard - newsz);
			r->size = gnewsz;
			STATS_SETF(r, f);
			ret = p;
			goto done;
		}
	}
	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
		if (mopts.malloc_junk == 2 && newsz > 0) {
			size_t usable_oldsz = oldsz;
			if (oldsz <= MALLOC_MAXCHUNK)
				usable_oldsz -= mopts.malloc_canaries;
			if (newsz < usable_oldsz)
				memset((char *)p + newsz, SOME_JUNK,
				    usable_oldsz - newsz);
		}
		STATS_SETF(r, f);
		ret = p;
	} else if (newsz != oldsz || mopts.malloc_realloc) {
		q = omalloc(pool, newsz, 0, f);
		if (q == NULL) {
			ret = NULL;
			goto done;
		}
		if (newsz != 0 && oldsz != 0) {
			size_t copysz = oldsz < newsz ? oldsz : newsz;
			if (copysz <= MALLOC_MAXCHUNK)
				copysz -= mopts.malloc_canaries;
			memcpy(q, p, copysz);
		}
		ofree(pool, p);
		ret = q;
	} else {
		STATS_SETF(r, f);
		ret = p;
	}
done:
	if (argpool != pool) {
		pool->active--;
		_MALLOC_UNLOCK(pool->mutex);
		_MALLOC_LOCK(argpool->mutex);
		argpool->active++;
	}
	return ret;
}

void *
realloc(void *ptr, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "realloc():";
	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}
	if (size > 0 && size <= MALLOC_MAXCHUNK)
		size += mopts.malloc_canaries;
	r = orealloc(d, ptr, size, CALLER);

	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory", NULL);
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(realloc);*/


/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
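
/*
 * Sketch of the overflow test in calloc() below, assuming a 64-bit
 * size_t (MUL_NO_OVERFLOW == 2^32): nmemb * size can only overflow
 * when at least one factor is >= 2^32, so the cheap comparisons filter
 * out the common case and the division SIZE_MAX / nmemb runs only when
 * it might matter.
 */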

void *
calloc(size_t nmemb, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "calloc():";
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		_MALLOC_UNLOCK(d->mutex);
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory", NULL);
		errno = ENOMEM;
		return NULL;
	}

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}

	size *= nmemb;
	if (size > 0 && size <= MALLOC_MAXCHUNK)
		size += mopts.malloc_canaries;
	r = omalloc(d, size, 1, CALLER);

	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory", NULL);
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(calloc);*/

static void *
mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
{
	char *p, *q;

	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
		wrterror(d, "mapalign bad alignment", NULL);
	if (sz != PAGEROUND(sz))
		wrterror(d, "mapalign round", NULL);

	/*
	 * Allocate sz + alignment bytes of memory, which must include a
	 * subrange of size bytes that is properly aligned.  Unmap the
	 * other bytes, and then return that subrange.
	 */

	/* We need sz + alignment to fit into a size_t. */
	if (alignment > SIZE_MAX - sz)
		return MAP_FAILED;

	p = map(d, NULL, sz + alignment, zero_fill);
	if (p == MAP_FAILED)
		return MAP_FAILED;
	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
	if (q != p) {
		if (munmap(p, q - p))
			wrterror(d, "munmap", p);
	}
	if (munmap(q + sz, alignment - (q - p)))
		wrterror(d, "munmap", q + sz);
	STATS_SUB(d->malloc_used, alignment);

	return q;
}
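
/*
 * Bookkeeping of the trim above: q - p bytes are unmapped before the
 * aligned start q, and alignment - (q - p) bytes after q + sz, which
 * together always total exactly the alignment bytes of slack that were
 * over-allocated.
 */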

static void *
omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
    void *f)
{
	size_t psz;
	void *p;

	if (alignment <= MALLOC_PAGESIZE) {
		/*
		 * max(size, alignment) is enough to ensure the requested
		 * alignment, since the allocator always allocates
		 * power-of-two blocks.
		 */
		if (sz < alignment)
			sz = alignment;
		return omalloc(pool, sz, zero_fill, f);
	}

	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	sz += mopts.malloc_guard;
	psz = PAGEROUND(sz);

	p = mapalign(pool, alignment, psz, zero_fill);
	if (p == NULL) {
		errno = ENOMEM;
		return NULL;
	}

	if (insert(pool, p, sz, f)) {
		unmap(pool, p, psz);
		errno = ENOMEM;
		return NULL;
	}

	if (mopts.malloc_guard) {
		if (mprotect((char *)p + psz - mopts.malloc_guard,
		    mopts.malloc_guard, PROT_NONE))
			wrterror(pool, "mprotect", NULL);
		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
	}

	if (mopts.malloc_junk == 2) {
		if (zero_fill)
			memset((char *)p + sz - mopts.malloc_guard,
			    SOME_JUNK, psz - sz);
		else
			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
	}

	return p;
}

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	struct dir_info *d;
	int res, saved_errno = errno;
	void *r;

	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
		return EINVAL;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "posix_memalign():";
	if (d->active++) {
		malloc_recurse(d);
		goto err;
	}
	if (size > 0 && size <= MALLOC_MAXCHUNK)
		size += mopts.malloc_canaries;
	r = omemalign(d, alignment, size, 0, CALLER);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL) {
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory", NULL);
		goto err;
	}
	errno = saved_errno;
	*memptr = r;
	return 0;

err:
	res = errno;
	errno = saved_errno;
	return res;
}
/*DEF_STRONG(posix_memalign);*/

#ifdef MALLOC_STATS

struct malloc_leak {
	void (*f)();
	size_t total_size;
	int count;
};

struct leaknode {
	RB_ENTRY(leaknode) entry;
	struct malloc_leak d;
};

static int
leakcmp(struct leaknode *e1, struct leaknode *e2)
{
	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
}

static RB_HEAD(leaktree, leaknode) leakhead;
RB_GENERATE_STATIC(leaktree, leaknode, entry, leakcmp)

static void
putleakinfo(void *f, size_t sz, int cnt)
{
	struct leaknode key, *p;
	static struct leaknode *page;
	static int used;

	if (cnt == 0 || page == MAP_FAILED)
		return;

	key.d.f = f;
	p = RB_FIND(leaktree, &leakhead, &key);
	if (p == NULL) {
		if (page == NULL ||
		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
			page = MMAP(MALLOC_PAGESIZE);
			if (page == MAP_FAILED)
				return;
			used = 0;
		}
		p = &page[used++];
		p->d.f = f;
		p->d.total_size = sz * cnt;
		p->d.count = cnt;
		RB_INSERT(leaktree, &leakhead, p);
	} else {
		p->d.total_size += sz * cnt;
		p->d.count += cnt;
	}
}

static struct malloc_leak *malloc_leaks;

static void
writestr(int fd, const char *p)
{
	write(fd, p, strlen(p));
}

static void
dump_leaks(int fd)
{
	struct leaknode *p;
	char buf[64];
	int i = 0;

	writestr(fd, "Leak report\n");
	writestr(fd, "                 f     sum      #    avg\n");
	/* XXX only one page of summary */
	if (malloc_leaks == NULL)
		malloc_leaks = MMAP(MALLOC_PAGESIZE);
	if (malloc_leaks != MAP_FAILED)
		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
	RB_FOREACH(p, leaktree, &leakhead) {
		snprintf(buf, sizeof(buf), "%18p %7zu %6u %6zu\n", p->d.f,
		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
		write(fd, buf, strlen(buf));
		if (malloc_leaks == MAP_FAILED ||
		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
			continue;
		malloc_leaks[i].f = p->d.f;
		malloc_leaks[i].total_size = p->d.total_size;
		malloc_leaks[i].count = p->d.count;
		i++;
	}
}

static void
dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
{
	char buf[64];

	while (p != NULL) {
		snprintf(buf, sizeof(buf), "chunk %18p %18p %4d %d/%d\n",
		    p->page, ((p->bits[0] & 1) ? NULL : f),
		    p->size, p->free, p->total);
		write(fd, buf, strlen(buf));
		if (!fromfreelist) {
			if (p->bits[0] & 1)
				putleakinfo(NULL, p->size, p->total - p->free);
			else {
				putleakinfo(f, p->size, 1);
				putleakinfo(NULL, p->size,
				    p->total - p->free - 1);
			}
			break;
		}
		p = LIST_NEXT(p, entries);
		if (p != NULL)
			writestr(fd, "        ");
	}
}

static void
dump_free_chunk_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i, j, count;
	struct chunk_info *p;

	writestr(fd, "Free chunk structs:\n");
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		count = 0;
		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
			count++;
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
			p = LIST_FIRST(&d->chunk_dir[i][j]);
			if (p == NULL && count == 0)
				continue;
			snprintf(buf, sizeof(buf), "%2d) %3d ", i, count);
			write(fd, buf, strlen(buf));
			if (p != NULL)
				dump_chunk(fd, p, NULL, 1);
			else
				write(fd, "\n", 1);
		}
	}
}

static void
dump_free_page_info(int fd, struct dir_info *d)
{
	char buf[64];
	int i;

	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
	    d->free_regions_size);
	write(fd, buf, strlen(buf));
	for (i = 0; i < mopts.malloc_cache; i++) {
		if (d->free_regions[i].p != NULL) {
			snprintf(buf, sizeof(buf), "%2d) ", i);
			write(fd, buf, strlen(buf));
			snprintf(buf, sizeof(buf), "free at %p: %zu\n",
			    d->free_regions[i].p, d->free_regions[i].size);
			write(fd, buf, strlen(buf));
		}
	}
}

static void
malloc_dump1(int fd, struct dir_info *d)
{
	char buf[100];
	size_t i, realsize;

	snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
	write(fd, buf, strlen(buf));
	if (d == NULL)
		return;
	snprintf(buf, sizeof(buf), "Region slots free %zu/%zu\n",
	    d->regions_free, d->regions_total);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Finds %zu/%zu\n", d->finds,
	    d->find_collisions);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Inserts %zu/%zu\n", d->inserts,
	    d->insert_collisions);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
	    d->delete_moves);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
	    d->cheap_reallocs, d->cheap_realloc_tries);
	write(fd, buf, strlen(buf));
	dump_free_chunk_info(fd, d);
	dump_free_page_info(fd, d);
	writestr(fd,
	    "slot)  hash d  type               page                  f size [free/n]\n");
	for (i = 0; i < d->regions_total; i++) {
		if (d->r[i].p != NULL) {
			size_t h = hash(d->r[i].p) &
			    (d->regions_total - 1);
			snprintf(buf, sizeof(buf), "%4zx) #%4zx %zd ",
			    i, h, h - i);
			write(fd, buf, strlen(buf));
			REALSIZE(realsize, &d->r[i]);
			if (realsize > MALLOC_MAXCHUNK) {
				putleakinfo(d->r[i].f, realsize, 1);
				snprintf(buf, sizeof(buf),
				    "pages %12p %12p %zu\n", d->r[i].p,
				    d->r[i].f, realsize);
				write(fd, buf, strlen(buf));
			} else
				dump_chunk(fd,
				    (struct chunk_info *)d->r[i].size,
				    d->r[i].f, 0);
		}
	}
	snprintf(buf, sizeof(buf), "In use %zu\n", d->malloc_used);
	write(fd, buf, strlen(buf));
	snprintf(buf, sizeof(buf), "Guarded %zu\n", d->malloc_guarded);
	write(fd, buf, strlen(buf));
	dump_leaks(fd);
	write(fd, "\n", 1);
}

void
malloc_dump(int fd, struct dir_info *pool)
{
	int i;
	void *p;
	struct region_info *r;
	int saved_errno = errno;

	if (pool == NULL)
		return;
	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
		p = pool->delayed_chunks[i];
		if (p == NULL)
			continue;
		r = find(pool, p);
		if (r == NULL)
			wrterror(pool, "bogus pointer in malloc_dump", p);
		free_bytes(pool, r, p);
		pool->delayed_chunks[i] = NULL;
	}
	/* XXX leak when run multiple times */
	RB_INIT(&leakhead);
	malloc_dump1(fd, pool);
	errno = saved_errno;
}
DEF_WEAK(malloc_dump);

static void
malloc_exit(void)
{
	static const char q[] = "malloc() warning: Couldn't dump stats\n";
	int save_errno = errno, fd, i;

	fd = open("malloc.out", O_RDWR|O_APPEND);
	if (fd != -1) {
		for (i = 0; i < _MALLOC_MUTEXES; i++)
			malloc_dump(fd, mopts.malloc_pool[i]);
		close(fd);
	} else
		write(STDERR_FILENO, q, sizeof(q) - 1);
	errno = save_errno;
}

#endif /* MALLOC_STATS */