1 /* $OpenBSD: malloc.c,v 1.297 2024/09/20 02:00:46 jsg Exp $ */
2 /*
3 * Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <otto@drijf.net>
4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
5 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
6 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 /*
22 * If we meet some day, and you think this stuff is worth it, you
23 * can buy me a beer in return. Poul-Henning Kamp
24 */
25
26 #ifndef MALLOC_SMALL
27 #define MALLOC_STATS
28 #endif
29
30 #include <sys/types.h>
31 #include <sys/queue.h>
32 #include <sys/mman.h>
33 #include <sys/sysctl.h>
34 #include <uvm/uvmexp.h>
35 #include <errno.h>
36 #include <stdarg.h>
37 #include <stdint.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <string.h>
41 #include <unistd.h>
42
43 #ifdef MALLOC_STATS
44 #include <sys/tree.h>
45 #include <sys/ktrace.h>
46 #include <dlfcn.h>
47 #endif
48
49 #include "thread_private.h"
50 #include <tib.h>
51
52 #define MALLOC_PAGESHIFT _MAX_PAGE_SHIFT
53
54 #define MALLOC_MINSHIFT 4
55 #define MALLOC_MAXSHIFT (MALLOC_PAGESHIFT - 1)
56 #define MALLOC_PAGESIZE (1UL << MALLOC_PAGESHIFT)
57 #define MALLOC_MINSIZE (1UL << MALLOC_MINSHIFT)
58 #define MALLOC_PAGEMASK (MALLOC_PAGESIZE - 1)
59 #define MASK_POINTER(p) ((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
60
61 #define MALLOC_MAXCHUNK (1 << MALLOC_MAXSHIFT)
62 #define MALLOC_MAXCACHE 256
63 #define MALLOC_DELAYED_CHUNK_MASK 15
64 #ifdef MALLOC_STATS
65 #define MALLOC_INITIAL_REGIONS 512
66 #else
67 #define MALLOC_INITIAL_REGIONS (MALLOC_PAGESIZE / sizeof(struct region_info))
68 #endif
69 #define MALLOC_DEFAULT_CACHE 64
70 #define MALLOC_CHUNK_LISTS 4
71 #define CHUNK_CHECK_LENGTH 32
72
73 #define B2SIZE(b) ((b) * MALLOC_MINSIZE)
74 #define B2ALLOC(b) ((b) == 0 ? MALLOC_MINSIZE : \
75 (b) * MALLOC_MINSIZE)
76 #define BUCKETS (MALLOC_MAXCHUNK / MALLOC_MINSIZE)
77
78 /*
79 * We move allocations between half a page and a whole page towards the end,
80 * subject to alignment constraints. This is the extra headroom we allow.
81 * Set to zero to be the most strict.
82 */
83 #define MALLOC_LEEWAY 0
84 #define MALLOC_MOVE_COND(sz) ((sz) - mopts.malloc_guard < \
85 MALLOC_PAGESIZE - MALLOC_LEEWAY)
86 #define MALLOC_MOVE(p, sz) (((char *)(p)) + \
87 ((MALLOC_PAGESIZE - MALLOC_LEEWAY - \
88 ((sz) - mopts.malloc_guard)) & \
89 ~(MALLOC_MINSIZE - 1)))
90
91 #define PAGEROUND(x) (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
92
93 /*
94  * What to use for junk. This is the byte value used to fill memory
95  * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
96 * and SOME_FREEJUNK right before free.
97 */
98 #define SOME_JUNK 0xdb /* deadbeef */
99 #define SOME_FREEJUNK 0xdf /* dead, free */
100 #define SOME_FREEJUNK_ULL 0xdfdfdfdfdfdfdfdfULL
101
102 #define MMAP(sz,f) mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
103 MAP_ANON | MAP_PRIVATE | (f), -1, 0)
104
105 #define MMAPNONE(sz,f) mmap(NULL, (sz), PROT_NONE, \
106 MAP_ANON | MAP_PRIVATE | (f), -1, 0)
107
108 #define MMAPA(a,sz,f) mmap((a), (sz), PROT_READ | PROT_WRITE, \
109 MAP_ANON | MAP_PRIVATE | (f), -1, 0)
110
111 struct region_info {
112 void *p; /* page; low bits used to mark chunks */
113 uintptr_t size; /* size for pages, or chunk_info pointer */
114 #ifdef MALLOC_STATS
115 void **f; /* where allocated from */
116 #endif
117 };
118
119 LIST_HEAD(chunk_head, chunk_info);
120
121 /*
122 * Two caches, one for "small" regions, one for "big".
123  * The small cache is one array per size; the big cache is a single array
124  * of differently sized regions.
125 */
126 #define MAX_SMALLCACHEABLE_SIZE 32
127 #define MAX_BIGCACHEABLE_SIZE 512
128 /* If the total # of pages is larger than this, evict before inserting */
129 #define BIGCACHE_FILL(sz) (MAX_BIGCACHEABLE_SIZE * (sz) / 4)
130
131 struct smallcache {
132 void **pages;
133 ushort length;
134 ushort max;
135 };
136
137 struct bigcache {
138 void *page;
139 size_t psize;
140 };
141
142 #ifdef MALLOC_STATS
143 #define NUM_FRAMES 4
144 struct btnode {
145 RBT_ENTRY(btnode) entry;
146 void *caller[NUM_FRAMES];
147 };
148 RBT_HEAD(btshead, btnode);
149 RBT_PROTOTYPE(btshead, btnode, entry, btcmp);
150 #endif /* MALLOC_STATS */
151
152 struct dir_info {
153 u_int32_t canary1;
154 int active; /* status of malloc */
155 struct region_info *r; /* region slots */
156 size_t regions_total; /* number of region slots */
157 size_t regions_free; /* number of free slots */
158 size_t rbytesused; /* random bytes used */
159 const char *func; /* current function */
160 int malloc_junk; /* junk fill? */
161 int mmap_flag; /* extra flag for mmap */
162 int mutex;
163 int malloc_mt; /* multi-threaded mode? */
164 /* lists of free chunk info structs */
165 struct chunk_head chunk_info_list[BUCKETS + 1];
166 /* lists of chunks with free slots */
167 struct chunk_head chunk_dir[BUCKETS + 1][MALLOC_CHUNK_LISTS];
168 /* delayed free chunk slots */
169 void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
170 u_char rbytes[32]; /* random bytes */
171 /* free pages cache */
172 struct smallcache smallcache[MAX_SMALLCACHEABLE_SIZE];
173 size_t bigcache_used;
174 size_t bigcache_size;
175 struct bigcache *bigcache;
176 void *chunk_pages;
177 size_t chunk_pages_used;
178 #ifdef MALLOC_STATS
179 void *caller;
180 size_t inserts;
181 size_t insert_collisions;
182 size_t deletes;
183 size_t delete_moves;
184 size_t cheap_realloc_tries;
185 size_t cheap_reallocs;
186 size_t malloc_used; /* bytes allocated */
187 size_t malloc_guarded; /* bytes used for guards */
188 struct btshead btraces; /* backtraces seen */
189 struct btnode *btnodes; /* store of backtrace nodes */
190 size_t btnodesused;
191 #define STATS_ADD(x,y) ((x) += (y))
192 #define STATS_SUB(x,y) ((x) -= (y))
193 #define STATS_INC(x) ((x)++)
194 #define STATS_ZERO(x) ((x) = 0)
195 #define STATS_SETF(x,y) ((x)->f = (y))
196 #define STATS_SETFN(x,k,y) ((x)->f[k] = (y))
197 #define SET_CALLER(x,y) if (DO_STATS) ((x)->caller = (y))
198 #else
199 #define STATS_ADD(x,y) /* nothing */
200 #define STATS_SUB(x,y) /* nothing */
201 #define STATS_INC(x) /* nothing */
202 #define STATS_ZERO(x) /* nothing */
203 #define STATS_SETF(x,y) /* nothing */
204 #define STATS_SETFN(x,k,y) /* nothing */
205 #define SET_CALLER(x,y) /* nothing */
206 #endif /* MALLOC_STATS */
207 u_int32_t canary2;
208 };
209
210 static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear);
211
212 /*
213 * This structure describes a page worth of chunks.
214 *
215 * How many bits per u_short in the bitmap
216 */
217 #define MALLOC_BITS (NBBY * sizeof(u_short))
218 struct chunk_info {
219 LIST_ENTRY(chunk_info) entries;
220 void *page; /* pointer to the page */
221 /* number of shorts should add up to 8, check alloc_chunk_info() */
222 u_short canary;
223 u_short bucket;
224 u_short free; /* how many free chunks */
225 u_short total; /* how many chunks */
226 u_short offset; /* requested size table offset */
227 #define CHUNK_INFO_TAIL 3
228 u_short bits[CHUNK_INFO_TAIL]; /* which chunks are free */
229 };
230
231 #define CHUNK_FREE(i, n) ((i)->bits[(n) / MALLOC_BITS] & \
232 (1U << ((n) % MALLOC_BITS)))
233
234 struct malloc_readonly {
235 /* Main bookkeeping information */
236 struct dir_info *malloc_pool[_MALLOC_MUTEXES];
237 	u_int	malloc_mutexes;		/* how many in actual use? */
238 int malloc_freecheck; /* Extensive double free check */
239 int malloc_freeunmap; /* mprotect free pages PROT_NONE? */
240 int def_malloc_junk; /* junk fill? */
241 int malloc_realloc; /* always realloc? */
242 int malloc_xmalloc; /* xmalloc behaviour? */
243 u_int chunk_canaries; /* use canaries after chunks? */
244 int internal_funcs; /* use better recallocarray/freezero? */
245 u_int def_maxcache; /* free pages we cache */
246 u_int junk_loc; /* variation in location of junk */
247 size_t malloc_guard; /* use guard pages after allocations? */
248 #ifdef MALLOC_STATS
249 int malloc_stats; /* save callers, dump leak report */
250 int malloc_verbose; /* dump verbose statistics at end */
251 #define DO_STATS mopts.malloc_stats
252 #else
253 #define DO_STATS 0
254 #endif
255 u_int32_t malloc_canary; /* Matched against ones in pool */
256 };
257
258
259 /* This object is mapped PROT_READ after initialisation to prevent tampering */
260 static union {
261 struct malloc_readonly mopts;
262 u_char _pad[MALLOC_PAGESIZE];
263 } malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)))
264 __attribute__((section(".openbsd.mutable")));
265 #define mopts malloc_readonly.mopts
266
267 char *malloc_options; /* compile-time options */
268
269 static __dead void wrterror(struct dir_info *d, char *msg, ...)
270 __attribute__((__format__ (printf, 2, 3)));
271
272 #ifdef MALLOC_STATS
273 void malloc_dump(void);
274 PROTO_NORMAL(malloc_dump);
275 static void malloc_exit(void);
276 static void print_chunk_details(struct dir_info *, void *, size_t, size_t);
277 static void* store_caller(struct dir_info *, struct btnode *);
278
279 /* below are the arches for which deeper caller info has been tested */
280 #if defined(__aarch64__) || \
281 defined(__amd64__) || \
282 defined(__arm__) || \
283 defined(__i386__) || \
284 defined(__powerpc__)
285 __attribute__((always_inline))
286 static inline void*
287 caller(struct dir_info *d)
288 {
289 struct btnode p;
290 int level = DO_STATS;
291
292 if (level == 0)
293 return NULL;
294
295 memset(&p.caller, 0, sizeof(p.caller));
296 if (level >= 1)
297 p.caller[0] = __builtin_extract_return_addr(
298 __builtin_return_address(0));
299 if (p.caller[0] != NULL && level >= 2)
300 p.caller[1] = __builtin_extract_return_addr(
301 __builtin_return_address(1));
302 if (p.caller[1] != NULL && level >= 3)
303 p.caller[2] = __builtin_extract_return_addr(
304 __builtin_return_address(2));
305 if (p.caller[2] != NULL && level >= 4)
306 p.caller[3] = __builtin_extract_return_addr(
307 __builtin_return_address(3));
308 return store_caller(d, &p);
309 }
310 #else
311 __attribute__((always_inline))
312 static inline void* caller(struct dir_info *d)
313 {
314 struct btnode p;
315
316 if (DO_STATS == 0)
317 return NULL;
318 memset(&p.caller, 0, sizeof(p.caller));
319 p.caller[0] = __builtin_extract_return_addr(__builtin_return_address(0));
320 return store_caller(d, &p);
321 }
322 #endif
323 #endif /* MALLOC_STATS */
324
325 /* The low bits of r->p determine the size: 0 means >= page size, with
326  * r->size holding the real size; otherwise the low bits are the bucket + 1
327 */
328 #define REALSIZE(sz, r) \
329 (sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK, \
330 (sz) = ((sz) == 0 ? (r)->size : B2SIZE((sz) - 1))
331
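/*
 * Hash the page address of p; the result is used to index the
 * open-addressed region table (see insert(), find() and delete()).
 */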
332 static inline size_t
333 hash(void *p)
334 {
335 size_t sum;
336 uintptr_t u;
337
338 u = (uintptr_t)p >> MALLOC_PAGESHIFT;
339 sum = u;
340 sum = (sum << 7) - sum + (u >> 16);
341 #ifdef __LP64__
342 sum = (sum << 7) - sum + (u >> 32);
343 sum = (sum << 7) - sum + (u >> 48);
344 #endif
345 return sum;
346 }
347
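/*
 * Pick the pool for the calling thread: pool 0 is reserved for the
 * *_conceal() functions, single-threaded programs use pool 1, and
 * threaded programs spread over pools 1..malloc_mutexes-1 by thread id.
 */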
348 static inline struct dir_info *
349 getpool(void)
350 {
351 if (mopts.malloc_pool[1] == NULL || !mopts.malloc_pool[1]->malloc_mt)
352 return mopts.malloc_pool[1];
353 else /* first one reserved for special pool */
354 return mopts.malloc_pool[1 + TIB_GET()->tib_tid %
355 (mopts.malloc_mutexes - 1)];
356 }
357
358 static __dead void
359 wrterror(struct dir_info *d, char *msg, ...)
360 {
361 int saved_errno = errno;
362 va_list ap;
363
364 dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
365 getpid(), (d != NULL && d->func) ? d->func : "unknown");
366 va_start(ap, msg);
367 vdprintf(STDERR_FILENO, msg, ap);
368 va_end(ap);
369 dprintf(STDERR_FILENO, "\n");
370
371 #ifdef MALLOC_STATS
372 if (DO_STATS && mopts.malloc_verbose)
373 malloc_dump();
374 #endif
375
376 errno = saved_errno;
377
378 abort();
379 }
380
381 static void
382 rbytes_init(struct dir_info *d)
383 {
384 arc4random_buf(d->rbytes, sizeof(d->rbytes));
385 /* add 1 to account for using d->rbytes[0] */
386 d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
387 }
388
389 static inline u_char
390 getrbyte(struct dir_info *d)
391 {
392 u_char x;
393
394 if (d->rbytesused >= sizeof(d->rbytes))
395 rbytes_init(d);
396 x = d->rbytes[d->rbytesused++];
397 return x;
398 }
399
400 static void
401 omalloc_parseopt(char opt)
402 {
403 switch (opt) {
404 case '+':
405 mopts.malloc_mutexes <<= 1;
406 if (mopts.malloc_mutexes > _MALLOC_MUTEXES)
407 mopts.malloc_mutexes = _MALLOC_MUTEXES;
408 break;
409 case '-':
410 mopts.malloc_mutexes >>= 1;
411 if (mopts.malloc_mutexes < 2)
412 mopts.malloc_mutexes = 2;
413 break;
414 case '>':
415 mopts.def_maxcache <<= 1;
416 if (mopts.def_maxcache > MALLOC_MAXCACHE)
417 mopts.def_maxcache = MALLOC_MAXCACHE;
418 break;
419 case '<':
420 mopts.def_maxcache >>= 1;
421 break;
422 case 'c':
423 mopts.chunk_canaries = 0;
424 break;
425 case 'C':
426 mopts.chunk_canaries = 1;
427 break;
428 #ifdef MALLOC_STATS
429 case 'd':
430 mopts.malloc_stats = 0;
431 break;
432 case 'D':
433 case '1':
434 mopts.malloc_stats = 1;
435 break;
436 case '2':
437 mopts.malloc_stats = 2;
438 break;
439 case '3':
440 mopts.malloc_stats = 3;
441 break;
442 case '4':
443 mopts.malloc_stats = 4;
444 break;
445 #endif /* MALLOC_STATS */
446 case 'f':
447 mopts.malloc_freecheck = 0;
448 mopts.malloc_freeunmap = 0;
449 break;
450 case 'F':
451 mopts.malloc_freecheck = 1;
452 mopts.malloc_freeunmap = 1;
453 break;
454 case 'g':
455 mopts.malloc_guard = 0;
456 break;
457 case 'G':
458 mopts.malloc_guard = MALLOC_PAGESIZE;
459 break;
460 case 'j':
461 if (mopts.def_malloc_junk > 0)
462 mopts.def_malloc_junk--;
463 break;
464 case 'J':
465 if (mopts.def_malloc_junk < 2)
466 mopts.def_malloc_junk++;
467 break;
468 case 'r':
469 mopts.malloc_realloc = 0;
470 break;
471 case 'R':
472 mopts.malloc_realloc = 1;
473 break;
474 case 'u':
475 mopts.malloc_freeunmap = 0;
476 break;
477 case 'U':
478 mopts.malloc_freeunmap = 1;
479 break;
480 #ifdef MALLOC_STATS
481 case 'v':
482 mopts.malloc_verbose = 0;
483 break;
484 case 'V':
485 mopts.malloc_verbose = 1;
486 break;
487 #endif /* MALLOC_STATS */
488 case 'x':
489 mopts.malloc_xmalloc = 0;
490 break;
491 case 'X':
492 mopts.malloc_xmalloc = 1;
493 break;
494 default:
495 dprintf(STDERR_FILENO, "malloc() warning: "
496 "unknown char in MALLOC_OPTIONS\n");
497 break;
498 }
499 }
500
501 static void
502 omalloc_init(void)
503 {
504 char *p, *q, b[16];
505 int i, j;
506 const int mib[2] = { CTL_VM, VM_MALLOC_CONF };
507 size_t sb;
508
509 /*
510 * Default options
511 */
512 mopts.malloc_mutexes = 8;
513 mopts.def_malloc_junk = 1;
514 mopts.def_maxcache = MALLOC_DEFAULT_CACHE;
515
516 for (i = 0; i < 3; i++) {
517 switch (i) {
518 case 0:
519 sb = sizeof(b);
520 j = sysctl(mib, 2, b, &sb, NULL, 0);
521 if (j != 0)
522 continue;
523 p = b;
524 break;
525 case 1:
526 if (issetugid() == 0)
527 p = getenv("MALLOC_OPTIONS");
528 else
529 continue;
530 break;
531 case 2:
532 p = malloc_options;
533 break;
534 default:
535 p = NULL;
536 }
537
538 for (; p != NULL && *p != '\0'; p++) {
539 switch (*p) {
540 case 'S':
541 for (q = "CFGJ"; *q != '\0'; q++)
542 omalloc_parseopt(*q);
543 mopts.def_maxcache = 0;
544 break;
545 case 's':
546 for (q = "cfgj"; *q != '\0'; q++)
547 omalloc_parseopt(*q);
548 mopts.def_maxcache = MALLOC_DEFAULT_CACHE;
549 break;
550 default:
551 omalloc_parseopt(*p);
552 break;
553 }
554 }
555 }
556
557 #ifdef MALLOC_STATS
558 if (DO_STATS && (atexit(malloc_exit) == -1)) {
559 dprintf(STDERR_FILENO, "malloc() warning: atexit(3) failed."
560 " Will not be able to dump stats on exit\n");
561 }
562 #endif
563
564 while ((mopts.malloc_canary = arc4random()) == 0)
565 ;
566 mopts.junk_loc = arc4random();
567 if (mopts.chunk_canaries)
568 do {
569 mopts.chunk_canaries = arc4random();
570 } while ((u_char)mopts.chunk_canaries == 0 ||
571 (u_char)mopts.chunk_canaries == SOME_FREEJUNK);
572 }
573
574 static void
575 omalloc_poolinit(struct dir_info *d, int mmap_flag)
576 {
577 u_int i, j;
578
579 d->r = NULL;
580 d->rbytesused = sizeof(d->rbytes);
581 d->regions_free = d->regions_total = 0;
582 for (i = 0; i <= BUCKETS; i++) {
583 LIST_INIT(&d->chunk_info_list[i]);
584 for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
585 LIST_INIT(&d->chunk_dir[i][j]);
586 }
587 d->mmap_flag = mmap_flag;
588 d->malloc_junk = mopts.def_malloc_junk;
589 #ifdef MALLOC_STATS
590 RBT_INIT(btshead, &d->btraces);
591 #endif
592 d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
593 d->canary2 = ~d->canary1;
594 }
595
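/*
 * Double the region table (or create the initial one) and rehash all
 * existing entries into the new, larger table.
 */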
596 static int
597 omalloc_grow(struct dir_info *d)
598 {
599 size_t newtotal;
600 size_t newsize;
601 size_t mask;
602 size_t i, oldpsz;
603 struct region_info *p;
604
605 if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
606 return 1;
607
608 newtotal = d->regions_total == 0 ? MALLOC_INITIAL_REGIONS :
609 d->regions_total * 2;
610 newsize = PAGEROUND(newtotal * sizeof(struct region_info));
611 mask = newtotal - 1;
612
613 	/* Don't use the cache here, we don't want a user use-after-free to touch this */
614 p = MMAP(newsize, d->mmap_flag);
615 if (p == MAP_FAILED)
616 return 1;
617
618 STATS_ADD(d->malloc_used, newsize);
619 STATS_ZERO(d->inserts);
620 STATS_ZERO(d->insert_collisions);
621 for (i = 0; i < d->regions_total; i++) {
622 void *q = d->r[i].p;
623 if (q != NULL) {
624 size_t index = hash(q) & mask;
625 STATS_INC(d->inserts);
626 while (p[index].p != NULL) {
627 index = (index - 1) & mask;
628 STATS_INC(d->insert_collisions);
629 }
630 p[index] = d->r[i];
631 }
632 }
633
634 if (d->regions_total > 0) {
635 oldpsz = PAGEROUND(d->regions_total *
636 sizeof(struct region_info));
637 /* clear to avoid meta info ending up in the cache */
638 unmap(d, d->r, oldpsz, oldpsz);
639 }
640 d->regions_free += newtotal - d->regions_total;
641 d->regions_total = newtotal;
642 d->r = p;
643 return 0;
644 }
645
646 /*
647 * The hashtable uses the assumption that p is never NULL. This holds since
648 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
649 */
650 static int
651 insert(struct dir_info *d, void *p, size_t sz, void *f)
652 {
653 size_t index;
654 size_t mask;
655 void *q;
656
657 if (d->regions_free * 4 < d->regions_total || d->regions_total == 0) {
658 if (omalloc_grow(d))
659 return 1;
660 }
661 mask = d->regions_total - 1;
662 index = hash(p) & mask;
663 q = d->r[index].p;
664 STATS_INC(d->inserts);
665 while (q != NULL) {
666 index = (index - 1) & mask;
667 q = d->r[index].p;
668 STATS_INC(d->insert_collisions);
669 }
670 d->r[index].p = p;
671 d->r[index].size = sz;
672 STATS_SETF(&d->r[index], f);
673 d->regions_free--;
674 return 0;
675 }
676
677 static struct region_info *
678 find(struct dir_info *d, void *p)
679 {
680 size_t index;
681 size_t mask = d->regions_total - 1;
682 void *q, *r;
683
684 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
685 d->canary1 != ~d->canary2)
686 wrterror(d, "internal struct corrupt");
687 if (d->r == NULL)
688 return NULL;
689 p = MASK_POINTER(p);
690 index = hash(p) & mask;
691 r = d->r[index].p;
692 q = MASK_POINTER(r);
693 while (q != p && r != NULL) {
694 index = (index - 1) & mask;
695 r = d->r[index].p;
696 q = MASK_POINTER(r);
697 }
698 return (q == p && r != NULL) ? &d->r[index] : NULL;
699 }
700
701 static void
702 delete(struct dir_info *d, struct region_info *ri)
703 {
704 /* algorithm R, Knuth Vol III section 6.4 */
705 size_t mask = d->regions_total - 1;
706 size_t i, j, r;
707
708 if (d->regions_total & (d->regions_total - 1))
709 wrterror(d, "regions_total not 2^x");
710 d->regions_free++;
711 STATS_INC(d->deletes);
712
713 i = ri - d->r;
714 for (;;) {
715 d->r[i].p = NULL;
716 d->r[i].size = 0;
717 j = i;
718 for (;;) {
719 i = (i - 1) & mask;
720 if (d->r[i].p == NULL)
721 return;
722 r = hash(d->r[i].p) & mask;
723 if ((i <= r && r < j) || (r < j && j < i) ||
724 (j < i && i <= r))
725 continue;
726 d->r[j] = d->r[i];
727 STATS_INC(d->delete_moves);
728 break;
729 }
730
731 }
732 }
733
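/*
 * Fill freed memory with SOME_FREEJUNK. At junk level 1 only up to a
 * page is junked, in strides, to keep free() cheap; level 2 junks the
 * whole region.
 */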
734 static inline void
735 junk_free(int junk, void *p, size_t sz)
736 {
737 size_t i, step = 1;
738 uint64_t *lp = p;
739
740 if (junk == 0 || sz == 0)
741 return;
742 sz /= sizeof(uint64_t);
743 if (junk == 1) {
744 if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
745 sz = MALLOC_PAGESIZE / sizeof(uint64_t);
746 step = sz / 4;
747 if (step == 0)
748 step = 1;
749 }
750 /* Do not always put the free junk bytes in the same spot.
751 There is modulo bias here, but we ignore that. */
752 for (i = mopts.junk_loc % step; i < sz; i += step)
753 lp[i] = SOME_FREEJUNK_ULL;
754 }
755
756 static inline void
757 validate_junk(struct dir_info *pool, void *p, size_t argsz)
758 {
759 size_t i, sz, step = 1;
760 uint64_t *lp = p;
761
762 if (pool->malloc_junk == 0 || argsz == 0)
763 return;
764 sz = argsz / sizeof(uint64_t);
765 if (pool->malloc_junk == 1) {
766 if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
767 sz = MALLOC_PAGESIZE / sizeof(uint64_t);
768 step = sz / 4;
769 if (step == 0)
770 step = 1;
771 }
772 /* see junk_free */
773 for (i = mopts.junk_loc % step; i < sz; i += step) {
774 if (lp[i] != SOME_FREEJUNK_ULL) {
775 #ifdef MALLOC_STATS
776 if (DO_STATS && argsz <= MALLOC_MAXCHUNK)
777 print_chunk_details(pool, lp, argsz, i);
778 else
779 #endif
780 wrterror(pool,
781 "write to free mem %p[%zu..%zu]@%zu",
782 lp, i * sizeof(uint64_t),
783 (i + 1) * sizeof(uint64_t) - 1, argsz);
784 }
785 }
786 }
787
788
789 /*
790 * Cache maintenance.
791 * Opposed to the regular region data structure, the sizes in the
792 * cache are in MALLOC_PAGESIZE units.
793 */
794 static void
795 unmap(struct dir_info *d, void *p, size_t sz, size_t clear)
796 {
797 size_t psz = sz >> MALLOC_PAGESHIFT;
798 void *r;
799 u_short i;
800 struct smallcache *cache;
801
802 if (sz != PAGEROUND(sz) || psz == 0)
803 wrterror(d, "munmap round");
804
805 if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
806 psz <= MAX_BIGCACHEABLE_SIZE) {
807 u_short base = getrbyte(d);
808 u_short j;
809
810 /* don't look through all slots */
811 for (j = 0; j < d->bigcache_size / 4; j++) {
812 i = (base + j) & (d->bigcache_size - 1);
813 if (d->bigcache_used <
814 BIGCACHE_FILL(d->bigcache_size)) {
815 if (d->bigcache[i].psize == 0)
816 break;
817 } else {
818 if (d->bigcache[i].psize != 0)
819 break;
820 }
821 }
822 		/* if we didn't find a preferred slot, use a random one */
823 if (d->bigcache[i].psize != 0) {
824 size_t tmp;
825
826 r = d->bigcache[i].page;
827 d->bigcache_used -= d->bigcache[i].psize;
828 tmp = d->bigcache[i].psize << MALLOC_PAGESHIFT;
829 if (!mopts.malloc_freeunmap)
830 validate_junk(d, r, tmp);
831 if (munmap(r, tmp))
832 wrterror(d, "munmap %p", r);
833 STATS_SUB(d->malloc_used, tmp);
834 }
835
836 if (clear > 0)
837 explicit_bzero(p, clear);
838 if (mopts.malloc_freeunmap) {
839 if (mprotect(p, sz, PROT_NONE))
840 wrterror(d, "mprotect %p", r);
841 } else
842 junk_free(d->malloc_junk, p, sz);
843 d->bigcache[i].page = p;
844 d->bigcache[i].psize = psz;
845 d->bigcache_used += psz;
846 return;
847 }
848 if (psz > MAX_SMALLCACHEABLE_SIZE || d->smallcache[psz - 1].max == 0) {
849 if (munmap(p, sz))
850 wrterror(d, "munmap %p", p);
851 STATS_SUB(d->malloc_used, sz);
852 return;
853 }
854 cache = &d->smallcache[psz - 1];
855 if (cache->length == cache->max) {
856 int fresh;
857 /* use a random slot */
858 i = getrbyte(d) & (cache->max - 1);
859 r = cache->pages[i];
860 fresh = (uintptr_t)r & 1;
861 *(uintptr_t*)&r &= ~1UL;
862 if (!fresh && !mopts.malloc_freeunmap)
863 validate_junk(d, r, sz);
864 if (munmap(r, sz))
865 wrterror(d, "munmap %p", r);
866 STATS_SUB(d->malloc_used, sz);
867 cache->length--;
868 } else
869 i = cache->length;
870
871 /* fill slot */
872 if (clear > 0)
873 explicit_bzero(p, clear);
874 if (mopts.malloc_freeunmap)
875 mprotect(p, sz, PROT_NONE);
876 else
877 junk_free(d->malloc_junk, p, sz);
878 cache->pages[i] = p;
879 cache->length++;
880 }
881
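/*
 * Map a page-rounded region of sz bytes, preferring a cached region of
 * the right size. Junked cache entries are validated before reuse and
 * PROT_NONE entries (freeunmap option) are made accessible again.
 */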
882 static void *
883 map(struct dir_info *d, size_t sz, int zero_fill)
884 {
885 size_t psz = sz >> MALLOC_PAGESHIFT;
886 u_short i;
887 void *p;
888 struct smallcache *cache;
889
890 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
891 d->canary1 != ~d->canary2)
892 wrterror(d, "internal struct corrupt");
893 if (sz != PAGEROUND(sz) || psz == 0)
894 wrterror(d, "map round");
895
896
897 if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
898 psz <= MAX_BIGCACHEABLE_SIZE) {
899 size_t base = getrbyte(d);
900 size_t cached = d->bigcache_used;
901 ushort j;
902
903 for (j = 0; j < d->bigcache_size && cached >= psz; j++) {
904 i = (j + base) & (d->bigcache_size - 1);
905 if (d->bigcache[i].psize == psz) {
906 p = d->bigcache[i].page;
907 d->bigcache_used -= psz;
908 d->bigcache[i].page = NULL;
909 d->bigcache[i].psize = 0;
910
911 if (!mopts.malloc_freeunmap)
912 validate_junk(d, p, sz);
913 if (mopts.malloc_freeunmap)
914 mprotect(p, sz, PROT_READ | PROT_WRITE);
915 if (zero_fill)
916 memset(p, 0, sz);
917 else if (mopts.malloc_freeunmap)
918 junk_free(d->malloc_junk, p, sz);
919 return p;
920 }
921 cached -= d->bigcache[i].psize;
922 }
923 }
924 if (psz <= MAX_SMALLCACHEABLE_SIZE && d->smallcache[psz - 1].max > 0) {
925 cache = &d->smallcache[psz - 1];
926 if (cache->length > 0) {
927 int fresh;
928 if (cache->length == 1)
929 p = cache->pages[--cache->length];
930 else {
931 i = getrbyte(d) % cache->length;
932 p = cache->pages[i];
933 cache->pages[i] = cache->pages[--cache->length];
934 }
935 			/* check if the page was not junked, i.e. is "fresh";
936 			   we use the lsb of the pointer for that */
937 fresh = (uintptr_t)p & 1UL;
938 *(uintptr_t*)&p &= ~1UL;
939 if (!fresh && !mopts.malloc_freeunmap)
940 validate_junk(d, p, sz);
941 if (mopts.malloc_freeunmap)
942 mprotect(p, sz, PROT_READ | PROT_WRITE);
943 if (zero_fill)
944 memset(p, 0, sz);
945 else if (mopts.malloc_freeunmap)
946 junk_free(d->malloc_junk, p, sz);
947 return p;
948 }
949 if (psz <= 1) {
950 p = MMAP(cache->max * sz, d->mmap_flag);
951 if (p != MAP_FAILED) {
952 STATS_ADD(d->malloc_used, cache->max * sz);
953 cache->length = cache->max - 1;
954 for (i = 0; i < cache->max - 1; i++) {
955 void *q = (char*)p + i * sz;
956 cache->pages[i] = q;
957 /* mark pointer in slot as not junked */
958 *(uintptr_t*)&cache->pages[i] |= 1UL;
959 }
960 if (mopts.malloc_freeunmap)
961 mprotect(p, (cache->max - 1) * sz,
962 PROT_NONE);
963 p = (char*)p + (cache->max - 1) * sz;
964 /* zero fill not needed, freshly mmapped */
965 return p;
966 }
967 }
968
969 }
970 p = MMAP(sz, d->mmap_flag);
971 if (p != MAP_FAILED)
972 STATS_ADD(d->malloc_used, sz);
973 /* zero fill not needed */
974 return p;
975 }
976
977 static void
978 init_chunk_info(struct dir_info *d, struct chunk_info *p, u_int bucket)
979 {
980 u_int i;
981
982 p->bucket = bucket;
983 p->total = p->free = MALLOC_PAGESIZE / B2ALLOC(bucket);
984 p->offset = howmany(p->total, MALLOC_BITS);
985 p->canary = (u_short)d->canary1;
986
987 /* set all valid bits in the bitmap */
988 i = p->total - 1;
989 memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS));
990 p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1;
991 }
992
993 static struct chunk_info *
994 alloc_chunk_info(struct dir_info *d, u_int bucket)
995 {
996 struct chunk_info *p;
997
998 if (LIST_EMPTY(&d->chunk_info_list[bucket])) {
999 const size_t chunk_pages = 64;
1000 size_t size, count, i;
1001 char *q;
1002
1003 count = MALLOC_PAGESIZE / B2ALLOC(bucket);
1004
1005 size = howmany(count, MALLOC_BITS);
1006 /* see declaration of struct chunk_info */
1007 if (size <= CHUNK_INFO_TAIL)
1008 size = 0;
1009 else
1010 size -= CHUNK_INFO_TAIL;
1011 size = sizeof(struct chunk_info) + size * sizeof(u_short);
1012 if (mopts.chunk_canaries && bucket > 0)
1013 size += count * sizeof(u_short);
1014 size = _ALIGN(size);
1015 count = MALLOC_PAGESIZE / size;
1016
1017 		/* Don't use the cache here, we don't want a user use-after-free to touch this */
1018 if (d->chunk_pages_used == chunk_pages ||
1019 d->chunk_pages == NULL) {
1020 q = MMAP(MALLOC_PAGESIZE * chunk_pages, d->mmap_flag);
1021 if (q == MAP_FAILED)
1022 return NULL;
1023 d->chunk_pages = q;
1024 d->chunk_pages_used = 0;
1025 STATS_ADD(d->malloc_used, MALLOC_PAGESIZE *
1026 chunk_pages);
1027 }
1028 q = (char *)d->chunk_pages + d->chunk_pages_used *
1029 MALLOC_PAGESIZE;
1030 d->chunk_pages_used++;
1031
1032 for (i = 0; i < count; i++, q += size) {
1033 p = (struct chunk_info *)q;
1034 LIST_INSERT_HEAD(&d->chunk_info_list[bucket], p,
1035 entries);
1036 }
1037 }
1038 p = LIST_FIRST(&d->chunk_info_list[bucket]);
1039 LIST_REMOVE(p, entries);
1040 if (p->total == 0)
1041 init_chunk_info(d, p, bucket);
1042 return p;
1043 }
1044
1045 /*
1046 * Allocate a page of chunks
1047 */
1048 static struct chunk_info *
1049 omalloc_make_chunks(struct dir_info *d, u_int bucket, u_int listnum)
1050 {
1051 struct chunk_info *bp;
1052 void *pp;
1053 void *ff = NULL;
1054
1055 /* Allocate a new bucket */
1056 pp = map(d, MALLOC_PAGESIZE, 0);
1057 if (pp == MAP_FAILED)
1058 return NULL;
1059 if (DO_STATS) {
1060 ff = map(d, MALLOC_PAGESIZE, 0);
1061 if (ff == MAP_FAILED)
1062 goto err;
1063 memset(ff, 0, sizeof(void *) * MALLOC_PAGESIZE /
1064 B2ALLOC(bucket));
1065 }
1066
1067 /* memory protect the page allocated in the malloc(0) case */
1068 if (bucket == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) == -1)
1069 goto err;
1070
1071 bp = alloc_chunk_info(d, bucket);
1072 if (bp == NULL)
1073 goto err;
1074 bp->page = pp;
1075
1076 if (insert(d, (void *)((uintptr_t)pp | (bucket + 1)), (uintptr_t)bp,
1077 ff))
1078 goto err;
1079 LIST_INSERT_HEAD(&d->chunk_dir[bucket][listnum], bp, entries);
1080
1081 if (bucket > 0 && d->malloc_junk != 0)
1082 memset(pp, SOME_FREEJUNK, MALLOC_PAGESIZE);
1083
1084 return bp;
1085
1086 err:
1087 unmap(d, pp, MALLOC_PAGESIZE, 0);
1088 if (ff != NULL && ff != MAP_FAILED)
1089 unmap(d, ff, MALLOC_PAGESIZE, 0);
1090 return NULL;
1091 }
1092
1093 #if defined(__GNUC__) && __GNUC__ < 4
1094 static inline unsigned int
1095 lb(u_int x)
1096 {
1097 #if defined(__m88k__)
1098 __asm__ __volatile__ ("ff1 %0, %0" : "=r" (x) : "0" (x));
1099 return x;
1100 #else
1101 /* portable version */
1102 unsigned int count = 0;
1103 while ((x & (1U << (sizeof(int) * CHAR_BIT - 1))) == 0) {
1104 count++;
1105 x <<= 1;
1106 }
1107 return (sizeof(int) * CHAR_BIT - 1) - count;
1108 #endif
1109 }
1110 #else
1111 /* using built-in function version */
1112 static inline unsigned int
1113 lb(u_int x)
1114 {
1115 /* I need an extension just for integer-length (: */
1116 return (sizeof(int) * CHAR_BIT - 1) - __builtin_clz(x);
1117 }
1118 #endif
1119
1120 /* https://pvk.ca/Blog/2015/06/27/linear-log-bucketing-fast-versatile-simple/
1121 via Tony Finch */
1122 static inline unsigned int
1123 bin_of(unsigned int size)
1124 {
1125 const unsigned int linear = 6;
1126 const unsigned int subbin = 2;
1127
1128 unsigned int mask, rounded, rounded_size;
1129 unsigned int n_bits, shift;
1130
1131 n_bits = lb(size | (1U << linear));
1132 shift = n_bits - subbin;
1133 mask = (1ULL << shift) - 1;
1134 rounded = size + mask; /* XXX: overflow. */
1135
1136 rounded_size = rounded & ~mask;
1137 return rounded_size;
1138 }
1139
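/*
 * Example with the default options and MALLOC_MINSIZE == 16: a 100-byte
 * request is rounded by bin_of() to 112 (lb(100|64) == 6, shift 4,
 * mask 15), so find_bucket(100) == howmany(112, 16) == 7, i.e. the
 * 112-byte bucket.
 */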
1140 static inline u_short
1141 find_bucket(u_short size)
1142 {
1143 /* malloc(0) is special */
1144 if (size == 0)
1145 return 0;
1146 if (size < MALLOC_MINSIZE)
1147 size = MALLOC_MINSIZE;
1148 if (mopts.def_maxcache != 0)
1149 size = bin_of(size);
1150 return howmany(size, MALLOC_MINSIZE);
1151 }
1152
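/*
 * Write canary bytes into the slack between the requested size and the
 * allocated size (at most CHUNK_CHECK_LENGTH bytes of it).
 */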
1153 static void
1154 fill_canary(char *ptr, size_t sz, size_t allocated)
1155 {
1156 size_t check_sz = allocated - sz;
1157
1158 if (check_sz > CHUNK_CHECK_LENGTH)
1159 check_sz = CHUNK_CHECK_LENGTH;
1160 memset(ptr + sz, mopts.chunk_canaries, check_sz);
1161 }
1162
1163 /*
1164 * Allocate a chunk
1165 */
1166 static void *
1167 malloc_bytes(struct dir_info *d, size_t size)
1168 {
1169 u_int i, j, k, r, bucket, listnum;
1170 u_short *lp;
1171 struct chunk_info *bp;
1172 void *p;
1173
1174 if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
1175 d->canary1 != ~d->canary2)
1176 wrterror(d, "internal struct corrupt");
1177
1178 bucket = find_bucket(size);
1179
1180 r = getrbyte(d);
1181 listnum = r % MALLOC_CHUNK_LISTS;
1182
1183 /* If it's empty, make a page more of that size chunks */
1184 if ((bp = LIST_FIRST(&d->chunk_dir[bucket][listnum])) == NULL) {
1185 bp = omalloc_make_chunks(d, bucket, listnum);
1186 if (bp == NULL)
1187 return NULL;
1188 }
1189
1190 if (bp->canary != (u_short)d->canary1 || bucket != bp->bucket)
1191 wrterror(d, "chunk info corrupted");
1192
1193 r /= MALLOC_CHUNK_LISTS;
1194 /* do we need more random bits? */
1195 if (bp->total > 256 / MALLOC_CHUNK_LISTS)
1196 r = r << 8 | getrbyte(d);
1197 /* bias, as bp->total is not a power of 2 */
1198 i = r % bp->total;
1199
1200 j = i % MALLOC_BITS;
1201 i /= MALLOC_BITS;
1202 lp = &bp->bits[i];
1203 /* potentially start somewhere in a short */
1204 if (j > 0 && *lp >> j)
1205 k = ffs(*lp >> j) + j;
1206 else {
1207 /* no bit halfway, go to next full short */
1208 for (;;) {
1209 if (*lp) {
1210 k = ffs(*lp);
1211 break;
1212 }
1213 if (++i >= bp->offset)
1214 i = 0;
1215 lp = &bp->bits[i];
1216 }
1217 }
1218 *lp ^= 1 << --k;
1219
1220 /* If there are no more free, remove from free-list */
1221 if (--bp->free == 0)
1222 LIST_REMOVE(bp, entries);
1223
1224 /* Adjust to the real offset of that chunk */
1225 k += i * MALLOC_BITS;
1226
1227 if (mopts.chunk_canaries && size > 0)
1228 bp->bits[bp->offset + k] = size;
1229
1230 if (DO_STATS) {
1231 struct region_info *r = find(d, bp->page);
1232 STATS_SETFN(r, k, d->caller);
1233 }
1234
1235 p = (char *)bp->page + k * B2ALLOC(bucket);
1236 if (bucket > 0) {
1237 validate_junk(d, p, B2SIZE(bucket));
1238 if (mopts.chunk_canaries)
1239 fill_canary(p, size, B2SIZE(bucket));
1240 }
1241 return p;
1242 }
1243
1244 static void
1245 validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
1246 {
1247 size_t check_sz = allocated - sz;
1248 u_char *p, *q;
1249
1250 if (check_sz > CHUNK_CHECK_LENGTH)
1251 check_sz = CHUNK_CHECK_LENGTH;
1252 p = ptr + sz;
1253 q = p + check_sz;
1254
1255 while (p < q) {
1256 if (*p != (u_char)mopts.chunk_canaries && *p != SOME_JUNK) {
1257 wrterror(d, "canary corrupted %p[%tu]@%zu/%zu%s",
1258 ptr, p - ptr, sz, allocated,
1259 *p == SOME_FREEJUNK ? " (double free?)" : "");
1260 }
1261 p++;
1262 }
1263 }
1264
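/*
 * Compute the index of ptr within its chunk page and sanity check it:
 * misaligned or already-free chunks are fatal, and the canary is
 * verified when check is set.
 */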
1265 static inline uint32_t
1266 find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
1267 {
1268 uint32_t chunknum;
1269
1270 if (info->canary != (u_short)d->canary1)
1271 wrterror(d, "chunk info corrupted");
1272
1273 /* Find the chunk number on the page */
1274 chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) / B2ALLOC(info->bucket);
1275
1276 if ((uintptr_t)ptr & (MALLOC_MINSIZE - 1))
1277 wrterror(d, "modified chunk-pointer %p", ptr);
1278 if (CHUNK_FREE(info, chunknum))
1279 wrterror(d, "double free %p", ptr);
1280 if (check && info->bucket > 0) {
1281 validate_canary(d, ptr, info->bits[info->offset + chunknum],
1282 B2SIZE(info->bucket));
1283 }
1284 return chunknum;
1285 }
1286
1287 /*
1288 * Free a chunk, and possibly the page it's on, if the page becomes empty.
1289 */
1290 static void
1291 free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1292 {
1293 struct chunk_head *mp;
1294 struct chunk_info *info;
1295 uint32_t chunknum;
1296 uint32_t listnum;
1297
1298 info = (struct chunk_info *)r->size;
1299 chunknum = find_chunknum(d, info, ptr, 0);
1300
1301 info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
1302 info->free++;
1303
1304 if (info->free == 1) {
1305 /* Page became non-full */
1306 listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
1307 mp = &d->chunk_dir[info->bucket][listnum];
1308 LIST_INSERT_HEAD(mp, info, entries);
1309 return;
1310 }
1311
1312 if (info->free != info->total)
1313 return;
1314
1315 LIST_REMOVE(info, entries);
1316
1317 if (info->bucket == 0 && !mopts.malloc_freeunmap)
1318 mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1319 unmap(d, info->page, MALLOC_PAGESIZE, 0);
1320 #ifdef MALLOC_STATS
1321 if (r->f != NULL) {
1322 unmap(d, r->f, MALLOC_PAGESIZE, MALLOC_PAGESIZE);
1323 r->f = NULL;
1324 }
1325 #endif
1326
1327 delete(d, r);
1328 mp = &d->chunk_info_list[info->bucket];
1329 LIST_INSERT_HEAD(mp, info, entries);
1330 }
1331
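/*
 * Allocation backend. Requests above MALLOC_MAXCHUNK get their own
 * page-rounded mapping, optionally guarded and, when smaller than a
 * page, shifted towards the end of the page; requests of at most
 * MALLOC_MAXCHUNK are carved out of chunk pages by malloc_bytes().
 */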
1332 static void *
1333 omalloc(struct dir_info *pool, size_t sz, int zero_fill)
1334 {
1335 void *p, *caller = NULL;
1336 size_t psz;
1337
1338 if (sz > MALLOC_MAXCHUNK) {
1339 if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1340 errno = ENOMEM;
1341 return NULL;
1342 }
1343 sz += mopts.malloc_guard;
1344 psz = PAGEROUND(sz);
1345 p = map(pool, psz, zero_fill);
1346 if (p == MAP_FAILED) {
1347 errno = ENOMEM;
1348 return NULL;
1349 }
1350 #ifdef MALLOC_STATS
1351 if (DO_STATS)
1352 caller = pool->caller;
1353 #endif
1354 if (insert(pool, p, sz, caller)) {
1355 unmap(pool, p, psz, 0);
1356 errno = ENOMEM;
1357 return NULL;
1358 }
1359 if (mopts.malloc_guard) {
1360 if (mprotect((char *)p + psz - mopts.malloc_guard,
1361 mopts.malloc_guard, PROT_NONE))
1362 wrterror(pool, "mprotect");
1363 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1364 }
1365
1366 if (MALLOC_MOVE_COND(sz)) {
1367 /* fill whole allocation */
1368 if (pool->malloc_junk == 2)
1369 memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1370 /* shift towards the end */
1371 p = MALLOC_MOVE(p, sz);
1372 			/* re-zero if zero fill was requested and the junk fill above overwrote it */
1373 if (zero_fill && pool->malloc_junk == 2)
1374 memset(p, 0, sz - mopts.malloc_guard);
1375 } else {
1376 if (pool->malloc_junk == 2) {
1377 if (zero_fill)
1378 memset((char *)p + sz -
1379 mopts.malloc_guard, SOME_JUNK,
1380 psz - sz);
1381 else
1382 memset(p, SOME_JUNK,
1383 psz - mopts.malloc_guard);
1384 } else if (mopts.chunk_canaries)
1385 fill_canary(p, sz - mopts.malloc_guard,
1386 psz - mopts.malloc_guard);
1387 }
1388
1389 } else {
1390 /* takes care of SOME_JUNK */
1391 p = malloc_bytes(pool, sz);
1392 if (zero_fill && p != NULL && sz > 0)
1393 memset(p, 0, sz);
1394 }
1395
1396 return p;
1397 }
1398
1399 /*
1400 * Common function for handling recursion. Only
1401 * print the error message once, to avoid making the problem
1402 * potentially worse.
1403 */
1404 static void
1405 malloc_recurse(struct dir_info *d)
1406 {
1407 static int noprint;
1408
1409 if (noprint == 0) {
1410 noprint = 1;
1411 wrterror(d, "recursive call");
1412 }
1413 d->active--;
1414 _MALLOC_UNLOCK(d->mutex);
1415 errno = EDEADLK;
1416 }
1417
1418 void
1419 _malloc_init(int from_rthreads)
1420 {
1421 u_int i, j, nmutexes;
1422 struct dir_info *d;
1423
1424 _MALLOC_LOCK(1);
1425 if (!from_rthreads && mopts.malloc_pool[1]) {
1426 _MALLOC_UNLOCK(1);
1427 return;
1428 }
1429 if (!mopts.malloc_canary) {
1430 char *p;
1431 size_t sz, roundup_sz, d_avail;
1432
1433 omalloc_init();
1434 /*
1435 * Allocate dir_infos with a guard page on either side. Also
1436  * randomise the offset inside the page at which the dir_infos
1437  * lie (subject to alignment by 1 << MALLOC_MINSHIFT)
1438 */
1439 sz = mopts.malloc_mutexes * sizeof(*d);
1440 roundup_sz = (sz + MALLOC_PAGEMASK) & ~MALLOC_PAGEMASK;
1441 if ((p = MMAPNONE(roundup_sz + 2 * MALLOC_PAGESIZE, 0)) ==
1442 MAP_FAILED)
1443 wrterror(NULL, "malloc_init mmap1 failed");
1444 if (mprotect(p + MALLOC_PAGESIZE, roundup_sz,
1445 PROT_READ | PROT_WRITE))
1446 wrterror(NULL, "malloc_init mprotect1 failed");
1447 if (mimmutable(p, roundup_sz + 2 * MALLOC_PAGESIZE))
1448 wrterror(NULL, "malloc_init mimmutable1 failed");
1449 d_avail = (roundup_sz - sz) >> MALLOC_MINSHIFT;
1450 d = (struct dir_info *)(p + MALLOC_PAGESIZE +
1451 (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
1452 STATS_ADD(d[1].malloc_used, roundup_sz + 2 * MALLOC_PAGESIZE);
1453 for (i = 0; i < mopts.malloc_mutexes; i++)
1454 mopts.malloc_pool[i] = &d[i];
1455 mopts.internal_funcs = 1;
1456 if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) {
1457 if (mprotect(&malloc_readonly, sizeof(malloc_readonly),
1458 PROT_READ))
1459 wrterror(NULL,
1460 "malloc_init mprotect r/o failed");
1461 if (mimmutable(&malloc_readonly,
1462 sizeof(malloc_readonly)))
1463 wrterror(NULL,
1464 "malloc_init mimmutable r/o failed");
1465 }
1466 }
1467
1468 nmutexes = from_rthreads ? mopts.malloc_mutexes : 2;
1469 for (i = 0; i < nmutexes; i++) {
1470 d = mopts.malloc_pool[i];
1471 d->malloc_mt = from_rthreads;
1472 if (d->canary1 == ~d->canary2)
1473 continue;
1474 if (i == 0) {
1475 omalloc_poolinit(d, MAP_CONCEAL);
1476 d->malloc_junk = 2;
1477 d->bigcache_size = 0;
1478 for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++)
1479 d->smallcache[j].max = 0;
1480 } else {
1481 size_t sz = 0;
1482
1483 omalloc_poolinit(d, 0);
1484 d->malloc_junk = mopts.def_malloc_junk;
1485 d->bigcache_size = mopts.def_maxcache;
1486 for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
1487 d->smallcache[j].max =
1488 mopts.def_maxcache >> (j / 8);
1489 sz += d->smallcache[j].max * sizeof(void *);
1490 }
1491 sz += d->bigcache_size * sizeof(struct bigcache);
1492 if (sz > 0) {
1493 void *p = MMAP(sz, 0);
1494 if (p == MAP_FAILED)
1495 wrterror(NULL,
1496 "malloc_init mmap2 failed");
1497 if (mimmutable(p, sz))
1498 wrterror(NULL,
1499 "malloc_init mimmutable2 failed");
1500 for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
1501 d->smallcache[j].pages = p;
1502 p = (char *)p + d->smallcache[j].max *
1503 sizeof(void *);
1504 }
1505 d->bigcache = p;
1506 }
1507 }
1508 d->mutex = i;
1509 }
1510
1511 _MALLOC_UNLOCK(1);
1512 }
1513 DEF_STRONG(_malloc_init);
1514
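/*
 * PROLOGUE/EPILOGUE wrap the public entry points: they take the pool
 * lock, record the function name for error messages, guard against
 * recursion and handle the X (xmalloc) option on the way out.
 */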
1515 #define PROLOGUE(p, fn) \
1516 d = (p); \
1517 if (d == NULL) { \
1518 _malloc_init(0); \
1519 d = (p); \
1520 } \
1521 _MALLOC_LOCK(d->mutex); \
1522 d->func = fn; \
1523 if (d->active++) { \
1524 malloc_recurse(d); \
1525 return NULL; \
1526 } \
1527
1528 #define EPILOGUE() \
1529 d->active--; \
1530 _MALLOC_UNLOCK(d->mutex); \
1531 if (r == NULL && mopts.malloc_xmalloc) \
1532 wrterror(d, "out of memory"); \
1533 if (r != NULL) \
1534 errno = saved_errno; \
1535
1536 void *
1537 malloc(size_t size)
1538 {
1539 void *r;
1540 struct dir_info *d;
1541 int saved_errno = errno;
1542
1543 PROLOGUE(getpool(), "malloc")
1544 SET_CALLER(d, caller(d));
1545 r = omalloc(d, size, 0);
1546 EPILOGUE()
1547 return r;
1548 }
1549 DEF_STRONG(malloc);
1550
1551 void *
1552 malloc_conceal(size_t size)
1553 {
1554 void *r;
1555 struct dir_info *d;
1556 int saved_errno = errno;
1557
1558 PROLOGUE(mopts.malloc_pool[0], "malloc_conceal")
1559 SET_CALLER(d, caller(d));
1560 r = omalloc(d, size, 0);
1561 EPILOGUE()
1562 return r;
1563 }
1564 DEF_WEAK(malloc_conceal);
1565
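/*
 * Find the region record for p, starting in argpool and falling back to
 * the other pools; the pool that owns p is returned locked in *foundpool.
 */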
1566 static struct region_info *
1567 findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
1568 const char ** saved_function)
1569 {
1570 struct dir_info *pool = argpool;
1571 struct region_info *r = find(pool, p);
1572
1573 if (r == NULL) {
1574 u_int i, nmutexes;
1575
1576 nmutexes = mopts.malloc_pool[1]->malloc_mt ?
1577 mopts.malloc_mutexes : 2;
1578 for (i = 1; i < nmutexes; i++) {
1579 u_int j = (argpool->mutex + i) & (nmutexes - 1);
1580
1581 pool->active--;
1582 _MALLOC_UNLOCK(pool->mutex);
1583 pool = mopts.malloc_pool[j];
1584 _MALLOC_LOCK(pool->mutex);
1585 pool->active++;
1586 r = find(pool, p);
1587 if (r != NULL) {
1588 *saved_function = pool->func;
1589 pool->func = argpool->func;
1590 break;
1591 }
1592 }
1593 if (r == NULL)
1594 wrterror(argpool, "bogus pointer (double free?) %p", p);
1595 }
1596 *foundpool = pool;
1597 return r;
1598 }
1599
1600 static void
1601 ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz)
1602 {
1603 struct region_info *r;
1604 struct dir_info *pool;
1605 const char *saved_function;
1606 size_t sz;
1607
1608 r = findpool(p, *argpool, &pool, &saved_function);
1609
1610 REALSIZE(sz, r);
1611 if (pool->mmap_flag) {
1612 clear = 1;
1613 if (!check) {
1614 argsz = sz;
1615 if (sz > MALLOC_MAXCHUNK)
1616 argsz -= mopts.malloc_guard;
1617 }
1618 }
1619 if (check) {
1620 if (sz <= MALLOC_MAXCHUNK) {
1621 if (mopts.chunk_canaries && sz > 0) {
1622 struct chunk_info *info =
1623 (struct chunk_info *)r->size;
1624 uint32_t chunknum =
1625 find_chunknum(pool, info, p, 0);
1626
1627 if (info->bits[info->offset + chunknum] < argsz)
1628 wrterror(pool, "recorded size %hu"
1629 " < %zu",
1630 info->bits[info->offset + chunknum],
1631 argsz);
1632 } else {
1633 if (sz < argsz)
1634 wrterror(pool, "chunk size %zu < %zu",
1635 sz, argsz);
1636 }
1637 } else if (sz - mopts.malloc_guard < argsz) {
1638 wrterror(pool, "recorded size %zu < %zu",
1639 sz - mopts.malloc_guard, argsz);
1640 }
1641 }
1642 if (sz > MALLOC_MAXCHUNK) {
1643 if (!MALLOC_MOVE_COND(sz)) {
1644 if (r->p != p)
1645 wrterror(pool, "bogus pointer %p", p);
1646 if (mopts.chunk_canaries)
1647 validate_canary(pool, p,
1648 sz - mopts.malloc_guard,
1649 PAGEROUND(sz - mopts.malloc_guard));
1650 } else {
1651 /* shifted towards the end */
1652 if (p != MALLOC_MOVE(r->p, sz))
1653 wrterror(pool, "bogus moved pointer %p", p);
1654 p = r->p;
1655 }
1656 if (mopts.malloc_guard) {
1657 if (sz < mopts.malloc_guard)
1658 wrterror(pool, "guard size");
1659 if (!mopts.malloc_freeunmap) {
1660 if (mprotect((char *)p + PAGEROUND(sz) -
1661 mopts.malloc_guard, mopts.malloc_guard,
1662 PROT_READ | PROT_WRITE))
1663 wrterror(pool, "mprotect");
1664 }
1665 STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
1666 }
1667 unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0);
1668 delete(pool, r);
1669 } else {
1670 void *tmp;
1671 u_int i;
1672
1673 /* Validate and optionally canary check */
1674 struct chunk_info *info = (struct chunk_info *)r->size;
1675 if (B2SIZE(info->bucket) != sz)
1676 wrterror(pool, "internal struct corrupt");
1677 find_chunknum(pool, info, p, mopts.chunk_canaries);
1678
1679 if (mopts.malloc_freecheck) {
1680 for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++) {
1681 tmp = pool->delayed_chunks[i];
1682 if (tmp == p)
1683 wrterror(pool,
1684 "double free %p", p);
1685 if (tmp != NULL) {
1686 size_t tmpsz;
1687
1688 r = find(pool, tmp);
1689 if (r == NULL)
1690 wrterror(pool,
1691 "bogus pointer ("
1692 "double free?) %p", tmp);
1693 REALSIZE(tmpsz, r);
1694 validate_junk(pool, tmp, tmpsz);
1695 }
1696 }
1697 }
1698
1699 if (clear && argsz > 0)
1700 explicit_bzero(p, argsz);
1701 junk_free(pool->malloc_junk, p, sz);
1702
1703 i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
1704 tmp = p;
1705 p = pool->delayed_chunks[i];
1706 if (tmp == p)
1707 wrterror(pool, "double free %p", p);
1708 pool->delayed_chunks[i] = tmp;
1709 if (p != NULL) {
1710 r = find(pool, p);
1711 if (r == NULL)
1712 wrterror(pool,
1713 "bogus pointer (double free?) %p", p);
1714 if (!mopts.malloc_freecheck) {
1715 REALSIZE(sz, r);
1716 validate_junk(pool, p, sz);
1717 }
1718 free_bytes(pool, r, p);
1719 }
1720 }
1721
1722 if (*argpool != pool) {
1723 pool->func = saved_function;
1724 *argpool = pool;
1725 }
1726 }
1727
1728 void
1729 free(void *ptr)
1730 {
1731 struct dir_info *d;
1732 int saved_errno = errno;
1733
1734 /* This is legal. */
1735 if (ptr == NULL)
1736 return;
1737
1738 d = getpool();
1739 if (d == NULL)
1740 wrterror(d, "free() called before allocation");
1741 _MALLOC_LOCK(d->mutex);
1742 d->func = "free";
1743 if (d->active++) {
1744 malloc_recurse(d);
1745 return;
1746 }
1747 ofree(&d, ptr, 0, 0, 0);
1748 d->active--;
1749 _MALLOC_UNLOCK(d->mutex);
1750 errno = saved_errno;
1751 }
1752 DEF_STRONG(free);
1753
1754 static void
1755 freezero_p(void *ptr, size_t sz)
1756 {
1757 explicit_bzero(ptr, sz);
1758 free(ptr);
1759 }
1760
1761 void
1762 freezero(void *ptr, size_t sz)
1763 {
1764 struct dir_info *d;
1765 int saved_errno = errno;
1766
1767 /* This is legal. */
1768 if (ptr == NULL)
1769 return;
1770
1771 if (!mopts.internal_funcs) {
1772 freezero_p(ptr, sz);
1773 return;
1774 }
1775
1776 d = getpool();
1777 if (d == NULL)
1778 wrterror(d, "freezero() called before allocation");
1779 _MALLOC_LOCK(d->mutex);
1780 d->func = "freezero";
1781 if (d->active++) {
1782 malloc_recurse(d);
1783 return;
1784 }
1785 ofree(&d, ptr, 1, 1, sz);
1786 d->active--;
1787 _MALLOC_UNLOCK(d->mutex);
1788 errno = saved_errno;
1789 }
1790 DEF_WEAK(freezero);
1791
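/*
 * realloc() backend: grow page-sized regions in place by mapping the
 * adjacent pages, shrink them by unmapping the tail, keep chunks that
 * still fit their bucket, and otherwise allocate-copy-free.
 */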
1792 static void *
1793 orealloc(struct dir_info **argpool, void *p, size_t newsz)
1794 {
1795 struct region_info *r;
1796 struct dir_info *pool;
1797 const char *saved_function;
1798 struct chunk_info *info;
1799 size_t oldsz, goldsz, gnewsz;
1800 void *q, *ret;
1801 uint32_t chunknum;
1802 int forced;
1803
1804 if (p == NULL)
1805 return omalloc(*argpool, newsz, 0);
1806
1807 if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1808 errno = ENOMEM;
1809 return NULL;
1810 }
1811
1812 r = findpool(p, *argpool, &pool, &saved_function);
1813
1814 REALSIZE(oldsz, r);
1815 if (oldsz <= MALLOC_MAXCHUNK) {
1816 if (DO_STATS || mopts.chunk_canaries) {
1817 info = (struct chunk_info *)r->size;
1818 chunknum = find_chunknum(pool, info, p, 0);
1819 }
1820 }
1821
1822 goldsz = oldsz;
1823 if (oldsz > MALLOC_MAXCHUNK) {
1824 if (oldsz < mopts.malloc_guard)
1825 wrterror(pool, "guard size");
1826 oldsz -= mopts.malloc_guard;
1827 }
1828
1829 gnewsz = newsz;
1830 if (gnewsz > MALLOC_MAXCHUNK)
1831 gnewsz += mopts.malloc_guard;
1832
1833 forced = mopts.malloc_realloc || pool->mmap_flag;
1834 if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && !forced) {
1835 /* First case: from n pages sized allocation to m pages sized
1836 allocation, m > n */
1837 size_t roldsz = PAGEROUND(goldsz);
1838 size_t rnewsz = PAGEROUND(gnewsz);
1839
1840 if (rnewsz < roldsz && rnewsz > roldsz / 2 &&
1841 roldsz - rnewsz < mopts.def_maxcache * MALLOC_PAGESIZE &&
1842 !mopts.malloc_guard) {
1843
1844 ret = p;
1845 goto done;
1846 }
1847
1848 if (rnewsz > roldsz) {
1849 /* try to extend existing region */
1850 if (!mopts.malloc_guard) {
1851 void *hint = (char *)r->p + roldsz;
1852 size_t needed = rnewsz - roldsz;
1853
1854 STATS_INC(pool->cheap_realloc_tries);
1855 q = MMAPA(hint, needed, MAP_FIXED |
1856 __MAP_NOREPLACE | pool->mmap_flag);
1857 if (q == hint) {
1858 STATS_ADD(pool->malloc_used, needed);
1859 if (pool->malloc_junk == 2)
1860 memset(q, SOME_JUNK, needed);
1861 r->size = gnewsz;
1862 if (r->p != p) {
1863 /* old pointer is moved */
1864 memmove(r->p, p, oldsz);
1865 p = r->p;
1866 }
1867 if (mopts.chunk_canaries)
1868 fill_canary(p, newsz,
1869 PAGEROUND(newsz));
1870 STATS_SETF(r, (*argpool)->caller);
1871 STATS_INC(pool->cheap_reallocs);
1872 ret = p;
1873 goto done;
1874 }
1875 }
1876 } else if (rnewsz < roldsz) {
1877 /* shrink number of pages */
1878 if (mopts.malloc_guard) {
1879 if (mprotect((char *)r->p + rnewsz -
1880 mopts.malloc_guard, mopts.malloc_guard,
1881 PROT_NONE))
1882 wrterror(pool, "mprotect");
1883 }
1884 if (munmap((char *)r->p + rnewsz, roldsz - rnewsz))
1885 wrterror(pool, "munmap %p", (char *)r->p +
1886 rnewsz);
1887 STATS_SUB(pool->malloc_used, roldsz - rnewsz);
1888 r->size = gnewsz;
1889 if (MALLOC_MOVE_COND(gnewsz)) {
1890 void *pp = MALLOC_MOVE(r->p, gnewsz);
1891 memmove(pp, p, newsz);
1892 p = pp;
1893 } else if (mopts.chunk_canaries)
1894 fill_canary(p, newsz, PAGEROUND(newsz));
1895 STATS_SETF(r, (*argpool)->caller);
1896 ret = p;
1897 goto done;
1898 } else {
1899 /* number of pages remains the same */
1900 void *pp = r->p;
1901
1902 r->size = gnewsz;
1903 if (MALLOC_MOVE_COND(gnewsz))
1904 pp = MALLOC_MOVE(r->p, gnewsz);
1905 if (p != pp) {
1906 memmove(pp, p, oldsz < newsz ? oldsz : newsz);
1907 p = pp;
1908 }
1909 if (p == r->p) {
1910 if (newsz > oldsz && pool->malloc_junk == 2)
1911 memset((char *)p + newsz, SOME_JUNK,
1912 rnewsz - mopts.malloc_guard -
1913 newsz);
1914 if (mopts.chunk_canaries)
1915 fill_canary(p, newsz, PAGEROUND(newsz));
1916 }
1917 STATS_SETF(r, (*argpool)->caller);
1918 ret = p;
1919 goto done;
1920 }
1921 }
1922 if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
1923 newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
1924 !forced && find_bucket(newsz) == find_bucket(oldsz)) {
1925 		/* do not reallocate if the new size fits well in the existing chunk */
1926 if (pool->malloc_junk == 2)
1927 memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1928 if (mopts.chunk_canaries) {
1929 info->bits[info->offset + chunknum] = newsz;
1930 fill_canary(p, newsz, B2SIZE(info->bucket));
1931 }
1932 if (DO_STATS)
1933 STATS_SETFN(r, chunknum, (*argpool)->caller);
1934 ret = p;
1935 } else if (newsz != oldsz || forced) {
1936 /* create new allocation */
1937 q = omalloc(pool, newsz, 0);
1938 if (q == NULL) {
1939 ret = NULL;
1940 goto done;
1941 }
1942 if (newsz != 0 && oldsz != 0)
1943 memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1944 ofree(&pool, p, 0, 0, 0);
1945 ret = q;
1946 } else {
1947 /* oldsz == newsz */
1948 if (newsz != 0)
1949 wrterror(pool, "realloc internal inconsistency");
1950 if (DO_STATS)
1951 STATS_SETFN(r, chunknum, (*argpool)->caller);
1952 ret = p;
1953 }
1954 done:
1955 if (*argpool != pool) {
1956 pool->func = saved_function;
1957 *argpool = pool;
1958 }
1959 return ret;
1960 }
1961
1962 void *
1963 realloc(void *ptr, size_t size)
1964 {
1965 struct dir_info *d;
1966 void *r;
1967 int saved_errno = errno;
1968
1969 PROLOGUE(getpool(), "realloc")
1970 SET_CALLER(d, caller(d));
1971 r = orealloc(&d, ptr, size);
1972 EPILOGUE()
1973 return r;
1974 }
1975 DEF_STRONG(realloc);
1976
1977 /*
1978 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
1979 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
1980 */
1981 #define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4))
1982
1983 void *
1984 calloc(size_t nmemb, size_t size)
1985 {
1986 struct dir_info *d;
1987 void *r;
1988 int saved_errno = errno;
1989
1990 PROLOGUE(getpool(), "calloc")
1991 SET_CALLER(d, caller(d));
1992 if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1993 nmemb > 0 && SIZE_MAX / nmemb < size) {
1994 d->active--;
1995 _MALLOC_UNLOCK(d->mutex);
1996 if (mopts.malloc_xmalloc)
1997 wrterror(d, "out of memory");
1998 errno = ENOMEM;
1999 return NULL;
2000 }
2001
2002 size *= nmemb;
2003 r = omalloc(d, size, 1);
2004 EPILOGUE()
2005 return r;
2006 }
2007 DEF_STRONG(calloc);
2008
2009 void *
2010 calloc_conceal(size_t nmemb, size_t size)
2011 {
2012 struct dir_info *d;
2013 void *r;
2014 int saved_errno = errno;
2015
2016 PROLOGUE(mopts.malloc_pool[0], "calloc_conceal")
2017 SET_CALLER(d, caller(d));
2018 if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2019 nmemb > 0 && SIZE_MAX / nmemb < size) {
2020 d->active--;
2021 _MALLOC_UNLOCK(d->mutex);
2022 if (mopts.malloc_xmalloc)
2023 wrterror(d, "out of memory");
2024 errno = ENOMEM;
2025 return NULL;
2026 }
2027
2028 size *= nmemb;
2029 r = omalloc(d, size, 1);
2030 EPILOGUE()
2031 return r;
2032 }
2033 DEF_WEAK(calloc_conceal);
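/*
 * Usage sketch (assumption, not from this file): calloc_conceal() is meant
 * for long-lived secrets; pairing it with freezero(3) keeps the data in
 * the concealed pool and wipes it on release.
 */
#if 0
static void
example_conceal(void)
{
	unsigned char *key;

	if ((key = calloc_conceal(1, 32)) == NULL)	/* 256-bit secret */
		return;
	/* ... use key ... */
	freezero(key, 32);				/* wipe, then free */
}
#endif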
2034
2035 static void *
2036 orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
2037 size_t newsize)
2038 {
2039 struct region_info *r;
2040 struct dir_info *pool;
2041 const char *saved_function;
2042 void *newptr;
2043 size_t sz;
2044
2045 if (p == NULL)
2046 return omalloc(*argpool, newsize, 1);
2047
2048 if (oldsize == newsize)
2049 return p;
2050
2051 r = findpool(p, *argpool, &pool, &saved_function);
2052
2053 REALSIZE(sz, r);
2054 if (sz <= MALLOC_MAXCHUNK) {
2055 if (mopts.chunk_canaries && sz > 0) {
2056 struct chunk_info *info = (struct chunk_info *)r->size;
2057 uint32_t chunknum = find_chunknum(pool, info, p, 0);
2058
2059 if (info->bits[info->offset + chunknum] != oldsize)
2060 wrterror(pool, "recorded size %hu != %zu",
2061 info->bits[info->offset + chunknum],
2062 oldsize);
2063 } else {
2064 if (sz < oldsize)
2065 wrterror(pool, "chunk size %zu < %zu",
2066 sz, oldsize);
2067 }
2068 } else {
2069 if (sz - mopts.malloc_guard < oldsize)
2070 wrterror(pool, "recorded size %zu < %zu",
2071 sz - mopts.malloc_guard, oldsize);
2072 if (oldsize < (sz - mopts.malloc_guard) / 2)
2073 wrterror(pool,
2074 "recorded size %zu inconsistent with %zu",
2075 sz - mopts.malloc_guard, oldsize);
2076 }
2077
2078 newptr = omalloc(pool, newsize, 0);
2079 if (newptr == NULL)
2080 goto done;
2081
2082 if (newsize > oldsize) {
2083 memcpy(newptr, p, oldsize);
2084 memset((char *)newptr + oldsize, 0, newsize - oldsize);
2085 } else
2086 memcpy(newptr, p, newsize);
2087
2088 ofree(&pool, p, 1, 0, oldsize);
2089
2090 done:
2091 if (*argpool != pool) {
2092 pool->func = saved_function;
2093 *argpool = pool;
2094 }
2095
2096 return newptr;
2097 }
2098
2099 static void *
2100 recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
2101 {
2102 size_t oldsize, newsize;
2103 void *newptr;
2104
2105 if (ptr == NULL)
2106 return calloc(newnmemb, size);
2107
2108 if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2109 newnmemb > 0 && SIZE_MAX / newnmemb < size) {
2110 errno = ENOMEM;
2111 return NULL;
2112 }
2113 newsize = newnmemb * size;
2114
2115 if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2116 oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
2117 errno = EINVAL;
2118 return NULL;
2119 }
2120 oldsize = oldnmemb * size;
2121
2122 /*
2123 	 * Don't bother reallocating if we're only shrinking a little;
2124 	 * a run of small shrink steps will never release memory, oh well.
2125 */
2126 if (newsize <= oldsize) {
2127 size_t d = oldsize - newsize;
2128
2129 if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
2130 memset((char *)ptr + newsize, 0, d);
2131 return ptr;
2132 }
2133 }
2134
2135 newptr = malloc(newsize);
2136 if (newptr == NULL)
2137 return NULL;
2138
2139 if (newsize > oldsize) {
2140 memcpy(newptr, ptr, oldsize);
2141 memset((char *)newptr + oldsize, 0, newsize - oldsize);
2142 } else
2143 memcpy(newptr, ptr, newsize);
2144
2145 explicit_bzero(ptr, oldsize);
2146 free(ptr);
2147
2148 return newptr;
2149 }
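/*
 * Worked example of the shrink heuristic above (illustrative numbers,
 * assuming 4096-byte pages): shrinking from oldsize = 8192 to
 * newsize = 7680 gives d = 512, which is below both oldsize / 2 and
 * MALLOC_PAGESIZE, so the tail is zeroed and the original pointer is
 * returned.  Shrinking from 8192 to 2048 (d = 6144) fails the test and
 * allocates a fresh, smaller block instead.
 */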
2150
2151 void *
2152 recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
2153 {
2154 struct dir_info *d;
2155 size_t oldsize = 0, newsize;
2156 void *r;
2157 int saved_errno = errno;
2158
2159 if (!mopts.internal_funcs)
2160 return recallocarray_p(ptr, oldnmemb, newnmemb, size);
2161
2162 PROLOGUE(getpool(), "recallocarray")
2163 SET_CALLER(d, caller(d));
2164
2165 if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2166 newnmemb > 0 && SIZE_MAX / newnmemb < size) {
2167 d->active--;
2168 _MALLOC_UNLOCK(d->mutex);
2169 if (mopts.malloc_xmalloc)
2170 wrterror(d, "out of memory");
2171 errno = ENOMEM;
2172 return NULL;
2173 }
2174 newsize = newnmemb * size;
2175
2176 if (ptr != NULL) {
2177 if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2178 oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
2179 d->active--;
2180 _MALLOC_UNLOCK(d->mutex);
2181 errno = EINVAL;
2182 return NULL;
2183 }
2184 oldsize = oldnmemb * size;
2185 }
2186
2187 r = orecallocarray(&d, ptr, oldsize, newsize);
2188 EPILOGUE()
2189 return r;
2190 }
2191 DEF_WEAK(recallocarray);
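/*
 * Usage sketch (assumption, not from this file; 'struct entry' and
 * grow_table() are hypothetical): recallocarray() grows a zero-filled
 * array and wipes whatever it leaves behind.  The old element count must
 * be accurate, since orecallocarray() cross-checks it against the
 * recorded allocation size.
 */
#if 0
struct entry { int id; char name[32]; };

static struct entry *
grow_table(struct entry *tbl, size_t oldn, size_t newn)
{
	struct entry *ntbl;

	/* New cells arrive zeroed; if the block moves, the old one is wiped. */
	ntbl = recallocarray(tbl, oldn, newn, sizeof(*tbl));
	if (ntbl == NULL)
		return NULL;	/* tbl is still valid on failure */
	return ntbl;
}
#endif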
2192
2193 static void *
2194 mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
2195 {
2196 char *p, *q;
2197
2198 if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
2199 wrterror(d, "mapalign bad alignment");
2200 if (sz != PAGEROUND(sz))
2201 wrterror(d, "mapalign round");
2202
2203 /* Allocate sz + alignment bytes of memory, which must include a
2204 	 * subrange of sz bytes that is properly aligned.  Unmap the
2205 * other bytes, and then return that subrange.
2206 */
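	/*
	 * Worked example (illustrative, assuming 4096-byte pages): for
	 * sz = 8192 and alignment = 16384 we map 24576 bytes, round the
	 * returned address up to the next 16384-byte boundary, munmap the
	 * unaligned head, and munmap whatever follows the 8192-byte
	 * subrange, leaving exactly the aligned region.
	 */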
2207
2208 /* We need sz + alignment to fit into a size_t. */
2209 if (alignment > SIZE_MAX - sz)
2210 return MAP_FAILED;
2211
2212 p = map(d, sz + alignment, zero_fill);
2213 if (p == MAP_FAILED)
2214 return MAP_FAILED;
2215 q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
2216 if (q != p) {
2217 if (munmap(p, q - p))
2218 wrterror(d, "munmap %p", p);
2219 }
2220 if (munmap(q + sz, alignment - (q - p)))
2221 wrterror(d, "munmap %p", q + sz);
2222 STATS_SUB(d->malloc_used, alignment);
2223
2224 return q;
2225 }
2226
2227 static void *
2228 omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill)
2229 {
2230 size_t psz;
2231 void *p, *caller = NULL;
2232
2233 /* If between half a page and a page, avoid MALLOC_MOVE. */
2234 if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
2235 sz = MALLOC_PAGESIZE;
2236 if (alignment <= MALLOC_PAGESIZE) {
2237 size_t pof2;
2238 /*
2239 * max(size, alignment) rounded up to power of 2 is enough
2240 * to assure the requested alignment. Large regions are
2241 * always page aligned.
2242 */
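		/*
		 * Illustrative example: a request for sz = 100 with
		 * alignment = 64 becomes a 128-byte chunk allocation,
		 * which per the reasoning above is sufficiently aligned
		 * for the 64-byte requirement.
		 */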
2243 if (sz < alignment)
2244 sz = alignment;
2245 if (sz < MALLOC_PAGESIZE) {
2246 pof2 = MALLOC_MINSIZE;
2247 while (pof2 < sz)
2248 pof2 <<= 1;
2249 } else
2250 pof2 = sz;
2251 return omalloc(pool, pof2, zero_fill);
2252 }
2253
2254 if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
2255 errno = ENOMEM;
2256 return NULL;
2257 }
2258
2259 if (sz < MALLOC_PAGESIZE)
2260 sz = MALLOC_PAGESIZE;
2261 sz += mopts.malloc_guard;
2262 psz = PAGEROUND(sz);
2263
2264 p = mapalign(pool, alignment, psz, zero_fill);
2265 if (p == MAP_FAILED) {
2266 errno = ENOMEM;
2267 return NULL;
2268 }
2269
2270 #ifdef MALLOC_STATS
2271 if (DO_STATS)
2272 caller = pool->caller;
2273 #endif
2274 if (insert(pool, p, sz, caller)) {
2275 unmap(pool, p, psz, 0);
2276 errno = ENOMEM;
2277 return NULL;
2278 }
2279
2280 if (mopts.malloc_guard) {
2281 if (mprotect((char *)p + psz - mopts.malloc_guard,
2282 mopts.malloc_guard, PROT_NONE))
2283 wrterror(pool, "mprotect");
2284 STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
2285 }
2286
2287 if (pool->malloc_junk == 2) {
2288 if (zero_fill)
2289 memset((char *)p + sz - mopts.malloc_guard,
2290 SOME_JUNK, psz - sz);
2291 else
2292 memset(p, SOME_JUNK, psz - mopts.malloc_guard);
2293 } else if (mopts.chunk_canaries)
2294 fill_canary(p, sz - mopts.malloc_guard,
2295 psz - mopts.malloc_guard);
2296
2297 return p;
2298 }
2299
2300 int
2301 posix_memalign(void **memptr, size_t alignment, size_t size)
2302 {
2303 struct dir_info *d;
2304 int res, saved_errno = errno;
2305 void *r;
2306
2307 /* Make sure that alignment is a large enough power of 2. */
2308 if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
2309 return EINVAL;
2310
2311 d = getpool();
2312 if (d == NULL) {
2313 _malloc_init(0);
2314 d = getpool();
2315 }
2316 _MALLOC_LOCK(d->mutex);
2317 d->func = "posix_memalign";
2318 if (d->active++) {
2319 malloc_recurse(d);
2320 goto err;
2321 }
2322 SET_CALLER(d, caller(d));
2323 r = omemalign(d, alignment, size, 0);
2324 d->active--;
2325 _MALLOC_UNLOCK(d->mutex);
2326 if (r == NULL) {
2327 if (mopts.malloc_xmalloc)
2328 wrterror(d, "out of memory");
2329 goto err;
2330 }
2331 errno = saved_errno;
2332 *memptr = r;
2333 return 0;
2334
2335 err:
2336 res = errno;
2337 errno = saved_errno;
2338 return res;
2339 }
2340 DEF_STRONG(posix_memalign);
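/*
 * Usage sketch (assumption, not from this file; aligned_buffer() is
 * hypothetical): posix_memalign() reports failure through its return
 * value rather than errno, so callers check the result directly.
 */
#if 0
static void *
aligned_buffer(size_t len)
{
	void *p;

	/* 64-byte alignment, e.g. for a cache-line-sized buffer. */
	if (posix_memalign(&p, 64, len) != 0)
		return NULL;
	return p;
}
#endif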
2341
2342 void *
2343 aligned_alloc(size_t alignment, size_t size)
2344 {
2345 struct dir_info *d;
2346 int saved_errno = errno;
2347 void *r;
2348
2349 /* Make sure that alignment is a positive power of 2. */
2350 if (((alignment - 1) & alignment) != 0 || alignment == 0) {
2351 errno = EINVAL;
2352 return NULL;
2353 }
2354 /* Per spec, size should be a multiple of alignment */
2355 if ((size & (alignment - 1)) != 0) {
2356 errno = EINVAL;
2357 return NULL;
2358 }
2359
2360 PROLOGUE(getpool(), "aligned_alloc")
2361 SET_CALLER(d, caller(d));
2362 r = omemalign(d, alignment, size, 0);
2363 EPILOGUE()
2364 return r;
2365 }
2366 DEF_STRONG(aligned_alloc);
2367
2368 #ifdef MALLOC_STATS
2369
2370 static int
2371 btcmp(const struct btnode *e1, const struct btnode *e2)
2372 {
2373 return memcmp(e1->caller, e2->caller, sizeof(e1->caller));
2374 }
2375
2376 RBT_GENERATE(btshead, btnode, entry, btcmp);
2377
2378 static void*
2379 store_caller(struct dir_info *d, struct btnode *f)
2380 {
2381 struct btnode *p;
2382
2383 if (DO_STATS == 0 || d->btnodes == MAP_FAILED)
2384 return NULL;
2385
2386 p = RBT_FIND(btshead, &d->btraces, f);
2387 if (p != NULL)
2388 return p;
2389 if (d->btnodes == NULL ||
2390 d->btnodesused >= MALLOC_PAGESIZE / sizeof(struct btnode)) {
2391 d->btnodes = map(d, MALLOC_PAGESIZE, 0);
2392 if (d->btnodes == MAP_FAILED)
2393 return NULL;
2394 d->btnodesused = 0;
2395 }
2396 p = &d->btnodes[d->btnodesused++];
2397 memcpy(p->caller, f->caller, sizeof(p->caller[0]) * DO_STATS);
2398 RBT_INSERT(btshead, &d->btraces, p);
2399 return p;
2400 }
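/*
 * Note (derived from the code above): store_caller() deduplicates saved
 * backtraces.  Identical caller arrays share a single btnode, and nodes
 * are handed out from page-sized slabs mapped on demand, so per-allocation
 * bookkeeping stays cheap when DO_STATS is enabled.
 */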
2401
2402 static void fabstorel(const void *, char *, size_t);
2403
2404 static void
2405 print_chunk_details(struct dir_info *pool, void *p, size_t sz, size_t index)
2406 {
2407 struct region_info *r;
2408 struct chunk_info *chunkinfo;
2409 struct btnode* btnode;
2410 uint32_t chunknum;
2411 int frame;
2412 char buf1[128];
2413 char buf2[128];
2414 const char *msg = "";
2415
2416 r = find(pool, p);
2417 chunkinfo = (struct chunk_info *)r->size;
2418 chunknum = find_chunknum(pool, chunkinfo, p, 0);
2419 btnode = (struct btnode *)r->f[chunknum];
2420 frame = DO_STATS - 1;
2421 if (btnode != NULL)
2422 fabstorel(btnode->caller[frame], buf1, sizeof(buf1));
2423 strlcpy(buf2, ". 0x0", sizeof(buf2));
2424 if (chunknum > 0) {
2425 chunknum--;
2426 btnode = (struct btnode *)r->f[chunknum];
2427 if (btnode != NULL)
2428 fabstorel(btnode->caller[frame], buf2, sizeof(buf2));
2429 if (CHUNK_FREE(chunkinfo, chunknum))
2430 msg = " (now free)";
2431 }
2432
2433 wrterror(pool,
2434 "write to free chunk %p[%zu..%zu]@%zu allocated at %s "
2435 "(preceding chunk %p allocated at %s%s)",
2436 p, index * sizeof(uint64_t), (index + 1) * sizeof(uint64_t) - 1,
2437 sz, buf1, p - sz, buf2, msg);
2438 }
2439
2440 static void
2441 ulog(const char *format, ...)
2442 {
2443 va_list ap;
2444 static char* buf;
2445 static size_t filled;
2446 int len;
2447
2448 if (buf == NULL)
2449 buf = MMAP(KTR_USER_MAXLEN, 0);
2450 if (buf == MAP_FAILED)
2451 return;
2452
2453 va_start(ap, format);
2454 len = vsnprintf(buf + filled, KTR_USER_MAXLEN - filled, format, ap);
2455 va_end(ap);
2456 if (len < 0)
2457 return;
2458 if ((size_t)len > KTR_USER_MAXLEN - filled)
2459 len = KTR_USER_MAXLEN - filled;
2460 filled += len;
2461 if (filled > 0) {
2462 if (filled == KTR_USER_MAXLEN || buf[filled - 1] == '\n') {
2463 utrace("malloc", buf, filled);
2464 filled = 0;
2465 }
2466 }
2467 }
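/*
 * Note (partly an assumption, not stated in this file): ulog() accumulates
 * text until a newline or a full KTR_USER_MAXLEN buffer, then emits it as
 * a utrace(2) record labelled "malloc"; such records can be recovered from
 * a ktrace(1) dump with kdump(1).
 */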
2468
2469 struct malloc_leak {
2470 void *f;
2471 size_t total_size;
2472 int count;
2473 };
2474
2475 struct leaknode {
2476 RBT_ENTRY(leaknode) entry;
2477 struct malloc_leak d;
2478 };
2479
2480 static inline int
2481 leakcmp(const struct leaknode *e1, const struct leaknode *e2)
2482 {
2483 return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
2484 }
2485
2486 RBT_HEAD(leaktree, leaknode);
2487 RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
2488 RBT_GENERATE(leaktree, leaknode, entry, leakcmp);
2489
2490 static void
2491 wrtwarning(const char *func, char *msg, ...)
2492 {
2493 int saved_errno = errno;
2494 va_list ap;
2495
2496 dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
2497 getpid(), func != NULL ? func : "unknown");
2498 va_start(ap, msg);
2499 vdprintf(STDERR_FILENO, msg, ap);
2500 va_end(ap);
2501 dprintf(STDERR_FILENO, "\n");
2502
2503 errno = saved_errno;
2504 }
2505
2506 static void
2507 putleakinfo(struct leaktree *leaks, void *f, size_t sz, int cnt)
2508 {
2509 struct leaknode key, *p;
2510 static struct leaknode *page;
2511 static unsigned int used;
2512
2513 if (cnt == 0 || page == MAP_FAILED)
2514 return;
2515
2516 key.d.f = f;
2517 p = RBT_FIND(leaktree, leaks, &key);
2518 if (p == NULL) {
2519 if (page == NULL ||
2520 used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
2521 page = MMAP(MALLOC_PAGESIZE, 0);
2522 if (page == MAP_FAILED) {
2523 wrtwarning(__func__, strerror(errno));
2524 return;
2525 }
2526 used = 0;
2527 }
2528 p = &page[used++];
2529 p->d.f = f;
2530 p->d.total_size = sz * cnt;
2531 p->d.count = cnt;
2532 RBT_INSERT(leaktree, leaks, p);
2533 } else {
2534 p->d.total_size += sz * cnt;
2535 p->d.count += cnt;
2536 }
2537 }
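/*
 * Note (derived from the code above): putleakinfo() aggregates still-live
 * allocations by recorded caller, summing byte totals and counts in a
 * red-black tree whose nodes come from page-sized slabs; these per-caller
 * totals are what dump_leaks() prints later.
 */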
2538
2539 static void
2540 fabstorel(const void *f, char *buf, size_t size)
2541 {
2542 Dl_info info;
2543 const char *object = ".";
2544 const char *caller;
2545
2546 caller = f;
2547 if (caller != NULL && dladdr(f, &info) != 0) {
2548 caller -= (uintptr_t)info.dli_fbase;
2549 object = info.dli_fname;
2550 }
2551 snprintf(buf, size, "%s %p", object, caller);
2552 }
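/*
 * Note (derived from the code above): fabstorel() formats a caller as
 * "object offset", with the offset taken relative to the object's load
 * base, so the leak report lines can be fed straight to addr2line(1) as
 * the report itself suggests.
 */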
2553
2554 static void
2555 dump_leak(struct leaknode *p)
2556 {
2557 int i;
2558 char buf[128];
2559
2560 if (p->d.f == NULL) {
2561 fabstorel(NULL, buf, sizeof(buf));
2562 ulog("%18p %7zu %6u %6zu addr2line -e %s\n",
2563 p->d.f, p->d.total_size, p->d.count,
2564 p->d.total_size / p->d.count, buf);
2565 return;
2566 }
2567
2568 for (i = 0; i < DO_STATS; i++) {
2569 const char *abscaller;
2570
2571 abscaller = ((struct btnode*)p->d.f)->caller[i];
2572 if (abscaller == NULL)
2573 break;
2574 fabstorel(abscaller, buf, sizeof(buf));
2575 if (i == 0)
2576 ulog("%18p %7zu %6u %6zu addr2line -e %s\n",
2577 abscaller, p->d.total_size, p->d.count,
2578 p->d.total_size / p->d.count, buf);
2579 else
2580 ulog("%*p %*s %6s %6s addr2line -e %s\n",
2581 i + 18, abscaller, 7 - i, "-", "-", "-", buf);
2582 }
2583 }
2584
2585 static void
2586 dump_leaks(struct leaktree *leaks)
2587 {
2588 struct leaknode *p;
2589
2590 ulog("Leak report:\n");
2591 ulog(" f sum # avg\n");
2592
2593 RBT_FOREACH(p, leaktree, leaks)
2594 dump_leak(p);
2595 }
2596
2597 static void
2598 dump_chunk(struct leaktree* leaks, struct chunk_info *p, void **f,
2599 int fromfreelist)
2600 {
2601 while (p != NULL) {
2602 if (mopts.malloc_verbose)
2603 ulog("chunk %18p %18p %4zu %d/%d\n",
2604 p->page, NULL,
2605 B2SIZE(p->bucket), p->free, p->total);
2606 if (!fromfreelist) {
2607 size_t i, sz = B2SIZE(p->bucket);
2608 for (i = 0; i < p->total; i++) {
2609 if (!CHUNK_FREE(p, i))
2610 putleakinfo(leaks, f[i], sz, 1);
2611 }
2612 break;
2613 }
2614 p = LIST_NEXT(p, entries);
2615 if (mopts.malloc_verbose && p != NULL)
2616 ulog(" ->");
2617 }
2618 }
2619
2620 static void
2621 dump_free_chunk_info(struct dir_info *d, struct leaktree *leaks)
2622 {
2623 u_int i, j, count;
2624 struct chunk_info *p;
2625
2626 ulog("Free chunk structs:\n");
2627 ulog("Bkt) #CI page"
2628 " f size free/n\n");
2629 for (i = 0; i <= BUCKETS; i++) {
2630 count = 0;
2631 LIST_FOREACH(p, &d->chunk_info_list[i], entries)
2632 count++;
2633 for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
2634 p = LIST_FIRST(&d->chunk_dir[i][j]);
2635 if (p == NULL && count == 0)
2636 continue;
2637 if (j == 0)
2638 ulog("%3d) %3d ", i, count);
2639 else
2640 ulog(" ");
2641 if (p != NULL)
2642 dump_chunk(leaks, p, NULL, 1);
2643 else
2644 ulog(".\n");
2645 }
2646 }
2647
2648 }
2649
2650 static void
2651 dump_free_page_info(struct dir_info *d)
2652 {
2653 struct smallcache *cache;
2654 size_t i, total = 0;
2655
2656 ulog("Cached in small cache:\n");
2657 for (i = 0; i < MAX_SMALLCACHEABLE_SIZE; i++) {
2658 cache = &d->smallcache[i];
2659 if (cache->length != 0)
2660 ulog("%zu(%u): %u = %zu\n", i + 1, cache->max,
2661 cache->length, cache->length * (i + 1));
2662 total += cache->length * (i + 1);
2663 }
2664
2665 ulog("Cached in big cache: %zu/%zu\n", d->bigcache_used,
2666 d->bigcache_size);
2667 for (i = 0; i < d->bigcache_size; i++) {
2668 if (d->bigcache[i].psize != 0)
2669 ulog("%zu: %zu\n", i, d->bigcache[i].psize);
2670 total += d->bigcache[i].psize;
2671 }
2672 ulog("Free pages cached: %zu\n", total);
2673 }
2674
2675 static void
2676 malloc_dump1(int poolno, struct dir_info *d, struct leaktree *leaks)
2677 {
2678 size_t i, realsize;
2679
2680 if (mopts.malloc_verbose) {
2681 ulog("Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
2682 ulog("MT=%d J=%d Fl=%#x\n", d->malloc_mt, d->malloc_junk,
2683 d->mmap_flag);
2684 ulog("Region slots free %zu/%zu\n",
2685 d->regions_free, d->regions_total);
2686 ulog("Inserts %zu/%zu\n", d->inserts, d->insert_collisions);
2687 ulog("Deletes %zu/%zu\n", d->deletes, d->delete_moves);
2688 ulog("Cheap reallocs %zu/%zu\n",
2689 d->cheap_reallocs, d->cheap_realloc_tries);
2690 ulog("In use %zu\n", d->malloc_used);
2691 ulog("Guarded %zu\n", d->malloc_guarded);
2692 dump_free_chunk_info(d, leaks);
2693 dump_free_page_info(d);
2694 ulog("Hash table:\n");
2695 ulog("slot) hash d type page "
2696 "f size [free/n]\n");
2697 }
2698 for (i = 0; i < d->regions_total; i++) {
2699 if (d->r[i].p != NULL) {
2700 size_t h = hash(d->r[i].p) &
2701 (d->regions_total - 1);
2702 if (mopts.malloc_verbose)
2703 ulog("%4zx) #%4zx %zd ",
2704 i, h, h - i);
2705 REALSIZE(realsize, &d->r[i]);
2706 if (realsize > MALLOC_MAXCHUNK) {
2707 putleakinfo(leaks, d->r[i].f, realsize, 1);
2708 if (mopts.malloc_verbose)
2709 ulog("pages %18p %18p %zu\n", d->r[i].p,
2710 d->r[i].f, realsize);
2711 } else
2712 dump_chunk(leaks,
2713 (struct chunk_info *)d->r[i].size,
2714 d->r[i].f, 0);
2715 }
2716 }
2717 if (mopts.malloc_verbose)
2718 ulog("\n");
2719 }
2720
2721 static void
2722 malloc_dump0(int poolno, struct dir_info *pool, struct leaktree *leaks)
2723 {
2724 int i;
2725 void *p;
2726 struct region_info *r;
2727
2728 if (pool == NULL || pool->r == NULL)
2729 return;
2730 for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
2731 p = pool->delayed_chunks[i];
2732 if (p == NULL)
2733 continue;
2734 r = find(pool, p);
2735 if (r == NULL)
2736 wrterror(pool, "bogus pointer in malloc_dump %p", p);
2737 free_bytes(pool, r, p);
2738 pool->delayed_chunks[i] = NULL;
2739 }
2740 malloc_dump1(poolno, pool, leaks);
2741 }
2742
2743 void
2744 malloc_dump(void)
2745 {
2746 u_int i;
2747 int saved_errno = errno;
2748
2749 /* XXX leak when run multiple times */
2750 struct leaktree leaks = RBT_INITIALIZER(&leaks);
2751
2752 for (i = 0; i < mopts.malloc_mutexes; i++)
2753 malloc_dump0(i, mopts.malloc_pool[i], &leaks);
2754
2755 dump_leaks(&leaks);
2756 ulog("\n");
2757 errno = saved_errno;
2758 }
2759 DEF_WEAK(malloc_dump);
2760
2761 static void
2762 malloc_exit(void)
2763 {
2764 int save_errno = errno;
2765
2766 ulog("******** Start dump %s *******\n", __progname);
2767 ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%#x cache=%u "
2768 "G=%zu\n",
2769 mopts.malloc_mutexes,
2770 mopts.internal_funcs, mopts.malloc_freecheck,
2771 mopts.malloc_freeunmap, mopts.def_malloc_junk,
2772 mopts.malloc_realloc, mopts.malloc_xmalloc,
2773 mopts.chunk_canaries, mopts.def_maxcache,
2774 mopts.malloc_guard);
2775
2776 malloc_dump();
2777 ulog("******** End dump %s *******\n", __progname);
2778 errno = save_errno;
2779 }
2780
2781 #endif /* MALLOC_STATS */
2782