1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1987, 1991, 1993
5 * The Regents of the University of California.
6 * Copyright (c) 2005-2009 Robert N. M. Watson
7 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 /*
36 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
37 * based on memory types. The back end is implemented using the UMA(9) zone
38 * allocator. A set of fixed-size buckets is used for smaller allocations,
39 * and a special UMA allocation interface is used for larger allocations.
40 * Callers declare memory types, and statistics are maintained independently
41 * for each memory type. Statistics are maintained per-CPU for performance
42 * reasons. See malloc(9) and comments in malloc.h for a detailed
43 * description.
44 */
45
46 #include <sys/cdefs.h>
47 #include "opt_ddb.h"
48 #include "opt_vm.h"
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/asan.h>
53 #include <sys/kdb.h>
54 #include <sys/kernel.h>
55 #include <sys/lock.h>
56 #include <sys/malloc.h>
57 #include <sys/msan.h>
58 #include <sys/mutex.h>
59 #include <sys/vmmeter.h>
60 #include <sys/proc.h>
61 #include <sys/queue.h>
62 #include <sys/sbuf.h>
63 #include <sys/smp.h>
64 #include <sys/sysctl.h>
65 #include <sys/time.h>
66 #include <sys/vmem.h>
67 #ifdef EPOCH_TRACE
68 #include <sys/epoch.h>
69 #endif
70
71 #include <vm/vm.h>
72 #include <vm/pmap.h>
73 #include <vm/vm_domainset.h>
74 #include <vm/vm_pageout.h>
75 #include <vm/vm_param.h>
76 #include <vm/vm_kern.h>
77 #include <vm/vm_extern.h>
78 #include <vm/vm_map.h>
79 #include <vm/vm_page.h>
80 #include <vm/vm_phys.h>
81 #include <vm/vm_pagequeue.h>
82 #include <vm/uma.h>
83 #include <vm/uma_int.h>
84 #include <vm/uma_dbg.h>
85
86 #ifdef DEBUG_MEMGUARD
87 #include <vm/memguard.h>
88 #endif
89 #ifdef DEBUG_REDZONE
90 #include <vm/redzone.h>
91 #endif
92
93 #if defined(INVARIANTS) && defined(__i386__)
94 #include <machine/cpu.h>
95 #endif
96
97 #include <ddb/ddb.h>
98
99 #ifdef KDTRACE_HOOKS
100 #include <sys/dtrace_bsd.h>
101
102 bool __read_frequently dtrace_malloc_enabled;
103 dtrace_malloc_probe_func_t __read_mostly dtrace_malloc_probe;
104 #endif
105
106 #if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) || \
107 defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
108 #define MALLOC_DEBUG 1
109 #endif
110
111 #if defined(KASAN) || defined(DEBUG_REDZONE)
112 #define DEBUG_REDZONE_ARG_DEF , unsigned long osize
113 #define DEBUG_REDZONE_ARG , osize
114 #else
115 #define DEBUG_REDZONE_ARG_DEF
116 #define DEBUG_REDZONE_ARG
117 #endif
118
119 /*
120 * When realloc() is called, if the new size is sufficiently smaller than
121 * the old size, realloc() will allocate a new, smaller block to avoid
122 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
123 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
124 */
125 #ifndef REALLOC_FRACTION
126 #define REALLOC_FRACTION 1 /* new block if <= half the size */
127 #endif
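/*
 * Worked example (illustrative): with the default REALLOC_FRACTION of 1,
 * shrinking a 1024-byte allocation causes realloc() to copy into a new,
 * smaller block only once the requested size drops to 512 bytes
 * (1024 >> 1) or less; a request for, say, 600 bytes keeps the original
 * block.  See the reuse test in realloc() below.
 */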
128
129 /*
130 * Centrally define some common malloc types.
131 */
132 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
133 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
134 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
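/*
 * Illustrative sketch (hypothetical consumer, not part of this file): a
 * typical malloc(9) user defines its own memory type and pairs every
 * allocation with a free of the same type, e.g.:
 *
 *	MALLOC_DEFINE(M_FOODEV, "foodev", "foo driver state");
 *
 *	struct foo_softc *sc;
 *	sc = malloc(sizeof(*sc), M_FOODEV, M_WAITOK | M_ZERO);
 *	...
 *	free(sc, M_FOODEV);
 *
 * The short name ("foodev") is what appears in vmstat -m and in the
 * kern.malloc_stats sysctl exported later in this file.
 */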
135
136 static struct malloc_type *kmemstatistics;
137 static int kmemcount;
138
139 #define KMEM_ZSHIFT 4
140 #define KMEM_ZBASE 16
141 #define KMEM_ZMASK (KMEM_ZBASE - 1)
142
143 #define KMEM_ZMAX 65536
144 #define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT)
145 static uint8_t kmemsize[KMEM_ZSIZE + 1];
146
147 #ifndef MALLOC_DEBUG_MAXZONES
148 #define MALLOC_DEBUG_MAXZONES 1
149 #endif
150 static int numzones = MALLOC_DEBUG_MAXZONES;
151
152 /*
153 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
154 * of various sizes.
155 *
156 * Warning: the layout of the struct is duplicated in libmemstat for KVM support.
157 *
158 * XXX: The comment here used to read "These won't be powers of two for
159 * long." It's possible that a significant amount of wasted memory could be
160 * recovered by tuning the sizes of these buckets.
161 */
162 struct {
163 int kz_size;
164 const char *kz_name;
165 uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
166 } kmemzones[] = {
167 {16, "malloc-16", },
168 {32, "malloc-32", },
169 {64, "malloc-64", },
170 {128, "malloc-128", },
171 {256, "malloc-256", },
172 {384, "malloc-384", },
173 {512, "malloc-512", },
174 {1024, "malloc-1024", },
175 {2048, "malloc-2048", },
176 {4096, "malloc-4096", },
177 {8192, "malloc-8192", },
178 {16384, "malloc-16384", },
179 {32768, "malloc-32768", },
180 {65536, "malloc-65536", },
181 {0, NULL},
182 };
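/*
 * Illustrative sketch of how a request size maps onto these buckets,
 * mirroring the lookup done in malloc() below (not additional allocator
 * code):
 *
 *	size_t req = 100;
 *	if (req & KMEM_ZMASK)				// round up to a
 *		req = (req & ~KMEM_ZMASK) + KMEM_ZBASE;	// multiple of 16 -> 112
 *	int indx = kmemsize[req >> KMEM_ZSHIFT];	// kmemsize[7] == 3
 *
 * kmemzones[3].kz_size == 128, so the request is served from the
 * "malloc-128" zone and malloc_usable_size() would report 128 bytes.
 */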
183
184 u_long vm_kmem_size;
185 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
186 "Size of kernel memory");
187
188 static u_long kmem_zmax = KMEM_ZMAX;
189 SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
190 "Maximum allocation size that malloc(9) would use UMA as backend");
191
192 static u_long vm_kmem_size_min;
193 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
194 "Minimum size of kernel memory");
195
196 static u_long vm_kmem_size_max;
197 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
198 "Maximum size of kernel memory");
199
200 static u_int vm_kmem_size_scale;
201 SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
202 "Scale factor for kernel memory size");
203
204 static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
205 SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
206 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
207 sysctl_kmem_map_size, "LU", "Current kmem allocation size");
208
209 static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
210 SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
211 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
212 sysctl_kmem_map_free, "LU", "Free space in kmem");
213
214 static SYSCTL_NODE(_vm, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
215 "Malloc information");
216
217 static u_int vm_malloc_zone_count = nitems(kmemzones);
218 SYSCTL_UINT(_vm_malloc, OID_AUTO, zone_count,
219 CTLFLAG_RD, &vm_malloc_zone_count, 0,
220 "Number of malloc zones");
221
222 static int sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS);
223 SYSCTL_PROC(_vm_malloc, OID_AUTO, zone_sizes,
224 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
225 sysctl_vm_malloc_zone_sizes, "S", "Zone sizes used by malloc");
226
227 /*
228 * The malloc_mtx protects the kmemstatistics linked list.
229 */
230 struct mtx malloc_mtx;
231
232 static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);
233
234 #if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
235 static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
236 "Kernel malloc debugging options");
237 #endif
238
239 /*
240 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
241 * the caller specifies M_NOWAIT. If set to 0, no failures are caused.
242 */
243 #ifdef MALLOC_MAKE_FAILURES
244 static int malloc_failure_rate;
245 static int malloc_nowait_count;
246 static int malloc_failure_count;
247 SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
248 &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
249 SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
250 &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
251 #endif
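/*
 * Usage note (illustrative): on a kernel built with MALLOC_MAKE_FAILURES,
 * setting, e.g., "sysctl debug.malloc.failure_rate=10" makes every tenth
 * M_NOWAIT allocation fail, which is a simple way to exercise error paths
 * in callers; debug.malloc.failure_count reports how many failures were
 * injected.
 */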
252
253 static int
254 sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
255 {
256 u_long size;
257
258 size = uma_size();
259 return (sysctl_handle_long(oidp, &size, 0, req));
260 }
261
262 static int
263 sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
264 {
265 u_long size, limit;
266
267 /* The sysctl is unsigned, implement as a saturation value. */
268 size = uma_size();
269 limit = uma_limit();
270 if (size > limit)
271 size = 0;
272 else
273 size = limit - size;
274 return (sysctl_handle_long(oidp, &size, 0, req));
275 }
276
277 static int
278 sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS)
279 {
280 int sizes[nitems(kmemzones)];
281 int i;
282
283 for (i = 0; i < nitems(kmemzones); i++) {
284 sizes[i] = kmemzones[i].kz_size;
285 }
286
287 return (SYSCTL_OUT(req, &sizes, sizeof(sizes)));
288 }
289
290 /*
291 * malloc(9) uma zone separation -- sub-page buffer overruns in one
292 * malloc type will affect only a subset of other malloc types.
293 */
294 #if MALLOC_DEBUG_MAXZONES > 1
295 static void
296 tunable_set_numzones(void)
297 {
298
299 TUNABLE_INT_FETCH("debug.malloc.numzones",
300 &numzones);
301
302 /* Sanity check the number of malloc uma zones. */
303 if (numzones <= 0)
304 numzones = 1;
305 if (numzones > MALLOC_DEBUG_MAXZONES)
306 numzones = MALLOC_DEBUG_MAXZONES;
307 }
308 SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
309 SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
310 &numzones, 0, "Number of malloc uma subzones");
311
312 /*
313 * Any number that changes regularly is an okay choice for the
314 * offset. Build numbers are pretty good if you have them.
315 */
316 static u_int zone_offset = __FreeBSD_version;
317 TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
318 SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
319 &zone_offset, 0, "Separate malloc types by examining the "
320 "Nth character in the malloc type short description.");
321
322 static void
323 mtp_set_subzone(struct malloc_type *mtp)
324 {
325 struct malloc_type_internal *mtip;
326 const char *desc;
327 size_t len;
328 u_int val;
329
330 mtip = &mtp->ks_mti;
331 desc = mtp->ks_shortdesc;
332 if (desc == NULL || (len = strlen(desc)) == 0)
333 val = 0;
334 else
335 val = desc[zone_offset % len];
336 mtip->mti_zone = (val % numzones);
337 }
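/*
 * Worked example (illustrative values): with zone_offset == 2 and
 * numzones == 4, the type "devbuf" selects desc[2 % 6] == 'v', so its
 * allocations come from subzone ('v' % 4); another type whose third
 * character differs will usually land in a different subzone, keeping
 * their buffers apart and making overruns easier to attribute.
 */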
338
339 static inline u_int
340 mtp_get_subzone(struct malloc_type *mtp)
341 {
342 struct malloc_type_internal *mtip;
343
344 mtip = &mtp->ks_mti;
345
346 KASSERT(mtip->mti_zone < numzones,
347 ("mti_zone %u out of range %d",
348 mtip->mti_zone, numzones));
349 return (mtip->mti_zone);
350 }
351 #elif MALLOC_DEBUG_MAXZONES == 0
352 #error "MALLOC_DEBUG_MAXZONES must be positive."
353 #else
354 static void
355 mtp_set_subzone(struct malloc_type *mtp)
356 {
357 struct malloc_type_internal *mtip;
358
359 mtip = &mtp->ks_mti;
360 mtip->mti_zone = 0;
361 }
362
363 static inline u_int
364 mtp_get_subzone(struct malloc_type *mtp)
365 {
366
367 return (0);
368 }
369 #endif /* MALLOC_DEBUG_MAXZONES > 1 */
370
371 /*
372 * An allocation has succeeded -- update malloc type statistics for the
373 * amount of the bucket size. Occurs within a critical section so that the
374 * thread isn't preempted and doesn't migrate while updating per-CPU
375 * statistics.
376 */
377 static void
378 malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
379 int zindx)
380 {
381 struct malloc_type_internal *mtip;
382 struct malloc_type_stats *mtsp;
383
384 critical_enter();
385 mtip = &mtp->ks_mti;
386 mtsp = zpcpu_get(mtip->mti_stats);
387 if (size > 0) {
388 mtsp->mts_memalloced += size;
389 mtsp->mts_numallocs++;
390 }
391 if (zindx != -1)
392 mtsp->mts_size |= 1 << zindx;
393
394 #ifdef KDTRACE_HOOKS
395 if (__predict_false(dtrace_malloc_enabled)) {
396 uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
397 if (probe_id != 0)
398 (dtrace_malloc_probe)(probe_id,
399 (uintptr_t) mtp, (uintptr_t) mtip,
400 (uintptr_t) mtsp, size, zindx);
401 }
402 #endif
403
404 critical_exit();
405 }
406
407 void
408 malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
409 {
410
411 if (size > 0)
412 malloc_type_zone_allocated(mtp, size, -1);
413 }
414
415 /*
416 * A free operation has occurred -- update malloc type statistics for the
417 * amount of the bucket size. Occurs within a critical section so that the
418 * thread isn't preempted and doesn't migrate while updating per-CPU
419 * statistics.
420 */
421 void
422 malloc_type_freed(struct malloc_type *mtp, unsigned long size)
423 {
424 struct malloc_type_internal *mtip;
425 struct malloc_type_stats *mtsp;
426
427 critical_enter();
428 mtip = &mtp->ks_mti;
429 mtsp = zpcpu_get(mtip->mti_stats);
430 mtsp->mts_memfreed += size;
431 mtsp->mts_numfrees++;
432
433 #ifdef KDTRACE_HOOKS
434 if (__predict_false(dtrace_malloc_enabled)) {
435 uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
436 if (probe_id != 0)
437 (dtrace_malloc_probe)(probe_id,
438 (uintptr_t) mtp, (uintptr_t) mtip,
439 (uintptr_t) mtsp, size, 0);
440 }
441 #endif
442
443 critical_exit();
444 }
445
446 /*
447 * contigmalloc:
448 *
449 * Allocate a block of physically contiguous memory.
450 *
451 * If M_NOWAIT is set, this routine will not block and return NULL if
452 * the allocation fails.
453 */
454 void *
455 contigmalloc(unsigned long size, struct malloc_type *type, int flags,
456 vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
457 vm_paddr_t boundary)
458 {
459 void *ret;
460
461 ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
462 boundary, VM_MEMATTR_DEFAULT);
463 if (ret != NULL)
464 malloc_type_allocated(type, round_page(size));
465 return (ret);
466 }
467
468 void *
469 contigmalloc_domainset(unsigned long size, struct malloc_type *type,
470 struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
471 unsigned long alignment, vm_paddr_t boundary)
472 {
473 void *ret;
474
475 ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
476 alignment, boundary, VM_MEMATTR_DEFAULT);
477 if (ret != NULL)
478 malloc_type_allocated(type, round_page(size));
479 return (ret);
480 }
481
482 /*
483 * contigfree:
484 *
485 * Free a block of memory allocated by contigmalloc.
486 *
487 * This routine may not block.
488 */
489 void
490 contigfree(void *addr, unsigned long size, struct malloc_type *type)
491 {
492
493 kmem_free(addr, size);
494 malloc_type_freed(type, round_page(size));
495 }
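/*
 * Illustrative usage (hypothetical driver code): a buffer that must be
 * physically contiguous, located below 4 GB and page-aligned could be
 * obtained and released with:
 *
 *	void *buf = contigmalloc(65536, M_DEVBUF, M_WAITOK, 0, 0xffffffff,
 *	    PAGE_SIZE, 0);
 *	...
 *	contigfree(buf, 65536, M_DEVBUF);
 */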
496
497 #ifdef MALLOC_DEBUG
498 static int
499 malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
500 int flags)
501 {
502 #ifdef INVARIANTS
503 int indx;
504
505 KASSERT(mtp->ks_version == M_VERSION, ("malloc: bad malloc type version"));
506 /*
507 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
508 */
509 indx = flags & (M_WAITOK | M_NOWAIT);
510 if (indx != M_NOWAIT && indx != M_WAITOK) {
511 static struct timeval lasterr;
512 static int curerr, once;
513 if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
514 printf("Bad malloc flags: %x\n", indx);
515 kdb_backtrace();
516 flags |= M_WAITOK;
517 once++;
518 }
519 }
520 #endif
521 #ifdef MALLOC_MAKE_FAILURES
522 if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
523 atomic_add_int(&malloc_nowait_count, 1);
524 if ((malloc_nowait_count % malloc_failure_rate) == 0) {
525 atomic_add_int(&malloc_failure_count, 1);
526 *vap = NULL;
527 return (EJUSTRETURN);
528 }
529 }
530 #endif
531 if (flags & M_WAITOK) {
532 KASSERT(curthread->td_intr_nesting_level == 0,
533 ("malloc(M_WAITOK) in interrupt context"));
534 if (__predict_false(!THREAD_CAN_SLEEP())) {
535 #ifdef EPOCH_TRACE
536 epoch_trace_list(curthread);
537 #endif
538 KASSERT(0,
539 ("malloc(M_WAITOK) with sleeping prohibited"));
540 }
541 }
542 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
543 ("malloc: called with spinlock or critical section held"));
544
545 #ifdef DEBUG_MEMGUARD
546 if (memguard_cmp_mtp(mtp, *sizep)) {
547 *vap = memguard_alloc(*sizep, flags);
548 if (*vap != NULL)
549 return (EJUSTRETURN);
550 /* This is unfortunate but should not be fatal. */
551 }
552 #endif
553
554 #ifdef DEBUG_REDZONE
555 *sizep = redzone_size_ntor(*sizep);
556 #endif
557
558 return (0);
559 }
560 #endif
561
562 /*
563 * Handle large allocations and frees by using kmem_malloc directly.
564 */
565 static inline bool
566 malloc_large_slab(uma_slab_t slab)
567 {
568 uintptr_t va;
569
570 va = (uintptr_t)slab;
571 return ((va & 1) != 0);
572 }
573
574 static inline size_t
575 malloc_large_size(uma_slab_t slab)
576 {
577 uintptr_t va;
578
579 va = (uintptr_t)slab;
580 return (va >> 1);
581 }
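/*
 * Illustrative sketch of the encoding decoded above (not additional
 * allocator code): for a large allocation the recorded "slab" pointer is
 * not a real slab but the allocation size shifted left by one with the
 * low bit set.  A 64 KB allocation stores (65536 << 1) | 1, so
 * malloc_large_slab() sees the set low bit and malloc_large_size()
 * recovers 65536 by shifting right.  Real slab pointers are at least
 * pointer-aligned, so their low bit is never set.
 */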
582
583 static caddr_t __noinline
584 malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
585 int flags DEBUG_REDZONE_ARG_DEF)
586 {
587 void *va;
588
589 size = roundup(size, PAGE_SIZE);
590 va = kmem_malloc_domainset(policy, size, flags);
591 if (va != NULL) {
592 /* The low bit is unused for slab pointers. */
593 vsetzoneslab((uintptr_t)va, NULL, (void *)((size << 1) | 1));
594 uma_total_inc(size);
595 }
596 malloc_type_allocated(mtp, va == NULL ? 0 : size);
597 if (__predict_false(va == NULL)) {
598 KASSERT((flags & M_WAITOK) == 0,
599 ("malloc(M_WAITOK) returned NULL"));
600 } else {
601 #ifdef DEBUG_REDZONE
602 va = redzone_setup(va, osize);
603 #endif
604 kasan_mark(va, osize, size, KASAN_MALLOC_REDZONE);
605 }
606 return (va);
607 }
608
609 static void
610 free_large(void *addr, size_t size)
611 {
612
613 kmem_free(addr, size);
614 uma_total_dec(size);
615 }
616
617 /*
618 * malloc:
619 *
620 * Allocate a block of memory.
621 *
622 * If M_NOWAIT is set, this routine will not block and return NULL if
623 * the allocation fails.
624 */
625 void *
626 (malloc)(size_t size, struct malloc_type *mtp, int flags)
627 {
628 int indx;
629 caddr_t va;
630 uma_zone_t zone;
631 #if defined(DEBUG_REDZONE) || defined(KASAN)
632 unsigned long osize = size;
633 #endif
634
635 MPASS((flags & M_EXEC) == 0);
636
637 #ifdef MALLOC_DEBUG
638 va = NULL;
639 if (malloc_dbg(&va, &size, mtp, flags) != 0)
640 return (va);
641 #endif
642
643 if (__predict_false(size > kmem_zmax))
644 return (malloc_large(size, mtp, DOMAINSET_RR(), flags
645 DEBUG_REDZONE_ARG));
646
647 if (size & KMEM_ZMASK)
648 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
649 indx = kmemsize[size >> KMEM_ZSHIFT];
650 zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
651 va = uma_zalloc_arg(zone, zone, flags);
652 if (va != NULL) {
653 size = zone->uz_size;
654 if ((flags & M_ZERO) == 0) {
655 kmsan_mark(va, size, KMSAN_STATE_UNINIT);
656 kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
657 }
658 }
659 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
660 if (__predict_false(va == NULL)) {
661 KASSERT((flags & M_WAITOK) == 0,
662 ("malloc(M_WAITOK) returned NULL"));
663 }
664 #ifdef DEBUG_REDZONE
665 if (va != NULL)
666 va = redzone_setup(va, osize);
667 #endif
668 #ifdef KASAN
669 if (va != NULL)
670 kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
671 #endif
672 return ((void *) va);
673 }
674
675 static void *
676 malloc_domain(size_t *sizep, int *indxp, struct malloc_type *mtp, int domain,
677 int flags)
678 {
679 uma_zone_t zone;
680 caddr_t va;
681 size_t size;
682 int indx;
683
684 size = *sizep;
685 KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
686 ("malloc_domain: Called with bad flag / size combination."));
687 if (size & KMEM_ZMASK)
688 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
689 indx = kmemsize[size >> KMEM_ZSHIFT];
690 zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
691 va = uma_zalloc_domain(zone, zone, domain, flags);
692 if (va != NULL)
693 *sizep = zone->uz_size;
694 *indxp = indx;
695 return ((void *)va);
696 }
697
698 void *
699 malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
700 int flags)
701 {
702 struct vm_domainset_iter di;
703 caddr_t va;
704 int domain;
705 int indx;
706 #if defined(KASAN) || defined(DEBUG_REDZONE)
707 unsigned long osize = size;
708 #endif
709
710 MPASS((flags & M_EXEC) == 0);
711
712 #ifdef MALLOC_DEBUG
713 va = NULL;
714 if (malloc_dbg(&va, &size, mtp, flags) != 0)
715 return (va);
716 #endif
717
718 if (__predict_false(size > kmem_zmax))
719 return (malloc_large(size, mtp, DOMAINSET_RR(), flags
720 DEBUG_REDZONE_ARG));
721
722 vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
723 do {
724 va = malloc_domain(&size, &indx, mtp, domain, flags);
725 } while (va == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
726 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
727 if (__predict_false(va == NULL)) {
728 KASSERT((flags & M_WAITOK) == 0,
729 ("malloc(M_WAITOK) returned NULL"));
730 }
731 #ifdef DEBUG_REDZONE
732 if (va != NULL)
733 va = redzone_setup(va, osize);
734 #endif
735 #ifdef KASAN
736 if (va != NULL)
737 kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
738 #endif
739 #ifdef KMSAN
740 if ((flags & M_ZERO) == 0) {
741 kmsan_mark(va, size, KMSAN_STATE_UNINIT);
742 kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
743 }
744 #endif
745 return (va);
746 }
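/*
 * Illustrative usage (hypothetical caller): NUMA-aware code can pass an
 * explicit domain selection policy instead of relying on the round-robin
 * default used by plain malloc(), e.g.:
 *
 *	p = malloc_domainset(sz, M_DEVBUF, DOMAINSET_RR(), M_WAITOK);
 *
 * The iterator above retries other domains when the preferred one cannot
 * satisfy the request and, as the assertion above notes, M_WAITOK
 * allocations still do not return NULL.
 */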
747
748 /*
749 * Allocate an executable area.
750 */
751 void *
752 malloc_exec(size_t size, struct malloc_type *mtp, int flags)
753 {
754
755 return (malloc_domainset_exec(size, mtp, DOMAINSET_RR(), flags));
756 }
757
758 void *
759 malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
760 int flags)
761 {
762 #if defined(DEBUG_REDZONE) || defined(KASAN)
763 unsigned long osize = size;
764 #endif
765 #ifdef MALLOC_DEBUG
766 caddr_t va;
767 #endif
768
769 flags |= M_EXEC;
770
771 #ifdef MALLOC_DEBUG
772 va = NULL;
773 if (malloc_dbg(&va, &size, mtp, flags) != 0)
774 return (va);
775 #endif
776
777 return (malloc_large(size, mtp, ds, flags DEBUG_REDZONE_ARG));
778 }
779
780 void *
781 malloc_aligned(size_t size, size_t align, struct malloc_type *type, int flags)
782 {
783 return (malloc_domainset_aligned(size, align, type, DOMAINSET_RR(),
784 flags));
785 }
786
787 void *
788 malloc_domainset_aligned(size_t size, size_t align,
789 struct malloc_type *mtp, struct domainset *ds, int flags)
790 {
791 void *res;
792 size_t asize;
793
794 KASSERT(powerof2(align),
795 ("malloc_domainset_aligned: wrong align %#zx size %#zx",
796 align, size));
797 KASSERT(align <= PAGE_SIZE,
798 ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
799 align, size));
800
801 /*
802 * Round the allocation size up to the next power of 2,
803 * because we can only guarantee alignment for
804 * power-of-2-sized allocations. Further increase the
805 * allocation size to align if the rounded size is less than
806 * align, since malloc zones provide alignment equal to their
807 * size.
808 */
809 if (size == 0)
810 size = 1;
811 asize = size <= align ? align : 1UL << flsl(size - 1);
812
813 res = malloc_domainset(asize, mtp, ds, flags);
814 KASSERT(res == NULL || ((uintptr_t)res & (align - 1)) == 0,
815 ("malloc_domainset_aligned: result not aligned %p size %#zx "
816 "allocsize %#zx align %#zx", res, size, asize, align));
817 return (res);
818 }
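/*
 * Worked example (illustrative): a request for 3000 bytes with 64-byte
 * alignment is rounded up to asize == 4096 (the next power of two), while
 * a 40-byte request with 64-byte alignment is bumped to asize == 64,
 * since power-of-two malloc zones align items to their own size.
 */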
819
820 void *
821 mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
822 {
823
824 if (WOULD_OVERFLOW(nmemb, size))
825 panic("mallocarray: %zu * %zu overflowed", nmemb, size);
826
827 return (malloc(size * nmemb, type, flags));
828 }
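/*
 * Illustrative usage (hypothetical caller): the overflow check above is
 * what distinguishes mallocarray() from open-coded multiplication.  Code
 * sizing a table from an untrusted count would write
 *
 *	tbl = mallocarray(count, sizeof(*tbl), M_TEMP, M_WAITOK | M_ZERO);
 *
 * and rely on the panic rather than silently allocating a short buffer
 * when count * sizeof(*tbl) would wrap around.
 */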
829
830 void *
831 mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
832 struct domainset *ds, int flags)
833 {
834
835 if (WOULD_OVERFLOW(nmemb, size))
836 panic("mallocarray_domainset: %zu * %zu overflowed", nmemb, size);
837
838 return (malloc_domainset(size * nmemb, type, ds, flags));
839 }
840
841 #if defined(INVARIANTS) && !defined(KASAN)
842 static void
843 free_save_type(void *addr, struct malloc_type *mtp, u_long size)
844 {
845 struct malloc_type **mtpp = addr;
846
847 /*
848 * Cache a pointer to the malloc_type that most recently freed
849 * this memory here. This way we know who is most likely to
850 * have stepped on it later.
851 *
852 * This code assumes that size is a multiple of 8 bytes on
853 * 64-bit machines.
854 */
855 mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
856 mtpp += (size - sizeof(struct malloc_type *)) /
857 sizeof(struct malloc_type *);
858 *mtpp = mtp;
859 }
860 #endif
861
862 #ifdef MALLOC_DEBUG
863 static int
864 free_dbg(void **addrp, struct malloc_type *mtp)
865 {
866 void *addr;
867
868 addr = *addrp;
869 KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
870 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
871 ("free: called with spinlock or critical section held"));
872
873 /* free(NULL, ...) does nothing */
874 if (addr == NULL)
875 return (EJUSTRETURN);
876
877 #ifdef DEBUG_MEMGUARD
878 if (is_memguard_addr(addr)) {
879 memguard_free(addr);
880 return (EJUSTRETURN);
881 }
882 #endif
883
884 #ifdef DEBUG_REDZONE
885 redzone_check(addr);
886 *addrp = redzone_addr_ntor(addr);
887 #endif
888
889 return (0);
890 }
891 #endif
892
893 /*
894 * free:
895 *
896 * Free a block of memory allocated by malloc.
897 *
898 * This routine may not block.
899 */
900 void
901 free(void *addr, struct malloc_type *mtp)
902 {
903 uma_zone_t zone;
904 uma_slab_t slab;
905 u_long size;
906
907 #ifdef MALLOC_DEBUG
908 if (free_dbg(&addr, mtp) != 0)
909 return;
910 #endif
911 /* free(NULL, ...) does nothing */
912 if (addr == NULL)
913 return;
914
915 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
916 if (slab == NULL)
917 panic("free: address %p(%p) has not been allocated.\n",
918 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
919
920 if (__predict_true(!malloc_large_slab(slab))) {
921 size = zone->uz_size;
922 #if defined(INVARIANTS) && !defined(KASAN)
923 free_save_type(addr, mtp, size);
924 #endif
925 uma_zfree_arg(zone, addr, slab);
926 } else {
927 size = malloc_large_size(slab);
928 free_large(addr, size);
929 }
930 malloc_type_freed(mtp, size);
931 }
932
933 /*
934 * zfree:
935 *
936 * Zero then free a block of memory allocated by malloc.
937 *
938 * This routine may not block.
939 */
940 void
941 zfree(void *addr, struct malloc_type *mtp)
942 {
943 uma_zone_t zone;
944 uma_slab_t slab;
945 u_long size;
946
947 #ifdef MALLOC_DEBUG
948 if (free_dbg(&addr, mtp) != 0)
949 return;
950 #endif
951 /* free(NULL, ...) does nothing */
952 if (addr == NULL)
953 return;
954
955 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
956 if (slab == NULL)
957 panic("free: address %p(%p) has not been allocated.\n",
958 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
959
960 if (__predict_true(!malloc_large_slab(slab))) {
961 size = zone->uz_size;
962 #if defined(INVARIANTS) && !defined(KASAN)
963 free_save_type(addr, mtp, size);
964 #endif
965 kasan_mark(addr, size, size, 0);
966 explicit_bzero(addr, size);
967 uma_zfree_arg(zone, addr, slab);
968 } else {
969 size = malloc_large_size(slab);
970 kasan_mark(addr, size, size, 0);
971 explicit_bzero(addr, size);
972 free_large(addr, size);
973 }
974 malloc_type_freed(mtp, size);
975 }
976
977 /*
978 * realloc: change the size of a memory block
979 */
980 void *
981 realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
982 {
983 #ifndef DEBUG_REDZONE
984 uma_zone_t zone;
985 uma_slab_t slab;
986 #endif
987 unsigned long alloc;
988 void *newaddr;
989
990 KASSERT(mtp->ks_version == M_VERSION,
991 ("realloc: bad malloc type version"));
992 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
993 ("realloc: called with spinlock or critical section held"));
994
995 /* realloc(NULL, ...) is equivalent to malloc(...) */
996 if (addr == NULL)
997 return (malloc(size, mtp, flags));
998
999 /*
1000 * XXX: Should report free of old memory and alloc of new memory to
1001 * per-CPU stats.
1002 */
1003
1004 #ifdef DEBUG_MEMGUARD
1005 if (is_memguard_addr(addr))
1006 return (memguard_realloc(addr, size, mtp, flags));
1007 #endif
1008
1009 #ifdef DEBUG_REDZONE
1010 alloc = redzone_get_size(addr);
1011 #else
1012 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
1013
1014 /* Sanity check */
1015 KASSERT(slab != NULL,
1016 ("realloc: address %p out of range", (void *)addr));
1017
1018 /* Get the size of the original block */
1019 if (!malloc_large_slab(slab))
1020 alloc = zone->uz_size;
1021 else
1022 alloc = malloc_large_size(slab);
1023
1024 /* Reuse the original block if appropriate */
1025 if (size <= alloc &&
1026 (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) {
1027 kasan_mark((void *)addr, size, alloc, KASAN_MALLOC_REDZONE);
1028 return (addr);
1029 }
1030 #endif /* !DEBUG_REDZONE */
1031
1032 /* Allocate a new, bigger (or smaller) block */
1033 if ((newaddr = malloc(size, mtp, flags)) == NULL)
1034 return (NULL);
1035
1036 /*
1037 * Copy over original contents. For KASAN, the redzone must be marked
1038 * valid before performing the copy.
1039 */
1040 kasan_mark(addr, alloc, alloc, 0);
1041 bcopy(addr, newaddr, min(size, alloc));
1042 free(addr, mtp);
1043 return (newaddr);
1044 }
1045
1046 /*
1047 * reallocf: same as realloc() but free memory on failure.
1048 */
1049 void *
1050 reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
1051 {
1052 void *mem;
1053
1054 if ((mem = realloc(addr, size, mtp, flags)) == NULL)
1055 free(addr, mtp);
1056 return (mem);
1057 }
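/*
 * Illustrative usage (hypothetical caller): reallocf() avoids the classic
 * leak where the old pointer is overwritten before checking for failure:
 *
 *	buf = reallocf(buf, newsize, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);	// the old buffer was already freed
 */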
1058
1059 /*
1060 * malloc_size: returns the number of bytes allocated for a request of the
1061 * specified size
1062 */
1063 size_t
1064 malloc_size(size_t size)
1065 {
1066 int indx;
1067
1068 if (size > kmem_zmax)
1069 return (0);
1070 if (size & KMEM_ZMASK)
1071 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
1072 indx = kmemsize[size >> KMEM_ZSHIFT];
1073 return (kmemzones[indx].kz_size);
1074 }
1075
1076 /*
1077 * malloc_usable_size: returns the usable size of the allocation.
1078 */
1079 size_t
1080 malloc_usable_size(const void *addr)
1081 {
1082 #ifndef DEBUG_REDZONE
1083 uma_zone_t zone;
1084 uma_slab_t slab;
1085 #endif
1086 u_long size;
1087
1088 if (addr == NULL)
1089 return (0);
1090
1091 #ifdef DEBUG_MEMGUARD
1092 if (is_memguard_addr(__DECONST(void *, addr)))
1093 return (memguard_get_req_size(addr));
1094 #endif
1095
1096 #ifdef DEBUG_REDZONE
1097 size = redzone_get_size(__DECONST(void *, addr));
1098 #else
1099 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
1100 if (slab == NULL)
1101 panic("malloc_usable_size: address %p(%p) is not allocated.\n",
1102 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
1103
1104 if (!malloc_large_slab(slab))
1105 size = zone->uz_size;
1106 else
1107 size = malloc_large_size(slab);
1108 #endif
1109
1110 /*
1111 * Unmark the redzone to avoid reports from consumers who are
1112 * (presumably) about to use the full allocation size.
1113 */
1114 kasan_mark(addr, size, size, 0);
1115
1116 return (size);
1117 }
1118
1119 CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
1120
1121 /*
1122 * Initialize the kernel memory (kmem) arena.
1123 */
1124 void
1125 kmeminit(void)
1126 {
1127 u_long mem_size;
1128 u_long tmp;
1129
1130 #ifdef VM_KMEM_SIZE
1131 if (vm_kmem_size == 0)
1132 vm_kmem_size = VM_KMEM_SIZE;
1133 #endif
1134 #ifdef VM_KMEM_SIZE_MIN
1135 if (vm_kmem_size_min == 0)
1136 vm_kmem_size_min = VM_KMEM_SIZE_MIN;
1137 #endif
1138 #ifdef VM_KMEM_SIZE_MAX
1139 if (vm_kmem_size_max == 0)
1140 vm_kmem_size_max = VM_KMEM_SIZE_MAX;
1141 #endif
1142 /*
1143 * Calculate the amount of kernel virtual address (KVA) space that is
1144 * preallocated to the kmem arena. In order to support a wide range
1145 * of machines, it is a function of the physical memory size,
1146 * specifically,
1147 *
1148 * min(max(physical memory size / VM_KMEM_SIZE_SCALE,
1149 * VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
1150 *
1151 * Every architecture must define an integral value for
1152 * VM_KMEM_SIZE_SCALE. However, the definitions of VM_KMEM_SIZE_MIN
1153 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
1154 * ceiling on this preallocation, are optional. Typically,
1155 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
1156 * a given architecture.
1157 */
1158 mem_size = vm_cnt.v_page_count;
1159 if (mem_size <= 32768) /* delphij XXX 128MB */
1160 kmem_zmax = PAGE_SIZE;
1161
1162 if (vm_kmem_size_scale < 1)
1163 vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
1164
1165 /*
1166 * Check if we should use defaults for the "vm_kmem_size"
1167 * variable:
1168 */
1169 if (vm_kmem_size == 0) {
1170 vm_kmem_size = mem_size / vm_kmem_size_scale;
1171 vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
1172 vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
1173 if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
1174 vm_kmem_size = vm_kmem_size_min;
1175 if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
1176 vm_kmem_size = vm_kmem_size_max;
1177 }
1178 if (vm_kmem_size == 0)
1179 panic("Tune VM_KMEM_SIZE_* for the platform");
1180
1181 /*
1182 * The amount of KVA space that is preallocated to the
1183 * kmem arena can be set statically at compile-time or manually
1184 * through the kernel environment. However, it is still limited to
1185 * twice the physical memory size, which has been sufficient to handle
1186 * the most severe cases of external fragmentation in the kmem arena.
1187 */
1188 if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
1189 vm_kmem_size = 2 * mem_size * PAGE_SIZE;
1190
1191 vm_kmem_size = round_page(vm_kmem_size);
1192
1193 /*
1194 * With KASAN or KMSAN enabled, dynamically allocated kernel memory is
1195 * shadowed. Account for this when setting the UMA limit.
1196 */
1197 #if defined(KASAN)
1198 vm_kmem_size = (vm_kmem_size * KASAN_SHADOW_SCALE) /
1199 (KASAN_SHADOW_SCALE + 1);
1200 #elif defined(KMSAN)
1201 vm_kmem_size /= 3;
1202 #endif
1203
1204 #ifdef DEBUG_MEMGUARD
1205 tmp = memguard_fudge(vm_kmem_size, kernel_map);
1206 #else
1207 tmp = vm_kmem_size;
1208 #endif
1209 uma_set_limit(tmp);
1210
1211 #ifdef DEBUG_MEMGUARD
1212 /*
1213 * Initialize MemGuard if support compiled in. MemGuard is a
1214 * replacement allocator used for detecting tamper-after-free
1215 * scenarios as they occur. It is only used for debugging.
1216 */
1217 memguard_init(kernel_arena);
1218 #endif
1219 }
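/*
 * Worked example of the sizing policy above (illustrative numbers): on a
 * machine with 8 GB of RAM, 4 KB pages and VM_KMEM_SIZE_SCALE == 1,
 * mem_size is 2M pages, so the default vm_kmem_size becomes 2M pages *
 * 4 KB == 8 GB; it is then clamped by VM_KMEM_SIZE_MIN/VM_KMEM_SIZE_MAX
 * where the architecture defines them and finally limited to twice the
 * physical memory size.
 */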
1220
1221 /*
1222 * Initialize the kernel memory allocator
1223 */
1224 /* ARGSUSED*/
1225 static void
1226 mallocinit(void *dummy)
1227 {
1228 int i;
1229 uint8_t indx;
1230
1231 mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
1232
1233 kmeminit();
1234
1235 if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
1236 kmem_zmax = KMEM_ZMAX;
1237
1238 for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
1239 int size = kmemzones[indx].kz_size;
1240 const char *name = kmemzones[indx].kz_name;
1241 size_t align;
1242 int subzone;
1243
1244 align = UMA_ALIGN_PTR;
1245 if (powerof2(size) && size > sizeof(void *))
1246 align = MIN(size, PAGE_SIZE) - 1;
1247 for (subzone = 0; subzone < numzones; subzone++) {
1248 kmemzones[indx].kz_zone[subzone] =
1249 uma_zcreate(name, size,
1250 #if defined(INVARIANTS) && !defined(KASAN) && !defined(KMSAN)
1251 mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
1252 #else
1253 NULL, NULL, NULL, NULL,
1254 #endif
1255 align, UMA_ZONE_MALLOC);
1256 }
1257 for (;i <= size; i+= KMEM_ZBASE)
1258 kmemsize[i >> KMEM_ZSHIFT] = indx;
1259 }
1260 }
1261 SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);
1262
1263 void
1264 malloc_init(void *data)
1265 {
1266 struct malloc_type_internal *mtip;
1267 struct malloc_type *mtp;
1268
1269 KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));
1270
1271 mtp = data;
1272 if (mtp->ks_version != M_VERSION)
1273 panic("malloc_init: type %s with unsupported version %lu",
1274 mtp->ks_shortdesc, mtp->ks_version);
1275
1276 mtip = &mtp->ks_mti;
1277 mtip->mti_stats = uma_zalloc_pcpu(pcpu_zone_64, M_WAITOK | M_ZERO);
1278 mtp_set_subzone(mtp);
1279
1280 mtx_lock(&malloc_mtx);
1281 mtp->ks_next = kmemstatistics;
1282 kmemstatistics = mtp;
1283 kmemcount++;
1284 mtx_unlock(&malloc_mtx);
1285 }
1286
1287 void
1288 malloc_uninit(void *data)
1289 {
1290 struct malloc_type_internal *mtip;
1291 struct malloc_type_stats *mtsp;
1292 struct malloc_type *mtp, *temp;
1293 long temp_allocs, temp_bytes;
1294 int i;
1295
1296 mtp = data;
1297 KASSERT(mtp->ks_version == M_VERSION,
1298 ("malloc_uninit: bad malloc type version"));
1299
1300 mtx_lock(&malloc_mtx);
1301 mtip = &mtp->ks_mti;
1302 if (mtp != kmemstatistics) {
1303 for (temp = kmemstatistics; temp != NULL;
1304 temp = temp->ks_next) {
1305 if (temp->ks_next == mtp) {
1306 temp->ks_next = mtp->ks_next;
1307 break;
1308 }
1309 }
1310 KASSERT(temp,
1311 ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
1312 } else
1313 kmemstatistics = mtp->ks_next;
1314 kmemcount--;
1315 mtx_unlock(&malloc_mtx);
1316
1317 /*
1318 * Look for memory leaks.
1319 */
1320 temp_allocs = temp_bytes = 0;
1321 for (i = 0; i <= mp_maxid; i++) {
1322 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1323 temp_allocs += mtsp->mts_numallocs;
1324 temp_allocs -= mtsp->mts_numfrees;
1325 temp_bytes += mtsp->mts_memalloced;
1326 temp_bytes -= mtsp->mts_memfreed;
1327 }
1328 if (temp_allocs > 0 || temp_bytes > 0) {
1329 printf("Warning: memory type %s leaked memory on destroy "
1330 "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
1331 temp_allocs, temp_bytes);
1332 }
1333
1334 uma_zfree_pcpu(pcpu_zone_64, mtip->mti_stats);
1335 }
1336
1337 struct malloc_type *
1338 malloc_desc2type(const char *desc)
1339 {
1340 struct malloc_type *mtp;
1341
1342 mtx_assert(&malloc_mtx, MA_OWNED);
1343 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1344 if (strcmp(mtp->ks_shortdesc, desc) == 0)
1345 return (mtp);
1346 }
1347 return (NULL);
1348 }
1349
1350 static int
1351 sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
1352 {
1353 struct malloc_type_stream_header mtsh;
1354 struct malloc_type_internal *mtip;
1355 struct malloc_type_stats *mtsp, zeromts;
1356 struct malloc_type_header mth;
1357 struct malloc_type *mtp;
1358 int error, i;
1359 struct sbuf sbuf;
1360
1361 error = sysctl_wire_old_buffer(req, 0);
1362 if (error != 0)
1363 return (error);
1364 sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
1365 sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
1366 mtx_lock(&malloc_mtx);
1367
1368 bzero(&zeromts, sizeof(zeromts));
1369
1370 /*
1371 * Insert stream header.
1372 */
1373 bzero(&mtsh, sizeof(mtsh));
1374 mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
1375 mtsh.mtsh_maxcpus = MAXCPU;
1376 mtsh.mtsh_count = kmemcount;
1377 (void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));
1378
1379 /*
1380 * Insert alternating sequence of type headers and type statistics.
1381 */
1382 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1383 mtip = &mtp->ks_mti;
1384
1385 /*
1386 * Insert type header.
1387 */
1388 bzero(&mth, sizeof(mth));
1389 strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
1390 (void)sbuf_bcat(&sbuf, &mth, sizeof(mth));
1391
1392 /*
1393 * Insert type statistics for each CPU.
1394 */
1395 for (i = 0; i <= mp_maxid; i++) {
1396 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1397 (void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
1398 }
1399 /*
1400 * Fill in the missing CPUs.
1401 */
1402 for (; i < MAXCPU; i++) {
1403 (void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
1404 }
1405 }
1406 mtx_unlock(&malloc_mtx);
1407 error = sbuf_finish(&sbuf);
1408 sbuf_delete(&sbuf);
1409 return (error);
1410 }
1411
1412 SYSCTL_PROC(_kern, OID_AUTO, malloc_stats,
1413 CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0,
1414 sysctl_kern_malloc_stats, "s,malloc_type_ustats",
1415 "Return malloc types");
1416
1417 SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
1418 "Count of kernel malloc types");
1419
1420 void
1421 malloc_type_list(malloc_type_list_func_t *func, void *arg)
1422 {
1423 struct malloc_type *mtp, **bufmtp;
1424 int count, i;
1425 size_t buflen;
1426
1427 mtx_lock(&malloc_mtx);
1428 restart:
1429 mtx_assert(&malloc_mtx, MA_OWNED);
1430 count = kmemcount;
1431 mtx_unlock(&malloc_mtx);
1432
1433 buflen = sizeof(struct malloc_type *) * count;
1434 bufmtp = malloc(buflen, M_TEMP, M_WAITOK);
1435
1436 mtx_lock(&malloc_mtx);
1437
1438 if (count < kmemcount) {
1439 free(bufmtp, M_TEMP);
1440 goto restart;
1441 }
1442
1443 for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
1444 bufmtp[i] = mtp;
1445
1446 mtx_unlock(&malloc_mtx);
1447
1448 for (i = 0; i < count; i++)
1449 (func)(bufmtp[i], arg);
1450
1451 free(bufmtp, M_TEMP);
1452 }
1453
1454 #ifdef DDB
1455 static int64_t
1456 get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,
1457 uint64_t *inuse)
1458 {
1459 const struct malloc_type_stats *mtsp;
1460 uint64_t frees, alloced, freed;
1461 int i;
1462
1463 *allocs = 0;
1464 frees = 0;
1465 alloced = 0;
1466 freed = 0;
1467 for (i = 0; i <= mp_maxid; i++) {
1468 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1469
1470 *allocs += mtsp->mts_numallocs;
1471 frees += mtsp->mts_numfrees;
1472 alloced += mtsp->mts_memalloced;
1473 freed += mtsp->mts_memfreed;
1474 }
1475 *inuse = *allocs - frees;
1476 return (alloced - freed);
1477 }
1478
1479 DB_SHOW_COMMAND_FLAGS(malloc, db_show_malloc, DB_CMD_MEMSAFE)
1480 {
1481 const char *fmt_hdr, *fmt_entry;
1482 struct malloc_type *mtp;
1483 uint64_t allocs, inuse;
1484 int64_t size;
1485 /* variables for sorting */
1486 struct malloc_type *last_mtype, *cur_mtype;
1487 int64_t cur_size, last_size;
1488 int ties;
1489
1490 if (modif[0] == 'i') {
1491 fmt_hdr = "%s,%s,%s,%s\n";
1492 fmt_entry = "\"%s\",%ju,%jdK,%ju\n";
1493 } else {
1494 fmt_hdr = "%18s %12s %12s %12s\n";
1495 fmt_entry = "%18s %12ju %12jdK %12ju\n";
1496 }
1497
1498 db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");
1499
1500 /* Select sort, largest size first. */
1501 last_mtype = NULL;
1502 last_size = INT64_MAX;
1503 for (;;) {
1504 cur_mtype = NULL;
1505 cur_size = -1;
1506 ties = 0;
1507
1508 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1509 /*
1510 * In the case of size ties, print out mtypes
1511 * in the order they are encountered. That is,
1512 * when we encounter the most recently output
1513 * mtype, we have already printed all preceding
1514 * ties, and we must print all following ties.
1515 */
1516 if (mtp == last_mtype) {
1517 ties = 1;
1518 continue;
1519 }
1520 size = get_malloc_stats(&mtp->ks_mti, &allocs,
1521 &inuse);
1522 if (size > cur_size && size < last_size + ties) {
1523 cur_size = size;
1524 cur_mtype = mtp;
1525 }
1526 }
1527 if (cur_mtype == NULL)
1528 break;
1529
1530 size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
1531 db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
1532 howmany(size, 1024), allocs);
1533
1534 if (db_pager_quit)
1535 break;
1536
1537 last_mtype = cur_mtype;
1538 last_size = cur_size;
1539 }
1540 }
1541
1542 #if MALLOC_DEBUG_MAXZONES > 1
1543 DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
1544 {
1545 struct malloc_type_internal *mtip;
1546 struct malloc_type *mtp;
1547 u_int subzone;
1548
1549 if (!have_addr) {
1550 db_printf("Usage: show multizone_matches <malloc type/addr>\n");
1551 return;
1552 }
1553 mtp = (void *)addr;
1554 if (mtp->ks_version != M_VERSION) {
1555 db_printf("Version %lx does not match expected %x\n",
1556 mtp->ks_version, M_VERSION);
1557 return;
1558 }
1559
1560 mtip = &mtp->ks_mti;
1561 subzone = mtip->mti_zone;
1562
1563 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1564 mtip = &mtp->ks_mti;
1565 if (mtip->mti_zone != subzone)
1566 continue;
1567 db_printf("%s\n", mtp->ks_shortdesc);
1568 if (db_pager_quit)
1569 break;
1570 }
1571 }
1572 #endif /* MALLOC_DEBUG_MAXZONES > 1 */
1573 #endif /* DDB */
1574