xref: /freebsd/sys/kern/kern_malloc.c (revision 47dd1d1b)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */
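
/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * consumer declares its own memory type and passes it to malloc(9) and
 * free(9).  The type name "M_EXAMPLE" and its descriptions are made up
 * for this example.
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example buffers");
 *
 *	void *p = malloc(128, M_EXAMPLE, M_WAITOK | M_ZERO);
 *	...
 *	free(p, M_EXAMPLE);
 *
 * Per-type statistics for "example" then show up in vmstat -m output.
 */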

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

bool	__read_frequently			dtrace_malloc_enabled;
dtrace_malloc_probe_func_t __read_mostly	dtrace_malloc_probe;
#endif

#if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) ||		\
    defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
#define	MALLOC_DEBUG	1
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
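
/*
 * Worked example (not from the original source): with the default
 * REALLOC_FRACTION of 1, shrinking a 1024-byte allocation triggers a
 * fresh, smaller allocation only when the new size drops to half or
 * less, i.e. newsize <= 1024 >> 1 == 512.  realloc(p, 600, ...) reuses
 * the old block; realloc(p, 512, ...) may allocate a new one.
 */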

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

static struct malloc_type *kmemstatistics;
static int kmemcount;

#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	65536
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];
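
/*
 * Sketch of the size-to-bucket lookup these constants drive (illustrative,
 * not part of the original file): a request is rounded up to a multiple of
 * KMEM_ZBASE (16), then shifted right by KMEM_ZSHIFT to index kmemsize[],
 * which maps to a kmemzones[] slot.  For a 100-byte request:
 *
 *	if (size & KMEM_ZMASK)
 *		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;	-> 112
 *	indx = kmemsize[size >> KMEM_ZSHIFT];	-> kmemsize[7], the 128-byte zone
 */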

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "16", },
	{32, "32", },
	{64, "64", },
	{128, "128", },
	{256, "256", },
	{512, "512", },
	{1024, "1024", },
	{2048, "2048", },
	{4096, "4096", },
	{8192, "8192", },
	{16384, "16384", },
	{32768, "32768", },
	{65536, "65536", },
	{0, NULL},
};

/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long kmem_zmax = KMEM_ZMAX;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
    "Maximum allocation size for which malloc(9) uses UMA as the backend");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif
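
/*
 * Example (illustrative): on a kernel built with MALLOC_MAKE_FAILURES,
 * failing roughly every 100th M_NOWAIT allocation can be requested from
 * userland with
 *
 *	sysctl debug.malloc.failure_rate=100
 *
 * and the number of induced failures observed via
 * debug.malloc.failure_count.
 */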

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = uma_size();
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size, limit;

	/* The sysctl is unsigned, implement as a saturation value. */
	size = uma_size();
	limit = uma_limit();
	if (size > limit)
		size = 0;
	else
		size = limit - size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");
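
/*
 * Illustrative subzone pick (values hypothetical): with numzones == 8,
 * zone_offset == 1200086 and short description "devbuf" (length 6),
 * mtp_set_subzone() below computes
 *
 *	val = desc[1200086 % 6] = desc[2] = 'v'
 *	mti_zone = 'v' % 8 = 118 % 8 = 6
 *
 * so all "devbuf" allocations come from subzone 6.
 */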

static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;
	const char *desc;
	size_t len;
	u_int val;

	mtip = mtp->ks_handle;
	desc = mtp->ks_shortdesc;
	if (desc == NULL || (len = strlen(desc)) == 0)
		val = 0;
	else
		val = desc[zone_offset % len];
	mtip->mti_zone = (val % numzones);
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = mtp->ks_handle;

	KASSERT(mtip->mti_zone < numzones,
	    ("mti_zone %u out of range %d",
	    mtip->mti_zone, numzones));
	return (mtip->mti_zone);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = mtp->ks_handle;
	mtip->mti_zone = 0;
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * amount of bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 *	contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(kernel_arena, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}
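
/*
 * Usage sketch (illustrative only): a driver asking for a 64KB DMA
 * buffer below 4GB, aligned to 4KB, with no boundary constraint, might
 * call
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_WAITOK, 0, 0xffffffff,
 *	    4096, 0);
 *	...
 *	contigfree(buf, 65536, M_DEVBUF);
 */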

void *
contigmalloc_domain(unsigned long size, struct malloc_type *type,
    int domain, int flags, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig_domain(domain, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

/*
 *	contigfree:
 *
 *	Free a block of memory allocated by contigmalloc.
 *
 *	This routine may not block.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{

	kmem_free(kernel_arena, (vm_offset_t)addr, size);
	malloc_type_freed(type, round_page(size));
}

#ifdef MALLOC_DEBUG
static int
malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
    int flags)
{
#ifdef INVARIANTS
	int indx;

	KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			*vap = NULL;
			return (EJUSTRETURN);
		}
	}
#endif
	if (flags & M_WAITOK) {
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
		KASSERT(curthread->td_epochnest == 0,
		    ("malloc(M_WAITOK) in epoch context"));
	}
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("malloc: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, *sizep)) {
		*vap = memguard_alloc(*sizep, flags);
		if (*vap != NULL)
			return (EJUSTRETURN);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	*sizep = redzone_size_ntor(*sizep);
#endif

	return (0);
}
#endif

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
malloc(size_t size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef MALLOC_DEBUG
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (size <= kmem_zmax) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}
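
/*
 * Caller-side sketch (illustrative; "struct foo" is hypothetical):
 * M_NOWAIT callers must handle a NULL return, while M_WAITOK callers
 * may assume success.
 *
 *	struct foo *fp;
 *
 *	fp = malloc(sizeof(*fp), M_TEMP, M_NOWAIT | M_ZERO);
 *	if (fp == NULL)
 *		return (ENOMEM);
 */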

void *
malloc_domain(size_t size, struct malloc_type *mtp, int domain,
    int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef MALLOC_DEBUG
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif
	if (size <= kmem_zmax) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc_domain(zone, NULL, domain, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc_domain(size, domain, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

void *
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray: %zu * %zu overflowed", nmemb, size);

	return (malloc(size * nmemb, type, flags));
}
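
/*
 * Example of the overflow this guards against (illustrative): with a
 * 64-bit size_t, nmemb = 1 << 62 and size = 8 wraps nmemb * size around
 * to 0.  WOULD_OVERFLOW catches that, and mallocarray() panics instead
 * of silently allocating a too-small buffer.
 */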

#ifdef INVARIANTS
static void
free_save_type(void *addr, struct malloc_type *mtp, u_long size)
{
	struct malloc_type **mtpp = addr;

	/*
	 * Cache a pointer to the malloc_type that most recently freed
	 * this memory here.  This way we know who is most likely to
	 * have stepped on it later.
	 *
	 * This code assumes that size is a multiple of 8 bytes on
	 * 64-bit machines.
	 */
	mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
	mtpp += (size - sizeof(struct malloc_type *)) /
	    sizeof(struct malloc_type *);
	*mtpp = mtp;
}
#endif

#ifdef MALLOC_DEBUG
static int
free_dbg(void **addrp, struct malloc_type *mtp)
{
	void *addr;

	addr = *addrp;
	KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("free: called with spinlock or critical section held"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return (EJUSTRETURN);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return (EJUSTRETURN);
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	*addrp = redzone_addr_ntor(addr);
#endif

	return (0);
}
#endif

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		free_save_type(addr, mtp, size);
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

void
free_domain(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
	if (slab == NULL)
		panic("free_domain: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		free_save_type(addr, mtp, size);
#endif
		uma_zfree_domain(LIST_FIRST(&slab->us_keg->uk_zones),
		    addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

773 
774 /*
775  *	realloc: change the size of a memory block
776  */
777 void *
778 realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
779 {
780 	uma_slab_t slab;
781 	unsigned long alloc;
782 	void *newaddr;
783 
784 	KASSERT(mtp->ks_magic == M_MAGIC,
785 	    ("realloc: bad malloc type magic"));
786 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
787 	    ("realloc: called with spinlock or critical section held"));
788 
789 	/* realloc(NULL, ...) is equivalent to malloc(...) */
790 	if (addr == NULL)
791 		return (malloc(size, mtp, flags));
792 
793 	/*
794 	 * XXX: Should report free of old memory and alloc of new memory to
795 	 * per-CPU stats.
796 	 */
797 
798 #ifdef DEBUG_MEMGUARD
799 	if (is_memguard_addr(addr))
800 		return (memguard_realloc(addr, size, mtp, flags));
801 #endif
802 
803 #ifdef DEBUG_REDZONE
804 	slab = NULL;
805 	alloc = redzone_get_size(addr);
806 #else
807 	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));
808 
809 	/* Sanity check */
810 	KASSERT(slab != NULL,
811 	    ("realloc: address %p out of range", (void *)addr));
812 
813 	/* Get the size of the original block */
814 	if (!(slab->us_flags & UMA_SLAB_MALLOC))
815 		alloc = slab->us_keg->uk_size;
816 	else
817 		alloc = slab->us_size;
818 
819 	/* Reuse the original block if appropriate */
820 	if (size <= alloc
821 	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
822 		return (addr);
823 #endif /* !DEBUG_REDZONE */
824 
825 	/* Allocate a new, bigger (or smaller) block */
826 	if ((newaddr = malloc(size, mtp, flags)) == NULL)
827 		return (NULL);
828 
829 	/* Copy over original contents */
830 	bcopy(addr, newaddr, min(size, alloc));
831 	free(addr, mtp);
832 	return (newaddr);
833 }
834 
/*
 *	reallocf: same as realloc() but frees memory on failure.
 */
void *
reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
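
/*
 * Usage sketch (illustrative): with plain realloc() a failed grow leaks
 * the old buffer unless the caller keeps a second pointer; reallocf()
 * folds the cleanup in.
 *
 *	buf = reallocf(buf, newsize, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);	-- old buffer already freed
 */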

#ifndef __sparc64__
CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
#endif

/*
 * Initialize the kernel memory (kmem) arena.
 */
void
kmeminit(void)
{
	u_long mem_size;
	u_long tmp;

#ifdef VM_KMEM_SIZE
	if (vm_kmem_size == 0)
		vm_kmem_size = VM_KMEM_SIZE;
#endif
#ifdef VM_KMEM_SIZE_MIN
	if (vm_kmem_size_min == 0)
		vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
#ifdef VM_KMEM_SIZE_MAX
	if (vm_kmem_size_max == 0)
		vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	/*
	 * Calculate the amount of kernel virtual address (KVA) space that is
	 * preallocated to the kmem arena.  In order to support a wide range
	 * of machines, it is a function of the physical memory size,
	 * specifically,
	 *
	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
	 *
	 * Every architecture must define an integral value for
	 * VM_KMEM_SIZE_SCALE.  However, the definitions of VM_KMEM_SIZE_MIN
	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
	 * ceiling on this preallocation, are optional.  Typically,
	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
	 * a given architecture.
	 */
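	/*
	 * Worked example (hypothetical numbers, not from this file): on a
	 * machine with 8 GB of RAM and a VM_KMEM_SIZE_SCALE of 1, the
	 * scaled size is 8 GB; if VM_KMEM_SIZE_MAX were, say, 60% of KVA,
	 * the smaller of the two would win, and the result is further
	 * capped below at twice physical memory.
	 */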
	mem_size = vm_cnt.v_page_count;
	if (mem_size <= 32768) /* delphij XXX 128MB */
		kmem_zmax = PAGE_SIZE;

	if (vm_kmem_size_scale < 1)
		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;

	/*
	 * Check if we should use defaults for the "vm_kmem_size"
	 * variable:
	 */
	if (vm_kmem_size == 0) {
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

		if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
			vm_kmem_size = vm_kmem_size_min;
		if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
			vm_kmem_size = vm_kmem_size_max;
	}

	/*
	 * The amount of KVA space that is preallocated to the
	 * kmem arena can be set statically at compile-time or manually
	 * through the kernel environment.  However, it is still limited to
	 * twice the physical memory size, which has been sufficient to handle
	 * the most severe cases of external fragmentation in the kmem arena.
	 */
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;

	vm_kmem_size = round_page(vm_kmem_size);
#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);
#else
	tmp = vm_kmem_size;
#endif
	uma_set_limit(tmp);

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kernel_arena);
#endif
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
mallocinit(void *dummy)
{
	int i;
	uint8_t indx;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	kmeminit();

	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
		kmem_zmax = KMEM_ZMAX;

	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;
		int subzone;

		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#ifdef INVARIANTS
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
		}
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	if (mtp->ks_magic != M_MAGIC)
		panic("malloc_init: bad malloc type magic");

	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;
	mtp_set_subzone(mtp);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("malloc_uninit: bad malloc type magic"));
	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));

	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i < MAXCPU; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_arg(mt_zone, mtip, slab);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	mtx_lock(&malloc_mtx);

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &mtip->mti_stats[i],
			    sizeof(mtip->mti_stats[i]));
		}
	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

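/*
 * Resulting stream layout (derived from the code above; shown for
 * reference, not part of the original file):
 *
 *	struct malloc_type_stream_header	(version, maxcpus, count)
 *	repeated 'count' times:
 *		struct malloc_type_header	(type name)
 *		struct malloc_type_stats x MAXCPU
 *
 * Userland consumers such as vmstat -m decode this via libmemstat.
 */
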
SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}

#ifdef DDB
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	uint64_t allocs, frees;
	uint64_t alloced, freed;
	int i;

	db_printf("%18s %12s  %12s %12s\n", "Type", "InUse", "MemUse",
	    "Requests");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		alloced = 0;
		freed = 0;
		for (i = 0; i < MAXCPU; i++) {
			allocs += mtip->mti_stats[i].mts_numallocs;
			frees += mtip->mti_stats[i].mts_numfrees;
			alloced += mtip->mti_stats[i].mts_memalloced;
			freed += mtip->mti_stats[i].mts_memfreed;
		}
		db_printf("%18s %12ju %12juK %12ju\n",
		    mtp->ks_shortdesc, allocs - frees,
		    (alloced - freed + 1023) / 1024, allocs);
		if (db_pager_quit)
			break;
	}
}

#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_magic != M_MAGIC) {
		db_printf("Magic %lx does not match expected %x\n",
		    mtp->ks_magic, M_MAGIC);
		return;
	}

	mtip = mtp->ks_handle;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = mtp->ks_handle;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
		if (db_pager_quit)
			break;
	}
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#endif /* DDB */

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int error;
	int rsize;
	int size;
	int i;

	waste = 0;
	mem = 0;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (unsigned long long)krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */