xref: /openbsd/sys/kern/kern_malloc.c (revision ffff71aa)
1 /*	$OpenBSD: kern_malloc.c,v 1.153 2025/01/14 18:37:51 mvs Exp $	*/
2 /*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/
3 
4 /*
5  * Copyright (c) 1987, 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
33  */
34 
35 #include <sys/param.h>
36 #include <sys/malloc.h>
37 #include <sys/proc.h>
38 #include <sys/stdint.h>
39 #include <sys/systm.h>
40 #include <sys/sysctl.h>
41 #include <sys/time.h>
42 #include <sys/mutex.h>
43 #include <sys/rwlock.h>
44 #include <sys/tracepoint.h>
45 
46 #include <uvm/uvm_extern.h>
47 
48 #if defined(DDB)
49 #include <machine/db_machdep.h>
50 #include <ddb/db_output.h>
51 #endif
52 
53 /*
54  * Locks used to protect data:
55  *	I	Immutable data
56  */
57 
58 static
59 #ifndef SMALL_KERNEL
60 __inline__
61 #endif
62 long
63 BUCKETINDX(size_t sz)
64 {
65 	long b, d;
66 
67 	/* note that this relies upon MINALLOCSIZE being 1 << MINBUCKET */
68 	b = 7 + MINBUCKET; d = 4;
69 	while (d != 0) {
70 		if (sz <= (1 << b))
71 			b -= d;
72 		else
73 			b += d;
74 		d >>= 1;
75 	}
76 	if (sz <= (1 << b))
77 		b += 0;
78 	else
79 		b += 1;
80 	return b;
81 }
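
/*
 * A worked example of the binary search above (illustrative, assuming
 * MINBUCKET is 4 so MINALLOCSIZE is 16).  For BUCKETINDX(100):
 *
 *	b = 11, d = 4:	100 <= 2048	-> b = 7
 *	b =  7, d = 2:	100 <= 128	-> b = 5
 *	b =  5, d = 1:	100 >  32	-> b = 6
 *	loop ends:	100 >  64	-> b = 7
 *
 * so a 100-byte request is served from bucket[7], the 128-byte bucket.
 */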
82 
83 static struct vm_map kmem_map_store;
84 struct vm_map *kmem_map = NULL;
85 
86 /*
87  * Default number of pages in kmem_map.  We attempt to calculate this
88  * at run-time, but allow it to be either patched or set in the kernel
89  * config file.
90  */
91 #ifndef NKMEMPAGES
92 #define	NKMEMPAGES	-1
93 #endif
94 u_int	nkmempages = NKMEMPAGES;
95 
96 struct mutex malloc_mtx = MUTEX_INITIALIZER(IPL_VM);
97 struct kmembuckets bucket[MINBUCKET + 16];
98 #ifdef KMEMSTATS
99 struct kmemstats kmemstats[M_LAST];
100 #endif
101 struct kmemusage *kmemusage;
102 char *kmembase, *kmemlimit;
103 char buckstring[16 * sizeof("123456,")];	/* [I] */
104 int buckstring_init = 0;
105 #if defined(KMEMSTATS) || defined(DIAGNOSTIC)
106 char *memname[] = INITKMEMNAMES;
107 char *memall;					/* [I] */
108 #endif
109 
110 /*
111  * Normally the freelist structure is used only to hold the list pointer
112  * for free objects.  However, when running with diagnostics, the first
113  * 8 bytes of the structure are unused except for diagnostic information,
114  * and the free list pointer is at offset 8 in the structure.  Since the
115  * first 8 bytes are the portion of the structure most often modified, this
116  * helps to detect memory reuse problems and avoid free list corruption.
117  */
118 struct kmem_freelist {
119 	int32_t	kf_spare0;
120 	int16_t	kf_type;
121 	int16_t	kf_spare1;
122 	XSIMPLEQ_ENTRY(kmem_freelist) kf_flist;
123 };
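
/*
 * On LP64 (an assumption for this note) the layout is: kf_spare0 at
 * offset 0, kf_type at 4, kf_spare1 at 6, and the XSIMPLEQ link (a
 * single pointer) at offset 8, for 16 bytes total; kmeminit() below
 * panics (under DIAGNOSTIC) if the struct ever grows past the smallest
 * bucket, 1 << MINBUCKET.
 */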
124 
125 #ifdef DIAGNOSTIC
126 /*
127  * This structure provides a set of masks to catch unaligned frees.
128  */
129 const long addrmask[] = { 0,
130 	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
131 	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
132 	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
133 	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
134 };
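
/*
 * Each entry is (1 << i) - 1, so addrmask[indx] covers the low bits
 * that must be zero for a chunk in bucket "indx".  For example, a
 * pointer freed into the 64-byte bucket (indx 6) must satisfy
 * (addr & addrmask[6]) == (addr & 0x3f) == 0; anything else points
 * into the middle of an allocation and free() panics on it below.
 */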
135 
136 #endif /* DIAGNOSTIC */
137 
138 #ifndef SMALL_KERNEL
139 struct timeval malloc_errintvl = { 5, 0 };
140 struct timeval malloc_lasterr;
141 #endif
142 
143 /*
144  * Allocate a block of memory
145  */
146 void *
147 malloc(size_t size, int type, int flags)
148 {
149 	struct kmembuckets *kbp;
150 	struct kmemusage *kup;
151 	struct kmem_freelist *freep;
152 	long indx, npg, allocsize;
153 	caddr_t va, cp;
154 	int s;
155 #ifdef DIAGNOSTIC
156 	int freshalloc;
157 	char *savedtype;
158 #endif
159 #ifdef KMEMSTATS
160 	struct kmemstats *ksp = &kmemstats[type];
161 	int wake;
162 
163 	if (((unsigned long)type) <= 1 || ((unsigned long)type) >= M_LAST)
164 		panic("malloc: bogus type %d", type);
165 #endif
166 
167 	KASSERT(flags & (M_WAITOK | M_NOWAIT));
168 
169 #ifdef DIAGNOSTIC
170 	if ((flags & M_NOWAIT) == 0) {
171 		extern int pool_debug;
172 		assertwaitok();
173 		if (pool_debug == 2)
174 			yield();
175 	}
176 #endif
177 
178 	if (size > 65535 * PAGE_SIZE) {
179 		if (flags & M_CANFAIL) {
180 #ifndef SMALL_KERNEL
181 			if (ratecheck(&malloc_lasterr, &malloc_errintvl))
182 				printf("malloc(): allocation too large, "
183 				    "type = %d, size = %lu\n", type, size);
184 #endif
185 			return (NULL);
186 		} else
187 			panic("malloc: allocation too large, "
188 			    "type = %d, size = %lu", type, size);
189 	}
190 
191 	indx = BUCKETINDX(size);
192 	if (size > MAXALLOCSAVE)
193 		allocsize = round_page(size);
194 	else
195 		allocsize = 1 << indx;
196 	kbp = &bucket[indx];
197 	mtx_enter(&malloc_mtx);
198 #ifdef KMEMSTATS
199 	while (ksp->ks_memuse >= ksp->ks_limit) {
200 		if (flags & M_NOWAIT) {
201 			mtx_leave(&malloc_mtx);
202 			return (NULL);
203 		}
204 #ifdef DIAGNOSTIC
205 		if (ISSET(flags, M_WAITOK) && curproc == &proc0)
206 			panic("%s: cannot sleep for memory during boot",
207 			    __func__);
208 #endif
209 		if (ksp->ks_limblocks < 65535)
210 			ksp->ks_limblocks++;
211 		msleep_nsec(ksp, &malloc_mtx, PSWP+2, memname[type], INFSLP);
212 	}
213 	ksp->ks_memuse += allocsize; /* account for this early */
214 	ksp->ks_size |= 1 << indx;
215 #endif
216 	if (XSIMPLEQ_FIRST(&kbp->kb_freelist) == NULL) {
217 		mtx_leave(&malloc_mtx);
218 		npg = atop(round_page(allocsize));
219 		s = splvm();
220 		va = (caddr_t)uvm_km_kmemalloc_pla(kmem_map, NULL,
221 		    (vsize_t)ptoa(npg), 0,
222 		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
223 		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0),
224 		    no_constraint.ucr_low, no_constraint.ucr_high,
225 		    0, 0, 0);
226 		splx(s);
227 		if (va == NULL) {
228 			/*
229 			 * uvm_km_kmemalloc_pla() can return NULL, even if it can
230 			 * wait, if there is no map space available, because
231 			 * it can't fix that problem.  Neither can we,
232 			 * right now.  (We should release pages which
233 			 * are completely free and which are in buckets
234 			 * with too many free elements.)
235 			 */
236 			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
237 				panic("malloc: out of space in kmem_map");
238 
239 #ifdef KMEMSTATS
240 			mtx_enter(&malloc_mtx);
241 			ksp->ks_memuse -= allocsize;
242 			wake = ksp->ks_memuse + allocsize >= ksp->ks_limit &&
243 			    ksp->ks_memuse < ksp->ks_limit;
244 			mtx_leave(&malloc_mtx);
245 			if (wake)
246 				wakeup(ksp);
247 #endif
248 			return (NULL);
249 		}
250 		mtx_enter(&malloc_mtx);
251 #ifdef KMEMSTATS
252 		kbp->kb_total += kbp->kb_elmpercl;
253 #endif
254 		kup = btokup(va);
255 		kup->ku_indx = indx;
256 #ifdef DIAGNOSTIC
257 		freshalloc = 1;
258 #endif
259 		if (allocsize > MAXALLOCSAVE) {
260 			kup->ku_pagecnt = npg;
261 			goto out;
262 		}
263 #ifdef KMEMSTATS
264 		kup->ku_freecnt = kbp->kb_elmpercl;
265 		kbp->kb_totalfree += kbp->kb_elmpercl;
266 #endif
267 		cp = va + (npg * PAGE_SIZE) - allocsize;
268 		for (;;) {
269 			freep = (struct kmem_freelist *)cp;
270 #ifdef DIAGNOSTIC
271 			/*
272 			 * Copy in known text to detect modification
273 			 * after freeing.
274 			 */
275 			poison_mem(cp, allocsize);
276 			freep->kf_type = M_FREE;
277 #endif /* DIAGNOSTIC */
278 			XSIMPLEQ_INSERT_HEAD(&kbp->kb_freelist, freep,
279 			    kf_flist);
280 			if (cp <= va)
281 				break;
282 			cp -= allocsize;
283 		}
284 	} else {
285 #ifdef DIAGNOSTIC
286 		freshalloc = 0;
287 #endif
288 	}
289 	freep = XSIMPLEQ_FIRST(&kbp->kb_freelist);
290 	XSIMPLEQ_REMOVE_HEAD(&kbp->kb_freelist, kf_flist);
291 	va = (caddr_t)freep;
292 #ifdef DIAGNOSTIC
293 	savedtype = (unsigned)freep->kf_type < M_LAST ?
294 		memname[freep->kf_type] : "???";
295 	if (freshalloc == 0 && XSIMPLEQ_FIRST(&kbp->kb_freelist)) {
296 		int rv;
297 		vaddr_t addr = (vaddr_t)XSIMPLEQ_FIRST(&kbp->kb_freelist);
298 
299 		vm_map_lock(kmem_map);
300 		rv = uvm_map_checkprot(kmem_map, addr,
301 		    addr + sizeof(struct kmem_freelist), PROT_WRITE);
302 		vm_map_unlock(kmem_map);
303 
304 		if (!rv)  {
305 			printf("%s %zd of object %p size 0x%lx %s %s"
306 			    " (invalid addr %p)\n",
307 			    "Data modified on freelist: word",
308 			    (int32_t *)&addr - (int32_t *)kbp, va, size,
309 			    "previous type", savedtype, (void *)addr);
310 		}
311 	}
312 
313 	/* Fill the fields that we've used with poison */
314 	poison_mem(freep, sizeof(*freep));
315 
316 	/* and check that the data hasn't been modified. */
317 	if (freshalloc == 0) {
318 		size_t pidx;
319 		uint32_t pval;
320 		if (poison_check(va, allocsize, &pidx, &pval)) {
321 			panic("%s %zd of object %p size 0x%lx %s %s"
322 			    " (0x%x != 0x%x)\n",
323 			    "Data modified on freelist: word",
324 			    pidx, va, size, "previous type",
325 			    savedtype, ((int32_t*)va)[pidx], pval);
326 		}
327 	}
328 
329 	freep->kf_spare0 = 0;
330 #endif /* DIAGNOSTIC */
331 #ifdef KMEMSTATS
332 	kup = btokup(va);
333 	if (kup->ku_indx != indx)
334 		panic("malloc: wrong bucket");
335 	if (kup->ku_freecnt == 0)
336 		panic("malloc: lost data");
337 	kup->ku_freecnt--;
338 	kbp->kb_totalfree--;
339 out:
340 	kbp->kb_calls++;
341 	ksp->ks_inuse++;
342 	ksp->ks_calls++;
343 	if (ksp->ks_memuse > ksp->ks_maxused)
344 		ksp->ks_maxused = ksp->ks_memuse;
345 #else
346 out:
347 #endif
348 	mtx_leave(&malloc_mtx);
349 
350 	if ((flags & M_ZERO) && va != NULL)
351 		memset(va, 0, size);
352 
353 	TRACEPOINT(uvm, malloc, type, va, size, flags);
354 
355 	return (va);
356 }
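
/*
 * An illustrative caller (the type M_DEVBUF and the structure are
 * placeholders, not taken from this file): one of M_WAITOK or M_NOWAIT
 * is mandatory, and free() expects the same type and, ideally, the
 * same size back.
 *
 *	struct softc *sc;
 *
 *	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
 *	...
 *	free(sc, M_DEVBUF, sizeof(*sc));
 *
 * With M_NOWAIT (and with M_CANFAIL for oversized requests) the return
 * value may be NULL and must be checked.
 */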
357 
358 /*
359  * Free a block of memory allocated by malloc.
360  */
361 void
362 free(void *addr, int type, size_t freedsize)
363 {
364 	struct kmembuckets *kbp;
365 	struct kmemusage *kup;
366 	struct kmem_freelist *freep;
367 	long size;
368 	int s;
369 #ifdef DIAGNOSTIC
370 	long alloc;
371 #endif
372 #ifdef KMEMSTATS
373 	struct kmemstats *ksp = &kmemstats[type];
374 	int wake;
375 #endif
376 
377 	if (addr == NULL)
378 		return;
379 
380 #ifdef DIAGNOSTIC
381 	if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
382 		panic("free: non-malloced addr %p type %s", addr,
383 		    memname[type]);
384 #endif
385 
386 	TRACEPOINT(uvm, free, type, addr, freedsize);
387 
388 	mtx_enter(&malloc_mtx);
389 	kup = btokup(addr);
390 	size = 1 << kup->ku_indx;
391 	kbp = &bucket[kup->ku_indx];
392 	if (size > MAXALLOCSAVE)
393 		size = kup->ku_pagecnt << PAGE_SHIFT;
394 #ifdef DIAGNOSTIC
395 #if 0
396 	if (freedsize == 0) {
397 		static int zerowarnings;
398 		if (zerowarnings < 5) {
399 			zerowarnings++;
400 			printf("free with zero size: (%d)\n", type);
401 #ifdef DDB
402 			db_stack_dump();
403 #endif
404 	}
405 #endif
406 	if (freedsize != 0 && freedsize > size)
407 		panic("free: size too large %zu > %ld (%p) type %s",
408 		    freedsize, size, addr, memname[type]);
409 	if (freedsize != 0 && size > MINALLOCSIZE && freedsize <= size / 2)
410 		panic("free: size too small %zu <= %ld / 2 (%p) type %s",
411 		    freedsize, size, addr, memname[type]);
412 	/*
413 	 * Check for returns of data that do not point to the
414 	 * beginning of the allocation.
415 	 */
416 	if (size > PAGE_SIZE)
417 		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
418 	else
419 		alloc = addrmask[kup->ku_indx];
420 	if (((u_long)addr & alloc) != 0)
421 		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
422 			addr, size, memname[type], alloc);
423 #endif /* DIAGNOSTIC */
424 	if (size > MAXALLOCSAVE) {
425 		u_short pagecnt = kup->ku_pagecnt;
426 
427 		kup->ku_indx = 0;
428 		kup->ku_pagecnt = 0;
429 		mtx_leave(&malloc_mtx);
430 		s = splvm();
431 		uvm_km_free(kmem_map, (vaddr_t)addr, ptoa(pagecnt));
432 		splx(s);
433 #ifdef KMEMSTATS
434 		mtx_enter(&malloc_mtx);
435 		ksp->ks_memuse -= size;
436 		wake = ksp->ks_memuse + size >= ksp->ks_limit &&
437 		    ksp->ks_memuse < ksp->ks_limit;
438 		ksp->ks_inuse--;
439 		kbp->kb_total -= 1;
440 		mtx_leave(&malloc_mtx);
441 		if (wake)
442 			wakeup(ksp);
443 #endif
444 		return;
445 	}
446 	freep = (struct kmem_freelist *)addr;
447 #ifdef DIAGNOSTIC
448 	/*
449 	 * Check for multiple frees. Use a quick check to see if
450 	 * it looks free before laboriously searching the freelist.
451 	 */
452 	if (freep->kf_spare0 == poison_value(freep)) {
453 		struct kmem_freelist *fp;
454 		XSIMPLEQ_FOREACH(fp, &kbp->kb_freelist, kf_flist) {
455 			if (addr != fp)
456 				continue;
457 			printf("multiply freed item %p\n", addr);
458 			panic("free: duplicated free");
459 		}
460 	}
461 	/*
462 	 * Copy in known text to detect modification after freeing
463 	 * and to make it look free. Also, save the type being freed
464 	 * so we can list the likely culprit if modification is detected
465 	 * when the object is reallocated.
466 	 */
467 	poison_mem(addr, size);
468 	freep->kf_spare0 = poison_value(freep);
469 
470 	freep->kf_type = type;
471 #endif /* DIAGNOSTIC */
472 #ifdef KMEMSTATS
473 	kup->ku_freecnt++;
474 	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
475 		if (kup->ku_freecnt > kbp->kb_elmpercl)
476 			panic("free: multiple frees");
477 		else if (kbp->kb_totalfree > kbp->kb_highwat)
478 			kbp->kb_couldfree++;
479 	}
480 	kbp->kb_totalfree++;
481 	ksp->ks_memuse -= size;
482 	wake = ksp->ks_memuse + size >= ksp->ks_limit &&
483 	    ksp->ks_memuse < ksp->ks_limit;
484 	ksp->ks_inuse--;
485 #endif
486 	XSIMPLEQ_INSERT_TAIL(&kbp->kb_freelist, freep, kf_flist);
487 	mtx_leave(&malloc_mtx);
488 #ifdef KMEMSTATS
489 	if (wake)
490 		wakeup(ksp);
491 #endif
492 }
493 
494 /*
495  * Compute the number of pages that kmem_map will map, that is,
496  * the size of the kernel malloc arena.
497  */
498 void
499 kmeminit_nkmempages(void)
500 {
501 	u_int npages;
502 
503 	if (nkmempages != -1) {
504 		/*
505 		 * It's already been set (by us being here before, or
506 		 * by patching or kernel config options); bail out now.
507 		 */
508 		return;
509 	}
510 
511 	/*
512 	 * We use the following (simple) formula:
513 	 *
514 	 * Up to 1G physmem use physical memory / 4,
515 	 * above 1G add an extra 16MB per 1G of memory.
516 	 *
517 	 * Clamp it down depending on VM_KERNEL_SPACE_SIZE
518 	 * - up to and including 512M -> 64MB
519 	 * - between 512M and 1024M -> 128MB
520 	 * - over 1024M clamping to VM_KERNEL_SPACE_SIZE / 4
521 	 */
522 	npages = MIN(physmem, atop(1024 * 1024 * 1024)) / 4;
523 	if (physmem > atop(1024 * 1024 * 1024))
524 		npages += (physmem - atop(1024 * 1024 * 1024)) / 64;
525 
526 	if (VM_KERNEL_SPACE_SIZE <= 512 * 1024 * 1024) {
527 		if (npages > atop(64 * 1024 * 1024))
528 			npages = atop(64 * 1024 * 1024);
529 	} else if (VM_KERNEL_SPACE_SIZE <= 1024 * 1024 * 1024) {
530 		if (npages > atop(128 * 1024 * 1024))
531 			npages = atop(128 * 1024 * 1024);
532 	} else if (npages > atop(VM_KERNEL_SPACE_SIZE) / 4)
533 		npages = atop(VM_KERNEL_SPACE_SIZE) / 4;
534 
535 	nkmempages = npages;
536 }
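
/*
 * A worked example of the formula above, assuming 4KB pages (PAGE_SIZE
 * is machine dependent) and 4GB of physical memory:
 *
 *	base:	atop(1G) / 4		   = 65536 pages (256MB)
 *	extra:	(atop(4G) - atop(1G)) / 64 = 12288 pages (48MB, i.e.
 *		16MB for each of the 3GB above 1GB)
 *	total:	77824 pages (304MB), then clamped to
 *		VM_KERNEL_SPACE_SIZE / 4 if that is smaller.
 */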
537 
538 /*
539  * Initialize the kernel memory allocator
540  */
541 void
542 kmeminit(void)
543 {
544 	vaddr_t base, limit;
545 	long indx;
546 
547 #if defined(KMEMSTATS) || defined(DIAGNOSTIC)
548 	int i, siz, totlen;
549 #endif
550 
551 #ifdef DIAGNOSTIC
552 	if (sizeof(struct kmem_freelist) > (1 << MINBUCKET))
553 		panic("kmeminit: minbucket too small/struct freelist too big");
554 #endif
555 
556 	/*
557 	 * Compute the number of kmem_map pages, if we have not
558 	 * done so already.
559 	 */
560 	kmeminit_nkmempages();
561 	base = vm_map_min(kernel_map);
562 	kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
563 	    (vsize_t)nkmempages << PAGE_SHIFT,
564 #ifdef KVA_GUARDPAGES
565 	    VM_MAP_INTRSAFE | VM_MAP_GUARDPAGES,
566 #else
567 	    VM_MAP_INTRSAFE,
568 #endif
569 	    FALSE, &kmem_map_store);
570 	kmembase = (char *)base;
571 	kmemlimit = (char *)limit;
572 	kmemusage = km_alloc(round_page(nkmempages * sizeof(struct kmemusage)),
573 	    &kv_any, &kp_zero, &kd_waitok);
574 	for (indx = 0; indx < MINBUCKET + 16; indx++) {
575 		XSIMPLEQ_INIT(&bucket[indx].kb_freelist);
576 	}
577 #ifdef KMEMSTATS
578 	for (indx = 0; indx < MINBUCKET + 16; indx++) {
579 		if (1 << indx >= PAGE_SIZE)
580 			bucket[indx].kb_elmpercl = 1;
581 		else
582 			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
583 		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
584 	}
585 	for (indx = 0; indx < M_LAST; indx++)
586 		kmemstats[indx].ks_limit =
587 		    (long)nkmempages * PAGE_SIZE * 6 / 10;
588 
589 	memset(buckstring, 0, sizeof(buckstring));
590 	for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) {
591 		snprintf(buckstring + siz, sizeof buckstring - siz,
592 		    "%d,", (u_int)(1<<i));
593 		siz += strlen(buckstring + siz);
594 	}
595 	/* Remove trailing comma */
596 	if (siz)
597 		buckstring[siz - 1] = '\0';
598 #endif
599 #if defined(KMEMSTATS) || defined(DIAGNOSTIC)
600 	/* Figure out how large a buffer we need */
601 	for (totlen = 0, i = 0; i < M_LAST; i++) {
602 		if (memname[i])
603 			totlen += strlen(memname[i]);
604 		totlen++;
605 	}
606 	memall = malloc(totlen + M_LAST, M_SYSCTL, M_WAITOK|M_ZERO);
607 	for (siz = 0, i = 0; i < M_LAST; i++) {
608 		snprintf(memall + siz, totlen + M_LAST - siz, "%s,",
609 		    memname[i] ? memname[i] : "");
610 		siz += strlen(memall + siz);
611 	}
612 	/* Remove trailing comma */
613 	if (siz)
614 		memall[siz - 1] = '\0';
615 	/* Now, convert all spaces to underscores */
616 	for (i = 0; i < totlen; i++) {
617 		if (memall[i] == ' ')
618 			memall[i] = '_';
619 	}
620 #endif
621 }
622 
623 /*
624  * Return kernel malloc statistics information.
625  */
626 int
627 sysctl_malloc(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
628     size_t newlen, struct proc *p)
629 {
630 	struct kmembuckets kb;
631 #ifdef KMEMSTATS
632 	struct kmemstats km;
633 #endif
634 
635 	if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
636 	    name[0] != KERN_MALLOC_KMEMNAMES)
637 		return (ENOTDIR);		/* overloaded */
638 
639 	switch (name[0]) {
640 	case KERN_MALLOC_BUCKETS:
641 		return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));
642 
643 	case KERN_MALLOC_BUCKET:
644 		mtx_enter(&malloc_mtx);
645 		memcpy(&kb, &bucket[BUCKETINDX(name[1])], sizeof(kb));
646 		mtx_leave(&malloc_mtx);
647 		memset(&kb.kb_freelist, 0, sizeof(kb.kb_freelist));
648 		return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
649 	case KERN_MALLOC_KMEMSTATS:
650 #ifdef KMEMSTATS
651 		if ((name[1] < 0) || (name[1] >= M_LAST))
652 			return (EINVAL);
653 		mtx_enter(&malloc_mtx);
654 		memcpy(&km, &kmemstats[name[1]], sizeof(km));
655 		mtx_leave(&malloc_mtx);
656 		return (sysctl_rdstruct(oldp, oldlenp, newp, &km, sizeof(km)));
657 #else
658 		return (EOPNOTSUPP);
659 #endif
660 #if defined(KMEMSTATS) || defined(DIAGNOSTIC)
661 	case KERN_MALLOC_KMEMNAMES:
662 		return (sysctl_rdstring(oldp, oldlenp, newp, memall));
663 #endif
664 	default:
665 		return (EOPNOTSUPP);
666 	}
667 	/* NOTREACHED */
668 }
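
/*
 * These are the nodes behind the kern.malloc.* sysctl tree.  A minimal
 * userland sketch (illustrative only; needs <sys/param.h>,
 * <sys/sysctl.h>, <err.h> and <stdio.h>) that reads the bucket size
 * list exported via buckstring:
 *
 *	int mib[3] = { CTL_KERN, KERN_MALLOC, KERN_MALLOC_BUCKETS };
 *	char buf[128];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 3, buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	printf("%s\n", buf);	prints something like "16,32,64,..."
 */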
669 
670 #if defined(DDB)
671 
672 void
673 malloc_printit(
674     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
675 {
676 #ifdef KMEMSTATS
677 	struct kmemstats *km;
678 	int i;
679 
680 	(*pr)("%15s %5s  %6s  %7s  %6s %9s %8s\n",
681 	    "Type", "InUse", "MemUse", "HighUse", "Limit", "Requests",
682 	    "Type Lim");
683 	for (i = 0, km = kmemstats; i < M_LAST; i++, km++) {
684 		if (!km->ks_calls || !memname[i])
685 			continue;
686 
687 		(*pr)("%15s %5ld %6ldK %7ldK %6ldK %9ld %8d\n",
688 		    memname[i], km->ks_inuse, km->ks_memuse / 1024,
689 		    km->ks_maxused / 1024, km->ks_limit / 1024,
690 		    km->ks_calls, km->ks_limblocks);
691 	}
692 #else
693 	(*pr)("No KMEMSTATS compiled in\n");
694 #endif
695 }
696 #endif /* DDB */
697 
698 /*
699  * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
700  *
701  * Permission to use, copy, modify, and distribute this software for any
702  * purpose with or without fee is hereby granted, provided that the above
703  * copyright notice and this permission notice appear in all copies.
704  *
705  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
706  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
707  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
708  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
709  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
710  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
711  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
712  */
713 
714 /*
715  * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
716  * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
717  */
718 #define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
719 
720 void *
721 mallocarray(size_t nmemb, size_t size, int type, int flags)
722 {
723 	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
724 	    nmemb > 0 && SIZE_MAX / nmemb < size) {
725 		if (flags & M_CANFAIL)
726 			return (NULL);
727 		panic("mallocarray: overflow %zu * %zu", nmemb, size);
728 	}
729 	return (malloc(size * nmemb, type, flags));
730 }
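
/*
 * A worked example of the check above on LP64 (an assumption), where
 * MUL_NO_OVERFLOW is 1UL << 32: if both nmemb and size are below 2^32
 * their product is below 2^64 and cannot wrap, so the division is
 * skipped entirely.  For mallocarray(1UL << 33, 1UL << 33, ...), the
 * slow path runs, SIZE_MAX / nmemb = 2^31 - 1 < size, and the overflow
 * is caught: the call panics, or returns NULL if M_CANFAIL was given.
 */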
731