/*	$OpenBSD: kern_malloc.c,v 1.152 2024/06/26 01:40:49 jsg Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/stdint.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/tracepoint.h>

#include <uvm/uvm_extern.h>

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_output.h>
#endif

static
#ifndef SMALL_KERNEL
__inline__
#endif
long
BUCKETINDX(size_t sz)
{
	long b, d;

	/* note that this relies upon MINALLOCSIZE being 1 << MINBUCKET */
	b = 7 + MINBUCKET; d = 4;
	while (d != 0) {
		if (sz <= (1 << b))
			b -= d;
		else
			b += d;
		d >>= 1;
	}
	if (sz <= (1 << b))
		b += 0;
	else
		b += 1;
	return b;
}
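
/*
 * BUCKETINDX() is a binary search over the 16 bucket sizes, starting
 * at the midpoint bucket MINBUCKET + 7 and taking four comparisons in
 * all.  A worked example (assuming MINBUCKET == 4, i.e. a 16-byte
 * minimum allocation): for sz == 100 the probes test 2048, 128, 32
 * and finally 64 bytes, converging on bucket index 7, the 128-byte
 * bucket -- the smallest power of two that holds the request.
 */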

static struct vm_map kmem_map_store;
struct vm_map *kmem_map = NULL;

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define NKMEMPAGES	-1
#endif
u_int	nkmempages = NKMEMPAGES;

struct mutex malloc_mtx = MUTEX_INITIALIZER(IPL_VM);
struct kmembuckets bucket[MINBUCKET + 16];
#ifdef KMEMSTATS
struct kmemstats kmemstats[M_LAST];
#endif
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char buckstring[16 * sizeof("123456,")];
int buckstring_init = 0;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC)
char *memname[] = INITKMEMNAMES;
char *memall = NULL;
struct rwlock sysctl_kmemlock = RWLOCK_INITIALIZER("sysctlklk");
#endif

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct kmem_freelist {
	int32_t	kf_spare0;
	int16_t	kf_type;
	int16_t	kf_spare1;
	XSIMPLEQ_ENTRY(kmem_freelist) kf_flist;
};
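
/*
 * Note: XSIMPLEQ is OpenBSD's XOR'd simple queue, which stores the list
 * pointers XORed with a per-queue random cookie, so a stray write over a
 * freed object's kf_flist field yields a wild pointer on the next list
 * walk rather than a usable list link.
 */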

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

#endif /* DIAGNOSTIC */

#ifndef SMALL_KERNEL
struct timeval malloc_errintvl = { 5, 0 };
struct timeval malloc_lasterr;
#endif

/*
 * Allocate a block of memory
 */
void *
malloc(size_t size, int type, int flags)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct kmem_freelist *freep;
	long indx, npg, allocsize;
	caddr_t va, cp;
	int s;
#ifdef DIAGNOSTIC
	int freshalloc;
	char *savedtype;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];
	int wake;

	if (((unsigned long)type) <= 1 || ((unsigned long)type) >= M_LAST)
		panic("malloc: bogus type %d", type);
#endif

	KASSERT(flags & (M_WAITOK | M_NOWAIT));

#ifdef DIAGNOSTIC
	if ((flags & M_NOWAIT) == 0) {
		extern int pool_debug;
		assertwaitok();
		if (pool_debug == 2)
			yield();
	}
#endif

	if (size > 65535 * PAGE_SIZE) {
		if (flags & M_CANFAIL) {
#ifndef SMALL_KERNEL
			if (ratecheck(&malloc_lasterr, &malloc_errintvl))
				printf("malloc(): allocation too large, "
				    "type = %d, size = %lu\n", type, size);
#endif
			return (NULL);
		} else
			panic("malloc: allocation too large, "
			    "type = %d, size = %lu", type, size);
	}

	indx = BUCKETINDX(size);
	if (size > MAXALLOCSAVE)
		allocsize = round_page(size);
	else
		allocsize = 1 << indx;
	kbp = &bucket[indx];
	mtx_enter(&malloc_mtx);
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			mtx_leave(&malloc_mtx);
			return (NULL);
		}
#ifdef DIAGNOSTIC
		if (ISSET(flags, M_WAITOK) && curproc == &proc0)
			panic("%s: cannot sleep for memory during boot",
			    __func__);
#endif
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		msleep_nsec(ksp, &malloc_mtx, PSWP+2, memname[type], INFSLP);
	}
	ksp->ks_memuse += allocsize; /* account for this early */
	ksp->ks_size |= 1 << indx;
#endif
	if (XSIMPLEQ_FIRST(&kbp->kb_freelist) == NULL) {
		mtx_leave(&malloc_mtx);
		npg = atop(round_page(allocsize));
		s = splvm();
		va = (caddr_t)uvm_km_kmemalloc_pla(kmem_map, NULL,
		    (vsize_t)ptoa(npg), 0,
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0),
		    no_constraint.ucr_low, no_constraint.ucr_high,
		    0, 0, 0);
		splx(s);
		if (va == NULL) {
			/*
			 * uvm_km_kmemalloc_pla() can return NULL, even if
			 * it can wait, if there is no map space available,
			 * because it can't fix that problem.  Neither can
			 * we, right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");

#ifdef KMEMSTATS
			mtx_enter(&malloc_mtx);
			ksp->ks_memuse -= allocsize;
			wake = ksp->ks_memuse + allocsize >= ksp->ks_limit &&
			    ksp->ks_memuse < ksp->ks_limit;
			mtx_leave(&malloc_mtx);
			if (wake)
				wakeup(ksp);
#endif
			return (NULL);
		}
		mtx_enter(&malloc_mtx);
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
#ifdef DIAGNOSTIC
		freshalloc = 1;
#endif
		if (allocsize > MAXALLOCSAVE) {
			kup->ku_pagecnt = npg;
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		cp = va + (npg * PAGE_SIZE) - allocsize;
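		/*
		 * Carve the fresh pages into allocsize-byte chunks,
		 * working backwards from the end of the block, and push
		 * each chunk onto the bucket's freelist; the chunk at va
		 * itself is inserted last, so it sits at the head and is
		 * the one handed out below.
		 */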
		for (;;) {
			freep = (struct kmem_freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			poison_mem(cp, allocsize);
			freep->kf_type = M_FREE;
#endif /* DIAGNOSTIC */
			XSIMPLEQ_INSERT_HEAD(&kbp->kb_freelist, freep,
			    kf_flist);
			if (cp <= va)
				break;
			cp -= allocsize;
		}
	} else {
#ifdef DIAGNOSTIC
		freshalloc = 0;
#endif
	}
	freep = XSIMPLEQ_FIRST(&kbp->kb_freelist);
	XSIMPLEQ_REMOVE_HEAD(&kbp->kb_freelist, kf_flist);
	va = (caddr_t)freep;
#ifdef DIAGNOSTIC
	savedtype = (unsigned)freep->kf_type < M_LAST ?
	    memname[freep->kf_type] : "???";
	if (freshalloc == 0 && XSIMPLEQ_FIRST(&kbp->kb_freelist)) {
		int rv;
		vaddr_t addr = (vaddr_t)XSIMPLEQ_FIRST(&kbp->kb_freelist);

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct kmem_freelist), PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (!rv) {
			printf("%s %zd of object %p size 0x%lx %s %s"
			    " (invalid addr %p)\n",
			    "Data modified on freelist: word",
			    (int32_t *)&addr - (int32_t *)kbp, va, size,
			    "previous type", savedtype, (void *)addr);
		}
	}

	/* Fill the fields that we've used with poison */
	poison_mem(freep, sizeof(*freep));

	/* and check that the data hasn't been modified. */
	if (freshalloc == 0) {
		size_t pidx;
		uint32_t pval;
		if (poison_check(va, allocsize, &pidx, &pval)) {
			panic("%s %zd of object %p size 0x%lx %s %s"
			    " (0x%x != 0x%x)\n",
			    "Data modified on freelist: word",
			    pidx, va, size, "previous type",
			    savedtype, ((int32_t*)va)[pidx], pval);
		}
	}

	freep->kf_spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	mtx_leave(&malloc_mtx);

	if ((flags & M_ZERO) && va != NULL)
		memset(va, 0, size);

	TRACEPOINT(uvm, malloc, type, va, size, flags);

	return (va);
}
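
/*
 * A typical call, as a sketch (M_DEVBUF and struct foo are arbitrary
 * examples, not mandated by this file):
 *
 *	buf = malloc(sizeof(struct foo), M_DEVBUF, M_WAITOK | M_ZERO);
 *
 * Per the logic above, M_WAITOK callers may sleep and only see NULL if
 * M_CANFAIL is also set; M_NOWAIT callers must always check for NULL.
 */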

/*
 * Free a block of memory allocated by malloc.
 */
void
free(void *addr, int type, size_t freedsize)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct kmem_freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	long alloc;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];
	int wake;
#endif

	if (addr == NULL)
		return;

#ifdef DIAGNOSTIC
	if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
		panic("free: non-malloced addr %p type %s", addr,
		    memname[type]);
#endif

	TRACEPOINT(uvm, free, type, addr, freedsize);

	mtx_enter(&malloc_mtx);
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	if (size > MAXALLOCSAVE)
		size = kup->ku_pagecnt << PAGE_SHIFT;
#ifdef DIAGNOSTIC
#if 0
	if (freedsize == 0) {
		static int zerowarnings;
		if (zerowarnings < 5) {
			zerowarnings++;
			printf("free with zero size: (%d)\n", type);
#ifdef DDB
			db_stack_dump();
#endif
		}
	}
#endif
	if (freedsize != 0 && freedsize > size)
		panic("free: size too large %zu > %ld (%p) type %s",
		    freedsize, size, addr, memname[type]);
	if (freedsize != 0 && size > MINALLOCSIZE && freedsize <= size / 2)
		panic("free: size too small %zu <= %ld / 2 (%p) type %s",
		    freedsize, size, addr, memname[type]);
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		u_short pagecnt = kup->ku_pagecnt;

		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		mtx_leave(&malloc_mtx);
		s = splvm();
		uvm_km_free(kmem_map, (vaddr_t)addr, ptoa(pagecnt));
		splx(s);
#ifdef KMEMSTATS
		mtx_enter(&malloc_mtx);
		ksp->ks_memuse -= size;
		wake = ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit;
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
		mtx_leave(&malloc_mtx);
		if (wake)
			wakeup(ksp);
#endif
		return;
	}
	freep = (struct kmem_freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees.  Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->kf_spare0 == poison_value(freep)) {
		struct kmem_freelist *fp;
		XSIMPLEQ_FOREACH(fp, &kbp->kb_freelist, kf_flist) {
			if (addr != fp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	poison_mem(addr, size);
	freep->kf_spare0 = poison_value(freep);

	freep->kf_type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	wake = ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit;
	ksp->ks_inuse--;
#endif
	XSIMPLEQ_INSERT_TAIL(&kbp->kb_freelist, freep, kf_flist);
	mtx_leave(&malloc_mtx);
#ifdef KMEMSTATS
	if (wake)
		wakeup(ksp);
#endif
}
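
/*
 * Sketch of the matching release for the malloc() example above (struct
 * foo is again hypothetical); passing the original size lets the
 * DIAGNOSTIC checks above validate it against the bucket size, while
 * passing 0 as freedsize skips those checks:
 *
 *	free(buf, M_DEVBUF, sizeof(struct foo));
 */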

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
	u_int npages;

	if (nkmempages != -1) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), so bail out now.
		 */
		return;
	}

	/*
	 * We use the following (simple) formula:
	 *
	 * - Up to 1G of physmem, use physical memory / 4.
	 * - Above 1G, add an extra 16MB per additional 1G of memory.
	 *
	 * Then clamp it down depending on VM_KERNEL_SPACE_SIZE:
	 * - up to and including 512M -> 64MB
	 * - between 512M and 1024M -> 128MB
	 * - over 1024M clamp to VM_KERNEL_SPACE_SIZE / 4
	 */
	npages = MIN(physmem, atop(1024 * 1024 * 1024)) / 4;
	if (physmem > atop(1024 * 1024 * 1024))
		npages += (physmem - atop(1024 * 1024 * 1024)) / 64;

	if (VM_KERNEL_SPACE_SIZE <= 512 * 1024 * 1024) {
		if (npages > atop(64 * 1024 * 1024))
			npages = atop(64 * 1024 * 1024);
	} else if (VM_KERNEL_SPACE_SIZE <= 1024 * 1024 * 1024) {
		if (npages > atop(128 * 1024 * 1024))
			npages = atop(128 * 1024 * 1024);
	} else if (npages > atop(VM_KERNEL_SPACE_SIZE) / 4)
		npages = atop(VM_KERNEL_SPACE_SIZE) / 4;

	nkmempages = npages;
}
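
/*
 * A worked example of the formula above, assuming 4KB pages and 4GB of
 * physical memory: the first 1GB contributes atop(1G) / 4 == 65536
 * pages (256MB), the remaining 3GB contributes 3 * atop(1G) / 64 ==
 * 12288 pages (48MB), for 77824 pages (304MB) before the
 * VM_KERNEL_SPACE_SIZE clamp is applied.
 */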

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	vaddr_t base, limit;
	long indx;

#ifdef DIAGNOSTIC
	if (sizeof(struct kmem_freelist) > (1 << MINBUCKET))
		panic("kmeminit: minbucket too small/struct freelist too big");
#endif

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();
	base = vm_map_min(kernel_map);
	kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
	    (vsize_t)nkmempages << PAGE_SHIFT,
#ifdef KVA_GUARDPAGES
	    VM_MAP_INTRSAFE | VM_MAP_GUARDPAGES,
#else
	    VM_MAP_INTRSAFE,
#endif
	    FALSE, &kmem_map_store);
	kmembase = (char *)base;
	kmemlimit = (char *)limit;
	kmemusage = km_alloc(round_page(nkmempages * sizeof(struct kmemusage)),
	    &kv_any, &kp_zero, &kd_waitok);
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		XSIMPLEQ_INIT(&bucket[indx].kb_freelist);
	}
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit =
		    (long)nkmempages * PAGE_SIZE * 6 / 10;
#endif
}

/*
 * Return kernel malloc statistics information.
 */
int
sysctl_malloc(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct kmembuckets kb;
#ifdef KMEMSTATS
	struct kmemstats km;
#endif
#if defined(KMEMSTATS) || defined(DIAGNOSTIC)
	int error;
#endif
	int i, siz;

	if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
	    name[0] != KERN_MALLOC_KMEMNAMES)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_MALLOC_BUCKETS:
		/* Initialize the first time */
		if (buckstring_init == 0) {
			buckstring_init = 1;
			memset(buckstring, 0, sizeof(buckstring));
			for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) {
				snprintf(buckstring + siz,
				    sizeof buckstring - siz,
				    "%d,", (u_int)(1<<i));
				siz += strlen(buckstring + siz);
			}
			/* Remove trailing comma */
			if (siz)
				buckstring[siz - 1] = '\0';
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));

	case KERN_MALLOC_BUCKET:
		mtx_enter(&malloc_mtx);
		memcpy(&kb, &bucket[BUCKETINDX(name[1])], sizeof(kb));
		mtx_leave(&malloc_mtx);
		memset(&kb.kb_freelist, 0, sizeof(kb.kb_freelist));
		return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
	case KERN_MALLOC_KMEMSTATS:
#ifdef KMEMSTATS
		if ((name[1] < 0) || (name[1] >= M_LAST))
			return (EINVAL);
		mtx_enter(&malloc_mtx);
		memcpy(&km, &kmemstats[name[1]], sizeof(km));
		mtx_leave(&malloc_mtx);
		return (sysctl_rdstruct(oldp, oldlenp, newp, &km, sizeof(km)));
#else
		return (EOPNOTSUPP);
#endif
	case KERN_MALLOC_KMEMNAMES:
#if defined(KMEMSTATS) || defined(DIAGNOSTIC)
		error = rw_enter(&sysctl_kmemlock, RW_WRITE|RW_INTR);
		if (error)
			return (error);
		if (memall == NULL) {
			int totlen;

			/* Figure out how large a buffer we need */
			for (totlen = 0, i = 0; i < M_LAST; i++) {
				if (memname[i])
					totlen += strlen(memname[i]);
				totlen++;
			}
			memall = malloc(totlen + M_LAST, M_SYSCTL,
			    M_WAITOK|M_ZERO);
			for (siz = 0, i = 0; i < M_LAST; i++) {
				snprintf(memall + siz,
				    totlen + M_LAST - siz,
				    "%s,", memname[i] ? memname[i] : "");
				siz += strlen(memall + siz);
			}
			/* Remove trailing comma */
			if (siz)
				memall[siz - 1] = '\0';

			/* Now, convert all spaces to underscores */
			for (i = 0; i < totlen; i++)
				if (memall[i] == ' ')
					memall[i] = '_';
		}
		rw_exit_write(&sysctl_kmemlock);
		return (sysctl_rdstring(oldp, oldlenp, newp, memall));
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

#if defined(DDB)

void
malloc_printit(
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
#ifdef KMEMSTATS
	struct kmemstats *km;
	int i;

	(*pr)("%15s %5s %6s %7s %6s %9s %8s\n",
	    "Type", "InUse", "MemUse", "HighUse", "Limit", "Requests",
	    "Type Lim");
	for (i = 0, km = kmemstats; i < M_LAST; i++, km++) {
		if (!km->ks_calls || !memname[i])
			continue;

		(*pr)("%15s %5ld %6ldK %7ldK %6ldK %9ld %8d\n",
		    memname[i], km->ks_inuse, km->ks_memuse / 1024,
		    km->ks_maxused / 1024, km->ks_limit / 1024,
		    km->ks_calls, km->ks_limblocks);
	}
#else
	(*pr)("No KMEMSTATS compiled in\n");
#endif
}
#endif /* DDB */

/*
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
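
/*
 * sizeof(size_t) * 4 is half the width of size_t in bits, so with a
 * 64-bit size_t this is 1UL << 32 and with a 32-bit size_t 1UL << 16;
 * the expensive division below therefore only runs when at least one
 * operand has a bit set in its upper half.
 */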

void *
mallocarray(size_t nmemb, size_t size, int type, int flags)
{
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		if (flags & M_CANFAIL)
			return (NULL);
		panic("mallocarray: overflow %zu * %zu", nmemb, size);
	}
	return (malloc(size * nmemb, type, flags));
}
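
/*
 * Example use, with hypothetical names: allocating a table of n entries
 * without an unchecked n * sizeof(...) multiplication:
 *
 *	tbl = mallocarray(n, sizeof(struct entry), M_DEVBUF,
 *	    M_WAITOK | M_CANFAIL | M_ZERO);
 *	if (tbl == NULL)
 *		return (ENOMEM);
 *
 * Without M_CANFAIL an overflowing multiplication panics instead of
 * returning NULL.
 */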