/*	$OpenBSD: kern_malloc.c,v 1.96 2013/03/21 01:29:41 deraadt Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/rwlock.h>

#include <uvm/uvm.h>

static __inline__ long
BUCKETINDX(size_t sz)
{
#ifdef SMALL_KERNEL
	long b;

	if (sz-- == 0)
		return MINBUCKET;

	for (b = MINBUCKET; b < MINBUCKET + 15; b++)
		if ((sz >> b) == 0)
			break;
#else
	long b, d;

	/* note that this relies upon MINALLOCSIZE being 1 << MINBUCKET */
	b = 7 + MINBUCKET; d = 4;
	while (d != 0) {
		if (sz <= (1 << b))
			b -= d;
		else
			b += d;
		d >>= 1;
	}
	if (sz <= (1 << b))
		b += 0;
	else
		b += 1;
#endif

	return b;
}
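/*
 * For example, assuming MINBUCKET is 4 (a 16-byte minimum bucket), the
 * binary search above starts at bucket 11 (2048 bytes) and converges on
 * the smallest power of two that fits the request: BUCKETINDX(16) == 4,
 * and BUCKETINDX(100) == BUCKETINDX(128) == 7, the 128-byte bucket.
 */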
static struct vm_map kmem_map_store;
struct vm_map *kmem_map = NULL;

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
u_int	nkmempages = NKMEMPAGES;

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	0
#endif
u_int	nkmempages_min = 0;

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif
u_int	nkmempages_max = 0;

struct kmembuckets bucket[MINBUCKET + 16];
#ifdef KMEMSTATS
struct kmemstats kmemstats[M_LAST];
#endif
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char buckstring[16 * sizeof("123456,")];
int buckstring_init = 0;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
char *memname[] = INITKMEMNAMES;
char *memall = NULL;
struct rwlock sysctl_kmemlock = RWLOCK_INITIALIZER("sysctlklk");
#endif

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#ifdef DEADBEEF0
#define	WEIRD_ADDR	((unsigned) DEADBEEF0)
#else
#define	WEIRD_ADDR	((unsigned) 0xdeadbeef)
#endif
#define	POISON_SIZE	32

static void
poison(void *v, size_t len)
{
	uint32_t *ip = v;
	size_t i;

	if (len > POISON_SIZE)
		len = POISON_SIZE;
	len = len / sizeof(*ip);
	for (i = 0; i < len; i++) {
		ip[i] = WEIRD_ADDR;
	}
}

static size_t
poison_check(void *v, size_t len)
{
	uint32_t *ip = v;
	size_t i;

	if (len > POISON_SIZE)
		len = POISON_SIZE;
	len = len / sizeof(*ip);
	for (i = 0; i < len; i++) {
		if (ip[i] != WEIRD_ADDR) {
			return i;
		}
	}
	return -1;
}

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
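 *
 * For example, immediately after free() a small object carries WEIRD_ADDR
 * poison in spare0, the malloc type it was freed under in the type field,
 * and the freelist link in next; if a later malloc() finds the poison
 * disturbed, it reports that saved type as the likely culprit.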
 */
struct freelist {
	int32_t	spare0;
	int16_t	type;
	int16_t	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */

#ifndef SMALL_KERNEL
struct timeval malloc_errintvl = { 5, 0 };
struct timeval malloc_lasterr;
#endif

/*
 * Allocate a block of memory
 */
void *
malloc(unsigned long size, int type, int flags)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	size_t pidx;
	int freshalloc;
	char *savedtype;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) <= 1 || ((unsigned long)type) >= M_LAST)
		panic("malloc - bogus type");
#endif

	KASSERT(flags & (M_WAITOK | M_NOWAIT));

	if ((flags & M_NOWAIT) == 0)
		assertwaitok();

#ifdef MALLOC_DEBUG
	if (debug_malloc(size, type, flags, (void **)&va)) {
		if ((flags & M_ZERO) && va != NULL)
			memset(va, 0, size);
		return (va);
	}
#endif

	if (size > 65535 * PAGE_SIZE) {
		if (flags & M_CANFAIL) {
#ifndef SMALL_KERNEL
			if (ratecheck(&malloc_lasterr, &malloc_errintvl))
				printf("malloc(): allocation too large, "
				    "type = %d, size = %lu\n", type, size);
#endif
			return (NULL);
		} else
			panic("malloc: allocation too large, "
			    "type = %d, size = %lu\n", type, size);
	}

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splvm();
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return (NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep(ksp, PSWP+2, memname[type], 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
	if (size > MAXALLOCSAVE)
		allocsize = round_page(size);
	else
		allocsize = 1 << indx;
	if (kbp->kb_next == NULL) {
		npg = atop(round_page(allocsize));
		va = (caddr_t)uvm_km_kmemalloc_pla(kmem_map, NULL,
		    (vsize_t)ptoa(npg), 0,
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0),
		    no_constraint.ucr_low, no_constraint.ucr_high,
		    0, 0, 0);
		if (va == NULL) {
			/*
			 * uvm_km_kmemalloc_pla() can return NULL, even
			 * if it can wait, if there is no map space
			 * available, because it can't fix that problem.
			 * Neither can we, right now.  (We should release
			 * pages which are completely free and which are
			 * in buckets with too many free elements.)
			 */
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return (NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
#ifdef DIAGNOSTIC
		freshalloc = 1;
#endif
		if (allocsize > MAXALLOCSAVE) {
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
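		 *
		 * The loop below carves the fresh pages into
		 * allocsize-byte chunks from the top of the block down,
		 * pointing each chunk at the one below it; the lowest
		 * chunk is then linked to whatever the list already held.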
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			poison(cp, allocsize);
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (savedlist == NULL)
			kbp->kb_last = (caddr_t)freep;
	} else {
#ifdef DIAGNOSTIC
		freshalloc = 0;
#endif
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = (unsigned)freep->type < M_LAST ?
	    memname[freep->type] : "???";
	if (freshalloc == 0 && kbp->kb_next) {
		int rv;
		vaddr_t addr = (vaddr_t)kbp->kb_next;

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist), VM_PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (!rv) {
			printf("%s %zd of object %p size 0x%lx %s %s"
			    " (invalid addr %p)\n",
			    "Data modified on freelist: word",
			    (int32_t *)&kbp->kb_next - (int32_t *)kbp, va, size,
			    "previous type", savedtype, (void *)addr);
			kbp->kb_next = NULL;
		}
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
	poison(freep, sizeof(*freep));

	/* and check that the data hasn't been modified. */
	if (freshalloc == 0) {
		if ((pidx = poison_check(va, allocsize)) != -1) {
			printf("%s %zd of object %p size 0x%lx %s %s"
			    " (0x%x != 0x%x)\n",
			    "Data modified on freelist: word",
			    pidx, va, size, "previous type",
			    savedtype, ((int32_t*)va)[pidx], WEIRD_ADDR);
			panic("boom");
		}
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);

	if ((flags & M_ZERO) && va != NULL)
		memset(va, 0, size);
	return (va);
}

/*
 * Free a block of memory allocated by malloc.
 */
void
free(void *addr, int type)
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	long alloc;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];
#endif

#ifdef MALLOC_DEBUG
	if (debug_free(addr, type))
		return;
#endif

#ifdef DIAGNOSTIC
	if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
		panic("free: non-malloced addr %p type %s", addr,
		    memname[type]);
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splvm();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
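	 *
	 * For example, an object from the 64-byte bucket (index 6) is
	 * checked against addrmask[6] == 0x3f, so a pointer with any of
	 * its low six bits set points into the middle of an object and
	 * triggers the panic below.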
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ptoa(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PAGE_SHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup(ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees.  Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	poison(addr, size);

	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup(ksp);
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
	u_int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), bail out now.
		 */
		return;
	}

	/*
	 * We can't initialize these variables at compilation time, since
	 * the page size may not be known (on sparc GENERIC kernels, for
	 * example).  But we still want the MD code to be able to provide
	 * better values.
	 */
	if (nkmempages_min == 0)
		nkmempages_min = NKMEMPAGES_MIN;
	if (nkmempages_max == 0)
		nkmempages_max = NKMEMPAGES_MAX;

	/*
	 * We use the following (simple) formula:
	 *
	 *	- Starting point is physical memory / 4.
	 *
	 *	- Clamp it down to nkmempages_max.
	 *
	 *	- Round it up to nkmempages_min.
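	 *
	 * As a worked example with illustrative numbers: with 4KB pages
	 * and 512MB of physical memory, physmem is 131072 pages, so the
	 * starting point is 32768 pages (a 128MB kmem_map) before the
	 * clamps are applied.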
	 */
	npages = physmem / 4;

	if (npages > nkmempages_max)
		npages = nkmempages_max;

	if (npages < nkmempages_min)
		npages = nkmempages_min;

	nkmempages = npages;
}

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	vaddr_t base, limit;
#ifdef KMEMSTATS
	long indx;
#endif

#ifdef DIAGNOSTIC
	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("kmeminit: minbucket too small/struct freelist too big");
#endif

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();
	base = vm_map_min(kernel_map);
	kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
	    (vsize_t)nkmempages << PAGE_SHIFT,
#ifdef KVA_GUARDPAGES
	    VM_MAP_INTRSAFE | VM_MAP_GUARDPAGES,
#else
	    VM_MAP_INTRSAFE,
#endif
	    FALSE, &kmem_map_store);
	kmembase = (char *)base;
	kmemlimit = (char *)limit;
	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
	    (vsize_t)(nkmempages * sizeof(struct kmemusage)));
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
#endif
#ifdef MALLOC_DEBUG
	debug_malloc_init();
#endif
}

/*
 * Return kernel malloc statistics information.
 */
int
sysctl_malloc(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct kmembuckets kb;
	int i, siz;

	if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
	    name[0] != KERN_MALLOC_KMEMNAMES)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_MALLOC_BUCKETS:
		/* Initialize the first time */
		if (buckstring_init == 0) {
			buckstring_init = 1;
			bzero(buckstring, sizeof(buckstring));
			for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) {
				snprintf(buckstring + siz,
				    sizeof buckstring - siz,
				    "%d,", (u_int)(1<<i));
				siz += strlen(buckstring + siz);
			}
			/* Remove trailing comma */
			if (siz)
				buckstring[siz - 1] = '\0';
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));

	case KERN_MALLOC_BUCKET:
		bcopy(&bucket[BUCKETINDX(name[1])], &kb, sizeof(kb));
		kb.kb_next = kb.kb_last = 0;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
	case KERN_MALLOC_KMEMSTATS:
#ifdef KMEMSTATS
		if ((name[1] < 0) || (name[1] >= M_LAST))
			return (EINVAL);
		return (sysctl_rdstruct(oldp, oldlenp, newp,
		    &kmemstats[name[1]], sizeof(struct kmemstats)));
#else
		return (EOPNOTSUPP);
#endif
	case KERN_MALLOC_KMEMNAMES:
#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
		if (memall == NULL) {
			int totlen;

			i = rw_enter(&sysctl_kmemlock, RW_WRITE|RW_INTR);
			if (i)
				return (i);

			/* Figure out how large a buffer we need */
			for (totlen = 0, i = 0; i < M_LAST; i++) {
				if (memname[i])
					totlen += strlen(memname[i]);
				totlen++;
			}
			memall = malloc(totlen + M_LAST, M_SYSCTL,
			    M_WAITOK|M_ZERO);
			for (siz = 0, i = 0; i < M_LAST; i++) {
				snprintf(memall + siz,
				    totlen + M_LAST - siz,
				    "%s,", memname[i] ?
				    memname[i] : "");
				siz += strlen(memall + siz);
			}
			/* Remove trailing comma */
			if (siz)
				memall[siz - 1] = '\0';

			/* Now, convert all spaces to underscores */
			for (i = 0; i < totlen; i++)
				if (memall[i] == ' ')
					memall[i] = '_';
			rw_exit_write(&sysctl_kmemlock);
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, memall));
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Round up a size to how much malloc would actually allocate:
 * for example, a 100-byte request comes back as a 128-byte object.
 */
size_t
malloc_roundup(size_t sz)
{
	if (sz > MAXALLOCSAVE)
		return round_page(sz);

	return (1 << BUCKETINDX(sz));
}

#if defined(DDB)
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
malloc_printit(
    int (*pr)(const char *, ...) /* __attribute__((__format__(__kprintf__,1,2))) */)
{
#ifdef KMEMSTATS
	struct kmemstats *km;
	int i;

	(*pr)("%15s %5s %6s %7s %6s %9s %8s %8s\n",
	    "Type", "InUse", "MemUse", "HighUse", "Limit", "Requests",
	    "Type Lim", "Kern Lim");
	for (i = 0, km = kmemstats; i < M_LAST; i++, km++) {
		if (!km->ks_calls || !memname[i])
			continue;

		(*pr)("%15s %5ld %6ldK %7ldK %6ldK %9ld %8d %8d\n",
		    memname[i], km->ks_inuse, km->ks_memuse / 1024,
		    km->ks_maxused / 1024, km->ks_limit / 1024,
		    km->ks_calls, km->ks_limblocks, km->ks_mapblocks);
	}
#else
	(*pr)("No KMEMSTATS compiled in\n");
#endif
}
#endif /* DDB */