/*
 * (MPSAFE)
 *
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define ZONE_ERROR_INVALID	0
#define ZONE_ERROR_NOTFREE	1
#define ZONE_ERROR_ALREADYFREE	2

#define ZONE_ROUNDING	32

#define ZENTRY_FREE	0x12342378

long zone_burst = 128;

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is non-blocking
 * for ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
void *
zalloc(vm_zone_t z)
{
	globaldata_t gd = mycpu;
	vm_zpcpu_t *zpcpu;
	void *item;
	long n;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
	zpcpu = &z->zpcpu[gd->gd_cpuid];
retry:
	/*
	 * Avoid spinlock contention by allocating from a per-cpu queue
	 */
	if (zpcpu->zfreecnt > 0) {
		crit_enter_gd(gd);
		if (zpcpu->zfreecnt > 0) {
			item = zpcpu->zitems;
#ifdef INVARIANTS
			KASSERT(item != NULL,
				("zitems_pcpu unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
			((void **)item)[1] = NULL;
#endif
			zpcpu->zitems = ((void **)item)[0];
			--zpcpu->zfreecnt;
			++zpcpu->znalloc;
			crit_exit_gd(gd);

			return item;
		}
		crit_exit_gd(gd);
	}

	/*
	 * Per-zone spinlock for the remainder.  Always load at least one
	 * item.
	 */
	spin_lock(&z->zspin);
	if (z->zfreecnt > z->zfreemin) {
		n = zone_burst;
		do {
			item = z->zitems;
#ifdef INVARIANTS
			KASSERT(item != NULL, ("zitems unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
#endif
			z->zitems = ((void **)item)[0];
			--z->zfreecnt;
			((void **)item)[0] = zpcpu->zitems;
			zpcpu->zitems = item;
			++zpcpu->zfreecnt;
		} while (--n > 0 && z->zfreecnt > z->zfreemin);
		spin_unlock(&z->zspin);
		goto retry;
	} else {
		spin_unlock(&z->zspin);
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return item;
}

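/*
 * Free-list encoding used by zalloc() above and zfree() below: while an
 * item sits on a free list, its first two pointer-sized words are overlaid
 * roughly as in this illustrative view (a sketch only; this structure is
 * not declared anywhere in this file):
 *
 *	struct zitem_overlay {
 *		void	*znext;		((void **)item)[0] -- next free item
 *		void	*zmagic;	((void **)item)[1] -- ZENTRY_FREE
 *					while free (INVARIANTS builds only)
 *	};
 *
 * Popping an item is then simply zitems = overlay->znext, which is why
 * these two words are clobbered between allocations.
 */
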
/*
 * Free an item to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
	globaldata_t gd = mycpu;
	vm_zpcpu_t *zpcpu;
	void *tail_item;
	long count;
	long zmax;

	zpcpu = &z->zpcpu[gd->gd_cpuid];

	/*
	 * Avoid spinlock contention by freeing into a per-cpu queue
	 */
	zmax = z->zmax_pcpu;
	if (zmax < 1024)
		zmax = 1024;

	/*
	 * Add to pcpu cache
	 */
	crit_enter_gd(gd);
	((void **)item)[0] = zpcpu->zitems;
#ifdef INVARIANTS
	if (((void **)item)[1] == (void *)ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
	zpcpu->zitems = item;
	++zpcpu->zfreecnt;

	if (zpcpu->zfreecnt < zmax) {
		crit_exit_gd(gd);
		return;
	}

	/*
	 * Hysteresis: move a burst of items (zmax, recalculated below)
	 * from the pcpu cache to the zone pool.
	 */
	zmax = zmax / 2;
	if (zmax > zone_burst)
		zmax = zone_burst;
	tail_item = item;
	count = 1;

	while (count < zmax) {
		tail_item = ((void **)tail_item)[0];
		++count;
	}
	zpcpu->zitems = ((void **)tail_item)[0];
	zpcpu->zfreecnt -= count;

	/*
	 * Per-zone spinlock for the remainder; attach the burst of
	 * pcpu entries to the zone free list in one operation.
	 */
	spin_lock(&z->zspin);
	((void **)tail_item)[0] = z->zitems;
	z->zitems = item;
	z->zfreecnt += count;
	spin_unlock(&z->zspin);

	crit_exit_gd(gd);
}

/*
 * This file implements a very simple zone allocator, used in lieu of the
 * malloc allocator where that is needed or more optimal.
 *
 * Note that the initial implementation had coloring, which produced
 * absolutely no improvement (actually a performance degradation).
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna(), zinit() and zbootinit() are the initialization routines;
 * zalloc() and zfree() are the allocation/free routines.
 */

LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static vm_pindex_t zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;

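/*
 * Example of the type-stability rule described above (a sketch; the
 * structure and field names are hypothetical, not from this file): only
 * the first two longwords are clobbered by the free-list linkage, so any
 * field that must stay valid across a zfree()/zalloc() cycle belongs
 * after them.
 *
 *	struct myobj {
 *		void		*mo_link1;	scratch, clobbered when free
 *		void		*mo_link2;	scratch, clobbered when free
 *		struct spinlock	mo_spin;	type-stable, initialized once
 *	};
 */
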
/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone was previously created by the zone boot code, initialize the
 * remaining parts of the zone.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), the kernel virtual address space is allocated up front and
 * pages are filled in only as needed.
 *
 * Arguments:
 *	z		pointer to zone structure.
 *	name		name of zone.
 *	size		size of zone entries.
 *	nentries	number of zone entries allocated (ZONE_INTERRUPT only).
 *	flags		ZONE_INTERRUPT -- items can be allocated at
 *			interrupt time.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  If ZONE_INTERRUPT is not set, the amount
 * of allocatable memory is unlimited.
 *
 * No requirements.
 */
int
zinitna(vm_zone_t z, char *name, size_t size, long nentries, uint32_t flags)
{
	size_t totsize;

	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	/*
	 * NOTE: We can only adjust zsize if we previously did not
	 *	 use zbootinit().
	 */
	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = roundup2(size, ZONE_ROUNDING);
		spin_init(&z->zspin, "zinitna");
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->zitems = NULL;

		lwkt_gettoken(&vm_token);
		LIST_INSERT_HEAD(&zlist, z, zlink);
		lwkt_reltoken(&vm_token);

		bzero(z->zpcpu, sizeof(z->zpcpu));
	}

	z->zkmvec = NULL;
	z->zkmcur = z->zkmmax = 0;
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page((size_t)z->zsize * nentries);
		atomic_add_long(&zone_kmem_kvaspace, totsize);

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize,
					      VM_SUBSYS_ZALLOC);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
				VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
		z->zmax += nentries;
		z->zmax_pcpu = z->zmax / ncpus / 16;

		/*
		 * Set reasonable pcpu cache bounds.  Low-memory systems
		 * might try to cache too little, large-memory systems
		 * might try to cache more than necessary.
		 *
		 * In particular, pvzone can wind up being excessive and
		 * waste memory unnecessarily.
		 */
		if (z->zmax_pcpu < 1024)
			z->zmax_pcpu = 1024;
		if (z->zmax_pcpu * z->zsize > 16*1024*1024)
			z->zmax_pcpu = 16*1024*1024 / z->zsize;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
		z->zmax_pcpu = 8192;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;

	/*
	 * Reduce kernel_map spam by allocating in chunks of 4 pages.
	 */
	z->zalloc = 4;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		if (buf)
			zfree(z, buf);
	}

	return 1;
}

/*
 * Same as zinitna(), except the zone data structure is allocated
 * automatically by kmalloc().  This routine should normally be used,
 * except in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  zinit() is the standard zone
 * initialization call.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, size_t size, long nentries, uint32_t flags)
{
	vm_zone_t z;

	z = (vm_zone_t)kmalloc(sizeof(struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, name, size, nentries, flags & ~ZONE_DESTROYABLE) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}

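/*
 * Example zone creation (an illustrative sketch; the zone names, struct
 * types and the entry count are hypothetical, not from this file):
 *
 *	zone = zinit("MYOBJS", sizeof(struct myobj), 0, 0);
 *		-- pageable zone, grows on demand 4 pages at a time,
 *		   zalloc() may block
 *
 *	izone = zinit("MYINTR", sizeof(struct myintr), 16384,
 *		      ZONE_INTERRUPT);
 *		-- KVA for all 16384 entries is reserved up front and
 *		   pages are populated as needed, so zalloc() never blocks
 */
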
/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, size_t size, void *item, long nitems)
{
	long i;

	spin_init(&z->zspin, "zbootinit");
	bzero(z->zpcpu, sizeof(z->zpcpu));
	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;

	bzero(item, (size_t)nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	lwkt_gettoken(&vm_token);
	LIST_INSERT_HEAD(&zlist, z, zlink);
	lwkt_reltoken(&vm_token);
}

/*
 * Release all resources owned by a zone created with zinit().
 *
 * No requirements.
 */
void
zdestroy(vm_zone_t z)
{
	vm_pindex_t i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	lwkt_gettoken(&vm_token);
	LIST_REMOVE(z, zlink);
	lwkt_reltoken(&vm_token);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	KKASSERT((z->zflags & ZONE_INTERRUPT) == 0);
	for (i = 0; i < z->zkmcur; i++) {
		kmem_free(&kernel_map, z->zkmvec[i],
			  (size_t)z->zalloc * PAGE_SIZE);
		atomic_subtract_long(&zone_kern_pages, z->zalloc);
	}
	if (z->zkmvec != NULL)
		kfree(z->zkmvec, M_ZONE);

	spin_uninit(&z->zspin);
	kfree(z, M_ZONE);
}

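/*
 * Example of the boot-time pattern (a sketch; the names and NBOOTITEMS
 * are hypothetical): zbootinit() hands the zone a static buffer before
 * the VM system is up, and a later zinitna() on the same zone (which
 * sees ZONE_BOOT set and skips re-initialization) enables normal growth.
 *
 *	static struct vm_zone	bootzone;
 *	static struct bootobj	bootobjs[NBOOTITEMS];
 *
 *	zbootinit(&bootzone, "BOOT", sizeof(struct bootobj),
 *		  bootobjs, NBOOTITEMS);
 *	...
 *	zinitna(&bootzone, "BOOT", sizeof(struct bootobj), 0, 0);
 */
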
/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */

/*
 * Internal zone routine.  Not to be called from external (non vm_zone)
 * code.
 *
 * This function may return NULL.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
	vm_page_t pgs[ZONE_MAXPGLOAD];
	vm_page_t m;
	long nitems;
	long savezpc;
	size_t nbytes;
	size_t noffset;
	void *item;
	vm_pindex_t npages;
	vm_pindex_t nalloc;
	vm_pindex_t i;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map; they
		 * simply populate an existing mapping.
		 *
		 * First allocate as many pages as we can, stopping at
		 * our limit or if the page allocation fails.
		 */
		for (i = 0; i < ZONE_MAXPGLOAD && i < z->zalloc; ++i) {
			m = vm_page_alloc(NULL,
					  mycpu->gd_rand_incr++,
					  z->zallocflag);
			if (m == NULL)
				break;
			pgs[i] = m;
		}
		nalloc = i;

		/*
		 * Account for the pages.
		 *
		 * NOTE! Do not allow overlap with a prior page as it
		 *	 may still be undergoing allocation on another
		 *	 cpu.
		 */
		spin_lock(&z->zspin);
		noffset = (size_t)z->zpagecount * PAGE_SIZE;
		/* noffset -= noffset % z->zsize; */
		savezpc = z->zpagecount;
		if (z->zpagecount + nalloc > z->zpagemax)
			z->zpagecount = z->zpagemax;
		else
			z->zpagecount += nalloc;
		item = (char *)z->zkva + noffset;
		npages = z->zpagecount - savezpc;
		nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
			 z->zsize;
		atomic_add_long(&zone_kmem_pages, npages);
		spin_unlock(&z->zspin);

		/*
		 * Enter the pages into the reserved KVA space.
		 */
		for (i = 0; i < npages; ++i) {
			vm_offset_t zkva;

			m = pgs[i];
			KKASSERT(m->queue == PQ_NONE);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_wire(m);
			vm_page_wakeup(m);

			zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
			bzero((void *)zkva, PAGE_SIZE);
		}
		for (i = npages; i < nalloc; ++i) {
			m = pgs[i];
			vm_page_free(m);
		}
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes,
					   VM_SUBSYS_ZALLOC, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			atomic_add_long(&zone_kern_pages, z->zalloc);
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes,
					   VM_SUBSYS_ZALLOC, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			atomic_add_long(&zone_kern_pages, z->zalloc);
			bzero(item, nbytes);

			if (z->zflags & ZONE_DESTROYABLE) {
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax = z->zkmmax == 0 ?
						    1 : z->zkmmax * 2;
					z->zkmvec = krealloc(z->zkmvec,
					    z->zkmmax * sizeof(z->zkmvec[0]),
					    M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}

	/*
	 * Enter any new pages into the pool, reserving one, or get the
	 * item from the existing pool.
	 */
	spin_lock(&z->zspin);
	z->ztotal += nitems;

	if (nitems != 0) {
		/*
		 * Enter pages into the pool saving one for immediate
		 * allocation.
		 */
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		++z->znalloc;
	} else if (z->zfreecnt > 0) {
		/*
		 * Get an item from the existing pool.
		 */
		item = z->zitems;
		z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **)item)[1] = NULL;
#endif
		--z->zfreecnt;
		++z->znalloc;
	} else {
		/*
		 * No items available.
		 */
		item = NULL;
	}
	spin_unlock(&z->zspin);

	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return item;
}

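/*
 * Worked example of the chunk math in zget() (numbers illustrative):
 * with z->zalloc = 4 and PAGE_SIZE = 4096, a non-interrupt zone maps
 * nbytes = 16384 bytes per refill, so a zone with zsize = 192 yields
 * nitems = 16384 / 192 = 85 items with 64 bytes of tail waste per
 * chunk; one item is returned immediately and the other 84 go onto
 * the zone free list.
 */
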
/*
 * No requirements.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];
	int error = 0;

	ksnprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	lwkt_gettoken(&vm_token);
	LIST_FOREACH(curzone, &zlist, zlink) {
		size_t i;
		size_t len;
		int offset;
		long freecnt;
		long znalloc;
		int n;

		/*
		 * Leave room for both the ':' and the terminating NUL
		 * when truncating long zone names.
		 */
		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 2))
			len = (sizeof(tmpname) - 2);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}
		freecnt = curzone->zfreecnt;
		znalloc = curzone->znalloc;
		for (n = 0; n < ncpus; ++n) {
			freecnt += curzone->zpcpu[n].zfreecnt;
			znalloc += curzone->zpcpu[n].znalloc;
		}

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
			  "%s %6.6lu, %8.8lu, %6.6lu, %6.6lu, %8.8lu\n",
			  tmpname, curzone->zsize, curzone->zmax,
			  (curzone->ztotal - freecnt),
			  freecnt, znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);

		if (error)
			break;
	}
	lwkt_reltoken(&vm_token);
	return (error);
}

#if defined(INVARIANTS)

/*
 * Debugging only.
 */
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic("%s", msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
	   NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_pages,
	    CTLFLAG_RD, &zone_kmem_pages, 0,
	    "Number of interrupt safe pages allocated by zone");
SYSCTL_LONG(_vm, OID_AUTO, zone_burst,
	    CTLFLAG_RW, &zone_burst, 0,
	    "Burst from depot to pcpu cache");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace,
	    CTLFLAG_RD, &zone_kmem_kvaspace, 0,
	    "KVA space allocated by zone");
SYSCTL_LONG(_vm, OID_AUTO, zone_kern_pages,
	    CTLFLAG_RD, &zone_kern_pages, 0,
	    "Number of non-interrupt safe pages allocated by zone");

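/*
 * Example of inspecting the zones from userland (the output shape follows
 * the header and format string in sysctl_vm_zone() above; the zone name
 * and numbers shown are illustrative only):
 *
 *	$ sysctl vm.zone
 *	ITEM            SIZE     LIMIT    USED    FREE  REQUESTS
 *	MAP ENTRY:    000064, 00000000, 001234, 000766, 00012345
 *	...
 */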