1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 1997, 1998 John S. Dyson 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. Absolutely no warranty of function or purpose is made by the author 14 * John S. Dyson. 15 * 16 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $ 17 */ 18 19 #include <sys/param.h> 20 #include <sys/queue.h> 21 #include <sys/systm.h> 22 #include <sys/kernel.h> 23 #include <sys/lock.h> 24 #include <sys/malloc.h> 25 #include <sys/sysctl.h> 26 #include <sys/vmmeter.h> 27 28 #include <vm/vm.h> 29 #include <vm/vm_object.h> 30 #include <vm/vm_page.h> 31 #include <vm/vm_map.h> 32 #include <vm/vm_kern.h> 33 #include <vm/vm_extern.h> 34 #include <vm/vm_zone.h> 35 36 #include <sys/spinlock2.h> 37 #include <vm/vm_page2.h> 38 39 static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header"); 40 41 #define ZONE_ERROR_INVALID 0 42 #define ZONE_ERROR_NOTFREE 1 43 #define ZONE_ERROR_ALREADYFREE 2 44 45 #define ZONE_ROUNDING 32 46 47 #define ZENTRY_FREE 0x12342378 48 49 int zone_burst = 32; 50 51 static void *zget(vm_zone_t z); 52 53 /* 54 * Return an item from the specified zone. This function is non-blocking for 55 * ZONE_INTERRUPT zones. 56 * 57 * No requirements. 58 */ 59 void * 60 zalloc(vm_zone_t z) 61 { 62 globaldata_t gd = mycpu; 63 vm_zpcpu_t *zpcpu; 64 void *item; 65 int n; 66 67 #ifdef INVARIANTS 68 if (z == NULL) 69 zerror(ZONE_ERROR_INVALID); 70 #endif 71 zpcpu = &z->zpcpu[gd->gd_cpuid]; 72 retry: 73 /* 74 * Avoid spinlock contention by allocating from a per-cpu queue 75 */ 76 if (zpcpu->zfreecnt > 0) { 77 crit_enter_gd(gd); 78 if (zpcpu->zfreecnt > 0) { 79 item = zpcpu->zitems; 80 #ifdef INVARIANTS 81 KASSERT(item != NULL, 82 ("zitems_pcpu unexpectedly NULL")); 83 if (((void **)item)[1] != (void *)ZENTRY_FREE) 84 zerror(ZONE_ERROR_NOTFREE); 85 ((void **)item)[1] = NULL; 86 #endif 87 zpcpu->zitems = ((void **) item)[0]; 88 --zpcpu->zfreecnt; 89 ++zpcpu->znalloc; 90 crit_exit_gd(gd); 91 92 return item; 93 } 94 crit_exit_gd(gd); 95 } 96 97 /* 98 * Per-zone spinlock for the remainder. Always load at least one 99 * item. 100 */ 101 spin_lock(&z->zlock); 102 if (z->zfreecnt > z->zfreemin) { 103 n = zone_burst; 104 do { 105 item = z->zitems; 106 #ifdef INVARIANTS 107 KASSERT(item != NULL, ("zitems unexpectedly NULL")); 108 if (((void **)item)[1] != (void *)ZENTRY_FREE) 109 zerror(ZONE_ERROR_NOTFREE); 110 #endif 111 z->zitems = ((void **)item)[0]; 112 --z->zfreecnt; 113 ((void **)item)[0] = zpcpu->zitems; 114 zpcpu->zitems = item; 115 ++zpcpu->zfreecnt; 116 } while (--n > 0 && z->zfreecnt > z->zfreemin); 117 spin_unlock(&z->zlock); 118 goto retry; 119 } else { 120 spin_unlock(&z->zlock); 121 item = zget(z); 122 /* 123 * PANICFAIL allows the caller to assume that the zalloc() 124 * will always succeed. If it doesn't, we panic here. 125 */ 126 if (item == NULL && (z->zflags & ZONE_PANICFAIL)) 127 panic("zalloc(%s) failed", z->zname); 128 } 129 return item; 130 } 131 132 /* 133 * Free an item to the specified zone. 134 * 135 * No requirements. 136 */ 137 void 138 zfree(vm_zone_t z, void *item) 139 { 140 globaldata_t gd = mycpu; 141 vm_zpcpu_t *zpcpu; 142 void *tail_item; 143 int count; 144 int zmax; 145 146 zpcpu = &z->zpcpu[gd->gd_cpuid]; 147 148 /* 149 * Avoid spinlock contention by freeing into a per-cpu queue 150 */ 151 zmax = z->zmax_pcpu; 152 if (zmax < 1024) 153 zmax = 1024; 154 155 /* 156 * Add to pcpu cache 157 */ 158 crit_enter_gd(gd); 159 ((void **)item)[0] = zpcpu->zitems; 160 #ifdef INVARIANTS 161 if (((void **)item)[1] == (void *)ZENTRY_FREE) 162 zerror(ZONE_ERROR_ALREADYFREE); 163 ((void **)item)[1] = (void *)ZENTRY_FREE; 164 #endif 165 zpcpu->zitems = item; 166 ++zpcpu->zfreecnt; 167 168 if (zpcpu->zfreecnt < zmax) { 169 crit_exit_gd(gd); 170 return; 171 } 172 173 /* 174 * Hystereis, move (zmax) (calculated below) items to the pool. 175 */ 176 zmax = zmax / 2; 177 if (zmax > zone_burst) 178 zmax = zone_burst; 179 tail_item = item; 180 count = 1; 181 182 while (count < zmax) { 183 tail_item = ((void **)tail_item)[0]; 184 ++count; 185 } 186 zpcpu->zitems = ((void **)tail_item)[0]; 187 zpcpu->zfreecnt -= count; 188 189 /* 190 * Per-zone spinlock for the remainder. 191 * 192 * Also implement hysteresis by freeing a number of pcpu 193 * entries. 194 */ 195 spin_lock(&z->zlock); 196 ((void **)tail_item)[0] = z->zitems; 197 z->zitems = item; 198 z->zfreecnt += count; 199 spin_unlock(&z->zlock); 200 201 crit_exit_gd(gd); 202 } 203 204 /* 205 * This file comprises a very simple zone allocator. This is used 206 * in lieu of the malloc allocator, where needed or more optimal. 207 * 208 * Note that the initial implementation of this had coloring, and 209 * absolutely no improvement (actually perf degradation) occurred. 210 * 211 * Note also that the zones are type stable. The only restriction is 212 * that the first two longwords of a data structure can be changed 213 * between allocations. Any data that must be stable between allocations 214 * must reside in areas after the first two longwords. 215 * 216 * zinitna, zinit, zbootinit are the initialization routines. 217 * zalloc, zfree, are the allocation/free routines. 218 */ 219 220 LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist); 221 static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS); 222 static int zone_kmem_pages, zone_kern_pages; 223 static long zone_kmem_kvaspace; 224 225 /* 226 * Create a zone, but don't allocate the zone structure. If the 227 * zone had been previously created by the zone boot code, initialize 228 * various parts of the zone code. 229 * 230 * If waits are not allowed during allocation (e.g. during interrupt 231 * code), a-priori allocate the kernel virtual space, and allocate 232 * only pages when needed. 233 * 234 * Arguments: 235 * z pointer to zone structure. 236 * obj pointer to VM object (opt). 237 * name name of zone. 238 * size size of zone entries. 239 * nentries number of zone entries allocated (only ZONE_INTERRUPT.) 240 * flags ZONE_INTERRUPT -- items can be allocated at interrupt time. 241 * zalloc number of pages allocated when memory is needed. 242 * 243 * Note that when using ZONE_INTERRUPT, the size of the zone is limited 244 * by the nentries argument. The size of the memory allocatable is 245 * unlimited if ZONE_INTERRUPT is not set. 246 * 247 * No requirements. 248 */ 249 int 250 zinitna(vm_zone_t z, vm_object_t obj, char *name, int size, 251 int nentries, int flags) 252 { 253 size_t totsize; 254 255 /* 256 * Only zones created with zinit() are destroyable. 257 */ 258 if (z->zflags & ZONE_DESTROYABLE) 259 panic("zinitna: can't create destroyable zone"); 260 261 /* 262 * NOTE: We can only adjust zsize if we previously did not 263 * use zbootinit(). 264 */ 265 if ((z->zflags & ZONE_BOOT) == 0) { 266 z->zsize = roundup2(size, ZONE_ROUNDING); 267 spin_init(&z->zlock, "zinitna"); 268 z->zfreecnt = 0; 269 z->ztotal = 0; 270 z->zmax = 0; 271 z->zname = name; 272 z->zitems = NULL; 273 274 lwkt_gettoken(&vm_token); 275 LIST_INSERT_HEAD(&zlist, z, zlink); 276 lwkt_reltoken(&vm_token); 277 278 bzero(z->zpcpu, sizeof(z->zpcpu)); 279 } 280 281 z->zkmvec = NULL; 282 z->zkmcur = z->zkmmax = 0; 283 z->zflags |= flags; 284 285 /* 286 * If we cannot wait, allocate KVA space up front, and we will fill 287 * in pages as needed. This is particularly required when creating 288 * an allocation space for map entries in kernel_map, because we 289 * do not want to go into a recursion deadlock with 290 * vm_map_entry_reserve(). 291 */ 292 if (z->zflags & ZONE_INTERRUPT) { 293 totsize = round_page((size_t)z->zsize * nentries); 294 atomic_add_long(&zone_kmem_kvaspace, totsize); 295 296 z->zkva = kmem_alloc_pageable(&kernel_map, totsize, 297 VM_SUBSYS_ZALLOC); 298 if (z->zkva == 0) { 299 LIST_REMOVE(z, zlink); 300 return 0; 301 } 302 303 z->zpagemax = totsize / PAGE_SIZE; 304 if (obj == NULL) { 305 z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax); 306 } else { 307 z->zobj = obj; 308 _vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj); 309 vm_object_drop(obj); 310 } 311 z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT | 312 VM_ALLOC_NORMAL | VM_ALLOC_RETRY; 313 z->zmax += nentries; 314 z->zmax_pcpu = z->zmax / ncpus / 16; 315 if (z->zmax_pcpu < 1024) 316 z->zmax_pcpu = 1024; 317 } else { 318 z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM; 319 z->zmax = 0; 320 z->zmax_pcpu = 8192; 321 } 322 323 324 if (z->zsize > PAGE_SIZE) 325 z->zfreemin = 1; 326 else 327 z->zfreemin = PAGE_SIZE / z->zsize; 328 329 z->zpagecount = 0; 330 331 /* 332 * Reduce kernel_map spam by allocating in chunks of 4 pages. 333 */ 334 z->zalloc = 4; 335 336 /* 337 * Populate the interrrupt zone at creation time rather than 338 * on first allocation, as this is a potentially long operation. 339 */ 340 if (z->zflags & ZONE_INTERRUPT) { 341 void *buf; 342 343 buf = zget(z); 344 zfree(z, buf); 345 } 346 347 return 1; 348 } 349 350 /* 351 * Subroutine same as zinitna, except zone data structure is allocated 352 * automatically by malloc. This routine should normally be used, except 353 * in certain tricky startup conditions in the VM system -- then 354 * zbootinit and zinitna can be used. Zinit is the standard zone 355 * initialization call. 356 * 357 * No requirements. 358 */ 359 vm_zone_t 360 zinit(char *name, int size, int nentries, int flags) 361 { 362 vm_zone_t z; 363 364 z = (vm_zone_t) kmalloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT); 365 if (z == NULL) 366 return NULL; 367 368 z->zflags = 0; 369 if (zinitna(z, NULL, name, size, nentries, 370 flags & ~ZONE_DESTROYABLE) == 0) { 371 kfree(z, M_ZONE); 372 return NULL; 373 } 374 375 if (flags & ZONE_DESTROYABLE) 376 z->zflags |= ZONE_DESTROYABLE; 377 378 return z; 379 } 380 381 /* 382 * Initialize a zone before the system is fully up. This routine should 383 * only be called before full VM startup. 384 * 385 * Called from the low level boot code only. 386 */ 387 void 388 zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems) 389 { 390 int i; 391 392 spin_init(&z->zlock, "zbootinit"); 393 bzero(z->zpcpu, sizeof(z->zpcpu)); 394 z->zname = name; 395 z->zsize = size; 396 z->zpagemax = 0; 397 z->zobj = NULL; 398 z->zflags = ZONE_BOOT; 399 z->zfreemin = 0; 400 z->zallocflag = 0; 401 z->zpagecount = 0; 402 z->zalloc = 0; 403 404 bzero(item, (size_t)nitems * z->zsize); 405 z->zitems = NULL; 406 for (i = 0; i < nitems; i++) { 407 ((void **)item)[0] = z->zitems; 408 #ifdef INVARIANTS 409 ((void **)item)[1] = (void *)ZENTRY_FREE; 410 #endif 411 z->zitems = item; 412 item = (uint8_t *)item + z->zsize; 413 } 414 z->zfreecnt = nitems; 415 z->zmax = nitems; 416 z->ztotal = nitems; 417 418 lwkt_gettoken(&vm_token); 419 LIST_INSERT_HEAD(&zlist, z, zlink); 420 lwkt_reltoken(&vm_token); 421 } 422 423 /* 424 * Release all resources owned by zone created with zinit(). 425 * 426 * No requirements. 427 */ 428 void 429 zdestroy(vm_zone_t z) 430 { 431 vm_page_t m; 432 int i; 433 434 if (z == NULL) 435 panic("zdestroy: null zone"); 436 if ((z->zflags & ZONE_DESTROYABLE) == 0) 437 panic("zdestroy: undestroyable zone"); 438 439 lwkt_gettoken(&vm_token); 440 LIST_REMOVE(z, zlink); 441 lwkt_reltoken(&vm_token); 442 443 /* 444 * Release virtual mappings, physical memory and update sysctl stats. 445 */ 446 if (z->zflags & ZONE_INTERRUPT) { 447 /* 448 * Pages mapped via pmap_kenter() must be removed from the 449 * kernel_pmap() before calling kmem_free() to avoid issues 450 * with kernel_pmap.pm_stats.resident_count. 451 */ 452 pmap_qremove(z->zkva, z->zpagemax); 453 vm_object_hold(z->zobj); 454 for (i = 0; i < z->zpagecount; ++i) { 455 m = vm_page_lookup_busy_wait(z->zobj, i, TRUE, "vmzd"); 456 vm_page_unwire(m, 0); 457 vm_page_free(m); 458 } 459 460 /* 461 * Free the mapping. 462 */ 463 kmem_free(&kernel_map, z->zkva, 464 (size_t)z->zpagemax * PAGE_SIZE); 465 atomic_subtract_long(&zone_kmem_kvaspace, 466 (size_t)z->zpagemax * PAGE_SIZE); 467 468 /* 469 * Free the backing object and physical pages. 470 */ 471 vm_object_deallocate(z->zobj); 472 vm_object_drop(z->zobj); 473 atomic_subtract_int(&zone_kmem_pages, z->zpagecount); 474 } else { 475 for (i = 0; i < z->zkmcur; i++) { 476 kmem_free(&kernel_map, z->zkmvec[i], 477 (size_t)z->zalloc * PAGE_SIZE); 478 atomic_subtract_int(&zone_kern_pages, z->zalloc); 479 } 480 if (z->zkmvec != NULL) 481 kfree(z->zkmvec, M_ZONE); 482 } 483 484 spin_uninit(&z->zlock); 485 kfree(z, M_ZONE); 486 } 487 488 489 /* 490 * void *zalloc(vm_zone_t zone) -- 491 * Returns an item from a specified zone. May not be called from a 492 * FAST interrupt or IPI function. 493 * 494 * void zfree(vm_zone_t zone, void *item) -- 495 * Frees an item back to a specified zone. May not be called from a 496 * FAST interrupt or IPI function. 497 */ 498 499 /* 500 * Internal zone routine. Not to be called from external (non vm_zone) code. 501 * 502 * No requirements. 503 */ 504 static void * 505 zget(vm_zone_t z) 506 { 507 int i; 508 vm_page_t m; 509 int nitems; 510 int npages; 511 int savezpc; 512 size_t nbytes; 513 size_t noffset; 514 void *item; 515 516 if (z == NULL) 517 panic("zget: null zone"); 518 519 if (z->zflags & ZONE_INTERRUPT) { 520 /* 521 * Interrupt zones do not mess with the kernel_map, they 522 * simply populate an existing mapping. 523 * 524 * First reserve the required space. 525 */ 526 vm_object_hold(z->zobj); 527 noffset = (size_t)z->zpagecount * PAGE_SIZE; 528 noffset -= noffset % z->zsize; 529 savezpc = z->zpagecount; 530 if (z->zpagecount + z->zalloc > z->zpagemax) 531 z->zpagecount = z->zpagemax; 532 else 533 z->zpagecount += z->zalloc; 534 item = (char *)z->zkva + noffset; 535 npages = z->zpagecount - savezpc; 536 nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) / 537 z->zsize; 538 atomic_add_int(&zone_kmem_pages, npages); 539 540 /* 541 * Now allocate the pages. Note that we can block in the 542 * loop, so we've already done all the necessary calculations 543 * and reservations above. 544 */ 545 for (i = 0; i < npages; ++i) { 546 vm_offset_t zkva; 547 548 m = vm_page_alloc(z->zobj, savezpc + i, z->zallocflag); 549 KKASSERT(m != NULL); 550 /* note: z might be modified due to blocking */ 551 552 KKASSERT(m->queue == PQ_NONE); 553 m->valid = VM_PAGE_BITS_ALL; 554 vm_page_wire(m); 555 vm_page_wakeup(m); 556 557 zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE; 558 pmap_kenter(zkva, VM_PAGE_TO_PHYS(m)); 559 bzero((void *)zkva, PAGE_SIZE); 560 } 561 vm_object_drop(z->zobj); 562 } else if (z->zflags & ZONE_SPECIAL) { 563 /* 564 * The special zone is the one used for vm_map_entry_t's. 565 * We have to avoid an infinite recursion in 566 * vm_map_entry_reserve() by using vm_map_entry_kreserve() 567 * instead. The map entries are pre-reserved by the kernel 568 * by vm_map_entry_reserve_cpu_init(). 569 */ 570 nbytes = (size_t)z->zalloc * PAGE_SIZE; 571 572 item = (void *)kmem_alloc3(&kernel_map, nbytes, 573 VM_SUBSYS_ZALLOC, KM_KRESERVE); 574 575 /* note: z might be modified due to blocking */ 576 if (item != NULL) { 577 atomic_add_int(&zone_kern_pages, z->zalloc); 578 bzero(item, nbytes); 579 } else { 580 nbytes = 0; 581 } 582 nitems = nbytes / z->zsize; 583 } else { 584 /* 585 * Otherwise allocate KVA from the kernel_map. 586 */ 587 nbytes = (size_t)z->zalloc * PAGE_SIZE; 588 589 item = (void *)kmem_alloc3(&kernel_map, nbytes, 590 VM_SUBSYS_ZALLOC, 0); 591 592 /* note: z might be modified due to blocking */ 593 if (item != NULL) { 594 atomic_add_int(&zone_kern_pages, z->zalloc); 595 bzero(item, nbytes); 596 597 if (z->zflags & ZONE_DESTROYABLE) { 598 if (z->zkmcur == z->zkmmax) { 599 z->zkmmax = 600 z->zkmmax==0 ? 1 : z->zkmmax*2; 601 z->zkmvec = krealloc(z->zkmvec, 602 z->zkmmax * sizeof(z->zkmvec[0]), 603 M_ZONE, M_WAITOK); 604 } 605 z->zkmvec[z->zkmcur++] = (vm_offset_t)item; 606 } 607 } else { 608 nbytes = 0; 609 } 610 nitems = nbytes / z->zsize; 611 } 612 613 spin_lock(&z->zlock); 614 z->ztotal += nitems; 615 616 /* 617 * Save one for immediate allocation 618 */ 619 if (nitems != 0) { 620 nitems -= 1; 621 for (i = 0; i < nitems; i++) { 622 ((void **)item)[0] = z->zitems; 623 #ifdef INVARIANTS 624 ((void **)item)[1] = (void *)ZENTRY_FREE; 625 #endif 626 z->zitems = item; 627 item = (uint8_t *)item + z->zsize; 628 } 629 z->zfreecnt += nitems; 630 ++z->znalloc; 631 } else if (z->zfreecnt > 0) { 632 item = z->zitems; 633 z->zitems = ((void **)item)[0]; 634 #ifdef INVARIANTS 635 if (((void **)item)[1] != (void *)ZENTRY_FREE) 636 zerror(ZONE_ERROR_NOTFREE); 637 ((void **) item)[1] = NULL; 638 #endif 639 --z->zfreecnt; 640 ++z->znalloc; 641 } else { 642 item = NULL; 643 } 644 spin_unlock(&z->zlock); 645 646 /* 647 * A special zone may have used a kernel-reserved vm_map_entry. If 648 * so we have to be sure to recover our reserve so we don't run out. 649 * We will panic if we run out. 650 */ 651 if (z->zflags & ZONE_SPECIAL) 652 vm_map_entry_reserve(0); 653 654 return item; 655 } 656 657 /* 658 * No requirements. 659 */ 660 static int 661 sysctl_vm_zone(SYSCTL_HANDLER_ARGS) 662 { 663 int error=0; 664 vm_zone_t curzone; 665 char tmpbuf[128]; 666 char tmpname[14]; 667 668 ksnprintf(tmpbuf, sizeof(tmpbuf), 669 "\nITEM SIZE LIMIT USED FREE REQUESTS\n"); 670 error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf)); 671 if (error) 672 return (error); 673 674 lwkt_gettoken(&vm_token); 675 LIST_FOREACH(curzone, &zlist, zlink) { 676 int i; 677 int n; 678 int len; 679 int offset; 680 int freecnt; 681 int znalloc; 682 683 len = strlen(curzone->zname); 684 if (len >= (sizeof(tmpname) - 1)) 685 len = (sizeof(tmpname) - 1); 686 for(i = 0; i < sizeof(tmpname) - 1; i++) 687 tmpname[i] = ' '; 688 tmpname[i] = 0; 689 memcpy(tmpname, curzone->zname, len); 690 tmpname[len] = ':'; 691 offset = 0; 692 if (curzone == LIST_FIRST(&zlist)) { 693 offset = 1; 694 tmpbuf[0] = '\n'; 695 } 696 freecnt = curzone->zfreecnt; 697 znalloc = curzone->znalloc; 698 for (n = 0; n < ncpus; ++n) { 699 freecnt += curzone->zpcpu[n].zfreecnt; 700 znalloc += curzone->zpcpu[n].znalloc; 701 } 702 703 ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset, 704 "%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n", 705 tmpname, curzone->zsize, curzone->zmax, 706 (curzone->ztotal - freecnt), 707 freecnt, znalloc); 708 709 len = strlen((char *)tmpbuf); 710 if (LIST_NEXT(curzone, zlink) == NULL) 711 tmpbuf[len - 1] = 0; 712 713 error = SYSCTL_OUT(req, tmpbuf, len); 714 715 if (error) 716 break; 717 } 718 lwkt_reltoken(&vm_token); 719 return (error); 720 } 721 722 #if defined(INVARIANTS) 723 724 /* 725 * Debugging only. 726 */ 727 void 728 zerror(int error) 729 { 730 char *msg; 731 732 switch (error) { 733 case ZONE_ERROR_INVALID: 734 msg = "zone: invalid zone"; 735 break; 736 case ZONE_ERROR_NOTFREE: 737 msg = "zone: entry not free"; 738 break; 739 case ZONE_ERROR_ALREADYFREE: 740 msg = "zone: freeing free entry"; 741 break; 742 default: 743 msg = "zone: invalid error"; 744 break; 745 } 746 panic("%s", msg); 747 } 748 #endif 749 750 SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \ 751 NULL, 0, sysctl_vm_zone, "A", "Zone Info"); 752 753 SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages, 754 CTLFLAG_RD, &zone_kmem_pages, 0, "Number of interrupt safe pages allocated by zone"); 755 SYSCTL_INT(_vm, OID_AUTO, zone_burst, 756 CTLFLAG_RW, &zone_burst, 0, "Burst from depot to pcpu cache"); 757 SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace, 758 CTLFLAG_RD, &zone_kmem_kvaspace, 0, "KVA space allocated by zone"); 759 SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages, 760 CTLFLAG_RD, &zone_kern_pages, 0, "Number of non-interrupt safe pages allocated by zone"); 761