/*
 * (MPSAFE)
 *
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 * $DragonFly: src/sys/vm/vm_zone.c,v 1.28 2008/01/23 17:35:48 nth Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
#include <sys/mplock2.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define ZONE_ERROR_INVALID	0
#define ZONE_ERROR_NOTFREE	1
#define ZONE_ERROR_ALREADYFREE	2

#define ZONE_ROUNDING	32

#define ZENTRY_FREE	0x12342378

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
void *
zalloc(vm_zone_t z)
{
	void *item;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
	spin_lock(&z->zlock);
	if (z->zfreecnt > z->zfreemin) {
		item = z->zitems;
#ifdef INVARIANTS
		KASSERT(item != NULL, ("zitems unexpectedly NULL"));
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zitems = ((void **) item)[0];
		z->zfreecnt--;
		z->znalloc++;
		spin_unlock(&z->zlock);
	} else {
		spin_unlock(&z->zlock);
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return item;
}

/*
 * Free an item to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
	spin_lock(&z->zlock);
	((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
	if (((void **) item)[1] == (void *) ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
	z->zitems = item;
	z->zfreecnt++;
	spin_unlock(&z->zlock);
}

/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually perf degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, zbootinit are the initialization routines.
 * zalloc, zfree, are the allocation/free routines.
 */

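/*
 * Illustrative sketch only (not part of the original source): a typical
 * consumer of this allocator, using a hypothetical "struct foo".  While an
 * item sits on a zone's free list, the zone stores the free-list link in
 * the item's first pointer-sized word (and a magic number in the second
 * word under INVARIANTS), so only fields located after the first two
 * longwords survive a zfree()/zalloc() cycle untouched.
 *
 *	struct foo {
 *		void	*f_spare[2];	-- clobbered while the item is free
 *		int	f_refcnt;	-- left alone while the item is free
 *	};
 *
 *	static vm_zone_t foo_zone;
 *	struct foo *fp;
 *
 *	foo_zone = zinit("FOO", sizeof(struct foo), 0, 0, 1);
 *	if (foo_zone == NULL)
 *		panic("cannot create foo zone");
 *
 *	fp = zalloc(foo_zone);
 *	...
 *	zfree(foo_zone, fp);
 */
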
LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages, zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a-priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 *	z		pointer to zone structure.
 *	obj		pointer to VM object (opt).
 *	name		name of zone.
 *	size		size of zone entries.
 *	nentries	number of zone entries allocated (only ZONE_INTERRUPT.)
 *	flags		ZONE_INTERRUPT -- items can be allocated at interrupt time.
 *	zalloc		number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 *
 * No requirements.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags, int zalloc)
{
	int totsize;

	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	/*
	 * NOTE: We can only adjust zsize if we previously did not
	 *	 use zbootinit().
	 */
	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
		spin_init(&z->zlock);
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->znalloc = 0;
		z->zitems = NULL;

		lwkt_gettoken(&vm_token);
		LIST_INSERT_HEAD(&zlist, z, zlink);
		lwkt_reltoken(&vm_token);
	}

	z->zkmvec = NULL;
	z->zkmcur = z->zkmmax = 0;
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page(z->zsize * nentries);
		zone_kmem_kvaspace += totsize;

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
		}
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT;
		z->zmax += nentries;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;
	if (zalloc)
		z->zalloc = zalloc;
	else
		z->zalloc = 1;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		zfree(z, buf);
	}

	return 1;
}

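/*
 * Illustrative sketch only (not part of the original source): interrupt-safe
 * zones are normally built on storage the caller already owns, since
 * zinitna() does not allocate the zone header itself.  Assuming a
 * hypothetical "struct bar" and an upper bound of NBAR entries:
 *
 *	static struct vm_zone	bar_zone_store;
 *	static struct vm_object	bar_zone_obj;
 *
 *	if (zinitna(&bar_zone_store, &bar_zone_obj, "BAR",
 *		    sizeof(struct bar), NBAR, ZONE_INTERRUPT, 1) == 0)
 *		panic("cannot create bar zone");
 *
 * The nentries argument caps such a zone: KVA for NBAR entries is reserved
 * up front, and physical pages are only added by zget() as the zone grows.
 */
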
/*
 * Subroutine same as zinitna, except zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  Zinit is the standard zone
 * initialization call.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
	vm_zone_t z;

	z = (vm_zone_t) kmalloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries,
		    flags & ~ZONE_DESTROYABLE, zalloc) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}

/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zobj = NULL;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;
	z->znalloc = 0;
	spin_init(&z->zlock);

	bzero(item, nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	lwkt_gettoken(&vm_token);
	LIST_INSERT_HEAD(&zlist, z, zlink);
	lwkt_reltoken(&vm_token);
}

/*
 * Release all resources owned by zone created with zinit().
 *
 * No requirements.
 */
void
zdestroy(vm_zone_t z)
{
	int i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	lwkt_gettoken(&vm_token);
	LIST_REMOVE(z, zlink);
	lwkt_reltoken(&vm_token);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Pages mapped via pmap_kenter() must be removed from the
		 * kernel_pmap() before calling kmem_free() to avoid issues
		 * with kernel_pmap.pm_stats.resident_count.
		 */
		pmap_qremove(z->zkva, z->zpagemax);

		/*
		 * Free the mapping.
		 */
		kmem_free(&kernel_map, z->zkva, z->zpagemax*PAGE_SIZE);
		atomic_subtract_int(&zone_kmem_kvaspace, z->zpagemax*PAGE_SIZE);

		/*
		 * Free the backing object and physical pages.
		 */
		vm_object_deallocate(z->zobj);
		atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
	} else {
		for (i=0; i < z->zkmcur; i++) {
			kmem_free(&kernel_map, z->zkmvec[i],
				  z->zalloc*PAGE_SIZE);
			atomic_subtract_int(&zone_kern_pages, z->zalloc);
		}
		if (z->zkmvec != NULL)
			kfree(z->zkmvec, M_ZONE);
	}

	spin_uninit(&z->zlock);
	kfree(z, M_ZONE);
}

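/*
 * Illustrative sketch only (not part of the original source): zdestroy()
 * may only be applied to zones created by zinit() with ZONE_DESTROYABLE
 * set, e.g. a hypothetical zone that exists only while a module is loaded:
 *
 *	static vm_zone_t baz_zone;
 *
 *	baz_zone = zinit("BAZ", sizeof(struct baz), 0, ZONE_DESTROYABLE, 1);
 *	...
 *	zdestroy(baz_zone);
 *
 * All other zones are type stable and persist for the lifetime of the
 * system; their memory is never returned to the kernel maps.
 */
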
/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
	int i;
	vm_page_t m;
	int nitems, nbytes;
	int savezpc;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 */
		lwkt_gettoken(&vm_token);
		savezpc = z->zpagecount;
		nbytes = z->zpagecount * PAGE_SIZE;
		nbytes -= nbytes % z->zsize;
		item = (char *) z->zkva + nbytes;
		for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
		     i++) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, z->zpagecount,
					  z->zallocflag);
			/* note: z might be modified due to blocking */
			if (m == NULL)
				break;

			/*
			 * Unbusy page so it can be freed in zdestroy().  Make
			 * sure it is not on any queue and so can not be
			 * recycled under our feet.
			 */
			KKASSERT(m->queue == PQ_NONE);
			vm_page_flag_clear(m, PG_BUSY);

			zkva = z->zkva + z->zpagecount * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m)); /* YYY */
			bzero((void *)zkva, PAGE_SIZE);
			KKASSERT(savezpc == z->zpagecount);
			++savezpc;
			z->zpagecount++;
			zone_kmem_pages++;
			vmstats.v_wire_count++;
		}
		nitems = ((z->zpagecount * PAGE_SIZE) - nbytes) / z->zsize;
		lwkt_reltoken(&vm_token);
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
			bzero(item, nbytes);

			if (z->zflags & ZONE_DESTROYABLE) {
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax =
						z->zkmmax==0 ?
						1 : z->zkmmax*2;
					z->zkmvec = krealloc(z->zkmvec,
					    z->zkmmax * sizeof(z->zkmvec[0]),
					    M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}

	spin_lock(&z->zlock);
	z->ztotal += nitems;
	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **) item)[0];
#ifdef INVARIANTS
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}
	spin_unlock(&z->zlock);

	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return item;
}

/*
 * No requirements.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	int error=0;
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];

	ksnprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE   REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	lwkt_gettoken(&vm_token);
	LIST_FOREACH(curzone, &zlist, zlink) {
		int i;
		int len;
		int offset;

		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for(i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
			"%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
			tmpname, curzone->zsize, curzone->zmax,
			(curzone->ztotal - curzone->zfreecnt),
			curzone->zfreecnt, curzone->znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);

		if (error)
			break;
	}
	lwkt_reltoken(&vm_token);
	return (error);
}

#if defined(INVARIANTS)

/*
 * Debugging only.
 */
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic(msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
	NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
	CTLFLAG_RD, &zone_kmem_pages, 0, "Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_kvaspace,
	CTLFLAG_RD, &zone_kmem_kvaspace, 0, "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
	CTLFLAG_RD, &zone_kern_pages, 0, "Number of non-interrupt safe pages allocated by zone");