/*
 * (MPSAFE)
 *
 * Copyright (c) 1998-2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 * @(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/thread2.h>

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER	SWB_NPAGES
#endif

#define SWM_FREE	0x02	/* free, period */
#define SWM_POP		0x04	/* pop out */

#define SWBIO_READ	0x01
#define SWBIO_WRITE	0x02
#define SWBIO_SYNC	0x04

struct swfreeinfo {
	vm_object_t	object;
	vm_pindex_t	basei;
	vm_pindex_t	begi;
	vm_pindex_t	endi;	/* inclusive */
};

struct swswapoffinfo {
	vm_object_t	object;
	int		devidx;
	int		shared;
};

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */

int swap_pager_full;		/* swap space exhaustion (task killing) */
int swap_fail_ticks;		/* when we became exhausted */
int swap_pager_almost_full;	/* swap space exhaustion (w/ hysteresis)*/
int vm_swap_cache_use;
int vm_swap_anon_use;
static int vm_report_swap_allocs;

static int nsw_rcount;		/* free read buffers */
static int nsw_wcount_sync;	/* limit write buffers / synchronous */
static int nsw_wcount_async;	/* limit write buffers / asynchronous */
static int nsw_wcount_async_max;/* assigned maximum */
static int nsw_cluster_max;	/* maximum VOP I/O allowed */

struct blist *swapblist;
static int swap_async_max = 4;	/* maximum in-progress async I/O's */
static int swap_burst_read = 0;	/* allow burst reading */
static swblk_t swapiterator;	/* linearize allocations */

static struct spinlock swapbp_spin = SPINLOCK_INITIALIZER(&swapbp_spin, "swapbp_spin");

/* from vm_swap.c */
extern struct vnode *swapdev_vp;
extern struct swdevt *swdevt;
extern int nswdev;

#define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0)

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
SYSCTL_INT(_vm, OID_AUTO, swap_burst_read,
	CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins");

SYSCTL_INT(_vm, OID_AUTO, swap_cache_use,
	CTLFLAG_RD, &vm_swap_cache_use, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_anon_use,
	CTLFLAG_RD, &vm_swap_anon_use, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_size,
	CTLFLAG_RD, &vm_swap_size, 0, "");
SYSCTL_INT(_vm, OID_AUTO, report_swap_allocs,
	CTLFLAG_RW, &vm_report_swap_allocs, 0, "");

vm_zone_t		swap_zone;

/*
 * Red-Black tree for swblock entries
 *
 * The caller must hold vm_token
 */
RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare,
	     vm_pindex_t, swb_index);

int
rb_swblock_compare(struct swblock *swb1, struct swblock *swb2)
{
	if (swb1->swb_index < swb2->swb_index)
		return(-1);
	if (swb1->swb_index > swb2->swb_index)
		return(1);
	return(0);
}

static
int
rb_swblock_scancmp(struct swblock *swb, void *data)
{
	struct swfreeinfo *info = data;

	if (swb->swb_index < info->basei)
		return(-1);
	if (swb->swb_index > info->endi)
		return(1);
	return(0);
}

static
int
rb_swblock_condcmp(struct swblock *swb, void *data)
{
	struct swfreeinfo *info = data;

	if (swb->swb_index < info->basei)
		return(-1);
	return(0);
}

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static void	swap_pager_dealloc (vm_object_t object);
static int	swap_pager_getpage (vm_object_t, vm_page_t *, int);
static void	swap_chain_iodone(struct bio *biox);

struct pagerops swappagerops = {
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpage,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage	/* get backing store status for page	*/
};

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
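
/*
 * Illustrative note (editor's sketch, not part of the original code):
 * assuming, purely for the example, that SWB_NPAGES were 8, then
 * swap_pager_init() below would compute dmmax = 16 and dmmax_mask = ~15.
 * Swap block numbers 0-15 would fall in the first stripe, 16-31 in the
 * second, and so on, and BLK2DEVIDX() would interleave the stripes across
 * the configured swap devices, e.g. with nswdev == 2:
 *
 *	blk  0-15  ->  (0 / 16) % 2  == device 0
 *	blk 16-31  -> (16 / 16) % 2  == device 1
 *	blk 32-47  -> (32 / 16) % 2  == device 0
 *
 * An I/O therefore may not span a dmmax boundary, which is what the
 * ((blk1 ^ blk2) & dmmax_mask) tests elsewhere in this file enforce:
 * two blocks in the same stripe share all bits above the low four.
 */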

int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

static __inline void	swp_sizecheck (void);
static void	swp_pager_async_iodone (struct bio *bio);

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace(vm_object_t object,
						swblk_t blk, int npages);
static __inline swblk_t swp_pager_getswapspace(vm_object_t object, int npages);

/*
 * Metadata functions
 */

static void swp_pager_meta_convert(vm_object_t);
static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
static void swp_pager_meta_free_all(vm_object_t);
static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 * No restrictions on call
 * This routine may not block.
 * SMP races are ok.
 */
static __inline void
swp_sizecheck(void)
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			kprintf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
			swap_fail_ticks = ticks;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 *
 * Called from the low level boot code only.
 */
static void
swap_pager_init(void *arg __unused)
{
	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */
	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}
SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL);

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 *
 * Called from the low level boot code only.
 */
void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the number of pages an XIO
	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap devices, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */

	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf_kva + 1) / 2;
	nsw_wcount_sync = (nswbuf_kva + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;

	/*
	 * The zone is dynamically allocated so generally size it to
	 * maxswzone (32MB to 512MB of KVM).  Set a minimum size based
	 * on physical memory of around 8x (each swblock can hold 16 pages).
	 *
	 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
	 * has increased dramatically.
	 */
	n = vmstats.v_page_count / 2;
	if (maxswzone && n < maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;

	do {
		swap_zone = zinit(
			"SWAPMETA",
			sizeof(struct swblock),
			n,
			ZONE_INTERRUPT);
		if (swap_zone != NULL)
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	if (swap_zone == NULL)
		panic("swap_pager_swap_init: swap_zone == NULL");
	if (n2 != n)
		kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
}

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_convert().
 *
 *	We only support unnamed objects.
 *
 * No restrictions.
 */
vm_object_t
swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
{
	vm_object_t object;

	KKASSERT(handle == NULL);
	object = vm_object_allocate_hold(OBJT_DEFAULT,
					 OFF_TO_IDX(offset + PAGE_MASK + size));
	swp_pager_meta_convert(object);
	vm_object_drop(object);

	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 * The object must be locked or unreferenceable.
 * No other requirements.
 */
static void
swap_pager_dealloc(vm_object_t object)
{
	vm_object_hold(object);
	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	swp_pager_meta_free_all(object);
	vm_object_drop(object);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 * The caller must hold the object.
 * This routine may not block.
 */
static __inline swblk_t
swp_pager_getswapspace(vm_object_t object, int npages)
{
	swblk_t blk;

	lwkt_gettoken(&vm_token);
	blk = blist_allocat(swapblist, npages, swapiterator);
	if (blk == SWAPBLK_NONE)
		blk = blist_allocat(swapblist, npages, 0);
	if (blk == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			if (vm_swap_max == 0)
				kprintf("Warning: The system would like to "
					"page to swap but no swap space "
					"is configured!\n");
			else
				kprintf("swap_pager_getswapspace: "
					"swap full allocating %d pages\n",
					npages);
			swap_pager_full = 2;
			if (swap_pager_almost_full == 0)
				swap_fail_ticks = ticks;
			swap_pager_almost_full = 1;
		}
	} else {
		/* swapiterator = blk; disable for now, doesn't work well */
		swapacctspace(blk, -npages);
		if (object->type == OBJT_SWAP)
			vm_swap_anon_use += npages;
		else
			vm_swap_cache_use += npages;
		swp_sizecheck();
	}
	lwkt_reltoken(&vm_token);
	return(blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 * This routine may not block.
 */

static __inline void
swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages)
{
	struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)];

	lwkt_gettoken(&vm_token);
	sp->sw_nused -= npages;
	if (object->type == OBJT_SWAP)
		vm_swap_anon_use -= npages;
	else
		vm_swap_cache_use -= npages;

	if (sp->sw_flags & SW_CLOSING) {
		lwkt_reltoken(&vm_token);
		return;
	}

	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
	lwkt_reltoken(&vm_token);
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 * No requirements.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size)
{
	vm_object_hold(object);
	swp_pager_meta_free(object, start, size);
	vm_object_drop(object);
}

/*
 * No requirements.
 */
void
swap_pager_freespace_all(vm_object_t object)
{
	vm_object_hold(object);
	swp_pager_meta_free_all(object);
	vm_object_drop(object);
}

/*
 * This function conditionally frees swap cache swap starting at
 * (*basei) in the object.  (count) swap blocks will be nominally freed.
 * The actual number of blocks freed can be more or less than the
 * requested number.
 *
 * This function nominally returns the number of blocks freed.  However,
 * the actual number of blocks freed may be less than the returned value.
 * If the function is unable to exhaust the object or if it is able to
 * free (approximately) the requested number of blocks it returns
 * a value n > count.
 *
 * If we exhaust the object we will return a value n <= count.
 *
 * The caller must hold the object.
 *
 * WARNING!  If count == 0 then -1 can be returned as a degenerate case,
 *	     callers should always pass a count value > 0.
 */
static int swap_pager_condfree_callback(struct swblock *swap, void *data);

int
swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count)
{
	struct swfreeinfo info;
	int n;
	int t;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	info.object = object;
	info.basei = *basei;	/* skip up to this page index */
	info.begi = count;	/* max swap pages to destroy */
	info.endi = count * 8;	/* max swblocks to scan */

	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp,
				swap_pager_condfree_callback, &info);
	*basei = info.basei;

	/*
	 * Take the higher difference swblocks vs pages
	 */
	n = count - (int)info.begi;
	t = count * 8 - (int)info.endi;
	if (n < t)
		n = t;
	if (n < 1)
		n = 1;
	return(n);
}

/*
 * The idea is to free whole meta-block to avoid fragmenting
 * the swap space or disk I/O.  We only do this if NO VM pages
 * are present.
 *
 * We do not have to deal with clearing PG_SWAPPED in related VM
 * pages because there are no related VM pages.
 *
 * The caller must hold the object.
 */
static int
swap_pager_condfree_callback(struct swblock *swap, void *data)
{
	struct swfreeinfo *info = data;
	vm_object_t object = info->object;
	int i;

	for (i = 0; i < SWAP_META_PAGES; ++i) {
		if (vm_page_lookup(object, swap->swb_index + i))
			break;
	}
	info->basei = swap->swb_index + SWAP_META_PAGES;
	if (i == SWAP_META_PAGES) {
		info->begi -= swap->swb_count;
		swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES);
	}
	--info->endi;
	if ((int)info->begi < 0 || (int)info->endi < 0)
		return(-1);
	lwkt_yield();
	return(0);
}

/*
 * Called by vm_page_alloc() when a new VM page is inserted
 * into a VM object.  Checks whether swap has been assigned to
 * the page and sets PG_SWAPPED as necessary.
 *
 * No requirements.
 */
void
swap_pager_page_inserted(vm_page_t m)
{
	if (m->object->swblock_count) {
		vm_object_hold(m->object);
		if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE)
			vm_page_flag_set(m, PG_SWAPPED);
		vm_object_drop(m->object);
	}
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 *
 * The caller is responsible for avoiding races in the specified range.
 * No other requirements.
 */
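/*
 * Illustrative note (editor's addition): swap_pager_reserve() below asks
 * the bitmap for BLIST_MAX_ALLOC contiguous blocks at a time and halves
 * the request size each time the allocation fails, e.g. N, N/2, N/4, ...
 * down to 1.  Only when not even a single block can be allocated does it
 * back out the partial reservation made so far and return -1.  The
 * starting value N is whatever BLIST_MAX_ALLOC happens to be in the
 * blist implementation; the progression here is only an example.
 */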
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	swblk_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	vm_object_hold(object);

	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(object, n)) ==
			       SWAPBLK_NONE)
			{
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg,
							    start - beg);
					vm_object_drop(object);
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	vm_object_drop(object);
	return(0);
}

/*
 * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be held by the caller.
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
		vm_pindex_t base_index, int destroysource)
{
	vm_pindex_t i;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject));
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject));

	/*
	 * transfer source to destination.
	 */
	for (i = 0; i < dstobject->size; ++i) {
		swblk_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			swblk_t srcaddr;

			srcaddr = swp_pager_meta_ctl(srcobject,
						     base_index + i, SWM_POP);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the sourceblock.
			 */
			swp_pager_meta_ctl(srcobject, base_index + i, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */
	if (destroysource) {
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		swp_pager_meta_free_all(srcobject);
		if (srcobject->type == OBJT_SWAP)
			srcobject->type = OBJT_DEFAULT;
	}
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 *
 * No requirements.
 */
boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	swblk_t blk0;

	/*
	 * do we have good backing store at the requested index ?
	 */
	vm_object_hold(object);
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		vm_object_drop(object);
		return (FALSE);
	}
	vm_object_drop(object);
	return (TRUE);
}

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.  This operates on any VM object, not just
 *	OBJT_SWAP objects.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation.
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 * The page must be busied or soft-busied.
 * The caller can hold the object to avoid blocking, else we might block.
 * No other requirements.
 */
void
swap_pager_unswapped(vm_page_t m)
{
	if (m->flags & PG_SWAPPED) {
		vm_object_hold(m->object);
		KKASSERT(m->flags & PG_SWAPPED);
		swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
		vm_page_flag_clear(m, PG_SWAPPED);
		vm_object_drop(m->object);
	}
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements a VM OBJECT strategy function using swap backing store.
 *	This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP
 *	types.
 *
 *	This is intended to be a cacheless interface (i.e. caching occurs at
 *	higher levels), and is also used as a swap-based SSD cache for vnode
 *	and device objects.
 *
 *	All I/O goes directly to and from the swap device.
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 *
 * No requirements.
 */
void
swap_pager_strategy(vm_object_t object, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct bio *nbio;
	vm_pindex_t start;
	vm_pindex_t biox_blkno = 0;
	int count;
	char *data;
	struct bio *biox;
	struct buf *bufx;
#if 0
	struct bio_track *track;
#endif

#if 0
	/*
	 * tracking for swapdev vnode I/Os
	 */
	if (bp->b_cmd == BUF_CMD_READ)
		track = &swapdev_vp->v_track_read;
	else
		track = &swapdev_vp->v_track_write;
#endif

	if (bp->b_bcount & PAGE_MASK) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bio);
		kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
			"not page bounded\n",
			bp, (long long)bio->bio_offset, (int)bp->b_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */
	bp->b_error = 0;
	bp->b_flags &= ~B_ERROR;
	bp->b_resid = bp->b_bcount;

	start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
	count = howmany(bp->b_bcount, PAGE_SIZE);
	data = bp->b_data;

	/*
	 * Deal with BUF_CMD_FREEBLKS
	 */
	if (bp->b_cmd == BUF_CMD_FREEBLKS) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		vm_object_hold(object);
		swp_pager_meta_free(object, start, count);
		vm_object_drop(object);
		bp->b_resid = 0;
		biodone(bio);
		return;
	}

	/*
	 * We need to be able to create a new cluster of I/O's.  We cannot
	 * use the caller fields of the passed bio so push a new one.
	 *
	 * Because nbio is just a placeholder for the cluster links,
	 * we can biodone() the original bio instead of nbio to make
	 * things a bit more efficient.
	 */
	nbio = push_bio(bio);
	nbio->bio_offset = bio->bio_offset;
	nbio->bio_caller_info1.cluster_head = NULL;
	nbio->bio_caller_info2.cluster_tail = NULL;

	biox = NULL;
	bufx = NULL;

	/*
	 * Execute read or write
	 */
	vm_object_hold(object);

	while (count > 0) {
		swblk_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */
		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) {
			blk = swp_pager_getswapspace(object, 1);
			if (blk == SWAPBLK_NONE) {
				bp->b_error = ENOMEM;
				bp->b_flags |= B_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */
		if (
		    biox && (biox_blkno + btoc(bufx->b_bcount) != blk ||
			     ((biox_blkno ^ blk) & dmmax_mask)
		    )
		) {
			if (bp->b_cmd == BUF_CMD_READ) {
				++mycpu->gd_cnt.v_swapin;
				mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
			} else {
				++mycpu->gd_cnt.v_swapout;
				mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
				bufx->b_dirtyend = bufx->b_bcount;
			}

			/*
			 * Finished with this buf.
			 */
			KKASSERT(bufx->b_bcount != 0);
			if (bufx->b_cmd != BUF_CMD_READ)
				bufx->b_dirtyend = bufx->b_bcount;
			biox = NULL;
			bufx = NULL;
		}

		/*
		 * Add new swapblk to biox, instantiating biox if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */
		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->b_resid -= PAGE_SIZE;
		} else {
			if (biox == NULL) {
				/* XXX chain count > 4, wait to <= 4 */

				bufx = getpbuf(NULL);
				biox = &bufx->b_bio1;
				cluster_append(nbio, bufx);
				bufx->b_flags |= (bp->b_flags & B_ORDERED);
				bufx->b_cmd = bp->b_cmd;
				biox->bio_done = swap_chain_iodone;
				biox->bio_offset = (off_t)blk << PAGE_SHIFT;
				biox->bio_caller_info1.cluster_parent = nbio;
				biox_blkno = blk;
				bufx->b_bcount = 0;
				bufx->b_data = data;
			}
			bufx->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}

	vm_object_drop(object);

	/*
	 * Flush out last buffer
	 */
	if (biox) {
		if (bufx->b_cmd == BUF_CMD_READ) {
			++mycpu->gd_cnt.v_swapin;
			mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
		} else {
			++mycpu->gd_cnt.v_swapout;
			mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
			bufx->b_dirtyend = bufx->b_bcount;
		}
		KKASSERT(bufx->b_bcount);
		if (bufx->b_cmd != BUF_CMD_READ)
			bufx->b_dirtyend = bufx->b_bcount;
		/* biox, bufx = NULL */
	}

	/*
	 * Now initiate all the I/O.  Be careful looping on our chain as
	 * I/O's may complete while we are still initiating them.
	 *
	 * If the request is a 100% sparse read no bios will be present
	 * and we just biodone() the buffer.
	 */
	nbio->bio_caller_info2.cluster_tail = NULL;
	bufx = nbio->bio_caller_info1.cluster_head;

	if (bufx) {
		while (bufx) {
			biox = &bufx->b_bio1;
			BUF_KERNPROC(bufx);
			bufx = bufx->b_cluster_next;
			vn_strategy(swapdev_vp, biox);
		}
	} else {
		biodone(bio);
	}

	/*
	 * Completion of the cluster will also call biodone_chain(nbio).
	 * We never call biodone(nbio) so we don't have to worry about
	 * setting up a bio_done callback.  It's handled in the sub-IO.
	 */
	/**/
}

/*
 * biodone callback
 *
 * No requirements.
 */
static void
swap_chain_iodone(struct bio *biox)
{
	struct buf **nextp;
	struct buf *bufx;	/* chained sub-buffer */
	struct bio *nbio;	/* parent nbio with chain glue */
	struct buf *bp;		/* original bp associated with nbio */
	int chain_empty;

	bufx = biox->bio_buf;
	nbio = biox->bio_caller_info1.cluster_parent;
	bp = nbio->bio_buf;

	/*
	 * Update the original buffer
	 */
	KKASSERT(bp != NULL);
	if (bufx->b_flags & B_ERROR) {
		atomic_set_int(&bufx->b_flags, B_ERROR);
		bp->b_error = bufx->b_error;	/* race ok */
	} else if (bufx->b_resid != 0) {
		atomic_set_int(&bufx->b_flags, B_ERROR);
		bp->b_error = EINVAL;		/* race ok */
	} else {
		atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
	}

	/*
	 * Remove us from the chain.
	 */
	spin_lock(&swapbp_spin);
	nextp = &nbio->bio_caller_info1.cluster_head;
	while (*nextp != bufx) {
		KKASSERT(*nextp != NULL);
		nextp = &(*nextp)->b_cluster_next;
	}
	*nextp = bufx->b_cluster_next;
	chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
	spin_unlock(&swapbp_spin);

	/*
	 * Clean up bufx.  If the chain is now empty we finish out
	 * the parent.  Note that we may be racing other completions
	 * so we must use the chain_empty status from above.
	 */
	if (chain_empty) {
		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
			atomic_set_int(&bp->b_flags, B_ERROR);
			bp->b_error = EINVAL;
		}
		biodone_chain(nbio);
	}
	relpbuf(bufx, NULL);
}

/*
 * SWAP_PAGER_GETPAGES() - bring page in from swap
 *
 * The requested page may have to be brought in from swap.  Calculate the
 * swap block and bring in additional pages if possible.  All pages must
 * have contiguous swap block assignments and reside in the same object.
 *
 * The caller has a single vm_object_pip_add() reference prior to
 * calling us and we should return with the same.
 *
 * The caller has BUSY'd the page.  We should return with (*mpp) left busy,
 * and any additional pages unbusied.
 *
 * If the caller encounters a PG_RAM page it will pass it to us even though
 * it may be valid and dirty.  We cannot overwrite the page in this case!
 * The case is used to allow us to issue pure read-aheads.
 *
 * NOTE! XXX This code does not entirely pipeline yet due to the fact that
 *	 the PG_RAM page is validated at the same time as mreq.  What we
 *	 really need to do is issue a separate read-ahead pbuf.
 *
 * No requirements.
 */
static int
swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
	struct buf *bp;
	struct bio *bio;
	vm_page_t mreq;
	vm_page_t m;
	vm_offset_t kva;
	swblk_t blk;
	int i;
	int j;
	int raonly;
	int error;
	u_int32_t flags;
	vm_page_t marray[XIO_INTERNAL_PAGES];

	mreq = *mpp;

	vm_object_hold(object);
	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
			object,
			mreq->object
		);
	}

	/*
	 * We don't want to overwrite a fully valid page as it might be
	 * dirty.  This case can occur when e.g. vm_fault hits a perfectly
	 * valid page with PG_RAM set.
	 *
	 * In this case we see if the next page is a suitable page-in
	 * candidate and if it is we issue read-ahead.  PG_RAM will be
	 * set on the last page of the read-ahead to continue the pipeline.
	 */
	if (mreq->valid == VM_PAGE_BITS_ALL) {
		if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size) {
			vm_object_drop(object);
			return(VM_PAGER_OK);
		}
		blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0);
		if (blk == SWAPBLK_NONE) {
			vm_object_drop(object);
			return(VM_PAGER_OK);
		}
		m = vm_page_lookup_busy_try(object, mreq->pindex + 1,
					    TRUE, &error);
		if (error) {
			vm_object_drop(object);
			return(VM_PAGER_OK);
		} else if (m == NULL) {
			/*
			 * Use VM_ALLOC_QUICK to avoid blocking on cache
			 * page reuse.
			 */
			m = vm_page_alloc(object, mreq->pindex + 1,
					  VM_ALLOC_QUICK);
			if (m == NULL) {
				vm_object_drop(object);
				return(VM_PAGER_OK);
			}
		} else {
			if (m->valid) {
				vm_page_wakeup(m);
				vm_object_drop(object);
				return(VM_PAGER_OK);
			}
			vm_page_unqueue_nowakeup(m);
		}
		/* page is busy */
		mreq = m;
		raonly = 1;
	} else {
		raonly = 0;
	}

	/*
	 * Try to block-read contiguous pages from swap if sequential,
	 * otherwise just read one page.  Contiguous pages from swap must
	 * reside within a single device stripe because the I/O cannot be
	 * broken up across multiple stripes.
	 *
	 * Note that blk and iblk can be SWAPBLK_NONE but the loop is
	 * set up such that the case(s) are handled implicitly.
	 */
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
	marray[0] = mreq;

	for (i = 1; swap_burst_read &&
		    i < XIO_INTERNAL_PAGES &&
		    mreq->pindex + i < object->size; ++i) {
		swblk_t iblk;

		iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0);
		if (iblk != blk + i)
			break;
		if ((blk ^ iblk) & dmmax_mask)
			break;
		m = vm_page_lookup_busy_try(object, mreq->pindex + i,
					    TRUE, &error);
		if (error) {
			break;
		} else if (m == NULL) {
			/*
			 * Use VM_ALLOC_QUICK to avoid blocking on cache
			 * page reuse.
			 */
			m = vm_page_alloc(object, mreq->pindex + i,
					  VM_ALLOC_QUICK);
			if (m == NULL)
				break;
		} else {
			if (m->valid) {
				vm_page_wakeup(m);
				break;
			}
			vm_page_unqueue_nowakeup(m);
		}
		/* page is busy */
		marray[i] = m;
	}
	if (i > 1)
		vm_page_flag_set(marray[i - 1], PG_RAM);

	/*
	 * If mreq is the requested page and we have nothing to do return
	 * VM_PAGER_FAIL.  If raonly is set mreq is just another read-ahead
	 * page and must be cleaned up.
	 */
	if (blk == SWAPBLK_NONE) {
		KKASSERT(i == 1);
		if (raonly) {
			vnode_pager_freepage(mreq);
			vm_object_drop(object);
			return(VM_PAGER_OK);
		} else {
			vm_object_drop(object);
			return(VM_PAGER_FAIL);
		}
	}

	/*
	 * map our page(s) into kva for input
	 */
	bp = getpbuf_kva(&nsw_rcount);
	bio = &bp->b_bio1;
	kva = (vm_offset_t) bp->b_kvabase;
	bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t));
	pmap_qenter(kva, bp->b_xio.xio_pages, i);

	bp->b_data = (caddr_t)kva;
	bp->b_bcount = PAGE_SIZE * i;
	bp->b_xio.xio_npages = i;
	bio->bio_done = swp_pager_async_iodone;
	bio->bio_offset = (off_t)blk << PAGE_SHIFT;
	bio->bio_caller_info1.index = SWBIO_READ;

	/*
	 * Set index.  If raonly set the index beyond the array so all
	 * the pages are treated the same, otherwise the original mreq is
	 * at index 0.
	 */
	if (raonly)
		bio->bio_driver_info = (void *)(intptr_t)i;
	else
		bio->bio_driver_info = (void *)(intptr_t)0;

	for (j = 0; j < i; ++j)
		vm_page_flag_set(bp->b_xio.xio_pages[j], PG_SWAPINPROG);

	mycpu->gd_cnt.v_swapin++;
	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */
	vm_object_pip_add(object, bp->b_xio.xio_npages);

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 */
	bp->b_cmd = BUF_CMD_READ;
	BUF_KERNPROC(bp);
	vn_strategy(swapdev_vp, bio);

	/*
	 * Wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 *
	 * If this is a read-ahead only we return immediately without
	 * waiting for I/O.
	 */
	if (raonly) {
		vm_object_drop(object);
		return(VM_PAGER_OK);
	}

	/*
	 * Read-ahead includes originally requested page case.
	 */
	for (;;) {
		flags = mreq->flags;
		cpu_ccfence();
		if ((flags & PG_SWAPINPROG) == 0)
			break;
		tsleep_interlock(mreq, 0);
		if (!atomic_cmpset_int(&mreq->flags, flags,
				       flags | PG_WANTED | PG_REFERENCED)) {
			continue;
		}
		mycpu->gd_cnt.v_intrans++;
		if (tsleep(mreq, PINTERLOCKED, "swread", hz*20)) {
			kprintf(
			    "swap_pager: indefinite wait buffer: "
				" bp %p offset: %lld, size: %ld\n",
			    bp,
			    (long long)bio->bio_offset,
			    (long)bp->b_bcount
			);
		}
	}

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */
	vm_object_drop(object);
	if (mreq->valid != VM_PAGE_BITS_ALL)
		return(VM_PAGER_ERROR);
	else
		return(VM_PAGER_OK);

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 *	swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in vn_strategy(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 *
 * No requirements.
 */
void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
		    int sync, int *rtvals)
{
	int i;
	int n = 0;

	vm_object_hold(object);

	if (count && m[0]->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
			object,
			m[0]->object
		);
	}

	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */
	if (object->type == OBJT_DEFAULT) {
		if (object->type == OBJT_DEFAULT)
			swp_pager_meta_convert(object);
	}

	if (curthread != pagethread)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */
	if (swap_async_max != nsw_wcount_async_max) {
		int n;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf_kva / 2)
			n = nswbuf_kva / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 *
		 * vm_token needed for nsw_wcount sleep interlock
		 */
		lwkt_gettoken(&vm_token);
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async_max += n;
			pbuf_adjcount(&nsw_wcount_async, n);
		}
		lwkt_reltoken(&vm_token);
	}

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		struct buf *bp;
		struct bio *bio;
		swblk_t blk;
		int j;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		lwkt_gettoken(&vm_token);

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(object, n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			lwkt_reltoken(&vm_token);
			continue;
		}
		if (vm_report_swap_allocs > 0) {
			kprintf("swap_alloc %08jx,%d\n", (intmax_t)blk, n);
			--vm_report_swap_allocs;
		}

		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.  Note: we are still
		 * at splvm().
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(object, blk + j, n - j);
			n = j;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 */
		if (sync == TRUE)
			bp = getpbuf_kva(&nsw_wcount_sync);
		else
			bp = getpbuf_kva(&nsw_wcount_async);
		bio = &bp->b_bio1;

		lwkt_reltoken(&vm_token);

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_bcount = PAGE_SIZE * n;
		bio->bio_offset = (off_t)blk << PAGE_SHIFT;

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(mreq->object, mreq->pindex,
					     blk + j);
			if (object->type == OBJT_SWAP)
				vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_xio.xio_pages[j] = mreq;
		}
		bp->b_xio.xio_npages = n;

		mycpu->gd_cnt.v_swapout++;
		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;

		bp->b_dirtyoff = 0;		/* req'd for NFS */
		bp->b_dirtyend = bp->b_bcount;	/* req'd for NFS */
		bp->b_cmd = BUF_CMD_WRITE;
		bio->bio_caller_info1.index = SWBIO_WRITE;

		/*
		 * asynchronous
		 */
		if (sync == FALSE) {
			bio->bio_done = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			vn_strategy(swapdev_vp, bio);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			continue;
		}

		/*
		 * Issue synchronously.
		 *
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		bio->bio_caller_info1.index |= SWBIO_SYNC;
		bio->bio_done = biodone_sync;
		bio->bio_flags |= BIO_SYNC;
		vn_strategy(swapdev_vp, bio);
		biowait(bio, "swwrt");

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */
		swp_pager_async_iodone(bio);
	}
	vm_object_drop(object);
}

/*
 * No requirements.
 *
 * Recalculate the low and high-water marks.
 */
void
swap_pager_newswap(void)
{
	if (vm_swap_max) {
		nswap_lowat = vm_swap_max * 4 / 100;	/* 4% left */
		nswap_hiwat = vm_swap_max * 6 / 100;	/* 6% left */
		kprintf("swap low/high-water marks set to %d/%d\n",
			nswap_lowat, nswap_hiwat);
	} else {
		nswap_lowat = 128;
		nswap_hiwat = 512;
	}
	swp_sizecheck();
}

/*
 *	swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 *
 * No requirements.
 */
static void
swp_pager_async_iodone(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	vm_object_t object = NULL;
	int i;
	int *nswptr;

	/*
	 * report error
	 */
	if (bp->b_flags & B_ERROR) {
		kprintf(
		    "swap_pager: I/O error - %s failed; offset %lld,"
			"size %ld, error %d\n",
		    ((bio->bio_caller_info1.index & SWBIO_READ) ?
			"pagein" : "pageout"),
		    (long long)bio->bio_offset,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object, raise to splvm().
	 */
	if (bp->b_xio.xio_npages)
		object = bp->b_xio.xio_pages[0]->object;

	/*
	 * remove the mapping for kernel virtual
	 */
	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */
	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
		vm_page_t m = bp->b_xio.xio_pages[i];

		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */

			if (bio->bio_caller_info1.index & SWBIO_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * NOTE: We can't actually free the page from
				 * here, because this is an interrupt.  It
				 * is not legal to mess with object->memq
				 * from an interrupt.  Deactivate the page
				 * instead.
				 */

				m->valid = 0;
				vm_page_flag_clear(m, PG_SWAPINPROG);

				/*
				 * bio_driver_info holds the requested page
				 * index.
				 */
				if (i != (int)(intptr_t)bio->bio_driver_info) {
					vm_page_deactivate(m);
					vm_page_wakeup(m);
				} else {
					vm_page_flash(m);
				}
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs remove the swap
				 * assignment (note that PG_SWAPPED may or
				 * may not be set depending on prior activity).
				 *
				 * Re-dirty OBJT_SWAP pages as there is no
				 * other backing store, we can't throw the
				 * page away.
				 *
				 * Non-OBJT_SWAP pages (aka swapcache) must
				 * not be dirtied since they may not have
				 * been dirty in the first place, and they
				 * do have backing store (the vnode).
				 */
				vm_page_busy_wait(m, FALSE, "swadpg");
				swp_pager_meta_ctl(m->object, m->pindex,
						   SWM_FREE);
				vm_page_flag_clear(m, PG_SWAPPED);
				if (m->object->type == OBJT_SWAP) {
					vm_page_dirty(m);
					vm_page_activate(m);
				}
				vm_page_flag_clear(m, PG_SWAPINPROG);
				vm_page_io_finish(m);
				vm_page_wakeup(m);
			}
		} else if (bio->bio_caller_info1.index & SWBIO_READ) {
			/*
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */

			/*
			 * NOTE: can't call pmap_clear_modify(m) from an
			 * interrupt thread, the pmap code may have to map
			 * non-kernel pmaps and currently asserts the case.
			 */
			/*pmap_clear_modify(m);*/
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_SWAPINPROG);
			vm_page_flag_set(m, PG_SWAPPED);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure to not unbusy getpages specifically
			 * requested page - getpages expects it to be
			 * left busy.
			 *
			 * bio_driver_info holds the requested page
			 */
			if (i != (int)(intptr_t)bio->bio_driver_info) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * Mark the page clean but do not mess with the
			 * pmap-layer's modified state.  That state should
			 * That state should also be clear since the
			 * caller protected the page VM_PROT_READ, but
			 * allow the case.
			 *
			 * We are in an interrupt, avoid pmap operations.
			 *
			 * If we have a severe page deficit, deactivate the
			 * page.  Do not try to cache it (which would also
			 * involve a pmap op), because the page might still
			 * be read-heavy.
			 *
			 * When using the swap to cache clean vnode pages
			 * we do not mess with the page dirty bits.
			 */
			vm_page_busy_wait(m, FALSE, "swadpg");
			if (m->object->type == OBJT_SWAP)
				vm_page_undirty(m);
			vm_page_flag_clear(m, PG_SWAPINPROG);
			vm_page_flag_set(m, PG_SWAPPED);
			if (vm_page_count_severe())
				vm_page_deactivate(m);
#if 0
			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
				vm_page_protect(m, VM_PROT_READ);
#endif
			vm_page_io_finish(m);
			vm_page_wakeup(m);
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */

	if (object)
		vm_object_pip_wakeup_n(object, bp->b_xio.xio_npages);

	/*
	 * Release the physical I/O buffer.
	 *
	 * NOTE: Due to synchronous operations in the write case b_cmd may
	 *	 already be set to BUF_CMD_DONE and BIO_SYNC may have already
	 *	 been cleared.
	 *
	 * Use vm_token to interlock nsw_rcount/wcount wakeup?
	 */
	lwkt_gettoken(&vm_token);
	if (bio->bio_caller_info1.index & SWBIO_READ)
		nswptr = &nsw_rcount;
	else if (bio->bio_caller_info1.index & SWBIO_SYNC)
		nswptr = &nsw_wcount_sync;
	else
		nswptr = &nsw_wcount_async;
	bp->b_cmd = BUF_CMD_DONE;
	relpbuf(bp, nswptr);
	lwkt_reltoken(&vm_token);
}

/*
 * Fault-in a potentially swapped page and remove the swap reference.
 * (used by swapoff code)
 *
 * object must be held.
 */
static __inline void
swp_pager_fault_page(vm_object_t object, int *sharedp, vm_pindex_t pindex)
{
	struct vnode *vp;
	vm_page_t m;
	int error;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	if (object->type == OBJT_VNODE) {
		/*
		 * Any swap related to a vnode is due to swapcache.  We must
		 * vget() the vnode in case it is not active (otherwise
		 * vref() will panic).  Calling vm_object_page_remove() will
		 * ensure that any swap ref is removed interlocked with the
		 * page.  clean_only is set to TRUE so we don't throw away
		 * dirty pages.
		 */
		vp = object->handle;
		error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
		if (error == 0) {
			vm_object_page_remove(object, pindex, pindex + 1, TRUE);
			vput(vp);
		}
	} else {
		/*
		 * Otherwise it is a normal OBJT_SWAP object and we can
		 * fault the page in and remove the swap.
		 */
		m = vm_fault_object_page(object, IDX_TO_OFF(pindex),
					 VM_PROT_NONE,
					 VM_FAULT_DIRTY | VM_FAULT_UNSWAP,
					 sharedp, &error);
		if (m)
			vm_page_unhold(m);
	}
}

/*
 * This removes all swap blocks related to a particular device.  We have
 * to be careful of ripups during the scan.
 */
static int swp_pager_swapoff_callback(struct swblock *swap, void *data);

int
swap_pager_swapoff(int devidx)
{
	struct swswapoffinfo info;
	struct vm_object marker;
	vm_object_t object;
	int n;

	bzero(&marker, sizeof(marker));
	marker.type = OBJT_MARKER;

	for (n = 0; n < VMOBJ_HSIZE; ++n) {
		lwkt_gettoken(&vmobj_tokens[n]);
		TAILQ_INSERT_HEAD(&vm_object_lists[n], &marker, object_list);

		while ((object = TAILQ_NEXT(&marker, object_list)) != NULL) {
			if (object->type == OBJT_MARKER)
				goto skip;
			if (object->type != OBJT_SWAP &&
			    object->type != OBJT_VNODE)
				goto skip;
			vm_object_hold(object);
			if (object->type != OBJT_SWAP &&
			    object->type != OBJT_VNODE) {
				vm_object_drop(object);
				goto skip;
			}
			info.object = object;
			info.shared = 0;
			info.devidx = devidx;
			swblock_rb_tree_RB_SCAN(&object->swblock_root,
					    NULL, swp_pager_swapoff_callback,
					    &info);
			vm_object_drop(object);
skip:
			if (object == TAILQ_NEXT(&marker, object_list)) {
				TAILQ_REMOVE(&vm_object_lists[n],
					     &marker, object_list);
				TAILQ_INSERT_AFTER(&vm_object_lists[n], object,
						   &marker, object_list);
			}
		}
		TAILQ_REMOVE(&vm_object_lists[n], &marker, object_list);
		lwkt_reltoken(&vmobj_tokens[n]);
	}

	/*
	 * If we fail to locate all swblocks we just fail gracefully and
	 * do not bother to restore paging on the swap device.  The user
	 * can simply retry the swapoff later.
	 */
	if (swdevt[devidx].sw_nused)
		return (1);
	else
		return (0);
}

static
int
swp_pager_swapoff_callback(struct swblock *swap, void *data)
{
	struct swswapoffinfo *info = data;
	vm_object_t object = info->object;
	vm_pindex_t index;
	swblk_t v;
	int i;

	index = swap->swb_index;
	for (i = 0; i < SWAP_META_PAGES; ++i) {
		/*
		 * Make sure we don't race a dying object.  This will
		 * kill the scan of the object's swap blocks entirely.
		 */
		if (object->flags & OBJ_DEAD)
			return(-1);

		/*
		 * Fault the page, which can obviously block.  If the swap
		 * structure disappears break out.
		 */
		v = swap->swb_pages[i];
		if (v != SWAPBLK_NONE && BLK2DEVIDX(v) == info->devidx) {
			swp_pager_fault_page(object, &info->shared,
					     swap->swb_index + i);
			/* swap ptr might go away */
			if (RB_LOOKUP(swblock_rb_tree,
				      &object->swblock_root, index) != swap) {
				break;
			}
		}
	}
	return(0);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio() covers.
 *
 *	Swap metadata is kept in per-object red-black trees of swblock
 *	structures rather than in a global hash; each object also maintains
 *	a count of its swblocks (swblock_count).
 */
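
/*
 * Illustrative sketch (not compiled): how a linear page index is expected
 * to map onto the metadata structures below, based on the SWAP_META_MASK /
 * SWAP_META_PAGES usage in this file.  Each swblock covers SWAP_META_PAGES
 * consecutive page indices; the low bits select the slot within the
 * swblock and the remaining bits form the swblock's swb_index key in the
 * object's red-black tree.  The helper name is hypothetical and exists
 * only for this example.
 */
#if 0
static __inline swblk_t
swp_pager_example_slot(struct swblock *swap, vm_pindex_t pindex)
{
	/* swb_index is the key: the page index rounded down to a boundary */
	KKASSERT(swap->swb_index == (pindex & ~(vm_pindex_t)SWAP_META_MASK));

	/* the low bits select one of the SWAP_META_PAGES slots */
	return (swap->swb_pages[pindex & SWAP_META_MASK]);
}
#endif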
/*
 * Lookup the swblock containing the specified swap block index.
 *
 * The caller must hold the object.
 */
static __inline
struct swblock *
swp_pager_lookup(vm_object_t object, vm_pindex_t index)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	index &= ~(vm_pindex_t)SWAP_META_MASK;
	return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
}

/*
 * Remove a swblock from the RB tree.
 *
 * The caller must hold the object.
 */
static __inline
void
swp_pager_remove(vm_object_t object, struct swblock *swap)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap);
}

/*
 * Convert default object to swap object if necessary
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_convert(vm_object_t object)
{
	if (object->type == OBJT_DEFAULT) {
		object->type = OBJT_SWAP;
		KKASSERT(object->swblock_count == 0);
	}
}

/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.  Vnode objects do not need to be converted.
 *
 *	The specified swapblk is added to the object's swap metadata.
 *	Any previously assigned swapblk is freed.
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk)
{
	struct swblock *swap;
	struct swblock *oswap;
	vm_pindex_t v;

	KKASSERT(swapblk != SWAPBLK_NONE);
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Convert object if necessary
	 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);

	/*
	 * Locate the swblock, creating it if not found.  If we run out of
	 * space in the zone we wait and, since the swblock tree may have
	 * changed while we slept, retry.
	 */
retry:
	swap = swp_pager_lookup(object, index);

	if (swap == NULL) {
		int i;

		swap = zalloc(swap_zone);
		if (swap == NULL) {
			vm_wait(0);
			goto retry;
		}
		swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK;
		swap->swb_count = 0;

		++object->swblock_count;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
		oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap);
		KKASSERT(oswap == NULL);
	}

	/*
	 * Delete prior contents of metadata.
	 *
	 * NOTE: Decrement swb_count after the freeing operation (which
	 *	 might block) to prevent racing destruction of the swblock.
	 */
	index &= SWAP_META_MASK;

	while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) {
		swap->swb_pages[index] = SWAPBLK_NONE;
		/* can block */
		swp_pager_freeswapspace(object, v, 1);
		--swap->swb_count;
		--mycpu->gd_vmtotal.t_vm;
	}

	/*
	 * Enter block into metadata
	 */
	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE) {
		++swap->swb_count;
		++mycpu->gd_vmtotal.t_vm;
	}
}
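
/*
 * Illustrative sketch (not compiled): how a caller that already holds the
 * object token is expected to record a backing swap block for a page
 * index using swp_pager_meta_build() above.  Any block previously assigned
 * to the same index is freed automatically by the build, so the caller
 * does not have to release it first.  The 'blk' argument is assumed to
 * have been obtained from the swap allocator elsewhere in this file; the
 * function name is hypothetical.
 */
#if 0
static void
swp_pager_example_assign(vm_object_t object, vm_pindex_t pindex, swblk_t blk)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Converts a default object to OBJT_SWAP if needed and replaces
	 * any prior assignment for pindex with blk.
	 */
	swp_pager_meta_build(object, pindex, blk);

	/* the swblock covering pindex must now exist */
	KKASSERT(swp_pager_lookup(object, pindex) != NULL);
}
#endif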
/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 * The caller must hold the object.
 */
static int swp_pager_meta_free_callback(struct swblock *swb, void *data);

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count)
{
	struct swfreeinfo info;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Nothing to do
	 */
	if (object->swblock_count == 0) {
		KKASSERT(RB_EMPTY(&object->swblock_root));
		return;
	}
	if (count == 0)
		return;

	/*
	 * Setup for RB tree scan.  Note that the pindex range can be huge
	 * due to the 64 bit page index space so we cannot safely iterate.
	 */
	info.object = object;
	info.basei = index & ~(vm_pindex_t)SWAP_META_MASK;
	info.begi = index;
	info.endi = index + count - 1;
	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp,
				swp_pager_meta_free_callback, &info);
}

/*
 * The caller must hold the object.
 */
static
int
swp_pager_meta_free_callback(struct swblock *swap, void *data)
{
	struct swfreeinfo *info = data;
	vm_object_t object = info->object;
	int index;
	int eindex;

	/*
	 * Figure out the range within the swblock.  The wider scan may
	 * return edge-case swap blocks when the start and/or end points
	 * are in the middle of a block.
	 */
	if (swap->swb_index < info->begi)
		index = (int)info->begi & SWAP_META_MASK;
	else
		index = 0;

	if (swap->swb_index + SWAP_META_PAGES > info->endi)
		eindex = (int)info->endi & SWAP_META_MASK;
	else
		eindex = SWAP_META_MASK;

	/*
	 * Scan and free the blocks.  The loop terminates early if
	 * (swap)'s block count drops to zero and the swblock itself is
	 * freed.
	 *
	 * NOTE: Decrement swb_count after swp_pager_freeswapspace()
	 *	 to deal with a zfree race.
	 */
	while (index <= eindex) {
		swblk_t v = swap->swb_pages[index];

		if (v != SWAPBLK_NONE) {
			swap->swb_pages[index] = SWAPBLK_NONE;
			/* can block */
			swp_pager_freeswapspace(object, v, 1);
			--mycpu->gd_vmtotal.t_vm;
			if (--swap->swb_count == 0) {
				swp_pager_remove(object, swap);
				zfree(swap_zone, swap);
				--object->swblock_count;
				break;
			}
		}
		++index;
	}

	/* swap may be invalid here due to zfree above */
	lwkt_yield();

	return(0);
}
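
/*
 * Illustrative sketch (not compiled): how the range-free entry point above
 * is expected to be driven when the backing store for a run of page
 * indices is being discarded, e.g. when pages are destroyed or renamed.
 * The caller must hold the object token; the function name and the
 * 'npages' parameter are hypothetical and exist only for this example.
 */
#if 0
static void
swp_pager_example_free_range(vm_object_t object, vm_pindex_t pindex,
			     vm_pindex_t npages)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Frees the swap blocks backing [pindex, pindex + npages - 1] and
	 * destroys any swblock whose count drops to zero.
	 */
	swp_pager_meta_free(object, pindex, npages);
}
#endif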
/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	NOTE: Decrement swb_count after the freeing operation (which
 *	      might block) to prevent racing destruction of the swblock.
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	struct swblock *swap;
	int i;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	while ((swap = RB_ROOT(&object->swblock_root)) != NULL) {
		swp_pager_remove(object, swap);
		for (i = 0; i < SWAP_META_PAGES; ++i) {
			swblk_t v = swap->swb_pages[i];
			if (v != SWAPBLK_NONE) {
				/* can block */
				swp_pager_freeswapspace(object, v, 1);
				--swap->swb_count;
				--mycpu->gd_vmtotal.t_vm;
			}
		}
		if (swap->swb_count != 0)
			panic("swap_pager_meta_free_all: swb_count != 0");
		zfree(swap_zone, swap);
		--object->swblock_count;
		lwkt_yield();
	}
	KKASSERT(object->swblock_count == 0);
}

/*
 * SWP_PAGER_META_CTL() -	misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine returns the swapblk that was looked up or popped, or
 *	SWAPBLK_NONE if the block was freed or no block was assigned.
 *	This routine will automatically free any invalid meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from metadata but do not free it... pop it out
 *
 * The caller must hold the object.
 */
static swblk_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
{
	struct swblock *swap;
	swblk_t r1;

	if (object->swblock_count == 0)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	swap = swp_pager_lookup(object, index);

	if (swap != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				--mycpu->gd_vmtotal.t_vm;
				if (--swap->swb_count == 0) {
					swp_pager_remove(object, swap);
					zfree(swap_zone, swap);
					--object->swblock_count;
				}
			}
			/* swap ptr may be invalid */
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(object, r1, 1);
				r1 = SWAPBLK_NONE;
			}
		}
		/* swap ptr may be invalid */
	}
	return(r1);
}
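
/*
 * Illustrative sketch (not compiled): the three ways swp_pager_meta_ctl()
 * above is expected to be used by callers that hold the object token.
 * With no flags it is a pure lookup, with SWM_POP the block is removed
 * from the metadata but kept allocated (the caller now owns it), and with
 * SWM_FREE the block is removed and returned to the swap bitmap.  The
 * function name is hypothetical and exists only for this example.
 */
#if 0
static void
swp_pager_example_ctl(vm_object_t object, vm_pindex_t pindex)
{
	swblk_t blk;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/* lookup only - metadata is left intact */
	blk = swp_pager_meta_ctl(object, pindex, 0);
	if (blk == SWAPBLK_NONE)
		return;			/* no swap assigned to pindex */

	/* pop - remove the assignment but keep the swap block allocated */
	blk = swp_pager_meta_ctl(object, pindex, SWM_POP);

	/* the popped block is now the caller's responsibility */
	swp_pager_meta_build(object, pindex, blk);	/* e.g. reassign it */

	/* free - remove the assignment and release the swap block */
	(void)swp_pager_meta_ctl(object, pindex, SWM_FREE);
}
#endif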