1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 1998-2010 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Matthew Dillon <dillon@backplane.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * Copyright (c) 1994 John S. Dyson 37 * Copyright (c) 1990 University of Utah. 38 * Copyright (c) 1991, 1993 39 * The Regents of the University of California. All rights reserved. 40 * 41 * This code is derived from software contributed to Berkeley by 42 * the Systems Programming Group of the University of Utah Computer 43 * Science Department. 44 * 45 * Redistribution and use in source and binary forms, with or without 46 * modification, are permitted provided that the following conditions 47 * are met: 48 * 1. Redistributions of source code must retain the above copyright 49 * notice, this list of conditions and the following disclaimer. 50 * 2. Redistributions in binary form must reproduce the above copyright 51 * notice, this list of conditions and the following disclaimer in the 52 * documentation and/or other materials provided with the distribution. 53 * 3. Neither the name of the University nor the names of its contributors 54 * may be used to endorse or promote products derived from this software 55 * without specific prior written permission. 56 * 57 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 58 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 59 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 60 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 61 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 62 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 63 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 64 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 65 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 66 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 67 * SUCH DAMAGE. 68 * 69 * New Swap System 70 * Matthew Dillon 71 * 72 * Radix Bitmap 'blists'. 73 * 74 * - The new swapper uses the new radix bitmap code. This should scale 75 * to arbitrarily small or arbitrarily large swap spaces and an almost 76 * arbitrary degree of fragmentation. 77 * 78 * Features: 79 * 80 * - on the fly reallocation of swap during putpages. The new system 81 * does not try to keep previously allocated swap blocks for dirty 82 * pages. 83 * 84 * - on the fly deallocation of swap 85 * 86 * - No more garbage collection required. Unnecessarily allocated swap 87 * blocks only exist for dirty vm_page_t's now and these are already 88 * cycled (in a high-load system) by the pager. We also do on-the-fly 89 * removal of invalidated swap blocks when a page is destroyed 90 * or renamed. 91 * 92 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$ 93 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94 94 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $ 95 */ 96 97 #include <sys/param.h> 98 #include <sys/systm.h> 99 #include <sys/conf.h> 100 #include <sys/kernel.h> 101 #include <sys/proc.h> 102 #include <sys/buf.h> 103 #include <sys/vnode.h> 104 #include <sys/malloc.h> 105 #include <sys/vmmeter.h> 106 #include <sys/sysctl.h> 107 #include <sys/blist.h> 108 #include <sys/lock.h> 109 #include <sys/thread2.h> 110 111 #include "opt_swap.h" 112 #include <vm/vm.h> 113 #include <vm/vm_object.h> 114 #include <vm/vm_page.h> 115 #include <vm/vm_pager.h> 116 #include <vm/vm_pageout.h> 117 #include <vm/swap_pager.h> 118 #include <vm/vm_extern.h> 119 #include <vm/vm_zone.h> 120 #include <vm/vnode_pager.h> 121 122 #include <sys/buf2.h> 123 #include <vm/vm_page2.h> 124 125 #ifndef MAX_PAGEOUT_CLUSTER 126 #define MAX_PAGEOUT_CLUSTER SWB_NPAGES 127 #endif 128 129 #define SWM_FREE 0x02 /* free, period */ 130 #define SWM_POP 0x04 /* pop out */ 131 132 #define SWBIO_READ 0x01 133 #define SWBIO_WRITE 0x02 134 #define SWBIO_SYNC 0x04 135 136 struct swfreeinfo { 137 vm_object_t object; 138 vm_pindex_t basei; 139 vm_pindex_t begi; 140 vm_pindex_t endi; /* inclusive */ 141 }; 142 143 struct swswapoffinfo { 144 vm_object_t object; 145 int devidx; 146 }; 147 148 /* 149 * vm_swap_size is in page-sized chunks now. It was DEV_BSIZE'd chunks 150 * in the old system. 
151 */ 152 153 int swap_pager_full; /* swap space exhaustion (task killing) */ 154 int vm_swap_cache_use; 155 int vm_swap_anon_use; 156 static int vm_report_swap_allocs; 157 158 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/ 159 static int nsw_rcount; /* free read buffers */ 160 static int nsw_wcount_sync; /* limit write buffers / synchronous */ 161 static int nsw_wcount_async; /* limit write buffers / asynchronous */ 162 static int nsw_wcount_async_max;/* assigned maximum */ 163 static int nsw_cluster_max; /* maximum VOP I/O allowed */ 164 165 struct blist *swapblist; 166 static int swap_async_max = 4; /* maximum in-progress async I/O's */ 167 static int swap_burst_read = 0; /* allow burst reading */ 168 static swblk_t swapiterator; /* linearize allocations */ 169 170 /* from vm_swap.c */ 171 extern struct vnode *swapdev_vp; 172 extern struct swdevt *swdevt; 173 extern int nswdev; 174 175 #define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0) 176 177 SYSCTL_INT(_vm, OID_AUTO, swap_async_max, 178 CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops"); 179 SYSCTL_INT(_vm, OID_AUTO, swap_burst_read, 180 CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins"); 181 182 SYSCTL_INT(_vm, OID_AUTO, swap_cache_use, 183 CTLFLAG_RD, &vm_swap_cache_use, 0, ""); 184 SYSCTL_INT(_vm, OID_AUTO, swap_anon_use, 185 CTLFLAG_RD, &vm_swap_anon_use, 0, ""); 186 SYSCTL_INT(_vm, OID_AUTO, swap_size, 187 CTLFLAG_RD, &vm_swap_size, 0, ""); 188 SYSCTL_INT(_vm, OID_AUTO, report_swap_allocs, 189 CTLFLAG_RW, &vm_report_swap_allocs, 0, ""); 190 191 vm_zone_t swap_zone; 192 193 /* 194 * Red-Black tree for swblock entries 195 * 196 * The caller must hold vm_token 197 */ 198 RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare, 199 vm_pindex_t, swb_index); 200 201 int 202 rb_swblock_compare(struct swblock *swb1, struct swblock *swb2) 203 { 204 if (swb1->swb_index < swb2->swb_index) 205 return(-1); 206 if (swb1->swb_index > swb2->swb_index) 207 return(1); 208 return(0); 209 } 210 211 static 212 int 213 rb_swblock_scancmp(struct swblock *swb, void *data) 214 { 215 struct swfreeinfo *info = data; 216 217 if (swb->swb_index < info->basei) 218 return(-1); 219 if (swb->swb_index > info->endi) 220 return(1); 221 return(0); 222 } 223 224 static 225 int 226 rb_swblock_condcmp(struct swblock *swb, void *data) 227 { 228 struct swfreeinfo *info = data; 229 230 if (swb->swb_index < info->basei) 231 return(-1); 232 return(0); 233 } 234 235 /* 236 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure 237 * calls hooked from other parts of the VM system and do not appear here. 238 * (see vm/swap_pager.h). 239 */ 240 241 static void swap_pager_dealloc (vm_object_t object); 242 static int swap_pager_getpage (vm_object_t, vm_page_t *, int); 243 static void swap_chain_iodone(struct bio *biox); 244 245 struct pagerops swappagerops = { 246 swap_pager_dealloc, /* deallocate an OBJT_SWAP object */ 247 swap_pager_getpage, /* pagein */ 248 swap_pager_putpages, /* pageout */ 249 swap_pager_haspage /* get backing store status for page */ 250 }; 251 252 /* 253 * dmmax is in page-sized chunks with the new swap system. It was 254 * dev-bsized chunks in the old. dmmax is always a power of 2. 255 * 256 * swap_*() routines are externally accessible. swp_*() routines are 257 * internal. 
258 */ 259 260 int dmmax; 261 static int dmmax_mask; 262 int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */ 263 int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */ 264 265 static __inline void swp_sizecheck (void); 266 static void swp_pager_async_iodone (struct bio *bio); 267 268 /* 269 * Swap bitmap functions 270 */ 271 272 static __inline void swp_pager_freeswapspace(vm_object_t object, 273 swblk_t blk, int npages); 274 static __inline swblk_t swp_pager_getswapspace(vm_object_t object, int npages); 275 276 /* 277 * Metadata functions 278 */ 279 280 static void swp_pager_meta_convert(vm_object_t); 281 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t); 282 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t); 283 static void swp_pager_meta_free_all(vm_object_t); 284 static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int); 285 286 /* 287 * SWP_SIZECHECK() - update swap_pager_full indication 288 * 289 * update the swap_pager_almost_full indication and warn when we are 290 * about to run out of swap space, using lowat/hiwat hysteresis. 291 * 292 * Clear swap_pager_full ( task killing ) indication when lowat is met. 293 * 294 * No restrictions on call 295 * This routine may not block. 296 * SMP races are ok. 297 */ 298 static __inline void 299 swp_sizecheck(void) 300 { 301 if (vm_swap_size < nswap_lowat) { 302 if (swap_pager_almost_full == 0) { 303 kprintf("swap_pager: out of swap space\n"); 304 swap_pager_almost_full = 1; 305 } 306 } else { 307 swap_pager_full = 0; 308 if (vm_swap_size > nswap_hiwat) 309 swap_pager_almost_full = 0; 310 } 311 } 312 313 /* 314 * SWAP_PAGER_INIT() - initialize the swap pager! 315 * 316 * Expected to be started from system init. NOTE: This code is run 317 * before much else so be careful what you depend on. Most of the VM 318 * system has yet to be initialized at this point. 319 * 320 * Called from the low level boot code only. 321 */ 322 static void 323 swap_pager_init(void *arg __unused) 324 { 325 /* 326 * Device Stripe, in PAGE_SIZE'd blocks 327 */ 328 dmmax = SWB_NPAGES * 2; 329 dmmax_mask = ~(dmmax - 1); 330 } 331 SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL) 332 333 /* 334 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process 335 * 336 * Expected to be started from pageout process once, prior to entering 337 * its main loop. 338 * 339 * Called from the low level boot code only. 340 */ 341 void 342 swap_pager_swap_init(void) 343 { 344 int n, n2; 345 346 /* 347 * Number of in-transit swap bp operations. Don't 348 * exhaust the pbufs completely. Make sure we 349 * initialize workable values (0 will work for hysteresis 350 * but it isn't very efficient). 351 * 352 * The nsw_cluster_max is constrained by the number of pages an XIO 353 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined 354 * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are 355 * constrained by the swap device interleave stripe size. 356 * 357 * Currently we hardwire nsw_wcount_async to 4. This limit is 358 * designed to prevent other I/O from having high latencies due to 359 * our pageout I/O. The value 4 works well for one or two active swap 360 * devices but is probably a little low if you have more. Even so, 361 * a higher value would probably generate only a limited improvement 362 * with three or four active swap devices since the system does not 363 * typically have to pageout at extreme bandwidths. 
We will want 364 * at least 2 per swap devices, and 4 is a pretty good value if you 365 * have one NFS swap device due to the command/ack latency over NFS. 366 * So it all works out pretty well. 367 */ 368 369 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER); 370 371 nsw_rcount = (nswbuf + 1) / 2; 372 nsw_wcount_sync = (nswbuf + 3) / 4; 373 nsw_wcount_async = 4; 374 nsw_wcount_async_max = nsw_wcount_async; 375 376 /* 377 * The zone is dynamically allocated so generally size it to 378 * maxswzone (32MB to 512MB of KVM). Set a minimum size based 379 * on physical memory of around 8x (each swblock can hold 16 pages). 380 * 381 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio 382 * has increased dramatically. 383 */ 384 n = vmstats.v_page_count / 2; 385 if (maxswzone && n < maxswzone / sizeof(struct swblock)) 386 n = maxswzone / sizeof(struct swblock); 387 n2 = n; 388 389 do { 390 swap_zone = zinit( 391 "SWAPMETA", 392 sizeof(struct swblock), 393 n, 394 ZONE_INTERRUPT, 395 1); 396 if (swap_zone != NULL) 397 break; 398 /* 399 * if the allocation failed, try a zone two thirds the 400 * size of the previous attempt. 401 */ 402 n -= ((n + 2) / 3); 403 } while (n > 0); 404 405 if (swap_zone == NULL) 406 panic("swap_pager_swap_init: swap_zone == NULL"); 407 if (n2 != n) 408 kprintf("Swap zone entries reduced from %d to %d.\n", n2, n); 409 } 410 411 /* 412 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate 413 * its metadata structures. 414 * 415 * This routine is called from the mmap and fork code to create a new 416 * OBJT_SWAP object. We do this by creating an OBJT_DEFAULT object 417 * and then converting it with swp_pager_meta_convert(). 418 * 419 * We only support unnamed objects. 420 * 421 * No restrictions. 422 */ 423 vm_object_t 424 swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset) 425 { 426 vm_object_t object; 427 428 KKASSERT(handle == NULL); 429 object = vm_object_allocate_hold(OBJT_DEFAULT, 430 OFF_TO_IDX(offset + PAGE_MASK + size)); 431 swp_pager_meta_convert(object); 432 vm_object_drop(object); 433 434 return (object); 435 } 436 437 /* 438 * SWAP_PAGER_DEALLOC() - remove swap metadata from object 439 * 440 * The swap backing for the object is destroyed. The code is 441 * designed such that we can reinstantiate it later, but this 442 * routine is typically called only when the entire object is 443 * about to be destroyed. 444 * 445 * The object must be locked or unreferenceable. 446 * No other requirements. 447 */ 448 static void 449 swap_pager_dealloc(vm_object_t object) 450 { 451 vm_object_hold(object); 452 vm_object_pip_wait(object, "swpdea"); 453 454 /* 455 * Free all remaining metadata. We only bother to free it from 456 * the swap meta data. We do not attempt to free swapblk's still 457 * associated with vm_page_t's for this object. We do not care 458 * if paging is still in progress on some objects. 459 */ 460 swp_pager_meta_free_all(object); 461 vm_object_drop(object); 462 } 463 464 /************************************************************************ 465 * SWAP PAGER BITMAP ROUTINES * 466 ************************************************************************/ 467 468 /* 469 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space 470 * 471 * Allocate swap for the requested number of pages. The starting 472 * swap block number (a page index) is returned or SWAPBLK_NONE 473 * if the allocation failed. 
474 * 475 * Also has the side effect of advising that somebody made a mistake 476 * when they configured swap and didn't configure enough. 477 * 478 * The caller must hold the object. 479 * This routine may not block. 480 */ 481 static __inline swblk_t 482 swp_pager_getswapspace(vm_object_t object, int npages) 483 { 484 swblk_t blk; 485 486 lwkt_gettoken(&vm_token); 487 blk = blist_allocat(swapblist, npages, swapiterator); 488 if (blk == SWAPBLK_NONE) 489 blk = blist_allocat(swapblist, npages, 0); 490 if (blk == SWAPBLK_NONE) { 491 if (swap_pager_full != 2) { 492 kprintf("swap_pager_getswapspace: failed alloc=%d\n", 493 npages); 494 swap_pager_full = 2; 495 swap_pager_almost_full = 1; 496 } 497 } else { 498 /* swapiterator = blk; disable for now, doesn't work well */ 499 swapacctspace(blk, -npages); 500 if (object->type == OBJT_SWAP) 501 vm_swap_anon_use += npages; 502 else 503 vm_swap_cache_use += npages; 504 swp_sizecheck(); 505 } 506 lwkt_reltoken(&vm_token); 507 return(blk); 508 } 509 510 /* 511 * SWP_PAGER_FREESWAPSPACE() - free raw swap space 512 * 513 * This routine returns the specified swap blocks back to the bitmap. 514 * 515 * Note: This routine may not block (it could in the old swap code), 516 * and through the use of the new blist routines it does not block. 517 * 518 * We must be called at splvm() to avoid races with bitmap frees from 519 * vm_page_remove() aka swap_pager_page_removed(). 520 * 521 * This routine may not block. 522 */ 523 524 static __inline void 525 swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages) 526 { 527 struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)]; 528 529 lwkt_gettoken(&vm_token); 530 sp->sw_nused -= npages; 531 if (object->type == OBJT_SWAP) 532 vm_swap_anon_use -= npages; 533 else 534 vm_swap_cache_use -= npages; 535 536 if (sp->sw_flags & SW_CLOSING) { 537 lwkt_reltoken(&vm_token); 538 return; 539 } 540 541 blist_free(swapblist, blk, npages); 542 vm_swap_size += npages; 543 swp_sizecheck(); 544 lwkt_reltoken(&vm_token); 545 } 546 547 /* 548 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page 549 * range within an object. 550 * 551 * This is a globally accessible routine. 552 * 553 * This routine removes swapblk assignments from swap metadata. 554 * 555 * The external callers of this routine typically have already destroyed 556 * or renamed vm_page_t's associated with this range in the object so 557 * we should be ok. 558 * 559 * No requirements. 560 */ 561 void 562 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size) 563 { 564 vm_object_hold(object); 565 swp_pager_meta_free(object, start, size); 566 vm_object_drop(object); 567 } 568 569 /* 570 * No requirements. 571 */ 572 void 573 swap_pager_freespace_all(vm_object_t object) 574 { 575 vm_object_hold(object); 576 swp_pager_meta_free_all(object); 577 vm_object_drop(object); 578 } 579 580 /* 581 * This function conditionally frees swap cache swap starting at 582 * (*basei) in the object. (count) swap blocks will be nominally freed. 583 * The actual number of blocks freed can be more or less than the 584 * requested number. 585 * 586 * This function nominally returns the number of blocks freed. However, 587 * the actual number of blocks freed may be less then the returned value. 588 * If the function is unable to exhaust the object or if it is able to 589 * free (approximately) the requested number of blocks it returns 590 * a value n > count. 591 * 592 * If we exhaust the object we will return a value n <= count. 
593 * 594 * The caller must hold the object. 595 * 596 * WARNING! If count == 0 then -1 can be returned as a degenerate case, 597 * callers should always pass a count value > 0. 598 */ 599 static int swap_pager_condfree_callback(struct swblock *swap, void *data); 600 601 int 602 swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count) 603 { 604 struct swfreeinfo info; 605 int n; 606 int t; 607 608 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 609 610 info.object = object; 611 info.basei = *basei; /* skip up to this page index */ 612 info.begi = count; /* max swap pages to destroy */ 613 info.endi = count * 8; /* max swblocks to scan */ 614 615 swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp, 616 swap_pager_condfree_callback, &info); 617 *basei = info.basei; 618 619 /* 620 * Take the higher difference swblocks vs pages 621 */ 622 n = count - (int)info.begi; 623 t = count * 8 - (int)info.endi; 624 if (n < t) 625 n = t; 626 if (n < 1) 627 n = 1; 628 return(n); 629 } 630 631 /* 632 * The idea is to free whole meta-block to avoid fragmenting 633 * the swap space or disk I/O. We only do this if NO VM pages 634 * are present. 635 * 636 * We do not have to deal with clearing PG_SWAPPED in related VM 637 * pages because there are no related VM pages. 638 * 639 * The caller must hold the object. 640 */ 641 static int 642 swap_pager_condfree_callback(struct swblock *swap, void *data) 643 { 644 struct swfreeinfo *info = data; 645 vm_object_t object = info->object; 646 int i; 647 648 for (i = 0; i < SWAP_META_PAGES; ++i) { 649 if (vm_page_lookup(object, swap->swb_index + i)) 650 break; 651 } 652 info->basei = swap->swb_index + SWAP_META_PAGES; 653 if (i == SWAP_META_PAGES) { 654 info->begi -= swap->swb_count; 655 swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES); 656 } 657 --info->endi; 658 if ((int)info->begi < 0 || (int)info->endi < 0) 659 return(-1); 660 lwkt_yield(); 661 return(0); 662 } 663 664 /* 665 * Called by vm_page_alloc() when a new VM page is inserted 666 * into a VM object. Checks whether swap has been assigned to 667 * the page and sets PG_SWAPPED as necessary. 668 * 669 * No requirements. 670 */ 671 void 672 swap_pager_page_inserted(vm_page_t m) 673 { 674 if (m->object->swblock_count) { 675 vm_object_hold(m->object); 676 if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE) 677 vm_page_flag_set(m, PG_SWAPPED); 678 vm_object_drop(m->object); 679 } 680 } 681 682 /* 683 * SWAP_PAGER_RESERVE() - reserve swap blocks in object 684 * 685 * Assigns swap blocks to the specified range within the object. The 686 * swap blocks are not zerod. Any previous swap assignment is destroyed. 687 * 688 * Returns 0 on success, -1 on failure. 689 * 690 * The caller is responsible for avoiding races in the specified range. 691 * No other requirements. 
692 */ 693 int 694 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size) 695 { 696 int n = 0; 697 swblk_t blk = SWAPBLK_NONE; 698 vm_pindex_t beg = start; /* save start index */ 699 700 vm_object_hold(object); 701 702 while (size) { 703 if (n == 0) { 704 n = BLIST_MAX_ALLOC; 705 while ((blk = swp_pager_getswapspace(object, n)) == 706 SWAPBLK_NONE) 707 { 708 n >>= 1; 709 if (n == 0) { 710 swp_pager_meta_free(object, beg, 711 start - beg); 712 vm_object_drop(object); 713 return(-1); 714 } 715 } 716 } 717 swp_pager_meta_build(object, start, blk); 718 --size; 719 ++start; 720 ++blk; 721 --n; 722 } 723 swp_pager_meta_free(object, start, n); 724 vm_object_drop(object); 725 return(0); 726 } 727 728 /* 729 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager 730 * and destroy the source. 731 * 732 * Copy any valid swapblks from the source to the destination. In 733 * cases where both the source and destination have a valid swapblk, 734 * we keep the destination's. 735 * 736 * This routine is allowed to block. It may block allocating metadata 737 * indirectly through swp_pager_meta_build() or if paging is still in 738 * progress on the source. 739 * 740 * XXX vm_page_collapse() kinda expects us not to block because we 741 * supposedly do not need to allocate memory, but for the moment we 742 * *may* have to get a little memory from the zone allocator, but 743 * it is taken from the interrupt memory. We should be ok. 744 * 745 * The source object contains no vm_page_t's (which is just as well) 746 * The source object is of type OBJT_SWAP. 747 * 748 * The source and destination objects must be held by the caller. 749 */ 750 void 751 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject, 752 vm_pindex_t base_index, int destroysource) 753 { 754 vm_pindex_t i; 755 756 ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject)); 757 ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject)); 758 759 /* 760 * transfer source to destination. 761 */ 762 for (i = 0; i < dstobject->size; ++i) { 763 swblk_t dstaddr; 764 765 /* 766 * Locate (without changing) the swapblk on the destination, 767 * unless it is invalid in which case free it silently, or 768 * if the destination is a resident page, in which case the 769 * source is thrown away. 770 */ 771 dstaddr = swp_pager_meta_ctl(dstobject, i, 0); 772 773 if (dstaddr == SWAPBLK_NONE) { 774 /* 775 * Destination has no swapblk and is not resident, 776 * copy source. 777 */ 778 swblk_t srcaddr; 779 780 srcaddr = swp_pager_meta_ctl(srcobject, 781 base_index + i, SWM_POP); 782 783 if (srcaddr != SWAPBLK_NONE) 784 swp_pager_meta_build(dstobject, i, srcaddr); 785 } else { 786 /* 787 * Destination has valid swapblk or it is represented 788 * by a resident page. We destroy the sourceblock. 789 */ 790 swp_pager_meta_ctl(srcobject, base_index + i, SWM_FREE); 791 } 792 } 793 794 /* 795 * Free left over swap blocks in source. 796 * 797 * We have to revert the type to OBJT_DEFAULT so we do not accidently 798 * double-remove the object from the swap queues. 799 */ 800 if (destroysource) { 801 /* 802 * Reverting the type is not necessary, the caller is going 803 * to destroy srcobject directly, but I'm doing it here 804 * for consistency since we've removed the object from its 805 * queues. 806 */ 807 swp_pager_meta_free_all(srcobject); 808 if (srcobject->type == OBJT_SWAP) 809 srcobject->type = OBJT_DEFAULT; 810 } 811 } 812 813 /* 814 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for 815 * the requested page. 
816 * 817 * We determine whether good backing store exists for the requested 818 * page and return TRUE if it does, FALSE if it doesn't. 819 * 820 * If TRUE, we also try to determine how much valid, contiguous backing 821 * store exists before and after the requested page within a reasonable 822 * distance. We do not try to restrict it to the swap device stripe 823 * (that is handled in getpages/putpages). It probably isn't worth 824 * doing here. 825 * 826 * No requirements. 827 */ 828 boolean_t 829 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex) 830 { 831 swblk_t blk0; 832 833 /* 834 * do we have good backing store at the requested index ? 835 */ 836 vm_object_hold(object); 837 blk0 = swp_pager_meta_ctl(object, pindex, 0); 838 839 if (blk0 == SWAPBLK_NONE) { 840 vm_object_drop(object); 841 return (FALSE); 842 } 843 vm_object_drop(object); 844 return (TRUE); 845 } 846 847 /* 848 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page 849 * 850 * This removes any associated swap backing store, whether valid or 851 * not, from the page. This operates on any VM object, not just OBJT_SWAP 852 * objects. 853 * 854 * This routine is typically called when a page is made dirty, at 855 * which point any associated swap can be freed. MADV_FREE also 856 * calls us in a special-case situation 857 * 858 * NOTE!!! If the page is clean and the swap was valid, the caller 859 * should make the page dirty before calling this routine. This routine 860 * does NOT change the m->dirty status of the page. Also: MADV_FREE 861 * depends on it. 862 * 863 * The page must be busied or soft-busied. 864 * The caller can hold the object to avoid blocking, else we might block. 865 * No other requirements. 866 */ 867 void 868 swap_pager_unswapped(vm_page_t m) 869 { 870 if (m->flags & PG_SWAPPED) { 871 vm_object_hold(m->object); 872 KKASSERT(m->flags & PG_SWAPPED); 873 swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE); 874 vm_page_flag_clear(m, PG_SWAPPED); 875 vm_object_drop(m->object); 876 } 877 } 878 879 /* 880 * SWAP_PAGER_STRATEGY() - read, write, free blocks 881 * 882 * This implements a VM OBJECT strategy function using swap backing store. 883 * This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP 884 * types. 885 * 886 * This is intended to be a cacheless interface (i.e. caching occurs at 887 * higher levels), and is also used as a swap-based SSD cache for vnode 888 * and device objects. 889 * 890 * All I/O goes directly to and from the swap device. 891 * 892 * We currently attempt to run I/O synchronously or asynchronously as 893 * the caller requests. This isn't perfect because we loose error 894 * sequencing when we run multiple ops in parallel to satisfy a request. 895 * But this is swap, so we let it all hang out. 896 * 897 * No requirements. 
898 */ 899 void 900 swap_pager_strategy(vm_object_t object, struct bio *bio) 901 { 902 struct buf *bp = bio->bio_buf; 903 struct bio *nbio; 904 vm_pindex_t start; 905 vm_pindex_t biox_blkno = 0; 906 int count; 907 char *data; 908 struct bio *biox; 909 struct buf *bufx; 910 #if 0 911 struct bio_track *track; 912 #endif 913 914 #if 0 915 /* 916 * tracking for swapdev vnode I/Os 917 */ 918 if (bp->b_cmd == BUF_CMD_READ) 919 track = &swapdev_vp->v_track_read; 920 else 921 track = &swapdev_vp->v_track_write; 922 #endif 923 924 if (bp->b_bcount & PAGE_MASK) { 925 bp->b_error = EINVAL; 926 bp->b_flags |= B_ERROR | B_INVAL; 927 biodone(bio); 928 kprintf("swap_pager_strategy: bp %p offset %lld size %d, " 929 "not page bounded\n", 930 bp, (long long)bio->bio_offset, (int)bp->b_bcount); 931 return; 932 } 933 934 /* 935 * Clear error indication, initialize page index, count, data pointer. 936 */ 937 bp->b_error = 0; 938 bp->b_flags &= ~B_ERROR; 939 bp->b_resid = bp->b_bcount; 940 941 start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT); 942 count = howmany(bp->b_bcount, PAGE_SIZE); 943 data = bp->b_data; 944 945 /* 946 * Deal with BUF_CMD_FREEBLKS 947 */ 948 if (bp->b_cmd == BUF_CMD_FREEBLKS) { 949 /* 950 * FREE PAGE(s) - destroy underlying swap that is no longer 951 * needed. 952 */ 953 vm_object_hold(object); 954 swp_pager_meta_free(object, start, count); 955 vm_object_drop(object); 956 bp->b_resid = 0; 957 biodone(bio); 958 return; 959 } 960 961 /* 962 * We need to be able to create a new cluster of I/O's. We cannot 963 * use the caller fields of the passed bio so push a new one. 964 * 965 * Because nbio is just a placeholder for the cluster links, 966 * we can biodone() the original bio instead of nbio to make 967 * things a bit more efficient. 968 */ 969 nbio = push_bio(bio); 970 nbio->bio_offset = bio->bio_offset; 971 nbio->bio_caller_info1.cluster_head = NULL; 972 nbio->bio_caller_info2.cluster_tail = NULL; 973 974 biox = NULL; 975 bufx = NULL; 976 977 /* 978 * Execute read or write 979 */ 980 vm_object_hold(object); 981 982 while (count > 0) { 983 swblk_t blk; 984 985 /* 986 * Obtain block. If block not found and writing, allocate a 987 * new block and build it into the object. 988 */ 989 blk = swp_pager_meta_ctl(object, start, 0); 990 if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) { 991 blk = swp_pager_getswapspace(object, 1); 992 if (blk == SWAPBLK_NONE) { 993 bp->b_error = ENOMEM; 994 bp->b_flags |= B_ERROR; 995 break; 996 } 997 swp_pager_meta_build(object, start, blk); 998 } 999 1000 /* 1001 * Do we have to flush our current collection? Yes if: 1002 * 1003 * - no swap block at this index 1004 * - swap block is not contiguous 1005 * - we cross a physical disk boundry in the 1006 * stripe. 1007 */ 1008 if ( 1009 biox && (biox_blkno + btoc(bufx->b_bcount) != blk || 1010 ((biox_blkno ^ blk) & dmmax_mask) 1011 ) 1012 ) { 1013 if (bp->b_cmd == BUF_CMD_READ) { 1014 ++mycpu->gd_cnt.v_swapin; 1015 mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount); 1016 } else { 1017 ++mycpu->gd_cnt.v_swapout; 1018 mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount); 1019 bufx->b_dirtyend = bufx->b_bcount; 1020 } 1021 1022 /* 1023 * Finished with this buf. 1024 */ 1025 KKASSERT(bufx->b_bcount != 0); 1026 if (bufx->b_cmd != BUF_CMD_READ) 1027 bufx->b_dirtyend = bufx->b_bcount; 1028 biox = NULL; 1029 bufx = NULL; 1030 } 1031 1032 /* 1033 * Add new swapblk to biox, instantiating biox if necessary. 1034 * Zero-fill reads are able to take a shortcut. 
1035 */ 1036 if (blk == SWAPBLK_NONE) { 1037 /* 1038 * We can only get here if we are reading. Since 1039 * we are at splvm() we can safely modify b_resid, 1040 * even if chain ops are in progress. 1041 */ 1042 bzero(data, PAGE_SIZE); 1043 bp->b_resid -= PAGE_SIZE; 1044 } else { 1045 if (biox == NULL) { 1046 /* XXX chain count > 4, wait to <= 4 */ 1047 1048 bufx = getpbuf(NULL); 1049 biox = &bufx->b_bio1; 1050 cluster_append(nbio, bufx); 1051 bufx->b_flags |= (bp->b_flags & B_ORDERED); 1052 bufx->b_cmd = bp->b_cmd; 1053 biox->bio_done = swap_chain_iodone; 1054 biox->bio_offset = (off_t)blk << PAGE_SHIFT; 1055 biox->bio_caller_info1.cluster_parent = nbio; 1056 biox_blkno = blk; 1057 bufx->b_bcount = 0; 1058 bufx->b_data = data; 1059 } 1060 bufx->b_bcount += PAGE_SIZE; 1061 } 1062 --count; 1063 ++start; 1064 data += PAGE_SIZE; 1065 } 1066 1067 vm_object_drop(object); 1068 1069 /* 1070 * Flush out last buffer 1071 */ 1072 if (biox) { 1073 if (bufx->b_cmd == BUF_CMD_READ) { 1074 ++mycpu->gd_cnt.v_swapin; 1075 mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount); 1076 } else { 1077 ++mycpu->gd_cnt.v_swapout; 1078 mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount); 1079 bufx->b_dirtyend = bufx->b_bcount; 1080 } 1081 KKASSERT(bufx->b_bcount); 1082 if (bufx->b_cmd != BUF_CMD_READ) 1083 bufx->b_dirtyend = bufx->b_bcount; 1084 /* biox, bufx = NULL */ 1085 } 1086 1087 /* 1088 * Now initiate all the I/O. Be careful looping on our chain as 1089 * I/O's may complete while we are still initiating them. 1090 * 1091 * If the request is a 100% sparse read no bios will be present 1092 * and we just biodone() the buffer. 1093 */ 1094 nbio->bio_caller_info2.cluster_tail = NULL; 1095 bufx = nbio->bio_caller_info1.cluster_head; 1096 1097 if (bufx) { 1098 while (bufx) { 1099 biox = &bufx->b_bio1; 1100 BUF_KERNPROC(bufx); 1101 bufx = bufx->b_cluster_next; 1102 vn_strategy(swapdev_vp, biox); 1103 } 1104 } else { 1105 biodone(bio); 1106 } 1107 1108 /* 1109 * Completion of the cluster will also call biodone_chain(nbio). 1110 * We never call biodone(nbio) so we don't have to worry about 1111 * setting up a bio_done callback. It's handled in the sub-IO. 1112 */ 1113 /**/ 1114 } 1115 1116 /* 1117 * biodone callback 1118 * 1119 * No requirements. 1120 */ 1121 static void 1122 swap_chain_iodone(struct bio *biox) 1123 { 1124 struct buf **nextp; 1125 struct buf *bufx; /* chained sub-buffer */ 1126 struct bio *nbio; /* parent nbio with chain glue */ 1127 struct buf *bp; /* original bp associated with nbio */ 1128 int chain_empty; 1129 1130 bufx = biox->bio_buf; 1131 nbio = biox->bio_caller_info1.cluster_parent; 1132 bp = nbio->bio_buf; 1133 1134 /* 1135 * Update the original buffer 1136 */ 1137 KKASSERT(bp != NULL); 1138 if (bufx->b_flags & B_ERROR) { 1139 atomic_set_int(&bufx->b_flags, B_ERROR); 1140 bp->b_error = bufx->b_error; /* race ok */ 1141 } else if (bufx->b_resid != 0) { 1142 atomic_set_int(&bufx->b_flags, B_ERROR); 1143 bp->b_error = EINVAL; /* race ok */ 1144 } else { 1145 atomic_subtract_int(&bp->b_resid, bufx->b_bcount); 1146 } 1147 1148 /* 1149 * Remove us from the chain. 1150 */ 1151 spin_lock(&bp->b_lock.lk_spinlock); 1152 nextp = &nbio->bio_caller_info1.cluster_head; 1153 while (*nextp != bufx) { 1154 KKASSERT(*nextp != NULL); 1155 nextp = &(*nextp)->b_cluster_next; 1156 } 1157 *nextp = bufx->b_cluster_next; 1158 chain_empty = (nbio->bio_caller_info1.cluster_head == NULL); 1159 spin_unlock(&bp->b_lock.lk_spinlock); 1160 1161 /* 1162 * Clean up bufx. If the chain is now empty we finish out 1163 * the parent. 
Note that we may be racing other completions 1164 * so we must use the chain_empty status from above. 1165 */ 1166 if (chain_empty) { 1167 if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) { 1168 atomic_set_int(&bp->b_flags, B_ERROR); 1169 bp->b_error = EINVAL; 1170 } 1171 biodone_chain(nbio); 1172 } 1173 relpbuf(bufx, NULL); 1174 } 1175 1176 /* 1177 * SWAP_PAGER_GETPAGES() - bring page in from swap 1178 * 1179 * The requested page may have to be brought in from swap. Calculate the 1180 * swap block and bring in additional pages if possible. All pages must 1181 * have contiguous swap block assignments and reside in the same object. 1182 * 1183 * The caller has a single vm_object_pip_add() reference prior to 1184 * calling us and we should return with the same. 1185 * 1186 * The caller has BUSY'd the page. We should return with (*mpp) left busy, 1187 * and any additinal pages unbusied. 1188 * 1189 * If the caller encounters a PG_RAM page it will pass it to us even though 1190 * it may be valid and dirty. We cannot overwrite the page in this case! 1191 * The case is used to allow us to issue pure read-aheads. 1192 * 1193 * NOTE! XXX This code does not entirely pipeline yet due to the fact that 1194 * the PG_RAM page is validated at the same time as mreq. What we 1195 * really need to do is issue a separate read-ahead pbuf. 1196 * 1197 * No requirements. 1198 */ 1199 static int 1200 swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess) 1201 { 1202 struct buf *bp; 1203 struct bio *bio; 1204 vm_page_t mreq; 1205 vm_page_t m; 1206 vm_offset_t kva; 1207 swblk_t blk; 1208 int i; 1209 int j; 1210 int raonly; 1211 int error; 1212 u_int32_t flags; 1213 vm_page_t marray[XIO_INTERNAL_PAGES]; 1214 1215 mreq = *mpp; 1216 1217 vm_object_hold(object); 1218 if (mreq->object != object) { 1219 panic("swap_pager_getpages: object mismatch %p/%p", 1220 object, 1221 mreq->object 1222 ); 1223 } 1224 1225 /* 1226 * We don't want to overwrite a fully valid page as it might be 1227 * dirty. This case can occur when e.g. vm_fault hits a perfectly 1228 * valid page with PG_RAM set. 1229 * 1230 * In this case we see if the next page is a suitable page-in 1231 * candidate and if it is we issue read-ahead. PG_RAM will be 1232 * set on the last page of the read-ahead to continue the pipeline. 1233 */ 1234 if (mreq->valid == VM_PAGE_BITS_ALL) { 1235 if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size) { 1236 vm_object_drop(object); 1237 return(VM_PAGER_OK); 1238 } 1239 blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0); 1240 if (blk == SWAPBLK_NONE) { 1241 vm_object_drop(object); 1242 return(VM_PAGER_OK); 1243 } 1244 m = vm_page_lookup_busy_try(object, mreq->pindex + 1, 1245 TRUE, &error); 1246 if (error) { 1247 vm_object_drop(object); 1248 return(VM_PAGER_OK); 1249 } else if (m == NULL) { 1250 /* 1251 * Use VM_ALLOC_QUICK to avoid blocking on cache 1252 * page reuse. 1253 */ 1254 m = vm_page_alloc(object, mreq->pindex + 1, 1255 VM_ALLOC_QUICK); 1256 if (m == NULL) { 1257 vm_object_drop(object); 1258 return(VM_PAGER_OK); 1259 } 1260 } else { 1261 if (m->valid) { 1262 vm_page_wakeup(m); 1263 vm_object_drop(object); 1264 return(VM_PAGER_OK); 1265 } 1266 vm_page_unqueue_nowakeup(m); 1267 } 1268 /* page is busy */ 1269 mreq = m; 1270 raonly = 1; 1271 } else { 1272 raonly = 0; 1273 } 1274 1275 /* 1276 * Try to block-read contiguous pages from swap if sequential, 1277 * otherwise just read one page. 
Contiguous pages from swap must 1278 * reside within a single device stripe because the I/O cannot be 1279 * broken up across multiple stripes. 1280 * 1281 * Note that blk and iblk can be SWAPBLK_NONE but the loop is 1282 * set up such that the case(s) are handled implicitly. 1283 */ 1284 blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0); 1285 marray[0] = mreq; 1286 1287 for (i = 1; swap_burst_read && 1288 i < XIO_INTERNAL_PAGES && 1289 mreq->pindex + i < object->size; ++i) { 1290 swblk_t iblk; 1291 1292 iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0); 1293 if (iblk != blk + i) 1294 break; 1295 if ((blk ^ iblk) & dmmax_mask) 1296 break; 1297 m = vm_page_lookup_busy_try(object, mreq->pindex + i, 1298 TRUE, &error); 1299 if (error) { 1300 break; 1301 } else if (m == NULL) { 1302 /* 1303 * Use VM_ALLOC_QUICK to avoid blocking on cache 1304 * page reuse. 1305 */ 1306 m = vm_page_alloc(object, mreq->pindex + i, 1307 VM_ALLOC_QUICK); 1308 if (m == NULL) 1309 break; 1310 } else { 1311 if (m->valid) { 1312 vm_page_wakeup(m); 1313 break; 1314 } 1315 vm_page_unqueue_nowakeup(m); 1316 } 1317 /* page is busy */ 1318 marray[i] = m; 1319 } 1320 if (i > 1) 1321 vm_page_flag_set(marray[i - 1], PG_RAM); 1322 1323 /* 1324 * If mreq is the requested page and we have nothing to do return 1325 * VM_PAGER_FAIL. If raonly is set mreq is just another read-ahead 1326 * page and must be cleaned up. 1327 */ 1328 if (blk == SWAPBLK_NONE) { 1329 KKASSERT(i == 1); 1330 if (raonly) { 1331 vnode_pager_freepage(mreq); 1332 vm_object_drop(object); 1333 return(VM_PAGER_OK); 1334 } else { 1335 vm_object_drop(object); 1336 return(VM_PAGER_FAIL); 1337 } 1338 } 1339 1340 /* 1341 * map our page(s) into kva for input 1342 */ 1343 bp = getpbuf_kva(&nsw_rcount); 1344 bio = &bp->b_bio1; 1345 kva = (vm_offset_t) bp->b_kvabase; 1346 bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t)); 1347 pmap_qenter(kva, bp->b_xio.xio_pages, i); 1348 1349 bp->b_data = (caddr_t)kva; 1350 bp->b_bcount = PAGE_SIZE * i; 1351 bp->b_xio.xio_npages = i; 1352 bio->bio_done = swp_pager_async_iodone; 1353 bio->bio_offset = (off_t)blk << PAGE_SHIFT; 1354 bio->bio_caller_info1.index = SWBIO_READ; 1355 1356 /* 1357 * Set index. If raonly set the index beyond the array so all 1358 * the pages are treated the same, otherwise the original mreq is 1359 * at index 0. 1360 */ 1361 if (raonly) 1362 bio->bio_driver_info = (void *)(intptr_t)i; 1363 else 1364 bio->bio_driver_info = (void *)(intptr_t)0; 1365 1366 for (j = 0; j < i; ++j) 1367 vm_page_flag_set(bp->b_xio.xio_pages[j], PG_SWAPINPROG); 1368 1369 mycpu->gd_cnt.v_swapin++; 1370 mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages; 1371 1372 /* 1373 * We still hold the lock on mreq, and our automatic completion routine 1374 * does not remove it. 1375 */ 1376 vm_object_pip_add(object, bp->b_xio.xio_npages); 1377 1378 /* 1379 * perform the I/O. NOTE!!! bp cannot be considered valid after 1380 * this point because we automatically release it on completion. 1381 * Instead, we look at the one page we are interested in which we 1382 * still hold a lock on even through the I/O completion. 1383 * 1384 * The other pages in our m[] array are also released on completion, 1385 * so we cannot assume they are valid anymore either. 1386 */ 1387 bp->b_cmd = BUF_CMD_READ; 1388 BUF_KERNPROC(bp); 1389 vn_strategy(swapdev_vp, bio); 1390 1391 /* 1392 * Wait for the page we want to complete. PG_SWAPINPROG is always 1393 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE 1394 * is set in the meta-data. 
1395 * 1396 * If this is a read-ahead only we return immediately without 1397 * waiting for I/O. 1398 */ 1399 if (raonly) { 1400 vm_object_drop(object); 1401 return(VM_PAGER_OK); 1402 } 1403 1404 /* 1405 * Read-ahead includes originally requested page case. 1406 */ 1407 for (;;) { 1408 flags = mreq->flags; 1409 cpu_ccfence(); 1410 if ((flags & PG_SWAPINPROG) == 0) 1411 break; 1412 tsleep_interlock(mreq, 0); 1413 if (!atomic_cmpset_int(&mreq->flags, flags, 1414 flags | PG_WANTED | PG_REFERENCED)) { 1415 continue; 1416 } 1417 mycpu->gd_cnt.v_intrans++; 1418 if (tsleep(mreq, PINTERLOCKED, "swread", hz*20)) { 1419 kprintf( 1420 "swap_pager: indefinite wait buffer: " 1421 " offset: %lld, size: %ld\n", 1422 (long long)bio->bio_offset, 1423 (long)bp->b_bcount 1424 ); 1425 } 1426 } 1427 1428 /* 1429 * mreq is left bussied after completion, but all the other pages 1430 * are freed. If we had an unrecoverable read error the page will 1431 * not be valid. 1432 */ 1433 vm_object_drop(object); 1434 if (mreq->valid != VM_PAGE_BITS_ALL) 1435 return(VM_PAGER_ERROR); 1436 else 1437 return(VM_PAGER_OK); 1438 1439 /* 1440 * A final note: in a low swap situation, we cannot deallocate swap 1441 * and mark a page dirty here because the caller is likely to mark 1442 * the page clean when we return, causing the page to possibly revert 1443 * to all-zero's later. 1444 */ 1445 } 1446 1447 /* 1448 * swap_pager_putpages: 1449 * 1450 * Assign swap (if necessary) and initiate I/O on the specified pages. 1451 * 1452 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects 1453 * are automatically converted to SWAP objects. 1454 * 1455 * In a low memory situation we may block in vn_strategy(), but the new 1456 * vm_page reservation system coupled with properly written VFS devices 1457 * should ensure that no low-memory deadlock occurs. This is an area 1458 * which needs work. 1459 * 1460 * The parent has N vm_object_pip_add() references prior to 1461 * calling us and will remove references for rtvals[] that are 1462 * not set to VM_PAGER_PEND. We need to remove the rest on I/O 1463 * completion. 1464 * 1465 * The parent has soft-busy'd the pages it passes us and will unbusy 1466 * those whos rtvals[] entry is not set to VM_PAGER_PEND on return. 1467 * We need to unbusy the rest on I/O completion. 1468 * 1469 * No requirements. 1470 */ 1471 void 1472 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count, 1473 boolean_t sync, int *rtvals) 1474 { 1475 int i; 1476 int n = 0; 1477 1478 vm_object_hold(object); 1479 1480 if (count && m[0]->object != object) { 1481 panic("swap_pager_getpages: object mismatch %p/%p", 1482 object, 1483 m[0]->object 1484 ); 1485 } 1486 1487 /* 1488 * Step 1 1489 * 1490 * Turn object into OBJT_SWAP 1491 * check for bogus sysops 1492 * force sync if not pageout process 1493 */ 1494 if (object->type == OBJT_DEFAULT) { 1495 if (object->type == OBJT_DEFAULT) 1496 swp_pager_meta_convert(object); 1497 } 1498 1499 if (curthread != pagethread) 1500 sync = TRUE; 1501 1502 /* 1503 * Step 2 1504 * 1505 * Update nsw parameters from swap_async_max sysctl values. 1506 * Do not let the sysop crash the machine with bogus numbers. 1507 */ 1508 if (swap_async_max != nsw_wcount_async_max) { 1509 int n; 1510 1511 /* 1512 * limit range 1513 */ 1514 if ((n = swap_async_max) > nswbuf / 2) 1515 n = nswbuf / 2; 1516 if (n < 1) 1517 n = 1; 1518 swap_async_max = n; 1519 1520 /* 1521 * Adjust difference ( if possible ). 
If the current async 1522 * count is too low, we may not be able to make the adjustment 1523 * at this time. 1524 * 1525 * vm_token needed for nsw_wcount sleep interlock 1526 */ 1527 lwkt_gettoken(&vm_token); 1528 n -= nsw_wcount_async_max; 1529 if (nsw_wcount_async + n >= 0) { 1530 nsw_wcount_async_max += n; 1531 pbuf_adjcount(&nsw_wcount_async, n); 1532 } 1533 lwkt_reltoken(&vm_token); 1534 } 1535 1536 /* 1537 * Step 3 1538 * 1539 * Assign swap blocks and issue I/O. We reallocate swap on the fly. 1540 * The page is left dirty until the pageout operation completes 1541 * successfully. 1542 */ 1543 1544 for (i = 0; i < count; i += n) { 1545 struct buf *bp; 1546 struct bio *bio; 1547 swblk_t blk; 1548 int j; 1549 1550 /* 1551 * Maximum I/O size is limited by a number of factors. 1552 */ 1553 1554 n = min(BLIST_MAX_ALLOC, count - i); 1555 n = min(n, nsw_cluster_max); 1556 1557 lwkt_gettoken(&vm_token); 1558 1559 /* 1560 * Get biggest block of swap we can. If we fail, fall 1561 * back and try to allocate a smaller block. Don't go 1562 * overboard trying to allocate space if it would overly 1563 * fragment swap. 1564 */ 1565 while ( 1566 (blk = swp_pager_getswapspace(object, n)) == SWAPBLK_NONE && 1567 n > 4 1568 ) { 1569 n >>= 1; 1570 } 1571 if (blk == SWAPBLK_NONE) { 1572 for (j = 0; j < n; ++j) 1573 rtvals[i+j] = VM_PAGER_FAIL; 1574 lwkt_reltoken(&vm_token); 1575 continue; 1576 } 1577 if (vm_report_swap_allocs > 0) { 1578 kprintf("swap_alloc %08jx,%d\n", (intmax_t)blk, n); 1579 --vm_report_swap_allocs; 1580 } 1581 1582 /* 1583 * The I/O we are constructing cannot cross a physical 1584 * disk boundry in the swap stripe. Note: we are still 1585 * at splvm(). 1586 */ 1587 if ((blk ^ (blk + n)) & dmmax_mask) { 1588 j = ((blk + dmmax) & dmmax_mask) - blk; 1589 swp_pager_freeswapspace(object, blk + j, n - j); 1590 n = j; 1591 } 1592 1593 /* 1594 * All I/O parameters have been satisfied, build the I/O 1595 * request and assign the swap space. 1596 */ 1597 if (sync == TRUE) 1598 bp = getpbuf_kva(&nsw_wcount_sync); 1599 else 1600 bp = getpbuf_kva(&nsw_wcount_async); 1601 bio = &bp->b_bio1; 1602 1603 lwkt_reltoken(&vm_token); 1604 1605 pmap_qenter((vm_offset_t)bp->b_data, &m[i], n); 1606 1607 bp->b_bcount = PAGE_SIZE * n; 1608 bio->bio_offset = (off_t)blk << PAGE_SHIFT; 1609 1610 for (j = 0; j < n; ++j) { 1611 vm_page_t mreq = m[i+j]; 1612 1613 swp_pager_meta_build(mreq->object, mreq->pindex, 1614 blk + j); 1615 if (object->type == OBJT_SWAP) 1616 vm_page_dirty(mreq); 1617 rtvals[i+j] = VM_PAGER_OK; 1618 1619 vm_page_flag_set(mreq, PG_SWAPINPROG); 1620 bp->b_xio.xio_pages[j] = mreq; 1621 } 1622 bp->b_xio.xio_npages = n; 1623 1624 mycpu->gd_cnt.v_swapout++; 1625 mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages; 1626 1627 bp->b_dirtyoff = 0; /* req'd for NFS */ 1628 bp->b_dirtyend = bp->b_bcount; /* req'd for NFS */ 1629 bp->b_cmd = BUF_CMD_WRITE; 1630 bio->bio_caller_info1.index = SWBIO_WRITE; 1631 1632 /* 1633 * asynchronous 1634 */ 1635 if (sync == FALSE) { 1636 bio->bio_done = swp_pager_async_iodone; 1637 BUF_KERNPROC(bp); 1638 vn_strategy(swapdev_vp, bio); 1639 1640 for (j = 0; j < n; ++j) 1641 rtvals[i+j] = VM_PAGER_PEND; 1642 continue; 1643 } 1644 1645 /* 1646 * Issue synchrnously. 1647 * 1648 * Wait for the sync I/O to complete, then update rtvals. 1649 * We just set the rtvals[] to VM_PAGER_PEND so we can call 1650 * our async completion routine at the end, thus avoiding a 1651 * double-free. 
1652 */ 1653 bio->bio_caller_info1.index |= SWBIO_SYNC; 1654 bio->bio_done = biodone_sync; 1655 bio->bio_flags |= BIO_SYNC; 1656 vn_strategy(swapdev_vp, bio); 1657 biowait(bio, "swwrt"); 1658 1659 for (j = 0; j < n; ++j) 1660 rtvals[i+j] = VM_PAGER_PEND; 1661 1662 /* 1663 * Now that we are through with the bp, we can call the 1664 * normal async completion, which frees everything up. 1665 */ 1666 swp_pager_async_iodone(bio); 1667 } 1668 vm_object_drop(object); 1669 } 1670 1671 /* 1672 * No requirements. 1673 */ 1674 void 1675 swap_pager_newswap(void) 1676 { 1677 swp_sizecheck(); 1678 } 1679 1680 /* 1681 * swp_pager_async_iodone: 1682 * 1683 * Completion routine for asynchronous reads and writes from/to swap. 1684 * Also called manually by synchronous code to finish up a bp. 1685 * 1686 * For READ operations, the pages are PG_BUSY'd. For WRITE operations, 1687 * the pages are vm_page_t->busy'd. For READ operations, we PG_BUSY 1688 * unbusy all pages except the 'main' request page. For WRITE 1689 * operations, we vm_page_t->busy'd unbusy all pages ( we can do this 1690 * because we marked them all VM_PAGER_PEND on return from putpages ). 1691 * 1692 * This routine may not block. 1693 * 1694 * No requirements. 1695 */ 1696 static void 1697 swp_pager_async_iodone(struct bio *bio) 1698 { 1699 struct buf *bp = bio->bio_buf; 1700 vm_object_t object = NULL; 1701 int i; 1702 int *nswptr; 1703 1704 /* 1705 * report error 1706 */ 1707 if (bp->b_flags & B_ERROR) { 1708 kprintf( 1709 "swap_pager: I/O error - %s failed; offset %lld," 1710 "size %ld, error %d\n", 1711 ((bio->bio_caller_info1.index & SWBIO_READ) ? 1712 "pagein" : "pageout"), 1713 (long long)bio->bio_offset, 1714 (long)bp->b_bcount, 1715 bp->b_error 1716 ); 1717 } 1718 1719 /* 1720 * set object, raise to splvm(). 1721 */ 1722 if (bp->b_xio.xio_npages) 1723 object = bp->b_xio.xio_pages[0]->object; 1724 1725 /* 1726 * remove the mapping for kernel virtual 1727 */ 1728 pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages); 1729 1730 /* 1731 * cleanup pages. If an error occurs writing to swap, we are in 1732 * very serious trouble. If it happens to be a disk error, though, 1733 * we may be able to recover by reassigning the swap later on. So 1734 * in this case we remove the m->swapblk assignment for the page 1735 * but do not free it in the rlist. The errornous block(s) are thus 1736 * never reallocated as swap. Redirty the page and continue. 1737 */ 1738 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 1739 vm_page_t m = bp->b_xio.xio_pages[i]; 1740 1741 if (bp->b_flags & B_ERROR) { 1742 /* 1743 * If an error occurs I'd love to throw the swapblk 1744 * away without freeing it back to swapspace, so it 1745 * can never be used again. But I can't from an 1746 * interrupt. 1747 */ 1748 1749 if (bio->bio_caller_info1.index & SWBIO_READ) { 1750 /* 1751 * When reading, reqpage needs to stay 1752 * locked for the parent, but all other 1753 * pages can be freed. We still want to 1754 * wakeup the parent waiting on the page, 1755 * though. ( also: pg_reqpage can be -1 and 1756 * not match anything ). 1757 * 1758 * We have to wake specifically requested pages 1759 * up too because we cleared PG_SWAPINPROG and 1760 * someone may be waiting for that. 1761 * 1762 * NOTE: for reads, m->dirty will probably 1763 * be overridden by the original caller of 1764 * getpages so don't play cute tricks here. 1765 * 1766 * NOTE: We can't actually free the page from 1767 * here, because this is an interrupt. 
It 1768 * is not legal to mess with object->memq 1769 * from an interrupt. Deactivate the page 1770 * instead. 1771 */ 1772 1773 m->valid = 0; 1774 vm_page_flag_clear(m, PG_ZERO); 1775 vm_page_flag_clear(m, PG_SWAPINPROG); 1776 1777 /* 1778 * bio_driver_info holds the requested page 1779 * index. 1780 */ 1781 if (i != (int)(intptr_t)bio->bio_driver_info) { 1782 vm_page_deactivate(m); 1783 vm_page_wakeup(m); 1784 } else { 1785 vm_page_flash(m); 1786 } 1787 /* 1788 * If i == bp->b_pager.pg_reqpage, do not wake 1789 * the page up. The caller needs to. 1790 */ 1791 } else { 1792 /* 1793 * If a write error occurs remove the swap 1794 * assignment (note that PG_SWAPPED may or 1795 * may not be set depending on prior activity). 1796 * 1797 * Re-dirty OBJT_SWAP pages as there is no 1798 * other backing store, we can't throw the 1799 * page away. 1800 * 1801 * Non-OBJT_SWAP pages (aka swapcache) must 1802 * not be dirtied since they may not have 1803 * been dirty in the first place, and they 1804 * do have backing store (the vnode). 1805 */ 1806 vm_page_busy_wait(m, FALSE, "swadpg"); 1807 swp_pager_meta_ctl(m->object, m->pindex, 1808 SWM_FREE); 1809 vm_page_flag_clear(m, PG_SWAPPED); 1810 if (m->object->type == OBJT_SWAP) { 1811 vm_page_dirty(m); 1812 vm_page_activate(m); 1813 } 1814 vm_page_flag_clear(m, PG_SWAPINPROG); 1815 vm_page_io_finish(m); 1816 vm_page_wakeup(m); 1817 } 1818 } else if (bio->bio_caller_info1.index & SWBIO_READ) { 1819 /* 1820 * NOTE: for reads, m->dirty will probably be 1821 * overridden by the original caller of getpages so 1822 * we cannot set them in order to free the underlying 1823 * swap in a low-swap situation. I don't think we'd 1824 * want to do that anyway, but it was an optimization 1825 * that existed in the old swapper for a time before 1826 * it got ripped out due to precisely this problem. 1827 * 1828 * clear PG_ZERO in page. 1829 * 1830 * If not the requested page then deactivate it. 1831 * 1832 * Note that the requested page, reqpage, is left 1833 * busied, but we still have to wake it up. The 1834 * other pages are released (unbusied) by 1835 * vm_page_wakeup(). We do not set reqpage's 1836 * valid bits here, it is up to the caller. 1837 */ 1838 1839 /* 1840 * NOTE: can't call pmap_clear_modify(m) from an 1841 * interrupt thread, the pmap code may have to map 1842 * non-kernel pmaps and currently asserts the case. 1843 */ 1844 /*pmap_clear_modify(m);*/ 1845 m->valid = VM_PAGE_BITS_ALL; 1846 vm_page_undirty(m); 1847 vm_page_flag_clear(m, PG_ZERO | PG_SWAPINPROG); 1848 vm_page_flag_set(m, PG_SWAPPED); 1849 1850 /* 1851 * We have to wake specifically requested pages 1852 * up too because we cleared PG_SWAPINPROG and 1853 * could be waiting for it in getpages. However, 1854 * be sure to not unbusy getpages specifically 1855 * requested page - getpages expects it to be 1856 * left busy. 1857 * 1858 * bio_driver_info holds the requested page 1859 */ 1860 if (i != (int)(intptr_t)bio->bio_driver_info) { 1861 vm_page_deactivate(m); 1862 vm_page_wakeup(m); 1863 } else { 1864 vm_page_flash(m); 1865 } 1866 } else { 1867 /* 1868 * Mark the page clean but do not mess with the 1869 * pmap-layer's modified state. That state should 1870 * also be clear since the caller protected the 1871 * page VM_PROT_READ, but allow the case. 1872 * 1873 * We are in an interrupt, avoid pmap operations. 1874 * 1875 * If we have a severe page deficit, deactivate the 1876 * page. 
			 * Do not try to cache it (which would also
			 * involve a pmap op), because the page might still
			 * be read-heavy.
			 *
			 * When using the swap to cache clean vnode pages
			 * we do not mess with the page dirty bits.
			 */
			vm_page_busy_wait(m, FALSE, "swadpg");
			if (m->object->type == OBJT_SWAP)
				vm_page_undirty(m);
			vm_page_flag_clear(m, PG_SWAPINPROG);
			vm_page_flag_set(m, PG_SWAPPED);
			if (vm_page_count_severe())
				vm_page_deactivate(m);
#if 0
			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
				vm_page_protect(m, VM_PROT_READ);
#endif
			vm_page_io_finish(m);
			vm_page_wakeup(m);
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */
	if (object)
		vm_object_pip_wakeup_n(object, bp->b_xio.xio_npages);

	/*
	 * Release the physical I/O buffer.
	 *
	 * NOTE: Due to synchronous operations in the write case b_cmd may
	 *	 already be set to BUF_CMD_DONE and BIO_SYNC may have already
	 *	 been cleared.
	 *
	 * Use vm_token to interlock nsw_rcount/wcount wakeup?
	 */
	lwkt_gettoken(&vm_token);
	if (bio->bio_caller_info1.index & SWBIO_READ)
		nswptr = &nsw_rcount;
	else if (bio->bio_caller_info1.index & SWBIO_SYNC)
		nswptr = &nsw_wcount_sync;
	else
		nswptr = &nsw_wcount_async;
	bp->b_cmd = BUF_CMD_DONE;
	relpbuf(bp, nswptr);
	lwkt_reltoken(&vm_token);
}

/*
 * Fault-in a potentially swapped page and remove the swap reference.
 * (used by swapoff code)
 *
 * object must be held.
 */
static __inline void
swp_pager_fault_page(vm_object_t object, vm_pindex_t pindex)
{
	struct vnode *vp;
	vm_page_t m;
	int error;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	if (object->type == OBJT_VNODE) {
		/*
		 * Any swap related to a vnode is due to swapcache.  We must
		 * vget() the vnode in case it is not active (otherwise
		 * vref() will panic).  Calling vm_object_page_remove() will
		 * ensure that any swap ref is removed interlocked with the
		 * page.  clean_only is set to TRUE so we don't throw away
		 * dirty pages.
		 */
		vp = object->handle;
		error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
		if (error == 0) {
			vm_object_page_remove(object, pindex, pindex + 1, TRUE);
			vput(vp);
		}
	} else {
		/*
		 * Otherwise it is a normal OBJT_SWAP object and we can
		 * fault the page in and remove the swap.
		 */
		m = vm_fault_object_page(object, IDX_TO_OFF(pindex),
					 VM_PROT_NONE,
					 VM_FAULT_DIRTY | VM_FAULT_UNSWAP,
					 0, &error);
		if (m)
			vm_page_unhold(m);
	}
}

/*
 * This removes all swap blocks related to a particular device.  We have
 * to be careful of ripups during the scan.
 */
static int swp_pager_swapoff_callback(struct swblock *swap, void *data);

int
swap_pager_swapoff(int devidx)
{
	struct vm_object marker;
	vm_object_t object;
	struct swswapoffinfo info;

	bzero(&marker, sizeof(marker));
	marker.type = OBJT_MARKER;

	lwkt_gettoken(&vmobj_token);
	TAILQ_INSERT_HEAD(&vm_object_list, &marker, object_list);

	while ((object = TAILQ_NEXT(&marker, object_list)) != NULL) {
		if (object->type == OBJT_MARKER)
			goto skip;
		if (object->type != OBJT_SWAP && object->type != OBJT_VNODE)
			goto skip;
		vm_object_hold(object);
		if (object->type != OBJT_SWAP && object->type != OBJT_VNODE) {
			vm_object_drop(object);
			goto skip;
		}
		info.object = object;
		info.devidx = devidx;
		swblock_rb_tree_RB_SCAN(&object->swblock_root,
					NULL,
					swp_pager_swapoff_callback,
					&info);
		vm_object_drop(object);
skip:
		if (object == TAILQ_NEXT(&marker, object_list)) {
			TAILQ_REMOVE(&vm_object_list, &marker, object_list);
			TAILQ_INSERT_AFTER(&vm_object_list, object,
					   &marker, object_list);
		}
	}
	TAILQ_REMOVE(&vm_object_list, &marker, object_list);
	lwkt_reltoken(&vmobj_token);

	/*
	 * If we fail to locate all swblocks we just fail gracefully and
	 * do not bother to restore paging on the swap device.  The user
	 * can simply retry the swapoff later.
	 */
	if (swdevt[devidx].sw_nused)
		return (1);
	else
		return (0);
}

static
int
swp_pager_swapoff_callback(struct swblock *swap, void *data)
{
	struct swswapoffinfo *info = data;
	vm_object_t object = info->object;
	vm_pindex_t index;
	swblk_t v;
	int i;

	index = swap->swb_index;
	for (i = 0; i < SWAP_META_PAGES; ++i) {
		/*
		 * Make sure we don't race a dying object.  This will
		 * kill the scan of the object's swap blocks entirely.
		 */
		if (object->flags & OBJ_DEAD)
			return(-1);

		/*
		 * Fault the page, which can obviously block.  If the swap
		 * structure disappears, break out.
		 */
		v = swap->swb_pages[i];
		if (v != SWAPBLK_NONE && BLK2DEVIDX(v) == info->devidx) {
			swp_pager_fault_page(object, swap->swb_index + i);
			/* swap ptr might go away */
			if (RB_LOOKUP(swblock_rb_tree,
				      &object->swblock_root, index) != swap) {
				break;
			}
		}
	}
	return(0);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio()
 *	covers.
 *
 *	Swap metadata is kept in per-object swblock structures organized
 *	in a red-black tree rooted at object->swblock_root rather than
 *	being linked directly to the pages; the object also maintains a
 *	count of these structures in swblock_count.
 */

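/*
 * Layout note (illustrative): each swblock covers SWAP_META_PAGES
 * consecutive page indices.  A page index therefore maps to the swblock
 * whose swb_index is (index & ~SWAP_META_MASK) and to slot
 * (index & SWAP_META_MASK) within that swblock's swb_pages[] array.
 * E.g. assuming SWAP_META_PAGES were 16, pindex 37 would land in the
 * swblock at swb_index 32, slot 5.
 */
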
/*
 * Lookup the swblock containing the specified swap block index.
 *
 * The caller must hold the object.
 */
static __inline
struct swblock *
swp_pager_lookup(vm_object_t object, vm_pindex_t index)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	index &= ~(vm_pindex_t)SWAP_META_MASK;
	return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
}

/*
 * Remove a swblock from the RB tree.
 *
 * The caller must hold the object.
 */
static __inline
void
swp_pager_remove(vm_object_t object, struct swblock *swap)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap);
}

/*
 * Convert default object to swap object if necessary
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_convert(vm_object_t object)
{
	if (object->type == OBJT_DEFAULT) {
		object->type = OBJT_SWAP;
		KKASSERT(object->swblock_count == 0);
	}
}

/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.  Vnode objects do not need to be converted.
 *
 *	The specified swapblk is added to the object's swap metadata and
 *	any previously assigned swapblk for the index is freed.  The
 *	caller must not pass SWAPBLK_NONE (this is asserted).
 *
 *	The caller must hold the object.
 */
static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk)
{
	struct swblock *swap;
	struct swblock *oswap;
	vm_pindex_t v;

	KKASSERT(swapblk != SWAPBLK_NONE);
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Convert object if necessary
	 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);

	/*
	 * Locate the swblock, creating one if it does not already exist.
	 * If we run out of zone memory we wait for memory to free up and
	 * retry, since conditions may have changed.
	 */
retry:
	swap = swp_pager_lookup(object, index);

	if (swap == NULL) {
		int i;

		swap = zalloc(swap_zone);
		if (swap == NULL) {
			vm_wait(0);
			goto retry;
		}
		swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK;
		swap->swb_count = 0;

		++object->swblock_count;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
		oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap);
		KKASSERT(oswap == NULL);
	}

	/*
	 * Delete prior contents of metadata.
	 *
	 * NOTE: Decrement swb_count after the freeing operation (which
	 *	 might block) to prevent racing destruction of the swblock.
	 */
	index &= SWAP_META_MASK;

	while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) {
		swap->swb_pages[index] = SWAPBLK_NONE;
		/* can block */
		swp_pager_freeswapspace(object, v, 1);
		--swap->swb_count;
		--mycpu->gd_vmtotal.t_vm;
	}

	/*
	 * Enter block into metadata
	 */
	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE) {
		++swap->swb_count;
		++mycpu->gd_vmtotal.t_vm;
	}
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.
 *	This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	The caller must hold the object.
 */
static int swp_pager_meta_free_callback(struct swblock *swb, void *data);

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count)
{
	struct swfreeinfo info;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Nothing to do
	 */
	if (object->swblock_count == 0) {
		KKASSERT(RB_EMPTY(&object->swblock_root));
		return;
	}
	if (count == 0)
		return;

	/*
	 * Setup for RB tree scan.  Note that the pindex range can be huge
	 * due to the 64 bit page index space so we cannot safely iterate.
	 */
	info.object = object;
	info.basei = index & ~(vm_pindex_t)SWAP_META_MASK;
	info.begi = index;
	info.endi = index + count - 1;
	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp,
				swp_pager_meta_free_callback, &info);
}

/*
 * The caller must hold the object.
 */
static
int
swp_pager_meta_free_callback(struct swblock *swap, void *data)
{
	struct swfreeinfo *info = data;
	vm_object_t object = info->object;
	int index;
	int eindex;

	/*
	 * Figure out the range within the swblock.  The wider scan may
	 * return edge-case swap blocks when the start and/or end points
	 * are in the middle of a block.
	 */
	if (swap->swb_index < info->begi)
		index = (int)info->begi & SWAP_META_MASK;
	else
		index = 0;

	if (swap->swb_index + SWAP_META_PAGES > info->endi)
		eindex = (int)info->endi & SWAP_META_MASK;
	else
		eindex = SWAP_META_MASK;

	/*
	 * Scan and free the blocks.  The loop terminates early if (swap)
	 * runs out of blocks, in which case the swblock is removed and
	 * freed.
	 *
	 * NOTE: Decrement swb_count after swp_pager_freeswapspace()
	 *	 to deal with a zfree race.
	 */
	while (index <= eindex) {
		swblk_t v = swap->swb_pages[index];

		if (v != SWAPBLK_NONE) {
			swap->swb_pages[index] = SWAPBLK_NONE;
			/* can block */
			swp_pager_freeswapspace(object, v, 1);
			--mycpu->gd_vmtotal.t_vm;
			if (--swap->swb_count == 0) {
				swp_pager_remove(object, swap);
				zfree(swap_zone, swap);
				--object->swblock_count;
				break;
			}
		}
		++index;
	}

	/* swap may be invalid here due to zfree above */
	lwkt_yield();

	return(0);
}

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	NOTE: Decrement swb_count after the freeing operation (which
 *	      might block) to prevent racing destruction of the swblock.
 *
 *	The caller must hold the object.
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	struct swblock *swap;
	int i;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	while ((swap = RB_ROOT(&object->swblock_root)) != NULL) {
		swp_pager_remove(object, swap);
		for (i = 0; i < SWAP_META_PAGES; ++i) {
			swblk_t v = swap->swb_pages[i];
			if (v != SWAPBLK_NONE) {
				/* can block */
				swp_pager_freeswapspace(object, v, 1);
				--swap->swb_count;
				--mycpu->gd_vmtotal.t_vm;
			}
		}
		if (swap->swb_count != 0)
			panic("swap_pager_meta_free_all: swb_count != 0");
		zfree(swap_zone, swap);
		--object->swblock_count;
		lwkt_yield();
	}
	KKASSERT(object->swblock_count == 0);
}

/*
 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up or
 *	popped, or SWAPBLK_NONE if the block was freed or was never
 *	assigned.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free -- pop it out
 *
 *	The caller must hold the object.
 */
static swblk_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
{
	struct swblock *swap;
	swblk_t r1;

	if (object->swblock_count == 0)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	swap = swp_pager_lookup(object, index);

	if (swap != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				--mycpu->gd_vmtotal.t_vm;
				if (--swap->swb_count == 0) {
					swp_pager_remove(object, swap);
					zfree(swap_zone, swap);
					--object->swblock_count;
				}
			}
			/* swap ptr may be invalid */
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(object, r1, 1);
				r1 = SWAPBLK_NONE;
			}
		}
		/* swap ptr may be invalid */
	}
	return(r1);
}