1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 1998-2010 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Matthew Dillon <dillon@backplane.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * Copyright (c) 1994 John S. Dyson 37 * Copyright (c) 1990 University of Utah. 38 * Copyright (c) 1991, 1993 39 * The Regents of the University of California. All rights reserved. 40 * 41 * This code is derived from software contributed to Berkeley by 42 * the Systems Programming Group of the University of Utah Computer 43 * Science Department. 44 * 45 * Redistribution and use in source and binary forms, with or without 46 * modification, are permitted provided that the following conditions 47 * are met: 48 * 1. Redistributions of source code must retain the above copyright 49 * notice, this list of conditions and the following disclaimer. 50 * 2. Redistributions in binary form must reproduce the above copyright 51 * notice, this list of conditions and the following disclaimer in the 52 * documentation and/or other materials provided with the distribution. 53 * 3. Neither the name of the University nor the names of its contributors 54 * may be used to endorse or promote products derived from this software 55 * without specific prior written permission. 56 * 57 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 58 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 59 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 60 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 61 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 62 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 63 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 64 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 65 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 66 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 67 * SUCH DAMAGE. 68 * 69 * New Swap System 70 * Matthew Dillon 71 * 72 * Radix Bitmap 'blists'. 73 * 74 * - The new swapper uses the new radix bitmap code. This should scale 75 * to arbitrarily small or arbitrarily large swap spaces and an almost 76 * arbitrary degree of fragmentation. 77 * 78 * Features: 79 * 80 * - on the fly reallocation of swap during putpages. The new system 81 * does not try to keep previously allocated swap blocks for dirty 82 * pages. 83 * 84 * - on the fly deallocation of swap 85 * 86 * - No more garbage collection required. Unnecessarily allocated swap 87 * blocks only exist for dirty vm_page_t's now and these are already 88 * cycled (in a high-load system) by the pager. We also do on-the-fly 89 * removal of invalidated swap blocks when a page is destroyed 90 * or renamed. 91 * 92 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$ 93 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94 94 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $ 95 */ 96 97 #include <sys/param.h> 98 #include <sys/systm.h> 99 #include <sys/conf.h> 100 #include <sys/kernel.h> 101 #include <sys/proc.h> 102 #include <sys/buf.h> 103 #include <sys/vnode.h> 104 #include <sys/malloc.h> 105 #include <sys/vmmeter.h> 106 #include <sys/sysctl.h> 107 #include <sys/blist.h> 108 #include <sys/lock.h> 109 #include <sys/thread2.h> 110 111 #include <unistd.h> 112 #include "opt_swap.h" 113 #include <vm/vm.h> 114 #include <vm/vm_object.h> 115 #include <vm/vm_page.h> 116 #include <vm/vm_pager.h> 117 #include <vm/vm_pageout.h> 118 #include <vm/swap_pager.h> 119 #include <vm/vm_extern.h> 120 #include <vm/vm_zone.h> 121 #include <vm/vnode_pager.h> 122 123 #include <sys/buf2.h> 124 #include <vm/vm_page2.h> 125 126 #ifndef MAX_PAGEOUT_CLUSTER 127 #define MAX_PAGEOUT_CLUSTER SWB_NPAGES 128 #endif 129 130 #define SWM_FREE 0x02 /* free, period */ 131 #define SWM_POP 0x04 /* pop out */ 132 133 #define SWBIO_READ 0x01 134 #define SWBIO_WRITE 0x02 135 #define SWBIO_SYNC 0x04 136 #define SWBIO_TTC 0x08 /* for VM_PAGER_TRY_TO_CACHE */ 137 138 struct swfreeinfo { 139 vm_object_t object; 140 vm_pindex_t basei; 141 vm_pindex_t begi; 142 vm_pindex_t endi; /* inclusive */ 143 }; 144 145 struct swswapoffinfo { 146 vm_object_t object; 147 int devidx; 148 int shared; 149 }; 150 151 /* 152 * vm_swap_size is in page-sized chunks now. It was DEV_BSIZE'd chunks 153 * in the old system. 
154 */ 155 156 int swap_pager_full; /* swap space exhaustion (task killing) */ 157 int swap_fail_ticks; /* when we became exhausted */ 158 int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/ 159 swblk_t vm_swap_cache_use; 160 swblk_t vm_swap_anon_use; 161 static int vm_report_swap_allocs; 162 163 static int nsw_rcount; /* free read buffers */ 164 static int nsw_wcount_sync; /* limit write buffers / synchronous */ 165 static int nsw_wcount_async; /* limit write buffers / asynchronous */ 166 static int nsw_wcount_async_max;/* assigned maximum */ 167 static int nsw_cluster_max; /* maximum VOP I/O allowed */ 168 169 struct blist *swapblist; 170 static int swap_async_max = 4; /* maximum in-progress async I/O's */ 171 static int swap_burst_read = 0; /* allow burst reading */ 172 static swblk_t swapiterator; /* linearize allocations */ 173 int swap_user_async = 0; /* user swap pager operation can be async */ 174 175 static struct spinlock swapbp_spin = SPINLOCK_INITIALIZER(&swapbp_spin, "swapbp_spin"); 176 177 /* from vm_swap.c */ 178 extern struct vnode *swapdev_vp; 179 extern struct swdevt *swdevt; 180 extern int nswdev; 181 182 #define BLK2DEVIDX(blk) (nswdev > 1 ? blk / SWB_DMMAX % nswdev : 0) 183 184 SYSCTL_INT(_vm, OID_AUTO, swap_async_max, 185 CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops"); 186 SYSCTL_INT(_vm, OID_AUTO, swap_burst_read, 187 CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins"); 188 SYSCTL_INT(_vm, OID_AUTO, swap_user_async, 189 CTLFLAG_RW, &swap_user_async, 0, "Allow async uuser swap write I/O"); 190 191 #if SWBLK_BITS == 64 192 SYSCTL_LONG(_vm, OID_AUTO, swap_cache_use, 193 CTLFLAG_RD, &vm_swap_cache_use, 0, ""); 194 SYSCTL_LONG(_vm, OID_AUTO, swap_anon_use, 195 CTLFLAG_RD, &vm_swap_anon_use, 0, ""); 196 SYSCTL_LONG(_vm, OID_AUTO, swap_size, 197 CTLFLAG_RD, &vm_swap_size, 0, ""); 198 #else 199 SYSCTL_INT(_vm, OID_AUTO, swap_cache_use, 200 CTLFLAG_RD, &vm_swap_cache_use, 0, ""); 201 SYSCTL_INT(_vm, OID_AUTO, swap_anon_use, 202 CTLFLAG_RD, &vm_swap_anon_use, 0, ""); 203 SYSCTL_INT(_vm, OID_AUTO, swap_size, 204 CTLFLAG_RD, &vm_swap_size, 0, ""); 205 #endif 206 SYSCTL_INT(_vm, OID_AUTO, report_swap_allocs, 207 CTLFLAG_RW, &vm_report_swap_allocs, 0, ""); 208 209 vm_zone_t swap_zone; 210 211 /* 212 * Red-Black tree for swblock entries 213 * 214 * The caller must hold vm_token 215 */ 216 RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare, 217 vm_pindex_t, swb_index); 218 219 int 220 rb_swblock_compare(struct swblock *swb1, struct swblock *swb2) 221 { 222 if (swb1->swb_index < swb2->swb_index) 223 return(-1); 224 if (swb1->swb_index > swb2->swb_index) 225 return(1); 226 return(0); 227 } 228 229 static 230 int 231 rb_swblock_scancmp(struct swblock *swb, void *data) 232 { 233 struct swfreeinfo *info = data; 234 235 if (swb->swb_index < info->basei) 236 return(-1); 237 if (swb->swb_index > info->endi) 238 return(1); 239 return(0); 240 } 241 242 static 243 int 244 rb_swblock_condcmp(struct swblock *swb, void *data) 245 { 246 struct swfreeinfo *info = data; 247 248 if (swb->swb_index < info->basei) 249 return(-1); 250 return(0); 251 } 252 253 /* 254 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure 255 * calls hooked from other parts of the VM system and do not appear here. 256 * (see vm/swap_pager.h). 
257 */ 258 259 static void swap_pager_dealloc (vm_object_t object); 260 static int swap_pager_getpage (vm_object_t, vm_page_t *, int); 261 static void swap_chain_iodone(struct bio *biox); 262 263 struct pagerops swappagerops = { 264 swap_pager_dealloc, /* deallocate an OBJT_SWAP object */ 265 swap_pager_getpage, /* pagein */ 266 swap_pager_putpages, /* pageout */ 267 swap_pager_haspage /* get backing store status for page */ 268 }; 269 270 /* 271 * SWB_DMMAX is in page-sized chunks with the new swap system. It was 272 * dev-bsized chunks in the old. SWB_DMMAX is always a power of 2. 273 * 274 * swap_*() routines are externally accessible. swp_*() routines are 275 * internal. 276 */ 277 278 int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */ 279 int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */ 280 281 static __inline void swp_sizecheck (void); 282 static void swp_pager_async_iodone (struct bio *bio); 283 284 /* 285 * Swap bitmap functions 286 */ 287 288 static __inline void swp_pager_freeswapspace(vm_object_t object, 289 swblk_t blk, int npages); 290 static __inline swblk_t swp_pager_getswapspace(vm_object_t object, int npages); 291 292 /* 293 * Metadata functions 294 */ 295 296 static void swp_pager_meta_convert(vm_object_t); 297 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t); 298 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t); 299 static void swp_pager_meta_free_all(vm_object_t); 300 static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int); 301 302 /* 303 * SWP_SIZECHECK() - update swap_pager_full indication 304 * 305 * update the swap_pager_almost_full indication and warn when we are 306 * about to run out of swap space, using lowat/hiwat hysteresis. 307 * 308 * Clear swap_pager_full ( task killing ) indication when lowat is met. 309 * 310 * No restrictions on call 311 * This routine may not block. 312 * SMP races are ok. 313 */ 314 static __inline void 315 swp_sizecheck(void) 316 { 317 if (vm_swap_size < nswap_lowat) { 318 if (swap_pager_almost_full == 0) { 319 kprintf("swap_pager: out of swap space\n"); 320 swap_pager_almost_full = 1; 321 swap_fail_ticks = ticks; 322 } 323 } else { 324 swap_pager_full = 0; 325 if (vm_swap_size > nswap_hiwat) 326 swap_pager_almost_full = 0; 327 } 328 } 329 330 /* 331 * SWAP_PAGER_INIT() - initialize the swap pager! 332 * 333 * Expected to be started from system init. NOTE: This code is run 334 * before much else so be careful what you depend on. Most of the VM 335 * system has yet to be initialized at this point. 336 * 337 * Called from the low level boot code only. 338 */ 339 static void 340 swap_pager_init(void *arg __unused) 341 { 342 } 343 SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL); 344 345 /* 346 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process 347 * 348 * Expected to be started from pageout process once, prior to entering 349 * its main loop. 350 * 351 * Called from the low level boot code only. 352 */ 353 void 354 swap_pager_swap_init(void) 355 { 356 int n, n2; 357 358 /* 359 * Number of in-transit swap bp operations. Don't 360 * exhaust the pbufs completely. Make sure we 361 * initialize workable values (0 will work for hysteresis 362 * but it isn't very efficient). 363 * 364 * The nsw_cluster_max is constrained by the number of pages an XIO 365 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined 366 * MAX_PAGEOUT_CLUSTER. 
Also be aware that swap ops are 367 * constrained by the swap device interleave stripe size. 368 * 369 * Currently we hardwire nsw_wcount_async to 4. This limit is 370 * designed to prevent other I/O from having high latencies due to 371 * our pageout I/O. The value 4 works well for one or two active swap 372 * devices but is probably a little low if you have more. Even so, 373 * a higher value would probably generate only a limited improvement 374 * with three or four active swap devices since the system does not 375 * typically have to pageout at extreme bandwidths. We will want 376 * at least 2 per swap devices, and 4 is a pretty good value if you 377 * have one NFS swap device due to the command/ack latency over NFS. 378 * So it all works out pretty well. 379 */ 380 381 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER); 382 383 nsw_rcount = (nswbuf_kva + 1) / 2; 384 nsw_wcount_sync = (nswbuf_kva + 3) / 4; 385 nsw_wcount_async = 4; 386 nsw_wcount_async_max = nsw_wcount_async; 387 388 /* 389 * The zone is dynamically allocated so generally size it to 390 * maxswzone (32MB to 256GB of KVM). Set a minimum size based 391 * on physical memory of around 8x (each swblock can hold 16 pages). 392 * 393 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio 394 * has increased dramatically. 395 */ 396 n = vmstats.v_page_count / 2; 397 if (maxswzone && n < maxswzone / sizeof(struct swblock)) 398 n = maxswzone / sizeof(struct swblock); 399 n2 = n; 400 401 do { 402 swap_zone = zinit( 403 "SWAPMETA", 404 sizeof(struct swblock), 405 n, 406 ZONE_INTERRUPT); 407 if (swap_zone != NULL) 408 break; 409 /* 410 * if the allocation failed, try a zone two thirds the 411 * size of the previous attempt. 412 */ 413 n -= ((n + 2) / 3); 414 } while (n > 0); 415 416 if (swap_zone == NULL) 417 panic("swap_pager_swap_init: swap_zone == NULL"); 418 if (n2 != n) 419 kprintf("Swap zone entries reduced from %d to %d.\n", n2, n); 420 } 421 422 /* 423 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate 424 * its metadata structures. 425 * 426 * This routine is called from the mmap and fork code to create a new 427 * OBJT_SWAP object. We do this by creating an OBJT_DEFAULT object 428 * and then converting it with swp_pager_meta_convert(). 429 * 430 * We only support unnamed objects. 431 * 432 * No restrictions. 433 */ 434 vm_object_t 435 swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset) 436 { 437 vm_object_t object; 438 439 KKASSERT(handle == NULL); 440 object = vm_object_allocate_hold(OBJT_DEFAULT, 441 OFF_TO_IDX(offset + PAGE_MASK + size)); 442 swp_pager_meta_convert(object); 443 vm_object_drop(object); 444 445 return (object); 446 } 447 448 /* 449 * SWAP_PAGER_DEALLOC() - remove swap metadata from object 450 * 451 * The swap backing for the object is destroyed. The code is 452 * designed such that we can reinstantiate it later, but this 453 * routine is typically called only when the entire object is 454 * about to be destroyed. 455 * 456 * The object must be locked or unreferenceable. 457 * No other requirements. 458 */ 459 static void 460 swap_pager_dealloc(vm_object_t object) 461 { 462 vm_object_hold(object); 463 vm_object_pip_wait(object, "swpdea"); 464 465 /* 466 * Free all remaining metadata. We only bother to free it from 467 * the swap meta data. We do not attempt to free swapblk's still 468 * associated with vm_page_t's for this object. We do not care 469 * if paging is still in progress on some objects. 
470 */ 471 swp_pager_meta_free_all(object); 472 vm_object_drop(object); 473 } 474 475 /************************************************************************ 476 * SWAP PAGER BITMAP ROUTINES * 477 ************************************************************************/ 478 479 /* 480 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space 481 * 482 * Allocate swap for the requested number of pages. The starting 483 * swap block number (a page index) is returned or SWAPBLK_NONE 484 * if the allocation failed. 485 * 486 * Also has the side effect of advising that somebody made a mistake 487 * when they configured swap and didn't configure enough. 488 * 489 * The caller must hold the object. 490 * This routine may not block. 491 */ 492 static __inline swblk_t 493 swp_pager_getswapspace(vm_object_t object, int npages) 494 { 495 swblk_t blk; 496 497 lwkt_gettoken(&vm_token); 498 blk = blist_allocat(swapblist, npages, swapiterator); 499 if (blk == SWAPBLK_NONE) 500 blk = blist_allocat(swapblist, npages, 0); 501 if (blk == SWAPBLK_NONE) { 502 if (swap_pager_full != 2) { 503 if (vm_swap_max == 0) 504 kprintf("Warning: The system would like to " 505 "page to swap but no swap space " 506 "is configured!\n"); 507 else 508 kprintf("swap_pager_getswapspace: " 509 "swap full allocating %d pages\n", 510 npages); 511 swap_pager_full = 2; 512 if (swap_pager_almost_full == 0) 513 swap_fail_ticks = ticks; 514 swap_pager_almost_full = 1; 515 } 516 } else { 517 /* swapiterator = blk; disable for now, doesn't work well */ 518 swapacctspace(blk, -npages); 519 if (object->type == OBJT_SWAP) 520 vm_swap_anon_use += npages; 521 else 522 vm_swap_cache_use += npages; 523 swp_sizecheck(); 524 } 525 lwkt_reltoken(&vm_token); 526 return(blk); 527 } 528 529 /* 530 * SWP_PAGER_FREESWAPSPACE() - free raw swap space 531 * 532 * This routine returns the specified swap blocks back to the bitmap. 533 * 534 * Note: This routine may not block (it could in the old swap code), 535 * and through the use of the new blist routines it does not block. 536 * 537 * This routine may not block. 538 */ 539 540 static __inline void 541 swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages) 542 { 543 struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)]; 544 545 lwkt_gettoken(&vm_token); 546 sp->sw_nused -= npages; 547 if (object->type == OBJT_SWAP) 548 vm_swap_anon_use -= npages; 549 else 550 vm_swap_cache_use -= npages; 551 552 if (sp->sw_flags & SW_CLOSING) { 553 lwkt_reltoken(&vm_token); 554 return; 555 } 556 557 blist_free(swapblist, blk, npages); 558 vm_swap_size += npages; 559 swp_sizecheck(); 560 lwkt_reltoken(&vm_token); 561 } 562 563 /* 564 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page 565 * range within an object. 566 * 567 * This is a globally accessible routine. 568 * 569 * This routine removes swapblk assignments from swap metadata. 570 * 571 * The external callers of this routine typically have already destroyed 572 * or renamed vm_page_t's associated with this range in the object so 573 * we should be ok. 574 * 575 * No requirements. 576 */ 577 void 578 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size) 579 { 580 vm_object_hold(object); 581 swp_pager_meta_free(object, start, size); 582 vm_object_drop(object); 583 } 584 585 /* 586 * No requirements. 
587 */ 588 void 589 swap_pager_freespace_all(vm_object_t object) 590 { 591 vm_object_hold(object); 592 swp_pager_meta_free_all(object); 593 vm_object_drop(object); 594 } 595 596 /* 597 * This function conditionally frees swap cache swap starting at 598 * (*basei) in the object. (count) swap blocks will be nominally freed. 599 * The actual number of blocks freed can be more or less than the 600 * requested number. 601 * 602 * This function nominally returns the number of blocks freed. However, 603 * the actual number of blocks freed may be less then the returned value. 604 * If the function is unable to exhaust the object or if it is able to 605 * free (approximately) the requested number of blocks it returns 606 * a value n > count. 607 * 608 * If we exhaust the object we will return a value n <= count. 609 * 610 * The caller must hold the object. 611 * 612 * WARNING! If count == 0 then -1 can be returned as a degenerate case, 613 * callers should always pass a count value > 0. 614 */ 615 static int swap_pager_condfree_callback(struct swblock *swap, void *data); 616 617 int 618 swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count) 619 { 620 struct swfreeinfo info; 621 int n; 622 int t; 623 624 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 625 626 info.object = object; 627 info.basei = *basei; /* skip up to this page index */ 628 info.begi = count; /* max swap pages to destroy */ 629 info.endi = count * 8; /* max swblocks to scan */ 630 631 swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp, 632 swap_pager_condfree_callback, &info); 633 *basei = info.basei; 634 635 /* 636 * Take the higher difference swblocks vs pages 637 */ 638 n = count - (int)info.begi; 639 t = count * 8 - (int)info.endi; 640 if (n < t) 641 n = t; 642 if (n < 1) 643 n = 1; 644 return(n); 645 } 646 647 /* 648 * The idea is to free whole meta-block to avoid fragmenting 649 * the swap space or disk I/O. We only do this if NO VM pages 650 * are present. 651 * 652 * We do not have to deal with clearing PG_SWAPPED in related VM 653 * pages because there are no related VM pages. 654 * 655 * The caller must hold the object. 656 */ 657 static int 658 swap_pager_condfree_callback(struct swblock *swap, void *data) 659 { 660 struct swfreeinfo *info = data; 661 vm_object_t object = info->object; 662 int i; 663 664 for (i = 0; i < SWAP_META_PAGES; ++i) { 665 if (vm_page_lookup(object, swap->swb_index + i)) 666 break; 667 } 668 info->basei = swap->swb_index + SWAP_META_PAGES; 669 if (i == SWAP_META_PAGES) { 670 info->begi -= swap->swb_count; 671 swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES); 672 } 673 --info->endi; 674 if ((int)info->begi < 0 || (int)info->endi < 0) 675 return(-1); 676 lwkt_yield(); 677 return(0); 678 } 679 680 /* 681 * Called by vm_page_alloc() when a new VM page is inserted 682 * into a VM object. Checks whether swap has been assigned to 683 * the page and sets PG_SWAPPED as necessary. 684 * 685 * (m) must be busied by caller and remains busied on return. 686 */ 687 void 688 swap_pager_page_inserted(vm_page_t m) 689 { 690 if (m->object->swblock_count) { 691 vm_object_hold(m->object); 692 if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE) 693 vm_page_flag_set(m, PG_SWAPPED); 694 vm_object_drop(m->object); 695 } 696 } 697 698 /* 699 * SWAP_PAGER_RESERVE() - reserve swap blocks in object 700 * 701 * Assigns swap blocks to the specified range within the object. The 702 * swap blocks are not zerod. Any previous swap assignment is destroyed. 
703 * 704 * Returns 0 on success, -1 on failure. 705 * 706 * The caller is responsible for avoiding races in the specified range. 707 * No other requirements. 708 */ 709 int 710 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size) 711 { 712 int n = 0; 713 swblk_t blk = SWAPBLK_NONE; 714 vm_pindex_t beg = start; /* save start index */ 715 716 vm_object_hold(object); 717 718 while (size) { 719 if (n == 0) { 720 n = BLIST_MAX_ALLOC; 721 while ((blk = swp_pager_getswapspace(object, n)) == 722 SWAPBLK_NONE) 723 { 724 n >>= 1; 725 if (n == 0) { 726 swp_pager_meta_free(object, beg, 727 start - beg); 728 vm_object_drop(object); 729 return(-1); 730 } 731 } 732 } 733 swp_pager_meta_build(object, start, blk); 734 --size; 735 ++start; 736 ++blk; 737 --n; 738 } 739 swp_pager_meta_free(object, start, n); 740 vm_object_drop(object); 741 return(0); 742 } 743 744 /* 745 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager 746 * and destroy the source. 747 * 748 * Copy any valid swapblks from the source to the destination. In 749 * cases where both the source and destination have a valid swapblk, 750 * we keep the destination's. 751 * 752 * This routine is allowed to block. It may block allocating metadata 753 * indirectly through swp_pager_meta_build() or if paging is still in 754 * progress on the source. 755 * 756 * XXX vm_page_collapse() kinda expects us not to block because we 757 * supposedly do not need to allocate memory, but for the moment we 758 * *may* have to get a little memory from the zone allocator, but 759 * it is taken from the interrupt memory. We should be ok. 760 * 761 * The source object contains no vm_page_t's (which is just as well) 762 * The source object is of type OBJT_SWAP. 763 * 764 * The source and destination objects must be held by the caller. 765 */ 766 void 767 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject, 768 vm_pindex_t base_index, int destroysource) 769 { 770 vm_pindex_t i; 771 772 ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject)); 773 ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject)); 774 775 /* 776 * transfer source to destination. 777 */ 778 for (i = 0; i < dstobject->size; ++i) { 779 swblk_t dstaddr; 780 781 /* 782 * Locate (without changing) the swapblk on the destination, 783 * unless it is invalid in which case free it silently, or 784 * if the destination is a resident page, in which case the 785 * source is thrown away. 786 */ 787 dstaddr = swp_pager_meta_ctl(dstobject, i, 0); 788 789 if (dstaddr == SWAPBLK_NONE) { 790 /* 791 * Destination has no swapblk and is not resident, 792 * copy source. 793 */ 794 swblk_t srcaddr; 795 796 srcaddr = swp_pager_meta_ctl(srcobject, 797 base_index + i, SWM_POP); 798 799 if (srcaddr != SWAPBLK_NONE) 800 swp_pager_meta_build(dstobject, i, srcaddr); 801 } else { 802 /* 803 * Destination has valid swapblk or it is represented 804 * by a resident page. We destroy the sourceblock. 805 */ 806 swp_pager_meta_ctl(srcobject, base_index + i, SWM_FREE); 807 } 808 } 809 810 /* 811 * Free left over swap blocks in source. 812 * 813 * We have to revert the type to OBJT_DEFAULT so we do not accidently 814 * double-remove the object from the swap queues. 815 */ 816 if (destroysource) { 817 /* 818 * Reverting the type is not necessary, the caller is going 819 * to destroy srcobject directly, but I'm doing it here 820 * for consistency since we've removed the object from its 821 * queues. 
822 */ 823 swp_pager_meta_free_all(srcobject); 824 if (srcobject->type == OBJT_SWAP) 825 srcobject->type = OBJT_DEFAULT; 826 } 827 } 828 829 /* 830 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for 831 * the requested page. 832 * 833 * We determine whether good backing store exists for the requested 834 * page and return TRUE if it does, FALSE if it doesn't. 835 * 836 * If TRUE, we also try to determine how much valid, contiguous backing 837 * store exists before and after the requested page within a reasonable 838 * distance. We do not try to restrict it to the swap device stripe 839 * (that is handled in getpages/putpages). It probably isn't worth 840 * doing here. 841 * 842 * No requirements. 843 */ 844 boolean_t 845 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex) 846 { 847 swblk_t blk0; 848 849 /* 850 * do we have good backing store at the requested index ? 851 */ 852 vm_object_hold(object); 853 blk0 = swp_pager_meta_ctl(object, pindex, 0); 854 855 if (blk0 == SWAPBLK_NONE) { 856 vm_object_drop(object); 857 return (FALSE); 858 } 859 vm_object_drop(object); 860 return (TRUE); 861 } 862 863 /* 864 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page 865 * 866 * This removes any associated swap backing store, whether valid or 867 * not, from the page. This operates on any VM object, not just OBJT_SWAP 868 * objects. 869 * 870 * This routine is typically called when a page is made dirty, at 871 * which point any associated swap can be freed. MADV_FREE also 872 * calls us in a special-case situation 873 * 874 * NOTE!!! If the page is clean and the swap was valid, the caller 875 * should make the page dirty before calling this routine. 876 * This routine does NOT change the m->dirty status of the page. 877 * Also: MADV_FREE depends on it. 878 * 879 * The page must be busied. 880 * The caller can hold the object to avoid blocking, else we might block. 881 * No other requirements. 882 */ 883 void 884 swap_pager_unswapped(vm_page_t m) 885 { 886 if (m->flags & PG_SWAPPED) { 887 vm_object_hold(m->object); 888 KKASSERT(m->flags & PG_SWAPPED); 889 swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE); 890 vm_page_flag_clear(m, PG_SWAPPED); 891 vm_object_drop(m->object); 892 } 893 } 894 895 /* 896 * SWAP_PAGER_STRATEGY() - read, write, free blocks 897 * 898 * This implements a VM OBJECT strategy function using swap backing store. 899 * This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP 900 * types. 901 * 902 * This is intended to be a cacheless interface (i.e. caching occurs at 903 * higher levels), and is also used as a swap-based SSD cache for vnode 904 * and device objects. 905 * 906 * All I/O goes directly to and from the swap device. 907 * 908 * We currently attempt to run I/O synchronously or asynchronously as 909 * the caller requests. This isn't perfect because we loose error 910 * sequencing when we run multiple ops in parallel to satisfy a request. 911 * But this is swap, so we let it all hang out. 912 * 913 * No requirements. 
914 */ 915 void 916 swap_pager_strategy(vm_object_t object, struct bio *bio) 917 { 918 struct buf *bp = bio->bio_buf; 919 struct bio *nbio; 920 vm_pindex_t start; 921 vm_pindex_t biox_blkno = 0; 922 int count; 923 char *data; 924 struct bio *biox; 925 struct buf *bufx; 926 #if 0 927 struct bio_track *track; 928 #endif 929 930 #if 0 931 /* 932 * tracking for swapdev vnode I/Os 933 */ 934 if (bp->b_cmd == BUF_CMD_READ) 935 track = &swapdev_vp->v_track_read; 936 else 937 track = &swapdev_vp->v_track_write; 938 #endif 939 940 if (bp->b_bcount & PAGE_MASK) { 941 bp->b_error = EINVAL; 942 bp->b_flags |= B_ERROR | B_INVAL; 943 biodone(bio); 944 kprintf("swap_pager_strategy: bp %p offset %lld size %d, " 945 "not page bounded\n", 946 bp, (long long)bio->bio_offset, (int)bp->b_bcount); 947 return; 948 } 949 950 /* 951 * Clear error indication, initialize page index, count, data pointer. 952 */ 953 bp->b_error = 0; 954 bp->b_flags &= ~B_ERROR; 955 bp->b_resid = bp->b_bcount; 956 957 start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT); 958 count = howmany(bp->b_bcount, PAGE_SIZE); 959 data = bp->b_data; 960 961 /* 962 * Deal with BUF_CMD_FREEBLKS 963 */ 964 if (bp->b_cmd == BUF_CMD_FREEBLKS) { 965 /* 966 * FREE PAGE(s) - destroy underlying swap that is no longer 967 * needed. 968 */ 969 vm_object_hold(object); 970 swp_pager_meta_free(object, start, count); 971 vm_object_drop(object); 972 bp->b_resid = 0; 973 biodone(bio); 974 return; 975 } 976 977 /* 978 * We need to be able to create a new cluster of I/O's. We cannot 979 * use the caller fields of the passed bio so push a new one. 980 * 981 * Because nbio is just a placeholder for the cluster links, 982 * we can biodone() the original bio instead of nbio to make 983 * things a bit more efficient. 984 */ 985 nbio = push_bio(bio); 986 nbio->bio_offset = bio->bio_offset; 987 nbio->bio_caller_info1.cluster_head = NULL; 988 nbio->bio_caller_info2.cluster_tail = NULL; 989 990 biox = NULL; 991 bufx = NULL; 992 993 /* 994 * Execute read or write 995 */ 996 vm_object_hold(object); 997 998 while (count > 0) { 999 swblk_t blk; 1000 1001 /* 1002 * Obtain block. If block not found and writing, allocate a 1003 * new block and build it into the object. 1004 */ 1005 blk = swp_pager_meta_ctl(object, start, 0); 1006 if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) { 1007 blk = swp_pager_getswapspace(object, 1); 1008 if (blk == SWAPBLK_NONE) { 1009 bp->b_error = ENOMEM; 1010 bp->b_flags |= B_ERROR; 1011 break; 1012 } 1013 swp_pager_meta_build(object, start, blk); 1014 } 1015 1016 /* 1017 * Do we have to flush our current collection? Yes if: 1018 * 1019 * - no swap block at this index 1020 * - swap block is not contiguous 1021 * - we cross a physical disk boundry in the 1022 * stripe. 1023 */ 1024 if ( 1025 biox && (biox_blkno + btoc(bufx->b_bcount) != blk || 1026 ((biox_blkno ^ blk) & ~SWB_DMMASK) 1027 ) 1028 ) { 1029 if (bp->b_cmd == BUF_CMD_READ) { 1030 ++mycpu->gd_cnt.v_swapin; 1031 mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount); 1032 } else { 1033 ++mycpu->gd_cnt.v_swapout; 1034 mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount); 1035 bufx->b_dirtyend = bufx->b_bcount; 1036 } 1037 1038 /* 1039 * Finished with this buf. 1040 */ 1041 KKASSERT(bufx->b_bcount != 0); 1042 if (bufx->b_cmd != BUF_CMD_READ) 1043 bufx->b_dirtyend = bufx->b_bcount; 1044 biox = NULL; 1045 bufx = NULL; 1046 } 1047 1048 /* 1049 * Add new swapblk to biox, instantiating biox if necessary. 1050 * Zero-fill reads are able to take a shortcut. 
1051 */ 1052 if (blk == SWAPBLK_NONE) { 1053 /* 1054 * We can only get here if we are reading. 1055 */ 1056 bzero(data, PAGE_SIZE); 1057 bp->b_resid -= PAGE_SIZE; 1058 } else { 1059 if (biox == NULL) { 1060 /* XXX chain count > 4, wait to <= 4 */ 1061 1062 bufx = getpbuf(NULL); 1063 biox = &bufx->b_bio1; 1064 cluster_append(nbio, bufx); 1065 bufx->b_cmd = bp->b_cmd; 1066 biox->bio_done = swap_chain_iodone; 1067 biox->bio_offset = (off_t)blk << PAGE_SHIFT; 1068 biox->bio_caller_info1.cluster_parent = nbio; 1069 biox_blkno = blk; 1070 bufx->b_bcount = 0; 1071 bufx->b_data = data; 1072 } 1073 bufx->b_bcount += PAGE_SIZE; 1074 } 1075 --count; 1076 ++start; 1077 data += PAGE_SIZE; 1078 } 1079 1080 vm_object_drop(object); 1081 1082 /* 1083 * Flush out last buffer 1084 */ 1085 if (biox) { 1086 if (bufx->b_cmd == BUF_CMD_READ) { 1087 ++mycpu->gd_cnt.v_swapin; 1088 mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount); 1089 } else { 1090 ++mycpu->gd_cnt.v_swapout; 1091 mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount); 1092 bufx->b_dirtyend = bufx->b_bcount; 1093 } 1094 KKASSERT(bufx->b_bcount); 1095 if (bufx->b_cmd != BUF_CMD_READ) 1096 bufx->b_dirtyend = bufx->b_bcount; 1097 /* biox, bufx = NULL */ 1098 } 1099 1100 /* 1101 * Now initiate all the I/O. Be careful looping on our chain as 1102 * I/O's may complete while we are still initiating them. 1103 * 1104 * If the request is a 100% sparse read no bios will be present 1105 * and we just biodone() the buffer. 1106 */ 1107 nbio->bio_caller_info2.cluster_tail = NULL; 1108 bufx = nbio->bio_caller_info1.cluster_head; 1109 1110 if (bufx) { 1111 while (bufx) { 1112 biox = &bufx->b_bio1; 1113 BUF_KERNPROC(bufx); 1114 bufx = bufx->b_cluster_next; 1115 vn_strategy(swapdev_vp, biox); 1116 } 1117 } else { 1118 biodone(bio); 1119 } 1120 1121 /* 1122 * Completion of the cluster will also call biodone_chain(nbio). 1123 * We never call biodone(nbio) so we don't have to worry about 1124 * setting up a bio_done callback. It's handled in the sub-IO. 1125 */ 1126 /**/ 1127 } 1128 1129 /* 1130 * biodone callback 1131 * 1132 * No requirements. 1133 */ 1134 static void 1135 swap_chain_iodone(struct bio *biox) 1136 { 1137 struct buf **nextp; 1138 struct buf *bufx; /* chained sub-buffer */ 1139 struct bio *nbio; /* parent nbio with chain glue */ 1140 struct buf *bp; /* original bp associated with nbio */ 1141 int chain_empty; 1142 1143 bufx = biox->bio_buf; 1144 nbio = biox->bio_caller_info1.cluster_parent; 1145 bp = nbio->bio_buf; 1146 1147 /* 1148 * Update the original buffer 1149 */ 1150 KKASSERT(bp != NULL); 1151 if (bufx->b_flags & B_ERROR) { 1152 atomic_set_int(&bufx->b_flags, B_ERROR); 1153 bp->b_error = bufx->b_error; /* race ok */ 1154 } else if (bufx->b_resid != 0) { 1155 atomic_set_int(&bufx->b_flags, B_ERROR); 1156 bp->b_error = EINVAL; /* race ok */ 1157 } else { 1158 atomic_subtract_int(&bp->b_resid, bufx->b_bcount); 1159 } 1160 1161 /* 1162 * Remove us from the chain. 1163 */ 1164 spin_lock(&swapbp_spin); 1165 nextp = &nbio->bio_caller_info1.cluster_head; 1166 while (*nextp != bufx) { 1167 KKASSERT(*nextp != NULL); 1168 nextp = &(*nextp)->b_cluster_next; 1169 } 1170 *nextp = bufx->b_cluster_next; 1171 chain_empty = (nbio->bio_caller_info1.cluster_head == NULL); 1172 spin_unlock(&swapbp_spin); 1173 1174 /* 1175 * Clean up bufx. If the chain is now empty we finish out 1176 * the parent. Note that we may be racing other completions 1177 * so we must use the chain_empty status from above. 
1178 */ 1179 if (chain_empty) { 1180 if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) { 1181 atomic_set_int(&bp->b_flags, B_ERROR); 1182 bp->b_error = EINVAL; 1183 } 1184 biodone_chain(nbio); 1185 } 1186 relpbuf(bufx, NULL); 1187 } 1188 1189 /* 1190 * SWAP_PAGER_GETPAGES() - bring page in from swap 1191 * 1192 * The requested page may have to be brought in from swap. Calculate the 1193 * swap block and bring in additional pages if possible. All pages must 1194 * have contiguous swap block assignments and reside in the same object. 1195 * 1196 * The caller has a single vm_object_pip_add() reference prior to 1197 * calling us and we should return with the same. 1198 * 1199 * The caller has BUSY'd the page. We should return with (*mpp) left busy, 1200 * and any additinal pages unbusied. 1201 * 1202 * If the caller encounters a PG_RAM page it will pass it to us even though 1203 * it may be valid and dirty. We cannot overwrite the page in this case! 1204 * The case is used to allow us to issue pure read-aheads. 1205 * 1206 * NOTE! XXX This code does not entirely pipeline yet due to the fact that 1207 * the PG_RAM page is validated at the same time as mreq. What we 1208 * really need to do is issue a separate read-ahead pbuf. 1209 * 1210 * No requirements. 1211 */ 1212 static int 1213 swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess) 1214 { 1215 struct buf *bp; 1216 struct bio *bio; 1217 vm_page_t mreq; 1218 vm_page_t m; 1219 vm_offset_t kva; 1220 swblk_t blk; 1221 int i; 1222 int j; 1223 int raonly; 1224 int error; 1225 u_int32_t flags; 1226 vm_page_t marray[XIO_INTERNAL_PAGES]; 1227 1228 mreq = *mpp; 1229 1230 vm_object_hold(object); 1231 if (mreq->object != object) { 1232 panic("swap_pager_getpages: object mismatch %p/%p", 1233 object, 1234 mreq->object 1235 ); 1236 } 1237 1238 /* 1239 * We don't want to overwrite a fully valid page as it might be 1240 * dirty. This case can occur when e.g. vm_fault hits a perfectly 1241 * valid page with PG_RAM set. 1242 * 1243 * In this case we see if the next page is a suitable page-in 1244 * candidate and if it is we issue read-ahead. PG_RAM will be 1245 * set on the last page of the read-ahead to continue the pipeline. 1246 */ 1247 if (mreq->valid == VM_PAGE_BITS_ALL) { 1248 if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size) { 1249 vm_object_drop(object); 1250 return(VM_PAGER_OK); 1251 } 1252 blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0); 1253 if (blk == SWAPBLK_NONE) { 1254 vm_object_drop(object); 1255 return(VM_PAGER_OK); 1256 } 1257 m = vm_page_lookup_busy_try(object, mreq->pindex + 1, 1258 TRUE, &error); 1259 if (error) { 1260 vm_object_drop(object); 1261 return(VM_PAGER_OK); 1262 } else if (m == NULL) { 1263 /* 1264 * Use VM_ALLOC_QUICK to avoid blocking on cache 1265 * page reuse. 1266 */ 1267 m = vm_page_alloc(object, mreq->pindex + 1, 1268 VM_ALLOC_QUICK); 1269 if (m == NULL) { 1270 vm_object_drop(object); 1271 return(VM_PAGER_OK); 1272 } 1273 } else { 1274 if (m->valid) { 1275 vm_page_wakeup(m); 1276 vm_object_drop(object); 1277 return(VM_PAGER_OK); 1278 } 1279 vm_page_unqueue_nowakeup(m); 1280 } 1281 /* page is busy */ 1282 mreq = m; 1283 raonly = 1; 1284 } else { 1285 raonly = 0; 1286 } 1287 1288 /* 1289 * Try to block-read contiguous pages from swap if sequential, 1290 * otherwise just read one page. Contiguous pages from swap must 1291 * reside within a single device stripe because the I/O cannot be 1292 * broken up across multiple stripes. 
1293 * 1294 * Note that blk and iblk can be SWAPBLK_NONE but the loop is 1295 * set up such that the case(s) are handled implicitly. 1296 */ 1297 blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0); 1298 marray[0] = mreq; 1299 1300 for (i = 1; i <= swap_burst_read && 1301 i < XIO_INTERNAL_PAGES && 1302 mreq->pindex + i < object->size; ++i) { 1303 swblk_t iblk; 1304 1305 iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0); 1306 if (iblk != blk + i) 1307 break; 1308 if ((blk ^ iblk) & ~SWB_DMMASK) 1309 break; 1310 m = vm_page_lookup_busy_try(object, mreq->pindex + i, 1311 TRUE, &error); 1312 if (error) { 1313 break; 1314 } else if (m == NULL) { 1315 /* 1316 * Use VM_ALLOC_QUICK to avoid blocking on cache 1317 * page reuse. 1318 */ 1319 m = vm_page_alloc(object, mreq->pindex + i, 1320 VM_ALLOC_QUICK); 1321 if (m == NULL) 1322 break; 1323 } else { 1324 if (m->valid) { 1325 vm_page_wakeup(m); 1326 break; 1327 } 1328 vm_page_unqueue_nowakeup(m); 1329 } 1330 /* page is busy */ 1331 marray[i] = m; 1332 } 1333 if (i > 1) 1334 vm_page_flag_set(marray[i - 1], PG_RAM); 1335 1336 /* 1337 * If mreq is the requested page and we have nothing to do return 1338 * VM_PAGER_FAIL. If raonly is set mreq is just another read-ahead 1339 * page and must be cleaned up. 1340 */ 1341 if (blk == SWAPBLK_NONE) { 1342 KKASSERT(i == 1); 1343 if (raonly) { 1344 vnode_pager_freepage(mreq); 1345 vm_object_drop(object); 1346 return(VM_PAGER_OK); 1347 } else { 1348 vm_object_drop(object); 1349 return(VM_PAGER_FAIL); 1350 } 1351 } 1352 1353 /* 1354 * map our page(s) into kva for input 1355 */ 1356 bp = getpbuf_kva(&nsw_rcount); 1357 bio = &bp->b_bio1; 1358 kva = (vm_offset_t) bp->b_kvabase; 1359 bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t)); 1360 pmap_qenter(kva, bp->b_xio.xio_pages, i); 1361 1362 bp->b_data = (caddr_t)kva; 1363 bp->b_bcount = PAGE_SIZE * i; 1364 bp->b_xio.xio_npages = i; 1365 bio->bio_done = swp_pager_async_iodone; 1366 bio->bio_offset = (off_t)blk << PAGE_SHIFT; 1367 bio->bio_caller_info1.index = SWBIO_READ; 1368 1369 /* 1370 * Set index. If raonly set the index beyond the array so all 1371 * the pages are treated the same, otherwise the original mreq is 1372 * at index 0. 1373 */ 1374 if (raonly) 1375 bio->bio_driver_info = (void *)(intptr_t)i; 1376 else 1377 bio->bio_driver_info = (void *)(intptr_t)0; 1378 1379 for (j = 0; j < i; ++j) 1380 vm_page_flag_set(bp->b_xio.xio_pages[j], PG_SWAPINPROG); 1381 1382 mycpu->gd_cnt.v_swapin++; 1383 mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages; 1384 1385 /* 1386 * We still hold the lock on mreq, and our automatic completion routine 1387 * does not remove it. 1388 */ 1389 vm_object_pip_add(object, bp->b_xio.xio_npages); 1390 1391 /* 1392 * perform the I/O. NOTE!!! bp cannot be considered valid after 1393 * this point because we automatically release it on completion. 1394 * Instead, we look at the one page we are interested in which we 1395 * still hold a lock on even through the I/O completion. 1396 * 1397 * The other pages in our m[] array are also released on completion, 1398 * so we cannot assume they are valid anymore either. 1399 */ 1400 bp->b_cmd = BUF_CMD_READ; 1401 BUF_KERNPROC(bp); 1402 vn_strategy(swapdev_vp, bio); 1403 1404 /* 1405 * Wait for the page we want to complete. PG_SWAPINPROG is always 1406 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE 1407 * is set in the meta-data. 1408 * 1409 * If this is a read-ahead only we return immediately without 1410 * waiting for I/O. 
1411 */ 1412 if (raonly) { 1413 vm_object_drop(object); 1414 return(VM_PAGER_OK); 1415 } 1416 1417 /* 1418 * Read-ahead includes originally requested page case. 1419 */ 1420 for (;;) { 1421 flags = mreq->flags; 1422 cpu_ccfence(); 1423 if ((flags & PG_SWAPINPROG) == 0) 1424 break; 1425 tsleep_interlock(mreq, 0); 1426 if (!atomic_cmpset_int(&mreq->flags, flags, 1427 flags | PG_WANTED | PG_REFERENCED)) { 1428 continue; 1429 } 1430 mycpu->gd_cnt.v_intrans++; 1431 if (tsleep(mreq, PINTERLOCKED, "swread", hz*20)) { 1432 kprintf( 1433 "swap_pager: indefinite wait buffer: " 1434 " bp %p offset: %lld, size: %ld\n", 1435 bp, 1436 (long long)bio->bio_offset, 1437 (long)bp->b_bcount 1438 ); 1439 } 1440 } 1441 1442 /* 1443 * Disallow speculative reads prior to the PG_SWAPINPROG test. 1444 */ 1445 cpu_lfence(); 1446 1447 /* 1448 * mreq is left busied after completion, but all the other pages 1449 * are freed. If we had an unrecoverable read error the page will 1450 * not be valid. 1451 */ 1452 vm_object_drop(object); 1453 if (mreq->valid != VM_PAGE_BITS_ALL) 1454 return(VM_PAGER_ERROR); 1455 else 1456 return(VM_PAGER_OK); 1457 1458 /* 1459 * A final note: in a low swap situation, we cannot deallocate swap 1460 * and mark a page dirty here because the caller is likely to mark 1461 * the page clean when we return, causing the page to possibly revert 1462 * to all-zero's later. 1463 */ 1464 } 1465 1466 /* 1467 * swap_pager_putpages: 1468 * 1469 * Assign swap (if necessary) and initiate I/O on the specified pages. 1470 * 1471 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects 1472 * are automatically converted to SWAP objects. 1473 * 1474 * In a low memory situation we may block in vn_strategy(), but the new 1475 * vm_page reservation system coupled with properly written VFS devices 1476 * should ensure that no low-memory deadlock occurs. This is an area 1477 * which needs work. 1478 * 1479 * The parent has N vm_object_pip_add() references prior to 1480 * calling us and will remove references for rtvals[] that are 1481 * not set to VM_PAGER_PEND. We need to remove the rest on I/O 1482 * completion. 1483 * 1484 * The parent has soft-busy'd the pages it passes us and will unbusy 1485 * those whos rtvals[] entry is not set to VM_PAGER_PEND on return. 1486 * We need to unbusy the rest on I/O completion. 1487 * 1488 * No requirements. 1489 */ 1490 void 1491 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count, 1492 int flags, int *rtvals) 1493 { 1494 int i; 1495 int n = 0; 1496 1497 vm_object_hold(object); 1498 1499 if (count && m[0]->object != object) { 1500 panic("swap_pager_getpages: object mismatch %p/%p", 1501 object, 1502 m[0]->object 1503 ); 1504 } 1505 1506 /* 1507 * Step 1 1508 * 1509 * Turn object into OBJT_SWAP 1510 * Check for bogus sysops 1511 * 1512 * Force sync if not pageout process, we don't want any single 1513 * non-pageout process to be able to hog the I/O subsystem! This 1514 * can be overridden by setting. 1515 */ 1516 if (object->type == OBJT_DEFAULT) { 1517 if (object->type == OBJT_DEFAULT) 1518 swp_pager_meta_convert(object); 1519 } 1520 1521 /* 1522 * Normally we force synchronous swap I/O if this is not the 1523 * pageout daemon to prevent any single user process limited 1524 * via RLIMIT_RSS from hogging swap write bandwidth. 1525 */ 1526 if (curthread != pagethread && swap_user_async == 0) 1527 flags |= VM_PAGER_PUT_SYNC; 1528 1529 /* 1530 * Step 2 1531 * 1532 * Update nsw parameters from swap_async_max sysctl values. 
1533 * Do not let the sysop crash the machine with bogus numbers. 1534 */ 1535 if (swap_async_max != nsw_wcount_async_max) { 1536 int n; 1537 1538 /* 1539 * limit range 1540 */ 1541 if ((n = swap_async_max) > nswbuf_kva / 2) 1542 n = nswbuf_kva / 2; 1543 if (n < 1) 1544 n = 1; 1545 swap_async_max = n; 1546 1547 /* 1548 * Adjust difference ( if possible ). If the current async 1549 * count is too low, we may not be able to make the adjustment 1550 * at this time. 1551 * 1552 * vm_token needed for nsw_wcount sleep interlock 1553 */ 1554 lwkt_gettoken(&vm_token); 1555 n -= nsw_wcount_async_max; 1556 if (nsw_wcount_async + n >= 0) { 1557 nsw_wcount_async_max += n; 1558 pbuf_adjcount(&nsw_wcount_async, n); 1559 } 1560 lwkt_reltoken(&vm_token); 1561 } 1562 1563 /* 1564 * Step 3 1565 * 1566 * Assign swap blocks and issue I/O. We reallocate swap on the fly. 1567 * The page is left dirty until the pageout operation completes 1568 * successfully. 1569 */ 1570 1571 for (i = 0; i < count; i += n) { 1572 struct buf *bp; 1573 struct bio *bio; 1574 swblk_t blk; 1575 int j; 1576 1577 /* 1578 * Maximum I/O size is limited by a number of factors. 1579 */ 1580 1581 n = min(BLIST_MAX_ALLOC, count - i); 1582 n = min(n, nsw_cluster_max); 1583 1584 lwkt_gettoken(&vm_token); 1585 1586 /* 1587 * Get biggest block of swap we can. If we fail, fall 1588 * back and try to allocate a smaller block. Don't go 1589 * overboard trying to allocate space if it would overly 1590 * fragment swap. 1591 */ 1592 while ( 1593 (blk = swp_pager_getswapspace(object, n)) == SWAPBLK_NONE && 1594 n > 4 1595 ) { 1596 n >>= 1; 1597 } 1598 if (blk == SWAPBLK_NONE) { 1599 for (j = 0; j < n; ++j) 1600 rtvals[i+j] = VM_PAGER_FAIL; 1601 lwkt_reltoken(&vm_token); 1602 continue; 1603 } 1604 if (vm_report_swap_allocs > 0) { 1605 kprintf("swap_alloc %08jx,%d\n", (intmax_t)blk, n); 1606 --vm_report_swap_allocs; 1607 } 1608 1609 /* 1610 * The I/O we are constructing cannot cross a physical 1611 * disk boundry in the swap stripe. 1612 */ 1613 if ((blk ^ (blk + n)) & ~SWB_DMMASK) { 1614 j = ((blk + SWB_DMMAX) & ~SWB_DMMASK) - blk; 1615 swp_pager_freeswapspace(object, blk + j, n - j); 1616 n = j; 1617 } 1618 1619 /* 1620 * All I/O parameters have been satisfied, build the I/O 1621 * request and assign the swap space. 
1622 */ 1623 if ((flags & VM_PAGER_PUT_SYNC)) 1624 bp = getpbuf_kva(&nsw_wcount_sync); 1625 else 1626 bp = getpbuf_kva(&nsw_wcount_async); 1627 bio = &bp->b_bio1; 1628 1629 lwkt_reltoken(&vm_token); 1630 1631 pmap_qenter((vm_offset_t)bp->b_data, &m[i], n); 1632 1633 bp->b_bcount = PAGE_SIZE * n; 1634 bio->bio_offset = (off_t)blk << PAGE_SHIFT; 1635 1636 for (j = 0; j < n; ++j) { 1637 vm_page_t mreq = m[i+j]; 1638 1639 swp_pager_meta_build(mreq->object, mreq->pindex, 1640 blk + j); 1641 if (object->type == OBJT_SWAP) 1642 vm_page_dirty(mreq); 1643 rtvals[i+j] = VM_PAGER_OK; 1644 1645 vm_page_flag_set(mreq, PG_SWAPINPROG); 1646 bp->b_xio.xio_pages[j] = mreq; 1647 } 1648 bp->b_xio.xio_npages = n; 1649 1650 mycpu->gd_cnt.v_swapout++; 1651 mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages; 1652 1653 bp->b_dirtyoff = 0; /* req'd for NFS */ 1654 bp->b_dirtyend = bp->b_bcount; /* req'd for NFS */ 1655 bp->b_cmd = BUF_CMD_WRITE; 1656 bio->bio_caller_info1.index = SWBIO_WRITE; 1657 1658 #if 0 1659 /* PMAP TESTING CODE (useful, keep it in but #if 0'd) */ 1660 bio->bio_crc = iscsi_crc32(bp->b_data, bp->b_bcount); 1661 { 1662 uint32_t crc = 0; 1663 for (j = 0; j < n; ++j) { 1664 vm_page_t mm = bp->b_xio.xio_pages[j]; 1665 char *p = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mm)); 1666 crc = iscsi_crc32_ext(p, PAGE_SIZE, crc); 1667 } 1668 if (bio->bio_crc != crc) { 1669 kprintf("PREWRITE MISMATCH-A " 1670 "bdata=%08x dmap=%08x bdata=%08x (%d)\n", 1671 bio->bio_crc, 1672 crc, 1673 iscsi_crc32(bp->b_data, bp->b_bcount), 1674 bp->b_bcount); 1675 #ifdef _KERNEL_VIRTUAL 1676 madvise(bp->b_data, bp->b_bcount, MADV_INVAL); 1677 #endif 1678 crc = 0; 1679 for (j = 0; j < n; ++j) { 1680 vm_page_t mm = bp->b_xio.xio_pages[j]; 1681 char *p = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mm)); 1682 crc = iscsi_crc32_ext(p, PAGE_SIZE, crc); 1683 } 1684 kprintf("PREWRITE MISMATCH-B " 1685 "bdata=%08x dmap=%08x\n", 1686 iscsi_crc32(bp->b_data, bp->b_bcount), 1687 crc); 1688 } 1689 } 1690 #endif 1691 1692 /* 1693 * asynchronous 1694 */ 1695 if ((flags & VM_PAGER_PUT_SYNC) == 0) { 1696 bio->bio_done = swp_pager_async_iodone; 1697 BUF_KERNPROC(bp); 1698 vn_strategy(swapdev_vp, bio); 1699 1700 for (j = 0; j < n; ++j) 1701 rtvals[i+j] = VM_PAGER_PEND; 1702 continue; 1703 } 1704 1705 /* 1706 * Issue synchrnously. 1707 * 1708 * Wait for the sync I/O to complete, then update rtvals. 1709 * We just set the rtvals[] to VM_PAGER_PEND so we can call 1710 * our async completion routine at the end, thus avoiding a 1711 * double-free. 1712 */ 1713 bio->bio_caller_info1.index |= SWBIO_SYNC; 1714 if (flags & VM_PAGER_TRY_TO_CACHE) 1715 bio->bio_caller_info1.index |= SWBIO_TTC; 1716 bio->bio_done = biodone_sync; 1717 bio->bio_flags |= BIO_SYNC; 1718 vn_strategy(swapdev_vp, bio); 1719 biowait(bio, "swwrt"); 1720 1721 for (j = 0; j < n; ++j) 1722 rtvals[i+j] = VM_PAGER_PEND; 1723 1724 /* 1725 * Now that we are through with the bp, we can call the 1726 * normal async completion, which frees everything up. 1727 */ 1728 swp_pager_async_iodone(bio); 1729 } 1730 vm_object_drop(object); 1731 } 1732 1733 /* 1734 * No requirements. 1735 * 1736 * Recalculate the low and high-water marks. 1737 */ 1738 void 1739 swap_pager_newswap(void) 1740 { 1741 /* 1742 * NOTE: vm_swap_max cannot exceed 1 billion blocks, which is the 1743 * limitation imposed by the blist code. Remember that this 1744 * will be divided by NSWAP_MAX (4), so each swap device is 1745 * limited to around a terrabyte. 
1746 */ 1747 if (vm_swap_max) { 1748 nswap_lowat = (int64_t)vm_swap_max * 4 / 100; /* 4% left */ 1749 nswap_hiwat = (int64_t)vm_swap_max * 6 / 100; /* 6% left */ 1750 kprintf("swap low/high-water marks set to %d/%d\n", 1751 nswap_lowat, nswap_hiwat); 1752 } else { 1753 nswap_lowat = 128; 1754 nswap_hiwat = 512; 1755 } 1756 swp_sizecheck(); 1757 } 1758 1759 /* 1760 * swp_pager_async_iodone: 1761 * 1762 * Completion routine for asynchronous reads and writes from/to swap. 1763 * Also called manually by synchronous code to finish up a bp. 1764 * 1765 * For READ operations, the pages are PG_BUSY'd. For WRITE operations, 1766 * the pages are vm_page_t->busy'd. For READ operations, we PG_BUSY 1767 * unbusy all pages except the 'main' request page. For WRITE 1768 * operations, we vm_page_t->busy'd unbusy all pages ( we can do this 1769 * because we marked them all VM_PAGER_PEND on return from putpages ). 1770 * 1771 * This routine may not block. 1772 * 1773 * No requirements. 1774 */ 1775 static void 1776 swp_pager_async_iodone(struct bio *bio) 1777 { 1778 struct buf *bp = bio->bio_buf; 1779 vm_object_t object = NULL; 1780 int i; 1781 int *nswptr; 1782 1783 /* 1784 * report error 1785 */ 1786 if (bp->b_flags & B_ERROR) { 1787 kprintf( 1788 "swap_pager: I/O error - %s failed; offset %lld," 1789 "size %ld, error %d\n", 1790 ((bio->bio_caller_info1.index & SWBIO_READ) ? 1791 "pagein" : "pageout"), 1792 (long long)bio->bio_offset, 1793 (long)bp->b_bcount, 1794 bp->b_error 1795 ); 1796 } 1797 1798 /* 1799 * set object. 1800 */ 1801 if (bp->b_xio.xio_npages) 1802 object = bp->b_xio.xio_pages[0]->object; 1803 1804 #if 0 1805 /* PMAP TESTING CODE (useful, keep it in but #if 0'd) */ 1806 if (bio->bio_caller_info1.index & SWBIO_WRITE) { 1807 if (bio->bio_crc != iscsi_crc32(bp->b_data, bp->b_bcount)) { 1808 kprintf("SWAPOUT: BADCRC %08x %08x\n", 1809 bio->bio_crc, 1810 iscsi_crc32(bp->b_data, bp->b_bcount)); 1811 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 1812 vm_page_t m = bp->b_xio.xio_pages[i]; 1813 if (m->flags & PG_WRITEABLE) 1814 kprintf("SWAPOUT: " 1815 "%d/%d %p writable\n", 1816 i, bp->b_xio.xio_npages, m); 1817 } 1818 } 1819 } 1820 #endif 1821 1822 /* 1823 * remove the mapping for kernel virtual 1824 */ 1825 pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages); 1826 1827 /* 1828 * cleanup pages. If an error occurs writing to swap, we are in 1829 * very serious trouble. If it happens to be a disk error, though, 1830 * we may be able to recover by reassigning the swap later on. So 1831 * in this case we remove the m->swapblk assignment for the page 1832 * but do not free it in the rlist. The errornous block(s) are thus 1833 * never reallocated as swap. Redirty the page and continue. 1834 */ 1835 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 1836 vm_page_t m = bp->b_xio.xio_pages[i]; 1837 1838 if (bp->b_flags & B_ERROR) { 1839 /* 1840 * If an error occurs I'd love to throw the swapblk 1841 * away without freeing it back to swapspace, so it 1842 * can never be used again. But I can't from an 1843 * interrupt. 1844 */ 1845 1846 if (bio->bio_caller_info1.index & SWBIO_READ) { 1847 /* 1848 * When reading, reqpage needs to stay 1849 * locked for the parent, but all other 1850 * pages can be freed. We still want to 1851 * wakeup the parent waiting on the page, 1852 * though. ( also: pg_reqpage can be -1 and 1853 * not match anything ). 1854 * 1855 * We have to wake specifically requested pages 1856 * up too because we cleared PG_SWAPINPROG and 1857 * someone may be waiting for that. 
1858 * 1859 * NOTE: For reads, m->dirty will probably 1860 * be overridden by the original caller 1861 * of getpages so don't play cute tricks 1862 * here. 1863 * 1864 * NOTE: We can't actually free the page from 1865 * here, because this is an interrupt. 1866 * It is not legal to mess with 1867 * object->memq from an interrupt. 1868 * Deactivate the page instead. 1869 * 1870 * WARNING! The instant PG_SWAPINPROG is 1871 * cleared another cpu may start 1872 * using the mreq page (it will 1873 * check m->valid immediately). 1874 */ 1875 1876 m->valid = 0; 1877 vm_page_flag_clear(m, PG_SWAPINPROG); 1878 1879 /* 1880 * bio_driver_info holds the requested page 1881 * index. 1882 */ 1883 if (i != (int)(intptr_t)bio->bio_driver_info) { 1884 vm_page_deactivate(m); 1885 vm_page_wakeup(m); 1886 } else { 1887 vm_page_flash(m); 1888 } 1889 /* 1890 * If i == bp->b_pager.pg_reqpage, do not wake 1891 * the page up. The caller needs to. 1892 */ 1893 } else { 1894 /* 1895 * If a write error occurs remove the swap 1896 * assignment (note that PG_SWAPPED may or 1897 * may not be set depending on prior activity). 1898 * 1899 * Re-dirty OBJT_SWAP pages as there is no 1900 * other backing store, we can't throw the 1901 * page away. 1902 * 1903 * Non-OBJT_SWAP pages (aka swapcache) must 1904 * not be dirtied since they may not have 1905 * been dirty in the first place, and they 1906 * do have backing store (the vnode). 1907 */ 1908 vm_page_busy_wait(m, FALSE, "swadpg"); 1909 vm_object_hold(m->object); 1910 swp_pager_meta_ctl(m->object, m->pindex, 1911 SWM_FREE); 1912 vm_page_flag_clear(m, PG_SWAPPED); 1913 vm_object_drop(m->object); 1914 if (m->object->type == OBJT_SWAP) { 1915 vm_page_dirty(m); 1916 vm_page_activate(m); 1917 } 1918 vm_page_io_finish(m); 1919 vm_page_flag_clear(m, PG_SWAPINPROG); 1920 vm_page_wakeup(m); 1921 } 1922 } else if (bio->bio_caller_info1.index & SWBIO_READ) { 1923 /* 1924 * NOTE: for reads, m->dirty will probably be 1925 * overridden by the original caller of getpages so 1926 * we cannot set them in order to free the underlying 1927 * swap in a low-swap situation. I don't think we'd 1928 * want to do that anyway, but it was an optimization 1929 * that existed in the old swapper for a time before 1930 * it got ripped out due to precisely this problem. 1931 * 1932 * If not the requested page then deactivate it. 1933 * 1934 * Note that the requested page, reqpage, is left 1935 * busied, but we still have to wake it up. The 1936 * other pages are released (unbusied) by 1937 * vm_page_wakeup(). We do not set reqpage's 1938 * valid bits here, it is up to the caller. 1939 */ 1940 1941 /* 1942 * NOTE: Can't call pmap_clear_modify(m) from an 1943 * interrupt thread, the pmap code may have to 1944 * map non-kernel pmaps and currently asserts 1945 * the case. 1946 * 1947 * WARNING! The instant PG_SWAPINPROG is 1948 * cleared another cpu may start 1949 * using the mreq page (it will 1950 * check m->valid immediately). 1951 */ 1952 /*pmap_clear_modify(m);*/ 1953 m->valid = VM_PAGE_BITS_ALL; 1954 vm_page_undirty(m); 1955 vm_page_flag_set(m, PG_SWAPPED); 1956 vm_page_flag_clear(m, PG_SWAPINPROG); 1957 1958 /* 1959 * We have to wake specifically requested pages 1960 * up too because we cleared PG_SWAPINPROG and 1961 * could be waiting for it in getpages. However, 1962 * be sure to not unbusy getpages specifically 1963 * requested page - getpages expects it to be 1964 * left busy. 
			 *
			 * bio_driver_info holds the requested page
			 */
			if (i != (int)(intptr_t)bio->bio_driver_info) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * Mark the page clean but do not mess with the
			 * pmap-layer's modified state.  That state should
			 * also be clear since the caller protected the
			 * page VM_PROT_READ, but allow the case.
			 *
			 * We are in an interrupt, avoid pmap operations.
			 *
			 * If we have a severe page deficit, deactivate the
			 * page.  Do not try to cache it (which would also
			 * involve a pmap op), because the page might still
			 * be read-heavy.
			 *
			 * When using the swap to cache clean vnode pages
			 * we do not mess with the page dirty bits.
			 *
			 * NOTE! Nobody is waiting for the key mreq page
			 *	 on write completion.
			 */
			vm_page_busy_wait(m, FALSE, "swadpg");
			if (m->object->type == OBJT_SWAP)
				vm_page_undirty(m);
			vm_page_flag_set(m, PG_SWAPPED);
			vm_page_flag_clear(m, PG_SWAPINPROG);
			if (vm_page_count_severe())
				vm_page_deactivate(m);
			vm_page_io_finish(m);
			if (bio->bio_caller_info1.index & SWBIO_TTC)
				vm_page_try_to_cache(m);
			else
				vm_page_wakeup(m);
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */

	if (object)
		vm_object_pip_wakeup_n(object, bp->b_xio.xio_npages);

	/*
	 * Release the physical I/O buffer.
	 *
	 * NOTE: Due to synchronous operations in the write case b_cmd may
	 *	 already be set to BUF_CMD_DONE and BIO_SYNC may have already
	 *	 been cleared.
	 *
	 * Use vm_token to interlock nsw_rcount/wcount wakeup?
	 */
	lwkt_gettoken(&vm_token);
	if (bio->bio_caller_info1.index & SWBIO_READ)
		nswptr = &nsw_rcount;
	else if (bio->bio_caller_info1.index & SWBIO_SYNC)
		nswptr = &nsw_wcount_sync;
	else
		nswptr = &nsw_wcount_async;
	bp->b_cmd = BUF_CMD_DONE;
	relpbuf(bp, nswptr);
	lwkt_reltoken(&vm_token);
}

/*
 * Fault-in a potentially swapped page and remove the swap reference.
 * (used by swapoff code)
 *
 * object must be held.
 */
static __inline void
swp_pager_fault_page(vm_object_t object, int *sharedp, vm_pindex_t pindex)
{
	struct vnode *vp;
	vm_page_t m;
	int error;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	if (object->type == OBJT_VNODE) {
		/*
		 * Any swap related to a vnode is due to swapcache.  We must
		 * vget() the vnode in case it is not active (otherwise
		 * vref() will panic).  Calling vm_object_page_remove() will
		 * ensure that any swap ref is removed interlocked with the
		 * page.  clean_only is set to TRUE so we don't throw away
		 * dirty pages.
		 */
		vp = object->handle;
		error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
		if (error == 0) {
			vm_object_page_remove(object, pindex, pindex + 1,
					      TRUE);
			vput(vp);
		}
	} else {
		/*
		 * Otherwise it is a normal OBJT_SWAP object and we can
		 * fault the page in and remove the swap.
		 */
		m = vm_fault_object_page(object, IDX_TO_OFF(pindex),
					 VM_PROT_NONE,
					 VM_FAULT_DIRTY | VM_FAULT_UNSWAP,
					 sharedp, &error);
		if (m)
			vm_page_unhold(m);
	}
}
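/*
 * NOTE (added commentary): swap_pager_swapoff() below walks each vm_object
 * hash list using a dummy OBJT_MARKER object to hold its place.  Faulting
 * swapped pages back in can block, and lwkt tokens are not retained across
 * a blocking operation, so the list can be ripped up underneath the scan.
 * After each object is processed the marker is moved just past it (if the
 * object is still where it was) and the scan resumes from the marker.
 */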
/*
 * This removes all swap blocks related to a particular device.  We have
 * to be careful of ripups during the scan.
 */
static int swp_pager_swapoff_callback(struct swblock *swap, void *data);

int
swap_pager_swapoff(int devidx)
{
	struct vm_object_hash *hash;
	struct swswapoffinfo info;
	struct vm_object marker;
	vm_object_t object;
	int n;

	bzero(&marker, sizeof(marker));
	marker.type = OBJT_MARKER;

	for (n = 0; n < VMOBJ_HSIZE; ++n) {
		hash = &vm_object_hash[n];

		lwkt_gettoken(&hash->token);
		TAILQ_INSERT_HEAD(&hash->list, &marker, object_list);

		while ((object = TAILQ_NEXT(&marker, object_list)) != NULL) {
			if (object->type == OBJT_MARKER)
				goto skip;
			if (object->type != OBJT_SWAP &&
			    object->type != OBJT_VNODE)
				goto skip;
			vm_object_hold(object);
			if (object->type != OBJT_SWAP &&
			    object->type != OBJT_VNODE) {
				vm_object_drop(object);
				goto skip;
			}
			info.object = object;
			info.shared = 0;
			info.devidx = devidx;
			swblock_rb_tree_RB_SCAN(&object->swblock_root,
					    NULL, swp_pager_swapoff_callback,
					    &info);
			vm_object_drop(object);
skip:
			if (object == TAILQ_NEXT(&marker, object_list)) {
				TAILQ_REMOVE(&hash->list, &marker,
					     object_list);
				TAILQ_INSERT_AFTER(&hash->list, object,
						   &marker, object_list);
			}
		}
		TAILQ_REMOVE(&hash->list, &marker, object_list);
		lwkt_reltoken(&hash->token);
	}

	/*
	 * If we fail to locate all swblocks we just fail gracefully and
	 * do not bother to restore paging on the swap device.  The user
	 * can simply retry the swapoff later.
	 */
	if (swdevt[devidx].sw_nused)
		return (1);
	else
		return (0);
}

static
int
swp_pager_swapoff_callback(struct swblock *swap, void *data)
{
	struct swswapoffinfo *info = data;
	vm_object_t object = info->object;
	vm_pindex_t index;
	swblk_t v;
	int i;

	index = swap->swb_index;
	for (i = 0; i < SWAP_META_PAGES; ++i) {
		/*
		 * Make sure we don't race a dying object.  This will
		 * kill the scan of the object's swap blocks entirely.
		 */
		if (object->flags & OBJ_DEAD)
			return(-1);

		/*
		 * Fault the page, which can obviously block.  If the swap
		 * structure disappears break out.
		 */
		v = swap->swb_pages[i];
		if (v != SWAPBLK_NONE && BLK2DEVIDX(v) == info->devidx) {
			swp_pager_fault_page(object, &info->shared,
					     swap->swb_index + i);
			/* swap ptr might go away */
			if (RB_LOOKUP(swblock_rb_tree,
				      &object->swblock_root, index) != swap) {
				break;
			}
		}
	}
	return(0);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.
 *
 *	Swap metadata is kept per-object in a red-black tree of swblock
 *	structures rooted at object->swblock_root; the object also
 *	maintains tracking counters (swblock_count).
 */
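/*
 * Illustrative example (added; assumes, purely for the numbers, that
 * SWAP_META_PAGES is 16 and SWAP_META_MASK is therefore 0x0f): page
 * index 0x12345 belongs to the swblock whose swb_index is
 * 0x12345 & ~0x0f = 0x12340 and occupies slot 0x12345 & 0x0f = 5 of
 * that swblock's swb_pages[] array.
 */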
/*
 * Lookup the swblock containing the specified swap block index.
 *
 * The caller must hold the object.
 */
static __inline
struct swblock *
swp_pager_lookup(vm_object_t object, vm_pindex_t index)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	index &= ~(vm_pindex_t)SWAP_META_MASK;
	return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
}

/*
 * Remove a swblock from the RB tree.
 *
 * The caller must hold the object.
 */
static __inline
void
swp_pager_remove(vm_object_t object, struct swblock *swap)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap);
}

/*
 * Convert default object to swap object if necessary
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_convert(vm_object_t object)
{
	if (object->type == OBJT_DEFAULT) {
		object->type = OBJT_SWAP;
		KKASSERT(object->swblock_count == 0);
	}
}

/*
 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.  Vnode objects do not need to be converted.
 *
 *	The specified swapblk is added to the object's swap metadata (the
 *	caller must pass a valid swapblk, see the KKASSERT below).  Any
 *	previously assigned swapblk for the index is freed.
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk)
{
	struct swblock *swap;
	struct swblock *oswap;
	vm_pindex_t v;

	KKASSERT(swapblk != SWAPBLK_NONE);
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Convert object if necessary
	 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);

	/*
	 * Locate swblock.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the zone we
	 * wait and, since the swblock tree may have changed, retry.
	 */
retry:
	swap = swp_pager_lookup(object, index);

	if (swap == NULL) {
		int i;

		swap = zalloc(swap_zone);
		if (swap == NULL) {
			vm_wait(0);
			goto retry;
		}
		swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK;
		swap->swb_count = 0;

		++object->swblock_count;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
		oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root,
				  swap);
		KKASSERT(oswap == NULL);
	}

	/*
	 * Delete prior contents of metadata.
	 *
	 * NOTE: Decrement swb_count after the freeing operation (which
	 *	 might block) to prevent racing destruction of the swblock.
	 */
	index &= SWAP_META_MASK;

	while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) {
		swap->swb_pages[index] = SWAPBLK_NONE;
		/* can block */
		swp_pager_freeswapspace(object, v, 1);
		--swap->swb_count;
		--mycpu->gd_vmtotal.t_vm;
	}

	/*
	 * Enter block into metadata
	 */
	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE) {
		++swap->swb_count;
		++mycpu->gd_vmtotal.t_vm;
	}
}
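/*
 * Illustrative note (added): because swp_pager_meta_build() frees any
 * block previously assigned to the same index before entering the new
 * one, re-writing a dirty page simply reallocates its swap on the fly.
 * For example, calling it twice for the same pindex with (hypothetical)
 * blocks b1 then b2 leaves only b2 assigned and returns b1 to the bitmap.
 */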
/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 * The caller must hold the object.
 */
static int swp_pager_meta_free_callback(struct swblock *swb, void *data);

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count)
{
	struct swfreeinfo info;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Nothing to do
	 */
	if (object->swblock_count == 0) {
		KKASSERT(RB_EMPTY(&object->swblock_root));
		return;
	}
	if (count == 0)
		return;

	/*
	 * Setup for RB tree scan.  Note that the pindex range can be huge
	 * due to the 64 bit page index space so we cannot safely iterate.
	 */
	info.object = object;
	info.basei = index & ~(vm_pindex_t)SWAP_META_MASK;
	info.begi = index;
	info.endi = index + count - 1;
	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp,
				swp_pager_meta_free_callback, &info);
}

/*
 * The caller must hold the object.
 */
static
int
swp_pager_meta_free_callback(struct swblock *swap, void *data)
{
	struct swfreeinfo *info = data;
	vm_object_t object = info->object;
	int index;
	int eindex;

	/*
	 * Figure out the range within the swblock.  The wider scan may
	 * return edge-case swap blocks when the start and/or end points
	 * are in the middle of a block.
	 */
	if (swap->swb_index < info->begi)
		index = (int)info->begi & SWAP_META_MASK;
	else
		index = 0;

	if (swap->swb_index + SWAP_META_PAGES > info->endi)
		eindex = (int)info->endi & SWAP_META_MASK;
	else
		eindex = SWAP_META_MASK;

	/*
	 * Scan and free the blocks.  The loop terminates early
	 * if (swap) runs out of blocks and is freed.
	 *
	 * NOTE: Decrement swb_count after swp_pager_freeswapspace()
	 *	 to deal with a zfree race.
	 */
	while (index <= eindex) {
		swblk_t v = swap->swb_pages[index];

		if (v != SWAPBLK_NONE) {
			swap->swb_pages[index] = SWAPBLK_NONE;
			/* can block */
			swp_pager_freeswapspace(object, v, 1);
			--mycpu->gd_vmtotal.t_vm;
			if (--swap->swb_count == 0) {
				swp_pager_remove(object, swap);
				zfree(swap_zone, swap);
				--object->swblock_count;
				break;
			}
		}
		++index;
	}

	/* swap may be invalid here due to zfree above */
	lwkt_yield();

	return(0);
}
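/*
 * Worked example for the callback above (added; again assumes a
 * hypothetical SWAP_META_PAGES of 16): freeing page indices 5..36 visits
 * three swblocks.  In the first (swb_index 0) the slot range is 5..15,
 * in the second (swb_index 16) it is 0..15, and in the last (swb_index 32)
 * it is 0..4.
 */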
/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 * NOTE: Decrement swb_count after the freeing operation (which
 *	 might block) to prevent racing destruction of the swblock.
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	struct swblock *swap;
	int i;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	while ((swap = RB_ROOT(&object->swblock_root)) != NULL) {
		swp_pager_remove(object, swap);
		for (i = 0; i < SWAP_META_PAGES; ++i) {
			swblk_t v = swap->swb_pages[i];
			if (v != SWAPBLK_NONE) {
				/* can block */
				swp_pager_freeswapspace(object, v, 1);
				--swap->swb_count;
				--mycpu->gd_vmtotal.t_vm;
			}
		}
		if (swap->swb_count != 0)
			panic("swap_pager_meta_free_all: swb_count != 0");
		zfree(swap_zone, swap);
		--object->swblock_count;
		lwkt_yield();
	}
	KKASSERT(object->swblock_count == 0);
}

/*
 * SWP_PAGER_META_CTL() - misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free it... pop it out
 *
 * The caller must hold the object.
 */
static swblk_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
{
	struct swblock *swap;
	swblk_t r1;

	if (object->swblock_count == 0)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	swap = swp_pager_lookup(object, index);

	if (swap != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				--mycpu->gd_vmtotal.t_vm;
				if (--swap->swb_count == 0) {
					swp_pager_remove(object, swap);
					zfree(swap_zone, swap);
					--object->swblock_count;
				}
			}
			/* swap ptr may be invalid */
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(object, r1, 1);
				r1 = SWAPBLK_NONE;
			}
		}
		/* swap ptr may be invalid */
	}
	return(r1);
}
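#if 0
/*
 * Illustrative usage sketch (added, never compiled): how a caller that
 * holds the object token might use swp_pager_meta_ctl().  A flags value
 * of 0 is a pure lookup, SWM_POP hands the swap block to the caller
 * without freeing it, and SWM_FREE both removes the assignment and
 * returns the block to the swap bitmap.  The function name below is
 * hypothetical.
 */
static void
swp_pager_meta_ctl_example(vm_object_t object, vm_pindex_t pindex)
{
	swblk_t blk;

	vm_object_hold(object);			/* acquire the object token */
	blk = swp_pager_meta_ctl(object, pindex, 0);	/* lookup only */
	if (blk != SWAPBLK_NONE) {
		/* remove the assignment and free the underlying swap */
		swp_pager_meta_ctl(object, pindex, SWM_FREE);
	}
	vm_object_drop(object);
}
#endif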