1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 1998-2010 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Matthew Dillon <dillon@backplane.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * Copyright (c) 1994 John S. Dyson 37 * Copyright (c) 1990 University of Utah. 38 * Copyright (c) 1991, 1993 39 * The Regents of the University of California. All rights reserved. 40 * 41 * This code is derived from software contributed to Berkeley by 42 * the Systems Programming Group of the University of Utah Computer 43 * Science Department. 44 * 45 * Redistribution and use in source and binary forms, with or without 46 * modification, are permitted provided that the following conditions 47 * are met: 48 * 1. Redistributions of source code must retain the above copyright 49 * notice, this list of conditions and the following disclaimer. 50 * 2. Redistributions in binary form must reproduce the above copyright 51 * notice, this list of conditions and the following disclaimer in the 52 * documentation and/or other materials provided with the distribution. 53 * 3. Neither the name of the University nor the names of its contributors 54 * may be used to endorse or promote products derived from this software 55 * without specific prior written permission. 56 * 57 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 58 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 59 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 60 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 61 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 62 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 63 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 64 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 65 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 66 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 67 * SUCH DAMAGE. 68 * 69 * New Swap System 70 * Matthew Dillon 71 * 72 * Radix Bitmap 'blists'. 73 * 74 * - The new swapper uses the new radix bitmap code. This should scale 75 * to arbitrarily small or arbitrarily large swap spaces and an almost 76 * arbitrary degree of fragmentation. 77 * 78 * Features: 79 * 80 * - on the fly reallocation of swap during putpages. The new system 81 * does not try to keep previously allocated swap blocks for dirty 82 * pages. 83 * 84 * - on the fly deallocation of swap 85 * 86 * - No more garbage collection required. Unnecessarily allocated swap 87 * blocks only exist for dirty vm_page_t's now and these are already 88 * cycled (in a high-load system) by the pager. We also do on-the-fly 89 * removal of invalidated swap blocks when a page is destroyed 90 * or renamed. 91 * 92 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$ 93 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94 94 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $ 95 */ 96 97 #include <sys/param.h> 98 #include <sys/systm.h> 99 #include <sys/conf.h> 100 #include <sys/kernel.h> 101 #include <sys/proc.h> 102 #include <sys/buf.h> 103 #include <sys/vnode.h> 104 #include <sys/malloc.h> 105 #include <sys/vmmeter.h> 106 #include <sys/sysctl.h> 107 #include <sys/blist.h> 108 #include <sys/lock.h> 109 #include <sys/kcollect.h> 110 111 #include <unistd.h> 112 #include "opt_swap.h" 113 #include <vm/vm.h> 114 #include <vm/vm_object.h> 115 #include <vm/vm_page.h> 116 #include <vm/vm_pager.h> 117 #include <vm/vm_pageout.h> 118 #include <vm/swap_pager.h> 119 #include <vm/vm_extern.h> 120 #include <vm/vm_zone.h> 121 #include <vm/vnode_pager.h> 122 123 #include <sys/buf2.h> 124 #include <vm/vm_page2.h> 125 126 #ifndef MAX_PAGEOUT_CLUSTER 127 #define MAX_PAGEOUT_CLUSTER SWB_NPAGES 128 #endif 129 130 #define SWM_FREE 0x02 /* free, period */ 131 #define SWM_POP 0x04 /* pop out */ 132 133 #define SWBIO_READ 0x01 134 #define SWBIO_WRITE 0x02 135 #define SWBIO_SYNC 0x04 136 #define SWBIO_TTC 0x08 /* for VM_PAGER_TRY_TO_CACHE */ 137 138 struct swfreeinfo { 139 vm_object_t object; 140 vm_pindex_t basei; 141 vm_pindex_t begi; 142 vm_pindex_t endi; /* inclusive */ 143 }; 144 145 struct swswapoffinfo { 146 vm_object_t object; 147 int devidx; 148 int shared; 149 }; 150 151 /* 152 * vm_swap_size is in page-sized chunks now. It was DEV_BSIZE'd chunks 153 * in the old system. 154 */ 155 156 int swap_pager_full; /* swap space exhaustion (task killing) */ 157 int swap_fail_ticks; /* when we became exhausted */ 158 int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/ 159 swblk_t vm_swap_cache_use; 160 swblk_t vm_swap_anon_use; 161 static int vm_report_swap_allocs; 162 163 static int nsw_rcount; /* free read buffers */ 164 static int nsw_wcount_sync; /* limit write buffers / synchronous */ 165 static int nsw_wcount_async; /* limit write buffers / asynchronous */ 166 static int nsw_wcount_async_max;/* assigned maximum */ 167 static int nsw_cluster_max; /* maximum VOP I/O allowed */ 168 169 struct blist *swapblist; 170 static int swap_async_max = 4; /* maximum in-progress async I/O's */ 171 static int swap_burst_read = 0; /* allow burst reading */ 172 static swblk_t swapiterator; /* linearize allocations */ 173 int swap_user_async = 0; /* user swap pager operation can be async */ 174 175 static struct spinlock swapbp_spin = SPINLOCK_INITIALIZER(&swapbp_spin, "swapbp_spin"); 176 177 /* from vm_swap.c */ 178 extern struct vnode *swapdev_vp; 179 extern struct swdevt *swdevt; 180 extern int nswdev; 181 182 #define BLK2DEVIDX(blk) (nswdev > 1 ? blk / SWB_DMMAX % nswdev : 0) 183 184 SYSCTL_INT(_vm, OID_AUTO, swap_async_max, 185 CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops"); 186 SYSCTL_INT(_vm, OID_AUTO, swap_burst_read, 187 CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins"); 188 SYSCTL_INT(_vm, OID_AUTO, swap_user_async, 189 CTLFLAG_RW, &swap_user_async, 0, "Allow async uuser swap write I/O"); 190 191 #if SWBLK_BITS == 64 192 SYSCTL_LONG(_vm, OID_AUTO, swap_cache_use, 193 CTLFLAG_RD, &vm_swap_cache_use, 0, ""); 194 SYSCTL_LONG(_vm, OID_AUTO, swap_anon_use, 195 CTLFLAG_RD, &vm_swap_anon_use, 0, ""); 196 SYSCTL_LONG(_vm, OID_AUTO, swap_size, 197 CTLFLAG_RD, &vm_swap_size, 0, ""); 198 #else 199 SYSCTL_INT(_vm, OID_AUTO, swap_cache_use, 200 CTLFLAG_RD, &vm_swap_cache_use, 0, ""); 201 SYSCTL_INT(_vm, OID_AUTO, swap_anon_use, 202 CTLFLAG_RD, &vm_swap_anon_use, 0, ""); 203 SYSCTL_INT(_vm, OID_AUTO, swap_size, 204 CTLFLAG_RD, &vm_swap_size, 0, ""); 205 #endif 206 SYSCTL_INT(_vm, OID_AUTO, report_swap_allocs, 207 CTLFLAG_RW, &vm_report_swap_allocs, 0, ""); 208 209 vm_zone_t swap_zone; 210 211 /* 212 * Red-Black tree for swblock entries 213 * 214 * The caller must hold vm_token 215 */ 216 RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare, 217 vm_pindex_t, swb_index); 218 219 int 220 rb_swblock_compare(struct swblock *swb1, struct swblock *swb2) 221 { 222 if (swb1->swb_index < swb2->swb_index) 223 return(-1); 224 if (swb1->swb_index > swb2->swb_index) 225 return(1); 226 return(0); 227 } 228 229 static 230 int 231 rb_swblock_scancmp(struct swblock *swb, void *data) 232 { 233 struct swfreeinfo *info = data; 234 235 if (swb->swb_index < info->basei) 236 return(-1); 237 if (swb->swb_index > info->endi) 238 return(1); 239 return(0); 240 } 241 242 static 243 int 244 rb_swblock_condcmp(struct swblock *swb, void *data) 245 { 246 struct swfreeinfo *info = data; 247 248 if (swb->swb_index < info->basei) 249 return(-1); 250 return(0); 251 } 252 253 /* 254 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure 255 * calls hooked from other parts of the VM system and do not appear here. 256 * (see vm/swap_pager.h). 257 */ 258 259 static void swap_pager_dealloc (vm_object_t object); 260 static int swap_pager_getpage (vm_object_t, vm_page_t *, int); 261 static void swap_chain_iodone(struct bio *biox); 262 263 struct pagerops swappagerops = { 264 swap_pager_dealloc, /* deallocate an OBJT_SWAP object */ 265 swap_pager_getpage, /* pagein */ 266 swap_pager_putpages, /* pageout */ 267 swap_pager_haspage /* get backing store status for page */ 268 }; 269 270 /* 271 * SWB_DMMAX is in page-sized chunks with the new swap system. It was 272 * dev-bsized chunks in the old. SWB_DMMAX is always a power of 2. 273 * 274 * swap_*() routines are externally accessible. swp_*() routines are 275 * internal. 276 */ 277 278 int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */ 279 int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */ 280 281 static __inline void swp_sizecheck (void); 282 static void swp_pager_async_iodone (struct bio *bio); 283 284 /* 285 * Swap bitmap functions 286 */ 287 288 static __inline void swp_pager_freeswapspace(vm_object_t object, 289 swblk_t blk, int npages); 290 static __inline swblk_t swp_pager_getswapspace(vm_object_t object, int npages); 291 292 /* 293 * Metadata functions 294 */ 295 296 static void swp_pager_meta_convert(vm_object_t); 297 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t); 298 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t); 299 static void swp_pager_meta_free_all(vm_object_t); 300 static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int); 301 302 /* 303 * SWP_SIZECHECK() - update swap_pager_full indication 304 * 305 * update the swap_pager_almost_full indication and warn when we are 306 * about to run out of swap space, using lowat/hiwat hysteresis. 307 * 308 * Clear swap_pager_full ( task killing ) indication when lowat is met. 309 * 310 * No restrictions on call 311 * This routine may not block. 312 * SMP races are ok. 313 */ 314 static __inline void 315 swp_sizecheck(void) 316 { 317 if (vm_swap_size < nswap_lowat) { 318 if (swap_pager_almost_full == 0) { 319 kprintf("swap_pager: out of swap space\n"); 320 swap_pager_almost_full = 1; 321 swap_fail_ticks = ticks; 322 } 323 } else { 324 swap_pager_full = 0; 325 if (vm_swap_size > nswap_hiwat) 326 swap_pager_almost_full = 0; 327 } 328 } 329 330 /* 331 * Long-term data collection on 10-second interval. Return the value 332 * for KCOLLECT_SWAPPCT and set the values for SWAPANO and SWAPCCAC. 333 * 334 * Return total swap in the scale field. This can change if swap is 335 * regularly added or removed and may cause some historical confusion 336 * in that case, but SWAPPCT will always be historically accurate. 337 */ 338 339 #define PTOB(value) ((uint64_t)(value) << PAGE_SHIFT) 340 341 static uint64_t 342 collect_swap_callback(int n) 343 { 344 uint64_t total = vm_swap_max; 345 uint64_t anon = vm_swap_anon_use; 346 uint64_t cache = vm_swap_cache_use; 347 348 if (total == 0) /* avoid divide by zero */ 349 total = 1; 350 kcollect_setvalue(KCOLLECT_SWAPANO, PTOB(anon)); 351 kcollect_setvalue(KCOLLECT_SWAPCAC, PTOB(cache)); 352 kcollect_setscale(KCOLLECT_SWAPANO, 353 KCOLLECT_SCALE(KCOLLECT_SWAPANO_FORMAT, PTOB(total))); 354 kcollect_setscale(KCOLLECT_SWAPCAC, 355 KCOLLECT_SCALE(KCOLLECT_SWAPCAC_FORMAT, PTOB(total))); 356 return (((anon + cache) * 10000 + (total >> 1)) / total); 357 } 358 359 /* 360 * SWAP_PAGER_INIT() - initialize the swap pager! 361 * 362 * Expected to be started from system init. NOTE: This code is run 363 * before much else so be careful what you depend on. Most of the VM 364 * system has yet to be initialized at this point. 365 * 366 * Called from the low level boot code only. 367 */ 368 static void 369 swap_pager_init(void *arg __unused) 370 { 371 kcollect_register(KCOLLECT_SWAPPCT, "swapuse", collect_swap_callback, 372 KCOLLECT_SCALE(KCOLLECT_SWAPPCT_FORMAT, 0)); 373 kcollect_register(KCOLLECT_SWAPANO, "swapano", NULL, 374 KCOLLECT_SCALE(KCOLLECT_SWAPANO_FORMAT, 0)); 375 kcollect_register(KCOLLECT_SWAPCAC, "swapcac", NULL, 376 KCOLLECT_SCALE(KCOLLECT_SWAPCAC_FORMAT, 0)); 377 } 378 SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL); 379 380 /* 381 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process 382 * 383 * Expected to be started from pageout process once, prior to entering 384 * its main loop. 385 * 386 * Called from the low level boot code only. 387 */ 388 void 389 swap_pager_swap_init(void) 390 { 391 int n, n2; 392 393 /* 394 * Number of in-transit swap bp operations. Don't 395 * exhaust the pbufs completely. Make sure we 396 * initialize workable values (0 will work for hysteresis 397 * but it isn't very efficient). 398 * 399 * The nsw_cluster_max is constrained by the number of pages an XIO 400 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined 401 * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are 402 * constrained by the swap device interleave stripe size. 403 * 404 * Currently we hardwire nsw_wcount_async to 4. This limit is 405 * designed to prevent other I/O from having high latencies due to 406 * our pageout I/O. The value 4 works well for one or two active swap 407 * devices but is probably a little low if you have more. Even so, 408 * a higher value would probably generate only a limited improvement 409 * with three or four active swap devices since the system does not 410 * typically have to pageout at extreme bandwidths. We will want 411 * at least 2 per swap devices, and 4 is a pretty good value if you 412 * have one NFS swap device due to the command/ack latency over NFS. 413 * So it all works out pretty well. 414 */ 415 416 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER); 417 418 nsw_rcount = (nswbuf_kva + 1) / 2; 419 nsw_wcount_sync = (nswbuf_kva + 3) / 4; 420 nsw_wcount_async = 4; 421 nsw_wcount_async_max = nsw_wcount_async; 422 423 /* 424 * The zone is dynamically allocated so generally size it to 425 * maxswzone (32MB to 256GB of KVM). Set a minimum size based 426 * on physical memory of around 8x (each swblock can hold 16 pages). 427 * 428 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio 429 * has increased dramatically. 430 */ 431 n = vmstats.v_page_count / 2; 432 if (maxswzone && n < maxswzone / sizeof(struct swblock)) 433 n = maxswzone / sizeof(struct swblock); 434 n2 = n; 435 436 do { 437 swap_zone = zinit( 438 "SWAPMETA", 439 sizeof(struct swblock), 440 n, 441 ZONE_INTERRUPT); 442 if (swap_zone != NULL) 443 break; 444 /* 445 * if the allocation failed, try a zone two thirds the 446 * size of the previous attempt. 447 */ 448 n -= ((n + 2) / 3); 449 } while (n > 0); 450 451 if (swap_zone == NULL) 452 panic("swap_pager_swap_init: swap_zone == NULL"); 453 if (n2 != n) 454 kprintf("Swap zone entries reduced from %d to %d.\n", n2, n); 455 } 456 457 /* 458 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate 459 * its metadata structures. 460 * 461 * This routine is called from the mmap and fork code to create a new 462 * OBJT_SWAP object. We do this by creating an OBJT_DEFAULT object 463 * and then converting it with swp_pager_meta_convert(). 464 * 465 * We only support unnamed objects. 466 * 467 * No restrictions. 468 */ 469 vm_object_t 470 swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset) 471 { 472 vm_object_t object; 473 474 KKASSERT(handle == NULL); 475 object = vm_object_allocate_hold(OBJT_DEFAULT, 476 OFF_TO_IDX(offset + PAGE_MASK + size)); 477 swp_pager_meta_convert(object); 478 vm_object_drop(object); 479 480 return (object); 481 } 482 483 /* 484 * SWAP_PAGER_DEALLOC() - remove swap metadata from object 485 * 486 * The swap backing for the object is destroyed. The code is 487 * designed such that we can reinstantiate it later, but this 488 * routine is typically called only when the entire object is 489 * about to be destroyed. 490 * 491 * The object must be locked or unreferenceable. 492 * No other requirements. 493 */ 494 static void 495 swap_pager_dealloc(vm_object_t object) 496 { 497 vm_object_hold(object); 498 vm_object_pip_wait(object, "swpdea"); 499 500 /* 501 * Free all remaining metadata. We only bother to free it from 502 * the swap meta data. We do not attempt to free swapblk's still 503 * associated with vm_page_t's for this object. We do not care 504 * if paging is still in progress on some objects. 505 */ 506 swp_pager_meta_free_all(object); 507 vm_object_drop(object); 508 } 509 510 /************************************************************************ 511 * SWAP PAGER BITMAP ROUTINES * 512 ************************************************************************/ 513 514 /* 515 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space 516 * 517 * Allocate swap for the requested number of pages. The starting 518 * swap block number (a page index) is returned or SWAPBLK_NONE 519 * if the allocation failed. 520 * 521 * Also has the side effect of advising that somebody made a mistake 522 * when they configured swap and didn't configure enough. 523 * 524 * The caller must hold the object. 525 * This routine may not block. 526 */ 527 static __inline swblk_t 528 swp_pager_getswapspace(vm_object_t object, int npages) 529 { 530 swblk_t blk; 531 532 lwkt_gettoken(&vm_token); 533 blk = blist_allocat(swapblist, npages, swapiterator); 534 if (blk == SWAPBLK_NONE) 535 blk = blist_allocat(swapblist, npages, 0); 536 if (blk == SWAPBLK_NONE) { 537 if (swap_pager_full != 2) { 538 if (vm_swap_max == 0) 539 kprintf("Warning: The system would like to " 540 "page to swap but no swap space " 541 "is configured!\n"); 542 else 543 kprintf("swap_pager_getswapspace: " 544 "swap full allocating %d pages\n", 545 npages); 546 swap_pager_full = 2; 547 if (swap_pager_almost_full == 0) 548 swap_fail_ticks = ticks; 549 swap_pager_almost_full = 1; 550 } 551 } else { 552 /* swapiterator = blk; disable for now, doesn't work well */ 553 swapacctspace(blk, -npages); 554 if (object->type == OBJT_SWAP) 555 vm_swap_anon_use += npages; 556 else 557 vm_swap_cache_use += npages; 558 swp_sizecheck(); 559 } 560 lwkt_reltoken(&vm_token); 561 return(blk); 562 } 563 564 /* 565 * SWP_PAGER_FREESWAPSPACE() - free raw swap space 566 * 567 * This routine returns the specified swap blocks back to the bitmap. 568 * 569 * Note: This routine may not block (it could in the old swap code), 570 * and through the use of the new blist routines it does not block. 571 * 572 * This routine may not block. 573 */ 574 575 static __inline void 576 swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages) 577 { 578 struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)]; 579 580 lwkt_gettoken(&vm_token); 581 sp->sw_nused -= npages; 582 if (object->type == OBJT_SWAP) 583 vm_swap_anon_use -= npages; 584 else 585 vm_swap_cache_use -= npages; 586 587 if (sp->sw_flags & SW_CLOSING) { 588 lwkt_reltoken(&vm_token); 589 return; 590 } 591 592 blist_free(swapblist, blk, npages); 593 vm_swap_size += npages; 594 swp_sizecheck(); 595 lwkt_reltoken(&vm_token); 596 } 597 598 /* 599 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page 600 * range within an object. 601 * 602 * This is a globally accessible routine. 603 * 604 * This routine removes swapblk assignments from swap metadata. 605 * 606 * The external callers of this routine typically have already destroyed 607 * or renamed vm_page_t's associated with this range in the object so 608 * we should be ok. 609 * 610 * No requirements. 611 */ 612 void 613 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size) 614 { 615 vm_object_hold(object); 616 swp_pager_meta_free(object, start, size); 617 vm_object_drop(object); 618 } 619 620 /* 621 * No requirements. 622 */ 623 void 624 swap_pager_freespace_all(vm_object_t object) 625 { 626 vm_object_hold(object); 627 swp_pager_meta_free_all(object); 628 vm_object_drop(object); 629 } 630 631 /* 632 * This function conditionally frees swap cache swap starting at 633 * (*basei) in the object. (count) swap blocks will be nominally freed. 634 * The actual number of blocks freed can be more or less than the 635 * requested number. 636 * 637 * This function nominally returns the number of blocks freed. However, 638 * the actual number of blocks freed may be less then the returned value. 639 * If the function is unable to exhaust the object or if it is able to 640 * free (approximately) the requested number of blocks it returns 641 * a value n > count. 642 * 643 * If we exhaust the object we will return a value n <= count. 644 * 645 * The caller must hold the object. 646 * 647 * WARNING! If count == 0 then -1 can be returned as a degenerate case, 648 * callers should always pass a count value > 0. 649 */ 650 static int swap_pager_condfree_callback(struct swblock *swap, void *data); 651 652 int 653 swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count) 654 { 655 struct swfreeinfo info; 656 int n; 657 int t; 658 659 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 660 661 info.object = object; 662 info.basei = *basei; /* skip up to this page index */ 663 info.begi = count; /* max swap pages to destroy */ 664 info.endi = count * 8; /* max swblocks to scan */ 665 666 swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp, 667 swap_pager_condfree_callback, &info); 668 *basei = info.basei; 669 670 /* 671 * Take the higher difference swblocks vs pages 672 */ 673 n = count - (int)info.begi; 674 t = count * 8 - (int)info.endi; 675 if (n < t) 676 n = t; 677 if (n < 1) 678 n = 1; 679 return(n); 680 } 681 682 /* 683 * The idea is to free whole meta-block to avoid fragmenting 684 * the swap space or disk I/O. We only do this if NO VM pages 685 * are present. 686 * 687 * We do not have to deal with clearing PG_SWAPPED in related VM 688 * pages because there are no related VM pages. 689 * 690 * The caller must hold the object. 691 */ 692 static int 693 swap_pager_condfree_callback(struct swblock *swap, void *data) 694 { 695 struct swfreeinfo *info = data; 696 vm_object_t object = info->object; 697 int i; 698 699 for (i = 0; i < SWAP_META_PAGES; ++i) { 700 if (vm_page_lookup(object, swap->swb_index + i)) 701 break; 702 } 703 info->basei = swap->swb_index + SWAP_META_PAGES; 704 if (i == SWAP_META_PAGES) { 705 info->begi -= swap->swb_count; 706 swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES); 707 } 708 --info->endi; 709 if ((int)info->begi < 0 || (int)info->endi < 0) 710 return(-1); 711 lwkt_yield(); 712 return(0); 713 } 714 715 /* 716 * Called by vm_page_alloc() when a new VM page is inserted 717 * into a VM object. Checks whether swap has been assigned to 718 * the page and sets PG_SWAPPED as necessary. 719 * 720 * (m) must be busied by caller and remains busied on return. 721 */ 722 void 723 swap_pager_page_inserted(vm_page_t m) 724 { 725 if (m->object->swblock_count) { 726 vm_object_hold(m->object); 727 if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE) 728 vm_page_flag_set(m, PG_SWAPPED); 729 vm_object_drop(m->object); 730 } 731 } 732 733 /* 734 * SWAP_PAGER_RESERVE() - reserve swap blocks in object 735 * 736 * Assigns swap blocks to the specified range within the object. The 737 * swap blocks are not zerod. Any previous swap assignment is destroyed. 738 * 739 * Returns 0 on success, -1 on failure. 740 * 741 * The caller is responsible for avoiding races in the specified range. 742 * No other requirements. 743 */ 744 int 745 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size) 746 { 747 int n = 0; 748 swblk_t blk = SWAPBLK_NONE; 749 vm_pindex_t beg = start; /* save start index */ 750 751 vm_object_hold(object); 752 753 while (size) { 754 if (n == 0) { 755 n = BLIST_MAX_ALLOC; 756 while ((blk = swp_pager_getswapspace(object, n)) == 757 SWAPBLK_NONE) 758 { 759 n >>= 1; 760 if (n == 0) { 761 swp_pager_meta_free(object, beg, 762 start - beg); 763 vm_object_drop(object); 764 return(-1); 765 } 766 } 767 } 768 swp_pager_meta_build(object, start, blk); 769 --size; 770 ++start; 771 ++blk; 772 --n; 773 } 774 swp_pager_meta_free(object, start, n); 775 vm_object_drop(object); 776 return(0); 777 } 778 779 /* 780 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager 781 * and destroy the source. 782 * 783 * Copy any valid swapblks from the source to the destination. In 784 * cases where both the source and destination have a valid swapblk, 785 * we keep the destination's. 786 * 787 * This routine is allowed to block. It may block allocating metadata 788 * indirectly through swp_pager_meta_build() or if paging is still in 789 * progress on the source. 790 * 791 * XXX vm_page_collapse() kinda expects us not to block because we 792 * supposedly do not need to allocate memory, but for the moment we 793 * *may* have to get a little memory from the zone allocator, but 794 * it is taken from the interrupt memory. We should be ok. 795 * 796 * The source object contains no vm_page_t's (which is just as well) 797 * The source object is of type OBJT_SWAP. 798 * 799 * The source and destination objects must be held by the caller. 800 */ 801 void 802 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject, 803 vm_pindex_t base_index, int destroysource) 804 { 805 vm_pindex_t i; 806 807 ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject)); 808 ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject)); 809 810 /* 811 * transfer source to destination. 812 */ 813 for (i = 0; i < dstobject->size; ++i) { 814 swblk_t dstaddr; 815 816 /* 817 * Locate (without changing) the swapblk on the destination, 818 * unless it is invalid in which case free it silently, or 819 * if the destination is a resident page, in which case the 820 * source is thrown away. 821 */ 822 dstaddr = swp_pager_meta_ctl(dstobject, i, 0); 823 824 if (dstaddr == SWAPBLK_NONE) { 825 /* 826 * Destination has no swapblk and is not resident, 827 * copy source. 828 */ 829 swblk_t srcaddr; 830 831 srcaddr = swp_pager_meta_ctl(srcobject, 832 base_index + i, SWM_POP); 833 834 if (srcaddr != SWAPBLK_NONE) 835 swp_pager_meta_build(dstobject, i, srcaddr); 836 } else { 837 /* 838 * Destination has valid swapblk or it is represented 839 * by a resident page. We destroy the sourceblock. 840 */ 841 swp_pager_meta_ctl(srcobject, base_index + i, SWM_FREE); 842 } 843 } 844 845 /* 846 * Free left over swap blocks in source. 847 * 848 * We have to revert the type to OBJT_DEFAULT so we do not accidently 849 * double-remove the object from the swap queues. 850 */ 851 if (destroysource) { 852 /* 853 * Reverting the type is not necessary, the caller is going 854 * to destroy srcobject directly, but I'm doing it here 855 * for consistency since we've removed the object from its 856 * queues. 857 */ 858 swp_pager_meta_free_all(srcobject); 859 if (srcobject->type == OBJT_SWAP) 860 srcobject->type = OBJT_DEFAULT; 861 } 862 } 863 864 /* 865 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for 866 * the requested page. 867 * 868 * We determine whether good backing store exists for the requested 869 * page and return TRUE if it does, FALSE if it doesn't. 870 * 871 * If TRUE, we also try to determine how much valid, contiguous backing 872 * store exists before and after the requested page within a reasonable 873 * distance. We do not try to restrict it to the swap device stripe 874 * (that is handled in getpages/putpages). It probably isn't worth 875 * doing here. 876 * 877 * No requirements. 878 */ 879 boolean_t 880 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex) 881 { 882 swblk_t blk0; 883 884 /* 885 * do we have good backing store at the requested index ? 886 */ 887 vm_object_hold(object); 888 blk0 = swp_pager_meta_ctl(object, pindex, 0); 889 890 if (blk0 == SWAPBLK_NONE) { 891 vm_object_drop(object); 892 return (FALSE); 893 } 894 vm_object_drop(object); 895 return (TRUE); 896 } 897 898 /* 899 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page 900 * 901 * This removes any associated swap backing store, whether valid or 902 * not, from the page. This operates on any VM object, not just OBJT_SWAP 903 * objects. 904 * 905 * This routine is typically called when a page is made dirty, at 906 * which point any associated swap can be freed. MADV_FREE also 907 * calls us in a special-case situation 908 * 909 * NOTE!!! If the page is clean and the swap was valid, the caller 910 * should make the page dirty before calling this routine. 911 * This routine does NOT change the m->dirty status of the page. 912 * Also: MADV_FREE depends on it. 913 * 914 * The page must be busied. 915 * The caller can hold the object to avoid blocking, else we might block. 916 * No other requirements. 917 */ 918 void 919 swap_pager_unswapped(vm_page_t m) 920 { 921 if (m->flags & PG_SWAPPED) { 922 vm_object_hold(m->object); 923 KKASSERT(m->flags & PG_SWAPPED); 924 swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE); 925 vm_page_flag_clear(m, PG_SWAPPED); 926 vm_object_drop(m->object); 927 } 928 } 929 930 /* 931 * SWAP_PAGER_STRATEGY() - read, write, free blocks 932 * 933 * This implements a VM OBJECT strategy function using swap backing store. 934 * This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP 935 * types. Only BUF_CMD_{READ,WRITE,FREEBLKS} is supported, any other 936 * requests will return EINVAL. 937 * 938 * This is intended to be a cacheless interface (i.e. caching occurs at 939 * higher levels), and is also used as a swap-based SSD cache for vnode 940 * and device objects. 941 * 942 * All I/O goes directly to and from the swap device. 943 * 944 * We currently attempt to run I/O synchronously or asynchronously as 945 * the caller requests. This isn't perfect because we loose error 946 * sequencing when we run multiple ops in parallel to satisfy a request. 947 * But this is swap, so we let it all hang out. 948 * 949 * NOTE: This function supports the KVABIO API wherein bp->b_data might 950 * not be synchronized to the current cpu. 951 * 952 * No requirements. 953 */ 954 void 955 swap_pager_strategy(vm_object_t object, struct bio *bio) 956 { 957 struct buf *bp = bio->bio_buf; 958 struct bio *nbio; 959 vm_pindex_t start; 960 vm_pindex_t biox_blkno = 0; 961 int count; 962 char *data; 963 struct bio *biox; 964 struct buf *bufx; 965 #if 0 966 struct bio_track *track; 967 #endif 968 969 #if 0 970 /* 971 * tracking for swapdev vnode I/Os 972 */ 973 if (bp->b_cmd == BUF_CMD_READ) 974 track = &swapdev_vp->v_track_read; 975 else 976 track = &swapdev_vp->v_track_write; 977 #endif 978 979 /* 980 * Only supported commands 981 */ 982 if (bp->b_cmd != BUF_CMD_FREEBLKS && 983 bp->b_cmd != BUF_CMD_READ && 984 bp->b_cmd != BUF_CMD_WRITE) { 985 bp->b_error = EINVAL; 986 bp->b_flags |= B_ERROR | B_INVAL; 987 biodone(bio); 988 return; 989 } 990 991 /* 992 * bcount must be an integral number of pages. 993 */ 994 if (bp->b_bcount & PAGE_MASK) { 995 bp->b_error = EINVAL; 996 bp->b_flags |= B_ERROR | B_INVAL; 997 biodone(bio); 998 kprintf("swap_pager_strategy: bp %p offset %lld size %d, " 999 "not page bounded\n", 1000 bp, (long long)bio->bio_offset, (int)bp->b_bcount); 1001 return; 1002 } 1003 1004 /* 1005 * Clear error indication, initialize page index, count, data pointer. 1006 */ 1007 bp->b_error = 0; 1008 bp->b_flags &= ~B_ERROR; 1009 bp->b_resid = bp->b_bcount; 1010 1011 start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT); 1012 count = howmany(bp->b_bcount, PAGE_SIZE); 1013 1014 /* 1015 * WARNING! Do not dereference *data without issuing a bkvasync() 1016 */ 1017 data = bp->b_data; 1018 1019 /* 1020 * Deal with BUF_CMD_FREEBLKS 1021 */ 1022 if (bp->b_cmd == BUF_CMD_FREEBLKS) { 1023 /* 1024 * FREE PAGE(s) - destroy underlying swap that is no longer 1025 * needed. 1026 */ 1027 vm_object_hold(object); 1028 swp_pager_meta_free(object, start, count); 1029 vm_object_drop(object); 1030 bp->b_resid = 0; 1031 biodone(bio); 1032 return; 1033 } 1034 1035 /* 1036 * We need to be able to create a new cluster of I/O's. We cannot 1037 * use the caller fields of the passed bio so push a new one. 1038 * 1039 * Because nbio is just a placeholder for the cluster links, 1040 * we can biodone() the original bio instead of nbio to make 1041 * things a bit more efficient. 1042 */ 1043 nbio = push_bio(bio); 1044 nbio->bio_offset = bio->bio_offset; 1045 nbio->bio_caller_info1.cluster_head = NULL; 1046 nbio->bio_caller_info2.cluster_tail = NULL; 1047 1048 biox = NULL; 1049 bufx = NULL; 1050 1051 /* 1052 * Execute read or write 1053 */ 1054 vm_object_hold(object); 1055 1056 while (count > 0) { 1057 swblk_t blk; 1058 1059 /* 1060 * Obtain block. If block not found and writing, allocate a 1061 * new block and build it into the object. 1062 */ 1063 blk = swp_pager_meta_ctl(object, start, 0); 1064 if ((blk == SWAPBLK_NONE) && bp->b_cmd == BUF_CMD_WRITE) { 1065 blk = swp_pager_getswapspace(object, 1); 1066 if (blk == SWAPBLK_NONE) { 1067 bp->b_error = ENOMEM; 1068 bp->b_flags |= B_ERROR; 1069 break; 1070 } 1071 swp_pager_meta_build(object, start, blk); 1072 } 1073 1074 /* 1075 * Do we have to flush our current collection? Yes if: 1076 * 1077 * - no swap block at this index 1078 * - swap block is not contiguous 1079 * - we cross a physical disk boundry in the 1080 * stripe. 1081 */ 1082 if (biox && 1083 (biox_blkno + btoc(bufx->b_bcount) != blk || 1084 ((biox_blkno ^ blk) & ~SWB_DMMASK))) { 1085 switch(bp->b_cmd) { 1086 case BUF_CMD_READ: 1087 ++mycpu->gd_cnt.v_swapin; 1088 mycpu->gd_cnt.v_swappgsin += 1089 btoc(bufx->b_bcount); 1090 break; 1091 case BUF_CMD_WRITE: 1092 ++mycpu->gd_cnt.v_swapout; 1093 mycpu->gd_cnt.v_swappgsout += 1094 btoc(bufx->b_bcount); 1095 bufx->b_dirtyend = bufx->b_bcount; 1096 break; 1097 default: 1098 /* NOT REACHED */ 1099 break; 1100 } 1101 1102 /* 1103 * Finished with this buf. 1104 */ 1105 KKASSERT(bufx->b_bcount != 0); 1106 if (bufx->b_cmd != BUF_CMD_READ) 1107 bufx->b_dirtyend = bufx->b_bcount; 1108 biox = NULL; 1109 bufx = NULL; 1110 } 1111 1112 /* 1113 * Add new swapblk to biox, instantiating biox if necessary. 1114 * Zero-fill reads are able to take a shortcut. 1115 */ 1116 if (blk == SWAPBLK_NONE) { 1117 /* 1118 * We can only get here if we are reading. 1119 */ 1120 bkvasync(bp); 1121 bzero(data, PAGE_SIZE); 1122 bp->b_resid -= PAGE_SIZE; 1123 } else { 1124 if (biox == NULL) { 1125 /* XXX chain count > 4, wait to <= 4 */ 1126 1127 bufx = getpbuf(NULL); 1128 bufx->b_flags |= B_KVABIO; 1129 biox = &bufx->b_bio1; 1130 cluster_append(nbio, bufx); 1131 bufx->b_cmd = bp->b_cmd; 1132 biox->bio_done = swap_chain_iodone; 1133 biox->bio_offset = (off_t)blk << PAGE_SHIFT; 1134 biox->bio_caller_info1.cluster_parent = nbio; 1135 biox_blkno = blk; 1136 bufx->b_bcount = 0; 1137 bufx->b_data = data; 1138 } 1139 bufx->b_bcount += PAGE_SIZE; 1140 } 1141 --count; 1142 ++start; 1143 data += PAGE_SIZE; 1144 } 1145 1146 vm_object_drop(object); 1147 1148 /* 1149 * Flush out last buffer 1150 */ 1151 if (biox) { 1152 if (bufx->b_cmd == BUF_CMD_READ) { 1153 ++mycpu->gd_cnt.v_swapin; 1154 mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount); 1155 } else { 1156 ++mycpu->gd_cnt.v_swapout; 1157 mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount); 1158 bufx->b_dirtyend = bufx->b_bcount; 1159 } 1160 KKASSERT(bufx->b_bcount); 1161 if (bufx->b_cmd != BUF_CMD_READ) 1162 bufx->b_dirtyend = bufx->b_bcount; 1163 /* biox, bufx = NULL */ 1164 } 1165 1166 /* 1167 * Now initiate all the I/O. Be careful looping on our chain as 1168 * I/O's may complete while we are still initiating them. 1169 * 1170 * If the request is a 100% sparse read no bios will be present 1171 * and we just biodone() the buffer. 1172 */ 1173 nbio->bio_caller_info2.cluster_tail = NULL; 1174 bufx = nbio->bio_caller_info1.cluster_head; 1175 1176 if (bufx) { 1177 while (bufx) { 1178 biox = &bufx->b_bio1; 1179 BUF_KERNPROC(bufx); 1180 bufx = bufx->b_cluster_next; 1181 vn_strategy(swapdev_vp, biox); 1182 } 1183 } else { 1184 biodone(bio); 1185 } 1186 1187 /* 1188 * Completion of the cluster will also call biodone_chain(nbio). 1189 * We never call biodone(nbio) so we don't have to worry about 1190 * setting up a bio_done callback. It's handled in the sub-IO. 1191 */ 1192 /**/ 1193 } 1194 1195 /* 1196 * biodone callback 1197 * 1198 * No requirements. 1199 */ 1200 static void 1201 swap_chain_iodone(struct bio *biox) 1202 { 1203 struct buf **nextp; 1204 struct buf *bufx; /* chained sub-buffer */ 1205 struct bio *nbio; /* parent nbio with chain glue */ 1206 struct buf *bp; /* original bp associated with nbio */ 1207 int chain_empty; 1208 1209 bufx = biox->bio_buf; 1210 nbio = biox->bio_caller_info1.cluster_parent; 1211 bp = nbio->bio_buf; 1212 1213 /* 1214 * Update the original buffer 1215 */ 1216 KKASSERT(bp != NULL); 1217 if (bufx->b_flags & B_ERROR) { 1218 atomic_set_int(&bufx->b_flags, B_ERROR); 1219 bp->b_error = bufx->b_error; /* race ok */ 1220 } else if (bufx->b_resid != 0) { 1221 atomic_set_int(&bufx->b_flags, B_ERROR); 1222 bp->b_error = EINVAL; /* race ok */ 1223 } else { 1224 atomic_subtract_int(&bp->b_resid, bufx->b_bcount); 1225 } 1226 1227 /* 1228 * Remove us from the chain. 1229 */ 1230 spin_lock(&swapbp_spin); 1231 nextp = &nbio->bio_caller_info1.cluster_head; 1232 while (*nextp != bufx) { 1233 KKASSERT(*nextp != NULL); 1234 nextp = &(*nextp)->b_cluster_next; 1235 } 1236 *nextp = bufx->b_cluster_next; 1237 chain_empty = (nbio->bio_caller_info1.cluster_head == NULL); 1238 spin_unlock(&swapbp_spin); 1239 1240 /* 1241 * Clean up bufx. If the chain is now empty we finish out 1242 * the parent. Note that we may be racing other completions 1243 * so we must use the chain_empty status from above. 1244 */ 1245 if (chain_empty) { 1246 if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) { 1247 atomic_set_int(&bp->b_flags, B_ERROR); 1248 bp->b_error = EINVAL; 1249 } 1250 biodone_chain(nbio); 1251 } 1252 relpbuf(bufx, NULL); 1253 } 1254 1255 /* 1256 * SWAP_PAGER_GETPAGES() - bring page in from swap 1257 * 1258 * The requested page may have to be brought in from swap. Calculate the 1259 * swap block and bring in additional pages if possible. All pages must 1260 * have contiguous swap block assignments and reside in the same object. 1261 * 1262 * The caller has a single vm_object_pip_add() reference prior to 1263 * calling us and we should return with the same. 1264 * 1265 * The caller has BUSY'd the page. We should return with (*mpp) left busy, 1266 * and any additinal pages unbusied. 1267 * 1268 * If the caller encounters a PG_RAM page it will pass it to us even though 1269 * it may be valid and dirty. We cannot overwrite the page in this case! 1270 * The case is used to allow us to issue pure read-aheads. 1271 * 1272 * NOTE! XXX This code does not entirely pipeline yet due to the fact that 1273 * the PG_RAM page is validated at the same time as mreq. What we 1274 * really need to do is issue a separate read-ahead pbuf. 1275 * 1276 * No requirements. 1277 */ 1278 static int 1279 swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess) 1280 { 1281 struct buf *bp; 1282 struct bio *bio; 1283 vm_page_t mreq; 1284 vm_page_t m; 1285 vm_offset_t kva; 1286 swblk_t blk; 1287 int i; 1288 int j; 1289 int raonly; 1290 int error; 1291 u_int32_t busy_count; 1292 vm_page_t marray[XIO_INTERNAL_PAGES]; 1293 1294 mreq = *mpp; 1295 1296 vm_object_hold(object); 1297 if (mreq->object != object) { 1298 panic("swap_pager_getpages: object mismatch %p/%p", 1299 object, 1300 mreq->object 1301 ); 1302 } 1303 1304 /* 1305 * We don't want to overwrite a fully valid page as it might be 1306 * dirty. This case can occur when e.g. vm_fault hits a perfectly 1307 * valid page with PG_RAM set. 1308 * 1309 * In this case we see if the next page is a suitable page-in 1310 * candidate and if it is we issue read-ahead. PG_RAM will be 1311 * set on the last page of the read-ahead to continue the pipeline. 1312 */ 1313 if (mreq->valid == VM_PAGE_BITS_ALL) { 1314 if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size) { 1315 vm_object_drop(object); 1316 return(VM_PAGER_OK); 1317 } 1318 blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0); 1319 if (blk == SWAPBLK_NONE) { 1320 vm_object_drop(object); 1321 return(VM_PAGER_OK); 1322 } 1323 m = vm_page_lookup_busy_try(object, mreq->pindex + 1, 1324 TRUE, &error); 1325 if (error) { 1326 vm_object_drop(object); 1327 return(VM_PAGER_OK); 1328 } else if (m == NULL) { 1329 /* 1330 * Use VM_ALLOC_QUICK to avoid blocking on cache 1331 * page reuse. 1332 */ 1333 m = vm_page_alloc(object, mreq->pindex + 1, 1334 VM_ALLOC_QUICK); 1335 if (m == NULL) { 1336 vm_object_drop(object); 1337 return(VM_PAGER_OK); 1338 } 1339 } else { 1340 if (m->valid) { 1341 vm_page_wakeup(m); 1342 vm_object_drop(object); 1343 return(VM_PAGER_OK); 1344 } 1345 vm_page_unqueue_nowakeup(m); 1346 } 1347 /* page is busy */ 1348 mreq = m; 1349 raonly = 1; 1350 } else { 1351 raonly = 0; 1352 } 1353 1354 /* 1355 * Try to block-read contiguous pages from swap if sequential, 1356 * otherwise just read one page. Contiguous pages from swap must 1357 * reside within a single device stripe because the I/O cannot be 1358 * broken up across multiple stripes. 1359 * 1360 * Note that blk and iblk can be SWAPBLK_NONE but the loop is 1361 * set up such that the case(s) are handled implicitly. 1362 */ 1363 blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0); 1364 marray[0] = mreq; 1365 1366 for (i = 1; i <= swap_burst_read && 1367 i < XIO_INTERNAL_PAGES && 1368 mreq->pindex + i < object->size; ++i) { 1369 swblk_t iblk; 1370 1371 iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0); 1372 if (iblk != blk + i) 1373 break; 1374 if ((blk ^ iblk) & ~SWB_DMMASK) 1375 break; 1376 m = vm_page_lookup_busy_try(object, mreq->pindex + i, 1377 TRUE, &error); 1378 if (error) { 1379 break; 1380 } else if (m == NULL) { 1381 /* 1382 * Use VM_ALLOC_QUICK to avoid blocking on cache 1383 * page reuse. 1384 */ 1385 m = vm_page_alloc(object, mreq->pindex + i, 1386 VM_ALLOC_QUICK); 1387 if (m == NULL) 1388 break; 1389 } else { 1390 if (m->valid) { 1391 vm_page_wakeup(m); 1392 break; 1393 } 1394 vm_page_unqueue_nowakeup(m); 1395 } 1396 /* page is busy */ 1397 marray[i] = m; 1398 } 1399 if (i > 1) 1400 vm_page_flag_set(marray[i - 1], PG_RAM); 1401 1402 /* 1403 * If mreq is the requested page and we have nothing to do return 1404 * VM_PAGER_FAIL. If raonly is set mreq is just another read-ahead 1405 * page and must be cleaned up. 1406 */ 1407 if (blk == SWAPBLK_NONE) { 1408 KKASSERT(i == 1); 1409 if (raonly) { 1410 vnode_pager_freepage(mreq); 1411 vm_object_drop(object); 1412 return(VM_PAGER_OK); 1413 } else { 1414 vm_object_drop(object); 1415 return(VM_PAGER_FAIL); 1416 } 1417 } 1418 1419 /* 1420 * Map our page(s) into kva for input 1421 * 1422 * Use the KVABIO API to avoid synchronizing the pmap. 1423 */ 1424 bp = getpbuf_kva(&nsw_rcount); 1425 bio = &bp->b_bio1; 1426 kva = (vm_offset_t) bp->b_kvabase; 1427 bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t)); 1428 pmap_qenter_noinval(kva, bp->b_xio.xio_pages, i); 1429 1430 bp->b_data = (caddr_t)kva; 1431 bp->b_bcount = PAGE_SIZE * i; 1432 bp->b_xio.xio_npages = i; 1433 bp->b_flags |= B_KVABIO; 1434 bio->bio_done = swp_pager_async_iodone; 1435 bio->bio_offset = (off_t)blk << PAGE_SHIFT; 1436 bio->bio_caller_info1.index = SWBIO_READ; 1437 1438 /* 1439 * Set index. If raonly set the index beyond the array so all 1440 * the pages are treated the same, otherwise the original mreq is 1441 * at index 0. 1442 */ 1443 if (raonly) 1444 bio->bio_driver_info = (void *)(intptr_t)i; 1445 else 1446 bio->bio_driver_info = (void *)(intptr_t)0; 1447 1448 for (j = 0; j < i; ++j) { 1449 atomic_set_int(&bp->b_xio.xio_pages[j]->busy_count, 1450 PBUSY_SWAPINPROG); 1451 } 1452 1453 mycpu->gd_cnt.v_swapin++; 1454 mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages; 1455 1456 /* 1457 * We still hold the lock on mreq, and our automatic completion routine 1458 * does not remove it. 1459 */ 1460 vm_object_pip_add(object, bp->b_xio.xio_npages); 1461 1462 /* 1463 * perform the I/O. NOTE!!! bp cannot be considered valid after 1464 * this point because we automatically release it on completion. 1465 * Instead, we look at the one page we are interested in which we 1466 * still hold a lock on even through the I/O completion. 1467 * 1468 * The other pages in our m[] array are also released on completion, 1469 * so we cannot assume they are valid anymore either. 1470 */ 1471 bp->b_cmd = BUF_CMD_READ; 1472 BUF_KERNPROC(bp); 1473 vn_strategy(swapdev_vp, bio); 1474 1475 /* 1476 * Wait for the page we want to complete. PBUSY_SWAPINPROG is always 1477 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE 1478 * is set in the meta-data. 1479 * 1480 * If this is a read-ahead only we return immediately without 1481 * waiting for I/O. 1482 */ 1483 if (raonly) { 1484 vm_object_drop(object); 1485 return(VM_PAGER_OK); 1486 } 1487 1488 /* 1489 * Read-ahead includes originally requested page case. 1490 */ 1491 for (;;) { 1492 busy_count = mreq->busy_count; 1493 cpu_ccfence(); 1494 if ((busy_count & PBUSY_SWAPINPROG) == 0) 1495 break; 1496 tsleep_interlock(mreq, 0); 1497 if (!atomic_cmpset_int(&mreq->busy_count, busy_count, 1498 busy_count | 1499 PBUSY_SWAPINPROG | PBUSY_WANTED)) { 1500 continue; 1501 } 1502 atomic_set_int(&mreq->flags, PG_REFERENCED); 1503 mycpu->gd_cnt.v_intrans++; 1504 if (tsleep(mreq, PINTERLOCKED, "swread", hz*20)) { 1505 kprintf( 1506 "swap_pager: indefinite wait buffer: " 1507 " bp %p offset: %lld, size: %ld\n", 1508 bp, 1509 (long long)bio->bio_offset, 1510 (long)bp->b_bcount 1511 ); 1512 } 1513 } 1514 1515 /* 1516 * Disallow speculative reads prior to the SWAPINPROG test. 1517 */ 1518 cpu_lfence(); 1519 1520 /* 1521 * mreq is left busied after completion, but all the other pages 1522 * are freed. If we had an unrecoverable read error the page will 1523 * not be valid. 1524 */ 1525 vm_object_drop(object); 1526 if (mreq->valid != VM_PAGE_BITS_ALL) 1527 return(VM_PAGER_ERROR); 1528 else 1529 return(VM_PAGER_OK); 1530 1531 /* 1532 * A final note: in a low swap situation, we cannot deallocate swap 1533 * and mark a page dirty here because the caller is likely to mark 1534 * the page clean when we return, causing the page to possibly revert 1535 * to all-zero's later. 1536 */ 1537 } 1538 1539 /* 1540 * swap_pager_putpages: 1541 * 1542 * Assign swap (if necessary) and initiate I/O on the specified pages. 1543 * 1544 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects 1545 * are automatically converted to SWAP objects. 1546 * 1547 * In a low memory situation we may block in vn_strategy(), but the new 1548 * vm_page reservation system coupled with properly written VFS devices 1549 * should ensure that no low-memory deadlock occurs. This is an area 1550 * which needs work. 1551 * 1552 * The parent has N vm_object_pip_add() references prior to 1553 * calling us and will remove references for rtvals[] that are 1554 * not set to VM_PAGER_PEND. We need to remove the rest on I/O 1555 * completion. 1556 * 1557 * The parent has soft-busy'd the pages it passes us and will unbusy 1558 * those whos rtvals[] entry is not set to VM_PAGER_PEND on return. 1559 * We need to unbusy the rest on I/O completion. 1560 * 1561 * No requirements. 1562 */ 1563 void 1564 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count, 1565 int flags, int *rtvals) 1566 { 1567 int i; 1568 int n = 0; 1569 1570 vm_object_hold(object); 1571 1572 if (count && m[0]->object != object) { 1573 panic("swap_pager_getpages: object mismatch %p/%p", 1574 object, 1575 m[0]->object 1576 ); 1577 } 1578 1579 /* 1580 * Step 1 1581 * 1582 * Turn object into OBJT_SWAP 1583 * Check for bogus sysops 1584 * 1585 * Force sync if not pageout process, we don't want any single 1586 * non-pageout process to be able to hog the I/O subsystem! This 1587 * can be overridden by setting. 1588 */ 1589 if (object->type == OBJT_DEFAULT) { 1590 if (object->type == OBJT_DEFAULT) 1591 swp_pager_meta_convert(object); 1592 } 1593 1594 /* 1595 * Normally we force synchronous swap I/O if this is not the 1596 * pageout daemon to prevent any single user process limited 1597 * via RLIMIT_RSS from hogging swap write bandwidth. 1598 */ 1599 if (curthread != pagethread && 1600 curthread != emergpager && 1601 swap_user_async == 0) { 1602 flags |= VM_PAGER_PUT_SYNC; 1603 } 1604 1605 /* 1606 * Step 2 1607 * 1608 * Update nsw parameters from swap_async_max sysctl values. 1609 * Do not let the sysop crash the machine with bogus numbers. 1610 */ 1611 if (swap_async_max != nsw_wcount_async_max) { 1612 int n; 1613 1614 /* 1615 * limit range 1616 */ 1617 if ((n = swap_async_max) > nswbuf_kva / 2) 1618 n = nswbuf_kva / 2; 1619 if (n < 1) 1620 n = 1; 1621 swap_async_max = n; 1622 1623 /* 1624 * Adjust difference ( if possible ). If the current async 1625 * count is too low, we may not be able to make the adjustment 1626 * at this time. 1627 * 1628 * vm_token needed for nsw_wcount sleep interlock 1629 */ 1630 lwkt_gettoken(&vm_token); 1631 n -= nsw_wcount_async_max; 1632 if (nsw_wcount_async + n >= 0) { 1633 nsw_wcount_async_max += n; 1634 pbuf_adjcount(&nsw_wcount_async, n); 1635 } 1636 lwkt_reltoken(&vm_token); 1637 } 1638 1639 /* 1640 * Step 3 1641 * 1642 * Assign swap blocks and issue I/O. We reallocate swap on the fly. 1643 * The page is left dirty until the pageout operation completes 1644 * successfully. 1645 */ 1646 1647 for (i = 0; i < count; i += n) { 1648 struct buf *bp; 1649 struct bio *bio; 1650 swblk_t blk; 1651 int j; 1652 1653 /* 1654 * Maximum I/O size is limited by a number of factors. 1655 */ 1656 1657 n = min(BLIST_MAX_ALLOC, count - i); 1658 n = min(n, nsw_cluster_max); 1659 1660 lwkt_gettoken(&vm_token); 1661 1662 /* 1663 * Get biggest block of swap we can. If we fail, fall 1664 * back and try to allocate a smaller block. Don't go 1665 * overboard trying to allocate space if it would overly 1666 * fragment swap. 1667 */ 1668 while ( 1669 (blk = swp_pager_getswapspace(object, n)) == SWAPBLK_NONE && 1670 n > 4 1671 ) { 1672 n >>= 1; 1673 } 1674 if (blk == SWAPBLK_NONE) { 1675 for (j = 0; j < n; ++j) 1676 rtvals[i+j] = VM_PAGER_FAIL; 1677 lwkt_reltoken(&vm_token); 1678 continue; 1679 } 1680 if (vm_report_swap_allocs > 0) { 1681 kprintf("swap_alloc %08jx,%d\n", (intmax_t)blk, n); 1682 --vm_report_swap_allocs; 1683 } 1684 1685 /* 1686 * The I/O we are constructing cannot cross a physical 1687 * disk boundry in the swap stripe. 1688 */ 1689 if ((blk ^ (blk + n)) & ~SWB_DMMASK) { 1690 j = ((blk + SWB_DMMAX) & ~SWB_DMMASK) - blk; 1691 swp_pager_freeswapspace(object, blk + j, n - j); 1692 n = j; 1693 } 1694 1695 /* 1696 * All I/O parameters have been satisfied, build the I/O 1697 * request and assign the swap space. 1698 * 1699 * Use the KVABIO API to avoid synchronizing the pmap. 1700 */ 1701 if ((flags & VM_PAGER_PUT_SYNC)) 1702 bp = getpbuf_kva(&nsw_wcount_sync); 1703 else 1704 bp = getpbuf_kva(&nsw_wcount_async); 1705 bio = &bp->b_bio1; 1706 1707 lwkt_reltoken(&vm_token); 1708 1709 pmap_qenter_noinval((vm_offset_t)bp->b_data, &m[i], n); 1710 1711 bp->b_flags |= B_KVABIO; 1712 bp->b_bcount = PAGE_SIZE * n; 1713 bio->bio_offset = (off_t)blk << PAGE_SHIFT; 1714 1715 for (j = 0; j < n; ++j) { 1716 vm_page_t mreq = m[i+j]; 1717 1718 swp_pager_meta_build(mreq->object, mreq->pindex, 1719 blk + j); 1720 if (object->type == OBJT_SWAP) 1721 vm_page_dirty(mreq); 1722 rtvals[i+j] = VM_PAGER_OK; 1723 1724 atomic_set_int(&mreq->busy_count, PBUSY_SWAPINPROG); 1725 bp->b_xio.xio_pages[j] = mreq; 1726 } 1727 bp->b_xio.xio_npages = n; 1728 1729 mycpu->gd_cnt.v_swapout++; 1730 mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages; 1731 1732 bp->b_dirtyoff = 0; /* req'd for NFS */ 1733 bp->b_dirtyend = bp->b_bcount; /* req'd for NFS */ 1734 bp->b_cmd = BUF_CMD_WRITE; 1735 bio->bio_caller_info1.index = SWBIO_WRITE; 1736 1737 /* 1738 * asynchronous 1739 */ 1740 if ((flags & VM_PAGER_PUT_SYNC) == 0) { 1741 bio->bio_done = swp_pager_async_iodone; 1742 BUF_KERNPROC(bp); 1743 vn_strategy(swapdev_vp, bio); 1744 1745 for (j = 0; j < n; ++j) 1746 rtvals[i+j] = VM_PAGER_PEND; 1747 continue; 1748 } 1749 1750 /* 1751 * Issue synchrnously. 1752 * 1753 * Wait for the sync I/O to complete, then update rtvals. 1754 * We just set the rtvals[] to VM_PAGER_PEND so we can call 1755 * our async completion routine at the end, thus avoiding a 1756 * double-free. 1757 */ 1758 bio->bio_caller_info1.index |= SWBIO_SYNC; 1759 if (flags & VM_PAGER_TRY_TO_CACHE) 1760 bio->bio_caller_info1.index |= SWBIO_TTC; 1761 bio->bio_done = biodone_sync; 1762 bio->bio_flags |= BIO_SYNC; 1763 vn_strategy(swapdev_vp, bio); 1764 biowait(bio, "swwrt"); 1765 1766 for (j = 0; j < n; ++j) 1767 rtvals[i+j] = VM_PAGER_PEND; 1768 1769 /* 1770 * Now that we are through with the bp, we can call the 1771 * normal async completion, which frees everything up. 1772 */ 1773 swp_pager_async_iodone(bio); 1774 } 1775 vm_object_drop(object); 1776 } 1777 1778 /* 1779 * No requirements. 1780 * 1781 * Recalculate the low and high-water marks. 1782 */ 1783 void 1784 swap_pager_newswap(void) 1785 { 1786 /* 1787 * NOTE: vm_swap_max cannot exceed 1 billion blocks, which is the 1788 * limitation imposed by the blist code. Remember that this 1789 * will be divided by NSWAP_MAX (4), so each swap device is 1790 * limited to around a terrabyte. 1791 */ 1792 if (vm_swap_max) { 1793 nswap_lowat = (int64_t)vm_swap_max * 4 / 100; /* 4% left */ 1794 nswap_hiwat = (int64_t)vm_swap_max * 6 / 100; /* 6% left */ 1795 kprintf("swap low/high-water marks set to %d/%d\n", 1796 nswap_lowat, nswap_hiwat); 1797 } else { 1798 nswap_lowat = 128; 1799 nswap_hiwat = 512; 1800 } 1801 swp_sizecheck(); 1802 } 1803 1804 /* 1805 * swp_pager_async_iodone: 1806 * 1807 * Completion routine for asynchronous reads and writes from/to swap. 1808 * Also called manually by synchronous code to finish up a bp. 1809 * 1810 * For READ operations, the pages are BUSY'd. For WRITE operations, 1811 * the pages are vm_page_t->busy'd. For READ operations, we BUSY 1812 * unbusy all pages except the 'main' request page. For WRITE 1813 * operations, we vm_page_t->busy'd unbusy all pages ( we can do this 1814 * because we marked them all VM_PAGER_PEND on return from putpages ). 1815 * 1816 * This routine may not block. 1817 * 1818 * No requirements. 1819 */ 1820 static void 1821 swp_pager_async_iodone(struct bio *bio) 1822 { 1823 struct buf *bp = bio->bio_buf; 1824 vm_object_t object = NULL; 1825 int i; 1826 int *nswptr; 1827 1828 /* 1829 * report error 1830 */ 1831 if (bp->b_flags & B_ERROR) { 1832 kprintf( 1833 "swap_pager: I/O error - %s failed; offset %lld," 1834 "size %ld, error %d\n", 1835 ((bio->bio_caller_info1.index & SWBIO_READ) ? 1836 "pagein" : "pageout"), 1837 (long long)bio->bio_offset, 1838 (long)bp->b_bcount, 1839 bp->b_error 1840 ); 1841 } 1842 1843 /* 1844 * set object. 1845 */ 1846 if (bp->b_xio.xio_npages) 1847 object = bp->b_xio.xio_pages[0]->object; 1848 1849 #if 0 1850 /* PMAP TESTING CODE (useful, keep it in but #if 0'd) */ 1851 if (bio->bio_caller_info1.index & SWBIO_WRITE) { 1852 if (bio->bio_crc != iscsi_crc32(bp->b_data, bp->b_bcount)) { 1853 kprintf("SWAPOUT: BADCRC %08x %08x\n", 1854 bio->bio_crc, 1855 iscsi_crc32(bp->b_data, bp->b_bcount)); 1856 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 1857 vm_page_t m = bp->b_xio.xio_pages[i]; 1858 if (m->flags & PG_WRITEABLE) 1859 kprintf("SWAPOUT: " 1860 "%d/%d %p writable\n", 1861 i, bp->b_xio.xio_npages, m); 1862 } 1863 } 1864 } 1865 #endif 1866 1867 /* 1868 * remove the mapping for kernel virtual 1869 */ 1870 pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages); 1871 1872 /* 1873 * cleanup pages. If an error occurs writing to swap, we are in 1874 * very serious trouble. If it happens to be a disk error, though, 1875 * we may be able to recover by reassigning the swap later on. So 1876 * in this case we remove the m->swapblk assignment for the page 1877 * but do not free it in the rlist. The errornous block(s) are thus 1878 * never reallocated as swap. Redirty the page and continue. 1879 */ 1880 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 1881 vm_page_t m = bp->b_xio.xio_pages[i]; 1882 1883 if (bp->b_flags & B_ERROR) { 1884 /* 1885 * If an error occurs I'd love to throw the swapblk 1886 * away without freeing it back to swapspace, so it 1887 * can never be used again. But I can't from an 1888 * interrupt. 1889 */ 1890 1891 if (bio->bio_caller_info1.index & SWBIO_READ) { 1892 /* 1893 * When reading, reqpage needs to stay 1894 * locked for the parent, but all other 1895 * pages can be freed. We still want to 1896 * wakeup the parent waiting on the page, 1897 * though. ( also: pg_reqpage can be -1 and 1898 * not match anything ). 1899 * 1900 * We have to wake specifically requested pages 1901 * up too because we cleared SWAPINPROG and 1902 * someone may be waiting for that. 1903 * 1904 * NOTE: For reads, m->dirty will probably 1905 * be overridden by the original caller 1906 * of getpages so don't play cute tricks 1907 * here. 1908 * 1909 * NOTE: We can't actually free the page from 1910 * here, because this is an interrupt. 1911 * It is not legal to mess with 1912 * object->memq from an interrupt. 1913 * Deactivate the page instead. 1914 * 1915 * WARNING! The instant SWAPINPROG is 1916 * cleared another cpu may start 1917 * using the mreq page (it will 1918 * check m->valid immediately). 1919 */ 1920 1921 m->valid = 0; 1922 atomic_clear_int(&m->busy_count, 1923 PBUSY_SWAPINPROG); 1924 1925 /* 1926 * bio_driver_info holds the requested page 1927 * index. 1928 */ 1929 if (i != (int)(intptr_t)bio->bio_driver_info) { 1930 vm_page_deactivate(m); 1931 vm_page_wakeup(m); 1932 } else { 1933 vm_page_flash(m); 1934 } 1935 /* 1936 * If i == bp->b_pager.pg_reqpage, do not wake 1937 * the page up. The caller needs to. 1938 */ 1939 } else { 1940 /* 1941 * If a write error occurs remove the swap 1942 * assignment (note that PG_SWAPPED may or 1943 * may not be set depending on prior activity). 1944 * 1945 * Re-dirty OBJT_SWAP pages as there is no 1946 * other backing store, we can't throw the 1947 * page away. 1948 * 1949 * Non-OBJT_SWAP pages (aka swapcache) must 1950 * not be dirtied since they may not have 1951 * been dirty in the first place, and they 1952 * do have backing store (the vnode). 1953 */ 1954 vm_page_busy_wait(m, FALSE, "swadpg"); 1955 vm_object_hold(m->object); 1956 swp_pager_meta_ctl(m->object, m->pindex, 1957 SWM_FREE); 1958 vm_page_flag_clear(m, PG_SWAPPED); 1959 vm_object_drop(m->object); 1960 if (m->object->type == OBJT_SWAP) { 1961 vm_page_dirty(m); 1962 vm_page_activate(m); 1963 } 1964 vm_page_io_finish(m); 1965 atomic_clear_int(&m->busy_count, 1966 PBUSY_SWAPINPROG); 1967 vm_page_wakeup(m); 1968 } 1969 } else if (bio->bio_caller_info1.index & SWBIO_READ) { 1970 /* 1971 * NOTE: for reads, m->dirty will probably be 1972 * overridden by the original caller of getpages so 1973 * we cannot set them in order to free the underlying 1974 * swap in a low-swap situation. I don't think we'd 1975 * want to do that anyway, but it was an optimization 1976 * that existed in the old swapper for a time before 1977 * it got ripped out due to precisely this problem. 1978 * 1979 * If not the requested page then deactivate it. 1980 * 1981 * Note that the requested page, reqpage, is left 1982 * busied, but we still have to wake it up. The 1983 * other pages are released (unbusied) by 1984 * vm_page_wakeup(). We do not set reqpage's 1985 * valid bits here, it is up to the caller. 1986 */ 1987 1988 /* 1989 * NOTE: Can't call pmap_clear_modify(m) from an 1990 * interrupt thread, the pmap code may have to 1991 * map non-kernel pmaps and currently asserts 1992 * the case. 1993 * 1994 * WARNING! The instant SWAPINPROG is 1995 * cleared another cpu may start 1996 * using the mreq page (it will 1997 * check m->valid immediately). 1998 */ 1999 /*pmap_clear_modify(m);*/ 2000 m->valid = VM_PAGE_BITS_ALL; 2001 vm_page_undirty(m); 2002 vm_page_flag_set(m, PG_SWAPPED); 2003 atomic_clear_int(&m->busy_count, PBUSY_SWAPINPROG); 2004 2005 /* 2006 * We have to wake specifically requested pages 2007 * up too because we cleared SWAPINPROG and 2008 * could be waiting for it in getpages. However, 2009 * be sure to not unbusy getpages specifically 2010 * requested page - getpages expects it to be 2011 * left busy. 2012 * 2013 * bio_driver_info holds the requested page 2014 */ 2015 if (i != (int)(intptr_t)bio->bio_driver_info) { 2016 vm_page_deactivate(m); 2017 vm_page_wakeup(m); 2018 } else { 2019 vm_page_flash(m); 2020 } 2021 } else { 2022 /* 2023 * Mark the page clean but do not mess with the 2024 * pmap-layer's modified state. That state should 2025 * also be clear since the caller protected the 2026 * page VM_PROT_READ, but allow the case. 2027 * 2028 * We are in an interrupt, avoid pmap operations. 2029 * 2030 * If we have a severe page deficit, deactivate the 2031 * page. Do not try to cache it (which would also 2032 * involve a pmap op), because the page might still 2033 * be read-heavy. 2034 * 2035 * When using the swap to cache clean vnode pages 2036 * we do not mess with the page dirty bits. 2037 * 2038 * NOTE! Nobody is waiting for the key mreq page 2039 * on write completion. 2040 */ 2041 vm_page_busy_wait(m, FALSE, "swadpg"); 2042 if (m->object->type == OBJT_SWAP) 2043 vm_page_undirty(m); 2044 vm_page_flag_set(m, PG_SWAPPED); 2045 atomic_clear_int(&m->busy_count, PBUSY_SWAPINPROG); 2046 if (vm_page_count_severe()) 2047 vm_page_deactivate(m); 2048 vm_page_io_finish(m); 2049 if (bio->bio_caller_info1.index & SWBIO_TTC) 2050 vm_page_try_to_cache(m); 2051 else 2052 vm_page_wakeup(m); 2053 } 2054 } 2055 2056 /* 2057 * adjust pip. NOTE: the original parent may still have its own 2058 * pip refs on the object. 2059 */ 2060 2061 if (object) 2062 vm_object_pip_wakeup_n(object, bp->b_xio.xio_npages); 2063 2064 /* 2065 * Release the physical I/O buffer. 2066 * 2067 * NOTE: Due to synchronous operations in the write case b_cmd may 2068 * already be set to BUF_CMD_DONE and BIO_SYNC may have already 2069 * been cleared. 2070 * 2071 * Use vm_token to interlock nsw_rcount/wcount wakeup? 2072 */ 2073 lwkt_gettoken(&vm_token); 2074 if (bio->bio_caller_info1.index & SWBIO_READ) 2075 nswptr = &nsw_rcount; 2076 else if (bio->bio_caller_info1.index & SWBIO_SYNC) 2077 nswptr = &nsw_wcount_sync; 2078 else 2079 nswptr = &nsw_wcount_async; 2080 bp->b_cmd = BUF_CMD_DONE; 2081 relpbuf(bp, nswptr); 2082 lwkt_reltoken(&vm_token); 2083 } 2084 2085 /* 2086 * Fault-in a potentially swapped page and remove the swap reference. 2087 * (used by swapoff code) 2088 * 2089 * object must be held. 2090 */ 2091 static __inline void 2092 swp_pager_fault_page(vm_object_t object, int *sharedp, vm_pindex_t pindex) 2093 { 2094 struct vnode *vp; 2095 vm_page_t m; 2096 int error; 2097 2098 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2099 2100 if (object->type == OBJT_VNODE) { 2101 /* 2102 * Any swap related to a vnode is due to swapcache. We must 2103 * vget() the vnode in case it is not active (otherwise 2104 * vref() will panic). Calling vm_object_page_remove() will 2105 * ensure that any swap ref is removed interlocked with the 2106 * page. clean_only is set to TRUE so we don't throw away 2107 * dirty pages. 2108 */ 2109 vp = object->handle; 2110 error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE); 2111 if (error == 0) { 2112 vm_object_page_remove(object, pindex, pindex + 1, TRUE); 2113 vput(vp); 2114 } 2115 } else { 2116 /* 2117 * Otherwise it is a normal OBJT_SWAP object and we can 2118 * fault the page in and remove the swap. 2119 */ 2120 m = vm_fault_object_page(object, IDX_TO_OFF(pindex), 2121 VM_PROT_NONE, 2122 VM_FAULT_DIRTY | VM_FAULT_UNSWAP, 2123 sharedp, &error); 2124 if (m) 2125 vm_page_unhold(m); 2126 } 2127 } 2128 2129 /* 2130 * This removes all swap blocks related to a particular device. We have 2131 * to be careful of ripups during the scan. 2132 */ 2133 static int swp_pager_swapoff_callback(struct swblock *swap, void *data); 2134 2135 int 2136 swap_pager_swapoff(int devidx) 2137 { 2138 struct vm_object_hash *hash; 2139 struct swswapoffinfo info; 2140 struct vm_object marker; 2141 vm_object_t object; 2142 int n; 2143 2144 bzero(&marker, sizeof(marker)); 2145 marker.type = OBJT_MARKER; 2146 2147 for (n = 0; n < VMOBJ_HSIZE; ++n) { 2148 hash = &vm_object_hash[n]; 2149 2150 lwkt_gettoken(&hash->token); 2151 TAILQ_INSERT_HEAD(&hash->list, &marker, object_list); 2152 2153 while ((object = TAILQ_NEXT(&marker, object_list)) != NULL) { 2154 if (object->type == OBJT_MARKER) 2155 goto skip; 2156 if (object->type != OBJT_SWAP && 2157 object->type != OBJT_VNODE) 2158 goto skip; 2159 vm_object_hold(object); 2160 if (object->type != OBJT_SWAP && 2161 object->type != OBJT_VNODE) { 2162 vm_object_drop(object); 2163 goto skip; 2164 } 2165 2166 /* 2167 * Object is special in that we can't just pagein 2168 * into vm_page's in it (tmpfs, vn). 2169 */ 2170 if ((object->flags & OBJ_NOPAGEIN) && 2171 RB_ROOT(&object->swblock_root)) { 2172 vm_object_drop(object); 2173 goto skip; 2174 } 2175 2176 info.object = object; 2177 info.shared = 0; 2178 info.devidx = devidx; 2179 swblock_rb_tree_RB_SCAN(&object->swblock_root, 2180 NULL, swp_pager_swapoff_callback, 2181 &info); 2182 vm_object_drop(object); 2183 skip: 2184 if (object == TAILQ_NEXT(&marker, object_list)) { 2185 TAILQ_REMOVE(&hash->list, &marker, object_list); 2186 TAILQ_INSERT_AFTER(&hash->list, object, 2187 &marker, object_list); 2188 } 2189 } 2190 TAILQ_REMOVE(&hash->list, &marker, object_list); 2191 lwkt_reltoken(&hash->token); 2192 } 2193 2194 /* 2195 * If we fail to locate all swblocks we just fail gracefully and 2196 * do not bother to restore paging on the swap device. If the 2197 * user wants to retry the user can retry. 2198 */ 2199 if (swdevt[devidx].sw_nused) 2200 return (1); 2201 else 2202 return (0); 2203 } 2204 2205 static 2206 int 2207 swp_pager_swapoff_callback(struct swblock *swap, void *data) 2208 { 2209 struct swswapoffinfo *info = data; 2210 vm_object_t object = info->object; 2211 vm_pindex_t index; 2212 swblk_t v; 2213 int i; 2214 2215 index = swap->swb_index; 2216 for (i = 0; i < SWAP_META_PAGES; ++i) { 2217 /* 2218 * Make sure we don't race a dying object. This will 2219 * kill the scan of the object's swap blocks entirely. 2220 */ 2221 if (object->flags & OBJ_DEAD) 2222 return(-1); 2223 2224 /* 2225 * Fault the page, which can obviously block. If the swap 2226 * structure disappears break out. 2227 */ 2228 v = swap->swb_pages[i]; 2229 if (v != SWAPBLK_NONE && BLK2DEVIDX(v) == info->devidx) { 2230 swp_pager_fault_page(object, &info->shared, 2231 swap->swb_index + i); 2232 /* swap ptr might go away */ 2233 if (RB_LOOKUP(swblock_rb_tree, 2234 &object->swblock_root, index) != swap) { 2235 break; 2236 } 2237 } 2238 } 2239 return(0); 2240 } 2241 2242 /************************************************************************ 2243 * SWAP META DATA * 2244 ************************************************************************ 2245 * 2246 * These routines manipulate the swap metadata stored in the 2247 * OBJT_SWAP object. 2248 * 2249 * Swap metadata is implemented with a global hash and not directly 2250 * linked into the object. Instead the object simply contains 2251 * appropriate tracking counters. 2252 */ 2253 2254 /* 2255 * Lookup the swblock containing the specified swap block index. 2256 * 2257 * The caller must hold the object. 2258 */ 2259 static __inline 2260 struct swblock * 2261 swp_pager_lookup(vm_object_t object, vm_pindex_t index) 2262 { 2263 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2264 index &= ~(vm_pindex_t)SWAP_META_MASK; 2265 return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index)); 2266 } 2267 2268 /* 2269 * Remove a swblock from the RB tree. 2270 * 2271 * The caller must hold the object. 2272 */ 2273 static __inline 2274 void 2275 swp_pager_remove(vm_object_t object, struct swblock *swap) 2276 { 2277 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2278 RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap); 2279 } 2280 2281 /* 2282 * Convert default object to swap object if necessary 2283 * 2284 * The caller must hold the object. 2285 */ 2286 static void 2287 swp_pager_meta_convert(vm_object_t object) 2288 { 2289 if (object->type == OBJT_DEFAULT) { 2290 object->type = OBJT_SWAP; 2291 KKASSERT(object->swblock_count == 0); 2292 } 2293 } 2294 2295 /* 2296 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object 2297 * 2298 * We first convert the object to a swap object if it is a default 2299 * object. Vnode objects do not need to be converted. 2300 * 2301 * The specified swapblk is added to the object's swap metadata. If 2302 * the swapblk is not valid, it is freed instead. Any previously 2303 * assigned swapblk is freed. 2304 * 2305 * The caller must hold the object. 2306 */ 2307 static void 2308 swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk) 2309 { 2310 struct swblock *swap; 2311 struct swblock *oswap; 2312 vm_pindex_t v; 2313 2314 KKASSERT(swapblk != SWAPBLK_NONE); 2315 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2316 2317 /* 2318 * Convert object if necessary 2319 */ 2320 if (object->type == OBJT_DEFAULT) 2321 swp_pager_meta_convert(object); 2322 2323 /* 2324 * Locate swblock. If not found create, but if we aren't adding 2325 * anything just return. If we run out of space in the map we wait 2326 * and, since the hash table may have changed, retry. 2327 */ 2328 retry: 2329 swap = swp_pager_lookup(object, index); 2330 2331 if (swap == NULL) { 2332 int i; 2333 2334 swap = zalloc(swap_zone); 2335 if (swap == NULL) { 2336 vm_wait(0); 2337 goto retry; 2338 } 2339 swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK; 2340 swap->swb_count = 0; 2341 2342 ++object->swblock_count; 2343 2344 for (i = 0; i < SWAP_META_PAGES; ++i) 2345 swap->swb_pages[i] = SWAPBLK_NONE; 2346 oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap); 2347 KKASSERT(oswap == NULL); 2348 } 2349 2350 /* 2351 * Delete prior contents of metadata. 2352 * 2353 * NOTE: Decrement swb_count after the freeing operation (which 2354 * might block) to prevent racing destruction of the swblock. 2355 */ 2356 index &= SWAP_META_MASK; 2357 2358 while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) { 2359 swap->swb_pages[index] = SWAPBLK_NONE; 2360 /* can block */ 2361 swp_pager_freeswapspace(object, v, 1); 2362 --swap->swb_count; 2363 --mycpu->gd_vmtotal.t_vm; 2364 } 2365 2366 /* 2367 * Enter block into metadata 2368 */ 2369 swap->swb_pages[index] = swapblk; 2370 if (swapblk != SWAPBLK_NONE) { 2371 ++swap->swb_count; 2372 ++mycpu->gd_vmtotal.t_vm; 2373 } 2374 } 2375 2376 /* 2377 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata 2378 * 2379 * The requested range of blocks is freed, with any associated swap 2380 * returned to the swap bitmap. 2381 * 2382 * This routine will free swap metadata structures as they are cleaned 2383 * out. This routine does *NOT* operate on swap metadata associated 2384 * with resident pages. 2385 * 2386 * The caller must hold the object. 2387 */ 2388 static int swp_pager_meta_free_callback(struct swblock *swb, void *data); 2389 2390 static void 2391 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count) 2392 { 2393 struct swfreeinfo info; 2394 2395 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2396 2397 /* 2398 * Nothing to do 2399 */ 2400 if (object->swblock_count == 0) { 2401 KKASSERT(RB_EMPTY(&object->swblock_root)); 2402 return; 2403 } 2404 if (count == 0) 2405 return; 2406 2407 /* 2408 * Setup for RB tree scan. Note that the pindex range can be huge 2409 * due to the 64 bit page index space so we cannot safely iterate. 2410 */ 2411 info.object = object; 2412 info.basei = index & ~(vm_pindex_t)SWAP_META_MASK; 2413 info.begi = index; 2414 info.endi = index + count - 1; 2415 swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp, 2416 swp_pager_meta_free_callback, &info); 2417 } 2418 2419 /* 2420 * The caller must hold the object. 2421 */ 2422 static 2423 int 2424 swp_pager_meta_free_callback(struct swblock *swap, void *data) 2425 { 2426 struct swfreeinfo *info = data; 2427 vm_object_t object = info->object; 2428 int index; 2429 int eindex; 2430 2431 /* 2432 * Figure out the range within the swblock. The wider scan may 2433 * return edge-case swap blocks when the start and/or end points 2434 * are in the middle of a block. 2435 */ 2436 if (swap->swb_index < info->begi) 2437 index = (int)info->begi & SWAP_META_MASK; 2438 else 2439 index = 0; 2440 2441 if (swap->swb_index + SWAP_META_PAGES > info->endi) 2442 eindex = (int)info->endi & SWAP_META_MASK; 2443 else 2444 eindex = SWAP_META_MASK; 2445 2446 /* 2447 * Scan and free the blocks. The loop terminates early 2448 * if (swap) runs out of blocks and could be freed. 2449 * 2450 * NOTE: Decrement swb_count after swp_pager_freeswapspace() 2451 * to deal with a zfree race. 2452 */ 2453 while (index <= eindex) { 2454 swblk_t v = swap->swb_pages[index]; 2455 2456 if (v != SWAPBLK_NONE) { 2457 swap->swb_pages[index] = SWAPBLK_NONE; 2458 /* can block */ 2459 swp_pager_freeswapspace(object, v, 1); 2460 --mycpu->gd_vmtotal.t_vm; 2461 if (--swap->swb_count == 0) { 2462 swp_pager_remove(object, swap); 2463 zfree(swap_zone, swap); 2464 --object->swblock_count; 2465 break; 2466 } 2467 } 2468 ++index; 2469 } 2470 2471 /* swap may be invalid here due to zfree above */ 2472 lwkt_yield(); 2473 2474 return(0); 2475 } 2476 2477 /* 2478 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object 2479 * 2480 * This routine locates and destroys all swap metadata associated with 2481 * an object. 2482 * 2483 * NOTE: Decrement swb_count after the freeing operation (which 2484 * might block) to prevent racing destruction of the swblock. 2485 * 2486 * The caller must hold the object. 2487 */ 2488 static void 2489 swp_pager_meta_free_all(vm_object_t object) 2490 { 2491 struct swblock *swap; 2492 int i; 2493 2494 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2495 2496 while ((swap = RB_ROOT(&object->swblock_root)) != NULL) { 2497 swp_pager_remove(object, swap); 2498 for (i = 0; i < SWAP_META_PAGES; ++i) { 2499 swblk_t v = swap->swb_pages[i]; 2500 if (v != SWAPBLK_NONE) { 2501 /* can block */ 2502 swp_pager_freeswapspace(object, v, 1); 2503 --swap->swb_count; 2504 --mycpu->gd_vmtotal.t_vm; 2505 } 2506 } 2507 if (swap->swb_count != 0) 2508 panic("swap_pager_meta_free_all: swb_count != 0"); 2509 zfree(swap_zone, swap); 2510 --object->swblock_count; 2511 lwkt_yield(); 2512 } 2513 KKASSERT(object->swblock_count == 0); 2514 } 2515 2516 /* 2517 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data. 2518 * 2519 * This routine is capable of looking up, popping, or freeing 2520 * swapblk assignments in the swap meta data or in the vm_page_t. 2521 * The routine typically returns the swapblk being looked-up, or popped, 2522 * or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block 2523 * was invalid. This routine will automatically free any invalid 2524 * meta-data swapblks. 2525 * 2526 * It is not possible to store invalid swapblks in the swap meta data 2527 * (other then a literal 'SWAPBLK_NONE'), so we don't bother checking. 2528 * 2529 * When acting on a busy resident page and paging is in progress, we 2530 * have to wait until paging is complete but otherwise can act on the 2531 * busy page. 2532 * 2533 * SWM_FREE remove and free swap block from metadata 2534 * SWM_POP remove from meta data but do not free.. pop it out 2535 * 2536 * The caller must hold the object. 2537 */ 2538 static swblk_t 2539 swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags) 2540 { 2541 struct swblock *swap; 2542 swblk_t r1; 2543 2544 if (object->swblock_count == 0) 2545 return(SWAPBLK_NONE); 2546 2547 r1 = SWAPBLK_NONE; 2548 swap = swp_pager_lookup(object, index); 2549 2550 if (swap != NULL) { 2551 index &= SWAP_META_MASK; 2552 r1 = swap->swb_pages[index]; 2553 2554 if (r1 != SWAPBLK_NONE) { 2555 if (flags & (SWM_FREE|SWM_POP)) { 2556 swap->swb_pages[index] = SWAPBLK_NONE; 2557 --mycpu->gd_vmtotal.t_vm; 2558 if (--swap->swb_count == 0) { 2559 swp_pager_remove(object, swap); 2560 zfree(swap_zone, swap); 2561 --object->swblock_count; 2562 } 2563 } 2564 /* swap ptr may be invalid */ 2565 if (flags & SWM_FREE) { 2566 swp_pager_freeswapspace(object, r1, 1); 2567 r1 = SWAPBLK_NONE; 2568 } 2569 } 2570 /* swap ptr may be invalid */ 2571 } 2572 return(r1); 2573 } 2574