/*
 * (MPSAFE)
 *
 * Copyright (c) 1998-2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 * @(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/kcollect.h>

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/vnode_pager.h>

#include <sys/thread2.h>
#include <sys/buf2.h>
#include <vm/vm_page2.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER	SWB_NPAGES
#endif

#define SWM_FREE	0x02	/* free, period */
#define SWM_POP		0x04	/* pop out */

#define SWBIO_READ	0x01
#define SWBIO_WRITE	0x02
#define SWBIO_SYNC	0x04
#define SWBIO_TTC	0x08	/* for VM_PAGER_TRY_TO_CACHE */

struct swfreeinfo {
	vm_object_t	object;
	vm_pindex_t	basei;
	vm_pindex_t	begi;
	vm_pindex_t	endi;	/* inclusive */
};

struct swswapoffinfo {
	vm_object_t	object;
	int		devidx;
	int		shared;
};

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */
int swap_pager_full;		/* swap space exhaustion (task killing) */
int swap_fail_ticks;		/* when we became exhausted */
int swap_pager_almost_full;	/* swap space exhaustion (w/ hysteresis) */
swblk_t vm_swap_cache_use;
swblk_t vm_swap_anon_use;
static int vm_report_swap_allocs;

static int nsw_rcount;		/* free read buffers */
static int nsw_wcount_sync;	/* limit write buffers / synchronous */
static int nsw_wcount_async;	/* limit write buffers / asynchronous */
static int nsw_wcount_async_max;/* assigned maximum */
static int nsw_cluster_max;	/* maximum VOP I/O allowed */

struct blist *swapblist;
static int swap_async_max = 4;	/* maximum in-progress async I/O's */
static int swap_burst_read = 0;	/* allow burst reading */
static swblk_t swapiterator;	/* linearize allocations */
int swap_user_async = 0;	/* user swap pager operation can be async */

static struct spinlock swapbp_spin =
	SPINLOCK_INITIALIZER(&swapbp_spin, "swapbp_spin");

/* from vm_swap.c */
extern struct vnode *swapdev_vp;
extern struct swdevt *swdevt;
extern int nswdev;

#define BLK2DEVIDX(blk) (nswdev > 1 ? blk / SWB_DMMAX % nswdev : 0)

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
SYSCTL_INT(_vm, OID_AUTO, swap_burst_read,
	CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins");
SYSCTL_INT(_vm, OID_AUTO, swap_user_async,
	CTLFLAG_RW, &swap_user_async, 0, "Allow async user swap write I/O");

#if SWBLK_BITS == 64
SYSCTL_LONG(_vm, OID_AUTO, swap_cache_use,
	CTLFLAG_RD, &vm_swap_cache_use, 0, "");
SYSCTL_LONG(_vm, OID_AUTO, swap_anon_use,
	CTLFLAG_RD, &vm_swap_anon_use, 0, "");
SYSCTL_LONG(_vm, OID_AUTO, swap_size,
	CTLFLAG_RD, &vm_swap_size, 0, "");
#else
SYSCTL_INT(_vm, OID_AUTO, swap_cache_use,
	CTLFLAG_RD, &vm_swap_cache_use, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_anon_use,
	CTLFLAG_RD, &vm_swap_anon_use, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_size,
	CTLFLAG_RD, &vm_swap_size, 0, "");
#endif
SYSCTL_INT(_vm, OID_AUTO, report_swap_allocs,
	CTLFLAG_RW, &vm_report_swap_allocs, 0, "");

vm_zone_t swap_zone;

/*
 * Red-Black tree for swblock entries
 *
 * The caller must hold vm_token
 */
RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare,
	     vm_pindex_t, swb_index);

int
rb_swblock_compare(struct swblock *swb1, struct swblock *swb2)
{
	if (swb1->swb_index < swb2->swb_index)
		return(-1);
	if (swb1->swb_index > swb2->swb_index)
		return(1);
	return(0);
}

static
int
rb_swblock_scancmp(struct swblock *swb, void *data)
{
	struct swfreeinfo *info = data;

	if (swb->swb_index < info->basei)
		return(-1);
	if (swb->swb_index > info->endi)
		return(1);
	return(0);
}

static
int
rb_swblock_condcmp(struct swblock *swb, void *data)
{
	struct swfreeinfo *info = data;

	if (swb->swb_index < info->basei)
		return(-1);
	return(0);
}
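
/*
 * Illustrative sketch (kept #if 0'd like the other example code in this
 * file): how the scan comparators above are typically consumed by
 * RB_SCAN.  The scan invokes the per-node callback only for nodes whose
 * compare function returns 0; a negative return means the node lies
 * below the range (the scan continues to the right), a positive return
 * means it lies above the range (the scan continues to the left).
 * rb_swblock_condcmp never returns > 0, so a conditional scan runs from
 * basei to the end of the tree.  example_visit() is hypothetical.
 */
#if 0
static int
example_visit(struct swblock *swap, void *data)
{
	return(0);			/* 0 continues the scan */
}

static void
example_scan_range(vm_object_t object, vm_pindex_t basei, vm_pindex_t endi)
{
	struct swfreeinfo info;		/* comparator reads basei/endi */

	info.object = object;
	info.basei = basei;
	info.endi = endi;
	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp,
				example_visit, &info);
}
#endif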

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static void	swap_pager_dealloc (vm_object_t object);
static int	swap_pager_getpage (vm_object_t, vm_page_t *, int);
static void	swap_chain_iodone(struct bio *biox);

struct pagerops swappagerops = {
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object */
	swap_pager_getpage,	/* pagein */
	swap_pager_putpages,	/* pageout */
	swap_pager_haspage	/* get backing store status for page */
};

/*
 * SWB_DMMAX is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  SWB_DMMAX is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */

int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

static __inline void	swp_sizecheck (void);
static void	swp_pager_async_iodone (struct bio *bio);

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace(vm_object_t object,
						swblk_t blk, int npages);
static __inline swblk_t swp_pager_getswapspace(vm_object_t object, int npages);

/*
 * Metadata functions
 */

static void swp_pager_meta_convert(vm_object_t);
static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
static void swp_pager_meta_free_all(vm_object_t);
static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	Update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	SMP races are ok.
 */
static __inline void
swp_sizecheck(void)
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			kprintf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
			swap_fail_ticks = ticks;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * Long-term data collection on 10-second interval.  Return the value
 * for KCOLLECT_SWAPPCT and set the values for SWAPANO and SWAPCAC.
 *
 * Return total swap in the scale field.  This can change if swap is
 * regularly added or removed and may cause some historical confusion
 * in that case, but SWAPPCT will always be historically accurate.
 */

#define PTOB(value)	((uint64_t)(value) << PAGE_SHIFT)

static uint64_t
collect_swap_callback(int n)
{
	uint64_t total = vm_swap_max;
	uint64_t anon = vm_swap_anon_use;
	uint64_t cache = vm_swap_cache_use;

	if (total == 0)		/* avoid divide by zero */
		total = 1;
	kcollect_setvalue(KCOLLECT_SWAPANO, PTOB(anon));
	kcollect_setvalue(KCOLLECT_SWAPCAC, PTOB(cache));
	kcollect_setscale(KCOLLECT_SWAPANO,
			  KCOLLECT_SCALE(KCOLLECT_SWAPANO_FORMAT, PTOB(total)));
	kcollect_setscale(KCOLLECT_SWAPCAC,
			  KCOLLECT_SCALE(KCOLLECT_SWAPCAC_FORMAT, PTOB(total)));
	return (((anon + cache) * 10000 + (total >> 1)) / total);
}
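
/*
 * Worked example of the fixed-point math in collect_swap_callback()
 * above: the return value is swap utilization in 1/100ths of a percent,
 * with (total >> 1) added so the integer division rounds to nearest
 * instead of truncating.  Standalone sketch; the numbers are made up.
 */
#if 0
static void
example_swappct(void)
{
	uint64_t total = 1000;		/* configured swap, in pages */
	uint64_t anon = 123;		/* anonymous swap in use */
	uint64_t cache = 7;		/* swapcache swap in use */

	/* 130/1000 = 13.00% -> 1300 basis points, rounded to nearest */
	KKASSERT(((anon + cache) * 10000 + (total >> 1)) / total == 1300);
}
#endif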

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 *
 *	Called from the low level boot code only.
 */
static void
swap_pager_init(void *arg __unused)
{
	kcollect_register(KCOLLECT_SWAPPCT, "swapuse", collect_swap_callback,
			  KCOLLECT_SCALE(KCOLLECT_SWAPPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_SWAPANO, "swapano", NULL,
			  KCOLLECT_SCALE(KCOLLECT_SWAPANO_FORMAT, 0));
	kcollect_register(KCOLLECT_SWAPCAC, "swapcac", NULL,
			  KCOLLECT_SCALE(KCOLLECT_SWAPCAC_FORMAT, 0));
}
SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL);

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from the pageout process once, prior to
 *	entering its main loop.
 */
void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * nsw_cluster_max is constrained by the number of pages an XIO
	 * holds, i.e., (MAXPHYS/PAGE_SIZE), and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */

	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf_kva + 1) / 2;
	nsw_wcount_sync = (nswbuf_kva + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;

	/*
	 * The zone is dynamically allocated so generally size it to
	 * maxswzone (32MB to 256GB of KVM).  Set a minimum size based
	 * on physical memory of around 8x (each swblock can hold 16 pages).
	 *
	 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
	 * has increased dramatically.
	 */
	n = vmstats.v_page_count / 2;
	if (maxswzone && n < maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;

	do {
		swap_zone = zinit("SWAPMETA", sizeof(struct swblock), n,
				  ZONE_INTERRUPT);
		if (swap_zone != NULL)
			break;
		/*
		 * If the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	if (swap_zone == NULL)
		panic("swap_pager_swap_init: swap_zone == NULL");
	if (n2 != n)
		kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
}
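
/*
 * Sketch of the retry behavior in swap_pager_swap_init() above: each
 * failed zinit() attempt retries with roughly two thirds of the previous
 * entry count, so the request decays geometrically and the loop is
 * guaranteed to terminate at n == 0.  Standalone illustration only; the
 * function name is hypothetical.
 */
#if 0
static int
example_zone_backoff(int n)
{
	int attempts = 0;

	while (n > 0) {
		++attempts;		/* a zinit() attempt would go here */
		n -= (n + 2) / 3;	/* keep ~2/3 of the previous size */
	}
	return (attempts);		/* O(log n) attempts total */
}
#endif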

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_convert().
 *
 *	We only support unnamed objects.
 *
 * No restrictions.
 */
vm_object_t
swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
{
	vm_object_t object;

	KKASSERT(handle == NULL);
	object = vm_object_allocate_hold(OBJT_DEFAULT,
					 OFF_TO_IDX(offset + PAGE_MASK + size));
	swp_pager_meta_convert(object);
	vm_object_drop(object);

	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 * The object must be locked or unreferenceable.
 * No other requirements.
 */
static void
swap_pager_dealloc(vm_object_t object)
{
	vm_object_hold(object);
	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	swp_pager_meta_free_all(object);
	vm_object_drop(object);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 * The caller must hold the object.
 * This routine may not block.
 */
static __inline swblk_t
swp_pager_getswapspace(vm_object_t object, int npages)
{
	swblk_t blk;

	lwkt_gettoken(&vm_token);
	blk = blist_allocat(swapblist, npages, swapiterator);
	if (blk == SWAPBLK_NONE)
		blk = blist_allocat(swapblist, npages, 0);
	if (blk == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			if (vm_swap_max == 0)
				kprintf("Warning: The system would like to "
					"page to swap but no swap space "
					"is configured!\n");
			else
				kprintf("swap_pager_getswapspace: "
					"swap full allocating %d pages\n",
					npages);
			swap_pager_full = 2;
			if (swap_pager_almost_full == 0)
				swap_fail_ticks = ticks;
			swap_pager_almost_full = 1;
		}
	} else {
		/* swapiterator = blk; disable for now, doesn't work well */
		swapacctspace(blk, -npages);
		if (object->type == OBJT_SWAP)
			vm_swap_anon_use += npages;
		else
			vm_swap_cache_use += npages;
		swp_sizecheck();
	}
	lwkt_reltoken(&vm_token);
	return(blk);
}
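
/*
 * Sketch of the allocation pattern used above: try to allocate starting
 * at the linearizing iterator first, then wrap and retry from the origin
 * before declaring swap exhausted.  blist_allocat() is the real blist
 * API; the wrapper function name is hypothetical.
 */
#if 0
static swblk_t
example_alloc_wrap(struct blist *bl, int npages, swblk_t iter)
{
	swblk_t blk;

	blk = blist_allocat(bl, npages, iter);		/* from iterator */
	if (blk == SWAPBLK_NONE)
		blk = blist_allocat(bl, npages, 0);	/* wrap to origin */
	return (blk);			/* SWAPBLK_NONE if truly full */
}
#endif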

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 * This routine may not block.
 */
static __inline void
swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages)
{
	struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)];

	lwkt_gettoken(&vm_token);
	sp->sw_nused -= npages;
	if (object->type == OBJT_SWAP)
		vm_swap_anon_use -= npages;
	else
		vm_swap_cache_use -= npages;

	if (sp->sw_flags & SW_CLOSING) {
		lwkt_reltoken(&vm_token);
		return;
	}

	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
	lwkt_reltoken(&vm_token);
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 * No requirements.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size)
{
	vm_object_hold(object);
	swp_pager_meta_free(object, start, size);
	vm_object_drop(object);
}

/*
 * No requirements.
 */
void
swap_pager_freespace_all(vm_object_t object)
{
	vm_object_hold(object);
	swp_pager_meta_free_all(object);
	vm_object_drop(object);
}

/*
 * This function conditionally frees swap cache swap starting at
 * (*basei) in the object.  (count) swap blocks will be nominally freed.
 * The actual number of blocks freed can be more or less than the
 * requested number.
 *
 * This function nominally returns the number of blocks freed.  However,
 * the actual number of blocks freed may be less than the returned value.
 * If the function is unable to exhaust the object or if it is able to
 * free (approximately) the requested number of blocks it returns
 * a value n > count.
 *
 * If we exhaust the object we will return a value n <= count.
 *
 * The caller must hold the object.
 *
 * WARNING!  If count == 0 then -1 can be returned as a degenerate case,
 *	     callers should always pass a count value > 0.
 */
static int swap_pager_condfree_callback(struct swblock *swap, void *data);

int
swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count)
{
	struct swfreeinfo info;
	int n;
	int t;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	info.object = object;
	info.basei = *basei;	/* skip up to this page index */
	info.begi = count;	/* max swap pages to destroy */
	info.endi = count * 8;	/* max swblocks to scan */

	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp,
				swap_pager_condfree_callback, &info);
	*basei = info.basei;

	/*
	 * Take the higher difference swblocks vs pages
	 */
	n = count - (int)info.begi;
	t = count * 8 - (int)info.endi;
	if (n < t)
		n = t;
	if (n < 1)
		n = 1;
	return(n);
}
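
/*
 * Worked example of the return value computed by swap_pager_condfree()
 * above, under the assumption that the scan decremented begi/endi as
 * described: with count = 16, freeing 4 pages (begi ends at 12) while
 * scanning 30 swblocks (endi ends at 128 - 30 = 98) yields
 * n = max(16 - 12, 128 - 98) = max(4, 30) = 30, clamped to at least 1.
 * The helper below is illustrative only.
 */
#if 0
static int
example_condfree_return(int count, int begi_left, int endi_left)
{
	int n = count - begi_left;	/* pages actually freed */
	int t = count * 8 - endi_left;	/* swblocks actually scanned */

	if (n < t)
		n = t;			/* take the higher difference */
	if (n < 1)
		n = 1;			/* degenerate-case guard */
	return (n);
}
#endif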

/*
 * The idea is to free whole meta-blocks to avoid fragmenting
 * the swap space or disk I/O.  We only do this if NO VM pages
 * are present.
 *
 * We do not have to deal with clearing PG_SWAPPED in related VM
 * pages because there are no related VM pages.
 *
 * The caller must hold the object.
 */
static int
swap_pager_condfree_callback(struct swblock *swap, void *data)
{
	struct swfreeinfo *info = data;
	vm_object_t object = info->object;
	int i;

	for (i = 0; i < SWAP_META_PAGES; ++i) {
		if (vm_page_lookup(object, swap->swb_index + i))
			break;
	}
	info->basei = swap->swb_index + SWAP_META_PAGES;
	if (i == SWAP_META_PAGES) {
		info->begi -= swap->swb_count;
		swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES);
	}
	--info->endi;
	if ((int)info->begi < 0 || (int)info->endi < 0)
		return(-1);
	lwkt_yield();
	return(0);
}

/*
 * Called by vm_page_alloc() when a new VM page is inserted
 * into a VM object.  Checks whether swap has been assigned to
 * the page and sets PG_SWAPPED as necessary.
 *
 * (m) must be busied by caller and remains busied on return.
 */
void
swap_pager_page_inserted(vm_page_t m)
{
	if (m->object->swblock_count) {
		vm_object_hold(m->object);
		if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE)
			vm_page_flag_set(m, PG_SWAPPED);
		vm_object_drop(m->object);
	}
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 *
 * The caller is responsible for avoiding races in the specified range.
 * No other requirements.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	swblk_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	vm_object_hold(object);

	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(object, n)) ==
			       SWAPBLK_NONE)
			{
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg,
							    start - beg);
					vm_object_drop(object);
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	vm_object_drop(object);
	return(0);
}
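
/*
 * Sketch of the backoff used by swap_pager_reserve() above: request the
 * largest contiguous run the blist allows and halve the request on each
 * failure, giving up only when even a single page cannot be allocated.
 * swp_pager_getswapspace() is the real helper; the wrapper below is
 * illustrative.
 */
#if 0
static swblk_t
example_alloc_backoff(vm_object_t object, int *np)
{
	int n = BLIST_MAX_ALLOC;
	swblk_t blk;

	while ((blk = swp_pager_getswapspace(object, n)) == SWAPBLK_NONE) {
		n >>= 1;		/* halve the contiguous request */
		if (n == 0)
			return (SWAPBLK_NONE);
	}
	*np = n;			/* actual run length obtained */
	return (blk);
}
#endif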

/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be held by the caller.
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
		vm_pindex_t base_index, int destroysource)
{
	vm_pindex_t i;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject));
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject));

	/*
	 * Transfer source to destination.
	 */
	for (i = 0; i < dstobject->size; ++i) {
		swblk_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			swblk_t srcaddr;

			srcaddr = swp_pager_meta_ctl(srcobject,
						     base_index + i, SWM_POP);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the source block.
			 */
			swp_pager_meta_ctl(srcobject, base_index + i, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */
	if (destroysource) {
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		swp_pager_meta_free_all(srcobject);
		if (srcobject->type == OBJT_SWAP)
			srcobject->type = OBJT_DEFAULT;
	}
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 *
 * No requirements.
 */
boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	swblk_t blk0;

	/*
	 * do we have good backing store at the requested index ?
	 */
	vm_object_hold(object);
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		vm_object_drop(object);
		return (FALSE);
	}
	vm_object_drop(object);
	return (TRUE);
}
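
/*
 * Sketch of the device-stripe test used by the I/O clustering code in
 * swap_pager_strategy() and the burst read/write paths below: two swap
 * block numbers live in the same SWB_DMMAX-sized stripe (and therefore
 * on the same device when swap is interleaved) exactly when their block
 * numbers agree in all bits above the stripe mask.  Helper name is
 * illustrative.
 */
#if 0
static int
example_same_stripe(swblk_t blk1, swblk_t blk2)
{
	return (((blk1 ^ blk2) & ~SWB_DMMASK) == 0);
}
#endif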

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.  This operates on any VM object, not just OBJT_SWAP
 *	objects.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation.
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *		 should make the page dirty before calling this routine.
 *		 This routine does NOT change the m->dirty status of the page.
 *		 Also: MADV_FREE depends on it.
 *
 * The page must be busied.
 * The caller can hold the object to avoid blocking, else we might block.
 * No other requirements.
 */
void
swap_pager_unswapped(vm_page_t m)
{
	if (m->flags & PG_SWAPPED) {
		vm_object_hold(m->object);
		KKASSERT(m->flags & PG_SWAPPED);
		swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
		vm_page_flag_clear(m, PG_SWAPPED);
		vm_object_drop(m->object);
	}
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements a VM OBJECT strategy function using swap backing store.
 *	This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP
 *	types.  Only BUF_CMD_{READ,WRITE,FREEBLKS} is supported, any other
 *	requests will return EINVAL.
 *
 *	This is intended to be a cacheless interface (i.e. caching occurs at
 *	higher levels), and is also used as a swap-based SSD cache for vnode
 *	and device objects.
 *
 *	All I/O goes directly to and from the swap device.
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 *
 *	NOTE: This function supports the KVABIO API wherein bp->b_data might
 *	      not be synchronized to the current cpu.
 *
 * No requirements.
 */
void
swap_pager_strategy(vm_object_t object, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct bio *nbio;
	vm_pindex_t start;
	vm_pindex_t biox_blkno = 0;
	int count;
	char *data;
	struct bio *biox;
	struct buf *bufx;
#if 0
	struct bio_track *track;
#endif

#if 0
	/*
	 * tracking for swapdev vnode I/Os
	 */
	if (bp->b_cmd == BUF_CMD_READ)
		track = &swapdev_vp->v_track_read;
	else
		track = &swapdev_vp->v_track_write;
#endif

	/*
	 * Only supported commands
	 */
	if (bp->b_cmd != BUF_CMD_FREEBLKS &&
	    bp->b_cmd != BUF_CMD_READ &&
	    bp->b_cmd != BUF_CMD_WRITE) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bio);
		return;
	}

	/*
	 * bcount must be an integral number of pages.
	 */
	if (bp->b_bcount & PAGE_MASK) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bio);
		kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
			"not page bounded\n",
			bp, (long long)bio->bio_offset, (int)bp->b_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */
	bp->b_error = 0;
	bp->b_flags &= ~B_ERROR;
	bp->b_resid = bp->b_bcount;

	start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
	count = howmany(bp->b_bcount, PAGE_SIZE);

	/*
	 * WARNING!  Do not dereference *data without issuing a bkvasync()
	 */
	data = bp->b_data;

	/*
	 * Deal with BUF_CMD_FREEBLKS
	 */
	if (bp->b_cmd == BUF_CMD_FREEBLKS) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		vm_object_hold(object);
		swp_pager_meta_free(object, start, count);
		vm_object_drop(object);
		bp->b_resid = 0;
		biodone(bio);
		return;
	}

	/*
	 * We need to be able to create a new cluster of I/O's.  We cannot
	 * use the caller fields of the passed bio so push a new one.
	 *
	 * Because nbio is just a placeholder for the cluster links,
	 * we can biodone() the original bio instead of nbio to make
	 * things a bit more efficient.
	 */
	nbio = push_bio(bio);
	nbio->bio_offset = bio->bio_offset;
	nbio->bio_caller_info1.cluster_head = NULL;
	nbio->bio_caller_info2.cluster_tail = NULL;

	biox = NULL;
	bufx = NULL;

	/*
	 * Execute read or write
	 */
	vm_object_hold(object);

	while (count > 0) {
		swblk_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */
		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && bp->b_cmd == BUF_CMD_WRITE) {
			blk = swp_pager_getswapspace(object, 1);
			if (blk == SWAPBLK_NONE) {
				bp->b_error = ENOMEM;
				bp->b_flags |= B_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the stripe.
		 */
		if (biox &&
		    (biox_blkno + btoc(bufx->b_bcount) != blk ||
		     ((biox_blkno ^ blk) & ~SWB_DMMASK))) {
			switch(bp->b_cmd) {
			case BUF_CMD_READ:
				++mycpu->gd_cnt.v_swapin;
				mycpu->gd_cnt.v_swappgsin +=
					btoc(bufx->b_bcount);
				break;
			case BUF_CMD_WRITE:
				++mycpu->gd_cnt.v_swapout;
				mycpu->gd_cnt.v_swappgsout +=
					btoc(bufx->b_bcount);
				bufx->b_dirtyend = bufx->b_bcount;
				break;
			default:
				/* NOT REACHED */
				break;
			}

			/*
			 * Finished with this buf.
			 */
			KKASSERT(bufx->b_bcount != 0);
			if (bufx->b_cmd != BUF_CMD_READ)
				bufx->b_dirtyend = bufx->b_bcount;
			biox = NULL;
			bufx = NULL;
		}

		/*
		 * Add new swapblk to biox, instantiating biox if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */
		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.
			 */
			bkvasync(bp);
			bzero(data, PAGE_SIZE);
			bp->b_resid -= PAGE_SIZE;
		} else {
			if (biox == NULL) {
				/* XXX chain count > 4, wait to <= 4 */

				bufx = getpbuf(NULL);
				bufx->b_flags |= B_KVABIO;
				biox = &bufx->b_bio1;
				cluster_append(nbio, bufx);
				bufx->b_cmd = bp->b_cmd;
				biox->bio_done = swap_chain_iodone;
				biox->bio_offset = (off_t)blk << PAGE_SHIFT;
				biox->bio_caller_info1.cluster_parent = nbio;
				biox_blkno = blk;
				bufx->b_bcount = 0;
				bufx->b_data = data;
			}
			bufx->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}

	vm_object_drop(object);

	/*
	 * Flush out last buffer
	 */
	if (biox) {
		if (bufx->b_cmd == BUF_CMD_READ) {
			++mycpu->gd_cnt.v_swapin;
			mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
		} else {
			++mycpu->gd_cnt.v_swapout;
			mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
			bufx->b_dirtyend = bufx->b_bcount;
		}
		KKASSERT(bufx->b_bcount);
		if (bufx->b_cmd != BUF_CMD_READ)
			bufx->b_dirtyend = bufx->b_bcount;
		/* biox, bufx = NULL */
	}

	/*
	 * Now initiate all the I/O.  Be careful looping on our chain as
	 * I/O's may complete while we are still initiating them.
	 *
	 * If the request is a 100% sparse read no bios will be present
	 * and we just biodone() the buffer.
	 */
	nbio->bio_caller_info2.cluster_tail = NULL;
	bufx = nbio->bio_caller_info1.cluster_head;

	if (bufx) {
		while (bufx) {
			biox = &bufx->b_bio1;
			BUF_KERNPROC(bufx);
			bufx = bufx->b_cluster_next;
			vn_strategy(swapdev_vp, biox);
		}
	} else {
		biodone(bio);
	}

	/*
	 * Completion of the cluster will also call biodone_chain(nbio).
	 * We never call biodone(nbio) so we don't have to worry about
	 * setting up a bio_done callback.  It's handled in the sub-IO.
	 */
}

/*
 * biodone callback
 *
 * No requirements.
 */
static void
swap_chain_iodone(struct bio *biox)
{
	struct buf **nextp;
	struct buf *bufx;	/* chained sub-buffer */
	struct bio *nbio;	/* parent nbio with chain glue */
	struct buf *bp;		/* original bp associated with nbio */
	int chain_empty;

	bufx = biox->bio_buf;
	nbio = biox->bio_caller_info1.cluster_parent;
	bp = nbio->bio_buf;

	/*
	 * Update the original buffer
	 */
	KKASSERT(bp != NULL);
	if (bufx->b_flags & B_ERROR) {
		atomic_set_int(&bp->b_flags, B_ERROR);
		bp->b_error = bufx->b_error;	/* race ok */
	} else if (bufx->b_resid != 0) {
		atomic_set_int(&bp->b_flags, B_ERROR);
		bp->b_error = EINVAL;		/* race ok */
	} else {
		atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
	}

	/*
	 * Remove us from the chain.
	 */
	spin_lock(&swapbp_spin);
	nextp = &nbio->bio_caller_info1.cluster_head;
	while (*nextp != bufx) {
		KKASSERT(*nextp != NULL);
		nextp = &(*nextp)->b_cluster_next;
	}
	*nextp = bufx->b_cluster_next;
	chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
	spin_unlock(&swapbp_spin);

	/*
	 * Clean up bufx.  If the chain is now empty we finish out
	 * the parent.  Note that we may be racing other completions
	 * so we must use the chain_empty status from above.
	 */
	if (chain_empty) {
		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
			atomic_set_int(&bp->b_flags, B_ERROR);
			bp->b_error = EINVAL;
		}
		biodone_chain(nbio);
	}
	relpbuf(bufx, NULL);
}
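
/*
 * Sketch of the pointer-to-pointer unlink used by swap_chain_iodone()
 * above: walking the address of each link lets an element be spliced out
 * of the singly-linked cluster list without special-casing the head.
 * Types are simplified for illustration.
 */
#if 0
struct example_node {
	struct example_node *next;
};

static void
example_unlink(struct example_node **headp, struct example_node *elm)
{
	struct example_node **nextp = headp;

	while (*nextp != elm)
		nextp = &(*nextp)->next; /* caller guarantees presence */
	*nextp = elm->next;		 /* splice out, head or middle */
}
#endif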

/*
 * SWAP_PAGER_GETPAGES() - bring page in from swap
 *
 *	The requested page may have to be brought in from swap.  Calculate the
 *	swap block and bring in additional pages if possible.  All pages must
 *	have contiguous swap block assignments and reside in the same object.
 *
 *	The caller has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The caller has BUSY'd the page.  We should return with (*mpp) left
 *	busy, and any additional pages unbusied.
 *
 *	If the caller encounters a PG_RAM page it will pass it to us even
 *	though it may be valid and dirty.  We cannot overwrite the page in
 *	this case!  The case is used to allow us to issue pure read-aheads.
 *
 *	NOTE! XXX This code does not entirely pipeline yet due to the fact
 *	      that the PG_RAM page is validated at the same time as mreq.
 *	      What we really need to do is issue a separate read-ahead pbuf.
 *
 * No requirements.
 */
static int
swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
	struct buf *bp;
	struct bio *bio;
	vm_page_t mreq;
	vm_page_t m;
	vm_offset_t kva;
	swblk_t blk;
	int i;
	int j;
	int raonly;
	int error;
	u_int32_t busy_count;
	vm_page_t marray[XIO_INTERNAL_PAGES];

	mreq = *mpp;

	vm_object_hold(object);
	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
			object,
			mreq->object
		);
	}

	/*
	 * We don't want to overwrite a fully valid page as it might be
	 * dirty.  This case can occur when e.g. vm_fault hits a perfectly
	 * valid page with PG_RAM set.
	 *
	 * In this case we see if the next page is a suitable page-in
	 * candidate and if it is we issue read-ahead.  PG_RAM will be
	 * set on the last page of the read-ahead to continue the pipeline.
	 */
	if (mreq->valid == VM_PAGE_BITS_ALL) {
		if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size) {
			vm_object_drop(object);
			return(VM_PAGER_OK);
		}
		blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0);
		if (blk == SWAPBLK_NONE) {
			vm_object_drop(object);
			return(VM_PAGER_OK);
		}
		m = vm_page_lookup_busy_try(object, mreq->pindex + 1,
					    TRUE, &error);
		if (error) {
			vm_object_drop(object);
			return(VM_PAGER_OK);
		} else if (m == NULL) {
			/*
			 * Use VM_ALLOC_QUICK to avoid blocking on cache
			 * page reuse.
			 */
			m = vm_page_alloc(object, mreq->pindex + 1,
					  VM_ALLOC_QUICK);
			if (m == NULL) {
				vm_object_drop(object);
				return(VM_PAGER_OK);
			}
		} else {
			if (m->valid) {
				vm_page_wakeup(m);
				vm_object_drop(object);
				return(VM_PAGER_OK);
			}
			vm_page_unqueue_nowakeup(m);
		}
		/* page is busy */
		mreq = m;
		raonly = 1;
	} else {
		raonly = 0;
	}

	/*
	 * Try to block-read contiguous pages from swap if sequential,
	 * otherwise just read one page.  Contiguous pages from swap must
	 * reside within a single device stripe because the I/O cannot be
	 * broken up across multiple stripes.
	 *
	 * Note that blk and iblk can be SWAPBLK_NONE but the loop is
	 * set up such that the case(s) are handled implicitly.
	 */
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
	marray[0] = mreq;

	for (i = 1; i <= swap_burst_read &&
		    i < XIO_INTERNAL_PAGES &&
		    mreq->pindex + i < object->size; ++i) {
		swblk_t iblk;

		iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0);
		if (iblk != blk + i)
			break;
		if ((blk ^ iblk) & ~SWB_DMMASK)
			break;
		m = vm_page_lookup_busy_try(object, mreq->pindex + i,
					    TRUE, &error);
		if (error) {
			break;
		} else if (m == NULL) {
			/*
			 * Use VM_ALLOC_QUICK to avoid blocking on cache
			 * page reuse.
			 */
			m = vm_page_alloc(object, mreq->pindex + i,
					  VM_ALLOC_QUICK);
			if (m == NULL)
				break;
		} else {
			if (m->valid) {
				vm_page_wakeup(m);
				break;
			}
			vm_page_unqueue_nowakeup(m);
		}
		/* page is busy */
		marray[i] = m;
	}
	if (i > 1)
		vm_page_flag_set(marray[i - 1], PG_RAM);

	/*
	 * If mreq is the requested page and we have nothing to do return
	 * VM_PAGER_FAIL.  If raonly is set mreq is just another read-ahead
	 * page and must be cleaned up.
	 */
	if (blk == SWAPBLK_NONE) {
		KKASSERT(i == 1);
		if (raonly) {
			vnode_pager_freepage(mreq);
			vm_object_drop(object);
			return(VM_PAGER_OK);
		} else {
			vm_object_drop(object);
			return(VM_PAGER_FAIL);
		}
	}

	/*
	 * Map our page(s) into kva for input
	 *
	 * Use the KVABIO API to avoid synchronizing the pmap.
	 */
	bp = getpbuf_kva(&nsw_rcount);
	bio = &bp->b_bio1;
	kva = (vm_offset_t) bp->b_kvabase;
	bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t));
	pmap_qenter_noinval(kva, bp->b_xio.xio_pages, i);

	bp->b_data = (caddr_t)kva;
	bp->b_bcount = PAGE_SIZE * i;
	bp->b_xio.xio_npages = i;
	bp->b_flags |= B_KVABIO;
	bio->bio_done = swp_pager_async_iodone;
	bio->bio_offset = (off_t)blk << PAGE_SHIFT;
	bio->bio_caller_info1.index = SWBIO_READ;

	/*
	 * Set index.  If raonly set the index beyond the array so all
	 * the pages are treated the same, otherwise the original mreq is
	 * at index 0.
	 */
	if (raonly)
		bio->bio_driver_info = (void *)(intptr_t)i;
	else
		bio->bio_driver_info = (void *)(intptr_t)0;

	for (j = 0; j < i; ++j) {
		atomic_set_int(&bp->b_xio.xio_pages[j]->busy_count,
			       PBUSY_SWAPINPROG);
	}

	mycpu->gd_cnt.v_swapin++;
	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */
	vm_object_pip_add(object, bp->b_xio.xio_npages);

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 */
	bp->b_cmd = BUF_CMD_READ;
	BUF_KERNPROC(bp);
	vn_strategy(swapdev_vp, bio);

	/*
	 * Wait for the page we want to complete.  PBUSY_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 *
	 * If this is a read-ahead only we return immediately without
	 * waiting for I/O.
	 */
	if (raonly) {
		vm_object_drop(object);
		return(VM_PAGER_OK);
	}

	/*
	 * Read-ahead includes originally requested page case.
	 */
	for (;;) {
		busy_count = mreq->busy_count;
		cpu_ccfence();
		if ((busy_count & PBUSY_SWAPINPROG) == 0)
			break;
		tsleep_interlock(mreq, 0);
		if (!atomic_cmpset_int(&mreq->busy_count, busy_count,
				       busy_count |
				       PBUSY_SWAPINPROG | PBUSY_WANTED)) {
			continue;
		}
		atomic_set_int(&mreq->flags, PG_REFERENCED);
		mycpu->gd_cnt.v_intrans++;
		if (tsleep(mreq, PINTERLOCKED, "swread", hz*20)) {
			kprintf(
			    "swap_pager: indefinite wait buffer: "
				" bp %p offset: %lld, size: %ld\n",
			    bp,
			    (long long)bio->bio_offset,
			    (long)bp->b_bcount
			);
		}
	}

	/*
	 * Disallow speculative reads prior to the SWAPINPROG test.
	 */
	cpu_lfence();

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */
	vm_object_drop(object);
	if (mreq->valid != VM_PAGE_BITS_ALL)
		return(VM_PAGER_ERROR);
	else
		return(VM_PAGER_OK);

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 * swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in vn_strategy(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 *
 * No requirements.
 */
void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
		    int flags, int *rtvals)
{
	int i;
	int n = 0;

	vm_object_hold(object);

	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
			object,
			m[0]->object
		);
	}

	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * Check for bogus sysops
	 *
	 * Force sync if not the pageout process, we don't want any single
	 * non-pageout process to be able to hog the I/O subsystem!  This
	 * can be overridden by setting the vm.swap_user_async sysctl.
	 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);

	/*
	 * Normally we force synchronous swap I/O if this is not the
	 * pageout daemon to prevent any single user process limited
	 * via RLIMIT_RSS from hogging swap write bandwidth.
	 */
	if (curthread != pagethread &&
	    curthread != emergpager &&
	    swap_user_async == 0) {
		flags |= VM_PAGER_PUT_SYNC;
	}

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */
	if (swap_async_max != nsw_wcount_async_max) {
		int n;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf_kva / 2)
			n = nswbuf_kva / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 *
		 * vm_token needed for nsw_wcount sleep interlock
		 */
		lwkt_gettoken(&vm_token);
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async_max += n;
			pbuf_adjcount(&nsw_wcount_async, n);
		}
		lwkt_reltoken(&vm_token);
	}

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		struct buf *bp;
		struct bio *bio;
		swblk_t blk;
		int j;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		lwkt_gettoken(&vm_token);

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while ((blk = swp_pager_getswapspace(object, n)) ==
		       SWAPBLK_NONE && n > 4) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			lwkt_reltoken(&vm_token);
			continue;
		}
		if (vm_report_swap_allocs > 0) {
			kprintf("swap_alloc %08jx,%d\n", (intmax_t)blk, n);
			--vm_report_swap_allocs;
		}

		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.
		 */
		if ((blk ^ (blk + n)) & ~SWB_DMMASK) {
			j = ((blk + SWB_DMMAX) & ~SWB_DMMASK) - blk;
			swp_pager_freeswapspace(object, blk + j, n - j);
			n = j;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 *
		 * Use the KVABIO API to avoid synchronizing the pmap.
		 */
		if ((flags & VM_PAGER_PUT_SYNC))
			bp = getpbuf_kva(&nsw_wcount_sync);
		else
			bp = getpbuf_kva(&nsw_wcount_async);
		bio = &bp->b_bio1;

		lwkt_reltoken(&vm_token);

		pmap_qenter_noinval((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_flags |= B_KVABIO;
		bp->b_bcount = PAGE_SIZE * n;
		bio->bio_offset = (off_t)blk << PAGE_SHIFT;

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(mreq->object, mreq->pindex,
					     blk + j);
			if (object->type == OBJT_SWAP)
				vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			atomic_set_int(&mreq->busy_count, PBUSY_SWAPINPROG);
			bp->b_xio.xio_pages[j] = mreq;
		}
		bp->b_xio.xio_npages = n;

		mycpu->gd_cnt.v_swapout++;
		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;

		bp->b_dirtyoff = 0;		/* req'd for NFS */
		bp->b_dirtyend = bp->b_bcount;	/* req'd for NFS */
		bp->b_cmd = BUF_CMD_WRITE;
		bio->bio_caller_info1.index = SWBIO_WRITE;

		/*
		 * asynchronous
		 */
		if ((flags & VM_PAGER_PUT_SYNC) == 0) {
			bio->bio_done = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			vn_strategy(swapdev_vp, bio);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			continue;
		}

		/*
		 * Issue synchronously.
		 *
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		bio->bio_caller_info1.index |= SWBIO_SYNC;
		if (flags & VM_PAGER_TRY_TO_CACHE)
			bio->bio_caller_info1.index |= SWBIO_TTC;
		bio->bio_done = biodone_sync;
		bio->bio_flags |= BIO_SYNC;
		vn_strategy(swapdev_vp, bio);
		biowait(bio, "swwrt");

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */
		swp_pager_async_iodone(bio);
	}
	vm_object_drop(object);
}

/*
 * No requirements.
 *
 * Recalculate the low and high-water marks.
 */
void
swap_pager_newswap(void)
{
	/*
	 * NOTE: vm_swap_max cannot exceed 1 billion blocks, which is the
	 *	 limitation imposed by the blist code.  Remember that this
	 *	 will be divided by NSWAP_MAX (4), so each swap device is
	 *	 limited to around a terabyte.
	 */
	if (vm_swap_max) {
		nswap_lowat = (int64_t)vm_swap_max * 4 / 100;	/* 4% left */
		nswap_hiwat = (int64_t)vm_swap_max * 6 / 100;	/* 6% left */
		kprintf("swap low/high-water marks set to %d/%d\n",
			nswap_lowat, nswap_hiwat);
	} else {
		nswap_lowat = 128;
		nswap_hiwat = 512;
	}
	swp_sizecheck();
}

/*
 * swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 *
 * No requirements.
 */
static void
swp_pager_async_iodone(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	vm_object_t object = NULL;
	int i;
	int *nswptr;

	/*
	 * report error
	 */
	if (bp->b_flags & B_ERROR) {
		kprintf(
		    "swap_pager: I/O error - %s failed; offset %lld,"
			"size %ld, error %d\n",
			((bio->bio_caller_info1.index & SWBIO_READ) ?
			 "pagein" : "pageout"),
			(long long)bio->bio_offset,
			(long)bp->b_bcount,
			bp->b_error
		);
	}

	/*
	 * set object.
	 */
	if (bp->b_xio.xio_npages)
		object = bp->b_xio.xio_pages[0]->object;

#if 0
	/* PMAP TESTING CODE (useful, keep it in but #if 0'd) */
	if (bio->bio_caller_info1.index & SWBIO_WRITE) {
		if (bio->bio_crc != iscsi_crc32(bp->b_data, bp->b_bcount)) {
			kprintf("SWAPOUT: BADCRC %08x %08x\n",
				bio->bio_crc,
				iscsi_crc32(bp->b_data, bp->b_bcount));
			for (i = 0; i < bp->b_xio.xio_npages; ++i) {
				vm_page_t m = bp->b_xio.xio_pages[i];
				if (m->flags & PG_WRITEABLE)
					kprintf("SWAPOUT: "
						"%d/%d %p writable\n",
						i, bp->b_xio.xio_npages, m);
			}
		}
	}
#endif

	/*
	 * remove the mapping for kernel virtual
	 */
	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */
	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
		vm_page_t m = bp->b_xio.xio_pages[i];

		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */

/*
 * swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations the pages are busied; on completion we unbusy
 *	all pages except the 'main' request page.  For WRITE operations
 *	the pages are I/O-busied; on completion we release all of them
 *	(we can do this because we marked them all VM_PAGER_PEND on
 *	return from putpages).
 *
 *	This routine may not block.
 *
 * No requirements.
 */
static void
swp_pager_async_iodone(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	vm_object_t object = NULL;
	int i;
	int *nswptr;

	/*
	 * report error
	 */
	if (bp->b_flags & B_ERROR) {
		kprintf(
		    "swap_pager: I/O error - %s failed; offset %lld, "
		    "size %ld, error %d\n",
		    ((bio->bio_caller_info1.index & SWBIO_READ) ?
			"pagein" : "pageout"),
		    (long long)bio->bio_offset,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object.
	 */
	if (bp->b_xio.xio_npages)
		object = bp->b_xio.xio_pages[0]->object;

#if 0
	/* PMAP TESTING CODE (useful, keep it in but #if 0'd) */
	if (bio->bio_caller_info1.index & SWBIO_WRITE) {
		if (bio->bio_crc != iscsi_crc32(bp->b_data, bp->b_bcount)) {
			kprintf("SWAPOUT: BADCRC %08x %08x\n",
				bio->bio_crc,
				iscsi_crc32(bp->b_data, bp->b_bcount));
			for (i = 0; i < bp->b_xio.xio_npages; ++i) {
				vm_page_t m = bp->b_xio.xio_pages[i];
				if (m->flags & PG_WRITEABLE)
					kprintf("SWAPOUT: "
						"%d/%d %p writable\n",
						i, bp->b_xio.xio_npages, m);
			}
		}
	}
#endif

	/*
	 * remove the mapping for kernel virtual
	 */
	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the swap block assignment for the page
	 * but do not free it back to the bitmap.  The erroneous block(s)
	 * are thus never reallocated as swap.  Redirty the page and
	 * continue.
	 */
	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
		vm_page_t m = bp->b_xio.xio_pages[i];

		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */

			if (bio->bio_caller_info1.index & SWBIO_READ) {
				/*
				 * When reading, the requested page needs to
				 * stay locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  (also: the requested index in
				 * bio_driver_info can be -1 and not match
				 * anything).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: For reads, m->dirty will probably
				 *	 be overridden by the original caller
				 *	 of getpages so don't play cute tricks
				 *	 here.
				 *
				 * NOTE: We can't actually free the page from
				 *	 here, because this is an interrupt.
				 *	 It is not legal to mess with
				 *	 object->memq from an interrupt.
				 *	 Deactivate the page instead.
				 *
				 * WARNING! The instant SWAPINPROG is
				 *	    cleared another cpu may start
				 *	    using the mreq page (it will
				 *	    check m->valid immediately).
				 */

				m->valid = 0;
				atomic_clear_int(&m->busy_count,
						 PBUSY_SWAPINPROG);

				/*
				 * bio_driver_info holds the requested page
				 * index.
				 */
				if (i != (int)(intptr_t)bio->bio_driver_info) {
					vm_page_deactivate(m);
					vm_page_wakeup(m);
				} else {
					vm_page_flash(m);
				}
				/*
				 * If i matches the requested page index, do
				 * not wake the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs remove the swap
				 * assignment (note that PG_SWAPPED may or
				 * may not be set depending on prior activity).
				 *
				 * Re-dirty OBJT_SWAP pages as there is no
				 * other backing store, we can't throw the
				 * page away.
				 *
				 * Non-OBJT_SWAP pages (aka swapcache) must
				 * not be dirtied since they may not have
				 * been dirty in the first place, and they
				 * do have backing store (the vnode).
				 */
				vm_page_busy_wait(m, FALSE, "swadpg");
				vm_object_hold(m->object);
				swp_pager_meta_ctl(m->object, m->pindex,
						   SWM_FREE);
				vm_page_flag_clear(m, PG_SWAPPED);
				vm_object_drop(m->object);
				if (m->object->type == OBJT_SWAP) {
					vm_page_dirty(m);
					vm_page_activate(m);
				}
				vm_page_io_finish(m);
				atomic_clear_int(&m->busy_count,
						 PBUSY_SWAPINPROG);
				vm_page_wakeup(m);
			}
		} else if (bio->bio_caller_info1.index & SWBIO_READ) {
			/*
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page is left busied, but
			 * we still have to wake it up.  The other pages are
			 * released (unbusied) by vm_page_wakeup().  We do
			 * not set the requested page's valid bits here, it
			 * is up to the caller.
			 */

			/*
			 * NOTE: Can't call pmap_clear_modify(m) from an
			 *	 interrupt thread, the pmap code may have to
			 *	 map non-kernel pmaps and currently asserts
			 *	 the case.
			 *
			 * WARNING! The instant SWAPINPROG is
			 *	    cleared another cpu may start
			 *	    using the mreq page (it will
			 *	    check m->valid immediately).
			 */
			/*pmap_clear_modify(m);*/
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_set(m, PG_SWAPPED);
			atomic_clear_int(&m->busy_count, PBUSY_SWAPINPROG);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared SWAPINPROG and
			 * getpages could be waiting for it.  However, be
			 * sure not to unbusy the page getpages specifically
			 * requested - getpages expects it to be left busy.
			 *
			 * bio_driver_info holds the requested page index.
			 */
			if (i != (int)(intptr_t)bio->bio_driver_info) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * Mark the page clean but do not mess with the
			 * pmap-layer's modified state.  That state should
			 * also be clear since the caller protected the
			 * page with VM_PROT_READ, but allow the case.
			 *
			 * We are in an interrupt, avoid pmap operations.
			 *
			 * If we have a severe page deficit, deactivate the
			 * page.  Do not try to cache it (which would also
			 * involve a pmap op), because the page might still
			 * be read-heavy.
			 *
			 * When using the swap to cache clean vnode pages
			 * we do not mess with the page dirty bits.
			 *
			 * NOTE! Nobody is waiting for the key mreq page
			 *	 on write completion.
			 */
			vm_page_busy_wait(m, FALSE, "swadpg");
			if (m->object->type == OBJT_SWAP)
				vm_page_undirty(m);
			vm_page_flag_set(m, PG_SWAPPED);
			atomic_clear_int(&m->busy_count, PBUSY_SWAPINPROG);
			if (vm_page_count_severe())
				vm_page_deactivate(m);
			vm_page_io_finish(m);
			if (bio->bio_caller_info1.index & SWBIO_TTC)
				vm_page_try_to_cache(m);
			else
				vm_page_wakeup(m);
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */
	if (object)
		vm_object_pip_wakeup_n(object, bp->b_xio.xio_npages);

	/*
	 * Release the physical I/O buffer.
	 *
	 * NOTE: Due to synchronous operations in the write case b_cmd may
	 *	 already be set to BUF_CMD_DONE and BIO_SYNC may have already
	 *	 been cleared.
	 *
	 * Use vm_token to interlock nsw_rcount/wcount wakeup?
	 */
	lwkt_gettoken(&vm_token);
	if (bio->bio_caller_info1.index & SWBIO_READ)
		nswptr = &nsw_rcount;
	else if (bio->bio_caller_info1.index & SWBIO_SYNC)
		nswptr = &nsw_wcount_sync;
	else
		nswptr = &nsw_wcount_async;
	bp->b_cmd = BUF_CMD_DONE;
	relpbuf(bp, nswptr);
	lwkt_reltoken(&vm_token);
}
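
/*
 * Illustrative sketch (not part of the pager): how the SWBIO_* flags
 * stored in bio_caller_info1.index by the putpages/getpages paths are
 * decoded by swp_pager_async_iodone() above to pick the pbuf counter
 * the buffer is released to.
 */
#if 0
static int *
swp_pick_nsw_counter(int index)
{
	if (index & SWBIO_READ)		/* pagein */
		return (&nsw_rcount);
	if (index & SWBIO_SYNC)		/* synchronous pageout */
		return (&nsw_wcount_sync);
	return (&nsw_wcount_async);	/* asynchronous pageout */
}
#endif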

/*
 * Fault-in a potentially swapped page and remove the swap reference.
 * (used by swapoff code)
 *
 * object must be held.
 */
static __inline void
swp_pager_fault_page(vm_object_t object, int *sharedp, vm_pindex_t pindex)
{
	struct vnode *vp;
	vm_page_t m;
	int error;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	if (object->type == OBJT_VNODE) {
		/*
		 * Any swap related to a vnode is due to swapcache.  We must
		 * vget() the vnode in case it is not active (otherwise
		 * vref() will panic).  Calling vm_object_page_remove() will
		 * ensure that any swap ref is removed interlocked with the
		 * page.  clean_only is set to TRUE so we don't throw away
		 * dirty pages.
		 */
		vp = object->handle;
		error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
		if (error == 0) {
			vm_object_page_remove(object, pindex, pindex + 1, TRUE);
			vput(vp);
		}
	} else {
		/*
		 * Otherwise it is a normal OBJT_SWAP object and we can
		 * fault the page in and remove the swap.
		 */
		m = vm_fault_object_page(object, IDX_TO_OFF(pindex),
					 VM_PROT_NONE,
					 VM_FAULT_DIRTY | VM_FAULT_UNSWAP,
					 sharedp, &error);
		if (m)
			vm_page_unhold(m);
	}
}
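
/*
 * Illustrative sketch (not part of the pager): the vget/vput pattern
 * used by swp_pager_fault_page() above for a vnode that may not be
 * active.  The "operate on the vnode" step is a placeholder for
 * whatever work needs the vnode locked.
 */
#if 0
static int
vnode_locked_op_example(struct vnode *vp)
{
	int error;

	/* vget() instead of vref(): the vnode may be inactive */
	error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
	if (error == 0) {
		/* ... operate on the locked vnode here ... */
		vput(vp);	/* unlock and release */
	}
	return (error);
}
#endif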

/*
 * This removes all swap blocks related to a particular device.  We have
 * to be careful of ripups during the scan.
 */
static int swp_pager_swapoff_callback(struct swblock *swap, void *data);

int
swap_pager_swapoff(int devidx)
{
	struct vm_object_hash *hash;
	struct swswapoffinfo info;
	struct vm_object marker;
	vm_object_t object;
	int n;

	bzero(&marker, sizeof(marker));
	marker.type = OBJT_MARKER;

	for (n = 0; n < VMOBJ_HSIZE; ++n) {
		hash = &vm_object_hash[n];

		lwkt_gettoken(&hash->token);
		TAILQ_INSERT_HEAD(&hash->list, &marker, object_list);

		while ((object = TAILQ_NEXT(&marker, object_list)) != NULL) {
			if (object->type == OBJT_MARKER)
				goto skip;
			if (object->type != OBJT_SWAP &&
			    object->type != OBJT_VNODE)
				goto skip;
			vm_object_hold(object);
			if (object->type != OBJT_SWAP &&
			    object->type != OBJT_VNODE) {
				vm_object_drop(object);
				goto skip;
			}

			/*
			 * The object is special in that we can't just page
			 * data into its vm_page's (tmpfs, vn).
			 */
			if ((object->flags & OBJ_NOPAGEIN) &&
			    RB_ROOT(&object->swblock_root)) {
				vm_object_drop(object);
				goto skip;
			}

			info.object = object;
			info.shared = 0;
			info.devidx = devidx;
			swblock_rb_tree_RB_SCAN(&object->swblock_root,
					    NULL, swp_pager_swapoff_callback,
					    &info);
			vm_object_drop(object);
skip:
			if (object == TAILQ_NEXT(&marker, object_list)) {
				TAILQ_REMOVE(&hash->list, &marker, object_list);
				TAILQ_INSERT_AFTER(&hash->list, object,
						   &marker, object_list);
			}
		}
		TAILQ_REMOVE(&hash->list, &marker, object_list);
		lwkt_reltoken(&hash->token);
	}

	/*
	 * If we fail to locate all swblocks we just fail gracefully and
	 * do not bother to restore paging on the swap device.  The user
	 * can simply retry the swapoff.
	 */
	if (swdevt[devidx].sw_nused)
		return (1);
	else
		return (0);
}
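
/*
 * Illustrative sketch (not part of the pager): the marker-based list
 * scan used by swap_pager_swapoff() above.  A dummy OBJT_MARKER entry
 * is inserted into the list; the scan always takes TAILQ_NEXT of the
 * marker and then moves the marker past the entry just visited, so it
 * survives other threads inserting or removing ("ripping up") entries
 * while the per-entry work blocks.  visit_object() is a hypothetical
 * placeholder.
 */
#if 0
static void
marker_scan_example(struct vm_object_hash *hash)
{
	struct vm_object marker;
	vm_object_t object;

	bzero(&marker, sizeof(marker));
	marker.type = OBJT_MARKER;

	lwkt_gettoken(&hash->token);
	TAILQ_INSERT_HEAD(&hash->list, &marker, object_list);
	while ((object = TAILQ_NEXT(&marker, object_list)) != NULL) {
		if (object->type != OBJT_MARKER)
			visit_object(object);	/* hypothetical, may block */
		/* advance the marker past the entry just visited */
		if (object == TAILQ_NEXT(&marker, object_list)) {
			TAILQ_REMOVE(&hash->list, &marker, object_list);
			TAILQ_INSERT_AFTER(&hash->list, object, &marker,
					   object_list);
		}
	}
	TAILQ_REMOVE(&hash->list, &marker, object_list);
	lwkt_reltoken(&hash->token);
}
#endif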

static
int
swp_pager_swapoff_callback(struct swblock *swap, void *data)
{
	struct swswapoffinfo *info = data;
	vm_object_t object = info->object;
	vm_pindex_t index;
	swblk_t v;
	int i;

	index = swap->swb_index;
	for (i = 0; i < SWAP_META_PAGES; ++i) {
		/*
		 * Make sure we don't race a dying object.  This will
		 * kill the scan of the object's swap blocks entirely.
		 */
		if (object->flags & OBJ_DEAD)
			return(-1);

		/*
		 * Fault the page, which can obviously block.  If the swap
		 * structure disappears break out.
		 */
		v = swap->swb_pages[i];
		if (v != SWAPBLK_NONE && BLK2DEVIDX(v) == info->devidx) {
			swp_pager_fault_page(object, &info->shared,
					     swap->swb_index + i);
			/* swap ptr might go away */
			if (RB_LOOKUP(swblock_rb_tree,
				      &object->swblock_root, index) != swap) {
				break;
			}
		}
	}
	return(0);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.
 *
 *	Swap metadata is implemented with a per-object red-black tree of
 *	swblock structures, each of which maps a small contiguous run of
 *	page indices to swap blocks.  The object also maintains an
 *	appropriate tracking counter (swblock_count).
 */

/*
 * Lookup the swblock containing the specified swap block index.
 *
 * The caller must hold the object.
 */
static __inline
struct swblock *
swp_pager_lookup(vm_object_t object, vm_pindex_t index)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	index &= ~(vm_pindex_t)SWAP_META_MASK;
	return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
}

/*
 * Remove a swblock from the RB tree.
 *
 * The caller must hold the object.
 */
static __inline
void
swp_pager_remove(vm_object_t object, struct swblock *swap)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap);
}

/*
 * Convert a default object to a swap object if necessary.
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_convert(vm_object_t object)
{
	if (object->type == OBJT_DEFAULT) {
		object->type = OBJT_SWAP;
		KKASSERT(object->swblock_count == 0);
	}
}
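
/*
 * Illustrative sketch (not part of the pager): how a page index maps
 * into a swblock via swp_pager_lookup() above.  Assuming a hypothetical
 * SWAP_META_PAGES of 16 (so SWAP_META_MASK == 15), pindex 53 lives in
 * the swblock whose swb_index is 48, at slot 53 & 15 == 5 of
 * swb_pages[].
 */
#if 0
static swblk_t
swblock_slot_example(vm_object_t object, vm_pindex_t pindex)
{
	struct swblock *swap;

	/* find the swblock covering this pindex (masks off the low bits) */
	swap = swp_pager_lookup(object, pindex);
	if (swap == NULL)
		return (SWAPBLK_NONE);
	/* the low bits select the slot within the swblock */
	return (swap->swb_pages[pindex & SWAP_META_MASK]);
}
#endif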

/*
 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.  Vnode objects do not need to be converted.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk)
{
	struct swblock *swap;
	struct swblock *oswap;
	vm_pindex_t v;

	KKASSERT(swapblk != SWAPBLK_NONE);
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Convert object if necessary
	 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);

	/*
	 * Locate the swblock.  If not found, create one, but if we aren't
	 * adding anything just return.  If we run out of space in the map
	 * we wait and, since the tree may have changed, retry.
	 */
retry:
	swap = swp_pager_lookup(object, index);

	if (swap == NULL) {
		int i;

		swap = zalloc(swap_zone);
		if (swap == NULL) {
			vm_wait(0);
			goto retry;
		}
		swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK;
		swap->swb_count = 0;

		++object->swblock_count;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
		oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap);
		KKASSERT(oswap == NULL);
	}

	/*
	 * Delete prior contents of metadata.
	 *
	 * NOTE: Decrement swb_count after the freeing operation (which
	 *	 might block) to prevent racing destruction of the swblock.
	 */
	index &= SWAP_META_MASK;

	while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) {
		swap->swb_pages[index] = SWAPBLK_NONE;
		/* can block */
		swp_pager_freeswapspace(object, v, 1);
		--swap->swb_count;
		--mycpu->gd_vmtotal.t_vm;
	}

	/*
	 * Enter block into metadata
	 */
	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE) {
		++swap->swb_count;
		++mycpu->gd_vmtotal.t_vm;
	}
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 * The caller must hold the object.
 */
static int swp_pager_meta_free_callback(struct swblock *swb, void *data);

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count)
{
	struct swfreeinfo info;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Nothing to do
	 */
	if (object->swblock_count == 0) {
		KKASSERT(RB_EMPTY(&object->swblock_root));
		return;
	}
	if (count == 0)
		return;

	/*
	 * Setup for the RB tree scan.  Note that the pindex range can be
	 * huge due to the 64 bit page index space, so we cannot safely
	 * iterate index by index.
	 */
	info.object = object;
	info.basei = index & ~(vm_pindex_t)SWAP_META_MASK;
	info.begi = index;
	info.endi = index + count - 1;
	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp,
				swp_pager_meta_free_callback, &info);
}
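
/*
 * Illustrative sketch (not part of the pager): the locking contract for
 * the metadata routines above.  The object token must be held across
 * the build; "some_blk" stands in for a block previously obtained from
 * swp_pager_getswapspace().
 */
#if 0
static void
meta_build_example(vm_object_t object, vm_pindex_t pindex, swblk_t some_blk)
{
	vm_object_hold(object);		/* satisfies ASSERT_LWKT_TOKEN_HELD */
	swp_pager_meta_build(object, pindex, some_blk);
	/* any swap previously assigned to pindex was freed by the build */
	vm_object_drop(object);
}
#endif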

/*
 * The caller must hold the object.
 */
static
int
swp_pager_meta_free_callback(struct swblock *swap, void *data)
{
	struct swfreeinfo *info = data;
	vm_object_t object = info->object;
	int index;
	int eindex;

	/*
	 * Figure out the range within the swblock.  The wider scan may
	 * return edge-case swap blocks when the start and/or end points
	 * are in the middle of a block.
	 */
	if (swap->swb_index < info->begi)
		index = (int)info->begi & SWAP_META_MASK;
	else
		index = 0;

	if (swap->swb_index + SWAP_META_PAGES > info->endi)
		eindex = (int)info->endi & SWAP_META_MASK;
	else
		eindex = SWAP_META_MASK;

	/*
	 * Scan and free the blocks.  The loop terminates early if the
	 * swblock's block count drops to zero and the swblock itself is
	 * freed.
	 *
	 * NOTE: Decrement swb_count after swp_pager_freeswapspace()
	 *	 to deal with a zfree race.
	 */
	while (index <= eindex) {
		swblk_t v = swap->swb_pages[index];

		if (v != SWAPBLK_NONE) {
			swap->swb_pages[index] = SWAPBLK_NONE;
			/* can block */
			swp_pager_freeswapspace(object, v, 1);
			--mycpu->gd_vmtotal.t_vm;
			if (--swap->swb_count == 0) {
				swp_pager_remove(object, swap);
				zfree(swap_zone, swap);
				--object->swblock_count;
				break;
			}
		}
		++index;
	}

	/* swap may be invalid here due to zfree above */
	lwkt_yield();

	return(0);
}

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 * NOTE: Decrement swb_count after the freeing operation (which
 *	 might block) to prevent racing destruction of the swblock.
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	struct swblock *swap;
	int i;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	while ((swap = RB_ROOT(&object->swblock_root)) != NULL) {
		swp_pager_remove(object, swap);
		for (i = 0; i < SWAP_META_PAGES; ++i) {
			swblk_t v = swap->swb_pages[i];
			if (v != SWAPBLK_NONE) {
				/* can block */
				swp_pager_freeswapspace(object, v, 1);
				--swap->swb_count;
				--mycpu->gd_vmtotal.t_vm;
			}
		}
		if (swap->swb_count != 0)
			panic("swap_pager_meta_free_all: swb_count != 0");
		zfree(swap_zone, swap);
		--object->swblock_count;
		lwkt_yield();
	}
	KKASSERT(object->swblock_count == 0);
}
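
/*
 * Illustrative sketch (not part of the pager): the edge clamping done
 * by swp_pager_meta_free_callback() above.  Assuming a hypothetical
 * SWAP_META_PAGES of 16, freeing the pindex range [20, 40] visits the
 * swblock at swb_index 32 with begi=20, endi=40: 32 is not below 20 so
 * index starts at 0, and 32 + 16 > 40 so eindex clamps to 40 & 15 == 8;
 * slots 0..8 (pindex 32..40) of that swblock are freed.
 */
#if 0
static void
meta_free_clamp_example(void)
{
	vm_pindex_t begi = 20, endi = 40;	/* hypothetical free range */
	vm_pindex_t swb_index = 32;		/* swblock covering 32..47 */
	int index, eindex;

	index = (swb_index < begi) ? (int)(begi & SWAP_META_MASK) : 0;
	eindex = (swb_index + SWAP_META_PAGES > endi) ?
		 (int)(endi & SWAP_META_MASK) : SWAP_META_MASK;
	/* with SWAP_META_PAGES == 16: index == 0, eindex == 8 */
	kprintf("free slots %d..%d of swblock %jd\n",
		index, eindex, (intmax_t)swb_index);
}
#endif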

/*
 * SWP_PAGER_META_CTL() - misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from metadata but do not free it; pop it out
 *
 * The caller must hold the object.
 */
static swblk_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
{
	struct swblock *swap;
	swblk_t r1;

	if (object->swblock_count == 0)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	swap = swp_pager_lookup(object, index);

	if (swap != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				--mycpu->gd_vmtotal.t_vm;
				if (--swap->swb_count == 0) {
					swp_pager_remove(object, swap);
					zfree(swap_zone, swap);
					--object->swblock_count;
				}
			}
			/* swap ptr may be invalid */
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(object, r1, 1);
				r1 = SWAPBLK_NONE;
			}
		}
		/* swap ptr may be invalid */
	}
	return(r1);
}
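
/*
 * Illustrative sketch (not part of the pager): the ways to use
 * swp_pager_meta_ctl() above.  The object token must be held; "pindex"
 * is any page index within the object.
 */
#if 0
static void
meta_ctl_example(vm_object_t object, vm_pindex_t pindex)
{
	swblk_t blk;

	vm_object_hold(object);

	/* plain lookup: metadata is left intact */
	blk = swp_pager_meta_ctl(object, pindex, 0);

	if (blk != SWAPBLK_NONE) {
		/*
		 * SWM_POP removes the assignment but leaves the block
		 * allocated; the caller becomes responsible for it.
		 * SWM_FREE (used here) also returns it to the bitmap.
		 */
		(void)swp_pager_meta_ctl(object, pindex, SWM_FREE);
	}

	vm_object_drop(object);
}
#endif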