/*
 * (MPSAFE)
 *
 * Copyright (c) 1998-2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * New Swap System
 * Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 * @(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 */

#include "opt_swap.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/kcollect.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER	SWB_NPAGES
#endif

#define SWM_FREE	0x02	/* free, period */
#define SWM_POP		0x04	/* pop out */

#define SWBIO_READ	0x01
#define SWBIO_WRITE	0x02
#define SWBIO_SYNC	0x04
#define SWBIO_TTC	0x08	/* for OBJPC_TRY_TO_CACHE */

struct swfreeinfo {
	vm_object_t	object;
	vm_pindex_t	basei;
	vm_pindex_t	begi;
	vm_pindex_t	endi;	/* inclusive */
};

struct swswapoffinfo {
	vm_object_t	object;
	int		devidx;
	int		shared;
};

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */

int swap_pager_full;		/* swap space exhaustion (task killing) */
int swap_fail_ticks;		/* when we became exhausted */
int swap_pager_almost_full;	/* swap space exhaustion (w/ hysteresis) */
swblk_t vm_swap_cache_use;
swblk_t vm_swap_anon_use;
static int vm_report_swap_allocs;

static struct krate kswaprate = { 1 };
static int nsw_rcount;		/* free read buffers */
static int nsw_wcount_sync;	/* limit write buffers / synchronous */
static int nsw_wcount_async;	/* limit write buffers / asynchronous */
static int nsw_wcount_async_max;/* assigned maximum */
static int nsw_cluster_max;	/* maximum VOP I/O allowed */

struct blist *swapblist;
static int swap_async_max = 4;	/* maximum in-progress async I/O's */
static int swap_burst_read = 0;	/* allow burst reading */
static swblk_t swapiterator;	/* linearize allocations */
int swap_user_async = 0;	/* user swap pager operation can be async */

static struct spinlock swapbp_spin = SPINLOCK_INITIALIZER(&swapbp_spin, "swapbp_spin");

/* from vm_swap.c */
extern struct vnode *swapdev_vp;
extern struct swdevt *swdevt;
extern int nswdev;

#define BLK2DEVIDX(blk) (nswdev > 1 ? blk / SWB_DMMAX % nswdev : 0)

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
SYSCTL_INT(_vm, OID_AUTO, swap_burst_read,
	CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins");
SYSCTL_INT(_vm, OID_AUTO, swap_user_async,
	CTLFLAG_RW, &swap_user_async, 0, "Allow async user swap write I/O");

#if SWBLK_BITS == 64
SYSCTL_LONG(_vm, OID_AUTO, swap_cache_use,
	CTLFLAG_RD, &vm_swap_cache_use, 0, "");
SYSCTL_LONG(_vm, OID_AUTO, swap_anon_use,
	CTLFLAG_RD, &vm_swap_anon_use, 0, "");
SYSCTL_LONG(_vm, OID_AUTO, swap_free,
	CTLFLAG_RD, &vm_swap_size, 0, "");
SYSCTL_LONG(_vm, OID_AUTO, swap_size,
	CTLFLAG_RD, &vm_swap_max, 0, "");
#else
SYSCTL_INT(_vm, OID_AUTO, swap_cache_use,
	CTLFLAG_RD, &vm_swap_cache_use, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_anon_use,
	CTLFLAG_RD, &vm_swap_anon_use, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_free,
	CTLFLAG_RD, &vm_swap_size, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_size,
	CTLFLAG_RD, &vm_swap_max, 0, "");
#endif
SYSCTL_INT(_vm, OID_AUTO, report_swap_allocs,
	CTLFLAG_RW, &vm_report_swap_allocs, 0, "");

__read_mostly vm_zone_t swap_zone;

/*
 * Red-Black tree for swblock entries
 *
 * The caller must hold vm_token
 */
RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare,
	     vm_pindex_t, swb_index);

int
rb_swblock_compare(struct swblock *swb1, struct swblock *swb2)
{
	if (swb1->swb_index < swb2->swb_index)
		return(-1);
	if (swb1->swb_index > swb2->swb_index)
		return(1);
	return(0);
}

static
int
rb_swblock_scancmp(struct swblock *swb, void *data)
{
	struct swfreeinfo *info = data;

	if (swb->swb_index < info->basei)
		return(-1);
	if (swb->swb_index > info->endi)
		return(1);
	return(0);
}

static
int
rb_swblock_condcmp(struct swblock *swb, void *data)
{
	struct swfreeinfo *info = data;

	if (swb->swb_index < info->basei)
		return(-1);
	return(0);
}
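
/*
 * Illustrative sketch (not compiled): each struct swblock in the RB tree
 * above covers a SWAP_META_PAGES-aligned run of page indices, so locating
 * the metadata slot for a page is a round-down plus an offset.  The helper
 * name and layout below are hypothetical and assume SWAP_META_PAGES is a
 * power of 2; see swp_pager_meta_*() for the real lookups.
 */
#if 0
static void
example_meta_lookup(vm_pindex_t pindex, vm_pindex_t *swb_index, int *slot)
{
	/* base index of the covering swblock */
	*swb_index = pindex & ~(vm_pindex_t)(SWAP_META_PAGES - 1);
	/* slot within that swblock */
	*slot = (int)(pindex & (SWAP_META_PAGES - 1));
}
#endif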

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here
 * (see vm/swap_pager.h).
 */

static void	swap_pager_dealloc (vm_object_t object);
static int	swap_pager_getpage (vm_object_t, vm_pindex_t, vm_page_t *, int);
static void	swap_chain_iodone(struct bio *biox);

struct pagerops swappagerops = {
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object */
	swap_pager_getpage,	/* pagein */
	swap_pager_putpages,	/* pageout */
	swap_pager_haspage	/* get backing store status for page */
};

/*
 * SWB_DMMAX is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  SWB_DMMAX is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */

int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

static __inline void	swp_sizecheck (void);
static void	swp_pager_async_iodone (struct bio *bio);

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace(vm_object_t object,
					swblk_t blk, int npages);
static __inline swblk_t swp_pager_getswapspace(vm_object_t object, int npages);

/*
 * Metadata functions
 */

static void swp_pager_meta_convert(vm_object_t);
static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
static void swp_pager_meta_free_all(vm_object_t);
static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	Update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	SMP races are ok.
 */
static __inline void
swp_sizecheck(void)
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			kprintf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
			swap_fail_ticks = ticks;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * Long-term data collection on 10-second interval.  Return the value
 * for KCOLLECT_SWAPPCT and set the values for SWAPANO and SWAPCAC.
 *
 * Return total swap in the scale field.  This can change if swap is
 * regularly added or removed and may cause some historical confusion
 * in that case, but SWAPPCT will always be historically accurate.
 */

#define PTOB(value)	((uint64_t)(value) << PAGE_SHIFT)

static uint64_t
collect_swap_callback(int n)
{
	uint64_t total = vm_swap_max;
	uint64_t anon = vm_swap_anon_use;
	uint64_t cache = vm_swap_cache_use;

	if (total == 0)		/* avoid divide by zero */
		total = 1;
	kcollect_setvalue(KCOLLECT_SWAPANO, PTOB(anon));
	kcollect_setvalue(KCOLLECT_SWAPCAC, PTOB(cache));
	kcollect_setscale(KCOLLECT_SWAPANO,
			  KCOLLECT_SCALE(KCOLLECT_SWAPANO_FORMAT, PTOB(total)));
	kcollect_setscale(KCOLLECT_SWAPCAC,
			  KCOLLECT_SCALE(KCOLLECT_SWAPCAC_FORMAT, PTOB(total)));
	return (((anon + cache) * 10000 + (total >> 1)) / total);
}
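
/*
 * Illustrative sketch (not compiled): the return expression above reports
 * swap usage in hundredths of a percent, rounded to nearest by adding
 * total/2 before the divide.  With a hypothetical total of 1000 pages,
 * anon = 150 and cache = 100, the result is
 * ((250 * 10000) + 500) / 1000 = 2500, i.e. 25.00%.
 */
#if 0
static uint64_t
example_swap_pct(uint64_t anon, uint64_t cache, uint64_t total)
{
	if (total == 0)			/* avoid divide by zero */
		total = 1;
	/* basis points (1/100 %), round-to-nearest */
	return (((anon + cache) * 10000 + (total >> 1)) / total);
}
#endif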

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 *
 *	Called from the low level boot code only.
 */
static void
swap_pager_init(void *arg __unused)
{
	kcollect_register(KCOLLECT_SWAPPCT, "swapuse", collect_swap_callback,
			  KCOLLECT_SCALE(KCOLLECT_SWAPPCT_FORMAT, 0));
	kcollect_register(KCOLLECT_SWAPANO, "swapano", NULL,
			  KCOLLECT_SCALE(KCOLLECT_SWAPANO_FORMAT, 0));
	kcollect_register(KCOLLECT_SWAPCAC, "swapcac", NULL,
			  KCOLLECT_SCALE(KCOLLECT_SWAPCAC_FORMAT, 0));
}
SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL);

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 *
 *	Called from the low level boot code only.
 */
void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the number of pages an XIO
	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */

	nsw_cluster_max = min((MAXPHYS / PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf_kva + 1) / 2;
	nsw_wcount_sync = (nswbuf_kva + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;

	/*
	 * The zone is dynamically allocated so generally size it to
	 * maxswzone (32MB to 256GB of KVM).  Set a minimum size based
	 * on physical memory of around 8x (each swblock can hold 16 pages).
	 *
	 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
	 * has increased dramatically.
	 */
	n = vmstats.v_page_count / 2;
	if (maxswzone && n < maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;

	do {
		swap_zone = zinit("SWAPMETA", sizeof(struct swblock), n,
				  ZONE_INTERRUPT);
		if (swap_zone != NULL)
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	if (swap_zone == NULL)
		panic("swap_pager_swap_init: swap_zone == NULL");
	if (n2 != n)
		kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
}
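
/*
 * Illustrative sketch (not compiled): the retry loop above shrinks the
 * requested zone to roughly two thirds of the previous attempt until
 * zinit() succeeds.  A hypothetical starting request of 90000 entries
 * would be retried as 60000, 40000, 26666, and so on.
 */
#if 0
static int
example_zone_backoff(int n)
{
	/* same step as above: drop one third (rounded up) each attempt */
	return (n - ((n + 2) / 3));
}
#endif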

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_convert().
 *
 *	We only support unnamed objects.
 *
 * No restrictions.
 */
vm_object_t
swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
{
	vm_object_t object;

	KKASSERT(handle == NULL);
	object = vm_object_allocate_hold(OBJT_DEFAULT,
					 OFF_TO_IDX(offset + PAGE_MASK + size));
	swp_pager_meta_convert(object);
	vm_object_drop(object);

	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 * The object must be locked or unreferenceable.
 * No other requirements.
 */
static void
swap_pager_dealloc(vm_object_t object)
{
	vm_object_hold(object);
	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	swp_pager_meta_free_all(object);
	vm_object_drop(object);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	The caller must hold the object.
 *	This routine may not block.
 */
static __inline swblk_t
swp_pager_getswapspace(vm_object_t object, int npages)
{
	swblk_t blk;

	lwkt_gettoken(&vm_token);
	blk = blist_allocat(swapblist, npages, swapiterator);
	if (blk == SWAPBLK_NONE)
		blk = blist_allocat(swapblist, npages, 0);
	if (blk == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			if (vm_swap_max == 0) {
				krateprintf(&kswaprate,
					"Warning: The system would like to "
					"page to swap but no swap space "
					"is configured!\n");
			} else {
				krateprintf(&kswaprate,
					"swap_pager_getswapspace: "
					"swap full allocating %d pages\n",
					npages);
			}
			swap_pager_full = 2;
			if (swap_pager_almost_full == 0)
				swap_fail_ticks = ticks;
			swap_pager_almost_full = 1;
		}
	} else {
		/* swapiterator = blk; disable for now, doesn't work well */
		swapacctspace(blk, -npages);
		if (object->type == OBJT_SWAP)
			vm_swap_anon_use += npages;
		else
			vm_swap_cache_use += npages;
		swp_sizecheck();
	}
	lwkt_reltoken(&vm_token);
	return(blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	This routine may not block.
 */

static __inline void
swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages)
{
	struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)];

	lwkt_gettoken(&vm_token);
	sp->sw_nused -= npages;
	if (object->type == OBJT_SWAP)
		vm_swap_anon_use -= npages;
	else
		vm_swap_cache_use -= npages;

	if (sp->sw_flags & SW_CLOSING) {
		lwkt_reltoken(&vm_token);
		return;
	}

	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
	lwkt_reltoken(&vm_token);
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 * No requirements.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size)
{
	if (object->swblock_count == 0)
		return;
	vm_object_hold(object);
	swp_pager_meta_free(object, start, size);
	vm_object_drop(object);
}

/*
 * No requirements.
 */
void
swap_pager_freespace_all(vm_object_t object)
{
	if (object->swblock_count == 0)
		return;
	vm_object_hold(object);
	swp_pager_meta_free_all(object);
	vm_object_drop(object);
}

/*
 * This function conditionally frees swap cache swap starting at
 * (*basei) in the object.  (count) swap blocks will be nominally freed.
 * The actual number of blocks freed can be more or less than the
 * requested number.
 *
 * This function nominally returns the number of blocks freed.  However,
 * the actual number of blocks freed may be less than the returned value.
 * If the function is unable to exhaust the object or if it is able to
 * free (approximately) the requested number of blocks it returns
 * a value n > count.
 *
 * If we exhaust the object we will return a value n <= count.
 *
 * The caller must hold the object.
 *
 * WARNING!  If count == 0 then -1 can be returned as a degenerate case,
 *	     callers should always pass a count value > 0.
 */
static int swap_pager_condfree_callback(struct swblock *swap, void *data);

int
swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count)
{
	struct swfreeinfo info;
	int n;
	int t;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	info.object = object;
	info.basei = *basei;	/* skip up to this page index */
	info.begi = count;	/* max swap pages to destroy */
	info.endi = count * 8;	/* max swblocks to scan */

	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp,
				swap_pager_condfree_callback, &info);
	*basei = info.basei;

	/*
	 * Take the higher difference swblocks vs pages
	 */
	n = count - (int)info.begi;
	t = count * 8 - (int)info.endi;
	if (n < t)
		n = t;
	if (n < 1)
		n = 1;
	return(n);
}
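
/*
 * Illustrative sketch (not compiled): how the return value above is
 * derived.  With a hypothetical count of 16, if the scan freed 10 pages
 * (begi dropped from 16 to 6) and visited 40 swblocks (endi dropped from
 * 128 to 88), the result is max(16 - 6, 128 - 88) = max(10, 40) = 40,
 * clamped to a minimum of 1.  The helper name is hypothetical.
 */
#if 0
static int
example_condfree_result(int count, int begi_left, int endi_left)
{
	int n = count - begi_left;	/* pages actually freed */
	int t = count * 8 - endi_left;	/* swblocks actually scanned */

	if (n < t)
		n = t;
	if (n < 1)
		n = 1;
	return (n);
}
#endif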

/*
 * The idea is to free whole meta-blocks to avoid fragmenting
 * the swap space or disk I/O.  We only do this if NO VM pages
 * are present.
 *
 * We do not have to deal with clearing PG_SWAPPED in related VM
 * pages because there are no related VM pages.
 *
 * The caller must hold the object.
 */
static int
swap_pager_condfree_callback(struct swblock *swap, void *data)
{
	struct swfreeinfo *info = data;
	vm_object_t object = info->object;
	int i;

	for (i = 0; i < SWAP_META_PAGES; ++i) {
		if (vm_page_lookup(object, swap->swb_index + i))
			break;
	}
	info->basei = swap->swb_index + SWAP_META_PAGES;
	if (i == SWAP_META_PAGES) {
		info->begi -= swap->swb_count;
		swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES);
	}
	--info->endi;
	if ((int)info->begi < 0 || (int)info->endi < 0)
		return(-1);
	lwkt_yield();
	return(0);
}

/*
 * Called by vm_page_alloc() when a new VM page is inserted
 * into a VM object.  Checks whether swap has been assigned to
 * the page and sets PG_SWAPPED as necessary.
 *
 * (m) must be busied by caller and remains busied on return.
 */
void
swap_pager_page_inserted(vm_page_t m)
{
	if (m->object->swblock_count) {
		vm_object_hold(m->object);
		if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE)
			vm_page_flag_set(m, PG_SWAPPED);
		vm_object_drop(m->object);
	}
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 *
 *	The caller is responsible for avoiding races in the specified range.
 *	No other requirements.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	swblk_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	vm_object_hold(object);

	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(object, n)) ==
			       SWAPBLK_NONE)
			{
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg,
							    start - beg);
					vm_object_drop(object);
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	vm_object_drop(object);
	return(0);
}

/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be held by the caller.
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
		vm_pindex_t base_index, int destroysource)
{
	vm_pindex_t i;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject));
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject));

	/*
	 * transfer source to destination.
	 */
	for (i = 0; i < dstobject->size; ++i) {
		swblk_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			swblk_t srcaddr;

			srcaddr = swp_pager_meta_ctl(srcobject,
						     base_index + i, SWM_POP);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the sourceblock.
			 */
			swp_pager_meta_ctl(srcobject, base_index + i, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */
	if (destroysource) {
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		swp_pager_meta_free_all(srcobject);
		if (srcobject->type == OBJT_SWAP)
			srcobject->type = OBJT_DEFAULT;
	}
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 *
 * No requirements.
 */
boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	swblk_t blk0;

	/*
	 * do we have good backing store at the requested index ?
	 */
	vm_object_hold(object);
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		vm_object_drop(object);
		return (FALSE);
	}
	vm_object_drop(object);
	return (TRUE);
}

/*
 * Object must be held exclusive or shared by the caller.
 */
boolean_t
swap_pager_haspage_locked(vm_object_t object, vm_pindex_t pindex)
{
	if (swp_pager_meta_ctl(object, pindex, 0) == SWAPBLK_NONE)
		return FALSE;
	return TRUE;
}

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.  This operates on any VM object, not just OBJT_SWAP
 *	objects.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation.
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	The page must be busied.
 *	The caller can hold the object to avoid blocking, else we might block.
 *	No other requirements.
 */
void
swap_pager_unswapped(vm_page_t m)
{
	if (m->flags & PG_SWAPPED) {
		vm_object_hold(m->object);
		KKASSERT(m->flags & PG_SWAPPED);
		swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
		vm_page_flag_clear(m, PG_SWAPPED);
		vm_object_drop(m->object);
	}
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements a VM OBJECT strategy function using swap backing store.
 *	This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP
 *	types.  Only BUF_CMD_{READ,WRITE,FREEBLKS} is supported, any other
 *	requests will return EINVAL.
 *
 *	This is intended to be a cacheless interface (i.e. caching occurs at
 *	higher levels), and is also used as a swap-based SSD cache for vnode
 *	and device objects.
 *
 *	All I/O goes directly to and from the swap device.
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 *
 *	NOTE: This function supports the KVABIO API wherein bp->b_data might
 *	      not be synchronized to the current cpu.
 *
 * No requirements.
 */
void
swap_pager_strategy(vm_object_t object, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct bio *nbio;
	vm_pindex_t start;
	vm_pindex_t biox_blkno = 0;
	int count;
	char *data;
	struct bio *biox;
	struct buf *bufx;
#if 0
	struct bio_track *track;
#endif

#if 0
	/*
	 * tracking for swapdev vnode I/Os
	 */
	if (bp->b_cmd == BUF_CMD_READ)
		track = &swapdev_vp->v_track_read;
	else
		track = &swapdev_vp->v_track_write;
#endif

	/*
	 * Only supported commands
	 */
	if (bp->b_cmd != BUF_CMD_FREEBLKS &&
	    bp->b_cmd != BUF_CMD_READ &&
	    bp->b_cmd != BUF_CMD_WRITE) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bio);
		return;
	}

	/*
	 * bcount must be an integral number of pages.
	 */
	if (bp->b_bcount & PAGE_MASK) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bio);
		kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
			"not page bounded\n",
			bp, (long long)bio->bio_offset, (int)bp->b_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */
	bp->b_error = 0;
	bp->b_flags &= ~B_ERROR;
	bp->b_resid = bp->b_bcount;

	start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
	count = howmany(bp->b_bcount, PAGE_SIZE);

	/*
	 * WARNING!  Do not dereference *data without issuing a bkvasync()
	 */
	data = bp->b_data;

	/*
	 * Deal with BUF_CMD_FREEBLKS
	 */
	if (bp->b_cmd == BUF_CMD_FREEBLKS) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		vm_object_hold(object);
		swp_pager_meta_free(object, start, count);
		vm_object_drop(object);
		bp->b_resid = 0;
		biodone(bio);
		return;
	}

	/*
	 * We need to be able to create a new cluster of I/O's.  We cannot
	 * use the caller fields of the passed bio so push a new one.
	 *
	 * Because nbio is just a placeholder for the cluster links,
	 * we can biodone() the original bio instead of nbio to make
	 * things a bit more efficient.
	 */
	nbio = push_bio(bio);
	nbio->bio_offset = bio->bio_offset;
	nbio->bio_caller_info1.cluster_head = NULL;
	nbio->bio_caller_info2.cluster_tail = NULL;

	biox = NULL;
	bufx = NULL;

	/*
	 * Execute read or write
	 */
	vm_object_hold(object);

	while (count > 0) {
		swblk_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */
		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && bp->b_cmd == BUF_CMD_WRITE) {
			blk = swp_pager_getswapspace(object, 1);
			if (blk == SWAPBLK_NONE) {
				bp->b_error = ENOMEM;
				bp->b_flags |= B_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the stripe.
		 */
		if (biox &&
		    (biox_blkno + btoc(bufx->b_bcount) != blk ||
		     ((biox_blkno ^ blk) & ~SWB_DMMASK))) {
			switch(bp->b_cmd) {
			case BUF_CMD_READ:
				++mycpu->gd_cnt.v_swapin;
				mycpu->gd_cnt.v_swappgsin +=
					btoc(bufx->b_bcount);
				break;
			case BUF_CMD_WRITE:
				++mycpu->gd_cnt.v_swapout;
				mycpu->gd_cnt.v_swappgsout +=
					btoc(bufx->b_bcount);
				bufx->b_dirtyend = bufx->b_bcount;
				break;
			default:
				/* NOT REACHED */
				break;
			}

			/*
			 * Finished with this buf.
			 */
			KKASSERT(bufx->b_bcount != 0);
			if (bufx->b_cmd != BUF_CMD_READ)
				bufx->b_dirtyend = bufx->b_bcount;
			biox = NULL;
			bufx = NULL;
		}

		/*
		 * Add new swapblk to biox, instantiating biox if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */
		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.
			 */
			bkvasync(bp);
			bzero(data, PAGE_SIZE);
			bp->b_resid -= PAGE_SIZE;
		} else {
			if (biox == NULL) {
				/* XXX chain count > 4, wait to <= 4 */

				bufx = getpbuf(NULL);
				bufx->b_flags |= B_KVABIO;
				biox = &bufx->b_bio1;
				cluster_append(nbio, bufx);
				bufx->b_cmd = bp->b_cmd;
				biox->bio_done = swap_chain_iodone;
				biox->bio_offset = (off_t)blk << PAGE_SHIFT;
				biox->bio_caller_info1.cluster_parent = nbio;
				biox_blkno = blk;
				bufx->b_bcount = 0;
				bufx->b_data = data;
			}
			bufx->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}

	vm_object_drop(object);

	/*
	 * Flush out last buffer
	 */
	if (biox) {
		if (bufx->b_cmd == BUF_CMD_READ) {
			++mycpu->gd_cnt.v_swapin;
			mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
		} else {
			++mycpu->gd_cnt.v_swapout;
			mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
			bufx->b_dirtyend = bufx->b_bcount;
		}
		KKASSERT(bufx->b_bcount);
		if (bufx->b_cmd != BUF_CMD_READ)
			bufx->b_dirtyend = bufx->b_bcount;
		/* biox, bufx = NULL */
	}

	/*
	 * Now initiate all the I/O.  Be careful looping on our chain as
	 * I/O's may complete while we are still initiating them.
	 *
	 * If the request is a 100% sparse read no bios will be present
	 * and we just biodone() the buffer.
	 */
	nbio->bio_caller_info2.cluster_tail = NULL;
	bufx = nbio->bio_caller_info1.cluster_head;

	if (bufx) {
		while (bufx) {
			biox = &bufx->b_bio1;
			BUF_KERNPROC(bufx);
			bufx = bufx->b_cluster_next;
			vn_strategy(swapdev_vp, biox);
		}
	} else {
		biodone(bio);
	}

	/*
	 * Completion of the cluster will also call biodone_chain(nbio).
	 * We never call biodone(nbio) so we don't have to worry about
	 * setting up a bio_done callback.  It's handled in the sub-IO.
	 */
	/**/
}

/*
 * biodone callback
 *
 * No requirements.
 */
static void
swap_chain_iodone(struct bio *biox)
{
	struct buf **nextp;
	struct buf *bufx;	/* chained sub-buffer */
	struct bio *nbio;	/* parent nbio with chain glue */
	struct buf *bp;		/* original bp associated with nbio */
	int chain_empty;

	bufx = biox->bio_buf;
	nbio = biox->bio_caller_info1.cluster_parent;
	bp = nbio->bio_buf;

	/*
	 * Update the original buffer
	 */
	KKASSERT(bp != NULL);
	if (bufx->b_flags & B_ERROR) {
		atomic_set_int(&bp->b_flags, B_ERROR);
		bp->b_error = bufx->b_error;	/* race ok */
	} else if (bufx->b_resid != 0) {
		atomic_set_int(&bp->b_flags, B_ERROR);
		bp->b_error = EINVAL;		/* race ok */
	} else {
		atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
	}

	/*
	 * Remove us from the chain.
	 */
	spin_lock(&swapbp_spin);
	nextp = &nbio->bio_caller_info1.cluster_head;
	while (*nextp != bufx) {
		KKASSERT(*nextp != NULL);
		nextp = &(*nextp)->b_cluster_next;
	}
	*nextp = bufx->b_cluster_next;
	chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
	spin_unlock(&swapbp_spin);

	/*
	 * Clean up bufx.  If the chain is now empty we finish out
	 * the parent.  Note that we may be racing other completions
	 * so we must use the chain_empty status from above.
	 */
	if (chain_empty) {
		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
			atomic_set_int(&bp->b_flags, B_ERROR);
			bp->b_error = EINVAL;
		}
		biodone_chain(nbio);
	}
	relpbuf(bufx, NULL);
}
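
/*
 * Illustrative sketch (not compiled): both the strategy and getpage paths
 * refuse to cluster two swap blocks that live in different interleave
 * stripes, using an XOR test against SWB_DMMASK.  With a hypothetical
 * stripe of 32 blocks (mask 0x1f), blocks 30 and 31 share a stripe while
 * blocks 31 and 32 do not, so a cluster may not span them.  The helper
 * name is hypothetical.
 */
#if 0
static int
example_same_stripe(swblk_t blk1, swblk_t blk2, swblk_t dmmask)
{
	/* non-zero high bits after the XOR means the blocks straddle a stripe */
	return (((blk1 ^ blk2) & ~dmmask) == 0);
}
#endif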

/*
 * SWAP_PAGER_GETPAGES() - bring page in from swap
 *
 * The requested page may have to be brought in from swap.  Calculate the
 * swap block and bring in additional pages if possible.  All pages must
 * have contiguous swap block assignments and reside in the same object.
 *
 * The caller has a single vm_object_pip_add() reference prior to
 * calling us and we should return with the same.
 *
 * The caller has BUSY'd the page.  We should return with (*mpp) left busy,
 * and any additional pages unbusied.
 *
 * If the caller encounters a PG_RAM page it will pass it to us even though
 * it may be valid and dirty.  We cannot overwrite the page in this case!
 * The case is used to allow us to issue pure read-aheads.
 *
 * NOTE! XXX This code does not entirely pipeline yet due to the fact that
 *	     the PG_RAM page is validated at the same time as mreq.  What we
 *	     really need to do is issue a separate read-ahead pbuf.
 *
 * No requirements.
 */
static int
swap_pager_getpage(vm_object_t object, vm_pindex_t pindex,
		   vm_page_t *mpp, int seqaccess)
{
	struct buf *bp;
	struct bio *bio;
	vm_page_t mreq;
	vm_page_t m;
	vm_offset_t kva;
	swblk_t blk;
	int i;
	int j;
	int raonly;
	int error;
	u_int32_t busy_count;
	vm_page_t marray[XIO_INTERNAL_PAGES];

	mreq = *mpp;

	vm_object_hold(object);
	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		      object, mreq->object);
	}

	/*
	 * We don't want to overwrite a fully valid page as it might be
	 * dirty.  This case can occur when e.g. vm_fault hits a perfectly
	 * valid page with PG_RAM set.
	 *
	 * In this case we see if the next page is a suitable page-in
	 * candidate and if it is we issue read-ahead.  PG_RAM will be
	 * set on the last page of the read-ahead to continue the pipeline.
	 */
	if (mreq->valid == VM_PAGE_BITS_ALL) {
		if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size) {
			vm_object_drop(object);
			return(VM_PAGER_OK);
		}
		blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0);
		if (blk == SWAPBLK_NONE) {
			vm_object_drop(object);
			return(VM_PAGER_OK);
		}
		m = vm_page_lookup_busy_try(object, mreq->pindex + 1,
					    TRUE, &error);
		if (error) {
			vm_object_drop(object);
			return(VM_PAGER_OK);
		} else if (m == NULL) {
			/*
			 * Use VM_ALLOC_QUICK to avoid blocking on cache
			 * page reuse.
			 */
			m = vm_page_alloc(object, mreq->pindex + 1,
					  VM_ALLOC_QUICK);
			if (m == NULL) {
				vm_object_drop(object);
				return(VM_PAGER_OK);
			}
		} else {
			if (m->valid) {
				vm_page_wakeup(m);
				vm_object_drop(object);
				return(VM_PAGER_OK);
			}
			vm_page_unqueue_nowakeup(m);
		}
		/* page is busy */
		mreq = m;
		raonly = 1;
	} else {
		raonly = 0;
	}

	/*
	 * Try to block-read contiguous pages from swap if sequential,
	 * otherwise just read one page.  Contiguous pages from swap must
	 * reside within a single device stripe because the I/O cannot be
	 * broken up across multiple stripes.
	 *
	 * Note that blk and iblk can be SWAPBLK_NONE but the loop is
	 * set up such that the case(s) are handled implicitly.
	 */
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
	marray[0] = mreq;

	for (i = 1; i <= swap_burst_read &&
		    i < XIO_INTERNAL_PAGES &&
		    mreq->pindex + i < object->size; ++i) {
		swblk_t iblk;

		iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0);
		if (iblk != blk + i)
			break;
		if ((blk ^ iblk) & ~SWB_DMMASK)
			break;
		m = vm_page_lookup_busy_try(object, mreq->pindex + i,
					    TRUE, &error);
		if (error) {
			break;
		} else if (m == NULL) {
			/*
			 * Use VM_ALLOC_QUICK to avoid blocking on cache
			 * page reuse.
			 */
			m = vm_page_alloc(object, mreq->pindex + i,
					  VM_ALLOC_QUICK);
			if (m == NULL)
				break;
		} else {
			if (m->valid) {
				vm_page_wakeup(m);
				break;
			}
			vm_page_unqueue_nowakeup(m);
		}
		/* page is busy */
		marray[i] = m;
	}
	if (i > 1)
		vm_page_flag_set(marray[i - 1], PG_RAM);

	/*
	 * If mreq is the requested page and we have nothing to do return
	 * VM_PAGER_FAIL.  If raonly is set mreq is just another read-ahead
	 * page and must be cleaned up.
	 */
	if (blk == SWAPBLK_NONE) {
		KKASSERT(i == 1);
		if (raonly) {
			vnode_pager_freepage(mreq);
			vm_object_drop(object);
			return(VM_PAGER_OK);
		} else {
			vm_object_drop(object);
			return(VM_PAGER_FAIL);
		}
	}

	/*
	 * Map our page(s) into kva for input
	 *
	 * Use the KVABIO API to avoid synchronizing the pmap.
	 */
	bp = getpbuf_kva(&nsw_rcount);
	bio = &bp->b_bio1;
	kva = (vm_offset_t) bp->b_kvabase;
	bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t));
	pmap_qenter_noinval(kva, bp->b_xio.xio_pages, i);

	bp->b_data = (caddr_t)kva;
	bp->b_bcount = PAGE_SIZE * i;
	bp->b_xio.xio_npages = i;
	bp->b_flags |= B_KVABIO;
	bio->bio_done = swp_pager_async_iodone;
	bio->bio_offset = (off_t)blk << PAGE_SHIFT;
	bio->bio_caller_info1.index = SWBIO_READ;

	/*
	 * Set index.  If raonly set the index beyond the array so all
	 * the pages are treated the same, otherwise the original mreq is
	 * at index 0.
	 */
	if (raonly)
		bio->bio_driver_info = (void *)(intptr_t)i;
	else
		bio->bio_driver_info = (void *)(intptr_t)0;

	for (j = 0; j < i; ++j) {
		atomic_set_int(&bp->b_xio.xio_pages[j]->busy_count,
			       PBUSY_SWAPINPROG);
	}

	mycpu->gd_cnt.v_swapin++;
	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */
	vm_object_pip_add(object, bp->b_xio.xio_npages);

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 */
	bp->b_cmd = BUF_CMD_READ;
	BUF_KERNPROC(bp);
	vn_strategy(swapdev_vp, bio);

	/*
	 * Wait for the page we want to complete.  PBUSY_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 *
	 * If this is a read-ahead only we return immediately without
	 * waiting for I/O.
	 */
	if (raonly) {
		vm_object_drop(object);
		return(VM_PAGER_OK);
	}

	/*
	 * Read-ahead includes originally requested page case.
	 */
	for (;;) {
		busy_count = mreq->busy_count;
		cpu_ccfence();
		if ((busy_count & PBUSY_SWAPINPROG) == 0)
			break;
		tsleep_interlock(mreq, 0);
		if (!atomic_cmpset_int(&mreq->busy_count, busy_count,
				       busy_count |
				       PBUSY_SWAPINPROG | PBUSY_WANTED)) {
			continue;
		}
		atomic_set_int(&mreq->flags, PG_REFERENCED);
		mycpu->gd_cnt.v_intrans++;
		if (tsleep(mreq, PINTERLOCKED, "swread", hz * 20)) {
			kprintf(
			    "swap_pager: indefinite wait buffer: "
			    " bp %p offset: %lld, size: %ld "
			    " m=%p busy=%08x flags=%08x\n",
			    bp,
			    (long long)bio->bio_offset,
			    (long)bp->b_bcount,
			    mreq, mreq->busy_count, mreq->flags);
		}
	}

	/*
	 * Disallow speculative reads prior to the SWAPINPROG test.
	 */
	cpu_lfence();

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */
	vm_object_drop(object);
	if (mreq->valid != VM_PAGE_BITS_ALL)
		return(VM_PAGER_ERROR);
	else
		return(VM_PAGER_OK);

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 * swap_pager_putpages:
 *
 * Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 * We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 * are automatically converted to SWAP objects.
 *
 * In a low memory situation we may block in vn_strategy(), but the new
 * vm_page reservation system coupled with properly written VFS devices
 * should ensure that no low-memory deadlock occurs.  This is an area
 * which needs work.
 *
 * The parent has N vm_object_pip_add() references prior to
 * calling us and will remove references for rtvals[] that are
 * not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 * completion.
 *
 * The parent has soft-busy'd the pages it passes us and will unbusy
 * those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 * We need to unbusy the rest on I/O completion.
 *
 * No requirements.
 */
void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
		    int flags, int *rtvals)
{
	int i;
	int n = 0;

	vm_object_hold(object);

	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		      object, m[0]->object);
	}

	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * Check for bogus sysops
	 *
	 * Force sync if not pageout process, we don't want any single
	 * non-pageout process to be able to hog the I/O subsystem!  This
	 * can be overridden by setting swap_user_async.
	 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);

	/*
	 * Normally we force synchronous swap I/O if this is not the
	 * pageout daemon to prevent any single user process limited
	 * via RLIMIT_RSS from hogging swap write bandwidth.
	 */
	if (curthread != pagethread &&
	    curthread != emergpager &&
	    swap_user_async == 0) {
		flags |= OBJPC_SYNC;
	}

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */
	if (swap_async_max != nsw_wcount_async_max) {
		int n;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf_kva / 2)
			n = nswbuf_kva / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 *
		 * vm_token needed for nsw_wcount sleep interlock
		 */
		lwkt_gettoken(&vm_token);
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async_max += n;
			pbuf_adjcount(&nsw_wcount_async, n);
		}
		lwkt_reltoken(&vm_token);
	}

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		struct buf *bp;
		struct bio *bio;
		swblk_t blk;
		int j;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		lwkt_gettoken(&vm_token);

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while ((blk = swp_pager_getswapspace(object, n)) == SWAPBLK_NONE &&
		       n > 4) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			lwkt_reltoken(&vm_token);
			continue;
		}
		if (vm_report_swap_allocs > 0) {
			kprintf("swap_alloc %08jx,%d\n", (intmax_t)blk, n);
			--vm_report_swap_allocs;
		}

		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.
		 */
		if ((blk ^ (blk + n)) & ~SWB_DMMASK) {
			j = ((blk + SWB_DMMAX) & ~SWB_DMMASK) - blk;
			swp_pager_freeswapspace(object, blk + j, n - j);
			n = j;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 *
		 * Use the KVABIO API to avoid synchronizing the pmap.
		 */
		if ((flags & OBJPC_SYNC))
			bp = getpbuf_kva(&nsw_wcount_sync);
		else
			bp = getpbuf_kva(&nsw_wcount_async);
		bio = &bp->b_bio1;

		lwkt_reltoken(&vm_token);

		pmap_qenter_noinval((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_flags |= B_KVABIO;
		bp->b_bcount = PAGE_SIZE * n;
		bio->bio_offset = (off_t)blk << PAGE_SHIFT;

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(mreq->object, mreq->pindex,
					     blk + j);
			if (object->type == OBJT_SWAP)
				vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			atomic_set_int(&mreq->busy_count, PBUSY_SWAPINPROG);
			bp->b_xio.xio_pages[j] = mreq;
		}
		bp->b_xio.xio_npages = n;

		mycpu->gd_cnt.v_swapout++;
		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;

		bp->b_dirtyoff = 0;		/* req'd for NFS */
		bp->b_dirtyend = bp->b_bcount;	/* req'd for NFS */
		bp->b_cmd = BUF_CMD_WRITE;
		bio->bio_caller_info1.index = SWBIO_WRITE;

		/*
		 * asynchronous
		 */
		if ((flags & OBJPC_SYNC) == 0) {
			bio->bio_done = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			vn_strategy(swapdev_vp, bio);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			continue;
		}

		/*
		 * Issue synchronously.
		 *
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		bio->bio_caller_info1.index |= SWBIO_SYNC;
		if (flags & OBJPC_TRY_TO_CACHE)
			bio->bio_caller_info1.index |= SWBIO_TTC;
		bio->bio_done = biodone_sync;
		bio->bio_flags |= BIO_SYNC;
		vn_strategy(swapdev_vp, bio);
		biowait(bio, "swwrt");

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */
		swp_pager_async_iodone(bio);
	}
	vm_object_drop(object);
}

/*
 * No requirements.
 *
 * Recalculate the low and high-water marks.
 */
void
swap_pager_newswap(void)
{
	/*
	 * NOTE: vm_swap_max cannot exceed 1 billion blocks, which is the
	 *	 limitation imposed by the blist code.  Remember that this
	 *	 will be divided by NSWAP_MAX (4), so each swap device is
	 *	 limited to around a terabyte.
	 */
	if (vm_swap_max) {
		nswap_lowat = (int64_t)vm_swap_max * 4 / 100;	/* 4% left */
		nswap_hiwat = (int64_t)vm_swap_max * 6 / 100;	/* 6% left */
		kprintf("swap low/high-water marks set to %d/%d\n",
			nswap_lowat, nswap_hiwat);
	} else {
		nswap_lowat = 128;
		nswap_hiwat = 512;
	}
	swp_sizecheck();
}

/*
 * swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 *
 * No requirements.
 */
static void
swp_pager_async_iodone(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	vm_object_t object = NULL;
	int i;
	int *nswptr;

	/*
	 * report error
	 */
	if (bp->b_flags & B_ERROR) {
		kprintf(
		    "swap_pager: I/O error - %s failed; offset %lld,"
		    "size %ld, error %d\n",
		    ((bio->bio_caller_info1.index & SWBIO_READ) ?
			"pagein" : "pageout"),
		    (long long)bio->bio_offset,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object.
	 */
	if (bp->b_xio.xio_npages)
		object = bp->b_xio.xio_pages[0]->object;

#if 0
	/* PMAP TESTING CODE (useful, keep it in but #if 0'd) */
	if (bio->bio_caller_info1.index & SWBIO_WRITE) {
		if (bio->bio_crc != iscsi_crc32(bp->b_data, bp->b_bcount)) {
			kprintf("SWAPOUT: BADCRC %08x %08x\n",
				bio->bio_crc,
				iscsi_crc32(bp->b_data, bp->b_bcount));
			for (i = 0; i < bp->b_xio.xio_npages; ++i) {
				vm_page_t m = bp->b_xio.xio_pages[i];
				if ((m->flags & PG_WRITEABLE) &&
				    (pmap_mapped_sync(m) & PG_WRITEABLE)) {
					kprintf("SWAPOUT: "
						"%d/%d %p writable\n",
						i, bp->b_xio.xio_npages, m);
				}
			}
		}
	}
#endif

	/*
	 * remove the mapping for kernel virtual
	 */
	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */
	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
		vm_page_t m = bp->b_xio.xio_pages[i];

		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.
	if (vm_swap_max) {
		nswap_lowat = (int64_t)vm_swap_max * 4 / 100;	/* 4% left */
		nswap_hiwat = (int64_t)vm_swap_max * 6 / 100;	/* 6% left */
		kprintf("swap low/high-water marks set to %d/%d\n",
			nswap_lowat, nswap_hiwat);
	} else {
		nswap_lowat = 128;
		nswap_hiwat = 512;
	}
	swp_sizecheck();
}

/*
 * swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations the pages are BUSY'd; for WRITE operations
 *	they are vm_page_t->busy'd.  On READ completion we unbusy all
 *	pages except the 'main' request page.  On WRITE completion we
 *	unbusy all pages (we can do this because we marked them all
 *	VM_PAGER_PEND on return from putpages).
 *
 *	This routine may not block.
 *
 *	No requirements.
 */
static void
swp_pager_async_iodone(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	vm_object_t object = NULL;
	int i;
	int *nswptr;

	/*
	 * report error
	 */
	if (bp->b_flags & B_ERROR) {
		kprintf(
		    "swap_pager: I/O error - %s failed; offset %lld, "
		    "size %ld, error %d\n",
		    ((bio->bio_caller_info1.index & SWBIO_READ) ?
			"pagein" : "pageout"),
		    (long long)bio->bio_offset,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object.
	 */
	if (bp->b_xio.xio_npages)
		object = bp->b_xio.xio_pages[0]->object;

#if 0
	/* PMAP TESTING CODE (useful, keep it in but #if 0'd) */
	if (bio->bio_caller_info1.index & SWBIO_WRITE) {
		if (bio->bio_crc != iscsi_crc32(bp->b_data, bp->b_bcount)) {
			kprintf("SWAPOUT: BADCRC %08x %08x\n",
				bio->bio_crc,
				iscsi_crc32(bp->b_data, bp->b_bcount));
			for (i = 0; i < bp->b_xio.xio_npages; ++i) {
				vm_page_t m = bp->b_xio.xio_pages[i];
				if ((m->flags & PG_WRITEABLE) &&
				    (pmap_mapped_sync(m) & PG_WRITEABLE)) {
					kprintf("SWAPOUT: "
						"%d/%d %p writable\n",
						i, bp->b_xio.xio_npages, m);
				}
			}
		}
	}
#endif

	/*
	 * remove the mapping for kernel virtual
	 */
	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */
	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
		vm_page_t m = bp->b_xio.xio_pages[i];

		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */

			if (bio->bio_caller_info1.index & SWBIO_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: For reads, m->dirty will probably
				 *	 be overridden by the original caller
				 *	 of getpages so don't play cute tricks
				 *	 here.
				 *
				 * NOTE: We can't actually free the page from
				 *	 here, because this is an interrupt.
				 *	 It is not legal to mess with
				 *	 object->memq from an interrupt.
				 *	 Deactivate the page instead.
				 *
				 * WARNING! The instant SWAPINPROG is
				 *	    cleared another cpu may start
				 *	    using the mreq page (it will
				 *	    check m->valid immediately).
				 */

				m->valid = 0;
				atomic_clear_int(&m->busy_count,
						 PBUSY_SWAPINPROG);

				/*
				 * bio_driver_info holds the requested page
				 * index.
				 */
				if (i != (int)(intptr_t)bio->bio_driver_info) {
					vm_page_deactivate(m);
					vm_page_wakeup(m);
				} else {
					vm_page_flash(m);
				}
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs remove the swap
				 * assignment (note that PG_SWAPPED may or
				 * may not be set depending on prior activity).
				 *
				 * Re-dirty OBJT_SWAP pages as there is no
				 * other backing store, we can't throw the
				 * page away.
				 *
				 * Non-OBJT_SWAP pages (aka swapcache) must
				 * not be dirtied since they may not have
				 * been dirty in the first place, and they
				 * do have backing store (the vnode).
				 */
				vm_page_busy_wait(m, FALSE, "swadpg");
				vm_object_hold(m->object);
				swp_pager_meta_ctl(m->object, m->pindex,
						   SWM_FREE);
				vm_page_flag_clear(m, PG_SWAPPED);
				vm_object_drop(m->object);
				if (m->object->type == OBJT_SWAP) {
					vm_page_dirty(m);
					vm_page_activate(m);
				}
				vm_page_io_finish(m);
				atomic_clear_int(&m->busy_count,
						 PBUSY_SWAPINPROG);
				vm_page_wakeup(m);
			}
		} else if (bio->bio_caller_info1.index & SWBIO_READ) {
			/*
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */

			/*
			 * NOTE: Can't call pmap_clear_modify(m) from an
			 * interrupt thread, the pmap code may have to
			 * map non-kernel pmaps and currently asserts
			 * the case.
			 *
			 * WARNING! The instant SWAPINPROG is
			 *	    cleared another cpu may start
			 *	    using the mreq page (it will
			 *	    check m->valid immediately).
			 */
			/*pmap_clear_modify(m);*/
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_set(m, PG_SWAPPED);
			atomic_clear_int(&m->busy_count, PBUSY_SWAPINPROG);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure not to unbusy the page getpages
			 * specifically requested - getpages expects it
			 * to be left busy.
			 *
			 * bio_driver_info holds the requested page
			 */
			if (i != (int)(intptr_t)bio->bio_driver_info) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * Mark the page clean but do not mess with the
			 * pmap-layer's modified state.  That state should
			 * also be clear since the caller protected the
			 * page VM_PROT_READ, but allow the case.
			 *
			 * We are in an interrupt, avoid pmap operations.
			 *
			 * If we have a severe page deficit, deactivate the
			 * page.  Do not try to cache it (which would also
			 * involve a pmap op), because the page might still
			 * be read-heavy.
			 *
			 * When using the swap to cache clean vnode pages
			 * we do not mess with the page dirty bits.
			 *
			 * NOTE! Nobody is waiting for the key mreq page
			 *	 on write completion.
			 */
			vm_page_busy_wait(m, FALSE, "swadpg");
			if (m->object->type == OBJT_SWAP)
				vm_page_undirty(m);
			vm_page_flag_set(m, PG_SWAPPED);
			atomic_clear_int(&m->busy_count, PBUSY_SWAPINPROG);
			if (vm_paging_severe())
				vm_page_deactivate(m);
			vm_page_io_finish(m);
			if (bio->bio_caller_info1.index & SWBIO_TTC)
				vm_page_try_to_cache(m);
			else
				vm_page_wakeup(m);
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */

	if (object)
		vm_object_pip_wakeup_n(object, bp->b_xio.xio_npages);

	/*
	 * Release the physical I/O buffer.
	 *
	 * NOTE: Due to synchronous operations in the write case b_cmd may
	 *	 already be set to BUF_CMD_DONE and BIO_SYNC may have already
	 *	 been cleared.
	 *
	 * Use vm_token to interlock nsw_rcount/wcount wakeup?
	 */
	lwkt_gettoken(&vm_token);
	if (bio->bio_caller_info1.index & SWBIO_READ)
		nswptr = &nsw_rcount;
	else if (bio->bio_caller_info1.index & SWBIO_SYNC)
		nswptr = &nsw_wcount_sync;
	else
		nswptr = &nsw_wcount_async;
	bp->b_cmd = BUF_CMD_DONE;
	relpbuf(bp, nswptr);
	lwkt_reltoken(&vm_token);
}

/*
 * Fault-in a potentially swapped page and remove the swap reference.
 * (used by swapoff code)
 *
 * object must be held.
 */
static __inline void
swp_pager_fault_page(vm_object_t object, int *sharedp, vm_pindex_t pindex)
{
	struct vnode *vp;
	vm_page_t m;
	int error;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	if (object->type == OBJT_VNODE) {
		/*
		 * Any swap related to a vnode is due to swapcache.  We must
		 * vget() the vnode in case it is not active (otherwise
		 * vref() will panic).  Calling vm_object_page_remove() will
		 * ensure that any swap ref is removed interlocked with the
		 * page.  clean_only is set to TRUE so we don't throw away
		 * dirty pages.
		 */
		vp = object->handle;
		error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
		if (error == 0) {
			vm_object_page_remove(object, pindex, pindex + 1, TRUE);
			vput(vp);
		}
	} else {
		/*
		 * Otherwise it is a normal OBJT_SWAP object and we can
		 * fault the page in and remove the swap.
		 */
		m = vm_fault_object_page(object, IDX_TO_OFF(pindex),
					 VM_PROT_NONE,
					 VM_FAULT_DIRTY | VM_FAULT_UNSWAP,
					 sharedp, &error);
		if (m)
			vm_page_unhold(m);
	}
}

/*
 * This removes all swap blocks related to a particular device.  We have
 * to be careful of ripups during the scan.
 */
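/*
 * NOTE: The scan below inserts a marker object (OBJT_MARKER) into each
 *	 vm_object hash list and advances it past each object processed.
 *	 Because the per-object work (faulting pages in) can block, other
 *	 objects may be ripped out of the list in the meantime; the marker
 *	 keeps the scan position valid across such ripups.
 */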
static int swp_pager_swapoff_callback(struct swblock *swap, void *data);

int
swap_pager_swapoff(int devidx)
{
	struct vm_object_hash *hash;
	struct swswapoffinfo info;
	struct vm_object marker;
	vm_object_t object;
	int n;

	bzero(&marker, sizeof(marker));
	marker.type = OBJT_MARKER;

	for (n = 0; n < VMOBJ_HSIZE; ++n) {
		hash = &vm_object_hash[n];

		lwkt_gettoken(&hash->token);
		TAILQ_INSERT_HEAD(&hash->list, &marker, object_entry);

		while ((object = TAILQ_NEXT(&marker, object_entry)) != NULL) {
			if (object->type == OBJT_MARKER)
				goto skip;
			if (object->type != OBJT_SWAP &&
			    object->type != OBJT_VNODE)
				goto skip;
			vm_object_hold(object);
			if (object->type != OBJT_SWAP &&
			    object->type != OBJT_VNODE) {
				vm_object_drop(object);
				goto skip;
			}

			/*
			 * Object is special in that we can't just pagein
			 * into vm_page's in it (tmpfs, vn).
			 */
			if ((object->flags & OBJ_NOPAGEIN) &&
			    RB_ROOT(&object->swblock_root)) {
				vm_object_drop(object);
				goto skip;
			}

			info.object = object;
			info.shared = 0;
			info.devidx = devidx;
			swblock_rb_tree_RB_SCAN(&object->swblock_root,
					    NULL, swp_pager_swapoff_callback,
					    &info);
			vm_object_drop(object);
skip:
			if (object == TAILQ_NEXT(&marker, object_entry)) {
				TAILQ_REMOVE(&hash->list, &marker,
					     object_entry);
				TAILQ_INSERT_AFTER(&hash->list, object,
						   &marker, object_entry);
			}
		}
		TAILQ_REMOVE(&hash->list, &marker, object_entry);
		lwkt_reltoken(&hash->token);
	}

	/*
	 * If we fail to locate all swblocks we just fail gracefully and
	 * do not bother to restore paging on the swap device.  The user
	 * can simply retry the swapoff later.
	 */
	if (swdevt[devidx].sw_nused)
		return (1);
	else
		return (0);
}

static
int
swp_pager_swapoff_callback(struct swblock *swap, void *data)
{
	struct swswapoffinfo *info = data;
	vm_object_t object = info->object;
	vm_pindex_t index;
	swblk_t v;
	int i;

	index = swap->swb_index;
	for (i = 0; i < SWAP_META_PAGES; ++i) {
		/*
		 * Make sure we don't race a dying object.  This will
		 * kill the scan of the object's swap blocks entirely.
		 */
		if (object->flags & OBJ_DEAD)
			return(-1);

		/*
		 * Fault the page, which can obviously block.  If the swap
		 * structure disappears break out.
		 */
		v = swap->swb_pages[i];
		if (v != SWAPBLK_NONE && BLK2DEVIDX(v) == info->devidx) {
			swp_pager_fault_page(object, &info->shared,
					     swap->swb_index + i);
			/* swap ptr might go away */
			if (RB_LOOKUP(swblock_rb_tree,
				      &object->swblock_root, index) != swap) {
				break;
			}
		}
	}
	return(0);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.
 *
 *	Swap metadata is kept in per-object red-black trees of swblock
 *	structures (object->swblock_root) rather than in a global hash.
 *	The object also maintains an swblock_count tracking counter.
 */

/*
 * Lookup the swblock containing the specified swap block index.
 *
 * The caller must hold the object.
 */
static __inline
struct swblock *
swp_pager_lookup(vm_object_t object, vm_pindex_t index)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	index &= ~(vm_pindex_t)SWAP_META_MASK;
	return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
}
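/*
 * For illustration (the real SWAP_META_PAGES value is defined in the
 * swap pager headers): if SWAP_META_PAGES were 16, SWAP_META_MASK would
 * be 15, so a page index of 37 would be masked down to an swblock base
 * index of 32 and the block itself would live in slot 37 & 15 == 5 of
 * that swblock's swb_pages[] array.
 */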

/*
 * Remove a swblock from the RB tree.
 *
 * The caller must hold the object.
 */
static __inline
void
swp_pager_remove(vm_object_t object, struct swblock *swap)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap);
}

/*
 * Convert default object to swap object if necessary
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_convert(vm_object_t object)
{
	if (object->type == OBJT_DEFAULT) {
		object->type = OBJT_SWAP;
		KKASSERT(object->swblock_count == 0);
	}
}

/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.  Vnode objects do not need to be converted.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	The caller must hold the object.
 */
static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk)
{
	struct swblock *swap;
	struct swblock *oswap;
	vm_pindex_t v;

	KKASSERT(swapblk != SWAPBLK_NONE);
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Convert object if necessary
	 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);

	/*
	 * Locate swblock.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the tree may have changed while we blocked, retry.
	 */
retry:
	swap = swp_pager_lookup(object, index);

	if (swap == NULL) {
		int i;

		swap = zalloc(swap_zone);
		if (swap == NULL) {
			vm_wait(0);
			goto retry;
		}
		swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK;
		swap->swb_count = 0;

		++object->swblock_count;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
		oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap);
		KKASSERT(oswap == NULL);
	}

	/*
	 * Delete prior contents of metadata.
	 *
	 * NOTE: Decrement swb_count after the freeing operation (which
	 *	 might block) to prevent racing destruction of the swblock.
	 */
	index &= SWAP_META_MASK;

	while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) {
		swap->swb_pages[index] = SWAPBLK_NONE;
		/* can block */
		swp_pager_freeswapspace(object, v, 1);
		--swap->swb_count;
		--mycpu->gd_vmtotal.t_vm;
	}

	/*
	 * Enter block into metadata
	 */
	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE) {
		++swap->swb_count;
		++mycpu->gd_vmtotal.t_vm;
	}
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	The caller must hold the object.
 */
static int swp_pager_meta_free_callback(struct swblock *swb, void *data);

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count)
{
	struct swfreeinfo info;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Nothing to do
	 */
	if (object->swblock_count == 0) {
		KKASSERT(RB_EMPTY(&object->swblock_root));
		return;
	}
	if (count == 0)
		return;

	/*
	 * Setup for RB tree scan.  Note that the pindex range can be huge
	 * due to the 64 bit page index space so we cannot safely iterate.
	 */
	info.object = object;
	info.basei = index & ~(vm_pindex_t)SWAP_META_MASK;
	info.begi = index;
	info.endi = index + count - 1;
	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp,
				swp_pager_meta_free_callback, &info);
}

/*
 * The caller must hold the object.
 */
static
int
swp_pager_meta_free_callback(struct swblock *swap, void *data)
{
	struct swfreeinfo *info = data;
	vm_object_t object = info->object;
	int index;
	int eindex;

	/*
	 * Figure out the range within the swblock.  The wider scan may
	 * return edge-case swap blocks when the start and/or end points
	 * are in the middle of a block.
	 */
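	/*
	 * Worked example (illustrative, assuming SWAP_META_PAGES == 16):
	 * freeing pindex range 5..39 (begi == 5, endi == 39) visits the
	 * swblocks based at 0, 16 and 32.  For the first, index starts
	 * at 5 and eindex is 15; the middle block is covered in full
	 * (0..15); for the last, index starts at 0 and eindex is
	 * 39 & 15 == 7.
	 */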
	if (swap->swb_index < info->begi)
		index = (int)info->begi & SWAP_META_MASK;
	else
		index = 0;

	if (swap->swb_index + SWAP_META_PAGES > info->endi)
		eindex = (int)info->endi & SWAP_META_MASK;
	else
		eindex = SWAP_META_MASK;

	/*
	 * Scan and free the blocks.  The loop terminates early
	 * if (swap) runs out of blocks and is freed.
	 *
	 * NOTE: Decrement swb_count after swp_pager_freeswapspace()
	 *	 to deal with a zfree race.
	 */
	while (index <= eindex) {
		swblk_t v = swap->swb_pages[index];

		if (v != SWAPBLK_NONE) {
			swap->swb_pages[index] = SWAPBLK_NONE;
			/* can block */
			swp_pager_freeswapspace(object, v, 1);
			--mycpu->gd_vmtotal.t_vm;
			if (--swap->swb_count == 0) {
				swp_pager_remove(object, swap);
				zfree(swap_zone, swap);
				--object->swblock_count;
				break;
			}
		}
		++index;
	}

	/* swap may be invalid here due to zfree above */
	lwkt_yield();

	return(0);
}

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	NOTE: Decrement swb_count after the freeing operation (which
 *	      might block) to prevent racing destruction of the swblock.
 *
 *	The caller must hold the object.
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	struct swblock *swap;
	int i;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	while ((swap = RB_ROOT(&object->swblock_root)) != NULL) {
		swp_pager_remove(object, swap);
		for (i = 0; i < SWAP_META_PAGES; ++i) {
			swblk_t v = swap->swb_pages[i];
			if (v != SWAPBLK_NONE) {
				/* can block */
				swp_pager_freeswapspace(object, v, 1);
				--swap->swb_count;
				--mycpu->gd_vmtotal.t_vm;
			}
		}
		if (swap->swb_count != 0)
			panic("swap_pager_meta_free_all: swb_count != 0");
		zfree(swap_zone, swap);
		--object->swblock_count;
		lwkt_yield();
	}
	KKASSERT(object->swblock_count == 0);
}

/*
 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free it... pop it out
 *
 *	The caller must hold the object.
 */
static swblk_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
{
	struct swblock *swap;
	swblk_t r1;

	if (object->swblock_count == 0)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	swap = swp_pager_lookup(object, index);

	if (swap != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				--mycpu->gd_vmtotal.t_vm;
				if (--swap->swb_count == 0) {
					swp_pager_remove(object, swap);
					zfree(swap_zone, swap);
					--object->swblock_count;
				}
			}
			/* swap ptr may be invalid */
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(object, r1, 1);
				r1 = SWAPBLK_NONE;
			}
		}
		/* swap ptr may be invalid */
	}
	return(r1);
}
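
/*
 * For example, the write-error path in swp_pager_async_iodone() above
 * calls swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE) to strip the
 * failed block's swap assignment, while a plain lookup (flags == 0)
 * simply returns the assigned swap block, if any, without modifying the
 * metadata.
 */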