1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 1998-2010 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Matthew Dillon <dillon@backplane.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * Copyright (c) 1994 John S. Dyson 37 * Copyright (c) 1990 University of Utah. 38 * Copyright (c) 1991, 1993 39 * The Regents of the University of California. All rights reserved. 40 * 41 * This code is derived from software contributed to Berkeley by 42 * the Systems Programming Group of the University of Utah Computer 43 * Science Department. 44 * 45 * Redistribution and use in source and binary forms, with or without 46 * modification, are permitted provided that the following conditions 47 * are met: 48 * 1. Redistributions of source code must retain the above copyright 49 * notice, this list of conditions and the following disclaimer. 50 * 2. Redistributions in binary form must reproduce the above copyright 51 * notice, this list of conditions and the following disclaimer in the 52 * documentation and/or other materials provided with the distribution. 53 * 3. All advertising materials mentioning features or use of this software 54 * must display the following acknowledgement: 55 * This product includes software developed by the University of 56 * California, Berkeley and its contributors. 57 * 4. Neither the name of the University nor the names of its contributors 58 * may be used to endorse or promote products derived from this software 59 * without specific prior written permission. 60 * 61 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 62 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 63 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 64 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 65 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 66 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 67 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 68 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 69 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 70 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 71 * SUCH DAMAGE. 72 * 73 * New Swap System 74 * Matthew Dillon 75 * 76 * Radix Bitmap 'blists'. 77 * 78 * - The new swapper uses the new radix bitmap code. This should scale 79 * to arbitrarily small or arbitrarily large swap spaces and an almost 80 * arbitrary degree of fragmentation. 81 * 82 * Features: 83 * 84 * - on the fly reallocation of swap during putpages. The new system 85 * does not try to keep previously allocated swap blocks for dirty 86 * pages. 87 * 88 * - on the fly deallocation of swap 89 * 90 * - No more garbage collection required. Unnecessarily allocated swap 91 * blocks only exist for dirty vm_page_t's now and these are already 92 * cycled (in a high-load system) by the pager. We also do on-the-fly 93 * removal of invalidated swap blocks when a page is destroyed 94 * or renamed. 95 * 96 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$ 97 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94 98 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $ 99 */ 100 101 #include <sys/param.h> 102 #include <sys/systm.h> 103 #include <sys/conf.h> 104 #include <sys/kernel.h> 105 #include <sys/proc.h> 106 #include <sys/buf.h> 107 #include <sys/vnode.h> 108 #include <sys/malloc.h> 109 #include <sys/vmmeter.h> 110 #include <sys/sysctl.h> 111 #include <sys/blist.h> 112 #include <sys/lock.h> 113 #include <sys/thread2.h> 114 115 #ifndef MAX_PAGEOUT_CLUSTER 116 #define MAX_PAGEOUT_CLUSTER 16 117 #endif 118 119 #define SWB_NPAGES MAX_PAGEOUT_CLUSTER 120 121 #include "opt_swap.h" 122 #include <vm/vm.h> 123 #include <vm/vm_object.h> 124 #include <vm/vm_page.h> 125 #include <vm/vm_pager.h> 126 #include <vm/vm_pageout.h> 127 #include <vm/swap_pager.h> 128 #include <vm/vm_extern.h> 129 #include <vm/vm_zone.h> 130 #include <vm/vnode_pager.h> 131 132 #include <sys/buf2.h> 133 #include <vm/vm_page2.h> 134 135 #define SWM_FREE 0x02 /* free, period */ 136 #define SWM_POP 0x04 /* pop out */ 137 138 #define SWBIO_READ 0x01 139 #define SWBIO_WRITE 0x02 140 #define SWBIO_SYNC 0x04 141 142 struct swfreeinfo { 143 vm_object_t object; 144 vm_pindex_t basei; 145 vm_pindex_t begi; 146 vm_pindex_t endi; /* inclusive */ 147 }; 148 149 /* 150 * vm_swap_size is in page-sized chunks now. It was DEV_BSIZE'd chunks 151 * in the old system. 152 */ 153 154 int swap_pager_full; /* swap space exhaustion (task killing) */ 155 int vm_swap_cache_use; 156 int vm_swap_anon_use; 157 158 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/ 159 static int nsw_rcount; /* free read buffers */ 160 static int nsw_wcount_sync; /* limit write buffers / synchronous */ 161 static int nsw_wcount_async; /* limit write buffers / asynchronous */ 162 static int nsw_wcount_async_max;/* assigned maximum */ 163 static int nsw_cluster_max; /* maximum VOP I/O allowed */ 164 165 struct blist *swapblist; 166 static int swap_async_max = 4; /* maximum in-progress async I/O's */ 167 static int swap_burst_read = 0; /* allow burst reading */ 168 169 /* from vm_swap.c */ 170 extern struct vnode *swapdev_vp; 171 extern struct swdevt *swdevt; 172 extern int nswdev; 173 174 #define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0) 175 176 SYSCTL_INT(_vm, OID_AUTO, swap_async_max, 177 CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops"); 178 SYSCTL_INT(_vm, OID_AUTO, swap_burst_read, 179 CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins"); 180 181 SYSCTL_INT(_vm, OID_AUTO, swap_cache_use, 182 CTLFLAG_RD, &vm_swap_cache_use, 0, ""); 183 SYSCTL_INT(_vm, OID_AUTO, swap_anon_use, 184 CTLFLAG_RD, &vm_swap_anon_use, 0, ""); 185 SYSCTL_INT(_vm, OID_AUTO, swap_size, 186 CTLFLAG_RD, &vm_swap_size, 0, ""); 187 188 vm_zone_t swap_zone; 189 190 /* 191 * Red-Black tree for swblock entries 192 * 193 * The caller must hold vm_token 194 */ 195 RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare, 196 vm_pindex_t, swb_index); 197 198 int 199 rb_swblock_compare(struct swblock *swb1, struct swblock *swb2) 200 { 201 if (swb1->swb_index < swb2->swb_index) 202 return(-1); 203 if (swb1->swb_index > swb2->swb_index) 204 return(1); 205 return(0); 206 } 207 208 static 209 int 210 rb_swblock_scancmp(struct swblock *swb, void *data) 211 { 212 struct swfreeinfo *info = data; 213 214 if (swb->swb_index < info->basei) 215 return(-1); 216 if (swb->swb_index > info->endi) 217 return(1); 218 return(0); 219 } 220 221 static 222 int 223 rb_swblock_condcmp(struct swblock *swb, void *data) 224 { 225 struct swfreeinfo *info = data; 226 227 if (swb->swb_index < info->basei) 228 return(-1); 229 return(0); 230 } 231 232 /* 233 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure 234 * calls hooked from other parts of the VM system and do not appear here. 235 * (see vm/swap_pager.h). 236 */ 237 238 static void swap_pager_dealloc (vm_object_t object); 239 static int swap_pager_getpage (vm_object_t, vm_page_t *, int); 240 static void swap_chain_iodone(struct bio *biox); 241 242 struct pagerops swappagerops = { 243 swap_pager_dealloc, /* deallocate an OBJT_SWAP object */ 244 swap_pager_getpage, /* pagein */ 245 swap_pager_putpages, /* pageout */ 246 swap_pager_haspage /* get backing store status for page */ 247 }; 248 249 /* 250 * dmmax is in page-sized chunks with the new swap system. It was 251 * dev-bsized chunks in the old. dmmax is always a power of 2. 252 * 253 * swap_*() routines are externally accessible. swp_*() routines are 254 * internal. 255 */ 256 257 int dmmax; 258 static int dmmax_mask; 259 int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */ 260 int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */ 261 262 static __inline void swp_sizecheck (void); 263 static void swp_pager_async_iodone (struct bio *bio); 264 265 /* 266 * Swap bitmap functions 267 */ 268 269 static __inline void swp_pager_freeswapspace(vm_object_t object, 270 swblk_t blk, int npages); 271 static __inline swblk_t swp_pager_getswapspace(vm_object_t object, int npages); 272 273 /* 274 * Metadata functions 275 */ 276 277 static void swp_pager_meta_convert(vm_object_t); 278 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t); 279 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t); 280 static void swp_pager_meta_free_all(vm_object_t); 281 static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int); 282 283 /* 284 * SWP_SIZECHECK() - update swap_pager_full indication 285 * 286 * update the swap_pager_almost_full indication and warn when we are 287 * about to run out of swap space, using lowat/hiwat hysteresis. 288 * 289 * Clear swap_pager_full ( task killing ) indication when lowat is met. 290 * 291 * No restrictions on call 292 * This routine may not block. 293 * SMP races are ok. 294 */ 295 static __inline void 296 swp_sizecheck(void) 297 { 298 if (vm_swap_size < nswap_lowat) { 299 if (swap_pager_almost_full == 0) { 300 kprintf("swap_pager: out of swap space\n"); 301 swap_pager_almost_full = 1; 302 } 303 } else { 304 swap_pager_full = 0; 305 if (vm_swap_size > nswap_hiwat) 306 swap_pager_almost_full = 0; 307 } 308 } 309 310 /* 311 * SWAP_PAGER_INIT() - initialize the swap pager! 312 * 313 * Expected to be started from system init. NOTE: This code is run 314 * before much else so be careful what you depend on. Most of the VM 315 * system has yet to be initialized at this point. 316 * 317 * Called from the low level boot code only. 318 */ 319 static void 320 swap_pager_init(void *arg __unused) 321 { 322 /* 323 * Device Stripe, in PAGE_SIZE'd blocks 324 */ 325 dmmax = SWB_NPAGES * 2; 326 dmmax_mask = ~(dmmax - 1); 327 } 328 SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL) 329 330 /* 331 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process 332 * 333 * Expected to be started from pageout process once, prior to entering 334 * its main loop. 335 * 336 * Called from the low level boot code only. 337 */ 338 void 339 swap_pager_swap_init(void) 340 { 341 int n, n2; 342 343 /* 344 * Number of in-transit swap bp operations. Don't 345 * exhaust the pbufs completely. Make sure we 346 * initialize workable values (0 will work for hysteresis 347 * but it isn't very efficient). 348 * 349 * The nsw_cluster_max is constrained by the number of pages an XIO 350 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined 351 * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are 352 * constrained by the swap device interleave stripe size. 353 * 354 * Currently we hardwire nsw_wcount_async to 4. This limit is 355 * designed to prevent other I/O from having high latencies due to 356 * our pageout I/O. The value 4 works well for one or two active swap 357 * devices but is probably a little low if you have more. Even so, 358 * a higher value would probably generate only a limited improvement 359 * with three or four active swap devices since the system does not 360 * typically have to pageout at extreme bandwidths. We will want 361 * at least 2 per swap devices, and 4 is a pretty good value if you 362 * have one NFS swap device due to the command/ack latency over NFS. 363 * So it all works out pretty well. 364 */ 365 366 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER); 367 368 nsw_rcount = (nswbuf + 1) / 2; 369 nsw_wcount_sync = (nswbuf + 3) / 4; 370 nsw_wcount_async = 4; 371 nsw_wcount_async_max = nsw_wcount_async; 372 373 /* 374 * The zone is dynamically allocated so generally size it to 375 * maxswzone (32MB to 512MB of KVM). Set a minimum size based 376 * on physical memory of around 8x (each swblock can hold 16 pages). 377 * 378 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio 379 * has increased dramatically. 380 */ 381 n = vmstats.v_page_count / 2; 382 if (maxswzone && n < maxswzone / sizeof(struct swblock)) 383 n = maxswzone / sizeof(struct swblock); 384 n2 = n; 385 386 do { 387 swap_zone = zinit( 388 "SWAPMETA", 389 sizeof(struct swblock), 390 n, 391 ZONE_INTERRUPT, 392 1); 393 if (swap_zone != NULL) 394 break; 395 /* 396 * if the allocation failed, try a zone two thirds the 397 * size of the previous attempt. 398 */ 399 n -= ((n + 2) / 3); 400 } while (n > 0); 401 402 if (swap_zone == NULL) 403 panic("swap_pager_swap_init: swap_zone == NULL"); 404 if (n2 != n) 405 kprintf("Swap zone entries reduced from %d to %d.\n", n2, n); 406 } 407 408 /* 409 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate 410 * its metadata structures. 411 * 412 * This routine is called from the mmap and fork code to create a new 413 * OBJT_SWAP object. We do this by creating an OBJT_DEFAULT object 414 * and then converting it with swp_pager_meta_convert(). 415 * 416 * We only support unnamed objects. 417 * 418 * No restrictions. 419 */ 420 vm_object_t 421 swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset) 422 { 423 vm_object_t object; 424 425 KKASSERT(handle == NULL); 426 object = vm_object_allocate_hold(OBJT_DEFAULT, 427 OFF_TO_IDX(offset + PAGE_MASK + size)); 428 swp_pager_meta_convert(object); 429 vm_object_drop(object); 430 431 return (object); 432 } 433 434 /* 435 * SWAP_PAGER_DEALLOC() - remove swap metadata from object 436 * 437 * The swap backing for the object is destroyed. The code is 438 * designed such that we can reinstantiate it later, but this 439 * routine is typically called only when the entire object is 440 * about to be destroyed. 441 * 442 * The object must be locked or unreferenceable. 443 * No other requirements. 444 */ 445 static void 446 swap_pager_dealloc(vm_object_t object) 447 { 448 vm_object_hold(object); 449 vm_object_pip_wait(object, "swpdea"); 450 451 /* 452 * Free all remaining metadata. We only bother to free it from 453 * the swap meta data. We do not attempt to free swapblk's still 454 * associated with vm_page_t's for this object. We do not care 455 * if paging is still in progress on some objects. 456 */ 457 swp_pager_meta_free_all(object); 458 vm_object_drop(object); 459 } 460 461 /************************************************************************ 462 * SWAP PAGER BITMAP ROUTINES * 463 ************************************************************************/ 464 465 /* 466 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space 467 * 468 * Allocate swap for the requested number of pages. The starting 469 * swap block number (a page index) is returned or SWAPBLK_NONE 470 * if the allocation failed. 471 * 472 * Also has the side effect of advising that somebody made a mistake 473 * when they configured swap and didn't configure enough. 474 * 475 * The caller must hold the object. 476 * This routine may not block. 477 */ 478 static __inline swblk_t 479 swp_pager_getswapspace(vm_object_t object, int npages) 480 { 481 swblk_t blk; 482 483 lwkt_gettoken(&vm_token); 484 if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) { 485 if (swap_pager_full != 2) { 486 kprintf("swap_pager_getswapspace: failed alloc=%d\n", 487 npages); 488 swap_pager_full = 2; 489 swap_pager_almost_full = 1; 490 } 491 } else { 492 swapacctspace(blk, -npages); 493 if (object->type == OBJT_SWAP) 494 vm_swap_anon_use += npages; 495 else 496 vm_swap_cache_use += npages; 497 swp_sizecheck(); 498 } 499 lwkt_reltoken(&vm_token); 500 return(blk); 501 } 502 503 /* 504 * SWP_PAGER_FREESWAPSPACE() - free raw swap space 505 * 506 * This routine returns the specified swap blocks back to the bitmap. 507 * 508 * Note: This routine may not block (it could in the old swap code), 509 * and through the use of the new blist routines it does not block. 510 * 511 * We must be called at splvm() to avoid races with bitmap frees from 512 * vm_page_remove() aka swap_pager_page_removed(). 513 * 514 * This routine may not block. 515 */ 516 517 static __inline void 518 swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages) 519 { 520 struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)]; 521 522 lwkt_gettoken(&vm_token); 523 sp->sw_nused -= npages; 524 if (object->type == OBJT_SWAP) 525 vm_swap_anon_use -= npages; 526 else 527 vm_swap_cache_use -= npages; 528 529 if (sp->sw_flags & SW_CLOSING) { 530 lwkt_reltoken(&vm_token); 531 return; 532 } 533 534 blist_free(swapblist, blk, npages); 535 vm_swap_size += npages; 536 swp_sizecheck(); 537 lwkt_reltoken(&vm_token); 538 } 539 540 /* 541 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page 542 * range within an object. 543 * 544 * This is a globally accessible routine. 545 * 546 * This routine removes swapblk assignments from swap metadata. 547 * 548 * The external callers of this routine typically have already destroyed 549 * or renamed vm_page_t's associated with this range in the object so 550 * we should be ok. 551 * 552 * No requirements. 553 */ 554 void 555 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size) 556 { 557 vm_object_hold(object); 558 swp_pager_meta_free(object, start, size); 559 vm_object_drop(object); 560 } 561 562 /* 563 * No requirements. 564 */ 565 void 566 swap_pager_freespace_all(vm_object_t object) 567 { 568 vm_object_hold(object); 569 swp_pager_meta_free_all(object); 570 vm_object_drop(object); 571 } 572 573 /* 574 * This function conditionally frees swap cache swap starting at 575 * (*basei) in the object. (count) swap blocks will be nominally freed. 576 * The actual number of blocks freed can be more or less than the 577 * requested number. 578 * 579 * This function nominally returns the number of blocks freed. However, 580 * the actual number of blocks freed may be less then the returned value. 581 * If the function is unable to exhaust the object or if it is able to 582 * free (approximately) the requested number of blocks it returns 583 * a value n > count. 584 * 585 * If we exhaust the object we will return a value n <= count. 586 * 587 * The caller must hold the object. 588 * 589 * WARNING! If count == 0 then -1 can be returned as a degenerate case, 590 * callers should always pass a count value > 0. 591 */ 592 static int swap_pager_condfree_callback(struct swblock *swap, void *data); 593 594 int 595 swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count) 596 { 597 struct swfreeinfo info; 598 int n; 599 int t; 600 601 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 602 603 info.object = object; 604 info.basei = *basei; /* skip up to this page index */ 605 info.begi = count; /* max swap pages to destroy */ 606 info.endi = count * 8; /* max swblocks to scan */ 607 608 swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp, 609 swap_pager_condfree_callback, &info); 610 *basei = info.basei; 611 612 /* 613 * Take the higher difference swblocks vs pages 614 */ 615 n = count - (int)info.begi; 616 t = count * 8 - (int)info.endi; 617 if (n < t) 618 n = t; 619 if (n < 1) 620 n = 1; 621 return(n); 622 } 623 624 /* 625 * The idea is to free whole meta-block to avoid fragmenting 626 * the swap space or disk I/O. We only do this if NO VM pages 627 * are present. 628 * 629 * We do not have to deal with clearing PG_SWAPPED in related VM 630 * pages because there are no related VM pages. 631 * 632 * The caller must hold the object. 633 */ 634 static int 635 swap_pager_condfree_callback(struct swblock *swap, void *data) 636 { 637 struct swfreeinfo *info = data; 638 vm_object_t object = info->object; 639 int i; 640 641 for (i = 0; i < SWAP_META_PAGES; ++i) { 642 if (vm_page_lookup(object, swap->swb_index + i)) 643 break; 644 } 645 info->basei = swap->swb_index + SWAP_META_PAGES; 646 if (i == SWAP_META_PAGES) { 647 info->begi -= swap->swb_count; 648 swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES); 649 } 650 --info->endi; 651 if ((int)info->begi < 0 || (int)info->endi < 0) 652 return(-1); 653 lwkt_yield(); 654 return(0); 655 } 656 657 /* 658 * Called by vm_page_alloc() when a new VM page is inserted 659 * into a VM object. Checks whether swap has been assigned to 660 * the page and sets PG_SWAPPED as necessary. 661 * 662 * No requirements. 663 */ 664 void 665 swap_pager_page_inserted(vm_page_t m) 666 { 667 if (m->object->swblock_count) { 668 vm_object_hold(m->object); 669 if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE) 670 vm_page_flag_set(m, PG_SWAPPED); 671 vm_object_drop(m->object); 672 } 673 } 674 675 /* 676 * SWAP_PAGER_RESERVE() - reserve swap blocks in object 677 * 678 * Assigns swap blocks to the specified range within the object. The 679 * swap blocks are not zerod. Any previous swap assignment is destroyed. 680 * 681 * Returns 0 on success, -1 on failure. 682 * 683 * The caller is responsible for avoiding races in the specified range. 684 * No other requirements. 685 */ 686 int 687 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size) 688 { 689 int n = 0; 690 swblk_t blk = SWAPBLK_NONE; 691 vm_pindex_t beg = start; /* save start index */ 692 693 vm_object_hold(object); 694 695 while (size) { 696 if (n == 0) { 697 n = BLIST_MAX_ALLOC; 698 while ((blk = swp_pager_getswapspace(object, n)) == 699 SWAPBLK_NONE) 700 { 701 n >>= 1; 702 if (n == 0) { 703 swp_pager_meta_free(object, beg, 704 start - beg); 705 vm_object_drop(object); 706 return(-1); 707 } 708 } 709 } 710 swp_pager_meta_build(object, start, blk); 711 --size; 712 ++start; 713 ++blk; 714 --n; 715 } 716 swp_pager_meta_free(object, start, n); 717 vm_object_drop(object); 718 return(0); 719 } 720 721 /* 722 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager 723 * and destroy the source. 724 * 725 * Copy any valid swapblks from the source to the destination. In 726 * cases where both the source and destination have a valid swapblk, 727 * we keep the destination's. 728 * 729 * This routine is allowed to block. It may block allocating metadata 730 * indirectly through swp_pager_meta_build() or if paging is still in 731 * progress on the source. 732 * 733 * XXX vm_page_collapse() kinda expects us not to block because we 734 * supposedly do not need to allocate memory, but for the moment we 735 * *may* have to get a little memory from the zone allocator, but 736 * it is taken from the interrupt memory. We should be ok. 737 * 738 * The source object contains no vm_page_t's (which is just as well) 739 * The source object is of type OBJT_SWAP. 740 * 741 * The source and destination objects must be held by the caller. 742 */ 743 void 744 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject, 745 vm_pindex_t base_index, int destroysource) 746 { 747 vm_pindex_t i; 748 749 ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject)); 750 ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject)); 751 752 /* 753 * transfer source to destination. 754 */ 755 for (i = 0; i < dstobject->size; ++i) { 756 swblk_t dstaddr; 757 758 /* 759 * Locate (without changing) the swapblk on the destination, 760 * unless it is invalid in which case free it silently, or 761 * if the destination is a resident page, in which case the 762 * source is thrown away. 763 */ 764 dstaddr = swp_pager_meta_ctl(dstobject, i, 0); 765 766 if (dstaddr == SWAPBLK_NONE) { 767 /* 768 * Destination has no swapblk and is not resident, 769 * copy source. 770 */ 771 swblk_t srcaddr; 772 773 srcaddr = swp_pager_meta_ctl(srcobject, 774 base_index + i, SWM_POP); 775 776 if (srcaddr != SWAPBLK_NONE) 777 swp_pager_meta_build(dstobject, i, srcaddr); 778 } else { 779 /* 780 * Destination has valid swapblk or it is represented 781 * by a resident page. We destroy the sourceblock. 782 */ 783 swp_pager_meta_ctl(srcobject, base_index + i, SWM_FREE); 784 } 785 } 786 787 /* 788 * Free left over swap blocks in source. 789 * 790 * We have to revert the type to OBJT_DEFAULT so we do not accidently 791 * double-remove the object from the swap queues. 792 */ 793 if (destroysource) { 794 /* 795 * Reverting the type is not necessary, the caller is going 796 * to destroy srcobject directly, but I'm doing it here 797 * for consistency since we've removed the object from its 798 * queues. 799 */ 800 swp_pager_meta_free_all(srcobject); 801 if (srcobject->type == OBJT_SWAP) 802 srcobject->type = OBJT_DEFAULT; 803 } 804 } 805 806 /* 807 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for 808 * the requested page. 809 * 810 * We determine whether good backing store exists for the requested 811 * page and return TRUE if it does, FALSE if it doesn't. 812 * 813 * If TRUE, we also try to determine how much valid, contiguous backing 814 * store exists before and after the requested page within a reasonable 815 * distance. We do not try to restrict it to the swap device stripe 816 * (that is handled in getpages/putpages). It probably isn't worth 817 * doing here. 818 * 819 * No requirements. 820 */ 821 boolean_t 822 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex) 823 { 824 swblk_t blk0; 825 826 /* 827 * do we have good backing store at the requested index ? 828 */ 829 vm_object_hold(object); 830 blk0 = swp_pager_meta_ctl(object, pindex, 0); 831 832 if (blk0 == SWAPBLK_NONE) { 833 vm_object_drop(object); 834 return (FALSE); 835 } 836 vm_object_drop(object); 837 return (TRUE); 838 } 839 840 /* 841 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page 842 * 843 * This removes any associated swap backing store, whether valid or 844 * not, from the page. This operates on any VM object, not just OBJT_SWAP 845 * objects. 846 * 847 * This routine is typically called when a page is made dirty, at 848 * which point any associated swap can be freed. MADV_FREE also 849 * calls us in a special-case situation 850 * 851 * NOTE!!! If the page is clean and the swap was valid, the caller 852 * should make the page dirty before calling this routine. This routine 853 * does NOT change the m->dirty status of the page. Also: MADV_FREE 854 * depends on it. 855 * 856 * The page must be busied or soft-busied. 857 * The caller can hold the object to avoid blocking, else we might block. 858 * No other requirements. 859 */ 860 void 861 swap_pager_unswapped(vm_page_t m) 862 { 863 if (m->flags & PG_SWAPPED) { 864 vm_object_hold(m->object); 865 KKASSERT(m->flags & PG_SWAPPED); 866 swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE); 867 vm_page_flag_clear(m, PG_SWAPPED); 868 vm_object_drop(m->object); 869 } 870 } 871 872 /* 873 * SWAP_PAGER_STRATEGY() - read, write, free blocks 874 * 875 * This implements a VM OBJECT strategy function using swap backing store. 876 * This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP 877 * types. 878 * 879 * This is intended to be a cacheless interface (i.e. caching occurs at 880 * higher levels), and is also used as a swap-based SSD cache for vnode 881 * and device objects. 882 * 883 * All I/O goes directly to and from the swap device. 884 * 885 * We currently attempt to run I/O synchronously or asynchronously as 886 * the caller requests. This isn't perfect because we loose error 887 * sequencing when we run multiple ops in parallel to satisfy a request. 888 * But this is swap, so we let it all hang out. 889 * 890 * No requirements. 891 */ 892 void 893 swap_pager_strategy(vm_object_t object, struct bio *bio) 894 { 895 struct buf *bp = bio->bio_buf; 896 struct bio *nbio; 897 vm_pindex_t start; 898 vm_pindex_t biox_blkno = 0; 899 int count; 900 char *data; 901 struct bio *biox; 902 struct buf *bufx; 903 struct bio_track *track; 904 905 /* 906 * tracking for swapdev vnode I/Os 907 */ 908 if (bp->b_cmd == BUF_CMD_READ) 909 track = &swapdev_vp->v_track_read; 910 else 911 track = &swapdev_vp->v_track_write; 912 913 if (bp->b_bcount & PAGE_MASK) { 914 bp->b_error = EINVAL; 915 bp->b_flags |= B_ERROR | B_INVAL; 916 biodone(bio); 917 kprintf("swap_pager_strategy: bp %p offset %lld size %d, " 918 "not page bounded\n", 919 bp, (long long)bio->bio_offset, (int)bp->b_bcount); 920 return; 921 } 922 923 /* 924 * Clear error indication, initialize page index, count, data pointer. 925 */ 926 bp->b_error = 0; 927 bp->b_flags &= ~B_ERROR; 928 bp->b_resid = bp->b_bcount; 929 930 start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT); 931 count = howmany(bp->b_bcount, PAGE_SIZE); 932 data = bp->b_data; 933 934 /* 935 * Deal with BUF_CMD_FREEBLKS 936 */ 937 if (bp->b_cmd == BUF_CMD_FREEBLKS) { 938 /* 939 * FREE PAGE(s) - destroy underlying swap that is no longer 940 * needed. 941 */ 942 vm_object_hold(object); 943 swp_pager_meta_free(object, start, count); 944 vm_object_drop(object); 945 bp->b_resid = 0; 946 biodone(bio); 947 return; 948 } 949 950 /* 951 * We need to be able to create a new cluster of I/O's. We cannot 952 * use the caller fields of the passed bio so push a new one. 953 * 954 * Because nbio is just a placeholder for the cluster links, 955 * we can biodone() the original bio instead of nbio to make 956 * things a bit more efficient. 957 */ 958 nbio = push_bio(bio); 959 nbio->bio_offset = bio->bio_offset; 960 nbio->bio_caller_info1.cluster_head = NULL; 961 nbio->bio_caller_info2.cluster_tail = NULL; 962 963 biox = NULL; 964 bufx = NULL; 965 966 /* 967 * Execute read or write 968 */ 969 vm_object_hold(object); 970 971 while (count > 0) { 972 swblk_t blk; 973 974 /* 975 * Obtain block. If block not found and writing, allocate a 976 * new block and build it into the object. 977 */ 978 blk = swp_pager_meta_ctl(object, start, 0); 979 if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) { 980 blk = swp_pager_getswapspace(object, 1); 981 if (blk == SWAPBLK_NONE) { 982 bp->b_error = ENOMEM; 983 bp->b_flags |= B_ERROR; 984 break; 985 } 986 swp_pager_meta_build(object, start, blk); 987 } 988 989 /* 990 * Do we have to flush our current collection? Yes if: 991 * 992 * - no swap block at this index 993 * - swap block is not contiguous 994 * - we cross a physical disk boundry in the 995 * stripe. 996 */ 997 if ( 998 biox && (biox_blkno + btoc(bufx->b_bcount) != blk || 999 ((biox_blkno ^ blk) & dmmax_mask) 1000 ) 1001 ) { 1002 if (bp->b_cmd == BUF_CMD_READ) { 1003 ++mycpu->gd_cnt.v_swapin; 1004 mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount); 1005 } else { 1006 ++mycpu->gd_cnt.v_swapout; 1007 mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount); 1008 bufx->b_dirtyend = bufx->b_bcount; 1009 } 1010 1011 /* 1012 * Finished with this buf. 1013 */ 1014 KKASSERT(bufx->b_bcount != 0); 1015 if (bufx->b_cmd != BUF_CMD_READ) 1016 bufx->b_dirtyend = bufx->b_bcount; 1017 biox = NULL; 1018 bufx = NULL; 1019 } 1020 1021 /* 1022 * Add new swapblk to biox, instantiating biox if necessary. 1023 * Zero-fill reads are able to take a shortcut. 1024 */ 1025 if (blk == SWAPBLK_NONE) { 1026 /* 1027 * We can only get here if we are reading. Since 1028 * we are at splvm() we can safely modify b_resid, 1029 * even if chain ops are in progress. 1030 */ 1031 bzero(data, PAGE_SIZE); 1032 bp->b_resid -= PAGE_SIZE; 1033 } else { 1034 if (biox == NULL) { 1035 /* XXX chain count > 4, wait to <= 4 */ 1036 1037 bufx = getpbuf(NULL); 1038 biox = &bufx->b_bio1; 1039 cluster_append(nbio, bufx); 1040 bufx->b_flags |= (bufx->b_flags & B_ORDERED); 1041 bufx->b_cmd = bp->b_cmd; 1042 biox->bio_done = swap_chain_iodone; 1043 biox->bio_offset = (off_t)blk << PAGE_SHIFT; 1044 biox->bio_caller_info1.cluster_parent = nbio; 1045 biox_blkno = blk; 1046 bufx->b_bcount = 0; 1047 bufx->b_data = data; 1048 } 1049 bufx->b_bcount += PAGE_SIZE; 1050 } 1051 --count; 1052 ++start; 1053 data += PAGE_SIZE; 1054 } 1055 1056 vm_object_drop(object); 1057 1058 /* 1059 * Flush out last buffer 1060 */ 1061 if (biox) { 1062 if (bufx->b_cmd == BUF_CMD_READ) { 1063 ++mycpu->gd_cnt.v_swapin; 1064 mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount); 1065 } else { 1066 ++mycpu->gd_cnt.v_swapout; 1067 mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount); 1068 bufx->b_dirtyend = bufx->b_bcount; 1069 } 1070 KKASSERT(bufx->b_bcount); 1071 if (bufx->b_cmd != BUF_CMD_READ) 1072 bufx->b_dirtyend = bufx->b_bcount; 1073 /* biox, bufx = NULL */ 1074 } 1075 1076 /* 1077 * Now initiate all the I/O. Be careful looping on our chain as 1078 * I/O's may complete while we are still initiating them. 1079 * 1080 * If the request is a 100% sparse read no bios will be present 1081 * and we just biodone() the buffer. 1082 */ 1083 nbio->bio_caller_info2.cluster_tail = NULL; 1084 bufx = nbio->bio_caller_info1.cluster_head; 1085 1086 if (bufx) { 1087 while (bufx) { 1088 biox = &bufx->b_bio1; 1089 BUF_KERNPROC(bufx); 1090 bufx = bufx->b_cluster_next; 1091 vn_strategy(swapdev_vp, biox); 1092 } 1093 } else { 1094 biodone(bio); 1095 } 1096 1097 /* 1098 * Completion of the cluster will also call biodone_chain(nbio). 1099 * We never call biodone(nbio) so we don't have to worry about 1100 * setting up a bio_done callback. It's handled in the sub-IO. 1101 */ 1102 /**/ 1103 } 1104 1105 /* 1106 * biodone callback 1107 * 1108 * No requirements. 1109 */ 1110 static void 1111 swap_chain_iodone(struct bio *biox) 1112 { 1113 struct buf **nextp; 1114 struct buf *bufx; /* chained sub-buffer */ 1115 struct bio *nbio; /* parent nbio with chain glue */ 1116 struct buf *bp; /* original bp associated with nbio */ 1117 int chain_empty; 1118 1119 bufx = biox->bio_buf; 1120 nbio = biox->bio_caller_info1.cluster_parent; 1121 bp = nbio->bio_buf; 1122 1123 /* 1124 * Update the original buffer 1125 */ 1126 KKASSERT(bp != NULL); 1127 if (bufx->b_flags & B_ERROR) { 1128 atomic_set_int(&bufx->b_flags, B_ERROR); 1129 bp->b_error = bufx->b_error; /* race ok */ 1130 } else if (bufx->b_resid != 0) { 1131 atomic_set_int(&bufx->b_flags, B_ERROR); 1132 bp->b_error = EINVAL; /* race ok */ 1133 } else { 1134 atomic_subtract_int(&bp->b_resid, bufx->b_bcount); 1135 } 1136 1137 /* 1138 * Remove us from the chain. 1139 */ 1140 spin_lock(&bp->b_lock.lk_spinlock); 1141 nextp = &nbio->bio_caller_info1.cluster_head; 1142 while (*nextp != bufx) { 1143 KKASSERT(*nextp != NULL); 1144 nextp = &(*nextp)->b_cluster_next; 1145 } 1146 *nextp = bufx->b_cluster_next; 1147 chain_empty = (nbio->bio_caller_info1.cluster_head == NULL); 1148 spin_unlock(&bp->b_lock.lk_spinlock); 1149 1150 /* 1151 * Clean up bufx. If the chain is now empty we finish out 1152 * the parent. Note that we may be racing other completions 1153 * so we must use the chain_empty status from above. 1154 */ 1155 if (chain_empty) { 1156 if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) { 1157 atomic_set_int(&bp->b_flags, B_ERROR); 1158 bp->b_error = EINVAL; 1159 } 1160 biodone_chain(nbio); 1161 } 1162 relpbuf(bufx, NULL); 1163 } 1164 1165 /* 1166 * SWAP_PAGER_GETPAGES() - bring page in from swap 1167 * 1168 * The requested page may have to be brought in from swap. Calculate the 1169 * swap block and bring in additional pages if possible. All pages must 1170 * have contiguous swap block assignments and reside in the same object. 1171 * 1172 * The caller has a single vm_object_pip_add() reference prior to 1173 * calling us and we should return with the same. 1174 * 1175 * The caller has BUSY'd the page. We should return with (*mpp) left busy, 1176 * and any additinal pages unbusied. 1177 * 1178 * If the caller encounters a PG_RAM page it will pass it to us even though 1179 * it may be valid and dirty. We cannot overwrite the page in this case! 1180 * The case is used to allow us to issue pure read-aheads. 1181 * 1182 * NOTE! XXX This code does not entirely pipeline yet due to the fact that 1183 * the PG_RAM page is validated at the same time as mreq. What we 1184 * really need to do is issue a separate read-ahead pbuf. 1185 * 1186 * No requirements. 1187 */ 1188 static int 1189 swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess) 1190 { 1191 struct buf *bp; 1192 struct bio *bio; 1193 vm_page_t mreq; 1194 vm_page_t m; 1195 vm_offset_t kva; 1196 swblk_t blk; 1197 int i; 1198 int j; 1199 int raonly; 1200 int error; 1201 u_int32_t flags; 1202 vm_page_t marray[XIO_INTERNAL_PAGES]; 1203 1204 mreq = *mpp; 1205 1206 vm_object_hold(object); 1207 if (mreq->object != object) { 1208 panic("swap_pager_getpages: object mismatch %p/%p", 1209 object, 1210 mreq->object 1211 ); 1212 } 1213 1214 /* 1215 * We don't want to overwrite a fully valid page as it might be 1216 * dirty. This case can occur when e.g. vm_fault hits a perfectly 1217 * valid page with PG_RAM set. 1218 * 1219 * In this case we see if the next page is a suitable page-in 1220 * candidate and if it is we issue read-ahead. PG_RAM will be 1221 * set on the last page of the read-ahead to continue the pipeline. 1222 */ 1223 if (mreq->valid == VM_PAGE_BITS_ALL) { 1224 if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size) { 1225 vm_object_drop(object); 1226 return(VM_PAGER_OK); 1227 } 1228 blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0); 1229 if (blk == SWAPBLK_NONE) { 1230 vm_object_drop(object); 1231 return(VM_PAGER_OK); 1232 } 1233 m = vm_page_lookup_busy_try(object, mreq->pindex + 1, 1234 TRUE, &error); 1235 if (error) { 1236 vm_object_drop(object); 1237 return(VM_PAGER_OK); 1238 } else if (m == NULL) { 1239 /* 1240 * Use VM_ALLOC_QUICK to avoid blocking on cache 1241 * page reuse. 1242 */ 1243 m = vm_page_alloc(object, mreq->pindex + 1, 1244 VM_ALLOC_QUICK); 1245 if (m == NULL) { 1246 vm_object_drop(object); 1247 return(VM_PAGER_OK); 1248 } 1249 } else { 1250 if (m->valid) { 1251 vm_page_wakeup(m); 1252 vm_object_drop(object); 1253 return(VM_PAGER_OK); 1254 } 1255 vm_page_unqueue_nowakeup(m); 1256 } 1257 /* page is busy */ 1258 mreq = m; 1259 raonly = 1; 1260 } else { 1261 raonly = 0; 1262 } 1263 1264 /* 1265 * Try to block-read contiguous pages from swap if sequential, 1266 * otherwise just read one page. Contiguous pages from swap must 1267 * reside within a single device stripe because the I/O cannot be 1268 * broken up across multiple stripes. 1269 * 1270 * Note that blk and iblk can be SWAPBLK_NONE but the loop is 1271 * set up such that the case(s) are handled implicitly. 1272 */ 1273 blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0); 1274 marray[0] = mreq; 1275 1276 for (i = 1; swap_burst_read && 1277 i < XIO_INTERNAL_PAGES && 1278 mreq->pindex + i < object->size; ++i) { 1279 swblk_t iblk; 1280 1281 iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0); 1282 if (iblk != blk + i) 1283 break; 1284 if ((blk ^ iblk) & dmmax_mask) 1285 break; 1286 m = vm_page_lookup_busy_try(object, mreq->pindex + i, 1287 TRUE, &error); 1288 if (error) { 1289 break; 1290 } else if (m == NULL) { 1291 /* 1292 * Use VM_ALLOC_QUICK to avoid blocking on cache 1293 * page reuse. 1294 */ 1295 m = vm_page_alloc(object, mreq->pindex + i, 1296 VM_ALLOC_QUICK); 1297 if (m == NULL) 1298 break; 1299 } else { 1300 if (m->valid) { 1301 vm_page_wakeup(m); 1302 break; 1303 } 1304 vm_page_unqueue_nowakeup(m); 1305 } 1306 /* page is busy */ 1307 marray[i] = m; 1308 } 1309 if (i > 1) 1310 vm_page_flag_set(marray[i - 1], PG_RAM); 1311 1312 /* 1313 * If mreq is the requested page and we have nothing to do return 1314 * VM_PAGER_FAIL. If raonly is set mreq is just another read-ahead 1315 * page and must be cleaned up. 1316 */ 1317 if (blk == SWAPBLK_NONE) { 1318 KKASSERT(i == 1); 1319 if (raonly) { 1320 vnode_pager_freepage(mreq); 1321 vm_object_drop(object); 1322 return(VM_PAGER_OK); 1323 } else { 1324 vm_object_drop(object); 1325 return(VM_PAGER_FAIL); 1326 } 1327 } 1328 1329 /* 1330 * map our page(s) into kva for input 1331 */ 1332 bp = getpbuf_kva(&nsw_rcount); 1333 bio = &bp->b_bio1; 1334 kva = (vm_offset_t) bp->b_kvabase; 1335 bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t)); 1336 pmap_qenter(kva, bp->b_xio.xio_pages, i); 1337 1338 bp->b_data = (caddr_t)kva; 1339 bp->b_bcount = PAGE_SIZE * i; 1340 bp->b_xio.xio_npages = i; 1341 bio->bio_done = swp_pager_async_iodone; 1342 bio->bio_offset = (off_t)blk << PAGE_SHIFT; 1343 bio->bio_caller_info1.index = SWBIO_READ; 1344 1345 /* 1346 * Set index. If raonly set the index beyond the array so all 1347 * the pages are treated the same, otherwise the original mreq is 1348 * at index 0. 1349 */ 1350 if (raonly) 1351 bio->bio_driver_info = (void *)(intptr_t)i; 1352 else 1353 bio->bio_driver_info = (void *)(intptr_t)0; 1354 1355 for (j = 0; j < i; ++j) 1356 vm_page_flag_set(bp->b_xio.xio_pages[j], PG_SWAPINPROG); 1357 1358 mycpu->gd_cnt.v_swapin++; 1359 mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages; 1360 1361 /* 1362 * We still hold the lock on mreq, and our automatic completion routine 1363 * does not remove it. 1364 */ 1365 vm_object_pip_add(object, bp->b_xio.xio_npages); 1366 1367 /* 1368 * perform the I/O. NOTE!!! bp cannot be considered valid after 1369 * this point because we automatically release it on completion. 1370 * Instead, we look at the one page we are interested in which we 1371 * still hold a lock on even through the I/O completion. 1372 * 1373 * The other pages in our m[] array are also released on completion, 1374 * so we cannot assume they are valid anymore either. 1375 */ 1376 bp->b_cmd = BUF_CMD_READ; 1377 BUF_KERNPROC(bp); 1378 vn_strategy(swapdev_vp, bio); 1379 1380 /* 1381 * Wait for the page we want to complete. PG_SWAPINPROG is always 1382 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE 1383 * is set in the meta-data. 1384 * 1385 * If this is a read-ahead only we return immediately without 1386 * waiting for I/O. 1387 */ 1388 if (raonly) { 1389 vm_object_drop(object); 1390 return(VM_PAGER_OK); 1391 } 1392 1393 /* 1394 * Read-ahead includes originally requested page case. 1395 */ 1396 for (;;) { 1397 flags = mreq->flags; 1398 cpu_ccfence(); 1399 if ((flags & PG_SWAPINPROG) == 0) 1400 break; 1401 tsleep_interlock(mreq, 0); 1402 if (!atomic_cmpset_int(&mreq->flags, flags, 1403 flags | PG_WANTED | PG_REFERENCED)) { 1404 continue; 1405 } 1406 mycpu->gd_cnt.v_intrans++; 1407 if (tsleep(mreq, PINTERLOCKED, "swread", hz*20)) { 1408 kprintf( 1409 "swap_pager: indefinite wait buffer: " 1410 " offset: %lld, size: %ld\n", 1411 (long long)bio->bio_offset, 1412 (long)bp->b_bcount 1413 ); 1414 } 1415 } 1416 1417 /* 1418 * mreq is left bussied after completion, but all the other pages 1419 * are freed. If we had an unrecoverable read error the page will 1420 * not be valid. 1421 */ 1422 vm_object_drop(object); 1423 if (mreq->valid != VM_PAGE_BITS_ALL) 1424 return(VM_PAGER_ERROR); 1425 else 1426 return(VM_PAGER_OK); 1427 1428 /* 1429 * A final note: in a low swap situation, we cannot deallocate swap 1430 * and mark a page dirty here because the caller is likely to mark 1431 * the page clean when we return, causing the page to possibly revert 1432 * to all-zero's later. 1433 */ 1434 } 1435 1436 /* 1437 * swap_pager_putpages: 1438 * 1439 * Assign swap (if necessary) and initiate I/O on the specified pages. 1440 * 1441 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects 1442 * are automatically converted to SWAP objects. 1443 * 1444 * In a low memory situation we may block in vn_strategy(), but the new 1445 * vm_page reservation system coupled with properly written VFS devices 1446 * should ensure that no low-memory deadlock occurs. This is an area 1447 * which needs work. 1448 * 1449 * The parent has N vm_object_pip_add() references prior to 1450 * calling us and will remove references for rtvals[] that are 1451 * not set to VM_PAGER_PEND. We need to remove the rest on I/O 1452 * completion. 1453 * 1454 * The parent has soft-busy'd the pages it passes us and will unbusy 1455 * those whos rtvals[] entry is not set to VM_PAGER_PEND on return. 1456 * We need to unbusy the rest on I/O completion. 1457 * 1458 * No requirements. 1459 */ 1460 void 1461 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count, 1462 boolean_t sync, int *rtvals) 1463 { 1464 int i; 1465 int n = 0; 1466 1467 vm_object_hold(object); 1468 1469 if (count && m[0]->object != object) { 1470 panic("swap_pager_getpages: object mismatch %p/%p", 1471 object, 1472 m[0]->object 1473 ); 1474 } 1475 1476 /* 1477 * Step 1 1478 * 1479 * Turn object into OBJT_SWAP 1480 * check for bogus sysops 1481 * force sync if not pageout process 1482 */ 1483 if (object->type == OBJT_DEFAULT) { 1484 if (object->type == OBJT_DEFAULT) 1485 swp_pager_meta_convert(object); 1486 } 1487 1488 if (curthread != pagethread) 1489 sync = TRUE; 1490 1491 /* 1492 * Step 2 1493 * 1494 * Update nsw parameters from swap_async_max sysctl values. 1495 * Do not let the sysop crash the machine with bogus numbers. 1496 */ 1497 1498 if (swap_async_max != nsw_wcount_async_max) { 1499 int n; 1500 1501 /* 1502 * limit range 1503 */ 1504 if ((n = swap_async_max) > nswbuf / 2) 1505 n = nswbuf / 2; 1506 if (n < 1) 1507 n = 1; 1508 swap_async_max = n; 1509 1510 /* 1511 * Adjust difference ( if possible ). If the current async 1512 * count is too low, we may not be able to make the adjustment 1513 * at this time. 1514 * 1515 * vm_token needed for nsw_wcount sleep interlock 1516 */ 1517 lwkt_gettoken(&vm_token); 1518 n -= nsw_wcount_async_max; 1519 if (nsw_wcount_async + n >= 0) { 1520 nsw_wcount_async += n; 1521 nsw_wcount_async_max += n; 1522 wakeup(&nsw_wcount_async); 1523 } 1524 lwkt_reltoken(&vm_token); 1525 } 1526 1527 /* 1528 * Step 3 1529 * 1530 * Assign swap blocks and issue I/O. We reallocate swap on the fly. 1531 * The page is left dirty until the pageout operation completes 1532 * successfully. 1533 */ 1534 1535 for (i = 0; i < count; i += n) { 1536 struct buf *bp; 1537 struct bio *bio; 1538 swblk_t blk; 1539 int j; 1540 1541 /* 1542 * Maximum I/O size is limited by a number of factors. 1543 */ 1544 1545 n = min(BLIST_MAX_ALLOC, count - i); 1546 n = min(n, nsw_cluster_max); 1547 1548 lwkt_gettoken(&vm_token); 1549 1550 /* 1551 * Get biggest block of swap we can. If we fail, fall 1552 * back and try to allocate a smaller block. Don't go 1553 * overboard trying to allocate space if it would overly 1554 * fragment swap. 1555 */ 1556 while ( 1557 (blk = swp_pager_getswapspace(object, n)) == SWAPBLK_NONE && 1558 n > 4 1559 ) { 1560 n >>= 1; 1561 } 1562 if (blk == SWAPBLK_NONE) { 1563 for (j = 0; j < n; ++j) 1564 rtvals[i+j] = VM_PAGER_FAIL; 1565 lwkt_reltoken(&vm_token); 1566 continue; 1567 } 1568 1569 /* 1570 * The I/O we are constructing cannot cross a physical 1571 * disk boundry in the swap stripe. Note: we are still 1572 * at splvm(). 1573 */ 1574 if ((blk ^ (blk + n)) & dmmax_mask) { 1575 j = ((blk + dmmax) & dmmax_mask) - blk; 1576 swp_pager_freeswapspace(object, blk + j, n - j); 1577 n = j; 1578 } 1579 1580 /* 1581 * All I/O parameters have been satisfied, build the I/O 1582 * request and assign the swap space. 1583 */ 1584 if (sync == TRUE) 1585 bp = getpbuf_kva(&nsw_wcount_sync); 1586 else 1587 bp = getpbuf_kva(&nsw_wcount_async); 1588 bio = &bp->b_bio1; 1589 1590 lwkt_reltoken(&vm_token); 1591 1592 pmap_qenter((vm_offset_t)bp->b_data, &m[i], n); 1593 1594 bp->b_bcount = PAGE_SIZE * n; 1595 bio->bio_offset = (off_t)blk << PAGE_SHIFT; 1596 1597 for (j = 0; j < n; ++j) { 1598 vm_page_t mreq = m[i+j]; 1599 1600 swp_pager_meta_build(mreq->object, mreq->pindex, 1601 blk + j); 1602 if (object->type == OBJT_SWAP) 1603 vm_page_dirty(mreq); 1604 rtvals[i+j] = VM_PAGER_OK; 1605 1606 vm_page_flag_set(mreq, PG_SWAPINPROG); 1607 bp->b_xio.xio_pages[j] = mreq; 1608 } 1609 bp->b_xio.xio_npages = n; 1610 1611 mycpu->gd_cnt.v_swapout++; 1612 mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages; 1613 1614 bp->b_dirtyoff = 0; /* req'd for NFS */ 1615 bp->b_dirtyend = bp->b_bcount; /* req'd for NFS */ 1616 bp->b_cmd = BUF_CMD_WRITE; 1617 bio->bio_caller_info1.index = SWBIO_WRITE; 1618 1619 /* 1620 * asynchronous 1621 */ 1622 if (sync == FALSE) { 1623 bio->bio_done = swp_pager_async_iodone; 1624 BUF_KERNPROC(bp); 1625 vn_strategy(swapdev_vp, bio); 1626 1627 for (j = 0; j < n; ++j) 1628 rtvals[i+j] = VM_PAGER_PEND; 1629 continue; 1630 } 1631 1632 /* 1633 * Issue synchrnously. 1634 * 1635 * Wait for the sync I/O to complete, then update rtvals. 1636 * We just set the rtvals[] to VM_PAGER_PEND so we can call 1637 * our async completion routine at the end, thus avoiding a 1638 * double-free. 1639 */ 1640 bio->bio_caller_info1.index |= SWBIO_SYNC; 1641 bio->bio_done = biodone_sync; 1642 bio->bio_flags |= BIO_SYNC; 1643 vn_strategy(swapdev_vp, bio); 1644 biowait(bio, "swwrt"); 1645 1646 for (j = 0; j < n; ++j) 1647 rtvals[i+j] = VM_PAGER_PEND; 1648 1649 /* 1650 * Now that we are through with the bp, we can call the 1651 * normal async completion, which frees everything up. 1652 */ 1653 swp_pager_async_iodone(bio); 1654 } 1655 vm_object_drop(object); 1656 } 1657 1658 /* 1659 * No requirements. 1660 */ 1661 void 1662 swap_pager_newswap(void) 1663 { 1664 swp_sizecheck(); 1665 } 1666 1667 /* 1668 * swp_pager_async_iodone: 1669 * 1670 * Completion routine for asynchronous reads and writes from/to swap. 1671 * Also called manually by synchronous code to finish up a bp. 1672 * 1673 * For READ operations, the pages are PG_BUSY'd. For WRITE operations, 1674 * the pages are vm_page_t->busy'd. For READ operations, we PG_BUSY 1675 * unbusy all pages except the 'main' request page. For WRITE 1676 * operations, we vm_page_t->busy'd unbusy all pages ( we can do this 1677 * because we marked them all VM_PAGER_PEND on return from putpages ). 1678 * 1679 * This routine may not block. 1680 * 1681 * No requirements. 1682 */ 1683 static void 1684 swp_pager_async_iodone(struct bio *bio) 1685 { 1686 struct buf *bp = bio->bio_buf; 1687 vm_object_t object = NULL; 1688 int i; 1689 int *nswptr; 1690 1691 /* 1692 * report error 1693 */ 1694 if (bp->b_flags & B_ERROR) { 1695 kprintf( 1696 "swap_pager: I/O error - %s failed; offset %lld," 1697 "size %ld, error %d\n", 1698 ((bio->bio_caller_info1.index & SWBIO_READ) ? 1699 "pagein" : "pageout"), 1700 (long long)bio->bio_offset, 1701 (long)bp->b_bcount, 1702 bp->b_error 1703 ); 1704 } 1705 1706 /* 1707 * set object, raise to splvm(). 1708 */ 1709 if (bp->b_xio.xio_npages) 1710 object = bp->b_xio.xio_pages[0]->object; 1711 1712 /* 1713 * remove the mapping for kernel virtual 1714 */ 1715 pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages); 1716 1717 /* 1718 * cleanup pages. If an error occurs writing to swap, we are in 1719 * very serious trouble. If it happens to be a disk error, though, 1720 * we may be able to recover by reassigning the swap later on. So 1721 * in this case we remove the m->swapblk assignment for the page 1722 * but do not free it in the rlist. The errornous block(s) are thus 1723 * never reallocated as swap. Redirty the page and continue. 1724 */ 1725 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 1726 vm_page_t m = bp->b_xio.xio_pages[i]; 1727 1728 if (bp->b_flags & B_ERROR) { 1729 /* 1730 * If an error occurs I'd love to throw the swapblk 1731 * away without freeing it back to swapspace, so it 1732 * can never be used again. But I can't from an 1733 * interrupt. 1734 */ 1735 1736 if (bio->bio_caller_info1.index & SWBIO_READ) { 1737 /* 1738 * When reading, reqpage needs to stay 1739 * locked for the parent, but all other 1740 * pages can be freed. We still want to 1741 * wakeup the parent waiting on the page, 1742 * though. ( also: pg_reqpage can be -1 and 1743 * not match anything ). 1744 * 1745 * We have to wake specifically requested pages 1746 * up too because we cleared PG_SWAPINPROG and 1747 * someone may be waiting for that. 1748 * 1749 * NOTE: for reads, m->dirty will probably 1750 * be overridden by the original caller of 1751 * getpages so don't play cute tricks here. 1752 * 1753 * NOTE: We can't actually free the page from 1754 * here, because this is an interrupt. It 1755 * is not legal to mess with object->memq 1756 * from an interrupt. Deactivate the page 1757 * instead. 1758 */ 1759 1760 m->valid = 0; 1761 vm_page_flag_clear(m, PG_ZERO); 1762 vm_page_flag_clear(m, PG_SWAPINPROG); 1763 1764 /* 1765 * bio_driver_info holds the requested page 1766 * index. 1767 */ 1768 if (i != (int)(intptr_t)bio->bio_driver_info) { 1769 vm_page_deactivate(m); 1770 vm_page_wakeup(m); 1771 } else { 1772 vm_page_flash(m); 1773 } 1774 /* 1775 * If i == bp->b_pager.pg_reqpage, do not wake 1776 * the page up. The caller needs to. 1777 */ 1778 } else { 1779 /* 1780 * If a write error occurs remove the swap 1781 * assignment (note that PG_SWAPPED may or 1782 * may not be set depending on prior activity). 1783 * 1784 * Re-dirty OBJT_SWAP pages as there is no 1785 * other backing store, we can't throw the 1786 * page away. 1787 * 1788 * Non-OBJT_SWAP pages (aka swapcache) must 1789 * not be dirtied since they may not have 1790 * been dirty in the first place, and they 1791 * do have backing store (the vnode). 1792 */ 1793 vm_page_busy_wait(m, FALSE, "swadpg"); 1794 swp_pager_meta_ctl(m->object, m->pindex, 1795 SWM_FREE); 1796 vm_page_flag_clear(m, PG_SWAPPED); 1797 if (m->object->type == OBJT_SWAP) { 1798 vm_page_dirty(m); 1799 vm_page_activate(m); 1800 } 1801 vm_page_flag_clear(m, PG_SWAPINPROG); 1802 vm_page_io_finish(m); 1803 vm_page_wakeup(m); 1804 } 1805 } else if (bio->bio_caller_info1.index & SWBIO_READ) { 1806 /* 1807 * NOTE: for reads, m->dirty will probably be 1808 * overridden by the original caller of getpages so 1809 * we cannot set them in order to free the underlying 1810 * swap in a low-swap situation. I don't think we'd 1811 * want to do that anyway, but it was an optimization 1812 * that existed in the old swapper for a time before 1813 * it got ripped out due to precisely this problem. 1814 * 1815 * clear PG_ZERO in page. 1816 * 1817 * If not the requested page then deactivate it. 1818 * 1819 * Note that the requested page, reqpage, is left 1820 * busied, but we still have to wake it up. The 1821 * other pages are released (unbusied) by 1822 * vm_page_wakeup(). We do not set reqpage's 1823 * valid bits here, it is up to the caller. 1824 */ 1825 1826 /* 1827 * NOTE: can't call pmap_clear_modify(m) from an 1828 * interrupt thread, the pmap code may have to map 1829 * non-kernel pmaps and currently asserts the case. 1830 */ 1831 /*pmap_clear_modify(m);*/ 1832 m->valid = VM_PAGE_BITS_ALL; 1833 vm_page_undirty(m); 1834 vm_page_flag_clear(m, PG_ZERO | PG_SWAPINPROG); 1835 vm_page_flag_set(m, PG_SWAPPED); 1836 1837 /* 1838 * We have to wake specifically requested pages 1839 * up too because we cleared PG_SWAPINPROG and 1840 * could be waiting for it in getpages. However, 1841 * be sure to not unbusy getpages specifically 1842 * requested page - getpages expects it to be 1843 * left busy. 1844 * 1845 * bio_driver_info holds the requested page 1846 */ 1847 if (i != (int)(intptr_t)bio->bio_driver_info) { 1848 vm_page_deactivate(m); 1849 vm_page_wakeup(m); 1850 } else { 1851 vm_page_flash(m); 1852 } 1853 } else { 1854 /* 1855 * Mark the page clean but do not mess with the 1856 * pmap-layer's modified state. That state should 1857 * also be clear since the caller protected the 1858 * page VM_PROT_READ, but allow the case. 1859 * 1860 * We are in an interrupt, avoid pmap operations. 1861 * 1862 * If we have a severe page deficit, deactivate the 1863 * page. Do not try to cache it (which would also 1864 * involve a pmap op), because the page might still 1865 * be read-heavy. 1866 * 1867 * When using the swap to cache clean vnode pages 1868 * we do not mess with the page dirty bits. 1869 */ 1870 vm_page_busy_wait(m, FALSE, "swadpg"); 1871 if (m->object->type == OBJT_SWAP) 1872 vm_page_undirty(m); 1873 vm_page_flag_clear(m, PG_SWAPINPROG); 1874 vm_page_flag_set(m, PG_SWAPPED); 1875 if (vm_page_count_severe()) 1876 vm_page_deactivate(m); 1877 #if 0 1878 if (!vm_page_count_severe() || !vm_page_try_to_cache(m)) 1879 vm_page_protect(m, VM_PROT_READ); 1880 #endif 1881 vm_page_io_finish(m); 1882 vm_page_wakeup(m); 1883 } 1884 } 1885 1886 /* 1887 * adjust pip. NOTE: the original parent may still have its own 1888 * pip refs on the object. 1889 */ 1890 1891 if (object) 1892 vm_object_pip_wakeup_n(object, bp->b_xio.xio_npages); 1893 1894 /* 1895 * Release the physical I/O buffer. 1896 * 1897 * NOTE: Due to synchronous operations in the write case b_cmd may 1898 * already be set to BUF_CMD_DONE and BIO_SYNC may have already 1899 * been cleared. 1900 * 1901 * Use vm_token to interlock nsw_rcount/wcount wakeup? 1902 */ 1903 lwkt_gettoken(&vm_token); 1904 if (bio->bio_caller_info1.index & SWBIO_READ) 1905 nswptr = &nsw_rcount; 1906 else if (bio->bio_caller_info1.index & SWBIO_SYNC) 1907 nswptr = &nsw_wcount_sync; 1908 else 1909 nswptr = &nsw_wcount_async; 1910 bp->b_cmd = BUF_CMD_DONE; 1911 relpbuf(bp, nswptr); 1912 lwkt_reltoken(&vm_token); 1913 } 1914 1915 /* 1916 * Fault-in a potentially swapped page and remove the swap reference. 1917 * 1918 * object must be held. 1919 */ 1920 static __inline void 1921 swp_pager_fault_page(vm_object_t object, vm_pindex_t pindex) 1922 { 1923 struct vnode *vp; 1924 vm_page_t m; 1925 int error; 1926 1927 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 1928 1929 if (object->type == OBJT_VNODE) { 1930 /* 1931 * Any swap related to a vnode is due to swapcache. We must 1932 * vget() the vnode in case it is not active (otherwise 1933 * vref() will panic). Calling vm_object_page_remove() will 1934 * ensure that any swap ref is removed interlocked with the 1935 * page. clean_only is set to TRUE so we don't throw away 1936 * dirty pages. 1937 */ 1938 vp = object->handle; 1939 error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE); 1940 if (error == 0) { 1941 vm_object_page_remove(object, pindex, pindex + 1, TRUE); 1942 vput(vp); 1943 } 1944 } else { 1945 /* 1946 * Otherwise it is a normal OBJT_SWAP object and we can 1947 * fault the page in and remove the swap. 1948 */ 1949 m = vm_fault_object_page(object, IDX_TO_OFF(pindex), 1950 VM_PROT_NONE, 1951 VM_FAULT_DIRTY | VM_FAULT_UNSWAP, 1952 &error); 1953 if (m) 1954 vm_page_unhold(m); 1955 } 1956 } 1957 1958 int 1959 swap_pager_swapoff(int devidx) 1960 { 1961 vm_object_t object; 1962 struct swblock *swap; 1963 swblk_t v; 1964 int i; 1965 1966 lwkt_gettoken(&vmobj_token); 1967 rescan: 1968 TAILQ_FOREACH(object, &vm_object_list, object_list) { 1969 if (object->type != OBJT_SWAP && object->type != OBJT_VNODE) 1970 continue; 1971 vm_object_hold(object); 1972 if (object->type == OBJT_SWAP || object->type == OBJT_VNODE) { 1973 RB_FOREACH(swap, 1974 swblock_rb_tree, &object->swblock_root) { 1975 for (i = 0; i < SWAP_META_PAGES; ++i) { 1976 v = swap->swb_pages[i]; 1977 if (v != SWAPBLK_NONE && 1978 BLK2DEVIDX(v) == devidx) { 1979 swp_pager_fault_page( 1980 object, 1981 swap->swb_index + i); 1982 vm_object_drop(object); 1983 goto rescan; 1984 } 1985 } 1986 } 1987 } 1988 vm_object_drop(object); 1989 } 1990 lwkt_reltoken(&vmobj_token); 1991 1992 /* 1993 * If we fail to locate all swblocks we just fail gracefully and 1994 * do not bother to restore paging on the swap device. If the 1995 * user wants to retry the user can retry. 1996 */ 1997 if (swdevt[devidx].sw_nused) 1998 return (1); 1999 else 2000 return (0); 2001 } 2002 2003 /************************************************************************ 2004 * SWAP META DATA * 2005 ************************************************************************ 2006 * 2007 * These routines manipulate the swap metadata stored in the 2008 * OBJT_SWAP object. All swp_*() routines must be called at 2009 * splvm() because swap can be freed up by the low level vm_page 2010 * code which might be called from interrupts beyond what splbio() covers. 2011 * 2012 * Swap metadata is implemented with a global hash and not directly 2013 * linked into the object. Instead the object simply contains 2014 * appropriate tracking counters. 2015 */ 2016 2017 /* 2018 * Lookup the swblock containing the specified swap block index. 2019 * 2020 * The caller must hold the object. 2021 */ 2022 static __inline 2023 struct swblock * 2024 swp_pager_lookup(vm_object_t object, vm_pindex_t index) 2025 { 2026 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2027 index &= ~(vm_pindex_t)SWAP_META_MASK; 2028 return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index)); 2029 } 2030 2031 /* 2032 * Remove a swblock from the RB tree. 2033 * 2034 * The caller must hold the object. 2035 */ 2036 static __inline 2037 void 2038 swp_pager_remove(vm_object_t object, struct swblock *swap) 2039 { 2040 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2041 RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap); 2042 } 2043 2044 /* 2045 * Convert default object to swap object if necessary 2046 * 2047 * The caller must hold the object. 2048 */ 2049 static void 2050 swp_pager_meta_convert(vm_object_t object) 2051 { 2052 if (object->type == OBJT_DEFAULT) { 2053 object->type = OBJT_SWAP; 2054 KKASSERT(object->swblock_count == 0); 2055 } 2056 } 2057 2058 /* 2059 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object 2060 * 2061 * We first convert the object to a swap object if it is a default 2062 * object. Vnode objects do not need to be converted. 2063 * 2064 * The specified swapblk is added to the object's swap metadata. If 2065 * the swapblk is not valid, it is freed instead. Any previously 2066 * assigned swapblk is freed. 2067 * 2068 * The caller must hold the object. 2069 */ 2070 static void 2071 swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk) 2072 { 2073 struct swblock *swap; 2074 struct swblock *oswap; 2075 vm_pindex_t v; 2076 2077 KKASSERT(swapblk != SWAPBLK_NONE); 2078 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2079 2080 /* 2081 * Convert object if necessary 2082 */ 2083 if (object->type == OBJT_DEFAULT) 2084 swp_pager_meta_convert(object); 2085 2086 /* 2087 * Locate swblock. If not found create, but if we aren't adding 2088 * anything just return. If we run out of space in the map we wait 2089 * and, since the hash table may have changed, retry. 2090 */ 2091 retry: 2092 swap = swp_pager_lookup(object, index); 2093 2094 if (swap == NULL) { 2095 int i; 2096 2097 swap = zalloc(swap_zone); 2098 if (swap == NULL) { 2099 vm_wait(0); 2100 goto retry; 2101 } 2102 swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK; 2103 swap->swb_count = 0; 2104 2105 ++object->swblock_count; 2106 2107 for (i = 0; i < SWAP_META_PAGES; ++i) 2108 swap->swb_pages[i] = SWAPBLK_NONE; 2109 oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap); 2110 KKASSERT(oswap == NULL); 2111 } 2112 2113 /* 2114 * Delete prior contents of metadata. 2115 * 2116 * NOTE: Decrement swb_count after the freeing operation (which 2117 * might block) to prevent racing destruction of the swblock. 2118 */ 2119 index &= SWAP_META_MASK; 2120 2121 while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) { 2122 swap->swb_pages[index] = SWAPBLK_NONE; 2123 /* can block */ 2124 swp_pager_freeswapspace(object, v, 1); 2125 --swap->swb_count; 2126 } 2127 2128 /* 2129 * Enter block into metadata 2130 */ 2131 swap->swb_pages[index] = swapblk; 2132 if (swapblk != SWAPBLK_NONE) 2133 ++swap->swb_count; 2134 } 2135 2136 /* 2137 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata 2138 * 2139 * The requested range of blocks is freed, with any associated swap 2140 * returned to the swap bitmap. 2141 * 2142 * This routine will free swap metadata structures as they are cleaned 2143 * out. This routine does *NOT* operate on swap metadata associated 2144 * with resident pages. 2145 * 2146 * The caller must hold the object. 2147 */ 2148 static int swp_pager_meta_free_callback(struct swblock *swb, void *data); 2149 2150 static void 2151 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count) 2152 { 2153 struct swfreeinfo info; 2154 2155 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2156 2157 /* 2158 * Nothing to do 2159 */ 2160 if (object->swblock_count == 0) { 2161 KKASSERT(RB_EMPTY(&object->swblock_root)); 2162 return; 2163 } 2164 if (count == 0) 2165 return; 2166 2167 /* 2168 * Setup for RB tree scan. Note that the pindex range can be huge 2169 * due to the 64 bit page index space so we cannot safely iterate. 2170 */ 2171 info.object = object; 2172 info.basei = index & ~(vm_pindex_t)SWAP_META_MASK; 2173 info.begi = index; 2174 info.endi = index + count - 1; 2175 swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp, 2176 swp_pager_meta_free_callback, &info); 2177 } 2178 2179 /* 2180 * The caller must hold the object. 2181 */ 2182 static 2183 int 2184 swp_pager_meta_free_callback(struct swblock *swap, void *data) 2185 { 2186 struct swfreeinfo *info = data; 2187 vm_object_t object = info->object; 2188 int index; 2189 int eindex; 2190 2191 /* 2192 * Figure out the range within the swblock. The wider scan may 2193 * return edge-case swap blocks when the start and/or end points 2194 * are in the middle of a block. 2195 */ 2196 if (swap->swb_index < info->begi) 2197 index = (int)info->begi & SWAP_META_MASK; 2198 else 2199 index = 0; 2200 2201 if (swap->swb_index + SWAP_META_PAGES > info->endi) 2202 eindex = (int)info->endi & SWAP_META_MASK; 2203 else 2204 eindex = SWAP_META_MASK; 2205 2206 /* 2207 * Scan and free the blocks. The loop terminates early 2208 * if (swap) runs out of blocks and could be freed. 2209 * 2210 * NOTE: Decrement swb_count after swp_pager_freeswapspace() 2211 * to deal with a zfree race. 2212 */ 2213 while (index <= eindex) { 2214 swblk_t v = swap->swb_pages[index]; 2215 2216 if (v != SWAPBLK_NONE) { 2217 swap->swb_pages[index] = SWAPBLK_NONE; 2218 /* can block */ 2219 swp_pager_freeswapspace(object, v, 1); 2220 if (--swap->swb_count == 0) { 2221 swp_pager_remove(object, swap); 2222 zfree(swap_zone, swap); 2223 --object->swblock_count; 2224 break; 2225 } 2226 } 2227 ++index; 2228 } 2229 /* swap may be invalid here due to zfree above */ 2230 return(0); 2231 } 2232 2233 /* 2234 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object 2235 * 2236 * This routine locates and destroys all swap metadata associated with 2237 * an object. 2238 * 2239 * NOTE: Decrement swb_count after the freeing operation (which 2240 * might block) to prevent racing destruction of the swblock. 2241 * 2242 * The caller must hold the object. 2243 */ 2244 static void 2245 swp_pager_meta_free_all(vm_object_t object) 2246 { 2247 struct swblock *swap; 2248 int i; 2249 2250 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 2251 2252 while ((swap = RB_ROOT(&object->swblock_root)) != NULL) { 2253 swp_pager_remove(object, swap); 2254 for (i = 0; i < SWAP_META_PAGES; ++i) { 2255 swblk_t v = swap->swb_pages[i]; 2256 if (v != SWAPBLK_NONE) { 2257 /* can block */ 2258 swp_pager_freeswapspace(object, v, 1); 2259 --swap->swb_count; 2260 } 2261 } 2262 if (swap->swb_count != 0) 2263 panic("swap_pager_meta_free_all: swb_count != 0"); 2264 zfree(swap_zone, swap); 2265 --object->swblock_count; 2266 } 2267 KKASSERT(object->swblock_count == 0); 2268 } 2269 2270 /* 2271 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data. 2272 * 2273 * This routine is capable of looking up, popping, or freeing 2274 * swapblk assignments in the swap meta data or in the vm_page_t. 2275 * The routine typically returns the swapblk being looked-up, or popped, 2276 * or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block 2277 * was invalid. This routine will automatically free any invalid 2278 * meta-data swapblks. 2279 * 2280 * It is not possible to store invalid swapblks in the swap meta data 2281 * (other then a literal 'SWAPBLK_NONE'), so we don't bother checking. 2282 * 2283 * When acting on a busy resident page and paging is in progress, we 2284 * have to wait until paging is complete but otherwise can act on the 2285 * busy page. 2286 * 2287 * SWM_FREE remove and free swap block from metadata 2288 * SWM_POP remove from meta data but do not free.. pop it out 2289 * 2290 * The caller must hold the object. 2291 */ 2292 static swblk_t 2293 swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags) 2294 { 2295 struct swblock *swap; 2296 swblk_t r1; 2297 2298 if (object->swblock_count == 0) 2299 return(SWAPBLK_NONE); 2300 2301 r1 = SWAPBLK_NONE; 2302 swap = swp_pager_lookup(object, index); 2303 2304 if (swap != NULL) { 2305 index &= SWAP_META_MASK; 2306 r1 = swap->swb_pages[index]; 2307 2308 if (r1 != SWAPBLK_NONE) { 2309 if (flags & (SWM_FREE|SWM_POP)) { 2310 swap->swb_pages[index] = SWAPBLK_NONE; 2311 if (--swap->swb_count == 0) { 2312 swp_pager_remove(object, swap); 2313 zfree(swap_zone, swap); 2314 --object->swblock_count; 2315 } 2316 } 2317 /* swap ptr may be invalid */ 2318 if (flags & SWM_FREE) { 2319 swp_pager_freeswapspace(object, r1, 1); 2320 r1 = SWAPBLK_NONE; 2321 } 2322 } 2323 /* swap ptr may be invalid */ 2324 } 2325 return(r1); 2326 } 2327