/*
 * Copyright (c) 1998,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 * $DragonFly: src/sys/vm/swap_pager.c,v 1.32 2008/07/01 02:02:56 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/thread2.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER	16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

#define AUTOCHAINDONE	((struct buf *)(intptr_t)-1)

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */

extern int vm_swap_size;	/* number of free swap blocks, in pages */

int swap_pager_full;		/* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
static int sw_alloc_interlock;	/* swap pager allocation interlock	*/

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/

extern struct vnode *swapdev_vp;	/* from vm_swap.c */

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct pagerlst	swap_pager_object_list[NOBJLISTS];
struct pagerlst		swap_pager_un_object_list;
vm_zone_t		swap_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static vm_object_t
		swap_pager_alloc (void *handle, off_t size,
				  vm_prot_t prot, off_t offset);
static void	swap_pager_dealloc (vm_object_t object);
static int	swap_pager_getpages (vm_object_t, vm_page_t *, int, int);
static void	swap_pager_init (void);
static void	swap_pager_unswapped (vm_page_t);
static void	swap_pager_strategy (vm_object_t, struct bio *);
static void	swap_chain_iodone(struct bio *biox);

struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager	*/
	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpages,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage,	/* get backing store status for page	*/
	swap_pager_unswapped,	/* remove swap related to page		*/
	swap_pager_strategy	/* pager strategy call			*/
};

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */

int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

static __inline void	swp_sizecheck (void);
static void	swp_pager_sync_iodone (struct bio *bio);
static void	swp_pager_async_iodone (struct bio *bio);

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace (daddr_t blk, int npages);
static __inline daddr_t	swp_pager_getswapspace (int npages);

/*
 * Metadata functions
 */

static void swp_pager_meta_build (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free_all (vm_object_t);
static daddr_t swp_pager_meta_ctl (vm_object_t, vm_pindex_t, int);

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */

static __inline void
swp_sizecheck(void)
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			kprintf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */

static void
swap_pager_init(void)
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	TAILQ_INIT(&swap_pager_un_object_list);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */

	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}
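
/*
 * Illustrative sketch (not part of the original source): how the device
 * stripe mask set up above is used.  With the default MAX_PAGEOUT_CLUSTER
 * of 16, dmmax is 32 and dmmax_mask is ~31, so two swap block numbers lie
 * in the same stripe exactly when their XOR has no bits set at or above
 * the stripe size.  The block numbers below are made up for illustration.
 */
#if 0
	/* blocks 30 and 33 straddle a stripe boundary:			*/
	/* (30 ^ 33) & ~31 == 63 & ~31 == 32, non-zero => different stripes */

	/*
	 * Trimming an allocation so it does not cross a stripe, as
	 * swap_pager_putpages() does, with made-up blk = 30, n = 8:
	 */
	daddr_t blk = 30;
	int n = 8;
	if ((blk ^ (blk + n)) & dmmax_mask) {
		int j = ((blk + dmmax) & dmmax_mask) - blk;	/* j == 2 */
		swp_pager_freeswapspace(blk + j, n - j);	/* free 32..37 */
		n = j;						/* keep 30..31 */
	}
#endif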

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */

void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the number of pages an XIO
	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */

	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold 16 pages, so this is probably overkill.  This reservation
	 * is typically limited to around 32MB by default.
	 */
	n = vmstats.v_page_count / 2;
	if (maxswzone && n > maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;

	do {
		swap_zone = zinit(
			"SWAPMETA",
			sizeof(struct swblock),
			n,
			ZONE_INTERRUPT,
			1);
		if (swap_zone != NULL)
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	if (swap_zone == NULL)
		panic("swap_pager_swap_init: swap_zone == NULL");
	if (n2 != n)
		kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
	n2 = n;

	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 *	n:		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */

	for (n = 1; n < n2 / 8; n *= 2)
		;

	swhash = kmalloc(sizeof(struct swblock *) * n, M_VMPGDATA,
	    M_WAITOK | M_ZERO);

	swhash_mask = n - 1;
}
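
/*
 * Illustrative sketch (not part of the original source): the hash sizing
 * loop above finds the smallest power of two that is >= n2/8, so that
 * (n - 1) can serve as a bit mask.  For example, with n2 == 1000 entries
 * the loop stops at n == 128 and swhash_mask becomes 127; a bucket is
 * then selected with a cheap AND instead of a modulo.  The variable
 * names below (hashvalue, bucket) are hypothetical.
 */
#if 0
	bucket = hashvalue & swhash_mask;  /* == hashvalue % n, n a power of 2 */
#endif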

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.  We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */

static vm_object_t
swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
{
	vm_object_t object;

	if (handle) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */

		while (sw_alloc_interlock) {
			sw_alloc_interlock = -1;
			tsleep(&sw_alloc_interlock, 0, "swpalc", 0);
		}
		sw_alloc_interlock = 1;

		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT,
				OFF_TO_IDX(offset + PAGE_MASK + size));
			object->handle = handle;

			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		}

		if (sw_alloc_interlock < 0)
			wakeup(&sw_alloc_interlock);

		sw_alloc_interlock = 0;
	} else {
		object = vm_object_allocate(OBJT_DEFAULT,
			OFF_TO_IDX(offset + PAGE_MASK + size));

		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	}

	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine is allowed to block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */

static void
swap_pager_dealloc(vm_object_t object)
{
	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */

	if (object->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, object,
		    pager_object_list);
	} else {
		TAILQ_REMOVE(NOBJLIST(object->handle), object,
		    pager_object_list);
	}

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	crit_enter();
	swp_pager_meta_free_all(object);
	crit_exit();
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline daddr_t
swp_pager_getswapspace(int npages)
{
	daddr_t blk;

	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			kprintf("swap_pager_getswapspace: failed\n");
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		vm_swap_size -= npages;
		swp_sizecheck();
	}
	return(blk);
}
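
/*
 * Illustrative sketch (not part of the original source): the swap bitmap
 * is a radix-tree "blist" (see sys/blist.h).  An allocation and its
 * matching free always move vm_swap_size in opposite directions, with
 * swp_sizecheck() re-evaluating the lowat/hiwat hysteresis after each
 * step.  A minimal allocate/use/release cycle, under the file's own
 * crit_enter()/crit_exit() convention, might look like:
 */
#if 0
	daddr_t blk;

	crit_enter();				/* splvm-equivalent section */
	blk = swp_pager_getswapspace(4);	/* 4 contiguous swap pages  */
	if (blk != SWAPBLK_NONE) {
		/* ... issue I/O against blocks blk..blk+3 ... */
		swp_pager_freeswapspace(blk, 4);/* return them to the blist */
	}
	crit_exit();
#endif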

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline void
swp_pager_freeswapspace(daddr_t blk, int npages)
{
	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */

void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	crit_enter();
	swp_pager_meta_free(object, start, size);
	crit_exit();
}

/*
 * SWAP_PAGER_RESERVE() -	reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is
 *	destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */

int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	crit_enter();
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) ==
			    SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg,
					    start - beg);
					crit_exit();
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	crit_exit();
	return(0);
}
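
/*
 * Illustrative sketch (not part of the original source): the allocation
 * strategy in swap_pager_reserve() above.  It asks the blist for the
 * largest chunk it can (BLIST_MAX_ALLOC) and halves the request on each
 * failure, so a fragmented swap area can still be covered, just in
 * smaller contiguous runs.  A hypothetical caller reserving 32 pages:
 */
#if 0
	/*
	 * Request 32 pages at object index 0.  On a fragmented bitmap
	 * this may internally be satisfied as, say, one run of 16 plus
	 * smaller runs, transparently to the caller.
	 */
	if (swap_pager_reserve(object, 0, 32) != 0)
		kprintf("swap reservation failed, partial run freed\n");
#endif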

/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */

void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
    vm_pindex_t offset, int destroysource)
{
	vm_pindex_t i;

	crit_enter();

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */

	if (destroysource) {
		if (srcobject->handle == NULL) {
			TAILQ_REMOVE(
			    &swap_pager_un_object_list,
			    srcobject,
			    pager_object_list
			);
		} else {
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
		}
	}

	/*
	 * transfer source to destination.
	 */

	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */

		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the source block.
			 */

			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not
	 * accidentally double-remove the object from the swap queues.
	 */

	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
	crit_exit();
}
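
/*
 * Illustrative sketch (not part of the original source): the three ways
 * swp_pager_meta_ctl() is used in the copy loop above.  flags == 0 is a
 * pure lookup, SWM_POP removes the assignment without freeing the swap
 * block (ownership moves to the caller), and SWM_FREE removes it and
 * returns the block to the bitmap.  object/pindex are hypothetical here.
 */
#if 0
	daddr_t blk;

	blk = swp_pager_meta_ctl(object, pindex, 0);	   /* peek only */
	blk = swp_pager_meta_ctl(object, pindex, SWM_POP); /* detach, keep blk */
	(void)swp_pager_meta_ctl(object, pindex, SWM_FREE);/* detach and free */
#endif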

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */

boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	daddr_t blk0;

	/*
	 * do we have good backing store at the requested index ?
	 */

	crit_enter();
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		crit_exit();
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */

	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */

	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	crit_exit();
	return (TRUE);
}
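
/*
 * Illustrative sketch (not part of the original source): a worked example
 * of the before/after computation above, with made-up numbers.  Suppose
 * the pages at indexes 7..10 of a hypothetical object are backed by the
 * contiguous swap blocks 100..103 and we query pindex 9 (blk0 == 102):
 *
 *	pindex - 2 -> blk 100 == blk0 - 2	(contiguous)
 *	pindex - 3 -> no swapblk		(run ends)
 *	pindex + 1 -> blk 103 == blk0 + 1	(contiguous)
 *	pindex + 2 -> no swapblk		(run ends)
 *
 * so the call reports *before == 2 and *after == 1, each capped at
 * SWB_NPAGES/2 - 1.
 */
#if 0
	int before, after;

	if (swap_pager_haspage(object, 9, &before, &after))
		kprintf("run: %d before, %d after\n", before, after);
#endif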

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */

static void
swap_pager_unswapped(vm_page_t m)
{
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly to and from the swap device.
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */

static void
swap_pager_strategy(vm_object_t object, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct bio *nbio;
	vm_pindex_t start;
	vm_pindex_t biox_blkno = 0;
	int count;
	char *data;
	struct bio *biox = NULL;
	struct buf *bufx = NULL;
	struct bio_track *track;

	/*
	 * tracking for swapdev vnode I/Os
	 */
	if (bp->b_cmd == BUF_CMD_READ)
		track = &swapdev_vp->v_track_read;
	else
		track = &swapdev_vp->v_track_write;

	if (bp->b_bcount & PAGE_MASK) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bio);
		kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
		    "not page bounded\n",
		    bp, bio->bio_offset, (int)bp->b_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */
	bp->b_error = 0;
	bp->b_flags &= ~B_ERROR;
	bp->b_resid = bp->b_bcount;

	start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
	count = howmany(bp->b_bcount, PAGE_SIZE);
	data = bp->b_data;

	crit_enter();

	/*
	 * Deal with BUF_CMD_FREEBLKS
	 */
	if (bp->b_cmd == BUF_CMD_FREEBLKS) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		swp_pager_meta_free(object, start, count);
		crit_exit();
		bp->b_resid = 0;
		biodone(bio);
		return;
	}

	/*
	 * We need to be able to create a new cluster of I/O's.  We cannot
	 * use the caller fields of the passed bio so push a new one.
	 *
	 * Because nbio is just a placeholder for the cluster links,
	 * we can biodone() the original bio instead of nbio to make
	 * things a bit more efficient.
	 */
	nbio = push_bio(bio);
	nbio->bio_offset = bio->bio_offset;
	nbio->bio_caller_info1.cluster_head = NULL;
	nbio->bio_caller_info2.cluster_tail = NULL;

	/*
	 * Execute read or write
	 */

	while (count > 0) {
		daddr_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */

		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) {
			blk = swp_pager_getswapspace(1);
			if (blk == SWAPBLK_NONE) {
				bp->b_error = ENOMEM;
				bp->b_flags |= B_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */

		if (
		    biox && (biox_blkno + btoc(bufx->b_bcount) != blk ||
		     ((biox_blkno ^ blk) & dmmax_mask)
		    )
		) {
			crit_exit();
			if (bp->b_cmd == BUF_CMD_READ) {
				++mycpu->gd_cnt.v_swapin;
				mycpu->gd_cnt.v_swappgsin +=
				    btoc(bufx->b_bcount);
			} else {
				++mycpu->gd_cnt.v_swapout;
				mycpu->gd_cnt.v_swappgsout +=
				    btoc(bufx->b_bcount);
				bufx->b_dirtyend = bufx->b_bcount;
			}

			/*
			 * Flush the biox to the swap device.
			 */
			if (bufx->b_bcount) {
				if (bufx->b_cmd != BUF_CMD_READ)
					bufx->b_dirtyend = bufx->b_bcount;
				BUF_KERNPROC(bufx);
				vn_strategy(swapdev_vp, biox);
			} else {
				biodone(biox);
			}
			crit_enter();
			biox = NULL;
			bufx = NULL;
		}

		/*
		 * Add new swapblk to biox, instantiating biox if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */
		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->b_resid -= PAGE_SIZE;
		} else {
			if (biox == NULL) {
				/* XXX chain count > 4, wait to <= 4 */

				bufx = getpbuf(NULL);
				biox = &bufx->b_bio1;
				cluster_append(nbio, bufx);
				bufx->b_flags |= (bufx->b_flags & B_ORDERED) |
				    B_ASYNC;
				bufx->b_cmd = bp->b_cmd;
				biox->bio_done = swap_chain_iodone;
				biox->bio_offset = (off_t)blk << PAGE_SHIFT;
				biox->bio_caller_info1.cluster_parent = nbio;
				biox_blkno = blk;
				bufx->b_bcount = 0;
				bufx->b_data = data;
			}
			bufx->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}

	/*
	 * Flush out last buffer
	 */
	crit_exit();

	if (biox) {
		if ((bp->b_flags & B_ASYNC) == 0)
			bufx->b_flags &= ~B_ASYNC;
		if (bufx->b_cmd == BUF_CMD_READ) {
			++mycpu->gd_cnt.v_swapin;
			mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
		} else {
			++mycpu->gd_cnt.v_swapout;
			mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
			bufx->b_dirtyend = bufx->b_bcount;
		}
		if (bufx->b_bcount) {
			if (bufx->b_cmd != BUF_CMD_READ)
				bufx->b_dirtyend = bufx->b_bcount;
			BUF_KERNPROC(bufx);
			vn_strategy(swapdev_vp, biox);
		} else {
			biodone(biox);
		}
		/* biox, bufx = NULL */
	}

	/*
	 * Wait for completion.  Now that we are no longer using
	 * cluster_append, use the cluster_tail field to indicate
	 * auto-completion if there are still I/O's in progress.
	 */
	if (bp->b_flags & B_ASYNC) {
		crit_enter();
		if (nbio->bio_caller_info1.cluster_head == NULL) {
			biodone(bio);
		} else {
			nbio->bio_caller_info2.cluster_tail = AUTOCHAINDONE;
		}
		crit_exit();
	} else {
		crit_enter();
		while (nbio->bio_caller_info1.cluster_head != NULL) {
			bp->b_flags |= B_WANT;
			tsleep(bp, 0, "bpchain", 0);
		}
		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
			bp->b_flags |= B_ERROR;
			bp->b_error = EINVAL;
		}
		biodone(bio);
		crit_exit();
	}
}
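
/*
 * Illustrative sketch (not part of the original source): when the
 * clustering loop in swap_pager_strategy() flushes.  With dmmax == 32
 * (so dmmax_mask == ~31), a biox holding blocks 30 and 31 extends only
 * if the next block is contiguous *and* stays in the same stripe.  The
 * numbers below are made up:
 */
#if 0
	/* biox_blkno == 30, bufx->b_bcount == 2 * PAGE_SIZE (blocks 30,31) */
	blk = 32;	/* contiguous (30 + 2 == 32), but		*/
			/* (30 ^ 32) & ~31 == 32: stripe crossed, flush	*/
	blk = 40;	/* 30 + 2 != 40: discontiguous, flush		*/
#endif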

static void
swap_chain_iodone(struct bio *biox)
{
	struct buf **nextp;
	struct buf *bufx;	/* chained sub-buffer */
	struct bio *nbio;	/* parent nbio with chain glue */
	struct buf *bp;		/* original bp associated with nbio */

	bufx = biox->bio_buf;
	nbio = biox->bio_caller_info1.cluster_parent;
	bp = nbio->bio_buf;

	/*
	 * Update the original buffer
	 */
	KKASSERT(bp != NULL);
	if (bufx->b_flags & B_ERROR) {
		bp->b_flags |= B_ERROR;
		bp->b_error = bufx->b_error;
	} else if (bufx->b_resid != 0) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EINVAL;
	} else {
		bp->b_resid -= bufx->b_bcount;
	}

	/*
	 * Remove us from the chain.  It is sufficient to clean up
	 * cluster_head.  Once the chain is operational cluster_tail
	 * may be used to indicate AUTOCHAINDONE.  Note that I/O's
	 * can complete while the swap system is still appending new
	 * BIOs to the chain.
	 */
	nextp = &nbio->bio_caller_info1.cluster_head;
	while (*nextp != bufx) {
		KKASSERT(*nextp != NULL);
		nextp = &(*nextp)->b_cluster_next;
	}
	*nextp = bufx->b_cluster_next;
	if (bp->b_flags & B_WANT) {
		bp->b_flags &= ~B_WANT;
		wakeup(bp);
	}

	/*
	 * Clean up bufx.  If this was the last buffer in the chain
	 * and AUTOCHAINDONE was set, finish off the original I/O
	 * as well.
	 *
	 * nbio was just a fake BIO layer to hold the cluster links,
	 * we can issue the biodone() on the layer above it.
	 */
	if (nbio->bio_caller_info1.cluster_head == NULL &&
	    nbio->bio_caller_info2.cluster_tail == AUTOCHAINDONE
	) {
		nbio->bio_caller_info2.cluster_tail = NULL;
		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
			bp->b_flags |= B_ERROR;
			bp->b_error = EINVAL;
		}
		biodone(nbio->bio_prev);
	}
	bufx->b_flags &= ~B_ASYNC;
	relpbuf(bufx, NULL);
}
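
/*
 * Illustrative sketch (not part of the original source): the unlink in
 * swap_chain_iodone() uses the classic pointer-to-pointer idiom, which
 * removes a node from a singly-linked list without special-casing the
 * head.  The same shape, on a hypothetical list (head/target are
 * made-up names):
 */
#if 0
	struct buf **nextp = &head;		/* head: struct buf *	*/

	while (*nextp != target)		/* walk the link *slots* */
		nextp = &(*nextp)->b_cluster_next;
	*nextp = target->b_cluster_next;	/* splice target out	*/
#endif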

/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */

static int
swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	struct buf *bp;
	struct bio *bio;
	vm_page_t mreq;
	int i;
	int j;
	daddr_t blk;
	vm_offset_t kva;
	vm_pindex_t lastpindex;

	mreq = m[reqpage];

	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		    object,
		    mreq->object
		);
	}

	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range that falls entirely
	 * within a single device stripe.  If we do not supply it, bad things
	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
	 * loops are set up such that the case(s) are handled implicitly.
	 *
	 * The swp_*() calls must be made at splvm().  vm_page_free() does
	 * not need to be, but it will go a little faster if it is.
	 */
	crit_enter();
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

	for (i = reqpage - 1; i >= 0; --i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
		if (blk != iblk + (reqpage - i))
			break;
		if ((blk ^ iblk) & dmmax_mask)
			break;
	}
	++i;

	for (j = reqpage + 1; j < count; ++j) {
		daddr_t jblk;

		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
		if (blk != jblk - (j - reqpage))
			break;
		if ((blk ^ jblk) & dmmax_mask)
			break;
	}

	/*
	 * free pages outside our collection range.  Note: we never free
	 * mreq, it must remain busy throughout.
	 */

	{
		int k;

		for (k = 0; k < i; ++k)
			vm_page_free(m[k]);
		for (k = j; k < count; ++k)
			vm_page_free(m[k]);
	}
	crit_exit();

	/*
	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
	 * still busy, but the others unbusied.
	 */

	if (blk == SWAPBLK_NONE)
		return(VM_PAGER_FAIL);

	/*
	 * Get a swap buffer header to perform the IO
	 */

	bp = getpbuf(&nsw_rcount);
	bio = &bp->b_bio1;
	kva = (vm_offset_t) bp->b_data;

	/*
	 * map our page(s) into kva for input
	 */

	pmap_qenter(kva, m + i, j - i);

	bp->b_data = (caddr_t) kva;
	bp->b_bcount = PAGE_SIZE * (j - i);
	bio->bio_done = swp_pager_async_iodone;
	bio->bio_offset = (off_t)(blk - (reqpage - i)) << PAGE_SHIFT;
	bio->bio_driver_info = (void *)(reqpage - i);

	{
		int k;

		for (k = i; k < j; ++k) {
			bp->b_xio.xio_pages[k - i] = m[k];
			vm_page_flag_set(m[k], PG_SWAPINPROG);
		}
	}
	bp->b_xio.xio_npages = j - i;

	mycpu->gd_cnt.v_swapin++;
	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */

	vm_object_pip_add(mreq->object, bp->b_xio.xio_npages);
	lastpindex = m[j-1]->pindex;

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 */

	bp->b_cmd = BUF_CMD_READ;
	BUF_KERNPROC(bp);
	vn_strategy(swapdev_vp, bio);

	/*
	 * wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 */

	crit_enter();

	while ((mreq->flags & PG_SWAPINPROG) != 0) {
		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		mycpu->gd_cnt.v_intrans++;
		if (tsleep(mreq, 0, "swread", hz*20)) {
			kprintf(
			    "swap_pager: indefinite wait buffer: "
			    "offset: %lld, size: %d\n",
			    bio->bio_offset, bp->b_bcount
			);
		}
	}

	crit_exit();

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */

	if (mreq->valid != VM_PAGE_BITS_ALL) {
		return(VM_PAGER_ERROR);
	} else {
		return(VM_PAGER_OK);
	}

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 * swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in vn_strategy(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */
void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    boolean_t sync, int *rtvals)
{
	int i;
	int n = 0;

	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object,
		    m[0]->object
		);
	}

	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */

	if (object->type != OBJT_SWAP)
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);

	if (curthread != pagethread)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */

	if (swap_async_max != nsw_wcount_async_max) {
		int n;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		crit_enter();
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
		crit_exit();
	}

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		struct buf *bp;
		struct bio *bio;
		daddr_t blk;
		int j;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		crit_enter();

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			crit_exit();
			continue;
		}

		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.  Note: we are still
		 * at splvm().
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(blk + j, n - j);
			n = j;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 */

		if (sync == TRUE)
			bp = getpbuf(&nsw_wcount_sync);
		else
			bp = getpbuf(&nsw_wcount_async);
		bio = &bp->b_bio1;

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_bcount = PAGE_SIZE * n;
		bio->bio_offset = (off_t)blk << PAGE_SHIFT;

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_xio.xio_pages[j] = mreq;
		}
		bp->b_xio.xio_npages = n;

		mycpu->gd_cnt.v_swapout++;
		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;

		crit_exit();

		bp->b_dirtyoff = 0;		/* req'd for NFS */
		bp->b_dirtyend = bp->b_bcount;	/* req'd for NFS */
		bp->b_cmd = BUF_CMD_WRITE;

		/*
		 * asynchronous
		 */
		if (sync == FALSE) {
			bp->b_flags |= B_ASYNC;
			bio->bio_done = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			vn_strategy(swapdev_vp, bio);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			continue;
		}

		/*
		 * synchronous
		 */

		bio->bio_done = swp_pager_sync_iodone;
		vn_strategy(swapdev_vp, bio);

		/*
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		crit_enter();

		while (bp->b_cmd != BUF_CMD_DONE)
			tsleep(bp, 0, "swwrt", 0);

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */

		swp_pager_async_iodone(bio);

		crit_exit();
	}
}
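
/*
 * Illustrative sketch (not part of the original source): how Step 2 of
 * swap_pager_putpages() applies a new vm.swap_async_max value.  Only the
 * *difference* is applied, and only when enough write credits are
 * currently free; otherwise the change is deferred to a later call.
 * The numbers below are made up:
 */
#if 0
	/* sysop lowers the limit from 4 to 2, so n == -2		*/
	/* nsw_wcount_async == 3 (one op in flight): 3 - 2 >= 0, apply	*/
	/* nsw_wcount_async == 1 (three in flight):  1 - 2 <  0, defer	*/
	n -= nsw_wcount_async_max;
	if (nsw_wcount_async + n >= 0) {
		nsw_wcount_async += n;
		nsw_wcount_async_max += n;
		wakeup(&nsw_wcount_async);
	}
#endif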

/*
 * swp_pager_sync_iodone:
 *
 *	Completion routine for synchronous reads and writes from/to swap.
 *	We just mark the bp as complete and wake up anyone waiting on it.
 *
 *	This routine may not block.  This routine is called at splbio()
 *	or better.
 */

static void
swp_pager_sync_iodone(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	bp->b_flags &= ~B_ASYNC;
	bp->b_cmd = BUF_CMD_DONE;
	wakeup(bp);
}

/*
 * swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 */

static void
swp_pager_async_iodone(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	vm_object_t object = NULL;
	int i;
	int *nswptr;

	/*
	 * report error
	 */
	if (bp->b_flags & B_ERROR) {
		kprintf(
		    "swap_pager: I/O error - %s failed; offset %lld, "
		    "size %ld, error %d\n",
		    ((bp->b_cmd == BUF_CMD_READ) ? "pagein" : "pageout"),
		    bio->bio_offset,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object, raise to splvm().
	 */

	if (bp->b_xio.xio_npages)
		object = bp->b_xio.xio_pages[0]->object;
	crit_enter();

	/*
	 * remove the mapping for kernel virtual
	 */

	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */

	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
		vm_page_t m = bp->b_xio.xio_pages[i];

		vm_page_flag_clear(m, PG_SWAPINPROG);

		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */

			if (bp->b_cmd == BUF_CMD_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * NOTE: We can't actually free the page from
				 * here, because this is an interrupt.  It
				 * is not legal to mess with object->memq
				 * from an interrupt.  Deactivate the page
				 * instead.
				 */

				m->valid = 0;
				vm_page_flag_clear(m, PG_ZERO);

				/*
				 * bio_driver_info holds the requested page
				 * index.
				 */
				if (i != (int)bio->bio_driver_info) {
					vm_page_deactivate(m);
					vm_page_wakeup(m);
				} else {
					vm_page_flash(m);
				}
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bp->b_cmd == BUF_CMD_READ) {
			/*
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * clear PG_ZERO in page.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */

			/*
			 * NOTE: can't call pmap_clear_modify(m) from an
			 * interrupt thread, the pmap code may have to map
			 * non-kernel pmaps and currently asserts the case.
			 */
			/*pmap_clear_modify(m);*/
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure to not unbusy getpages specifically
			 * requested page - getpages expects it to be
			 * left busy.
			 *
			 * bio_driver_info holds the requested page
			 */
			if (i != (int)bio->bio_driver_info) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * Mark the page clean but do not mess with the
			 * pmap-layer's modified state.  That state should
			 * also be clear since the caller protected the
			 * page VM_PROT_READ, but allow the case.
			 *
			 * We are in an interrupt, avoid pmap operations.
			 *
			 * If we have a severe page deficit, deactivate the
			 * page.  Do not try to cache it (which would also
			 * involve a pmap op), because the page might still
			 * be read-heavy.
			 */
			vm_page_undirty(m);
			vm_page_io_finish(m);
			if (vm_page_count_severe())
				vm_page_deactivate(m);
#if 0
			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
				vm_page_protect(m, VM_PROT_READ);
#endif
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */

	if (object)
		vm_object_pip_wakeupn(object, bp->b_xio.xio_npages);

	/*
	 * release the physical I/O buffer
	 */
	if (bp->b_cmd == BUF_CMD_READ)
		nswptr = &nsw_rcount;
	else if (bp->b_flags & B_ASYNC)
		nswptr = &nsw_wcount_async;
	else
		nswptr = &nsw_wcount_sync;
	bp->b_cmd = BUF_CMD_DONE;
	relpbuf(bp, nswptr);
	crit_exit();
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio()
 *	covers.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */

/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is an inline helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the swblock, or a pointer to a NULL pointer if it could not
 *	find a swapblk.
 *
 *	This routine must be called at splvm().
 */

static __inline struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];

	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return(pswap);
}

/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	This routine must be called at splvm(), except when used to convert
 *	an OBJT_DEFAULT object into an OBJT_SWAP object.
 */

static void
swp_pager_meta_build(
	vm_object_t object,
	vm_pindex_t index,
	daddr_t swapblk
) {
	struct swblock *swap;
	struct swblock **pswap;

	/*
	 * Convert default object to swap object if necessary
	 */

	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		if (object->handle != NULL) {
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object,
			    pager_object_list
			);
		} else {
			TAILQ_INSERT_TAIL(
			    &swap_pager_un_object_list,
			    object,
			    pager_object_list
			);
		}
	}

	/*
	 * Locate hash entry.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
	 */

retry:
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) == NULL) {
		int i;

		if (swapblk == SWAPBLK_NONE)
			return;

		swap = *pswap = zalloc(swap_zone);
		if (swap == NULL) {
			vm_wait(0);
			goto retry;
		}
		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = index & ~SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}

	/*
	 * Delete prior contents of metadata
	 */

	index &= SWAP_META_MASK;

	if (swap->swb_pages[index] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[index], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */

	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
}
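
/*
 * Illustrative sketch (not part of the original source): how a page index
 * is split by the metadata hash above.  Assuming SWAP_META_PAGES is 16
 * (so SWAP_META_MASK is 15, matching the "each swblock can hold 16 pages"
 * note in swap_pager_swap_init()), page index 0x123 of a hypothetical
 * object is stored as:
 */
#if 0
	vm_pindex_t index = 0x123;

	/* key used for the bucket and stored in swb_index:		*/
	vm_pindex_t base = index & ~SWAP_META_MASK;	/* 0x120 */
	/* slot within the swblock's swb_pages[] array:			*/
	int slot = index & SWAP_META_MASK;		/* 3 */
	/* bucket, perturbed by the object pointer:			*/
	struct swblock **pswap =
	    &swhash[(base ^ (int)(intptr_t)object) & swhash_mask];
#endif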

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm()
 */

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
				    SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
			count -= n;
			index += n;
		}
	}
}

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm()
 */

static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];
				if (v != SWAPBLK_NONE) {
					--swap->swb_count;
					swp_pager_freeswapspace(v, 1);
				}
			}
			if (swap->swb_count != 0)
				panic("swap_pager_meta_free_all: "
				    "swb_count != 0");
			*pswap = swap->swb_hnext;
			zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		index += SWAP_META_PAGES;
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: failed to locate "
			    "all swap meta blocks");
	}
}
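
/*
 * Illustrative sketch (not part of the original source): the else-branch
 * of swp_pager_meta_free() above skips a whole missing swblock at once.
 * Assuming SWAP_META_PAGES is 16, freeing a range that starts at index 5
 * of a group with no swblock advances in one step:
 */
#if 0
	/* index == 5, no swblock found for its 16-page group		*/
	int n = SWAP_META_PAGES - (index & SWAP_META_MASK);	/* 16 - 5 = 11 */
	count -= n;
	index += n;	/* jump straight to index 16, the next group */
#endif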

/*
 * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free.. pop it out
 */

static daddr_t
swp_pager_meta_ctl(
	vm_object_t object,
	vm_pindex_t index,
	int flags
) {
	struct swblock **pswap;
	struct swblock *swap;
	daddr_t r1;

	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */

	if (object->type != OBJT_SWAP)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
		}
	}
	return(r1);
}