/*
 * Copyright (c) 1998,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 * $DragonFly: src/sys/vm/swap_pager.c,v 1.18 2006/02/17 19:18:08 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/thread2.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */

extern int vm_swap_size;	/* number of free swap blocks, in pages */

int swap_pager_full;		/* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
static int sw_alloc_interlock;	/* swap pager allocation interlock	*/

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/

extern struct vnode *swapdev_vp;	/* from vm_swap.c */

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct pagerlst	swap_pager_object_list[NOBJLISTS];
struct pagerlst		swap_pager_un_object_list;
vm_zone_t		swap_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static vm_object_t
		swap_pager_alloc (void *handle, vm_ooffset_t size,
				  vm_prot_t prot, vm_ooffset_t offset);
static void	swap_pager_dealloc (vm_object_t object);
static int	swap_pager_getpages (vm_object_t, vm_page_t *, int, int);
static void	swap_pager_init (void);
static void	swap_pager_unswapped (vm_page_t);
static void	swap_pager_strategy (vm_object_t, struct bio *);
static void	swap_chain_iodone(struct bio *biox);

struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager	*/
	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpages,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage,	/* get backing store status for page	*/
	swap_pager_unswapped,	/* remove swap related to page		*/
	swap_pager_strategy	/* pager strategy call			*/
};

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */

int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

static __inline void	swp_sizecheck (void);
static void	swp_pager_sync_iodone (struct bio *bio);
static void	swp_pager_async_iodone (struct bio *bio);

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace (daddr_t blk, int npages);
static __inline daddr_t	swp_pager_getswapspace (int npages);

/*
 * Metadata functions
 */

static void swp_pager_meta_build (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free_all (vm_object_t);
static daddr_t swp_pager_meta_ctl (vm_object_t, vm_pindex_t, int);

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */

static __inline void
swp_sizecheck(void)
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */

static void
swap_pager_init(void)
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	TAILQ_INIT(&swap_pager_un_object_list);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */

	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}
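
/*
 * Worked example (illustrative note, not part of the original code):
 * with the default SWB_NPAGES of 16 above, dmmax is 32 and dmmax_mask
 * is ~31.  Two swap block numbers lie in the same device stripe exactly
 * when their XOR has no bits above the stripe size, e.g.
 * (40 ^ 63) & ~31 == 0 (same stripe), while (63 ^ 64) & ~31 != 0, so an
 * I/O covering blocks 63 and 64 would cross a stripe boundary and must
 * be split.  This is the test the clustering code below applies.
 */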

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */

void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the number of pages an XIO
	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */

	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold 16 pages, so this is probably overkill.  This reservation
	 * is typically limited to around 32MB by default.
	 */
	n = vmstats.v_page_count / 2;
	if (maxswzone && n > maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;

	do {
		swap_zone = zinit(
			"SWAPMETA",
			sizeof(struct swblock),
			n,
			ZONE_INTERRUPT,
			1);
		if (swap_zone != NULL)
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	if (swap_zone == NULL)
		panic("swap_pager_swap_init: swap_zone == NULL");
	if (n2 != n)
		printf("Swap zone entries reduced from %d to %d.\n", n2, n);
	n2 = n;

	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 *	n:		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */

	for (n = 1; n < n2 / 8; n *= 2)
		;

	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK);
	bzero(swhash, sizeof(struct swblock *) * n);

	swhash_mask = n - 1;
}
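
/*
 * Worked sizing example (illustrative, not part of the original code):
 * if the zone ends up with n2 = 131072 swblock entries, the loop above
 * stops at the first power of 2 that is >= n2 / 8 = 16384, so the hash
 * table gets 16384 buckets and swhash_mask becomes 16383 (0x3fff).
 */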

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.  We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */

static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		 vm_ooffset_t offset)
{
	vm_object_t object;

	if (handle) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regard to the lookup
		 * of the handle.
		 */

		while (sw_alloc_interlock) {
			sw_alloc_interlock = -1;
			tsleep(&sw_alloc_interlock, 0, "swpalc", 0);
		}
		sw_alloc_interlock = 1;

		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT,
				OFF_TO_IDX(offset + PAGE_MASK + size));
			object->handle = handle;

			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		}

		if (sw_alloc_interlock < 0)
			wakeup(&sw_alloc_interlock);

		sw_alloc_interlock = 0;
	} else {
		object = vm_object_allocate(OBJT_DEFAULT,
			OFF_TO_IDX(offset + PAGE_MASK + size));

		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	}

	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine may block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */

static void
swap_pager_dealloc(vm_object_t object)
{
	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */

	if (object->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
	} else {
		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
	}

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	crit_enter();
	swp_pager_meta_free_all(object);
	crit_exit();
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline daddr_t
swp_pager_getswapspace(int npages)
{
	daddr_t blk;

	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			printf("swap_pager_getswapspace: failed\n");
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		vm_swap_size -= npages;
		swp_sizecheck();
	}
	return(blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline void
swp_pager_freeswapspace(daddr_t blk, int npages)
{
	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */

void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	crit_enter();
	swp_pager_meta_free(object, start, size);
	crit_exit();
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is
 *	destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */

int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	crit_enter();
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg, start - beg);
					crit_exit();
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	crit_exit();
	return(0);
}
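
/*
 * Worked example (illustrative, not part of the original code): a large
 * reservation first tries a single BLIST_MAX_ALLOC-sized run of blocks;
 * if the bitmap is too fragmented the inner loop above retries with
 * half the size each time, and only fails, undoing the partial
 * reservation already built, when not even a single page can be
 * allocated.  Larger requests are thus satisfied as several smaller
 * contiguous runs rather than one big one.
 */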

/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */

void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
		vm_pindex_t offset, int destroysource)
{
	vm_pindex_t i;

	crit_enter();

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */

	if (destroysource) {
		if (srcobject->handle == NULL) {
			TAILQ_REMOVE(
			    &swap_pager_un_object_list,
			    srcobject,
			    pager_object_list
			);
		} else {
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
		}
	}

	/*
	 * transfer source to destination.
	 */

	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */

		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the source block.
			 */

			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */

	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
	crit_exit();
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */

boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
		   int *after)
{
	daddr_t blk0;

	/*
	 * do we have good backing store at the requested index ?
	 */

	crit_enter();
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		crit_exit();
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */

	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */

	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	crit_exit();
	return (TRUE);
}
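
/*
 * Worked example (illustrative, not part of the original code): if
 * pindex 100 maps to swap block 500 and indices 98-103 map contiguously
 * to blocks 498-503, the scans above report *before = 2 and *after = 3
 * (each capped at SWB_NPAGES/2 - 1), telling the caller how far a
 * clustered pagein around pindex could extend in each direction.
 */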

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation.
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */

static void
swap_pager_unswapped(vm_page_t m)
{
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly to and from the swap device.
 *
 *	Note that bio_blkno is scaled for PAGE_SIZE
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */

static void
swap_pager_strategy(vm_object_t object, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct bio *nbio;
	vm_pindex_t start;
	int count;
	char *data;
	struct bio *biox = NULL;
	struct buf *bufx = NULL;
	struct bio_track *track;

	/*
	 * tracking for swapdev vnode I/Os
	 */
	if (bp->b_flags & B_READ)
		track = &swapdev_vp->v_track_read;
	else
		track = &swapdev_vp->v_track_write;

	if (bp->b_bcount & PAGE_MASK) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bio);
		printf("swap_pager_strategy: bp %p b_vp %p blk %d size %d, "
		       "not page bounded\n",
		       bp, bp->b_vp, (int)bio->bio_blkno, (int)bp->b_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */
	bp->b_error = 0;
	bp->b_flags &= ~B_ERROR;
	bp->b_resid = bp->b_bcount;

	start = bio->bio_blkno;
	count = howmany(bp->b_bcount, PAGE_SIZE);
	data = bp->b_data;

	crit_enter();

	/*
	 * Deal with B_FREEBUF
	 */
	if (bp->b_flags & B_FREEBUF) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		swp_pager_meta_free(object, start, count);
		crit_exit();
		bp->b_resid = 0;
		biodone(bio);
		return;
	}

	/*
	 * We need to be able to create a new cluster of I/O's.  We cannot
	 * use the caller fields of the passed bio so push a new one.
	 *
	 * Because nbio is just a placeholder for the cluster links,
	 * we can biodone() the original bio instead of nbio to make
	 * things a bit more efficient.
	 */
	nbio = push_bio(bio);
	nbio->bio_blkno = bio->bio_blkno;
	nbio->bio_caller_info1.cluster_head = NULL;
	nbio->bio_caller_info2.cluster_tail = NULL;

	/*
	 * Execute read or write
	 */

	while (count > 0) {
		daddr_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */

		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && (bp->b_flags & B_READ) == 0) {
			blk = swp_pager_getswapspace(1);
			if (blk == SWAPBLK_NONE) {
				bp->b_error = ENOMEM;
				bp->b_flags |= B_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */

		if (
		    biox && (biox->bio_blkno + btoc(bufx->b_bcount) != blk ||
		     ((biox->bio_blkno ^ blk) & dmmax_mask)
		    )
		) {
			crit_exit();
			if (bp->b_flags & B_READ) {
				++mycpu->gd_cnt.v_swapin;
				mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
			} else {
				++mycpu->gd_cnt.v_swapout;
				mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
				bufx->b_dirtyend = bufx->b_bcount;
			}

			/*
			 * Flush the biox to the swap device.
			 */
			if (bufx->b_bcount) {
				bufx->b_bufsize = bufx->b_bcount;
				if ((bufx->b_flags & B_READ) == 0)
					bufx->b_dirtyend = bufx->b_bcount;
				BUF_KERNPROC(bufx);
				vn_strategy(bufx->b_vp, biox);
			} else {
				biodone(biox);
			}
			crit_enter();
			biox = NULL;
			bufx = NULL;
		}

		/*
		 * Add new swapblk to biox, instantiating biox if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */
		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->b_resid -= PAGE_SIZE;
		} else {
			if (biox == NULL) {
				/* XXX chain count > 4, wait to <= 4 */

				bufx = getpbuf(NULL);
				biox = &bufx->b_bio1;
				cluster_append(nbio, bufx);
				bufx->b_flags = (bufx->b_flags & B_ORDERED) |
						(bp->b_flags & B_READ) |
						B_ASYNC;
				pbgetvp(swapdev_vp, bufx);
				biox->bio_done = swap_chain_iodone;
				biox->bio_blkno = blk;
				biox->bio_caller_info1.cluster_parent = nbio;
				bufx->b_bcount = 0;
				bufx->b_data = data;
			}
			bufx->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}

	/*
	 * Flush out last buffer
	 */
	crit_exit();

	if (biox) {
		if ((bp->b_flags & B_ASYNC) == 0)
			bufx->b_flags &= ~B_ASYNC;
		if (bufx->b_flags & B_READ) {
			++mycpu->gd_cnt.v_swapin;
			mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
		} else {
			++mycpu->gd_cnt.v_swapout;
			mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
			bufx->b_dirtyend = bufx->b_bcount;
		}
		if (bufx->b_bcount) {
			bufx->b_bufsize = bufx->b_bcount;
			if ((bufx->b_flags & B_READ) == 0)
				bufx->b_dirtyend = bufx->b_bcount;
			BUF_KERNPROC(bufx);
			vn_strategy(bufx->b_vp, biox);
		} else {
			biodone(biox);
		}
		/* biox, bufx = NULL */
	}

	/*
	 * Wait for completion.
	 */
	if (bp->b_flags & B_ASYNC) {
		crit_enter();
		if (nbio->bio_caller_info1.cluster_head == NULL) {
			biodone(bio);
		} else {
			bp->b_xflags |= BX_AUTOCHAINDONE;
		}
		crit_exit();
	} else {
		crit_enter();
		while (nbio->bio_caller_info1.cluster_head != NULL) {
			bp->b_flags |= B_WANT;
			tsleep(bp, 0, "bpchain", 0);
		}
		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
			bp->b_flags |= B_ERROR;
			bp->b_error = EINVAL;
		}
		biodone(bio);
		crit_exit();
	}
}

static void
swap_chain_iodone(struct bio *biox)
{
	struct buf **nextp;
	struct buf *bufx;	/* chained sub-buffer */
	struct bio *nbio;	/* parent nbio with chain glue */
	struct buf *bp;		/* original bp associated with nbio */

	bufx = biox->bio_buf;
	nbio = biox->bio_caller_info1.cluster_parent;
	bp = nbio->bio_buf;

	/*
	 * Update the original buffer
	 */
	KKASSERT(bp != NULL);
	if (bufx->b_flags & B_ERROR) {
		bp->b_flags |= B_ERROR;
		bp->b_error = bufx->b_error;
	} else if (bufx->b_resid != 0) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EINVAL;
	} else {
		bp->b_resid -= bufx->b_bcount;
	}

	/*
	 * Remove us from the chain.  It is sufficient to clean up
	 * cluster_head.  We do not have to clean up cluster_tail.
	 */
	nextp = &nbio->bio_caller_info1.cluster_head;
	while (*nextp != bufx) {
		KKASSERT(*nextp != NULL);
		nextp = &(*nextp)->b_cluster_next;
	}
	*nextp = bufx->b_cluster_next;
	if (bp->b_flags & B_WANT) {
		bp->b_flags &= ~B_WANT;
		wakeup(bp);
	}

	/*
	 * Clean up bufx.  If this was the last buffer in the chain
	 * and BX_AUTOCHAINDONE was set, finish off the original I/O
	 * as well.
	 *
	 * nbio was just a fake BIO layer to hold the cluster links,
	 * we can issue the biodone() on the layer above it.
	 */
	if (nbio->bio_caller_info1.cluster_head == NULL &&
	    (bp->b_xflags & BX_AUTOCHAINDONE)) {
		bp->b_xflags &= ~BX_AUTOCHAINDONE;
		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
			bp->b_flags |= B_ERROR;
			bp->b_error = EINVAL;
		}
		biodone(nbio->bio_prev);
	}
	bufx->b_flags |= B_DONE;
	bufx->b_flags &= ~B_ASYNC;
	relpbuf(bufx, NULL);
}
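
/*
 * Illustrative sketch (not part of the original code): for a request
 * spanning two device stripes, swap_pager_strategy() ends up with
 *
 *	nbio (pushed layer, bio_prev -> caller's original bio)
 *	    cluster_head -> bufx1 -> bufx2 -> NULL
 *
 * where each bufx/biox pair covers one contiguous run of swap blocks.
 * As each sub-buffer completes, swap_chain_iodone() unlinks it from
 * cluster_head; the completion that empties the list (with
 * BX_AUTOCHAINDONE set) finishes the original bio.
 */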

/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */

static int
swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	struct buf *bp;
	struct bio *bio;
	vm_page_t mreq;
	int i;
	int j;
	daddr_t blk;
	vm_offset_t kva;
	vm_pindex_t lastpindex;

	mreq = m[reqpage];

	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		    object,
		    mreq->object
		);
	}
	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range that falls entirely
	 * within a single device stripe.  If we do not supply it, bad things
	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
	 * loops are set up such that the case(s) are handled implicitly.
	 *
	 * The swp_*() calls must be made at splvm().  vm_page_free() does
	 * not need to be, but it will go a little faster if it is.
	 */

	crit_enter();
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

	for (i = reqpage - 1; i >= 0; --i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
		if (blk != iblk + (reqpage - i))
			break;
		if ((blk ^ iblk) & dmmax_mask)
			break;
	}
	++i;

	for (j = reqpage + 1; j < count; ++j) {
		daddr_t jblk;

		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
		if (blk != jblk - (j - reqpage))
			break;
		if ((blk ^ jblk) & dmmax_mask)
			break;
	}
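
	/*
	 * Worked example (illustrative, not part of the original code):
	 * with count = 8, reqpage = 4 and contiguous swap assigned only
	 * to m[2]..m[5], the backward scan stops with i = 2 and the
	 * forward scan with j = 6, so the cluster read below covers
	 * exactly the half-open page range [i, j) = [2, 6).
	 */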

	/*
	 * free pages outside our collection range.  Note: we never free
	 * mreq, it must remain busy throughout.
	 */

	{
		int k;

		for (k = 0; k < i; ++k)
			vm_page_free(m[k]);
		for (k = j; k < count; ++k)
			vm_page_free(m[k]);
	}
	crit_exit();

	/*
	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
	 * still busy, but the others unbusied.
	 */

	if (blk == SWAPBLK_NONE)
		return(VM_PAGER_FAIL);

	/*
	 * Get a swap buffer header to perform the IO
	 */

	bp = getpbuf(&nsw_rcount);
	bio = &bp->b_bio1;
	kva = (vm_offset_t) bp->b_data;

	/*
	 * map our page(s) into kva for input
	 *
	 * NOTE: B_PAGING is set by pbgetvp()
	 */

	pmap_qenter(kva, m + i, j - i);

	bp->b_flags = B_READ;
	bp->b_data = (caddr_t) kva;
	bp->b_bcount = PAGE_SIZE * (j - i);
	bp->b_bufsize = PAGE_SIZE * (j - i);
	bio->bio_done = swp_pager_async_iodone;
	bio->bio_blkno = blk - (reqpage - i);
	bio->bio_driver_info = (void *)(reqpage - i);

	{
		int k;

		for (k = i; k < j; ++k) {
			bp->b_xio.xio_pages[k - i] = m[k];
			vm_page_flag_set(m[k], PG_SWAPINPROG);
		}
	}
	bp->b_xio.xio_npages = j - i;

	pbgetvp(swapdev_vp, bp);

	mycpu->gd_cnt.v_swapin++;
	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */

	vm_object_pip_add(mreq->object, bp->b_xio.xio_npages);
	lastpindex = m[j-1]->pindex;

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 *
	 * NOTE: bio_blkno may be destroyed by the call to vn_strategy()
	 * XXX should not be, any more.
	 */

	BUF_KERNPROC(bp);
	vn_strategy(swapdev_vp, bio);

	/*
	 * wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 */

	crit_enter();

	while ((mreq->flags & PG_SWAPINPROG) != 0) {
		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		mycpu->gd_cnt.v_intrans++;
		if (tsleep(mreq, 0, "swread", hz*20)) {
			printf(
			    "swap_pager: indefinite wait buffer: "
			    " blkno: %ld, size: %ld\n",
			    (long)bio->bio_blkno, bp->b_bcount
			);
		}
	}

	crit_exit();

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */

	if (mreq->valid != VM_PAGE_BITS_ALL) {
		return(VM_PAGER_ERROR);
	} else {
		return(VM_PAGER_OK);
	}

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 * swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in vn_strategy(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */

void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
		    int *rtvals)
{
	int i;
	int n = 0;

	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object,
		    m[0]->object
		);
	}
	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */

	if (object->type != OBJT_SWAP)
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);

	if (curthread != pagethread)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */

	if (swap_async_max != nsw_wcount_async_max) {
		int n;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		crit_enter();
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
		crit_exit();
	}

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		struct buf *bp;
		struct bio *bio;
		daddr_t blk;
		int j;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		crit_enter();

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			crit_exit();
			continue;
		}

		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.  Note: we are still
		 * at splvm().
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(blk + j, n - j);
			n = j;
		}
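
		/*
		 * Worked example (illustrative, not part of the original
		 * code): with dmmax = 32, an allocation of n = 8 blocks
		 * starting at blk = 60 would cross the stripe that ends
		 * at block 63.  j = ((60 + 32) & ~31) - 60 = 4, so blocks
		 * 64-67 are returned to the bitmap and the I/O is
		 * truncated to the 4 blocks 60-63.
		 */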

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 *
		 * NOTE: B_PAGING is set by pbgetvp()
		 */

		if (sync == TRUE) {
			bp = getpbuf(&nsw_wcount_sync);
		} else {
			bp = getpbuf(&nsw_wcount_async);
			bp->b_flags = B_ASYNC;
		}
		bio = &bp->b_bio1;

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bio->bio_blkno = blk;

		pbgetvp(swapdev_vp, bp);

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_xio.xio_pages[j] = mreq;
		}
		bp->b_xio.xio_npages = n;
		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		mycpu->gd_cnt.v_swapout++;
		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;

		crit_exit();

		/*
		 * asynchronous
		 *
		 * NOTE: bio_blkno is destroyed by the call to vn_strategy()
		 * XXX it should not be destroyed any more
		 */

		if (sync == FALSE) {
			bio->bio_done = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			vn_strategy(swapdev_vp, bio);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: bio_blkno is destroyed by the call to vn_strategy()
		 * XXX it should not be destroyed any more
		 */

		bio->bio_done = swp_pager_sync_iodone;
		vn_strategy(swapdev_vp, bio);

		/*
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		crit_enter();

		while ((bp->b_flags & B_DONE) == 0) {
			tsleep(bp, 0, "swwrt", 0);
		}

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */

		swp_pager_async_iodone(bio);

		crit_exit();
	}
}

/*
 * swp_pager_sync_iodone:
 *
 *	Completion routine for synchronous reads and writes from/to swap.
 *	We just mark the bp as complete and wake up anyone waiting on it.
 *
 *	This routine may not block.  This routine is called at splbio()
 *	or better.
 */

static void
swp_pager_sync_iodone(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	bp->b_flags |= B_DONE;
	bp->b_flags &= ~B_ASYNC;
	wakeup(bp);
}

/*
 * swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 */

static void
swp_pager_async_iodone(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	vm_object_t object = NULL;
	int i;

	bp->b_flags |= B_DONE;

	/*
	 * report error
	 */

	if (bp->b_flags & B_ERROR) {
		printf(
		    "swap_pager: I/O error - %s failed; blkno %ld, "
		    "size %ld, error %d\n",
		    ((bp->b_flags & B_READ) ? "pagein" : "pageout"),
		    (long)bio->bio_blkno,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object, raise to splvm().
	 */

	if (bp->b_xio.xio_npages)
		object = bp->b_xio.xio_pages[0]->object;
	crit_enter();

	/*
	 * remove the mapping for kernel virtual
	 */

	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */

	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
		vm_page_t m = bp->b_xio.xio_pages[i];

		vm_page_flag_clear(m, PG_SWAPINPROG);

		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */

			if (bp->b_flags & B_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
				 * AS THIS MESSES WITH object->memq, and it is
				 * not legal to mess with object->memq from an
				 * interrupt.
				 */

				m->valid = 0;
				vm_page_flag_clear(m, PG_ZERO);

				/*
				 * bio_driver_info holds the requested page
				 * index.
				 */
				if (i != (int)bio->bio_driver_info)
					vm_page_free(m);
				else
					vm_page_flash(m);
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bp->b_flags & B_READ) {
			/*
			 * For read success, clear dirty bits.  Nobody should
			 * have this page mapped but don't take any chances,
			 * make sure the pmap modify bits are also cleared.
			 *
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * clear PG_ZERO in page.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */

			pmap_clear_modify(m);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure to not unbusy getpages specifically
			 * requested page - getpages expects it to be
			 * left busy.
			 *
			 * bio_driver_info holds the requested page
			 */
			if (i != (int)bio->bio_driver_info) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * For write success, clear the modify and dirty
			 * status, then finish the I/O ( which decrements the
			 * busy count and possibly wakes waiters up ).
			 */
			pmap_clear_modify(m);
			vm_page_undirty(m);
			vm_page_io_finish(m);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
				vm_page_protect(m, VM_PROT_READ);
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */

	if (object)
		vm_object_pip_wakeupn(object, bp->b_xio.xio_npages);

	/*
	 * release the physical I/O buffer
	 */

	relpbuf(
	    bp,
	    ((bp->b_flags & B_READ) ? &nsw_rcount :
		((bp->b_flags & B_ASYNC) ?
		    &nsw_wcount_async :
		    &nsw_wcount_sync
		)
	    )
	);
	crit_exit();
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio() covers.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */

/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is an inline helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the object, or a pointer to a NULL pointer if it could not
 *	find a swapblk.
 *
 *	This routine must be called at splvm().
 */

static __inline struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];

	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return(pswap);
}
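
/*
 * Worked example (illustrative, not part of the original code):
 * assuming SWAP_META_PAGES is 16 (SWAP_META_MASK = 15), page indices
 * 32-47 of one object all collapse to the swblock key
 * index & ~SWAP_META_MASK = 32, so a single swblock carries their 16
 * swapblk slots.  The bucket is then picked by XORing that key with the
 * object pointer and masking with swhash_mask, and collisions are
 * chained through swb_hnext.
 */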

/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	This routine must be called at splvm(), except when used to convert
 *	an OBJT_DEFAULT object into an OBJT_SWAP object.
 */

static void
swp_pager_meta_build(
	vm_object_t object,
	vm_pindex_t index,
	daddr_t swapblk
) {
	struct swblock *swap;
	struct swblock **pswap;

	/*
	 * Convert default object to swap object if necessary
	 */

	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		if (object->handle != NULL) {
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object,
			    pager_object_list
			);
		} else {
			TAILQ_INSERT_TAIL(
			    &swap_pager_un_object_list,
			    object,
			    pager_object_list
			);
		}
	}

	/*
	 * Locate hash entry.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
	 */

retry:
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) == NULL) {
		int i;

		if (swapblk == SWAPBLK_NONE)
			return;

		swap = *pswap = zalloc(swap_zone);
		if (swap == NULL) {
			vm_wait();
			goto retry;
		}
		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = index & ~SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}

	/*
	 * Delete prior contents of metadata
	 */

	index &= SWAP_META_MASK;

	if (swap->swb_pages[index] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[index], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */

	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm()
 */

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
				    SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
			count -= n;
			index += n;
		}
	}
}
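
/*
 * Worked example (illustrative, not part of the original code):
 * assuming SWAP_META_PAGES is 16, a call freeing indices 5-40 walks one
 * index at a time while a swblock exists, but when the bucket covering
 * indices 16-31 was never allocated the else clause above skips ahead
 * by n = 16 - (16 & 15) = 16 entries in a single step instead of
 * probing each missing index individually.
 */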

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm()
 */

static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];
				if (v != SWAPBLK_NONE) {
					--swap->swb_count;
					swp_pager_freeswapspace(v, 1);
				}
			}
			if (swap->swb_count != 0)
				panic("swap_pager_meta_free_all: swb_count != 0");
			*pswap = swap->swb_hnext;
			zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		index += SWAP_META_PAGES;
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
	}
}

/*
 * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free.. pop it out
 */
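
/*
 * Usage sketch (illustrative, not part of the original code):
 *
 *	blk = swp_pager_meta_ctl(object, pindex, 0);	    - lookup only
 *	blk = swp_pager_meta_ctl(object, pindex, SWM_POP);  - take ownership
 *	(void)swp_pager_meta_ctl(object, pindex, SWM_FREE); - discard block
 *
 * SWM_POP hands the still-allocated block to the caller (this is how
 * swap_pager_copy() moves blocks between objects), while SWM_FREE also
 * returns the block to the swap bitmap.
 */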

static daddr_t
swp_pager_meta_ctl(
	vm_object_t object,
	vm_pindex_t index,
	int flags
) {
	struct swblock **pswap;
	struct swblock *swap;
	daddr_t r1;

	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */

	if (object->type != OBJT_SWAP)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
		}
	}
	return(r1);
}