/*
 * Copyright (c) 1998,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 * $DragonFly: src/sys/vm/swap_pager.c,v 1.17 2005/08/03 16:36:33 hmp Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/thread2.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
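 *
 * For example, assuming PAGE_SIZE is 4096, a 256MB swap area counts as
 * 65536 free blocks here, where the old system would have counted
 * 524288 DEV_BSIZE (512 byte) chunks.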
 */

extern int vm_swap_size;	/* number of free swap blocks, in pages */

int swap_pager_full;		/* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
static int sw_alloc_interlock;	/* swap pager allocation interlock	*/

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/

extern struct vnode *swapdev_vp;	/* from vm_swap.c */

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct pagerlst	swap_pager_object_list[NOBJLISTS];
struct pagerlst		swap_pager_un_object_list;
vm_zone_t		swap_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static vm_object_t
		swap_pager_alloc (void *handle, vm_ooffset_t size,
				  vm_prot_t prot, vm_ooffset_t offset);
static void	swap_pager_dealloc (vm_object_t object);
static int	swap_pager_getpages (vm_object_t, vm_page_t *, int, int);
static void	swap_pager_init (void);
static void	swap_pager_unswapped (vm_page_t);
static void	swap_pager_strategy (vm_object_t, struct buf *);

struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager	*/
	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpages,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage,	/* get backing store status for page	*/
	swap_pager_unswapped,	/* remove swap related to page		*/
	swap_pager_strategy	/* pager strategy call			*/
};

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
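 *
 * For example, with MAX_PAGEOUT_CLUSTER at its default of 16, dmmax is
 * 32 pages and dmmax_mask is ~31, so a test such as
 * ((blk ^ (blk + n)) & dmmax_mask) is nonzero exactly when a run of n
 * blocks starting at blk would cross a 32-page device stripe boundary.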
 */

int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

static __inline void	swp_sizecheck (void);
static void	swp_pager_sync_iodone (struct buf *bp);
static void	swp_pager_async_iodone (struct buf *bp);

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace (daddr_t blk, int npages);
static __inline daddr_t	swp_pager_getswapspace (int npages);

/*
 * Metadata functions
 */

static void swp_pager_meta_build (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free_all (vm_object_t);
static daddr_t swp_pager_meta_ctl (vm_object_t, vm_pindex_t, int);

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */

static __inline void
swp_sizecheck(void)
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */

static void
swap_pager_init(void)
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	TAILQ_INIT(&swap_pager_un_object_list);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */

	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */

void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the number of pages an XIO
	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.
	 * We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */

	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold 16 pages, so this is probably overkill.  This reservation
	 * is typically limited to around 32MB by default.
	 */
	n = vmstats.v_page_count / 2;
	if (maxswzone && n > maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;

	do {
		swap_zone = zinit(
			"SWAPMETA",
			sizeof(struct swblock),
			n,
			ZONE_INTERRUPT,
			1);
		if (swap_zone != NULL)
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	if (swap_zone == NULL)
		panic("swap_pager_swap_init: swap_zone == NULL");
	if (n2 != n)
		printf("Swap zone entries reduced from %d to %d.\n", n2, n);
	n2 = n;

	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 *	n:		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */

	for (n = 1; n < n2 / 8; n *= 2)
		;

	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK);
	bzero(swhash, sizeof(struct swblock *) * n);

	swhash_mask = n - 1;
}

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.  We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */

static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		 vm_ooffset_t offset)
{
	vm_object_t object;

	if (handle) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
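		 *
		 * The sw_alloc_interlock variable below acts as a small
		 * sleep lock: 0 means free, 1 means held, and -1 means held
		 * with at least one thread sleeping on its address and
		 * expecting a wakeup when it is released.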
		 */

		while (sw_alloc_interlock) {
			sw_alloc_interlock = -1;
			tsleep(&sw_alloc_interlock, 0, "swpalc", 0);
		}
		sw_alloc_interlock = 1;

		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT,
				OFF_TO_IDX(offset + PAGE_MASK + size));
			object->handle = handle;

			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		}

		if (sw_alloc_interlock < 0)
			wakeup(&sw_alloc_interlock);

		sw_alloc_interlock = 0;
	} else {
		object = vm_object_allocate(OBJT_DEFAULT,
			OFF_TO_IDX(offset + PAGE_MASK + size));

		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	}

	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine may block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */

static void
swap_pager_dealloc(vm_object_t object)
{
	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */

	if (object->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
	} else {
		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
	}

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	crit_enter();
	swp_pager_meta_free_all(object);
	crit_exit();
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline daddr_t
swp_pager_getswapspace(int npages)
{
	daddr_t blk;

	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			printf("swap_pager_getswapspace: failed\n");
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		vm_swap_size -= npages;
		swp_sizecheck();
	}
	return(blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline void
swp_pager_freeswapspace(daddr_t blk, int npages)
{
	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */

void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	crit_enter();
	swp_pager_meta_free(object, start, size);
	crit_exit();
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is
 *	destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */

int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	crit_enter();
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg, start - beg);
					crit_exit();
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	crit_exit();
	return(0);
}

/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */

void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
    vm_pindex_t offset, int destroysource)
{
	vm_pindex_t i;

	crit_enter();

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */

	if (destroysource) {
		if (srcobject->handle == NULL) {
			TAILQ_REMOVE(
			    &swap_pager_un_object_list,
			    srcobject,
			    pager_object_list
			);
		} else {
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
		}
	}

	/*
	 * transfer source to destination.
	 */

	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */

		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the sourceblock.
			 */

			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not
	 * accidentally double-remove the object from the swap queues.
	 */

	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
	crit_exit();
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */

boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	daddr_t blk0;

	/*
	 * do we have good backing store at the requested index ?
	 */

	crit_enter();
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		crit_exit();
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */

	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */

	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	crit_exit();
	return (TRUE);
}

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */

static void
swap_pager_unswapped(vm_page_t m)
{
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly to and from the swap device.
 *
 *	Note that b_blkno is scaled for PAGE_SIZE
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */

static void
swap_pager_strategy(vm_object_t object, struct buf *bp)
{
	vm_pindex_t start;
	int count;
	char *data;
	struct buf *nbp = NULL;

	if (bp->b_bcount & PAGE_MASK) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bp);
		printf("swap_pager_strategy: bp %p b_vp %p blk %d size %d, not page bounded\n", bp, bp->b_vp, (int)bp->b_pblkno, (int)bp->b_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */

	bp->b_error = 0;
	bp->b_flags &= ~B_ERROR;
	bp->b_resid = bp->b_bcount;

	start = bp->b_pblkno;
	count = howmany(bp->b_bcount, PAGE_SIZE);
	data = bp->b_data;

	crit_enter();

	/*
	 * Deal with B_FREEBUF
	 */

	if (bp->b_flags & B_FREEBUF) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		swp_pager_meta_free(object, start, count);
		crit_exit();
		bp->b_resid = 0;
		biodone(bp);
		return;
	}

	/*
	 * Execute read or write
	 */

	while (count > 0) {
		daddr_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */

		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && (bp->b_flags & B_READ) == 0) {
			blk = swp_pager_getswapspace(1);
			if (blk == SWAPBLK_NONE) {
				bp->b_error = ENOMEM;
				bp->b_flags |= B_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */

		if (
		    nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
		     ((nbp->b_blkno ^ blk) & dmmax_mask)
		    )
		) {
			crit_exit();
			if (bp->b_flags & B_READ) {
				++mycpu->gd_cnt.v_swapin;
				mycpu->gd_cnt.v_swappgsin += btoc(nbp->b_bcount);
			} else {
				++mycpu->gd_cnt.v_swapout;
				mycpu->gd_cnt.v_swappgsout += btoc(nbp->b_bcount);
				nbp->b_dirtyend = nbp->b_bcount;
			}
			flushchainbuf(nbp);
			crit_enter();
			nbp = NULL;
		}

		/*
		 * Add new swapblk to nbp, instantiating nbp if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */

		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->b_resid -= PAGE_SIZE;
		} else {
			if (nbp == NULL) {
				nbp = getchainbuf(bp, swapdev_vp, (bp->b_flags & B_READ) | B_ASYNC);
				nbp->b_blkno = blk;
				nbp->b_bcount = 0;
				nbp->b_data = data;
			}
			nbp->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}

	/*
	 * Flush out last buffer
	 */

	crit_exit();

	if (nbp) {
		if ((bp->b_flags & B_ASYNC) == 0)
			nbp->b_flags &= ~B_ASYNC;
		if (nbp->b_flags & B_READ) {
			++mycpu->gd_cnt.v_swapin;
			mycpu->gd_cnt.v_swappgsin += btoc(nbp->b_bcount);
		} else {
			++mycpu->gd_cnt.v_swapout;
			mycpu->gd_cnt.v_swappgsout += btoc(nbp->b_bcount);
			nbp->b_dirtyend = nbp->b_bcount;
		}
		flushchainbuf(nbp);
		/* nbp = NULL; */
	}

	/*
	 * Wait for completion.
	 */

	if (bp->b_flags & B_ASYNC) {
		autochaindone(bp);
	} else {
		waitchainbuf(bp, 0, 1);
	}
}

/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */

static int
swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	struct buf *bp;
	vm_page_t mreq;
	int i;
	int j;
	daddr_t blk;
	vm_offset_t kva;
	vm_pindex_t lastpindex;

	mreq = m[reqpage];

	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		    object,
		    mreq->object
		);
	}
	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range that falls entirely
	 * within a single device stripe.  If we do not supply it, bad things
	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
	 * loops are set up such that the case(s) are handled implicitly.
	 *
	 * The swp_*() calls must be made at splvm().  vm_page_free() does
	 * not need to be, but it will go a little faster if it is.
	 */

	crit_enter();
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

	for (i = reqpage - 1; i >= 0; --i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
		if (blk != iblk + (reqpage - i))
			break;
		if ((blk ^ iblk) & dmmax_mask)
			break;
	}
	++i;

	for (j = reqpage + 1; j < count; ++j) {
		daddr_t jblk;

		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
		if (blk != jblk - (j - reqpage))
			break;
		if ((blk ^ jblk) & dmmax_mask)
			break;
	}

	/*
	 * free pages outside our collection range.  Note: we never free
	 * mreq, it must remain busy throughout.
	 */

	{
		int k;

		for (k = 0; k < i; ++k)
			vm_page_free(m[k]);
		for (k = j; k < count; ++k)
			vm_page_free(m[k]);
	}
	crit_exit();

	/*
	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
	 * still busy, but the others unbusied.
	 */

	if (blk == SWAPBLK_NONE)
		return(VM_PAGER_FAIL);

	/*
	 * Get a swap buffer header to perform the IO
	 */

	bp = getpbuf(&nsw_rcount);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * map our page(s) into kva for input
	 *
	 * NOTE: B_PAGING is set by pbgetvp()
	 */

	pmap_qenter(kva, m + i, j - i);

	bp->b_flags = B_READ;
	bp->b_iodone = swp_pager_async_iodone;
	bp->b_data = (caddr_t) kva;
	bp->b_blkno = blk - (reqpage - i);
	bp->b_bcount = PAGE_SIZE * (j - i);
	bp->b_bufsize = PAGE_SIZE * (j - i);
	bp->b_pager.pg_reqpage = reqpage - i;

	{
		int k;

		for (k = i; k < j; ++k) {
			bp->b_xio.xio_pages[k - i] = m[k];
			vm_page_flag_set(m[k], PG_SWAPINPROG);
		}
	}
	bp->b_xio.xio_npages = j - i;

	pbgetvp(swapdev_vp, bp);

	mycpu->gd_cnt.v_swapin++;
	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */

	vm_object_pip_add(mreq->object, bp->b_xio.xio_npages);
	lastpindex = m[j-1]->pindex;

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 *
	 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
	 */

	BUF_KERNPROC(bp);
	VOP_STRATEGY(bp->b_vp, bp);

	/*
	 * wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 */

	crit_enter();

	while ((mreq->flags & PG_SWAPINPROG) != 0) {
		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		mycpu->gd_cnt.v_intrans++;
		if (tsleep(mreq, 0, "swread", hz*20)) {
			printf(
			    "swap_pager: indefinite wait buffer: device:"
				" %s, blkno: %ld, size: %ld\n",
			    devtoname(bp->b_dev), (long)bp->b_blkno,
			    bp->b_bcount
			);
		}
	}

	crit_exit();

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */

	if (mreq->valid != VM_PAGE_BITS_ALL) {
		return(VM_PAGER_ERROR);
	} else {
		return(VM_PAGER_OK);
	}

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 * swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in VOP_STRATEGY(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */

void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
    int *rtvals)
{
	int i;
	int n = 0;

	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object,
		    m[0]->object
		);
	}
	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */

	if (object->type != OBJT_SWAP)
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);

	if (curthread != pagethread)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */

	if (swap_async_max != nsw_wcount_async_max) {
		int n;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).
		 * If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		crit_enter();
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
		crit_exit();
	}

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		int j;
		struct buf *bp;
		daddr_t blk;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		crit_enter();

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			crit_exit();
			continue;
		}

		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.  Note: we are still
		 * at splvm().
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(blk + j, n - j);
			n = j;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 *
		 * NOTE: B_PAGING is set by pbgetvp()
		 */

		if (sync == TRUE) {
			bp = getpbuf(&nsw_wcount_sync);
		} else {
			bp = getpbuf(&nsw_wcount_async);
			bp->b_flags = B_ASYNC;
		}
		bp->b_spc = NULL;	/* not used, but NULL-out anyway */

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bp->b_blkno = blk;

		pbgetvp(swapdev_vp, bp);

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_xio.xio_pages[j] = mreq;
		}
		bp->b_xio.xio_npages = n;
		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		mycpu->gd_cnt.v_swapout++;
		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;
		swapdev_vp->v_numoutput++;

		crit_exit();

		/*
		 * asynchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
		 */

		if (sync == FALSE) {
			bp->b_iodone = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			VOP_STRATEGY(bp->b_vp, bp);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
		 */

		bp->b_iodone = swp_pager_sync_iodone;
		VOP_STRATEGY(bp->b_vp, bp);

		/*
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		crit_enter();

		while ((bp->b_flags & B_DONE) == 0) {
			tsleep(bp, 0, "swwrt", 0);
		}

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */

		swp_pager_async_iodone(bp);

		crit_exit();
	}
}

/*
 * swap_pager_sync_iodone:
 *
 *	Completion routine for synchronous reads and writes from/to swap.
 *	We just mark the bp as complete and wake up anyone waiting on it.
 *
 *	This routine may not block.  This routine is called at splbio()
 *	or better.
 */

static void
swp_pager_sync_iodone(struct buf *bp)
{
	bp->b_flags |= B_DONE;
	bp->b_flags &= ~B_ASYNC;
	wakeup(bp);
}

/*
 * swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 *	This routine is called at splbio() or better
 *
 *	We up ourselves to splvm() as required for various vm_page related
 *	calls.
 */

static void
swp_pager_async_iodone(struct buf *bp)
{
	int i;
	vm_object_t object = NULL;

	bp->b_flags |= B_DONE;

	/*
	 * report error
	 */

	if (bp->b_flags & B_ERROR) {
		printf(
		    "swap_pager: I/O error - %s failed; blkno %ld,"
			"size %ld, error %d\n",
		    ((bp->b_flags & B_READ) ? "pagein" : "pageout"),
		    (long)bp->b_blkno,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object, raise to splvm().
	 */

	if (bp->b_xio.xio_npages)
		object = bp->b_xio.xio_pages[0]->object;
	crit_enter();

	/*
	 * remove the mapping for kernel virtual
	 */

	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */

	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
		vm_page_t m = bp->b_xio.xio_pages[i];

		vm_page_flag_clear(m, PG_SWAPINPROG);

		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */

			if (bp->b_flags & B_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
				 * AS THIS MESSES WITH object->memq, and it is
				 * not legal to mess with object->memq from an
				 * interrupt.
				 */

				m->valid = 0;
				vm_page_flag_clear(m, PG_ZERO);

				if (i != bp->b_pager.pg_reqpage)
					vm_page_free(m);
				else
					vm_page_flash(m);
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bp->b_flags & B_READ) {
			/*
			 * For read success, clear dirty bits.  Nobody should
			 * have this page mapped but don't take any chances,
			 * make sure the pmap modify bits are also cleared.
			 *
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * clear PG_ZERO in page.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */

			pmap_clear_modify(m);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure to not unbusy getpages specifically
			 * requested page - getpages expects it to be
			 * left busy.
			 */
			if (i != bp->b_pager.pg_reqpage) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * For write success, clear the modify and dirty
			 * status, then finish the I/O ( which decrements the
			 * busy count and possibly wakes waiters up ).
			 */
			pmap_clear_modify(m);
			vm_page_undirty(m);
			vm_page_io_finish(m);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
				vm_page_protect(m, VM_PROT_READ);
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */

	if (object)
		vm_object_pip_wakeupn(object, bp->b_xio.xio_npages);

	/*
	 * release the physical I/O buffer
	 */

	relpbuf(
	    bp,
	    ((bp->b_flags & B_READ) ? &nsw_rcount :
		((bp->b_flags & B_ASYNC) ?
		    &nsw_wcount_async :
		    &nsw_wcount_sync
		)
	    )
	);
	crit_exit();
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio() covers.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */

/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is an inline helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the object, or a pointer to a NULL pointer if it could not
 *	find a swapblk.
 *
 *	This routine must be called at splvm().
 */

static __inline struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];

	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return(pswap);
}

/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	This routine must be called at splvm(), except when used to convert
 *	an OBJT_DEFAULT object into an OBJT_SWAP object.
 */

static void
swp_pager_meta_build(
	vm_object_t object,
	vm_pindex_t index,
	daddr_t swapblk
) {
	struct swblock *swap;
	struct swblock **pswap;

	/*
	 * Convert default object to swap object if necessary
	 */

	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		if (object->handle != NULL) {
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object,
			    pager_object_list
			);
		} else {
			TAILQ_INSERT_TAIL(
			    &swap_pager_un_object_list,
			    object,
			    pager_object_list
			);
		}
	}

	/*
	 * Locate hash entry.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
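	 *
	 * (The bucket searched is the one swp_pager_hash() selects by
	 * xor'ing the object pointer with the page index rounded down to
	 * an swblock boundary, masked with swhash_mask.)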
	 */

retry:
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) == NULL) {
		int i;

		if (swapblk == SWAPBLK_NONE)
			return;

		swap = *pswap = zalloc(swap_zone);
		if (swap == NULL) {
			vm_wait();
			goto retry;
		}
		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = index & ~SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}

	/*
	 * Delete prior contents of metadata
	 */

	index &= SWAP_META_MASK;

	if (swap->swb_pages[index] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[index], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */

	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm()
 */

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
					SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
			count -= n;
			index += n;
		}
	}
}

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm()
 */

static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];
				if (v != SWAPBLK_NONE) {
					--swap->swb_count;
					swp_pager_freeswapspace(v, 1);
				}
			}
			if (swap->swb_count != 0)
				panic("swap_pager_meta_free_all: swb_count != 0");
			*pswap = swap->swb_hnext;
			zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		index += SWAP_META_PAGES;
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
	}
}

/*
 * SWP_PAGER_META_CTL() -	misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free.. pop it out
 */

static daddr_t
swp_pager_meta_ctl(
	vm_object_t object,
	vm_pindex_t index,
	int flags
) {
	struct swblock **pswap;
	struct swblock *swap;
	daddr_t r1;

	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */

	if (object->type != OBJT_SWAP)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
		}
	}
	return(r1);
}