/*
 * Copyright (c) 1998,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 * $DragonFly: src/sys/vm/swap_pager.c,v 1.14 2004/07/16 05:52:14 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/vmmeter.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */

extern int vm_swap_size;	/* number of free swap blocks, in pages */

int swap_pager_full;		/* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
static int sw_alloc_interlock;	/* swap pager allocation interlock	*/

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/

extern struct vnode *swapdev_vp;	/* from vm_swap.c */

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct pagerlst	swap_pager_object_list[NOBJLISTS];
struct pagerlst		swap_pager_un_object_list;
vm_zone_t		swap_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static vm_object_t
		swap_pager_alloc (void *handle, vm_ooffset_t size,
				  vm_prot_t prot, vm_ooffset_t offset);
static void	swap_pager_dealloc (vm_object_t object);
static int	swap_pager_getpages (vm_object_t, vm_page_t *, int, int);
static void	swap_pager_init (void);
static void	swap_pager_unswapped (vm_page_t);
static void	swap_pager_strategy (vm_object_t, struct buf *);

struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager	*/
	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpages,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage,	/* get backing store status for page	*/
	swap_pager_unswapped,	/* remove swap related to page		*/
	swap_pager_strategy	/* pager strategy call			*/
};
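
/*
 * Illustrative note (an assumption, not from this file): the generic
 * pager layer dispatches through this table by object type, so a fault
 * on an OBJT_SWAP object reaches swap_pager_getpages() roughly as
 * sketched below.  The pagertab[] array and the pgo_* member names
 * follow the traditional BSD vm_pager layout and are assumed here:
 *
 *	struct pagerops *ops = pagertab[object->type];
 *	rv = (*ops->pgo_getpages)(object, m, count, reqpage);
 */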

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */

int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

static __inline void	swp_sizecheck (void);
static void	swp_pager_sync_iodone (struct buf *bp);
static void	swp_pager_async_iodone (struct buf *bp);

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace (daddr_t blk, int npages);
static __inline daddr_t	swp_pager_getswapspace (int npages);

/*
 * Metadata functions
 */

static void swp_pager_meta_build (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free_all (vm_object_t);
static daddr_t swp_pager_meta_ctl (vm_object_t, vm_pindex_t, int);

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */

static __inline void
swp_sizecheck(void)
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */

static void
swap_pager_init(void)
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	TAILQ_INIT(&swap_pager_un_object_list);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */

	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}
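
/*
 * Worked example (illustrative, using the defaults above): with
 * SWB_NPAGES == 16, dmmax == 32 and dmmax_mask == ~31, so swap block
 * numbers 0-31 lie in one interleave stripe and 32-63 in the next.
 * The I/O paths below use tests of the form
 * ((blk1 ^ blk2) & dmmax_mask) to detect when two block numbers fall
 * in different stripes, and clip or flush a run at the stripe
 * boundary.
 */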

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */

void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the number of pages an XIO
	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.   We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */

	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold 16 pages, so this is probably overkill.  This reservation
	 * is typically limited to around 32MB by default.
	 */
	n = vmstats.v_page_count / 2;
	if (maxswzone && n > maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;

	do {
		swap_zone = zinit(
			"SWAPMETA",
			sizeof(struct swblock),
			n,
			ZONE_INTERRUPT,
			1);
		if (swap_zone != NULL)
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	if (swap_zone == NULL)
		panic("swap_pager_swap_init: swap_zone == NULL");
	if (n2 != n)
		printf("Swap zone entries reduced from %d to %d.\n", n2, n);
	n2 = n;

	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 *	n:		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */

	for (n = 1; n < n2 / 8; n *= 2)
		;

	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK);
	bzero(swhash, sizeof(struct swblock *) * n);

	swhash_mask = n - 1;
}
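
/*
 * Illustrative sizing example (not from the original): if the zone ends
 * up with n2 == 100000 swblock entries, the power-of-2 loop above
 * selects n == 16384 hash buckets (the first power of 2 >= 100000/8)
 * and swhash_mask == 0x3fff.  Each bucket heads a singly linked chain
 * of struct swblock, so expected chain lengths stay short.
 */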

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.   We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */

static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		 vm_ooffset_t offset)
{
	vm_object_t object;

	if (handle) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */

		while (sw_alloc_interlock) {
			sw_alloc_interlock = -1;
			tsleep(&sw_alloc_interlock, 0, "swpalc", 0);
		}
		sw_alloc_interlock = 1;

		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT,
				OFF_TO_IDX(offset + PAGE_MASK + size));
			object->handle = handle;

			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		}

		if (sw_alloc_interlock < 0)
			wakeup(&sw_alloc_interlock);

		sw_alloc_interlock = 0;
	} else {
		object = vm_object_allocate(OBJT_DEFAULT,
			OFF_TO_IDX(offset + PAGE_MASK + size));

		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	}

	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine may block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */

static void
swap_pager_dealloc(vm_object_t object)
{
	int s;

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */

	if (object->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
	} else {
		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
	}

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	s = splvm();
	swp_pager_meta_free_all(object);
	splx(s);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline daddr_t
swp_pager_getswapspace(int npages)
{
	daddr_t blk;

	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			printf("swap_pager_getswapspace: failed\n");
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		vm_swap_size -= npages;
		swp_sizecheck();
	}
	return(blk);
}
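
/*
 * Illustrative pairing (not in the original): at splvm(), a caller
 * needing a contiguous run of swap might do
 *
 *	daddr_t blk = swp_pager_getswapspace(4);
 *	if (blk != SWAPBLK_NONE) {
 *		...				(use blk..blk+3)
 *		swp_pager_freeswapspace(blk, 4);   (defined below)
 *	}
 *
 * The wrappers keep vm_swap_size and the low-swap hysteresis in sync
 * with the underlying blist_alloc()/blist_free() radix bitmap calls.
 */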

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline void
swp_pager_freeswapspace(daddr_t blk, int npages)
{
	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */

void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int s = splvm();
	swp_pager_meta_free(object, start, size);
	splx(s);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is
 *	destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */

int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int s;
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	s = splvm();
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg,
							    start - beg);
					splx(s);
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	splx(s);
	return(0);
}
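
/*
 * Illustrative caller (hypothetical, not in this file): a facility
 * wanting guaranteed backing store for pages [0, npages) of a swap
 * object could do
 *
 *	if (swap_pager_reserve(object, 0, npages) != 0)
 *		return (ENOMEM);	(swap exhausted)
 *
 * On failure the routine has already freed any partial assignment, so
 * the caller has nothing to unwind.
 */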

/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */

void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
		vm_pindex_t offset, int destroysource)
{
	vm_pindex_t i;
	int s;

	s = splvm();

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */

	if (destroysource) {
		if (srcobject->handle == NULL) {
			TAILQ_REMOVE(
			    &swap_pager_un_object_list,
			    srcobject,
			    pager_object_list
			);
		} else {
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
		}
	}

	/*
	 * transfer source to destination.
	 */

	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */

		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the source block.
			 */

			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */

	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
	splx(s);
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */

boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
		   int *after)
{
	daddr_t blk0;
	int s;

	/*
	 * do we have good backing store at the requested index ?
	 */

	s = splvm();
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		splx(s);
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */

	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */

	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	splx(s);
	return (TRUE);
}

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation.
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */

static void
swap_pager_unswapped(vm_page_t m)
{
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}
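
/*
 * Illustrative use of the strategy interface (hypothetical caller, not
 * in this file): backing store for a page-aligned range of an
 * OBJT_SWAP object can be read, written, or released by building a
 * struct buf and handing it to vm_pager_strategy().  For example, a
 * B_FREEBUF request destroys the underlying swap without doing I/O:
 *
 *	bp->b_flags |= B_FREEBUF;
 *	bp->b_pblkno = start_page_index;
 *	bp->b_bcount = npages * PAGE_SIZE;
 *	vm_pager_strategy(object, bp);
 *
 * swap_pager_strategy() below handles all three cases.
 */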

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly to and from the swap device.
 *
 *	Note that b_blkno is scaled for PAGE_SIZE
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */

static void
swap_pager_strategy(vm_object_t object, struct buf *bp)
{
	vm_pindex_t start;
	int count;
	int s;
	char *data;
	struct buf *nbp = NULL;

	if (bp->b_bcount & PAGE_MASK) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bp);
		printf("swap_pager_strategy: bp %p b_vp %p blk %d size %d, not page bounded\n", bp, bp->b_vp, (int)bp->b_pblkno, (int)bp->b_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */

	bp->b_error = 0;
	bp->b_flags &= ~B_ERROR;
	bp->b_resid = bp->b_bcount;

	start = bp->b_pblkno;
	count = howmany(bp->b_bcount, PAGE_SIZE);
	data = bp->b_data;

	s = splvm();

	/*
	 * Deal with B_FREEBUF
	 */

	if (bp->b_flags & B_FREEBUF) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		swp_pager_meta_free(object, start, count);
		splx(s);
		bp->b_resid = 0;
		biodone(bp);
		return;
	}

	/*
	 * Execute read or write
	 */

	while (count > 0) {
		daddr_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */

		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && (bp->b_flags & B_READ) == 0) {
			blk = swp_pager_getswapspace(1);
			if (blk == SWAPBLK_NONE) {
				bp->b_error = ENOMEM;
				bp->b_flags |= B_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */

		if (
		    nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
		     ((nbp->b_blkno ^ blk) & dmmax_mask)
		    )
		) {
			splx(s);
			if (bp->b_flags & B_READ) {
				++mycpu->gd_cnt.v_swapin;
				mycpu->gd_cnt.v_swappgsin += btoc(nbp->b_bcount);
			} else {
				++mycpu->gd_cnt.v_swapout;
				mycpu->gd_cnt.v_swappgsout += btoc(nbp->b_bcount);
				nbp->b_dirtyend = nbp->b_bcount;
			}
			flushchainbuf(nbp);
			s = splvm();
			nbp = NULL;
		}

		/*
		 * Add new swapblk to nbp, instantiating nbp if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */

		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->b_resid -= PAGE_SIZE;
		} else {
			if (nbp == NULL) {
				nbp = getchainbuf(bp, swapdev_vp, (bp->b_flags & B_READ) | B_ASYNC);
				nbp->b_blkno = blk;
				nbp->b_bcount = 0;
				nbp->b_data = data;
			}
			nbp->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}

	/*
	 * Flush out last buffer
	 */

	splx(s);

	if (nbp) {
		if ((bp->b_flags & B_ASYNC) == 0)
			nbp->b_flags &= ~B_ASYNC;
		if (nbp->b_flags & B_READ) {
			++mycpu->gd_cnt.v_swapin;
			mycpu->gd_cnt.v_swappgsin += btoc(nbp->b_bcount);
		} else {
			++mycpu->gd_cnt.v_swapout;
			mycpu->gd_cnt.v_swappgsout += btoc(nbp->b_bcount);
			nbp->b_dirtyend = nbp->b_bcount;
		}
		flushchainbuf(nbp);
		/* nbp = NULL; */
	}

	/*
	 * Wait for completion.
	 */

	if (bp->b_flags & B_ASYNC) {
		autochaindone(bp);
	} else {
		waitchainbuf(bp, 0, 1);
	}
}

/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */

static int
swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	struct buf *bp;
	vm_page_t mreq;
	int s;
	int i;
	int j;
	daddr_t blk;
	vm_offset_t kva;
	vm_pindex_t lastpindex;

	mreq = m[reqpage];

	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		    object,
		    mreq->object
		);
	}
	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range that falls entirely
	 * within a single device stripe.  If we do not supply it, bad things
	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
	 * loops are set up such that the case(s) are handled implicitly.
	 *
	 * The swp_*() calls must be made at splvm().  vm_page_free() does
	 * not need to be, but it will go a little faster if it is.
	 */

	s = splvm();
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

	for (i = reqpage - 1; i >= 0; --i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
		if (blk != iblk + (reqpage - i))
			break;
		if ((blk ^ iblk) & dmmax_mask)
			break;
	}
	++i;

	for (j = reqpage + 1; j < count; ++j) {
		daddr_t jblk;

		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
		if (blk != jblk - (j - reqpage))
			break;
		if ((blk ^ jblk) & dmmax_mask)
			break;
	}

	/*
	 * free pages outside our collection range.  Note: we never free
	 * mreq, it must remain busy throughout.
	 */

	{
		int k;

		for (k = 0; k < i; ++k)
			vm_page_free(m[k]);
		for (k = j; k < count; ++k)
			vm_page_free(m[k]);
	}
	splx(s);

	/*
	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
	 * still busy, but the others unbusied.
	 */

	if (blk == SWAPBLK_NONE)
		return(VM_PAGER_FAIL);

	/*
	 * Get a swap buffer header to perform the IO
	 */

	bp = getpbuf(&nsw_rcount);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * map our page(s) into kva for input
	 *
	 * NOTE: B_PAGING is set by pbgetvp()
	 */

	pmap_qenter(kva, m + i, j - i);

	bp->b_flags = B_READ | B_CALL;
	bp->b_iodone = swp_pager_async_iodone;
	bp->b_data = (caddr_t) kva;
	bp->b_blkno = blk - (reqpage - i);
	bp->b_bcount = PAGE_SIZE * (j - i);
	bp->b_bufsize = PAGE_SIZE * (j - i);
	bp->b_pager.pg_reqpage = reqpage - i;

	{
		int k;

		for (k = i; k < j; ++k) {
			bp->b_xio.xio_pages[k - i] = m[k];
			vm_page_flag_set(m[k], PG_SWAPINPROG);
		}
	}
	bp->b_xio.xio_npages = j - i;

	pbgetvp(swapdev_vp, bp);

	mycpu->gd_cnt.v_swapin++;
	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */

	vm_object_pip_add(mreq->object, bp->b_xio.xio_npages);
	lastpindex = m[j-1]->pindex;

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 *
	 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
	 */

	BUF_KERNPROC(bp);
	VOP_STRATEGY(bp->b_vp, bp);

	/*
	 * wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 */

	s = splvm();

	while ((mreq->flags & PG_SWAPINPROG) != 0) {
		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		mycpu->gd_cnt.v_intrans++;
		if (tsleep(mreq, 0, "swread", hz*20)) {
			printf(
			    "swap_pager: indefinite wait buffer: device:"
				" %s, blkno: %ld, size: %ld\n",
			    devtoname(bp->b_dev), (long)bp->b_blkno,
			    bp->b_bcount
			);
		}
	}

	splx(s);

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */

	if (mreq->valid != VM_PAGE_BITS_ALL) {
		return(VM_PAGER_ERROR);
	} else {
		return(VM_PAGER_OK);
	}

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 *	swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in VOP_STRATEGY(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */

void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
		    int *rtvals)
{
	int i;
	int n = 0;

	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object,
		    m[0]->object
		);
	}
	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */

	if (object->type != OBJT_SWAP)
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);

	if (curthread != pagethread)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */

	if (swap_async_max != nsw_wcount_async_max) {
		int n;
		int s;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;
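
		/*
		 * Illustrative example (not in the original): if the
		 * sysop raises vm.swap_async_max from 4 to 8, n becomes
		 * 8 and the adjustment below adds 4 to both
		 * nsw_wcount_async and nsw_wcount_async_max, waking any
		 * writers sleeping on the async buffer count.  Lowering
		 * the limit below the number of buffers currently in
		 * flight is deferred; the adjustment is simply retried
		 * on a later putpages call.
		 */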

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		s = splvm();
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
		splx(s);
	}

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		int s;
		int j;
		struct buf *bp;
		daddr_t blk;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		s = splvm();

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			splx(s);
			continue;
		}

		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.  Note: we are still
		 * at splvm().
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(blk + j, n - j);
			n = j;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 *
		 * NOTE: B_PAGING is set by pbgetvp()
		 */

		if (sync == TRUE) {
			bp = getpbuf(&nsw_wcount_sync);
			bp->b_flags = B_CALL;
		} else {
			bp = getpbuf(&nsw_wcount_async);
			bp->b_flags = B_CALL | B_ASYNC;
		}
		bp->b_spc = NULL;	/* not used, but NULL-out anyway */

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bp->b_blkno = blk;

		pbgetvp(swapdev_vp, bp);

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_xio.xio_pages[j] = mreq;
		}
		bp->b_xio.xio_npages = n;
		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		mycpu->gd_cnt.v_swapout++;
		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;
		swapdev_vp->v_numoutput++;

		splx(s);

		/*
		 * asynchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
		 */

		if (sync == FALSE) {
			bp->b_iodone = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			VOP_STRATEGY(bp->b_vp, bp);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
		 */

		bp->b_iodone = swp_pager_sync_iodone;
		VOP_STRATEGY(bp->b_vp, bp);

		/*
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		s = splbio();

		while ((bp->b_flags & B_DONE) == 0) {
			tsleep(bp, 0, "swwrt", 0);
		}

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */

		swp_pager_async_iodone(bp);

		splx(s);
	}
}

/*
 *	swp_pager_sync_iodone:
 *
 *	Completion routine for synchronous reads and writes from/to swap.
 *	We just mark the bp as complete and wake up anyone waiting on it.
 *
 *	This routine may not block.  This routine is called at splbio()
 *	or better.
 */

static void
swp_pager_sync_iodone(struct buf *bp)
{
	bp->b_flags |= B_DONE;
	bp->b_flags &= ~B_ASYNC;
	wakeup(bp);
}

/*
 *	swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 *	This routine is called at splbio() or better
 *
 *	We up ourselves to splvm() as required for various vm_page related
 *	calls.
 */

static void
swp_pager_async_iodone(struct buf *bp)
{
	int s;
	int i;
	vm_object_t object = NULL;

	bp->b_flags |= B_DONE;

	/*
	 * report error
	 */

	if (bp->b_flags & B_ERROR) {
		printf(
		    "swap_pager: I/O error - %s failed; blkno %ld, "
		    "size %ld, error %d\n",
		    ((bp->b_flags & B_READ) ? "pagein" : "pageout"),
		    (long)bp->b_blkno,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object, raise to splvm().
	 */

	if (bp->b_xio.xio_npages)
		object = bp->b_xio.xio_pages[0]->object;
	s = splvm();

	/*
	 * remove the mapping for kernel virtual
	 */

	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */

	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
		vm_page_t m = bp->b_xio.xio_pages[i];

		vm_page_flag_clear(m, PG_SWAPINPROG);

		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */

			if (bp->b_flags & B_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.
				 * We still want to
				 * wake up the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
				 * AS THIS MESSES WITH object->memq, and it is
				 * not legal to mess with object->memq from an
				 * interrupt.
				 */

				m->valid = 0;
				vm_page_flag_clear(m, PG_ZERO);

				if (i != bp->b_pager.pg_reqpage)
					vm_page_free(m);
				else
					vm_page_flash(m);
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bp->b_flags & B_READ) {
			/*
			 * For read success, clear dirty bits.  Nobody should
			 * have this page mapped but don't take any chances,
			 * make sure the pmap modify bits are also cleared.
			 *
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * clear PG_ZERO in page.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */

			pmap_clear_modify(m);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * someone could be waiting for it in getpages.
			 * However, be sure not to unbusy getpages'
			 * specifically requested page - getpages expects
			 * it to be left busy.
			 */
			if (i != bp->b_pager.pg_reqpage) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * For write success, clear the modify and dirty
			 * status, then finish the I/O ( which decrements the
			 * busy count and possibly wakes waiters up ).
			 */
			pmap_clear_modify(m);
			vm_page_undirty(m);
			vm_page_io_finish(m);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
				vm_page_protect(m, VM_PROT_READ);
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */

	if (object)
		vm_object_pip_wakeupn(object, bp->b_xio.xio_npages);

	/*
	 * release the physical I/O buffer
	 */

	relpbuf(
	    bp,
	    ((bp->b_flags & B_READ) ? &nsw_rcount :
		((bp->b_flags & B_ASYNC) ?
		    &nsw_wcount_async :
		    &nsw_wcount_sync
		)
	    )
	);
	splx(s);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio() covers.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */

/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is an inline helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the swblock, or a pointer to a NULL pointer if it could not
 *	find a swapblk.
 *
 *	This routine must be called at splvm().
 */

static __inline struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];

	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return(pswap);
}
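
/*
 * Worked example (illustrative; SWAP_META_PAGES and SWAP_META_MASK ==
 * SWAP_META_PAGES - 1 come from <vm/swap_pager.h>, and the value 32
 * used here is an assumption): with SWAP_META_PAGES == 32, a lookup
 * for page index 37 rounds down to the owning swblock's base index,
 * 37 & ~31 == 32, hashes (32 ^ object) into swhash, and the caller
 * then reads swb_pages[37 & 31], i.e. slot 5 of that swblock.
 */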

/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	This routine must be called at splvm(), except when used to convert
 *	an OBJT_DEFAULT object into an OBJT_SWAP object.
 */

static void
swp_pager_meta_build(
	vm_object_t object,
	vm_pindex_t index,
	daddr_t swapblk
) {
	struct swblock *swap;
	struct swblock **pswap;

	/*
	 * Convert default object to swap object if necessary
	 */

	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		if (object->handle != NULL) {
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object,
			    pager_object_list
			);
		} else {
			TAILQ_INSERT_TAIL(
			    &swap_pager_un_object_list,
			    object,
			    pager_object_list
			);
		}
	}

	/*
	 * Locate hash entry.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
	 */

retry:
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) == NULL) {
		int i;

		if (swapblk == SWAPBLK_NONE)
			return;

		swap = *pswap = zalloc(swap_zone);
		if (swap == NULL) {
			vm_wait();
			goto retry;
		}
		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = index & ~SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}

	/*
	 * Delete prior contents of metadata
	 */

	index &= SWAP_META_MASK;

	if (swap->swb_pages[index] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[index], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */

	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm()
 */

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
				    SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
			count -= n;
			index += n;
		}
	}
}

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm()
 */

static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];
				if (v != SWAPBLK_NONE) {
					--swap->swb_count;
					swp_pager_freeswapspace(v, 1);
				}
			}
			if (swap->swb_count != 0)
				panic("swap_pager_meta_free_all: swb_count != 0");
			*pswap = swap->swb_hnext;
			zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		index += SWAP_META_PAGES;
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
	}
}

/*
 * SWP_PAGER_META_CTL() -	misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free it; pop it out
 */

static daddr_t
swp_pager_meta_ctl(
	vm_object_t object,
	vm_pindex_t index,
	int flags
) {
	struct swblock **pswap;
	struct swblock *swap;
	daddr_t r1;

	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */

	if (object->type != OBJT_SWAP)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
		}
	}
	return(r1);
}
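
/*
 * Usage sketch (illustrative, not part of the original file).  The
 * three modes of swp_pager_meta_ctl(), all at splvm():
 *
 *	blk = swp_pager_meta_ctl(object, pindex, 0);	     (lookup only)
 *	blk = swp_pager_meta_ctl(object, pindex, SWM_POP);   (remove from
 *							      metadata; caller
 *							      now owns blk)
 *	swp_pager_meta_ctl(object, pindex, SWM_FREE);	     (free and forget)
 *
 * SWM_POP is what swap_pager_copy() uses to move a block from the
 * source object to the destination without returning it to the bitmap.
 */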