1 /* 2 * Copyright (c) 1998 Matthew Dillon, 3 * Copyright (c) 1994 John S. Dyson 4 * Copyright (c) 1990 University of Utah. 5 * Copyright (c) 1991, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * the Systems Programming Group of the University of Utah Computer 10 * Science Department. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by the University of 23 * California, Berkeley and its contributors. 24 * 4. Neither the name of the University nor the names of its contributors 25 * may be used to endorse or promote products derived from this software 26 * without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 39 * 40 * New Swap System 41 * Matthew Dillon 42 * 43 * Radix Bitmap 'blists'. 44 * 45 * - The new swapper uses the new radix bitmap code. This should scale 46 * to arbitrarily small or arbitrarily large swap spaces and an almost 47 * arbitrary degree of fragmentation. 48 * 49 * Features: 50 * 51 * - on the fly reallocation of swap during putpages. The new system 52 * does not try to keep previously allocated swap blocks for dirty 53 * pages. 54 * 55 * - on the fly deallocation of swap 56 * 57 * - No more garbage collection required. Unnecessarily allocated swap 58 * blocks only exist for dirty vm_page_t's now and these are already 59 * cycled (in a high-load system) by the pager. We also do on-the-fly 60 * removal of invalidated swap blocks when a page is destroyed 61 * or renamed. 62 * 63 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$ 64 * 65 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94 66 * 67 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $ 68 * $DragonFly: src/sys/vm/swap_pager.c,v 1.10 2003/08/20 08:03:01 rob Exp $ 69 */ 70 71 #include <sys/param.h> 72 #include <sys/systm.h> 73 #include <sys/conf.h> 74 #include <sys/kernel.h> 75 #include <sys/proc.h> 76 #include <sys/buf.h> 77 #include <sys/vnode.h> 78 #include <sys/malloc.h> 79 #include <sys/vmmeter.h> 80 #include <sys/sysctl.h> 81 #include <sys/blist.h> 82 #include <sys/lock.h> 83 #include <sys/vmmeter.h> 84 85 #ifndef MAX_PAGEOUT_CLUSTER 86 #define MAX_PAGEOUT_CLUSTER 16 87 #endif 88 89 #define SWB_NPAGES MAX_PAGEOUT_CLUSTER 90 91 #include "opt_swap.h" 92 #include <vm/vm.h> 93 #include <vm/vm_object.h> 94 #include <vm/vm_page.h> 95 #include <vm/vm_pager.h> 96 #include <vm/vm_pageout.h> 97 #include <vm/swap_pager.h> 98 #include <vm/vm_extern.h> 99 #include <vm/vm_zone.h> 100 101 #include <sys/buf2.h> 102 #include <vm/vm_page2.h> 103 104 #define SWM_FREE 0x02 /* free, period */ 105 #define SWM_POP 0x04 /* pop out */ 106 107 /* 108 * vm_swap_size is in page-sized chunks now. It was DEV_BSIZE'd chunks 109 * in the old system. 110 */ 111 112 extern int vm_swap_size; /* number of free swap blocks, in pages */ 113 114 int swap_pager_full; /* swap space exhaustion (task killing) */ 115 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/ 116 static int nsw_rcount; /* free read buffers */ 117 static int nsw_wcount_sync; /* limit write buffers / synchronous */ 118 static int nsw_wcount_async; /* limit write buffers / asynchronous */ 119 static int nsw_wcount_async_max;/* assigned maximum */ 120 static int nsw_cluster_max; /* maximum VOP I/O allowed */ 121 static int sw_alloc_interlock; /* swap pager allocation interlock */ 122 123 struct blist *swapblist; 124 static struct swblock **swhash; 125 static int swhash_mask; 126 static int swap_async_max = 4; /* maximum in-progress async I/O's */ 127 128 extern struct vnode *swapdev_vp; /* from vm_swap.c */ 129 130 SYSCTL_INT(_vm, OID_AUTO, swap_async_max, 131 CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops"); 132 133 /* 134 * "named" and "unnamed" anon region objects. Try to reduce the overhead 135 * of searching a named list by hashing it just a little. 136 */ 137 138 #define NOBJLISTS 8 139 140 #define NOBJLIST(handle) \ 141 (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)]) 142 143 static struct pagerlst swap_pager_object_list[NOBJLISTS]; 144 struct pagerlst swap_pager_un_object_list; 145 vm_zone_t swap_zone; 146 147 /* 148 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure 149 * calls hooked from other parts of the VM system and do not appear here. 150 * (see vm/swap_pager.h). 151 */ 152 153 static vm_object_t 154 swap_pager_alloc (void *handle, vm_ooffset_t size, 155 vm_prot_t prot, vm_ooffset_t offset); 156 static void swap_pager_dealloc (vm_object_t object); 157 static int swap_pager_getpages (vm_object_t, vm_page_t *, int, int); 158 static void swap_pager_init (void); 159 static void swap_pager_unswapped (vm_page_t); 160 static void swap_pager_strategy (vm_object_t, struct buf *); 161 162 struct pagerops swappagerops = { 163 swap_pager_init, /* early system initialization of pager */ 164 swap_pager_alloc, /* allocate an OBJT_SWAP object */ 165 swap_pager_dealloc, /* deallocate an OBJT_SWAP object */ 166 swap_pager_getpages, /* pagein */ 167 swap_pager_putpages, /* pageout */ 168 swap_pager_haspage, /* get backing store status for page */ 169 swap_pager_unswapped, /* remove swap related to page */ 170 swap_pager_strategy /* pager strategy call */ 171 }; 172 173 /* 174 * dmmax is in page-sized chunks with the new swap system. It was 175 * dev-bsized chunks in the old. dmmax is always a power of 2. 176 * 177 * swap_*() routines are externally accessible. swp_*() routines are 178 * internal. 179 */ 180 181 int dmmax; 182 static int dmmax_mask; 183 int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */ 184 int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */ 185 186 static __inline void swp_sizecheck (void); 187 static void swp_pager_sync_iodone (struct buf *bp); 188 static void swp_pager_async_iodone (struct buf *bp); 189 190 /* 191 * Swap bitmap functions 192 */ 193 194 static __inline void swp_pager_freeswapspace (daddr_t blk, int npages); 195 static __inline daddr_t swp_pager_getswapspace (int npages); 196 197 /* 198 * Metadata functions 199 */ 200 201 static void swp_pager_meta_build (vm_object_t, vm_pindex_t, daddr_t); 202 static void swp_pager_meta_free (vm_object_t, vm_pindex_t, daddr_t); 203 static void swp_pager_meta_free_all (vm_object_t); 204 static daddr_t swp_pager_meta_ctl (vm_object_t, vm_pindex_t, int); 205 206 /* 207 * SWP_SIZECHECK() - update swap_pager_full indication 208 * 209 * update the swap_pager_almost_full indication and warn when we are 210 * about to run out of swap space, using lowat/hiwat hysteresis. 211 * 212 * Clear swap_pager_full ( task killing ) indication when lowat is met. 213 * 214 * No restrictions on call 215 * This routine may not block. 216 * This routine must be called at splvm() 217 */ 218 219 static __inline void 220 swp_sizecheck() 221 { 222 if (vm_swap_size < nswap_lowat) { 223 if (swap_pager_almost_full == 0) { 224 printf("swap_pager: out of swap space\n"); 225 swap_pager_almost_full = 1; 226 } 227 } else { 228 swap_pager_full = 0; 229 if (vm_swap_size > nswap_hiwat) 230 swap_pager_almost_full = 0; 231 } 232 } 233 234 /* 235 * SWAP_PAGER_INIT() - initialize the swap pager! 236 * 237 * Expected to be started from system init. NOTE: This code is run 238 * before much else so be careful what you depend on. Most of the VM 239 * system has yet to be initialized at this point. 240 */ 241 242 static void 243 swap_pager_init() 244 { 245 /* 246 * Initialize object lists 247 */ 248 int i; 249 250 for (i = 0; i < NOBJLISTS; ++i) 251 TAILQ_INIT(&swap_pager_object_list[i]); 252 TAILQ_INIT(&swap_pager_un_object_list); 253 254 /* 255 * Device Stripe, in PAGE_SIZE'd blocks 256 */ 257 258 dmmax = SWB_NPAGES * 2; 259 dmmax_mask = ~(dmmax - 1); 260 } 261 262 /* 263 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process 264 * 265 * Expected to be started from pageout process once, prior to entering 266 * its main loop. 267 */ 268 269 void 270 swap_pager_swap_init() 271 { 272 int n, n2; 273 274 /* 275 * Number of in-transit swap bp operations. Don't 276 * exhaust the pbufs completely. Make sure we 277 * initialize workable values (0 will work for hysteresis 278 * but it isn't very efficient). 279 * 280 * The nsw_cluster_max is constrained by the bp->b_pages[] 281 * array (MAXPHYS/PAGE_SIZE) and our locally defined 282 * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are 283 * constrained by the swap device interleave stripe size. 284 * 285 * Currently we hardwire nsw_wcount_async to 4. This limit is 286 * designed to prevent other I/O from having high latencies due to 287 * our pageout I/O. The value 4 works well for one or two active swap 288 * devices but is probably a little low if you have more. Even so, 289 * a higher value would probably generate only a limited improvement 290 * with three or four active swap devices since the system does not 291 * typically have to pageout at extreme bandwidths. We will want 292 * at least 2 per swap devices, and 4 is a pretty good value if you 293 * have one NFS swap device due to the command/ack latency over NFS. 294 * So it all works out pretty well. 295 */ 296 297 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER); 298 299 nsw_rcount = (nswbuf + 1) / 2; 300 nsw_wcount_sync = (nswbuf + 3) / 4; 301 nsw_wcount_async = 4; 302 nsw_wcount_async_max = nsw_wcount_async; 303 304 /* 305 * Initialize our zone. Right now I'm just guessing on the number 306 * we need based on the number of pages in the system. Each swblock 307 * can hold 16 pages, so this is probably overkill. This reservation 308 * is typically limited to around 32MB by default. 309 */ 310 n = vmstats.v_page_count / 2; 311 if (maxswzone && n > maxswzone / sizeof(struct swblock)) 312 n = maxswzone / sizeof(struct swblock); 313 n2 = n; 314 315 do { 316 swap_zone = zinit( 317 "SWAPMETA", 318 sizeof(struct swblock), 319 n, 320 ZONE_INTERRUPT, 321 1); 322 if (swap_zone != NULL) 323 break; 324 /* 325 * if the allocation failed, try a zone two thirds the 326 * size of the previous attempt. 327 */ 328 n -= ((n + 2) / 3); 329 } while (n > 0); 330 331 if (swap_zone == NULL) 332 panic("swap_pager_swap_init: swap_zone == NULL"); 333 if (n2 != n) 334 printf("Swap zone entries reduced from %d to %d.\n", n2, n); 335 n2 = n; 336 337 /* 338 * Initialize our meta-data hash table. The swapper does not need to 339 * be quite as efficient as the VM system, so we do not use an 340 * oversized hash table. 341 * 342 * n: size of hash table, must be power of 2 343 * swhash_mask: hash table index mask 344 */ 345 346 for (n = 1; n < n2 / 8; n *= 2) 347 ; 348 349 swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK); 350 bzero(swhash, sizeof(struct swblock *) * n); 351 352 swhash_mask = n - 1; 353 } 354 355 /* 356 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate 357 * its metadata structures. 358 * 359 * This routine is called from the mmap and fork code to create a new 360 * OBJT_SWAP object. We do this by creating an OBJT_DEFAULT object 361 * and then converting it with swp_pager_meta_build(). 362 * 363 * This routine may block in vm_object_allocate() and create a named 364 * object lookup race, so we must interlock. We must also run at 365 * splvm() for the object lookup to handle races with interrupts, but 366 * we do not have to maintain splvm() in between the lookup and the 367 * add because (I believe) it is not possible to attempt to create 368 * a new swap object w/handle when a default object with that handle 369 * already exists. 370 */ 371 372 static vm_object_t 373 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, 374 vm_ooffset_t offset) 375 { 376 vm_object_t object; 377 378 if (handle) { 379 /* 380 * Reference existing named region or allocate new one. There 381 * should not be a race here against swp_pager_meta_build() 382 * as called from vm_page_remove() in regards to the lookup 383 * of the handle. 384 */ 385 386 while (sw_alloc_interlock) { 387 sw_alloc_interlock = -1; 388 tsleep(&sw_alloc_interlock, 0, "swpalc", 0); 389 } 390 sw_alloc_interlock = 1; 391 392 object = vm_pager_object_lookup(NOBJLIST(handle), handle); 393 394 if (object != NULL) { 395 vm_object_reference(object); 396 } else { 397 object = vm_object_allocate(OBJT_DEFAULT, 398 OFF_TO_IDX(offset + PAGE_MASK + size)); 399 object->handle = handle; 400 401 swp_pager_meta_build(object, 0, SWAPBLK_NONE); 402 } 403 404 if (sw_alloc_interlock < 0) 405 wakeup(&sw_alloc_interlock); 406 407 sw_alloc_interlock = 0; 408 } else { 409 object = vm_object_allocate(OBJT_DEFAULT, 410 OFF_TO_IDX(offset + PAGE_MASK + size)); 411 412 swp_pager_meta_build(object, 0, SWAPBLK_NONE); 413 } 414 415 return (object); 416 } 417 418 /* 419 * SWAP_PAGER_DEALLOC() - remove swap metadata from object 420 * 421 * The swap backing for the object is destroyed. The code is 422 * designed such that we can reinstantiate it later, but this 423 * routine is typically called only when the entire object is 424 * about to be destroyed. 425 * 426 * This routine may block, but no longer does. 427 * 428 * The object must be locked or unreferenceable. 429 */ 430 431 static void 432 swap_pager_dealloc(object) 433 vm_object_t object; 434 { 435 int s; 436 437 /* 438 * Remove from list right away so lookups will fail if we block for 439 * pageout completion. 440 */ 441 442 if (object->handle == NULL) { 443 TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list); 444 } else { 445 TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list); 446 } 447 448 vm_object_pip_wait(object, "swpdea"); 449 450 /* 451 * Free all remaining metadata. We only bother to free it from 452 * the swap meta data. We do not attempt to free swapblk's still 453 * associated with vm_page_t's for this object. We do not care 454 * if paging is still in progress on some objects. 455 */ 456 s = splvm(); 457 swp_pager_meta_free_all(object); 458 splx(s); 459 } 460 461 /************************************************************************ 462 * SWAP PAGER BITMAP ROUTINES * 463 ************************************************************************/ 464 465 /* 466 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space 467 * 468 * Allocate swap for the requested number of pages. The starting 469 * swap block number (a page index) is returned or SWAPBLK_NONE 470 * if the allocation failed. 471 * 472 * Also has the side effect of advising that somebody made a mistake 473 * when they configured swap and didn't configure enough. 474 * 475 * Must be called at splvm() to avoid races with bitmap frees from 476 * vm_page_remove() aka swap_pager_page_removed(). 477 * 478 * This routine may not block 479 * This routine must be called at splvm(). 480 */ 481 482 static __inline daddr_t 483 swp_pager_getswapspace(npages) 484 int npages; 485 { 486 daddr_t blk; 487 488 if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) { 489 if (swap_pager_full != 2) { 490 printf("swap_pager_getswapspace: failed\n"); 491 swap_pager_full = 2; 492 swap_pager_almost_full = 1; 493 } 494 } else { 495 vm_swap_size -= npages; 496 swp_sizecheck(); 497 } 498 return(blk); 499 } 500 501 /* 502 * SWP_PAGER_FREESWAPSPACE() - free raw swap space 503 * 504 * This routine returns the specified swap blocks back to the bitmap. 505 * 506 * Note: This routine may not block (it could in the old swap code), 507 * and through the use of the new blist routines it does not block. 508 * 509 * We must be called at splvm() to avoid races with bitmap frees from 510 * vm_page_remove() aka swap_pager_page_removed(). 511 * 512 * This routine may not block 513 * This routine must be called at splvm(). 514 */ 515 516 static __inline void 517 swp_pager_freeswapspace(blk, npages) 518 daddr_t blk; 519 int npages; 520 { 521 blist_free(swapblist, blk, npages); 522 vm_swap_size += npages; 523 swp_sizecheck(); 524 } 525 526 /* 527 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page 528 * range within an object. 529 * 530 * This is a globally accessible routine. 531 * 532 * This routine removes swapblk assignments from swap metadata. 533 * 534 * The external callers of this routine typically have already destroyed 535 * or renamed vm_page_t's associated with this range in the object so 536 * we should be ok. 537 * 538 * This routine may be called at any spl. We up our spl to splvm temporarily 539 * in order to perform the metadata removal. 540 */ 541 542 void 543 swap_pager_freespace(object, start, size) 544 vm_object_t object; 545 vm_pindex_t start; 546 vm_size_t size; 547 { 548 int s = splvm(); 549 swp_pager_meta_free(object, start, size); 550 splx(s); 551 } 552 553 /* 554 * SWAP_PAGER_RESERVE() - reserve swap blocks in object 555 * 556 * Assigns swap blocks to the specified range within the object. The 557 * swap blocks are not zerod. Any previous swap assignment is destroyed. 558 * 559 * Returns 0 on success, -1 on failure. 560 */ 561 562 int 563 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size) 564 { 565 int s; 566 int n = 0; 567 daddr_t blk = SWAPBLK_NONE; 568 vm_pindex_t beg = start; /* save start index */ 569 570 s = splvm(); 571 while (size) { 572 if (n == 0) { 573 n = BLIST_MAX_ALLOC; 574 while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) { 575 n >>= 1; 576 if (n == 0) { 577 swp_pager_meta_free(object, beg, start - beg); 578 splx(s); 579 return(-1); 580 } 581 } 582 } 583 swp_pager_meta_build(object, start, blk); 584 --size; 585 ++start; 586 ++blk; 587 --n; 588 } 589 swp_pager_meta_free(object, start, n); 590 splx(s); 591 return(0); 592 } 593 594 /* 595 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager 596 * and destroy the source. 597 * 598 * Copy any valid swapblks from the source to the destination. In 599 * cases where both the source and destination have a valid swapblk, 600 * we keep the destination's. 601 * 602 * This routine is allowed to block. It may block allocating metadata 603 * indirectly through swp_pager_meta_build() or if paging is still in 604 * progress on the source. 605 * 606 * This routine can be called at any spl 607 * 608 * XXX vm_page_collapse() kinda expects us not to block because we 609 * supposedly do not need to allocate memory, but for the moment we 610 * *may* have to get a little memory from the zone allocator, but 611 * it is taken from the interrupt memory. We should be ok. 612 * 613 * The source object contains no vm_page_t's (which is just as well) 614 * 615 * The source object is of type OBJT_SWAP. 616 * 617 * The source and destination objects must be locked or 618 * inaccessible (XXX are they ?) 619 */ 620 621 void 622 swap_pager_copy(srcobject, dstobject, offset, destroysource) 623 vm_object_t srcobject; 624 vm_object_t dstobject; 625 vm_pindex_t offset; 626 int destroysource; 627 { 628 vm_pindex_t i; 629 int s; 630 631 s = splvm(); 632 633 /* 634 * If destroysource is set, we remove the source object from the 635 * swap_pager internal queue now. 636 */ 637 638 if (destroysource) { 639 if (srcobject->handle == NULL) { 640 TAILQ_REMOVE( 641 &swap_pager_un_object_list, 642 srcobject, 643 pager_object_list 644 ); 645 } else { 646 TAILQ_REMOVE( 647 NOBJLIST(srcobject->handle), 648 srcobject, 649 pager_object_list 650 ); 651 } 652 } 653 654 /* 655 * transfer source to destination. 656 */ 657 658 for (i = 0; i < dstobject->size; ++i) { 659 daddr_t dstaddr; 660 661 /* 662 * Locate (without changing) the swapblk on the destination, 663 * unless it is invalid in which case free it silently, or 664 * if the destination is a resident page, in which case the 665 * source is thrown away. 666 */ 667 668 dstaddr = swp_pager_meta_ctl(dstobject, i, 0); 669 670 if (dstaddr == SWAPBLK_NONE) { 671 /* 672 * Destination has no swapblk and is not resident, 673 * copy source. 674 */ 675 daddr_t srcaddr; 676 677 srcaddr = swp_pager_meta_ctl( 678 srcobject, 679 i + offset, 680 SWM_POP 681 ); 682 683 if (srcaddr != SWAPBLK_NONE) 684 swp_pager_meta_build(dstobject, i, srcaddr); 685 } else { 686 /* 687 * Destination has valid swapblk or it is represented 688 * by a resident page. We destroy the sourceblock. 689 */ 690 691 swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE); 692 } 693 } 694 695 /* 696 * Free left over swap blocks in source. 697 * 698 * We have to revert the type to OBJT_DEFAULT so we do not accidently 699 * double-remove the object from the swap queues. 700 */ 701 702 if (destroysource) { 703 swp_pager_meta_free_all(srcobject); 704 /* 705 * Reverting the type is not necessary, the caller is going 706 * to destroy srcobject directly, but I'm doing it here 707 * for consistency since we've removed the object from its 708 * queues. 709 */ 710 srcobject->type = OBJT_DEFAULT; 711 } 712 splx(s); 713 } 714 715 /* 716 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for 717 * the requested page. 718 * 719 * We determine whether good backing store exists for the requested 720 * page and return TRUE if it does, FALSE if it doesn't. 721 * 722 * If TRUE, we also try to determine how much valid, contiguous backing 723 * store exists before and after the requested page within a reasonable 724 * distance. We do not try to restrict it to the swap device stripe 725 * (that is handled in getpages/putpages). It probably isn't worth 726 * doing here. 727 */ 728 729 boolean_t 730 swap_pager_haspage(object, pindex, before, after) 731 vm_object_t object; 732 vm_pindex_t pindex; 733 int *before; 734 int *after; 735 { 736 daddr_t blk0; 737 int s; 738 739 /* 740 * do we have good backing store at the requested index ? 741 */ 742 743 s = splvm(); 744 blk0 = swp_pager_meta_ctl(object, pindex, 0); 745 746 if (blk0 == SWAPBLK_NONE) { 747 splx(s); 748 if (before) 749 *before = 0; 750 if (after) 751 *after = 0; 752 return (FALSE); 753 } 754 755 /* 756 * find backwards-looking contiguous good backing store 757 */ 758 759 if (before != NULL) { 760 int i; 761 762 for (i = 1; i < (SWB_NPAGES/2); ++i) { 763 daddr_t blk; 764 765 if (i > pindex) 766 break; 767 blk = swp_pager_meta_ctl(object, pindex - i, 0); 768 if (blk != blk0 - i) 769 break; 770 } 771 *before = (i - 1); 772 } 773 774 /* 775 * find forward-looking contiguous good backing store 776 */ 777 778 if (after != NULL) { 779 int i; 780 781 for (i = 1; i < (SWB_NPAGES/2); ++i) { 782 daddr_t blk; 783 784 blk = swp_pager_meta_ctl(object, pindex + i, 0); 785 if (blk != blk0 + i) 786 break; 787 } 788 *after = (i - 1); 789 } 790 splx(s); 791 return (TRUE); 792 } 793 794 /* 795 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page 796 * 797 * This removes any associated swap backing store, whether valid or 798 * not, from the page. 799 * 800 * This routine is typically called when a page is made dirty, at 801 * which point any associated swap can be freed. MADV_FREE also 802 * calls us in a special-case situation 803 * 804 * NOTE!!! If the page is clean and the swap was valid, the caller 805 * should make the page dirty before calling this routine. This routine 806 * does NOT change the m->dirty status of the page. Also: MADV_FREE 807 * depends on it. 808 * 809 * This routine may not block 810 * This routine must be called at splvm() 811 */ 812 813 static void 814 swap_pager_unswapped(m) 815 vm_page_t m; 816 { 817 swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE); 818 } 819 820 /* 821 * SWAP_PAGER_STRATEGY() - read, write, free blocks 822 * 823 * This implements the vm_pager_strategy() interface to swap and allows 824 * other parts of the system to directly access swap as backing store 825 * through vm_objects of type OBJT_SWAP. This is intended to be a 826 * cacheless interface ( i.e. caching occurs at higher levels ). 827 * Therefore we do not maintain any resident pages. All I/O goes 828 * directly to and from the swap device. 829 * 830 * Note that b_blkno is scaled for PAGE_SIZE 831 * 832 * We currently attempt to run I/O synchronously or asynchronously as 833 * the caller requests. This isn't perfect because we loose error 834 * sequencing when we run multiple ops in parallel to satisfy a request. 835 * But this is swap, so we let it all hang out. 836 */ 837 838 static void 839 swap_pager_strategy(vm_object_t object, struct buf *bp) 840 { 841 vm_pindex_t start; 842 int count; 843 int s; 844 char *data; 845 struct buf *nbp = NULL; 846 847 if (bp->b_bcount & PAGE_MASK) { 848 bp->b_error = EINVAL; 849 bp->b_flags |= B_ERROR | B_INVAL; 850 biodone(bp); 851 printf("swap_pager_strategy: bp %p b_vp %p blk %d size %d, not page bounded\n", bp, bp->b_vp, (int)bp->b_pblkno, (int)bp->b_bcount); 852 return; 853 } 854 855 /* 856 * Clear error indication, initialize page index, count, data pointer. 857 */ 858 859 bp->b_error = 0; 860 bp->b_flags &= ~B_ERROR; 861 bp->b_resid = bp->b_bcount; 862 863 start = bp->b_pblkno; 864 count = howmany(bp->b_bcount, PAGE_SIZE); 865 data = bp->b_data; 866 867 s = splvm(); 868 869 /* 870 * Deal with B_FREEBUF 871 */ 872 873 if (bp->b_flags & B_FREEBUF) { 874 /* 875 * FREE PAGE(s) - destroy underlying swap that is no longer 876 * needed. 877 */ 878 swp_pager_meta_free(object, start, count); 879 splx(s); 880 bp->b_resid = 0; 881 biodone(bp); 882 return; 883 } 884 885 /* 886 * Execute read or write 887 */ 888 889 while (count > 0) { 890 daddr_t blk; 891 892 /* 893 * Obtain block. If block not found and writing, allocate a 894 * new block and build it into the object. 895 */ 896 897 blk = swp_pager_meta_ctl(object, start, 0); 898 if ((blk == SWAPBLK_NONE) && (bp->b_flags & B_READ) == 0) { 899 blk = swp_pager_getswapspace(1); 900 if (blk == SWAPBLK_NONE) { 901 bp->b_error = ENOMEM; 902 bp->b_flags |= B_ERROR; 903 break; 904 } 905 swp_pager_meta_build(object, start, blk); 906 } 907 908 /* 909 * Do we have to flush our current collection? Yes if: 910 * 911 * - no swap block at this index 912 * - swap block is not contiguous 913 * - we cross a physical disk boundry in the 914 * stripe. 915 */ 916 917 if ( 918 nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk || 919 ((nbp->b_blkno ^ blk) & dmmax_mask) 920 ) 921 ) { 922 splx(s); 923 if (bp->b_flags & B_READ) { 924 ++mycpu->gd_cnt.v_swapin; 925 mycpu->gd_cnt.v_swappgsin += btoc(nbp->b_bcount); 926 } else { 927 ++mycpu->gd_cnt.v_swapout; 928 mycpu->gd_cnt.v_swappgsout += btoc(nbp->b_bcount); 929 nbp->b_dirtyend = nbp->b_bcount; 930 } 931 flushchainbuf(nbp); 932 s = splvm(); 933 nbp = NULL; 934 } 935 936 /* 937 * Add new swapblk to nbp, instantiating nbp if necessary. 938 * Zero-fill reads are able to take a shortcut. 939 */ 940 941 if (blk == SWAPBLK_NONE) { 942 /* 943 * We can only get here if we are reading. Since 944 * we are at splvm() we can safely modify b_resid, 945 * even if chain ops are in progress. 946 */ 947 bzero(data, PAGE_SIZE); 948 bp->b_resid -= PAGE_SIZE; 949 } else { 950 if (nbp == NULL) { 951 nbp = getchainbuf(bp, swapdev_vp, (bp->b_flags & B_READ) | B_ASYNC); 952 nbp->b_blkno = blk; 953 nbp->b_bcount = 0; 954 nbp->b_data = data; 955 } 956 nbp->b_bcount += PAGE_SIZE; 957 } 958 --count; 959 ++start; 960 data += PAGE_SIZE; 961 } 962 963 /* 964 * Flush out last buffer 965 */ 966 967 splx(s); 968 969 if (nbp) { 970 if ((bp->b_flags & B_ASYNC) == 0) 971 nbp->b_flags &= ~B_ASYNC; 972 if (nbp->b_flags & B_READ) { 973 ++mycpu->gd_cnt.v_swapin; 974 mycpu->gd_cnt.v_swappgsin += btoc(nbp->b_bcount); 975 } else { 976 ++mycpu->gd_cnt.v_swapout; 977 mycpu->gd_cnt.v_swappgsout += btoc(nbp->b_bcount); 978 nbp->b_dirtyend = nbp->b_bcount; 979 } 980 flushchainbuf(nbp); 981 /* nbp = NULL; */ 982 } 983 984 /* 985 * Wait for completion. 986 */ 987 988 if (bp->b_flags & B_ASYNC) { 989 autochaindone(bp); 990 } else { 991 waitchainbuf(bp, 0, 1); 992 } 993 } 994 995 /* 996 * SWAP_PAGER_GETPAGES() - bring pages in from swap 997 * 998 * Attempt to retrieve (m, count) pages from backing store, but make 999 * sure we retrieve at least m[reqpage]. We try to load in as large 1000 * a chunk surrounding m[reqpage] as is contiguous in swap and which 1001 * belongs to the same object. 1002 * 1003 * The code is designed for asynchronous operation and 1004 * immediate-notification of 'reqpage' but tends not to be 1005 * used that way. Please do not optimize-out this algorithmic 1006 * feature, I intend to improve on it in the future. 1007 * 1008 * The parent has a single vm_object_pip_add() reference prior to 1009 * calling us and we should return with the same. 1010 * 1011 * The parent has BUSY'd the pages. We should return with 'm' 1012 * left busy, but the others adjusted. 1013 */ 1014 1015 static int 1016 swap_pager_getpages(object, m, count, reqpage) 1017 vm_object_t object; 1018 vm_page_t *m; 1019 int count, reqpage; 1020 { 1021 struct buf *bp; 1022 vm_page_t mreq; 1023 int s; 1024 int i; 1025 int j; 1026 daddr_t blk; 1027 vm_offset_t kva; 1028 vm_pindex_t lastpindex; 1029 1030 mreq = m[reqpage]; 1031 1032 if (mreq->object != object) { 1033 panic("swap_pager_getpages: object mismatch %p/%p", 1034 object, 1035 mreq->object 1036 ); 1037 } 1038 /* 1039 * Calculate range to retrieve. The pages have already been assigned 1040 * their swapblks. We require a *contiguous* range that falls entirely 1041 * within a single device stripe. If we do not supply it, bad things 1042 * happen. Note that blk, iblk & jblk can be SWAPBLK_NONE, but the 1043 * loops are set up such that the case(s) are handled implicitly. 1044 * 1045 * The swp_*() calls must be made at splvm(). vm_page_free() does 1046 * not need to be, but it will go a little faster if it is. 1047 */ 1048 1049 s = splvm(); 1050 blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0); 1051 1052 for (i = reqpage - 1; i >= 0; --i) { 1053 daddr_t iblk; 1054 1055 iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0); 1056 if (blk != iblk + (reqpage - i)) 1057 break; 1058 if ((blk ^ iblk) & dmmax_mask) 1059 break; 1060 } 1061 ++i; 1062 1063 for (j = reqpage + 1; j < count; ++j) { 1064 daddr_t jblk; 1065 1066 jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0); 1067 if (blk != jblk - (j - reqpage)) 1068 break; 1069 if ((blk ^ jblk) & dmmax_mask) 1070 break; 1071 } 1072 1073 /* 1074 * free pages outside our collection range. Note: we never free 1075 * mreq, it must remain busy throughout. 1076 */ 1077 1078 { 1079 int k; 1080 1081 for (k = 0; k < i; ++k) 1082 vm_page_free(m[k]); 1083 for (k = j; k < count; ++k) 1084 vm_page_free(m[k]); 1085 } 1086 splx(s); 1087 1088 1089 /* 1090 * Return VM_PAGER_FAIL if we have nothing to do. Return mreq 1091 * still busy, but the others unbusied. 1092 */ 1093 1094 if (blk == SWAPBLK_NONE) 1095 return(VM_PAGER_FAIL); 1096 1097 /* 1098 * Get a swap buffer header to perform the IO 1099 */ 1100 1101 bp = getpbuf(&nsw_rcount); 1102 kva = (vm_offset_t) bp->b_data; 1103 1104 /* 1105 * map our page(s) into kva for input 1106 * 1107 * NOTE: B_PAGING is set by pbgetvp() 1108 */ 1109 1110 pmap_qenter(kva, m + i, j - i); 1111 1112 bp->b_flags = B_READ | B_CALL; 1113 bp->b_iodone = swp_pager_async_iodone; 1114 bp->b_data = (caddr_t) kva; 1115 bp->b_blkno = blk - (reqpage - i); 1116 bp->b_bcount = PAGE_SIZE * (j - i); 1117 bp->b_bufsize = PAGE_SIZE * (j - i); 1118 bp->b_pager.pg_reqpage = reqpage - i; 1119 1120 { 1121 int k; 1122 1123 for (k = i; k < j; ++k) { 1124 bp->b_pages[k - i] = m[k]; 1125 vm_page_flag_set(m[k], PG_SWAPINPROG); 1126 } 1127 } 1128 bp->b_npages = j - i; 1129 1130 pbgetvp(swapdev_vp, bp); 1131 1132 mycpu->gd_cnt.v_swapin++; 1133 mycpu->gd_cnt.v_swappgsin += bp->b_npages; 1134 1135 /* 1136 * We still hold the lock on mreq, and our automatic completion routine 1137 * does not remove it. 1138 */ 1139 1140 vm_object_pip_add(mreq->object, bp->b_npages); 1141 lastpindex = m[j-1]->pindex; 1142 1143 /* 1144 * perform the I/O. NOTE!!! bp cannot be considered valid after 1145 * this point because we automatically release it on completion. 1146 * Instead, we look at the one page we are interested in which we 1147 * still hold a lock on even through the I/O completion. 1148 * 1149 * The other pages in our m[] array are also released on completion, 1150 * so we cannot assume they are valid anymore either. 1151 * 1152 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY 1153 */ 1154 1155 BUF_KERNPROC(bp); 1156 VOP_STRATEGY(bp->b_vp, bp); 1157 1158 /* 1159 * wait for the page we want to complete. PG_SWAPINPROG is always 1160 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE 1161 * is set in the meta-data. 1162 */ 1163 1164 s = splvm(); 1165 1166 while ((mreq->flags & PG_SWAPINPROG) != 0) { 1167 vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED); 1168 mycpu->gd_cnt.v_intrans++; 1169 if (tsleep(mreq, 0, "swread", hz*20)) { 1170 printf( 1171 "swap_pager: indefinite wait buffer: device:" 1172 " %s, blkno: %ld, size: %ld\n", 1173 devtoname(bp->b_dev), (long)bp->b_blkno, 1174 bp->b_bcount 1175 ); 1176 } 1177 } 1178 1179 splx(s); 1180 1181 /* 1182 * mreq is left bussied after completion, but all the other pages 1183 * are freed. If we had an unrecoverable read error the page will 1184 * not be valid. 1185 */ 1186 1187 if (mreq->valid != VM_PAGE_BITS_ALL) { 1188 return(VM_PAGER_ERROR); 1189 } else { 1190 return(VM_PAGER_OK); 1191 } 1192 1193 /* 1194 * A final note: in a low swap situation, we cannot deallocate swap 1195 * and mark a page dirty here because the caller is likely to mark 1196 * the page clean when we return, causing the page to possibly revert 1197 * to all-zero's later. 1198 */ 1199 } 1200 1201 /* 1202 * swap_pager_putpages: 1203 * 1204 * Assign swap (if necessary) and initiate I/O on the specified pages. 1205 * 1206 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects 1207 * are automatically converted to SWAP objects. 1208 * 1209 * In a low memory situation we may block in VOP_STRATEGY(), but the new 1210 * vm_page reservation system coupled with properly written VFS devices 1211 * should ensure that no low-memory deadlock occurs. This is an area 1212 * which needs work. 1213 * 1214 * The parent has N vm_object_pip_add() references prior to 1215 * calling us and will remove references for rtvals[] that are 1216 * not set to VM_PAGER_PEND. We need to remove the rest on I/O 1217 * completion. 1218 * 1219 * The parent has soft-busy'd the pages it passes us and will unbusy 1220 * those whos rtvals[] entry is not set to VM_PAGER_PEND on return. 1221 * We need to unbusy the rest on I/O completion. 1222 */ 1223 1224 void 1225 swap_pager_putpages(object, m, count, sync, rtvals) 1226 vm_object_t object; 1227 vm_page_t *m; 1228 int count; 1229 boolean_t sync; 1230 int *rtvals; 1231 { 1232 int i; 1233 int n = 0; 1234 1235 if (count && m[0]->object != object) { 1236 panic("swap_pager_getpages: object mismatch %p/%p", 1237 object, 1238 m[0]->object 1239 ); 1240 } 1241 /* 1242 * Step 1 1243 * 1244 * Turn object into OBJT_SWAP 1245 * check for bogus sysops 1246 * force sync if not pageout process 1247 */ 1248 1249 if (object->type != OBJT_SWAP) 1250 swp_pager_meta_build(object, 0, SWAPBLK_NONE); 1251 1252 if (curthread != pagethread) 1253 sync = TRUE; 1254 1255 /* 1256 * Step 2 1257 * 1258 * Update nsw parameters from swap_async_max sysctl values. 1259 * Do not let the sysop crash the machine with bogus numbers. 1260 */ 1261 1262 if (swap_async_max != nsw_wcount_async_max) { 1263 int n; 1264 int s; 1265 1266 /* 1267 * limit range 1268 */ 1269 if ((n = swap_async_max) > nswbuf / 2) 1270 n = nswbuf / 2; 1271 if (n < 1) 1272 n = 1; 1273 swap_async_max = n; 1274 1275 /* 1276 * Adjust difference ( if possible ). If the current async 1277 * count is too low, we may not be able to make the adjustment 1278 * at this time. 1279 */ 1280 s = splvm(); 1281 n -= nsw_wcount_async_max; 1282 if (nsw_wcount_async + n >= 0) { 1283 nsw_wcount_async += n; 1284 nsw_wcount_async_max += n; 1285 wakeup(&nsw_wcount_async); 1286 } 1287 splx(s); 1288 } 1289 1290 /* 1291 * Step 3 1292 * 1293 * Assign swap blocks and issue I/O. We reallocate swap on the fly. 1294 * The page is left dirty until the pageout operation completes 1295 * successfully. 1296 */ 1297 1298 for (i = 0; i < count; i += n) { 1299 int s; 1300 int j; 1301 struct buf *bp; 1302 daddr_t blk; 1303 1304 /* 1305 * Maximum I/O size is limited by a number of factors. 1306 */ 1307 1308 n = min(BLIST_MAX_ALLOC, count - i); 1309 n = min(n, nsw_cluster_max); 1310 1311 s = splvm(); 1312 1313 /* 1314 * Get biggest block of swap we can. If we fail, fall 1315 * back and try to allocate a smaller block. Don't go 1316 * overboard trying to allocate space if it would overly 1317 * fragment swap. 1318 */ 1319 while ( 1320 (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE && 1321 n > 4 1322 ) { 1323 n >>= 1; 1324 } 1325 if (blk == SWAPBLK_NONE) { 1326 for (j = 0; j < n; ++j) 1327 rtvals[i+j] = VM_PAGER_FAIL; 1328 splx(s); 1329 continue; 1330 } 1331 1332 /* 1333 * The I/O we are constructing cannot cross a physical 1334 * disk boundry in the swap stripe. Note: we are still 1335 * at splvm(). 1336 */ 1337 if ((blk ^ (blk + n)) & dmmax_mask) { 1338 j = ((blk + dmmax) & dmmax_mask) - blk; 1339 swp_pager_freeswapspace(blk + j, n - j); 1340 n = j; 1341 } 1342 1343 /* 1344 * All I/O parameters have been satisfied, build the I/O 1345 * request and assign the swap space. 1346 * 1347 * NOTE: B_PAGING is set by pbgetvp() 1348 */ 1349 1350 if (sync == TRUE) { 1351 bp = getpbuf(&nsw_wcount_sync); 1352 bp->b_flags = B_CALL; 1353 } else { 1354 bp = getpbuf(&nsw_wcount_async); 1355 bp->b_flags = B_CALL | B_ASYNC; 1356 } 1357 bp->b_spc = NULL; /* not used, but NULL-out anyway */ 1358 1359 pmap_qenter((vm_offset_t)bp->b_data, &m[i], n); 1360 1361 bp->b_bcount = PAGE_SIZE * n; 1362 bp->b_bufsize = PAGE_SIZE * n; 1363 bp->b_blkno = blk; 1364 1365 pbgetvp(swapdev_vp, bp); 1366 1367 for (j = 0; j < n; ++j) { 1368 vm_page_t mreq = m[i+j]; 1369 1370 swp_pager_meta_build( 1371 mreq->object, 1372 mreq->pindex, 1373 blk + j 1374 ); 1375 vm_page_dirty(mreq); 1376 rtvals[i+j] = VM_PAGER_OK; 1377 1378 vm_page_flag_set(mreq, PG_SWAPINPROG); 1379 bp->b_pages[j] = mreq; 1380 } 1381 bp->b_npages = n; 1382 /* 1383 * Must set dirty range for NFS to work. 1384 */ 1385 bp->b_dirtyoff = 0; 1386 bp->b_dirtyend = bp->b_bcount; 1387 1388 mycpu->gd_cnt.v_swapout++; 1389 mycpu->gd_cnt.v_swappgsout += bp->b_npages; 1390 swapdev_vp->v_numoutput++; 1391 1392 splx(s); 1393 1394 /* 1395 * asynchronous 1396 * 1397 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY 1398 */ 1399 1400 if (sync == FALSE) { 1401 bp->b_iodone = swp_pager_async_iodone; 1402 BUF_KERNPROC(bp); 1403 VOP_STRATEGY(bp->b_vp, bp); 1404 1405 for (j = 0; j < n; ++j) 1406 rtvals[i+j] = VM_PAGER_PEND; 1407 continue; 1408 } 1409 1410 /* 1411 * synchronous 1412 * 1413 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY 1414 */ 1415 1416 bp->b_iodone = swp_pager_sync_iodone; 1417 VOP_STRATEGY(bp->b_vp, bp); 1418 1419 /* 1420 * Wait for the sync I/O to complete, then update rtvals. 1421 * We just set the rtvals[] to VM_PAGER_PEND so we can call 1422 * our async completion routine at the end, thus avoiding a 1423 * double-free. 1424 */ 1425 s = splbio(); 1426 1427 while ((bp->b_flags & B_DONE) == 0) { 1428 tsleep(bp, 0, "swwrt", 0); 1429 } 1430 1431 for (j = 0; j < n; ++j) 1432 rtvals[i+j] = VM_PAGER_PEND; 1433 1434 /* 1435 * Now that we are through with the bp, we can call the 1436 * normal async completion, which frees everything up. 1437 */ 1438 1439 swp_pager_async_iodone(bp); 1440 1441 splx(s); 1442 } 1443 } 1444 1445 /* 1446 * swap_pager_sync_iodone: 1447 * 1448 * Completion routine for synchronous reads and writes from/to swap. 1449 * We just mark the bp is complete and wake up anyone waiting on it. 1450 * 1451 * This routine may not block. This routine is called at splbio() or better. 1452 */ 1453 1454 static void 1455 swp_pager_sync_iodone(bp) 1456 struct buf *bp; 1457 { 1458 bp->b_flags |= B_DONE; 1459 bp->b_flags &= ~B_ASYNC; 1460 wakeup(bp); 1461 } 1462 1463 /* 1464 * swp_pager_async_iodone: 1465 * 1466 * Completion routine for asynchronous reads and writes from/to swap. 1467 * Also called manually by synchronous code to finish up a bp. 1468 * 1469 * For READ operations, the pages are PG_BUSY'd. For WRITE operations, 1470 * the pages are vm_page_t->busy'd. For READ operations, we PG_BUSY 1471 * unbusy all pages except the 'main' request page. For WRITE 1472 * operations, we vm_page_t->busy'd unbusy all pages ( we can do this 1473 * because we marked them all VM_PAGER_PEND on return from putpages ). 1474 * 1475 * This routine may not block. 1476 * This routine is called at splbio() or better 1477 * 1478 * We up ourselves to splvm() as required for various vm_page related 1479 * calls. 1480 */ 1481 1482 static void 1483 swp_pager_async_iodone(bp) 1484 struct buf *bp; 1485 { 1486 int s; 1487 int i; 1488 vm_object_t object = NULL; 1489 1490 bp->b_flags |= B_DONE; 1491 1492 /* 1493 * report error 1494 */ 1495 1496 if (bp->b_flags & B_ERROR) { 1497 printf( 1498 "swap_pager: I/O error - %s failed; blkno %ld," 1499 "size %ld, error %d\n", 1500 ((bp->b_flags & B_READ) ? "pagein" : "pageout"), 1501 (long)bp->b_blkno, 1502 (long)bp->b_bcount, 1503 bp->b_error 1504 ); 1505 } 1506 1507 /* 1508 * set object, raise to splvm(). 1509 */ 1510 1511 if (bp->b_npages) 1512 object = bp->b_pages[0]->object; 1513 s = splvm(); 1514 1515 /* 1516 * remove the mapping for kernel virtual 1517 */ 1518 1519 pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); 1520 1521 /* 1522 * cleanup pages. If an error occurs writing to swap, we are in 1523 * very serious trouble. If it happens to be a disk error, though, 1524 * we may be able to recover by reassigning the swap later on. So 1525 * in this case we remove the m->swapblk assignment for the page 1526 * but do not free it in the rlist. The errornous block(s) are thus 1527 * never reallocated as swap. Redirty the page and continue. 1528 */ 1529 1530 for (i = 0; i < bp->b_npages; ++i) { 1531 vm_page_t m = bp->b_pages[i]; 1532 1533 vm_page_flag_clear(m, PG_SWAPINPROG); 1534 1535 if (bp->b_flags & B_ERROR) { 1536 /* 1537 * If an error occurs I'd love to throw the swapblk 1538 * away without freeing it back to swapspace, so it 1539 * can never be used again. But I can't from an 1540 * interrupt. 1541 */ 1542 1543 if (bp->b_flags & B_READ) { 1544 /* 1545 * When reading, reqpage needs to stay 1546 * locked for the parent, but all other 1547 * pages can be freed. We still want to 1548 * wakeup the parent waiting on the page, 1549 * though. ( also: pg_reqpage can be -1 and 1550 * not match anything ). 1551 * 1552 * We have to wake specifically requested pages 1553 * up too because we cleared PG_SWAPINPROG and 1554 * someone may be waiting for that. 1555 * 1556 * NOTE: for reads, m->dirty will probably 1557 * be overridden by the original caller of 1558 * getpages so don't play cute tricks here. 1559 * 1560 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE 1561 * AS THIS MESSES WITH object->memq, and it is 1562 * not legal to mess with object->memq from an 1563 * interrupt. 1564 */ 1565 1566 m->valid = 0; 1567 vm_page_flag_clear(m, PG_ZERO); 1568 1569 if (i != bp->b_pager.pg_reqpage) 1570 vm_page_free(m); 1571 else 1572 vm_page_flash(m); 1573 /* 1574 * If i == bp->b_pager.pg_reqpage, do not wake 1575 * the page up. The caller needs to. 1576 */ 1577 } else { 1578 /* 1579 * If a write error occurs, reactivate page 1580 * so it doesn't clog the inactive list, 1581 * then finish the I/O. 1582 */ 1583 vm_page_dirty(m); 1584 vm_page_activate(m); 1585 vm_page_io_finish(m); 1586 } 1587 } else if (bp->b_flags & B_READ) { 1588 /* 1589 * For read success, clear dirty bits. Nobody should 1590 * have this page mapped but don't take any chances, 1591 * make sure the pmap modify bits are also cleared. 1592 * 1593 * NOTE: for reads, m->dirty will probably be 1594 * overridden by the original caller of getpages so 1595 * we cannot set them in order to free the underlying 1596 * swap in a low-swap situation. I don't think we'd 1597 * want to do that anyway, but it was an optimization 1598 * that existed in the old swapper for a time before 1599 * it got ripped out due to precisely this problem. 1600 * 1601 * clear PG_ZERO in page. 1602 * 1603 * If not the requested page then deactivate it. 1604 * 1605 * Note that the requested page, reqpage, is left 1606 * busied, but we still have to wake it up. The 1607 * other pages are released (unbusied) by 1608 * vm_page_wakeup(). We do not set reqpage's 1609 * valid bits here, it is up to the caller. 1610 */ 1611 1612 pmap_clear_modify(m); 1613 m->valid = VM_PAGE_BITS_ALL; 1614 vm_page_undirty(m); 1615 vm_page_flag_clear(m, PG_ZERO); 1616 1617 /* 1618 * We have to wake specifically requested pages 1619 * up too because we cleared PG_SWAPINPROG and 1620 * could be waiting for it in getpages. However, 1621 * be sure to not unbusy getpages specifically 1622 * requested page - getpages expects it to be 1623 * left busy. 1624 */ 1625 if (i != bp->b_pager.pg_reqpage) { 1626 vm_page_deactivate(m); 1627 vm_page_wakeup(m); 1628 } else { 1629 vm_page_flash(m); 1630 } 1631 } else { 1632 /* 1633 * For write success, clear the modify and dirty 1634 * status, then finish the I/O ( which decrements the 1635 * busy count and possibly wakes waiter's up ). 1636 */ 1637 pmap_clear_modify(m); 1638 vm_page_undirty(m); 1639 vm_page_io_finish(m); 1640 if (!vm_page_count_severe() || !vm_page_try_to_cache(m)) 1641 vm_page_protect(m, VM_PROT_READ); 1642 } 1643 } 1644 1645 /* 1646 * adjust pip. NOTE: the original parent may still have its own 1647 * pip refs on the object. 1648 */ 1649 1650 if (object) 1651 vm_object_pip_wakeupn(object, bp->b_npages); 1652 1653 /* 1654 * release the physical I/O buffer 1655 */ 1656 1657 relpbuf( 1658 bp, 1659 ((bp->b_flags & B_READ) ? &nsw_rcount : 1660 ((bp->b_flags & B_ASYNC) ? 1661 &nsw_wcount_async : 1662 &nsw_wcount_sync 1663 ) 1664 ) 1665 ); 1666 splx(s); 1667 } 1668 1669 /************************************************************************ 1670 * SWAP META DATA * 1671 ************************************************************************ 1672 * 1673 * These routines manipulate the swap metadata stored in the 1674 * OBJT_SWAP object. All swp_*() routines must be called at 1675 * splvm() because swap can be freed up by the low level vm_page 1676 * code which might be called from interrupts beyond what splbio() covers. 1677 * 1678 * Swap metadata is implemented with a global hash and not directly 1679 * linked into the object. Instead the object simply contains 1680 * appropriate tracking counters. 1681 */ 1682 1683 /* 1684 * SWP_PAGER_HASH() - hash swap meta data 1685 * 1686 * This is an inline helper function which hashes the swapblk given 1687 * the object and page index. It returns a pointer to a pointer 1688 * to the object, or a pointer to a NULL pointer if it could not 1689 * find a swapblk. 1690 * 1691 * This routine must be called at splvm(). 1692 */ 1693 1694 static __inline struct swblock ** 1695 swp_pager_hash(vm_object_t object, vm_pindex_t index) 1696 { 1697 struct swblock **pswap; 1698 struct swblock *swap; 1699 1700 index &= ~SWAP_META_MASK; 1701 pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask]; 1702 1703 while ((swap = *pswap) != NULL) { 1704 if (swap->swb_object == object && 1705 swap->swb_index == index 1706 ) { 1707 break; 1708 } 1709 pswap = &swap->swb_hnext; 1710 } 1711 return(pswap); 1712 } 1713 1714 /* 1715 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object 1716 * 1717 * We first convert the object to a swap object if it is a default 1718 * object. 1719 * 1720 * The specified swapblk is added to the object's swap metadata. If 1721 * the swapblk is not valid, it is freed instead. Any previously 1722 * assigned swapblk is freed. 1723 * 1724 * This routine must be called at splvm(), except when used to convert 1725 * an OBJT_DEFAULT object into an OBJT_SWAP object. 1726 1727 */ 1728 1729 static void 1730 swp_pager_meta_build( 1731 vm_object_t object, 1732 vm_pindex_t index, 1733 daddr_t swapblk 1734 ) { 1735 struct swblock *swap; 1736 struct swblock **pswap; 1737 1738 /* 1739 * Convert default object to swap object if necessary 1740 */ 1741 1742 if (object->type != OBJT_SWAP) { 1743 object->type = OBJT_SWAP; 1744 object->un_pager.swp.swp_bcount = 0; 1745 1746 if (object->handle != NULL) { 1747 TAILQ_INSERT_TAIL( 1748 NOBJLIST(object->handle), 1749 object, 1750 pager_object_list 1751 ); 1752 } else { 1753 TAILQ_INSERT_TAIL( 1754 &swap_pager_un_object_list, 1755 object, 1756 pager_object_list 1757 ); 1758 } 1759 } 1760 1761 /* 1762 * Locate hash entry. If not found create, but if we aren't adding 1763 * anything just return. If we run out of space in the map we wait 1764 * and, since the hash table may have changed, retry. 1765 */ 1766 1767 retry: 1768 pswap = swp_pager_hash(object, index); 1769 1770 if ((swap = *pswap) == NULL) { 1771 int i; 1772 1773 if (swapblk == SWAPBLK_NONE) 1774 return; 1775 1776 swap = *pswap = zalloc(swap_zone); 1777 if (swap == NULL) { 1778 VM_WAIT; 1779 goto retry; 1780 } 1781 swap->swb_hnext = NULL; 1782 swap->swb_object = object; 1783 swap->swb_index = index & ~SWAP_META_MASK; 1784 swap->swb_count = 0; 1785 1786 ++object->un_pager.swp.swp_bcount; 1787 1788 for (i = 0; i < SWAP_META_PAGES; ++i) 1789 swap->swb_pages[i] = SWAPBLK_NONE; 1790 } 1791 1792 /* 1793 * Delete prior contents of metadata 1794 */ 1795 1796 index &= SWAP_META_MASK; 1797 1798 if (swap->swb_pages[index] != SWAPBLK_NONE) { 1799 swp_pager_freeswapspace(swap->swb_pages[index], 1); 1800 --swap->swb_count; 1801 } 1802 1803 /* 1804 * Enter block into metadata 1805 */ 1806 1807 swap->swb_pages[index] = swapblk; 1808 if (swapblk != SWAPBLK_NONE) 1809 ++swap->swb_count; 1810 } 1811 1812 /* 1813 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata 1814 * 1815 * The requested range of blocks is freed, with any associated swap 1816 * returned to the swap bitmap. 1817 * 1818 * This routine will free swap metadata structures as they are cleaned 1819 * out. This routine does *NOT* operate on swap metadata associated 1820 * with resident pages. 1821 * 1822 * This routine must be called at splvm() 1823 */ 1824 1825 static void 1826 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count) 1827 { 1828 if (object->type != OBJT_SWAP) 1829 return; 1830 1831 while (count > 0) { 1832 struct swblock **pswap; 1833 struct swblock *swap; 1834 1835 pswap = swp_pager_hash(object, index); 1836 1837 if ((swap = *pswap) != NULL) { 1838 daddr_t v = swap->swb_pages[index & SWAP_META_MASK]; 1839 1840 if (v != SWAPBLK_NONE) { 1841 swp_pager_freeswapspace(v, 1); 1842 swap->swb_pages[index & SWAP_META_MASK] = 1843 SWAPBLK_NONE; 1844 if (--swap->swb_count == 0) { 1845 *pswap = swap->swb_hnext; 1846 zfree(swap_zone, swap); 1847 --object->un_pager.swp.swp_bcount; 1848 } 1849 } 1850 --count; 1851 ++index; 1852 } else { 1853 int n = SWAP_META_PAGES - (index & SWAP_META_MASK); 1854 count -= n; 1855 index += n; 1856 } 1857 } 1858 } 1859 1860 /* 1861 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object 1862 * 1863 * This routine locates and destroys all swap metadata associated with 1864 * an object. 1865 * 1866 * This routine must be called at splvm() 1867 */ 1868 1869 static void 1870 swp_pager_meta_free_all(vm_object_t object) 1871 { 1872 daddr_t index = 0; 1873 1874 if (object->type != OBJT_SWAP) 1875 return; 1876 1877 while (object->un_pager.swp.swp_bcount) { 1878 struct swblock **pswap; 1879 struct swblock *swap; 1880 1881 pswap = swp_pager_hash(object, index); 1882 if ((swap = *pswap) != NULL) { 1883 int i; 1884 1885 for (i = 0; i < SWAP_META_PAGES; ++i) { 1886 daddr_t v = swap->swb_pages[i]; 1887 if (v != SWAPBLK_NONE) { 1888 --swap->swb_count; 1889 swp_pager_freeswapspace(v, 1); 1890 } 1891 } 1892 if (swap->swb_count != 0) 1893 panic("swap_pager_meta_free_all: swb_count != 0"); 1894 *pswap = swap->swb_hnext; 1895 zfree(swap_zone, swap); 1896 --object->un_pager.swp.swp_bcount; 1897 } 1898 index += SWAP_META_PAGES; 1899 if (index > 0x20000000) 1900 panic("swp_pager_meta_free_all: failed to locate all swap meta blocks"); 1901 } 1902 } 1903 1904 /* 1905 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data. 1906 * 1907 * This routine is capable of looking up, popping, or freeing 1908 * swapblk assignments in the swap meta data or in the vm_page_t. 1909 * The routine typically returns the swapblk being looked-up, or popped, 1910 * or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block 1911 * was invalid. This routine will automatically free any invalid 1912 * meta-data swapblks. 1913 * 1914 * It is not possible to store invalid swapblks in the swap meta data 1915 * (other then a literal 'SWAPBLK_NONE'), so we don't bother checking. 1916 * 1917 * When acting on a busy resident page and paging is in progress, we 1918 * have to wait until paging is complete but otherwise can act on the 1919 * busy page. 1920 * 1921 * This routine must be called at splvm(). 1922 * 1923 * SWM_FREE remove and free swap block from metadata 1924 * SWM_POP remove from meta data but do not free.. pop it out 1925 */ 1926 1927 static daddr_t 1928 swp_pager_meta_ctl( 1929 vm_object_t object, 1930 vm_pindex_t index, 1931 int flags 1932 ) { 1933 struct swblock **pswap; 1934 struct swblock *swap; 1935 daddr_t r1; 1936 1937 /* 1938 * The meta data only exists of the object is OBJT_SWAP 1939 * and even then might not be allocated yet. 1940 */ 1941 1942 if (object->type != OBJT_SWAP) 1943 return(SWAPBLK_NONE); 1944 1945 r1 = SWAPBLK_NONE; 1946 pswap = swp_pager_hash(object, index); 1947 1948 if ((swap = *pswap) != NULL) { 1949 index &= SWAP_META_MASK; 1950 r1 = swap->swb_pages[index]; 1951 1952 if (r1 != SWAPBLK_NONE) { 1953 if (flags & SWM_FREE) { 1954 swp_pager_freeswapspace(r1, 1); 1955 r1 = SWAPBLK_NONE; 1956 } 1957 if (flags & (SWM_FREE|SWM_POP)) { 1958 swap->swb_pages[index] = SWAPBLK_NONE; 1959 if (--swap->swb_count == 0) { 1960 *pswap = swap->swb_hnext; 1961 zfree(swap_zone, swap); 1962 --object->un_pager.swp.swp_bcount; 1963 } 1964 } 1965 } 1966 } 1967 return(r1); 1968 } 1969 1970