/*
 * Copyright (c) 1998,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 * $DragonFly: src/sys/vm/swap_pager.c,v 1.32 2008/07/01 02:02:56 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/thread2.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */
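/*
 * Illustrative (editor's note, not from the original source): with 4KB
 * pages and the historical 512-byte DEV_BSIZE, one page-sized chunk
 * corresponds to eight of the old DEV_BSIZE'd chunks.
 */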
extern int vm_swap_size;	/* number of free swap blocks, in pages */

int swap_pager_full;		/* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
static int sw_alloc_interlock;	/* swap pager allocation interlock	*/

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/

extern struct vnode *swapdev_vp;	/* from vm_swap.c */

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct pagerlst	swap_pager_object_list[NOBJLISTS];
struct pagerlst		swap_pager_un_object_list;
vm_zone_t		swap_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static vm_object_t
		swap_pager_alloc (void *handle, off_t size,
				  vm_prot_t prot, off_t offset);
static void	swap_pager_dealloc (vm_object_t object);
static int	swap_pager_getpages (vm_object_t, vm_page_t *, int, int);
static void	swap_pager_init (void);
static void	swap_pager_unswapped (vm_page_t);
static void	swap_pager_strategy (vm_object_t, struct bio *);
static void	swap_chain_iodone(struct bio *biox);

struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager	*/
	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpages,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage,	/* get backing store status for page	*/
	swap_pager_unswapped,	/* remove swap related to page		*/
	swap_pager_strategy	/* pager strategy call			*/
};

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
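/*
 * Illustrative: with the default MAX_PAGEOUT_CLUSTER of 16, dmmax is
 * initialized to 32 pages and dmmax_mask to ~31, so two swap blocks a
 * and b lie in the same device stripe iff ((a ^ b) & dmmax_mask) == 0.
 */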
int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

static __inline void	swp_sizecheck (void);
static void	swp_pager_async_iodone (struct bio *bio);

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace (daddr_t blk, int npages);
static __inline daddr_t	swp_pager_getswapspace (int npages);

/*
 * Metadata functions
 */

static void swp_pager_meta_build (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free_all (vm_object_t);
static daddr_t swp_pager_meta_ctl (vm_object_t, vm_pindex_t, int);

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */

static __inline void
swp_sizecheck(void)
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			kprintf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */

static void
swap_pager_init(void)
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	TAILQ_INIT(&swap_pager_un_object_list);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */

	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */

void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the number of pages an XIO
	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */
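	/*
	 * Illustrative: with MAXPHYS of 128KB and 4KB pages,
	 * MAXPHYS/PAGE_SIZE is 32 and nsw_cluster_max becomes
	 * min(32, MAX_PAGEOUT_CLUSTER) == 16.
	 */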
	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold 16 pages, so this is probably overkill.  This reservation
	 * is typically limited to around 32MB by default.
	 */
	n = vmstats.v_page_count / 2;
	if (maxswzone && n > maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;

	do {
		swap_zone = zinit(
			"SWAPMETA", 
			sizeof(struct swblock), 
			n,
			ZONE_INTERRUPT, 
			1);
		if (swap_zone != NULL)
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	if (swap_zone == NULL)
		panic("swap_pager_swap_init: swap_zone == NULL");
	if (n2 != n)
		kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
	n2 = n;

	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 *	n:		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */

	for (n = 1; n < n2 / 8; n *= 2)
		;

	swhash = kmalloc(sizeof(struct swblock *) * n, M_VMPGDATA,
	    M_WAITOK | M_ZERO);

	swhash_mask = n - 1;
}

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.  We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */

static vm_object_t
swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
{
	vm_object_t object;

	if (handle) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regard to the lookup
		 * of the handle.
		 */
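		/*
		 * sw_alloc_interlock states, as used below: 0 means free,
		 * 1 means held, and -1 means held with at least one waiter
		 * expecting a wakeup.
		 */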
		while (sw_alloc_interlock) {
			sw_alloc_interlock = -1;
			tsleep(&sw_alloc_interlock, 0, "swpalc", 0);
		}
		sw_alloc_interlock = 1;

		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT,
				OFF_TO_IDX(offset + PAGE_MASK + size));
			object->handle = handle;

			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		}

		if (sw_alloc_interlock < 0)
			wakeup(&sw_alloc_interlock);

		sw_alloc_interlock = 0;
	} else {
		object = vm_object_allocate(OBJT_DEFAULT,
			OFF_TO_IDX(offset + PAGE_MASK + size));

		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	}

	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is 
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine once blocked, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */

static void
swap_pager_dealloc(vm_object_t object)
{
	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */

	if (object->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
	} else {
		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
	}

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from 
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	crit_enter();
	swp_pager_meta_free_all(object);
	crit_exit();
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline daddr_t
swp_pager_getswapspace(int npages)
{
	daddr_t blk;

	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			kprintf("swap_pager_getswapspace: failed\n");
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		vm_swap_size -= npages;
		swp_sizecheck();
	}
	return(blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space 
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline void
swp_pager_freeswapspace(daddr_t blk, int npages)
{
	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed 
 *	or renamed vm_page_t's associated with this range in the object so 
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */

void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	crit_enter();
	swp_pager_meta_free(object, start, size);
	crit_exit();
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The 
 *	swap blocks are not zeroed.  Any previous swap assignment is
 *	destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */

int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	crit_enter();
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg, start - beg);
					crit_exit();
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	crit_exit();
	return(0);
}

/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or 
 *	inaccessible (XXX are they ?)
 */

void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
    vm_pindex_t offset, int destroysource)
{
	vm_pindex_t i;

	crit_enter();

	/*
	 * If destroysource is set, we remove the source object from the 
	 * swap_pager internal queue now. 
	 */
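	/*
	 * Summary of the transfer loop further below: for each destination
	 * index i, if the destination has neither a swap block nor a
	 * resident page, the source block at i + offset is popped and
	 * installed at i; otherwise the source block is simply freed.
	 */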
	if (destroysource) {
		if (srcobject->handle == NULL) {
			TAILQ_REMOVE(
			    &swap_pager_un_object_list, 
			    srcobject, 
			    pager_object_list
			);
		} else {
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
		}
	}

	/*
	 * transfer source to destination.
	 */

	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */

		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject, 
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the source block.
			 */

			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not
	 * accidentally double-remove the object from the swap queues.
	 */

	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
	crit_exit();
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */

boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	daddr_t blk0;

	/*
	 * do we have good backing store at the requested index ?
	 */
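	/*
	 * Illustrative: if blk0 backs pindex and pindex - 1 is backed by
	 * blk0 - 1 while pindex - 2 is not backed by blk0 - 2, the scan
	 * below reports *before == 1; *after is computed the same way
	 * going forward.
	 */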
	crit_enter();
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		crit_exit();
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */

	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */

	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	crit_exit();
	return (TRUE);
}

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation.
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */

static void
swap_pager_unswapped(vm_page_t m)
{
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a 
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly to and from the swap device.
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */

static void
swap_pager_strategy(vm_object_t object, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct bio *nbio;
	vm_pindex_t start;
	vm_pindex_t biox_blkno = 0;
	int count;
	char *data;
	struct bio *biox;
	struct buf *bufx;
	struct bio_track *track;

	/*
	 * tracking for swapdev vnode I/Os
	 */
	if (bp->b_cmd == BUF_CMD_READ)
		track = &swapdev_vp->v_track_read;
	else
		track = &swapdev_vp->v_track_write;

	if (bp->b_bcount & PAGE_MASK) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bio);
		kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
			"not page bounded\n",
			bp, (long long)bio->bio_offset, (int)bp->b_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */
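	/*
	 * Illustrative: a 32KB request at byte offset 64KB with 4KB pages
	 * yields start == 16 and count == 8 below.
	 */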
	bp->b_error = 0;
	bp->b_flags &= ~B_ERROR;
	bp->b_resid = bp->b_bcount;

	start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
	count = howmany(bp->b_bcount, PAGE_SIZE);
	data = bp->b_data;

	/*
	 * Deal with BUF_CMD_FREEBLKS
	 */
	if (bp->b_cmd == BUF_CMD_FREEBLKS) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		swp_pager_meta_free(object, start, count);
		bp->b_resid = 0;
		biodone(bio);
		return;
	}

	/*
	 * We need to be able to create a new cluster of I/O's.  We cannot
	 * use the caller fields of the passed bio so push a new one.
	 *
	 * Because nbio is just a placeholder for the cluster links,
	 * we can biodone() the original bio instead of nbio to make
	 * things a bit more efficient.
	 */
	nbio = push_bio(bio);
	nbio->bio_offset = bio->bio_offset;
	nbio->bio_caller_info1.cluster_head = NULL;
	nbio->bio_caller_info2.cluster_tail = NULL;

	biox = NULL;
	bufx = NULL;

	/*
	 * Execute read or write
	 */
	while (count > 0) {
		daddr_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */
		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) {
			blk = swp_pager_getswapspace(1);
			if (blk == SWAPBLK_NONE) {
				bp->b_error = ENOMEM;
				bp->b_flags |= B_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */
		if (
		    biox && (biox_blkno + btoc(bufx->b_bcount) != blk ||
			     ((biox_blkno ^ blk) & dmmax_mask)
		    )
		) {
			if (bp->b_cmd == BUF_CMD_READ) {
				++mycpu->gd_cnt.v_swapin;
				mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
			} else {
				++mycpu->gd_cnt.v_swapout;
				mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
				bufx->b_dirtyend = bufx->b_bcount;
			}

			/*
			 * Finished with this buf.
			 */
			KKASSERT(bufx->b_bcount != 0);
			if (bufx->b_cmd != BUF_CMD_READ)
				bufx->b_dirtyend = bufx->b_bcount;
			biox = NULL;
			bufx = NULL;
		}

		/*
		 * Add new swapblk to biox, instantiating biox if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */
		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->b_resid -= PAGE_SIZE;
		} else {
			if (biox == NULL) {
				/* XXX chain count > 4, wait to <= 4 */

				bufx = getpbuf(NULL);
				biox = &bufx->b_bio1;
				cluster_append(nbio, bufx);
				bufx->b_flags |= (bp->b_flags & B_ORDERED);
				bufx->b_cmd = bp->b_cmd;
				biox->bio_done = swap_chain_iodone;
				biox->bio_offset = (off_t)blk << PAGE_SHIFT;
				biox->bio_caller_info1.cluster_parent = nbio;
				biox_blkno = blk;
				bufx->b_bcount = 0;
				bufx->b_data = data;
			}
			bufx->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}

	/*
	 * Flush out last buffer
	 */
	if (biox) {
		if (bufx->b_cmd == BUF_CMD_READ) {
			++mycpu->gd_cnt.v_swapin;
			mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
		} else {
			++mycpu->gd_cnt.v_swapout;
			mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
			bufx->b_dirtyend = bufx->b_bcount;
		}
		KKASSERT(bufx->b_bcount);
		if (bufx->b_cmd != BUF_CMD_READ)
			bufx->b_dirtyend = bufx->b_bcount;
		/* biox, bufx = NULL */
	}

	/*
	 * Now initiate all the I/O.  Be careful looping on our chain as
	 * I/O's may complete while we are still initiating them.
	 */
	nbio->bio_caller_info2.cluster_tail = NULL;
	bufx = nbio->bio_caller_info1.cluster_head;

	while (bufx) {
		biox = &bufx->b_bio1;
		BUF_KERNPROC(bufx);
		bufx = bufx->b_cluster_next;
		vn_strategy(swapdev_vp, biox);
	}

	/*
	 * Completion of the cluster will also call biodone_chain(nbio).
	 * We never call biodone(nbio) so we don't have to worry about
	 * setting up a bio_done callback.  It's handled in the sub-IO.
	 */
	/**/
}

static void
swap_chain_iodone(struct bio *biox)
{
	struct buf **nextp;
	struct buf *bufx;	/* chained sub-buffer */
	struct bio *nbio;	/* parent nbio with chain glue */
	struct buf *bp;		/* original bp associated with nbio */
	int chain_empty;

	bufx = biox->bio_buf;
	nbio = biox->bio_caller_info1.cluster_parent;
	bp = nbio->bio_buf;

	/*
	 * Update the original buffer
	 */
	KKASSERT(bp != NULL);
	if (bufx->b_flags & B_ERROR) {
		atomic_set_int(&bp->b_flags, B_ERROR);
		bp->b_error = bufx->b_error;
	} else if (bufx->b_resid != 0) {
		atomic_set_int(&bp->b_flags, B_ERROR);
		bp->b_error = EINVAL;
	} else {
		atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
	}

	/*
	 * Remove us from the chain.
	 */
	spin_lock_wr(&bp->b_lock.lk_spinlock);
	nextp = &nbio->bio_caller_info1.cluster_head;
	while (*nextp != bufx) {
		KKASSERT(*nextp != NULL);
		nextp = &(*nextp)->b_cluster_next;
	}
	*nextp = bufx->b_cluster_next;
	chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
	spin_unlock_wr(&bp->b_lock.lk_spinlock);

	/*
	 * Clean up bufx.  If the chain is now empty we finish out
	 * the parent.  Note that we may be racing other completions
	 * so we must use the chain_empty status from above.
	 */
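	/*
	 * chain_empty was latched while the spinlock was held, so only
	 * the completion that actually emptied the chain finishes out
	 * the parent below; racing completions cannot do it twice.
	 */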
	if (chain_empty) {
		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
			atomic_set_int(&bp->b_flags, B_ERROR);
			bp->b_error = EINVAL;
		}
		biodone_chain(nbio);
	}
	relpbuf(bufx, NULL);
}

/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */

static int
swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	struct buf *bp;
	struct bio *bio;
	vm_page_t mreq;
	int i;
	int j;
	daddr_t blk;
	vm_offset_t kva;
	vm_pindex_t lastpindex;

	mreq = m[reqpage];

	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p", 
		    object, 
		    mreq->object
		);
	}

	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range that falls entirely
	 * within a single device stripe.  If we do not supply it, bad things
	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the 
	 * loops are set up such that the case(s) are handled implicitly.
	 *
	 * The swp_*() calls must be made at splvm().  vm_page_free() does
	 * not need to be, but it will go a little faster if it is.
	 */
	crit_enter();
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

	for (i = reqpage - 1; i >= 0; --i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
		if (blk != iblk + (reqpage - i))
			break;
		if ((blk ^ iblk) & dmmax_mask)
			break;
	}
	++i;

	for (j = reqpage + 1; j < count; ++j) {
		daddr_t jblk;

		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
		if (blk != jblk - (j - reqpage))
			break;
		if ((blk ^ jblk) & dmmax_mask)
			break;
	}

	/*
	 * free pages outside our collection range.   Note: we never free
	 * mreq, it must remain busy throughout.
	 */

	{
		int k;

		for (k = 0; k < i; ++k)
			vm_page_free(m[k]);
		for (k = j; k < count; ++k)
			vm_page_free(m[k]);
	}
	crit_exit();

	/*
	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq 
	 * still busy, but the others unbusied.
	 */
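	/*
	 * Illustrative: if the scans above settled on i == 1 and j == 4,
	 * the single read built below covers three pages (12KB with 4KB
	 * pages) starting at swap block blk - (reqpage - i).
	 */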
	if (blk == SWAPBLK_NONE)
		return(VM_PAGER_FAIL);

	/*
	 * Get a swap buffer header to perform the IO
	 */

	bp = getpbuf(&nsw_rcount);
	bio = &bp->b_bio1;
	kva = (vm_offset_t) bp->b_data;

	/*
	 * map our page(s) into kva for input
	 */

	pmap_qenter(kva, m + i, j - i);

	bp->b_data = (caddr_t) kva;
	bp->b_bcount = PAGE_SIZE * (j - i);
	bio->bio_done = swp_pager_async_iodone;
	bio->bio_offset = (off_t)(blk - (reqpage - i)) << PAGE_SHIFT;
	bio->bio_driver_info = (void *)(intptr_t)(reqpage - i);

	{
		int k;

		for (k = i; k < j; ++k) {
			bp->b_xio.xio_pages[k - i] = m[k];
			vm_page_flag_set(m[k], PG_SWAPINPROG);
		}
	}
	bp->b_xio.xio_npages = j - i;

	mycpu->gd_cnt.v_swapin++;
	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */

	vm_object_pip_add(mreq->object, bp->b_xio.xio_npages);
	lastpindex = m[j-1]->pindex;

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 */

	bp->b_cmd = BUF_CMD_READ;
	BUF_KERNPROC(bp);
	vn_strategy(swapdev_vp, bio);

	/*
	 * wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 */

	crit_enter();

	while ((mreq->flags & PG_SWAPINPROG) != 0) {
		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		mycpu->gd_cnt.v_intrans++;
		if (tsleep(mreq, 0, "swread", hz*20)) {
			kprintf(
			    "swap_pager: indefinite wait buffer: "
			    "offset: %lld, size: %ld\n",
			    (long long)bio->bio_offset,
			    (long)bp->b_bcount
			);
		}
	}

	crit_exit();

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */

	if (mreq->valid != VM_PAGE_BITS_ALL) {
		return(VM_PAGER_ERROR);
	} else {
		return(VM_PAGER_OK);
	}

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert 
	 * to all-zero's later.
	 */
}

/*
 * swap_pager_putpages: 
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in vn_strategy(), but the new 
 *	vm_page reservation system coupled with properly written VFS devices 
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */
void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    boolean_t sync, int *rtvals)
{
	int i;
	int n = 0;

	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p", 
		    object, 
		    m[0]->object
		);
	}

	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */

	if (object->type != OBJT_SWAP)
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);

	if (curthread != pagethread)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.  
	 * Do not let the sysop crash the machine with bogus numbers.
	 */

	if (swap_async_max != nsw_wcount_async_max) {
		int n;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		crit_enter();
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
		crit_exit();
	}

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		struct buf *bp;
		struct bio *bio;
		daddr_t blk;
		int j;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		crit_enter();

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			crit_exit();
			continue;
		}

		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.  Note: we are still
		 * at splvm().  E.g. with dmmax 32, blk 28 and n 8, the
		 * clip below computes j == 4, returns blocks 32-35 to
		 * the bitmap, and limits the I/O to blocks 28-31.
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(blk + j, n - j);
			n = j;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 */
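		/*
		 * Note: getpbuf() below blocks on the chosen counter, so
		 * synchronous and asynchronous writers are throttled
		 * independently (nsw_wcount_sync vs. nsw_wcount_async).
		 */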
		if (sync == TRUE)
			bp = getpbuf(&nsw_wcount_sync);
		else
			bp = getpbuf(&nsw_wcount_async);
		bio = &bp->b_bio1;

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_bcount = PAGE_SIZE * n;
		bio->bio_offset = (off_t)blk << PAGE_SHIFT;

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object, 
			    mreq->pindex,
			    blk + j
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_xio.xio_pages[j] = mreq;
		}
		bp->b_xio.xio_npages = n;

		mycpu->gd_cnt.v_swapout++;
		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;

		crit_exit();

		bp->b_dirtyoff = 0;		/* req'd for NFS */
		bp->b_dirtyend = bp->b_bcount;	/* req'd for NFS */
		bp->b_cmd = BUF_CMD_WRITE;

		/*
		 * asynchronous
		 */
		if (sync == FALSE) {
			bio->bio_done = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			vn_strategy(swapdev_vp, bio);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			continue;
		}

		/*
		 * Issue synchronously.
		 *
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		bio->bio_done = biodone_sync;
		bio->bio_flags |= BIO_SYNC;
		vn_strategy(swapdev_vp, bio);
		biowait(bio, "swwrt");

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */
		swp_pager_async_iodone(bio);
	}
}

void
swap_pager_newswap(void)
{
	swp_sizecheck();
}

/*
 * swap_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations, 
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY 
 *	unbusy all pages except the 'main' request page.  For WRITE 
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this 
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 */
static void
swp_pager_async_iodone(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	vm_object_t object = NULL;
	int i;
	int *nswptr;

	/*
	 * report error
	 */
	if (bp->b_flags & B_ERROR) {
		kprintf(
		    "swap_pager: I/O error - %s failed; offset %lld, "
		    "size %ld, error %d\n",
		    ((bp->b_cmd == BUF_CMD_READ) ? "pagein" : "pageout"),
		    (long long)bio->bio_offset,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object, raise to splvm().
	 */

	if (bp->b_xio.xio_npages)
		object = bp->b_xio.xio_pages[0]->object;
	crit_enter();

	/*
	 * remove the mapping for kernel virtual
	 */
	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page 
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */
	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
		vm_page_t m = bp->b_xio.xio_pages[i];

		vm_page_flag_clear(m, PG_SWAPINPROG);

		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an 
			 * interrupt.
			 */

			if (bp->b_cmd == BUF_CMD_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and 
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * NOTE: We can't actually free the page from
				 * here, because this is an interrupt.  It
				 * is not legal to mess with object->memq
				 * from an interrupt.  Deactivate the page
				 * instead.
				 */

				m->valid = 0;
				vm_page_flag_clear(m, PG_ZERO);

				/*
				 * bio_driver_info holds the requested page
				 * index.
				 */
				if (i != (int)(intptr_t)bio->bio_driver_info) {
					vm_page_deactivate(m);
					vm_page_wakeup(m);
				} else {
					vm_page_flash(m);
				}
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake 
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				kprintf("f");
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bp->b_cmd == BUF_CMD_READ) {
			/*
			 * NOTE: for reads, m->dirty will probably be 
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * clear PG_ZERO in page.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by 
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */

			/*
			 * NOTE: can't call pmap_clear_modify(m) from an
			 * interrupt thread, the pmap code may have to map
			 * non-kernel pmaps and currently asserts the case.
			 */
			/*pmap_clear_modify(m);*/
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure to not unbusy the getpages specifically
			 * requested page - getpages expects it to be
			 * left busy.
			 *
			 * bio_driver_info holds the requested page
			 */
			if (i != (int)(intptr_t)bio->bio_driver_info) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * Mark the page clean but do not mess with the
			 * pmap-layer's modified state.  That state should
			 * also be clear since the caller protected the
			 * page VM_PROT_READ, but allow the case.
			 *
			 * We are in an interrupt, avoid pmap operations.
			 *
			 * If we have a severe page deficit, deactivate the
			 * page.  Do not try to cache it (which would also
			 * involve a pmap op), because the page might still
			 * be read-heavy.
			 */
			vm_page_undirty(m);
			vm_page_io_finish(m);
			if (vm_page_count_severe())
				vm_page_deactivate(m);
#if 0
			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
				vm_page_protect(m, VM_PROT_READ);
#endif
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */

	if (object)
		vm_object_pip_wakeupn(object, bp->b_xio.xio_npages);

	/*
	 * release the physical I/O buffer
	 */
	if (bp->b_cmd == BUF_CMD_READ)
		nswptr = &nsw_rcount;
	else if (bio->bio_flags & BIO_SYNC)
		nswptr = &nsw_wcount_sync;
	else
		nswptr = &nsw_wcount_async;
	bp->b_cmd = BUF_CMD_DONE;
	relpbuf(bp, nswptr);
	crit_exit();
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the 
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio() covers.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */

/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is an inline helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the object, or a pointer to a NULL pointer if it could not
 *	find a swapblk.
 *
 *	This routine must be called at splvm().
 */

static __inline struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];

	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return(pswap);
}
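/*
 * Illustrative: each swblock covers SWAP_META_PAGES contiguous page
 * indices.  With 16 pages per swblock, object index 35 is looked up
 * under base index 32 (35 & ~SWAP_META_MASK) and stored in slot 3
 * (35 & SWAP_META_MASK).
 */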
/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	This routine must be called at splvm(), except when used to convert
 *	an OBJT_DEFAULT object into an OBJT_SWAP object.
 */

static void
swp_pager_meta_build(
	vm_object_t object, 
	vm_pindex_t index,
	daddr_t swapblk
) {
	struct swblock *swap;
	struct swblock **pswap;

	/*
	 * Convert default object to swap object if necessary
	 */

	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		if (object->handle != NULL) {
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object, 
			    pager_object_list
			);
		} else {
			TAILQ_INSERT_TAIL(
			    &swap_pager_un_object_list,
			    object, 
			    pager_object_list
			);
		}
	}

	/*
	 * Locate hash entry.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
	 */

retry:
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) == NULL) {
		int i;

		if (swapblk == SWAPBLK_NONE)
			return;

		swap = *pswap = zalloc(swap_zone);
		if (swap == NULL) {
			vm_wait(0);
			goto retry;
		}
		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = index & ~SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}

	/*
	 * Delete prior contents of metadata
	 */

	index &= SWAP_META_MASK;

	if (swap->swb_pages[index] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[index], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */

	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap 
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned 
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm()
 */

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
					SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
			count -= n;
			index += n;
		}
	}
}
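/*
 * Illustrative: freeing a range that spans two swblocks simply iterates
 * across both; once a swblock's swb_count drops to zero it is unhashed
 * and returned to swap_zone.
 */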
/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm()
 */

static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];
				if (v != SWAPBLK_NONE) {
					--swap->swb_count;
					swp_pager_freeswapspace(v, 1);
				}
			}
			if (swap->swb_count != 0)
				panic("swap_pager_meta_free_all: swb_count != 0");
			*pswap = swap->swb_hnext;
			zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		index += SWAP_META_PAGES;
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
	}
}

/*
 * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid 
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we 
 *	have to wait until paging is complete but otherwise can act on the 
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free.. pop it out
 */

static daddr_t
swp_pager_meta_ctl(
	vm_object_t object,
	vm_pindex_t index,
	int flags
) {
	struct swblock **pswap;
	struct swblock *swap;
	daddr_t r1;

	/*
	 * The meta data only exists if the object is OBJT_SWAP 
	 * and even then might not be allocated yet.
	 */

	if (object->type != OBJT_SWAP)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
		}
	}
	return(r1);
}