/*
 * (MPSAFE)
 *
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implement the swapcache daemon.  When enabled swap is assumed to be
 * configured on a fast storage device such as a SSD.  Swap is assigned
 * to clean vnode-backed pages in the inactive queue, clustered by object
 * if possible, and written out.  The swap assignment sticks around even
 * after the underlying pages have been recycled.
 *
 * The daemon manages write bandwidth based on sysctl settings to control
 * wear on the SSD.
 *
 * The vnode strategy code will check for the swap assignments and divert
 * reads to the swap device when the data is present in the swapcache.
 *
 * This operates on both regular files and the block device vnodes used by
 * filesystems to manage meta-data.
 */
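
/*
 * Example usage (illustrative only, not part of the code): on a system
 * with swap configured on a SSD, an administrator would typically enable
 * the swapcache via the sysctls defined below, e.g.:
 *
 *	sysctl vm.swapcache.data_enable=1
 *	sysctl vm.swapcache.meta_enable=1
 *	sysctl vm.swapcache.read_enable=1
 *
 * Cumulative write volume can then be monitored via
 * vm.swapcache.write_count and the remaining write budget via
 * vm.swapcache.curburst.
 */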

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

/* the kernel process "swapcached" */
static int vm_swapcached_flush(vm_page_t m, int isblkdev);
static int vm_swapcache_test(vm_page_t m);
static int vm_swapcache_writing_heuristic(void);
static int vm_swapcache_writing(vm_page_t marker, int count, int scount);
static void vm_swapcache_cleaning(vm_object_t marker,
			struct vm_object_hash **swindexp);
static void vm_swapcache_movemarker(vm_object_t marker,
			struct vm_object_hash *swindex, vm_object_t object);
struct thread *swapcached_thread;

SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);

int vm_swapcache_read_enable;
static long vm_swapcache_wtrigger;
static int vm_swapcache_sleep;
static int vm_swapcache_maxscan = PQ_L2_SIZE * 8;
static int vm_swapcache_maxlaunder = PQ_L2_SIZE * 4;
static int vm_swapcache_data_enable = 0;
static int vm_swapcache_meta_enable = 0;
static int vm_swapcache_maxswappct = 75;
static int vm_swapcache_hysteresis;
static int vm_swapcache_min_hysteresis;
int vm_swapcache_use_chflags = 0;	/* require chflags cache */
static int64_t vm_swapcache_minburst = 10000000LL;	/* 10MB */
static int64_t vm_swapcache_curburst = 4000000000LL;	/* 4G after boot */
static int64_t vm_swapcache_maxburst = 2000000000LL;	/* 2G nominal max */
static int64_t vm_swapcache_accrate = 100000LL;		/* 100K/s */
static int64_t vm_swapcache_write_count;
static int64_t vm_swapcache_maxfilesize;
static int64_t vm_swapcache_cleanperobj = 16*1024*1024;

SYSCTL_INT(_vm_swapcache, OID_AUTO, maxlaunder,
	CTLFLAG_RW, &vm_swapcache_maxlaunder, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxscan,
	CTLFLAG_RW, &vm_swapcache_maxscan, 0, "");

SYSCTL_INT(_vm_swapcache, OID_AUTO, data_enable,
	CTLFLAG_RW, &vm_swapcache_data_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, meta_enable,
	CTLFLAG_RW, &vm_swapcache_meta_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
	CTLFLAG_RW, &vm_swapcache_read_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxswappct,
	CTLFLAG_RW, &vm_swapcache_maxswappct, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, hysteresis,
	CTLFLAG_RD, &vm_swapcache_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, min_hysteresis,
	CTLFLAG_RW, &vm_swapcache_min_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, use_chflags,
	CTLFLAG_RW, &vm_swapcache_use_chflags, 0, "");

SYSCTL_QUAD(_vm_swapcache, OID_AUTO, minburst,
	CTLFLAG_RW, &vm_swapcache_minburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, curburst,
	CTLFLAG_RW, &vm_swapcache_curburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxburst,
	CTLFLAG_RW, &vm_swapcache_maxburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxfilesize,
	CTLFLAG_RW, &vm_swapcache_maxfilesize, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, accrate,
	CTLFLAG_RW, &vm_swapcache_accrate, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, write_count,
	CTLFLAG_RW, &vm_swapcache_write_count, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, cleanperobj,
	CTLFLAG_RW, &vm_swapcache_cleanperobj, 0, "");

#define SWAPMAX(adj)	\
	((int64_t)vm_swap_max * (vm_swapcache_maxswappct + (adj)) / 100)
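
/*
 * Worked example for SWAPMAX() (numbers illustrative): with the default
 * maxswappct of 75, SWAPMAX(0) is 75% of vm_swap_max and SWAPMAX(-10)
 * is 65%.  So on a 1TB swap configuration the daemon writes until
 * roughly 750GB of swapcache is assigned, then cleans assignments back
 * down to roughly 650GB before writing again.
 */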

/*
 * When shutting down the machine we want to stop swapcache operation
 * immediately so swap is not accessed after devices have been shuttered.
 */
static void
shutdown_swapcache(void *arg __unused)
{
	vm_swapcache_read_enable = 0;
	vm_swapcache_data_enable = 0;
	vm_swapcache_meta_enable = 0;
	wakeup(&vm_swapcache_sleep);	/* shortcut 5-second wait */
}

/*
 * vm_swapcached is the high level swapcache daemon.
 *
 * No requirements.
 */
static void
vm_swapcached_thread(void)
{
	enum { SWAPC_WRITING, SWAPC_CLEANING } state = SWAPC_WRITING;
	enum { SWAPB_BURSTING, SWAPB_RECOVERING } burst = SWAPB_BURSTING;
	static struct vm_page page_marker[PQ_L2_SIZE];
	static struct vm_object swmarker;
	static struct vm_object_hash *swindex;
	int q;

	/*
	 * Thread setup
	 */
	curthread->td_flags |= TDF_SYSTHREAD;
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc,
			      swapcached_thread, SHUTDOWN_PRI_FIRST);
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_swapcache,
			      NULL, SHUTDOWN_PRI_SECOND);

	/*
	 * Initialize our marker for the inactive scan (SWAPC_WRITING)
	 */
	bzero(&page_marker, sizeof(page_marker));
	for (q = 0; q < PQ_L2_SIZE; ++q) {
		page_marker[q].flags = PG_FICTITIOUS | PG_MARKER;
		page_marker[q].busy_count = PBUSY_LOCKED;
		page_marker[q].queue = PQ_INACTIVE + q;
		page_marker[q].pc = q;
		page_marker[q].wire_count = 1;
		vm_page_queues_spin_lock(PQ_INACTIVE + q);
		TAILQ_INSERT_HEAD(
			&vm_page_queues[PQ_INACTIVE + q].pl,
			&page_marker[q], pageq);
		vm_page_queues_spin_unlock(PQ_INACTIVE + q);
	}

	vm_swapcache_min_hysteresis = 1024;
	vm_swapcache_hysteresis = vm_swapcache_min_hysteresis;
	vm_swapcache_wtrigger = -vm_swapcache_hysteresis;

	/*
	 * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
	 */
	bzero(&swmarker, sizeof(swmarker));
	swmarker.type = OBJT_MARKER;
	swindex = &vm_object_hash[0];
	lwkt_gettoken(&swindex->token);
	TAILQ_INSERT_HEAD(&swindex->list, &swmarker, object_list);
	lwkt_reltoken(&swindex->token);

	for (;;) {
		int reached_end;
		int scount;
		int count;

		/*
		 * Handle shutdown
		 */
		kproc_suspend_loop();

		/*
		 * Check every 5 seconds when not enabled or if no swap
		 * is present.
		 */
		if ((vm_swapcache_data_enable == 0 &&
		     vm_swapcache_meta_enable == 0 &&
		     vm_swap_cache_use <= SWAPMAX(0)) ||
		    vm_swap_max == 0) {
			tsleep(&vm_swapcache_sleep, 0, "csleep", hz * 5);
			continue;
		}

		/*
		 * Polling rate when enabled is approximately 10 hz.
		 */
		tsleep(&vm_swapcache_sleep, 0, "csleep", hz / 10);

		/*
		 * State hysteresis.  Generate write activity up to 75% of
		 * swap, then clean out swap assignments down to 65%
		 * (SWAPMAX(-10)), then repeat.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swap_cache_use > SWAPMAX(0))
				state = SWAPC_CLEANING;
		} else {
			if (vm_swap_cache_use < SWAPMAX(-10))
				state = SWAPC_WRITING;
		}

		/*
		 * We are allowed to continue accumulating burst value
		 * in either state.  Allow the user to set curburst > maxburst
		 * for the initial load-in.
		 */
		if (vm_swapcache_curburst < vm_swapcache_maxburst) {
			vm_swapcache_curburst += vm_swapcache_accrate / 10;
			if (vm_swapcache_curburst > vm_swapcache_maxburst)
				vm_swapcache_curburst = vm_swapcache_maxburst;
		}
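
		/*
		 * (Illustrative arithmetic: at the ~10hz polling rate above,
		 * adding accrate/10 per pass accumulates roughly accrate
		 * bytes of write budget per second -- 100KB/sec with the
		 * default accrate -- capped at maxburst, 2GB by default.)
		 */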

		/*
		 * We don't want to nickel-and-dime the scan as that will
		 * create unnecessary fragmentation.  The minimum burst
		 * is one second's worth of accumulation.
		 */
		if (state != SWAPC_WRITING) {
			vm_swapcache_cleaning(&swmarker, &swindex);
			continue;
		}
		if (vm_swapcache_curburst < vm_swapcache_accrate)
			continue;

		reached_end = 0;
		count = vm_swapcache_maxlaunder / PQ_L2_SIZE + 2;
		scount = vm_swapcache_maxscan / PQ_L2_SIZE + 2;

		if (burst == SWAPB_BURSTING) {
			if (vm_swapcache_writing_heuristic()) {
				for (q = 0; q < PQ_L2_SIZE; ++q) {
					reached_end +=
						vm_swapcache_writing(
							&page_marker[q],
							count,
							scount);
				}
			}
			if (vm_swapcache_curburst <= 0)
				burst = SWAPB_RECOVERING;
		} else if (vm_swapcache_curburst > vm_swapcache_minburst) {
			if (vm_swapcache_writing_heuristic()) {
				for (q = 0; q < PQ_L2_SIZE; ++q) {
					reached_end +=
						vm_swapcache_writing(
							&page_marker[q],
							count,
							scount);
				}
			}
			burst = SWAPB_BURSTING;
		}
		if (reached_end == PQ_L2_SIZE) {
			vm_swapcache_wtrigger = -vm_swapcache_hysteresis;
		}
	}

	/*
	 * Cleanup (NOT REACHED)
	 */
	for (q = 0; q < PQ_L2_SIZE; ++q) {
		vm_page_queues_spin_lock(PQ_INACTIVE + q);
		TAILQ_REMOVE(
			&vm_page_queues[PQ_INACTIVE + q].pl,
			&page_marker[q], pageq);
		vm_page_queues_spin_unlock(PQ_INACTIVE + q);
	}

	lwkt_gettoken(&swindex->token);
	TAILQ_REMOVE(&swindex->list, &swmarker, object_list);
	lwkt_reltoken(&swindex->token);
}

static struct kproc_desc swpc_kp = {
	"swapcached",
	vm_swapcached_thread,
	&swapcached_thread
};
SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp);

/*
 * Deal with an overflow of the heuristic counter or if the user
 * manually changes the hysteresis.
 *
 * Try to avoid small incremental pageouts by waiting for enough
 * pages to build up in the inactive queue to hopefully get a good
 * burst in.  This heuristic is bumped by the VM system and reset
 * when our scan hits the end of the queue.
 *
 * Return TRUE if we need to take a writing pass.
 */
static int
vm_swapcache_writing_heuristic(void)
{
	int hyst;
	int q;
	long adds;

	hyst = vmstats.v_inactive_count / 4;
	if (hyst < vm_swapcache_min_hysteresis)
		hyst = vm_swapcache_min_hysteresis;
	cpu_ccfence();
	vm_swapcache_hysteresis = hyst;

	adds = 0;
	for (q = PQ_INACTIVE; q < PQ_INACTIVE + PQ_L2_SIZE; ++q) {
		adds += atomic_swap_long(&vm_page_queues[q].adds, 0);
	}
	vm_swapcache_wtrigger += adds;
	if (vm_swapcache_wtrigger < -hyst)
		vm_swapcache_wtrigger = -hyst;
	return (vm_swapcache_wtrigger >= 0);
}
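
/*
 * Worked example for the heuristic above (numbers illustrative): with
 * v_inactive_count at 8192 pages the hysteresis computes to 2048.
 * After a full scan resets vm_swapcache_wtrigger to -2048, the VM
 * system must add roughly 2048 more pages to the inactive queues
 * before the trigger reaches zero and another writing pass is taken.
 */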

/*
 * Take a writing pass on one of the inactive queues, return non-zero if
 * we hit the end of the queue.
 */
static int
vm_swapcache_writing(vm_page_t marker, int count, int scount)
{
	vm_object_t object;
	struct vnode *vp;
	vm_page_t m;
	int isblkdev;

	/*
	 * Scan the inactive queue from our marker to locate
	 * suitable pages to push to the swap cache.
	 *
	 * We are looking for clean vnode-backed pages.
	 */
	vm_page_queues_spin_lock(marker->queue);
	while ((m = TAILQ_NEXT(marker, pageq)) != NULL &&
	       count > 0 && scount-- > 0) {
		KKASSERT(m->queue == marker->queue);

		/*
		 * Stop using swap if panicked, dumping, or dumped.
		 * Don't try to write if our curburst has been exhausted.
		 */
		if (panicstr || dumping)
			break;
		if (vm_swapcache_curburst < 0)
			break;

		/*
		 * Move marker
		 */
		TAILQ_REMOVE(
			&vm_page_queues[marker->queue].pl, marker, pageq);
		TAILQ_INSERT_AFTER(
			&vm_page_queues[marker->queue].pl, m, marker, pageq);

		/*
		 * Ignore markers and ignore pages that already have a swap
		 * assignment.
		 */
		if (m->flags & (PG_MARKER | PG_SWAPPED))
			continue;
		if (vm_page_busy_try(m, TRUE))
			continue;
		vm_page_queues_spin_unlock(marker->queue);

		if ((object = m->object) == NULL) {
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}
		vm_object_hold(object);
		if (m->object != object) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}
		if (vm_swapcache_test(m)) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		vp = object->handle;
		if (vp == NULL) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		switch(vp->v_type) {
		case VREG:
			/*
			 * PG_NOTMETA generically means 'don't swapcache this',
			 * and HAMMER will set this for regular data buffers
			 * (and leave it unset for meta-data buffers) as
			 * appropriate when double buffering is enabled.
			 */
			if (m->flags & PG_NOTMETA) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}

			/*
			 * If data_enable is 0 do not try to swapcache data.
			 * If use_chflags is set then only swapcache data for
			 * VSWAPCACHE marked vnodes, otherwise any vnode.
			 */
			if (vm_swapcache_data_enable == 0 ||
			    ((vp->v_flag & VSWAPCACHE) == 0 &&
			     vm_swapcache_use_chflags)) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			if (vm_swapcache_maxfilesize &&
			    object->size >
			    (vm_swapcache_maxfilesize >> PAGE_SHIFT)) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			isblkdev = 0;
			break;
		case VCHR:
			/*
			 * PG_NOTMETA generically means 'don't swapcache this',
			 * and HAMMER will set this for regular data buffers
			 * (and leave it unset for meta-data buffers) as
			 * appropriate when double buffering is enabled.
			 */
			if (m->flags & PG_NOTMETA) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			if (vm_swapcache_meta_enable == 0) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			isblkdev = 1;
			break;
		default:
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		/*
		 * Assign swap and initiate I/O.
		 *
		 * (count is decremented by the number of pages flushed)
		 */
		count -= vm_swapcached_flush(m, isblkdev);

		/*
		 * Setup for next loop using marker.
		 */
		vm_object_drop(object);
		vm_page_queues_spin_lock(marker->queue);
	}

	/*
	 * The marker could wind up at the end, which is ok.  If we hit the
	 * end of the list adjust the heuristic.
	 *
	 * Earlier inactive pages that were dirty and become clean
	 * are typically moved to the end of PQ_INACTIVE by virtue
	 * of vfs_vmio_release() when they become unwired from the
	 * buffer cache.
	 */
	vm_page_queues_spin_unlock(marker->queue);

	/*
	 * m invalid but can be used to test for NULL
	 */
	return (m == NULL);
}
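
/*
 * Illustrative example of the clustering performed by
 * vm_swapcached_flush() below, assuming the typical SWAP_META_PAGES
 * of 16: a candidate page at pindex 37 gives x = 37 & SWAP_META_MASK
 * = 5, so the scan walks backwards through pindices 36..32 and
 * forwards through 38..47, covering the aligned 16-page block 32..47
 * that swap_pager_putpages() can take in one shot.
 */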

/*
 * Flush the specified page using the swap_pager.  The page
 * must be busied by the caller and its disposition will become
 * the responsibility of this function.
 *
 * Try to collect surrounding pages, including pages which may
 * have already been assigned swap.  Try to cluster within a
 * contiguous aligned SWAP_META_PAGES (typ 16 x PAGE_SIZE) block
 * to match what swap_pager_putpages() can do.
 *
 * We also want to try to match against the buffer cache blocksize
 * but we don't really know what it is here.  Since the buffer cache
 * wires and unwires pages in groups the fact that we skip wired pages
 * should be sufficient.
 *
 * Returns a count of pages we might have flushed (minimum 1)
 */
static
int
vm_swapcached_flush(vm_page_t m, int isblkdev)
{
	vm_object_t object;
	vm_page_t marray[SWAP_META_PAGES];
	vm_pindex_t basei;
	int rtvals[SWAP_META_PAGES];
	int x;
	int i;
	int j;
	int count;
	int error;

	vm_page_io_start(m);
	vm_page_protect(m, VM_PROT_READ);
	object = m->object;
	vm_object_hold(object);

	/*
	 * Try to cluster around (m), keeping in mind that the swap pager
	 * can only do SWAP_META_PAGES worth of contiguous write.
	 */
	x = (int)m->pindex & SWAP_META_MASK;
	marray[x] = m;
	basei = m->pindex;
	vm_page_wakeup(m);

	for (i = x - 1; i >= 0; --i) {
		m = vm_page_lookup_busy_try(object, basei - x + i,
					    TRUE, &error);
		if (error || m == NULL)
			break;
		if (vm_swapcache_test(m)) {
			vm_page_wakeup(m);
			break;
		}
		if (isblkdev && (m->flags & PG_NOTMETA)) {
			vm_page_wakeup(m);
			break;
		}
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[i] = m;
		vm_page_wakeup(m);
	}
	++i;

	for (j = x + 1; j < SWAP_META_PAGES; ++j) {
		m = vm_page_lookup_busy_try(object, basei - x + j,
					    TRUE, &error);
		if (error || m == NULL)
			break;
		if (vm_swapcache_test(m)) {
			vm_page_wakeup(m);
			break;
		}
		if (isblkdev && (m->flags & PG_NOTMETA)) {
			vm_page_wakeup(m);
			break;
		}
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[j] = m;
		vm_page_wakeup(m);
	}
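
	/*
	 * At this point i is the index of the first clustered page and
	 * j is one past the last, so marray[i..j-1] holds the run handed
	 * to swap_pager_putpages() below.
	 */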
	count = j - i;
	vm_object_pip_add(object, count);
	swap_pager_putpages(object, marray + i, count, FALSE, rtvals + i);
	vm_swapcache_write_count += count * PAGE_SIZE;
	vm_swapcache_curburst -= count * PAGE_SIZE;

	while (i < j) {
		if (rtvals[i] != VM_PAGER_PEND) {
			vm_page_busy_wait(marray[i], FALSE, "swppgfd");
			vm_page_io_finish(marray[i]);
			vm_page_wakeup(marray[i]);
			vm_object_pip_wakeup(object);
		}
		++i;
	}
	vm_object_drop(object);
	return(count);
}

/*
 * Test whether a VM page is suitable for writing to the swapcache.
 * Does not test m->queue, PG_MARKER, or PG_SWAPPED.
 *
 * Returns 0 on success, 1 on failure
 */
static int
vm_swapcache_test(vm_page_t m)
{
	vm_object_t object;

	if (m->flags & PG_UNMANAGED)
		return(1);
	if (m->hold_count || m->wire_count)
		return(1);
	if (m->valid != VM_PAGE_BITS_ALL)
		return(1);
	if (m->dirty & m->valid)
		return(1);
	if ((object = m->object) == NULL)
		return(1);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_DEAD)) {
		return(1);
	}
	vm_page_test_dirty(m);
	if (m->dirty & m->valid)
		return(1);
	return(0);
}
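
/*
 * Note on the scan technique used below: the cleaning pass keeps its
 * position in the vm_object_hash[] lists with the dummy OBJT_MARKER
 * object set up by the daemon, which lets it drop and re-acquire the
 * hash token around swap_pager_condfree() without losing its place.
 */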

/*
 * Cleaning pass.
 *
 * We clean whole objects up to 16MB
 */
static
void
vm_swapcache_cleaning(vm_object_t marker, struct vm_object_hash **swindexp)
{
	vm_object_t object;
	struct vnode *vp;
	int count;
	int scount;
	int n;
	int didmove;

	count = vm_swapcache_maxlaunder;
	scount = vm_swapcache_maxscan;

	/*
	 * Look for vnode objects
	 */
	lwkt_gettoken(&(*swindexp)->token);

	didmove = 0;
outerloop:
	while ((object = TAILQ_NEXT(marker, object_list)) != NULL) {
		/*
		 * We have to skip markers.  We cannot hold/drop marker
		 * objects!
		 */
		if (object->type == OBJT_MARKER) {
			vm_swapcache_movemarker(marker, *swindexp, object);
			didmove = 1;
			continue;
		}

		/*
		 * Safety, or in case there are millions of VM objects
		 * without swapcache backing.
		 */
		if (--scount <= 0)
			goto breakout;

		/*
		 * We must hold the object before potentially yielding.
		 */
		vm_object_hold(object);
		lwkt_yield();

		/*
		 * Only operate on live VNODE objects that are either
		 * VREG or VCHR (VCHR for meta-data).
		 */
		if ((object->type != OBJT_VNODE) ||
		    ((object->flags & OBJ_DEAD) ||
		     object->swblock_count == 0) ||
		    ((vp = object->handle) == NULL) ||
		    (vp->v_type != VREG && vp->v_type != VCHR)) {
			vm_object_drop(object);
			/* object may be invalid now */
			vm_swapcache_movemarker(marker, *swindexp, object);
			didmove = 1;
			continue;
		}

		/*
		 * Reset the object pindex stored in the marker if the
		 * working object has changed.
		 */
		if (marker->backing_object != object || didmove) {
			marker->size = 0;
			marker->backing_object_offset = 0;
			marker->backing_object = object;
			didmove = 0;
		}

		/*
		 * Look for swblocks starting at our iterator.
		 *
		 * The swap_pager_condfree() function attempts to free
		 * swap space starting at the specified index.  The index
		 * will be updated on return.  The function will return
		 * a scan factor (NOT the number of blocks freed).
		 *
		 * If it must cut its scan of the object short due to an
		 * excessive number of swblocks, or is able to free the
		 * requested number of blocks, it will return n >= count
		 * and we break and pick it back up on a future attempt.
		 *
		 * Scan the object linearly and try to batch large sets of
		 * blocks that are likely to clean out entire swap radix
		 * tree leaves.
		 */
		lwkt_token_swap();
		lwkt_reltoken(&(*swindexp)->token);

		n = swap_pager_condfree(object, &marker->size,
					(count + SWAP_META_MASK) &
					~SWAP_META_MASK);

		vm_object_drop(object);		/* object may be invalid now */
		lwkt_gettoken(&(*swindexp)->token);

		/*
		 * If we have exhausted the object or depleted our per-pass
		 * page limit then move us to the next object.  Note that
		 * the current object may no longer be on the vm_object_list.
		 */
		if (n <= 0 ||
		    marker->backing_object_offset > vm_swapcache_cleanperobj) {
			vm_swapcache_movemarker(marker, *swindexp, object);
			didmove = 1;
		}

		/*
		 * If we have exhausted our max-launder stop for now.
		 */
		count -= n;
		marker->backing_object_offset += n * PAGE_SIZE;
		if (count < 0)
			goto breakout;
	}

	/*
	 * Iterate the vm_object_hash[] hash table
	 */
	TAILQ_REMOVE(&(*swindexp)->list, marker, object_list);
	lwkt_reltoken(&(*swindexp)->token);
	if (++*swindexp >= &vm_object_hash[VMOBJ_HSIZE])
		*swindexp = &vm_object_hash[0];
	lwkt_gettoken(&(*swindexp)->token);
	TAILQ_INSERT_HEAD(&(*swindexp)->list, marker, object_list);

	if (*swindexp != &vm_object_hash[0])
		goto outerloop;

breakout:
	lwkt_reltoken(&(*swindexp)->token);
}

/*
 * Move the marker past the current object.  Object can be stale, but we
 * still need it to determine if the marker has to be moved.  If the object
 * is still the 'current object' (object after the marker), we hop-scotch
 * the marker past it.
 */
static void
vm_swapcache_movemarker(vm_object_t marker, struct vm_object_hash *swindex,
			vm_object_t object)
{
	if (TAILQ_NEXT(marker, object_list) == object) {
		TAILQ_REMOVE(&swindex->list, marker, object_list);
		TAILQ_INSERT_AFTER(&swindex->list, object, marker,
				   object_list);
	}
}