/*
 * (MPSAFE)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
 * $DragonFly: src/sys/vm/vm_pageout.c,v 1.36 2008/07/01 02:02:56 dillon Exp $
 */

/*
 * The proverbial page-out daemon.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <vm/vm_page2.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static int vm_pageout_clean (vm_page_t);
static int vm_pageout_scan (int pass);
static int vm_pageout_free_page_calc (vm_size_t count);
struct thread *pagethread;

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon (void);
static struct thread *vmthread;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmthread
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
pages in mem"); 184 185 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts, 186 CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages"); 187 188 static int pageout_lock_miss; 189 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss, 190 CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout"); 191 192 int vm_load; 193 SYSCTL_INT(_vm, OID_AUTO, vm_load, 194 CTLFLAG_RD, &vm_load, 0, "load on the VM system"); 195 int vm_load_enable = 1; 196 SYSCTL_INT(_vm, OID_AUTO, vm_load_enable, 197 CTLFLAG_RW, &vm_load_enable, 0, "enable vm_load rate limiting"); 198 #ifdef INVARIANTS 199 int vm_load_debug; 200 SYSCTL_INT(_vm, OID_AUTO, vm_load_debug, 201 CTLFLAG_RW, &vm_load_debug, 0, "debug vm_load"); 202 #endif 203 204 #define VM_PAGEOUT_PAGE_COUNT 16 205 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT; 206 207 int vm_page_max_wired; /* XXX max # of wired pages system-wide */ 208 209 #if !defined(NO_SWAPPING) 210 typedef void freeer_fcn_t (vm_map_t, vm_object_t, vm_pindex_t, int); 211 static void vm_pageout_map_deactivate_pages (vm_map_t, vm_pindex_t); 212 static freeer_fcn_t vm_pageout_object_deactivate_pages; 213 static void vm_req_vmdaemon (void); 214 #endif 215 static void vm_pageout_page_stats(void); 216 217 /* 218 * Update vm_load to slow down faulting processes. 219 * 220 * SMP races ok. 221 * No requirements. 222 */ 223 void 224 vm_fault_ratecheck(void) 225 { 226 if (vm_pages_needed) { 227 if (vm_load < 1000) 228 ++vm_load; 229 } else { 230 if (vm_load > 0) 231 --vm_load; 232 } 233 } 234 235 /* 236 * vm_pageout_clean: 237 * 238 * Clean the page and remove it from the laundry. The page must not be 239 * busy on-call. 240 * 241 * We set the busy bit to cause potential page faults on this page to 242 * block. Note the careful timing, however, the busy bit isn't set till 243 * late and we cannot do anything that will mess with the page. 244 * 245 * The caller must hold vm_token. 246 */ 247 static int 248 vm_pageout_clean(vm_page_t m) 249 { 250 vm_object_t object; 251 vm_page_t mc[2*vm_pageout_page_count]; 252 int pageout_count; 253 int ib, is, page_base; 254 vm_pindex_t pindex = m->pindex; 255 256 object = m->object; 257 258 /* 259 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP 260 * with the new swapper, but we could have serious problems paging 261 * out other object types if there is insufficient memory. 262 * 263 * Unfortunately, checking free memory here is far too late, so the 264 * check has been moved up a procedural level. 265 */ 266 267 /* 268 * Don't mess with the page if it's busy, held, or special 269 */ 270 if ((m->hold_count != 0) || 271 ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) { 272 return 0; 273 } 274 275 mc[vm_pageout_page_count] = m; 276 pageout_count = 1; 277 page_base = vm_pageout_page_count; 278 ib = 1; 279 is = 1; 280 281 /* 282 * Scan object for clusterable pages. 283 * 284 * We can cluster ONLY if: ->> the page is NOT 285 * clean, wired, busy, held, or mapped into a 286 * buffer, and one of the following: 287 * 1) The page is inactive, or a seldom used 288 * active page. 289 * -or- 290 * 2) we force the issue. 291 * 292 * During heavy mmap/modification loads the pageout 293 * daemon can really fragment the underlying file 294 * due to flushing pages out of order and not trying 295 * align the clusters (which leave sporatic out-of-order 296 * holes). To solve this problem we do the reverse scan 297 * first and attempt to align our cluster, then do a 298 * forward scan if room remains. 
	 */

more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	       pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we setup for the start of
 * I/O (i.e. busy the page), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 *
 * The caller must hold vm_token.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags)
{
	vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	ASSERT_LWKT_TOKEN_HELD(&vm_token);

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
			("vm_pageout_flush page %p index %d/%d: partially "
			 "invalid page", mc[i], i, count));
		vm_page_io_start(mc[i]);
	}

	/*
	 * We must make the pages read-only.  This will also force the
	 * modified bit in the related pmaps to be cleared.  The pager
	 * cannot clear the bit for us since the I/O completion code
	 * typically runs from an interrupt.  The act of making the page
	 * read-only handles the case for us.
	 */
	for (i = 0; i < count; i++) {
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * A page typically cannot be paged out when we
			 * have run out of swap.  We leave the page
			 * marked inactive and will try to page it out
			 * again later.
			 *
			 * Starvation of the active page list is used to
			 * determine when the system is massively memory
			 * starved.
			 */
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 *
		 * For any pages which have completed synchronously,
		 * deactivate the page if we are under a severe deficit.
		 * Do not try to enter them into the cache, though, they
		 * might still be read-heavy.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			if (vm_page_count_severe())
				vm_page_deactivate(mt);
#if 0
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				vm_page_protect(mt, VM_PROT_READ);
#endif
			vm_page_io_finish(mt);
			vm_object_pip_wakeup(object);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The map must be locked.
 * The caller must hold vm_token.
 */
static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *);

static void
vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
				   vm_pindex_t desired, int map_remove_only)
{
	struct rb_vm_page_scan_info info;
	int remove_mode;

	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;

		/*
		 * scan the object's entire memory queue.  spl protection is
		 * required to avoid an interrupt unbusy/free race against
		 * our busy check.
		 */
		crit_enter();
		info.limit = remove_mode;
		info.map = map;
		info.desired = desired;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
				vm_pageout_object_deactivate_pages_callback,
				&info
		);
		crit_exit();
		object = object->backing_object;
	}
}

/*
 * The caller must hold vm_token.
 */
static int
vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	int actcount;

	if (pmap_resident_count(vm_map_pmap(info->map)) <= info->desired) {
		return(-1);
	}
	mycpu->gd_cnt.v_pdpages++;
	if (p->wire_count != 0 || p->hold_count != 0 || p->busy != 0 ||
	    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
	    !pmap_page_exists_quick(vm_map_pmap(info->map), p)) {
		return(0);
	}

	actcount = pmap_ts_referenced(p);
	if (actcount) {
		vm_page_flag_set(p, PG_REFERENCED);
	} else if (p->flags & PG_REFERENCED) {
		actcount = 1;
	}

	if ((p->queue != PQ_ACTIVE) &&
	    (p->flags & PG_REFERENCED)) {
		vm_page_activate(p);
		p->act_count += actcount;
		vm_page_flag_clear(p, PG_REFERENCED);
	} else if (p->queue == PQ_ACTIVE) {
		if ((p->flags & PG_REFERENCED) == 0) {
			p->act_count -= min(p->act_count, ACT_DECLINE);
			if (!info->limit &&
			    (vm_pageout_algorithm || (p->act_count == 0))) {
				vm_page_busy(p);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_deactivate(p);
				vm_page_wakeup(p);
			} else {
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
			}
		} else {
			vm_page_activate(p);
			vm_page_flag_clear(p, PG_REFERENCED);
			if (p->act_count < (ACT_MAX - ACT_ADVANCE))
				p->act_count += ACT_ADVANCE;
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
		}
	} else if (p->queue == PQ_INACTIVE) {
		vm_page_busy(p);
		vm_page_protect(p, VM_PROT_NONE);
		vm_page_wakeup(p);
	}
	return(0);
}

/*
 * Deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 *
 * The caller must hold vm_token.
 */
static void
vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT)) {
		return;
	}

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		switch(tmpe->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			     (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
			break;
		default:
			break;
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		switch(tmpe->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
			break;
		default:
			break;
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired)
		pmap_remove(vm_map_pmap(map),
			    VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	vm_map_unlock(map);
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to vnode deadlocks.  We
 * only do it for OBJT_DEFAULT and OBJT_SWAP objects which we know can
 * be trivially freed.
 *
 * The caller must hold vm_token.
 *
 * WARNING: vm_object_reference() can block.
 */
static void
vm_pageout_page_free(vm_page_t m)
{
	vm_object_t object = m->object;
	int type = object->type;

	vm_page_busy(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
struct vm_pageout_scan_info {
	struct proc *bigproc;
	vm_offset_t bigsize;
};

static int vm_pageout_scan_callback(struct proc *p, void *data);

/*
 * The caller must hold vm_token.
 */
static int
vm_pageout_scan(int pass)
{
	struct vm_pageout_scan_info info;
	vm_page_t m, next;
	struct vm_page marker;
	struct vnode *vpfailed;		/* warning, allowed to be stale */
	int maxscan, pcount;
	int recycle_count;
	int inactive_shortage, active_shortage;
	int inactive_original_shortage;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;

	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	pmap_collect();

	/*
	 * Calculate our target for the number of free+cache pages we
	 * want to get to.  This is higher than the number that causes
	 * allocations to stall (severe) in order to provide hysteresis,
	 * and if we don't make it all the way but get to the minimum
	 * we're happy.
	 */
	inactive_shortage = vm_paging_target() + vm_pageout_deficit;
	inactive_original_shortage = inactive_shortage;
	vm_pageout_deficit = 0;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;

	/*
	 * We will generally be in a critical section throughout the
	 * scan, but we can release it temporarily when we are sitting on a
	 * non-busy page without fear.
	 * This is required to prevent an
	 * interrupt from unbusying or freeing a page prior to our busy
	 * check, leaving us on the wrong queue or checking the wrong
	 * page.
	 */
	crit_enter();
rescan0:
	vpfailed = NULL;
	maxscan = vmstats.v_inactive_count;
	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && inactive_shortage > 0;
	     m = next
	) {
		mycpu->gd_cnt.v_pdpages++;

		/*
		 * Give interrupts a chance
		 */
		crit_exit();
		crit_enter();

		/*
		 * It's easier for some of the conditions below to just loop
		 * and catch queue changes here rather than check everywhere
		 * else.
		 */
		if (m->queue != PQ_INACTIVE)
			goto rescan0;
		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			++vm_swapcache_inactive_heuristic;
			continue;
		}

		/*
		 * Don't mess with busy pages.  Keep them at the front of
		 * the queue, they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			continue;
		}

		if (m->object->ref_count == 0) {
			/*
			 * If the object is not being used, we ignore previous
			 * references.
			 */
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		} else if (((m->flags & PG_REFERENCED) == 0) &&
			   (actcount = pmap_ts_referenced(m))) {
			/*
			 * Otherwise, if the page has been referenced while
			 * in the inactive queue, we bump the "activation
			 * count" upwards, making it less likely that the
			 * page will be added back to the inactive queue
			 * prematurely again.  Here we check the page tables
			 * (or emulated bits, if any), given the upper level
			 * VM system not knowing anything about existing
			 * references.
			 */
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will less
		 * likely place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 *
		 * Pages marked PG_WRITEABLE may be mapped into the user
		 * address space of a process running on another cpu.  A
		 * user process (without holding the MP lock) running on
		 * another cpu may be able to touch the page while we are
		 * trying to remove it.  vm_page_cache() will handle this
		 * case for us.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		if (m->valid == 0) {
			/*
			 * Invalid pages can be easily freed
			 */
			vm_pageout_page_free(m);
			mycpu->gd_cnt.v_dfree++;
			--inactive_shortage;
		} else if (m->dirty == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_busy(m);
			vm_page_cache(m);
			--inactive_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			++vm_swapcache_inactive_heuristic;
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;

			object = m->object;

			if ((object->type != OBJT_SWAP) &&
			    (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts ||
						     disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts &&
						     defer_swap_pageouts &&
						     vm_page_count_min(0));
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				++vm_swapcache_inactive_heuristic;
				continue;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation because,
			 * quite often, a vm_fault has initiated an I/O and
			 * is holding a locked vnode at just the point where
			 * the pageout daemon is woken up.
			 *
			 * We can't wait forever for the vnode lock, we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 *
			 * vpfailed is used to (try to) avoid the case where
			 * a large number of pages are associated with a
			 * locked vnode, which could cause the pageout daemon
			 * to stall for an excessive amount of time.
			 */
			if (object->type == OBJT_VNODE) {
				int flags;

				vp = object->handle;
				flags = LK_EXCLUSIVE | LK_NOOBJ;
				if (vp == vpfailed)
					flags |= LK_NOWAIT;
				else
					flags |= LK_TIMELOCK;
				if (vget(vp, flags) != 0) {
					vpfailed = vp;
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the
				 * page back onto the end of the queue;
				 * statistics are more correct if we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
					++vm_swapcache_inactive_heuristic;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * decrement inactive_shortage on success to account
			 * for the (future) cleaned page.  Otherwise we
			 * could wind up laundering or cleaning too many
			 * pages.
			 */
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m,
					   &marker, pageq);
			if (vm_pageout_clean(m) != 0) {
				--inactive_shortage;
				--maxlaunder;
			}
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
				     &marker, pageq);
			if (vp != NULL)
				vput(vp);
		}
	}

	/*
	 * We want to move pages from the active queue to the inactive
	 * queue to get the inactive queue to the inactive target.  If
	 * we still have a page shortage from above we try to directly free
	 * clean pages instead of moving them.
	 *
	 * If we do still have a shortage we keep track of the number of
	 * pages we free or cache (recycle_count) as a measure of thrashing
	 * between the active and inactive queues.
	 *
	 * If we were able to completely satisfy the free+cache targets
	 * from the inactive pool we limit the number of pages we move
	 * from the active pool to the inactive pool to 2x the pages we
	 * had removed from the inactive pool (with a minimum of 1/5 the
	 * inactive target).  If we were not able to completely satisfy
	 * the free+cache targets we go for the whole target aggressively.
	 *
	 * NOTE: Both variables can end up negative.
	 * NOTE: We are still in a critical section.
	 */
	active_shortage = vmstats.v_inactive_target - vmstats.v_inactive_count;
	if (inactive_original_shortage < vmstats.v_inactive_target / 10)
		inactive_original_shortage = vmstats.v_inactive_target / 10;
	if (inactive_shortage <= 0 &&
	    active_shortage > inactive_original_shortage * 2) {
		active_shortage = inactive_original_shortage * 2;
	}

	pcount = vmstats.v_active_count;
	recycle_count = 0;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) &&
	       (inactive_shortage > 0 || active_shortage > 0)
	) {
		/*
		 * Give interrupts a chance.
		 */
		crit_exit();
		crit_enter();

		/*
		 * If the page was ripped out from under us, just stop.
		 */
		if (m->queue != PQ_ACTIVE)
			break;
		next = TAILQ_NEXT(m, pageq);

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		mycpu->gd_cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used and clear
		 * the tracking access bits.  If the object has no references
		 * don't bother paying the expense.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED)
				++actcount;
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * actcount is only valid if the object ref_count is non-zero.
		 */
		if (actcount && m->object->ref_count != 0) {
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count < pass + 1
			) {
				/*
				 * Deactivate the page.  If we had a
				 * shortage from our inactive scan try to
				 * free (cache) the page instead.
				 *
				 * Don't just blindly cache the page if
				 * we do not have a shortage from the
				 * inactive scan, that could lead to
				 * gigabytes being moved.
				 */
				--active_shortage;
				if (inactive_shortage > 0 ||
				    m->object->ref_count == 0) {
					if (inactive_shortage > 0)
						++recycle_count;
					vm_page_busy(m);
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0 &&
					    inactive_shortage > 0) {
						--inactive_shortage;
						vm_page_cache(m);
					} else {
						vm_page_deactivate(m);
						vm_page_wakeup(m);
					}
				} else {
					vm_page_deactivate(m);
				}
			} else {
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			}
		}
		m = next;
	}

	/*
	 * The number of actually free pages can drop down to v_free_reserved,
	 * we try to build the free count back above v_free_min.  Note that
	 * vm_paging_needed() also returns TRUE if v_free_count is not at
	 * least v_free_min so that is the minimum we must build the free
	 * count to.
	 *
	 * We use a slightly higher target to improve hysteresis,
	 * ((v_free_target + v_free_min) / 2).  Since v_free_target
	 * is usually the same as v_cache_min this maintains about
	 * half the pages in the free queue as are in the cache queue,
	 * providing pretty good pipelining for pageout operation.
	 *
	 * The system operator can manipulate vm.v_cache_min and
	 * vm.v_free_target to tune the pageout daemon.  Be sure
	 * to keep vm.v_free_min < vm.v_free_target.
	 *
	 * Note that the original paging target is to get at least
	 * (free_min + cache_min) into (free + cache).
	 * The slightly higher target will shift additional pages from cache
	 * to free without affecting the original paging target in order to
	 * maintain better hysteresis and not have the free count always be
	 * dead-on v_free_min.
	 *
	 * NOTE: we are still in a critical section.
	 *
	 * Pages moved from PQ_CACHE to totally free are not counted in the
	 * pages_freed counter.
	 */
	while (vmstats.v_free_count <
	       (vmstats.v_free_min + vmstats.v_free_target) / 2) {
		static int cache_rover = 0;

		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
		if (m == NULL)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			kprintf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		KKASSERT((m->flags & PG_MAPPED) == 0);
		KKASSERT(m->dirty == 0);
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		mycpu->gd_cnt.v_dfree++;
	}

	crit_exit();

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;

		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min(0))
			speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * Handle catastrophic conditions.  Under good conditions we should
	 * be at the target, well beyond our minimum.  If we could not even
	 * reach our minimum the system is under heavy stress.
	 *
	 * Determine whether we have run out of memory.  This occurs when
	 * swap_pager_full is TRUE and the only pages left in the page
	 * queues are dirty.  We will still likely have page shortages.
	 *
	 * - swap_pager_full is set if insufficient swap was
	 *   available to satisfy a requested pageout.
	 *
	 * - the inactive queue is bloated (4 x size of active queue),
	 *   meaning it is unable to get rid of dirty pages, and
	 *
	 * - vm_page_count_min() without counting pages recycled from the
	 *   active queue (recycle_count) means we could not recover
	 *   enough pages to meet bare minimum needs.  This test only
	 *   works if the inactive queue is bloated.
	 *
	 * - due to a positive inactive_shortage we shifted the remaining
	 *   dirty pages from the active queue to the inactive queue
	 *   trying to find clean ones to free.
	 */
	if (swap_pager_full && vm_page_count_min(recycle_count))
		kprintf("Warning: system low on memory+swap!\n");
	if (swap_pager_full && vm_page_count_min(recycle_count) &&
	    vmstats.v_inactive_count > vmstats.v_active_count * 4 &&
	    inactive_shortage > 0) {
		/*
		 * Kill something.
		 */
		info.bigproc = NULL;
		info.bigsize = 0;
		allproc_scan(vm_pageout_scan_callback, &info);
		if (info.bigproc != NULL) {
			killproc(info.bigproc, "out of swap space");
			info.bigproc->p_nice = PRIO_MIN;
			info.bigproc->p_usched->resetpriority(
				FIRST_LWP_IN_PROC(info.bigproc));
			wakeup(&vmstats.v_free_count);
			PRELE(info.bigproc);
		}
	}
	return(inactive_shortage);
}

/*
 * The caller must hold vm_token and proc_token.
 */
static int
vm_pageout_scan_callback(struct proc *p, void *data)
{
	struct vm_pageout_scan_info *info = data;
	vm_offset_t size;

	/*
	 * Never kill system processes or init.  If we have configured swap
	 * then try to avoid killing low-numbered pids.
	 */
	if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
	    ((p->p_pid < 48) && (vm_swap_size != 0))) {
		return (0);
	}

	/*
	 * if the process is in a non-running type state,
	 * don't touch it.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
		return (0);

	/*
	 * Get the approximate process size.  Note that anonymous pages
	 * with backing swap will be counted twice, but there should not
	 * be too many such pages due to the stress the VM system is
	 * under at this point.
	 */
	size = vmspace_anonymous_count(p->p_vmspace) +
	       vmspace_swap_count(p->p_vmspace);

	/*
	 * If this process is bigger than the biggest one,
	 * remember it.
	 */
	if (info->bigsize < size) {
		if (info->bigproc)
			PRELE(info->bigproc);
		PHOLD(p);
		info->bigproc = p;
		info->bigsize = size;
	}
	return(0);
}

/*
 * This routine tries to maintain the pseudo LRU active queue,
 * so that during long periods of time where there is no paging,
 * some statistic accumulation still occurs.  This code
 * helps the situation where paging just starts to occur.
 *
 * The caller must hold vm_token.
 */
static void
vm_pageout_page_stats(void)
{
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;

	page_shortage =
	    (vmstats.v_inactive_target + vmstats.v_cache_max +
	     vmstats.v_free_min) -
	    (vmstats.v_free_count + vmstats.v_inactive_count +
	     vmstats.v_cache_count);

	if (page_shortage <= 0)
		return;

	crit_enter();

	pcount = vmstats.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * vmstats.v_active_count) /
			  vmstats.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_busy(m);
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			}
		}

		m = next;
	}
	crit_exit();
}

/*
 * The caller must hold vm_token.
 */
static int
vm_pageout_free_page_calc(vm_size_t count)
{
	if (count < vmstats.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (vmstats.v_page_count > 1024)
		vmstats.v_free_min = 4 + (vmstats.v_page_count - 1024) / 200;
	else
		vmstats.v_free_min = 4;
	vmstats.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
				     vmstats.v_interrupt_free_min;
	vmstats.v_free_reserved = vm_pageout_page_count +
				  vmstats.v_pageout_free_min +
				  (count / 768) + PQ_L2_SIZE;
	vmstats.v_free_severe = vmstats.v_free_min / 2;
	vmstats.v_free_min += vmstats.v_free_reserved;
	vmstats.v_free_severe += vmstats.v_free_reserved;
	return 1;
}


/*
 * vm_pageout is the high level pageout daemon.
 *
 * No requirements.
 */
static void
vm_pageout_thread(void)
{
	int pass;
	int inactive_shortage;

	/*
	 * Permanently hold vm_token.
	 */
	lwkt_gettoken(&vm_token);

	/*
	 * Initialize some paging parameters.
	 */
	curthread->td_flags |= TDF_SYSTHREAD;

	vmstats.v_interrupt_free_min = 2;
	if (vmstats.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(vmstats.v_page_count);

	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (vmstats.v_free_count > 6144)
		vmstats.v_free_target = 4 * vmstats.v_free_min +
					vmstats.v_free_reserved;
	else
		vmstats.v_free_target = 2 * vmstats.v_free_min +
					vmstats.v_free_reserved;

	/*
	 * NOTE: With the new buffer cache b_act_count we want the default
	 *	 inactive target to be a percentage of available memory.
	 *
	 *	 The inactive target essentially determines the minimum
	 *	 number of 'temporary' pages capable of caching one-time-use
	 *	 files when the VM system is otherwise full of pages
	 *	 belonging to multi-time-use files or active program data.
	 *
	 * NOTE: The inactive target is aggressively pursued only if the
	 *	 inactive queue becomes too small.  If the inactive queue
	 *	 is large enough to satisfy page movement to free+cache
	 *	 then it is repopulated more slowly from the active queue.
	 *	 This allows a general inactive_target default to be set.
	 *
	 *	 There is an issue here for processes which sit mostly idle
	 *	 'overnight', such as sshd, tcsh, and X.  Any movement from
	 *	 the active queue will eventually cause such pages to
	 *	 recycle, eventually causing a lot of paging in the morning.
	 *	 To reduce the incidence of this, pages cycled out of the
	 *	 buffer cache are moved directly to the inactive queue if
	 *	 they were only used once or twice.
	 *
	 *	 The vfs.vm_cycle_point sysctl can be used to adjust this.
	 *	 Increasing the value (up to 64) increases the number of
	 *	 buffer recyclements which go directly to the inactive queue.
	 */
	if (vmstats.v_free_count > 2048) {
		vmstats.v_cache_min = vmstats.v_free_target;
		vmstats.v_cache_max = 2 * vmstats.v_cache_min;
	} else {
		vmstats.v_cache_min = 0;
		vmstats.v_cache_max = 0;
	}
	vmstats.v_inactive_target = vmstats.v_free_count / 4;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = vmstats.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = vmstats.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;


	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	swap_pager_swap_init();
	pass = 0;

	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int error;

		/*
		 * Wait for an action request.  If we timeout check to
		 * see if paging is needed (in case the normal wakeup
		 * code raced us).
		 */
		if (vm_pages_needed == 0) {
			error = tsleep(&vm_pages_needed,
				       0, "psleep",
				       vm_pageout_stats_interval * hz);
			if (error &&
			    vm_paging_needed() == 0 &&
			    vm_pages_needed == 0) {
				vm_pageout_page_stats();
				continue;
			}
			vm_pages_needed = 1;
		}

		mycpu->gd_cnt.v_pdwakeups++;

		/*
		 * Scan for pageout.  Try to avoid thrashing the system
		 * with activity.
		 */
		inactive_shortage = vm_pageout_scan(pass);
		if (inactive_shortage > 0) {
			++pass;
			if (swap_pager_full) {
				/*
				 * Running out of memory, catastrophic back-off
				 * to one-second intervals.
				 */
				tsleep(&vm_pages_needed, 0, "pdelay", hz);
			} else if (pass < 10 && vm_pages_needed > 1) {
				/*
				 * Normal operation, additional processes
				 * have already kicked us.  Retry immediately.
				 */
			} else if (pass < 10) {
				/*
				 * Normal operation, fewer processes.  Delay
				 * a bit but allow wakeups.
				 */
				vm_pages_needed = 0;
				tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
				vm_pages_needed = 1;
			} else {
				/*
				 * We've taken too many passes, forced delay.
				 */
				tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
			}
		} else {
			/*
			 * Interlocked wakeup of waiters (non-optional)
			 */
			pass = 0;
			if (vm_pages_needed && !vm_page_count_min(0)) {
				wakeup(&vmstats.v_free_count);
				vm_pages_needed = 0;
			}
		}
	}
}

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout_thread,
	&pagethread
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)


/*
 * Called after allocating a page out of the cache or free queue
 * to possibly wake the pagedaemon up to replenish our supply.
 *
 * We try to generate some hysteresis by waking the pagedaemon up
 * when our free+cache pages go below the free_min+cache_min level.
 * The pagedaemon tries to get the count back up to at least the
 * minimum, and through to the target level if possible.
 *
 * If the pagedaemon is already active bump vm_pages_needed as a hint
 * that there are even more requests pending.
 *
 * SMP races ok?
 * No requirements.
 */
void
pagedaemon_wakeup(void)
{
	if (vm_paging_needed() && curthread != pagethread) {
		if (vm_pages_needed == 0) {
			vm_pages_needed = 1;	/* SMP race ok */
			wakeup(&vm_pages_needed);
		} else if (vm_page_count_min(0)) {
			++vm_pages_needed;	/* SMP race ok */
		}
	}
}

#if !defined(NO_SWAPPING)

/*
 * SMP races ok?
 * No requirements.
 */
static void
vm_req_vmdaemon(void)
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static int vm_daemon_callback(struct proc *p, void *data __unused);

/*
 * No requirements.
 */
static void
vm_daemon(void)
{
	/*
	 * Permanently hold vm_token.
	 */
	lwkt_gettoken(&vm_token);

	while (TRUE) {
		tsleep(&vm_daemon_needed, 0, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */
		allproc_scan(vm_daemon_callback, NULL);
	}
}

/*
 * Caller must hold vm_token and proc_token.
 */
static int
vm_daemon_callback(struct proc *p, void *data __unused)
{
	vm_pindex_t limit, size;

	/*
	 * if this is a system process or if we have already
	 * looked at this process, skip it.
	 */
	if (p->p_flag & (P_SYSTEM | P_WEXIT))
		return (0);

	/*
	 * if the process is in a non-running type state,
	 * don't touch it.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
		return (0);

	/*
	 * get a limit
	 */
	limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
				p->p_rlimit[RLIMIT_RSS].rlim_max));

	/*
	 * let processes that are swapped out really be
	 * swapped out.  Set the limit to nothing to get as
	 * many pages out to swap as possible.
	 */
	if (p->p_flag & P_SWAPPEDOUT)
		limit = 0;

	size = vmspace_resident_count(p->p_vmspace);
	if (limit >= 0 && size >= limit) {
		vm_pageout_map_deactivate_pages(
		    &p->p_vmspace->vm_map, limit);
	}
	return (0);
}

#endif