/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
 * $DragonFly: src/sys/vm/vm_pageout.c,v 1.23 2006/05/25 07:36:37 dillon Exp $
 */

/*
 * The proverbial page-out daemon.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout (void);
static int vm_pageout_clean (vm_page_t);
static void vm_pageout_scan (int pass);
static int vm_pageout_free_page_calc (vm_size_t count);
struct thread *pagethread;

static struct kproc_desc page_kp = {
        "pagedaemon",
        vm_pageout,
        &pagethread
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon (void);
static struct thread *vmthread;

static struct kproc_desc vm_kp = {
        "vmdaemon",
        vm_daemon,
        &vmthread
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed = 0;	/* Event on which pageout daemon sleeps */
int vm_pageout_deficit = 0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed = 0;/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int vm_swap_size;
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max = 0, vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif
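
/*
 * Most of the knobs below are exported under the vm.* sysctl tree and can
 * be tuned on a running system; for example (assuming the stock sysctl(8)
 * utility):
 *
 *	sysctl vm.max_launder=64
 */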
SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
        CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
        CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
        CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
        CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
        CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
        CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
        CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
        CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
        CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
        CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
        CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
        CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
        CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

int vm_load;
SYSCTL_INT(_vm, OID_AUTO, vm_load,
        CTLFLAG_RD, &vm_load, 0, "load on the VM system");
int vm_load_enable = 1;
SYSCTL_INT(_vm, OID_AUTO, vm_load_enable,
        CTLFLAG_RW, &vm_load_enable, 0, "enable vm_load rate limiting");
#ifdef INVARIANTS
int vm_load_debug;
SYSCTL_INT(_vm, OID_AUTO, vm_load_debug,
        CTLFLAG_RW, &vm_load_debug, 0, "debug vm_load");
#endif

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t (vm_map_t, vm_object_t, vm_pindex_t, int);
static void vm_pageout_map_deactivate_pages (vm_map_t, vm_pindex_t);
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon (void);
#endif
static void vm_pageout_page_stats(void);

/*
 * Update vm_load, a rough measure of how loaded the VM system is,
 * used for rate limiting.
 */
void
vm_fault_ratecheck(void)
{
        if (vm_pages_needed) {
                if (vm_load < 1000)
                        ++vm_load;
        } else {
                if (vm_load > 0)
                        --vm_load;
        }
}

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.  The page must not be
 * busy when we are called.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late, and we cannot do anything that will mess with the page before then.
 */
static int
vm_pageout_clean(vm_page_t m)
{
        vm_object_t object;
        vm_page_t mc[2*vm_pageout_page_count];
        int pageout_count;
        int ib, is, page_base;
        vm_pindex_t pindex = m->pindex;

        object = m->object;

        /*
         * It doesn't cost us anything to page out OBJT_DEFAULT or OBJT_SWAP
         * with the new swapper, but we could have serious problems paging
         * out other object types if there is insufficient memory.
         *
         * Unfortunately, checking free memory here is far too late, so the
         * check has been moved up a procedural level.
         */

        /*
         * Don't mess with the page if it's busy, held, or special
         */
        if ((m->hold_count != 0) ||
            ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
                return 0;
        }

        mc[vm_pageout_page_count] = m;
        pageout_count = 1;
        page_base = vm_pageout_page_count;
        ib = 1;
        is = 1;

        /*
         * Scan object for clusterable pages.
         *
         * We can cluster ONLY if: ->> the page is NOT
         * clean, wired, busy, held, or mapped into a
         * buffer, and one of the following:
         * 1) The page is inactive, or a seldom used
         *    active page.
         * -or-
         * 2) we force the issue.
         *
         * During heavy mmap/modification loads the pageout
         * daemon can really fragment the underlying file
         * due to flushing pages out of order and not trying
         * to align the clusters (which leaves sporadic out-of-order
         * holes).  To solve this problem we do the reverse scan
         * first and attempt to align our cluster, then do a
         * forward scan if room remains.
         */
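
        /*
         * A worked example of the cluster window (a sketch, assuming the
         * default vm_pageout_page_count of 16): the target page is placed
         * in the middle of mc[], at mc[16].  The reverse scan below fills
         * mc[15], mc[14], ... with pages pindex-1, pindex-2, ... and stops
         * early at a 16-page alignment boundary -- e.g. for pindex 37 it
         * can collect pages 36 down to 32 and then stops because 32 is a
         * multiple of 16.  The forward scan then fills mc[17], mc[18], ...
         * with pages pindex+1, pindex+2, ... until the window is full or a
         * page fails the clustering tests.
         */
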
more:
        while (ib && pageout_count < vm_pageout_page_count) {
                vm_page_t p;

                if (ib > pindex) {
                        ib = 0;
                        break;
                }

                if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
                        ib = 0;
                        break;
                }
                if (((p->queue - p->pc) == PQ_CACHE) ||
                    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
                        ib = 0;
                        break;
                }
                vm_page_test_dirty(p);
                if ((p->dirty & p->valid) == 0 ||
                    p->queue != PQ_INACTIVE ||
                    p->wire_count != 0 ||	/* may be held by buf cache */
                    p->hold_count != 0) {	/* may be undergoing I/O */
                        ib = 0;
                        break;
                }
                mc[--page_base] = p;
                ++pageout_count;
                ++ib;
                /*
                 * alignment boundary, stop here and switch directions.  Do
                 * not clear ib.
                 */
                if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
                        break;
        }

        while (pageout_count < vm_pageout_page_count &&
            pindex + is < object->size) {
                vm_page_t p;

                if ((p = vm_page_lookup(object, pindex + is)) == NULL)
                        break;
                if (((p->queue - p->pc) == PQ_CACHE) ||
                    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
                        break;
                }
                vm_page_test_dirty(p);
                if ((p->dirty & p->valid) == 0 ||
                    p->queue != PQ_INACTIVE ||
                    p->wire_count != 0 ||	/* may be held by buf cache */
                    p->hold_count != 0) {	/* may be undergoing I/O */
                        break;
                }
                mc[page_base + pageout_count] = p;
                ++pageout_count;
                ++is;
        }

        /*
         * If we exhausted our forward scan, continue with the reverse scan
         * when possible, even past a page boundary.  This catches boundary
         * conditions.
         */
        if (ib && pageout_count < vm_pageout_page_count)
                goto more;

        /*
         * we allow reads during pageouts...
         */
        return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we set up for the start of
 * I/O (i.e. busy the page), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags)
{
        vm_object_t object;
        int pageout_status[count];
        int numpagedout = 0;
        int i;

        /*
         * Initiate I/O.  Bump the vm_page_t->busy counter and
         * mark the pages read-only.
         *
         * We do not have to fixup the clean/dirty bits here... we can
         * allow the pager to do it after the I/O completes.
         */
        for (i = 0; i < count; i++) {
                KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
                    ("vm_pageout_flush page %p index %d/%d: partially invalid page",
                    mc[i], i, count));
                vm_page_io_start(mc[i]);
                vm_page_protect(mc[i], VM_PROT_READ);
        }

        object = mc[0]->object;
        vm_object_pip_add(object, count);

        vm_pager_put_pages(object, mc, count,
            (flags | ((object == kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
            pageout_status);
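
        /*
         * A note on the status protocol used below: VM_PAGER_OK and
         * VM_PAGER_PEND both count as successful pageouts (PEND merely
         * means the I/O is still in flight), so numpagedout reflects the
         * pages the pager accepted, not the pages known to have reached
         * the disk.
         */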
        for (i = 0; i < count; i++) {
                vm_page_t mt = mc[i];

                switch (pageout_status[i]) {
                case VM_PAGER_OK:
                        numpagedout++;
                        break;
                case VM_PAGER_PEND:
                        numpagedout++;
                        break;
                case VM_PAGER_BAD:
                        /*
                         * Page outside of range of object.  Right now we
                         * essentially lose the changes by pretending it
                         * worked.
                         */
                        pmap_clear_modify(mt);
                        vm_page_undirty(mt);
                        break;
                case VM_PAGER_ERROR:
                case VM_PAGER_FAIL:
                        /*
                         * If the page couldn't be paged out, then reactivate
                         * it so it doesn't clog the inactive list.  (We
                         * will try paging it out again later.)
                         */
                        vm_page_activate(mt);
                        break;
                case VM_PAGER_AGAIN:
                        break;
                }

                /*
                 * If the operation is still going, leave the page busy to
                 * block all other accesses.  Also, leave the paging in
                 * progress indicator set so that we don't attempt an object
                 * collapse.
                 */
                if (pageout_status[i] != VM_PAGER_PEND) {
                        vm_object_pip_wakeup(object);
                        vm_page_io_finish(mt);
                        if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
                                vm_page_protect(mt, VM_PROT_READ);
                }
        }
        return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * Deactivate enough pages to satisfy the inactive target
 * requirements or, if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
        vm_pindex_t desired, int map_remove_only)
{
        vm_page_t p, next;
        int rcount;
        int remove_mode;

        if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
                return;

        while (object) {
                if (pmap_resident_count(vm_map_pmap(map)) <= desired)
                        return;
                if (object->paging_in_progress)
                        return;

                remove_mode = map_remove_only;
                if (object->shadow_count > 1)
                        remove_mode = 1;

                /*
                 * Scan the object's entire memory queue.  spl protection is
                 * required to avoid an interrupt unbusy/free race against
                 * our busy check.
                 */
                crit_enter();
                rcount = object->resident_page_count;
                p = TAILQ_FIRST(&object->memq);

                while (p && (rcount-- > 0)) {
                        int actcount;
                        if (pmap_resident_count(vm_map_pmap(map)) <= desired) {
                                crit_exit();
                                return;
                        }
                        next = TAILQ_NEXT(p, listq);
                        mycpu->gd_cnt.v_pdpages++;
                        if (p->wire_count != 0 ||
                            p->hold_count != 0 ||
                            p->busy != 0 ||
                            (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
                            !pmap_page_exists_quick(vm_map_pmap(map), p)) {
                                p = next;
                                continue;
                        }

                        actcount = pmap_ts_referenced(p);
                        if (actcount) {
                                vm_page_flag_set(p, PG_REFERENCED);
                        } else if (p->flags & PG_REFERENCED) {
                                actcount = 1;
                        }

                        if ((p->queue != PQ_ACTIVE) &&
                            (p->flags & PG_REFERENCED)) {
                                vm_page_activate(p);
                                p->act_count += actcount;
                                vm_page_flag_clear(p, PG_REFERENCED);
                        } else if (p->queue == PQ_ACTIVE) {
                                if ((p->flags & PG_REFERENCED) == 0) {
                                        p->act_count -= min(p->act_count, ACT_DECLINE);
                                        if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
                                                vm_page_protect(p, VM_PROT_NONE);
                                                vm_page_deactivate(p);
                                        } else {
                                                TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
                                                TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
                                        }
                                } else {
                                        vm_page_activate(p);
                                        vm_page_flag_clear(p, PG_REFERENCED);
                                        if (p->act_count < (ACT_MAX - ACT_ADVANCE))
                                                p->act_count += ACT_ADVANCE;
                                        TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
                                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
                                }
                        } else if (p->queue == PQ_INACTIVE) {
                                vm_page_protect(p, VM_PROT_NONE);
                        }
                        p = next;
                }
                crit_exit();
                object = object->backing_object;
        }
}

/*
 * Deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t desired)
{
        vm_map_entry_t tmpe;
        vm_object_t obj, bigobj;
        int nothingwired;

        if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT)) {
                return;
        }

        bigobj = NULL;
        nothingwired = TRUE;

        /*
         * first, search out the biggest object, and try to free pages from
         * that.
         */
        tmpe = map->header.next;
        while (tmpe != &map->header) {
                if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                        obj = tmpe->object.vm_object;
                        if ((obj != NULL) && (obj->shadow_count <= 1) &&
                            ((bigobj == NULL) ||
                             (bigobj->resident_page_count < obj->resident_page_count))) {
                                bigobj = obj;
                        }
                }
                if (tmpe->wired_count > 0)
                        nothingwired = FALSE;
                tmpe = tmpe->next;
        }

        if (bigobj)
                vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

        /*
         * Next, hunt around for other pages to deactivate.  We actually
         * do this search sort of wrong -- .text first is not the best idea.
         */
        tmpe = map->header.next;
        while (tmpe != &map->header) {
                if (pmap_resident_count(vm_map_pmap(map)) <= desired)
                        break;
                if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                        obj = tmpe->object.vm_object;
                        if (obj)
                                vm_pageout_object_deactivate_pages(map, obj, desired, 0);
                }
                tmpe = tmpe->next;
        }

        /*
         * Remove all mappings if a process is swapped out; this will free
         * page table pages.
         */
        if (desired == 0 && nothingwired)
                pmap_remove(vm_map_pmap(map),
                    VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
        vm_map_unlock(map);
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */
void
vm_pageout_page_free(vm_page_t m)
{
        vm_object_t object = m->object;
        int type = object->type;

        /*
         * The temporary object reference prevents the object from being
         * reaped out from under us while we free its page.
         */
        if (type == OBJT_SWAP || type == OBJT_DEFAULT)
                vm_object_reference(object);
        vm_page_busy(m);
        vm_page_protect(m, VM_PROT_NONE);
        vm_page_free(m);
        if (type == OBJT_SWAP || type == OBJT_DEFAULT)
                vm_object_deallocate(object);
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon: it scans
 * the inactive queue, freeing clean pages and laundering dirty ones, then
 * ages the active queue, and as a last resort kills the largest process
 * when memory and swap are both exhausted.
 */
struct vm_pageout_scan_info {
        struct proc *bigproc;
        vm_offset_t bigsize;
};

static int vm_pageout_scan_callback(struct proc *p, void *data);

static void
vm_pageout_scan(int pass)
{
        struct vm_pageout_scan_info info;
        vm_page_t m, next;
        struct vm_page marker;
        int page_shortage, maxscan, pcount;
        int addl_page_shortage, addl_page_shortage_init;
        vm_object_t object;
        int actcount;
        int vnodes_skipped = 0;
        int maxlaunder;

        /*
         * Do whatever cleanup that the pmap code can.
         */
        pmap_collect();

        addl_page_shortage_init = vm_pageout_deficit;
        vm_pageout_deficit = 0;

        /*
         * Calculate the number of pages we want to either free or move
         * to the cache.
         */
        page_shortage = vm_paging_target() + addl_page_shortage_init;

        /*
         * Initialize our marker
         */
        bzero(&marker, sizeof(marker));
        marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
        marker.queue = PQ_INACTIVE;
        marker.wire_count = 1;

        /*
         * Start scanning the inactive queue for pages we can move to the
         * cache or free.  The scan will stop when the target is reached or
         * we have scanned the entire inactive queue.  Note that m->act_count
         * is not used to form decisions for the inactive queue, only for the
         * active queue.
         *
         * maxlaunder limits the number of dirty pages we flush per scan.
         * For most systems a smaller value (16 or 32) is more robust under
         * extreme memory and disk pressure because any unnecessary writes
         * to disk can result in extreme performance degradation.  However,
         * systems with excessive dirty pages (especially when MAP_NOSYNC is
         * used) will die horribly with limited laundering.  If the pageout
         * daemon cannot clean enough pages in the first pass, we let it go
         * all out in succeeding passes.
         */
        if ((maxlaunder = vm_max_launder) <= 1)
                maxlaunder = 1;
        if (pass)
                maxlaunder = 10000;

        /*
         * We will generally be in a critical section throughout the
         * scan, but we can release it temporarily when we are sitting on a
         * non-busy page without fear.  This is required to prevent an
         * interrupt from unbusying or freeing a page prior to our busy
         * check, leaving us on the wrong queue or checking the wrong
         * page.
         */
        crit_enter();
rescan0:
        addl_page_shortage = addl_page_shortage_init;
        maxscan = vmstats.v_inactive_count;
        for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
             m != NULL && maxscan-- > 0 && page_shortage > 0;
             m = next
        ) {
                mycpu->gd_cnt.v_pdpages++;

                /*
                 * Give interrupts a chance
                 */
                crit_exit();
                crit_enter();

                /*
                 * It's easier for some of the conditions below to just loop
                 * and catch queue changes here rather than check everywhere
                 * else.
                 */
                if (m->queue != PQ_INACTIVE)
                        goto rescan0;
                next = TAILQ_NEXT(m, pageq);

                /*
                 * skip marker pages
                 */
                if (m->flags & PG_MARKER)
                        continue;

                /*
                 * A held page may be undergoing I/O, so skip it.
                 */
                if (m->hold_count) {
                        TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                        addl_page_shortage++;
                        continue;
                }

                /*
                 * Don't mess with busy pages; keep them at the front of the
                 * queue as they are most likely being paged out.
                 */
                if (m->busy || (m->flags & PG_BUSY)) {
                        addl_page_shortage++;
                        continue;
                }

                if (m->object->ref_count == 0) {
                        /*
                         * If the object is not being used, we ignore previous
                         * references.
                         */
                        vm_page_flag_clear(m, PG_REFERENCED);
                        pmap_clear_reference(m);

                } else if (((m->flags & PG_REFERENCED) == 0) &&
                           (actcount = pmap_ts_referenced(m))) {
                        /*
                         * Otherwise, if the page has been referenced while
                         * in the inactive queue, we bump the "activation
                         * count" upwards, making it less likely that the
                         * page will be added back to the inactive queue
                         * prematurely again.  Here we check the page tables
                         * (or emulated bits, if any), given the upper level
                         * VM system not knowing anything about existing
                         * references.
                         */
                        vm_page_activate(m);
                        m->act_count += (actcount + ACT_ADVANCE);
                        continue;
                }

                /*
                 * If the upper level VM system knows about any page
                 * references, we activate the page.  We also set the
                 * "activation count" higher than normal so that we will less
                 * likely place pages back onto the inactive queue again.
                 */
                if ((m->flags & PG_REFERENCED) != 0) {
                        vm_page_flag_clear(m, PG_REFERENCED);
                        actcount = pmap_ts_referenced(m);
                        vm_page_activate(m);
                        m->act_count += (actcount + ACT_ADVANCE + 1);
                        continue;
                }

                /*
                 * If the upper level VM system doesn't know anything about
                 * the page being dirty, we have to check for it again.  As
                 * far as the VM code knows, any partially dirty pages are
                 * fully dirty.
                 *
                 * Pages marked PG_WRITEABLE may be mapped into the user
                 * address space of a process running on another cpu.  A
                 * user process (without holding the MP lock) running on
                 * another cpu may be able to touch the page while we are
                 * trying to remove it.  To prevent this from occurring we
                 * must call pmap_remove_all() or otherwise make the page
                 * read-only.  If the race occurred pmap_remove_all() is
                 * responsible for setting m->dirty.
                 */
                if (m->dirty == 0) {
                        vm_page_test_dirty(m);
#if 0
                        if (m->dirty == 0 && (m->flags & PG_WRITEABLE) != 0)
                                pmap_remove_all(m);
#endif
                } else {
                        vm_page_dirty(m);
                }

                if (m->valid == 0) {
                        /*
                         * Invalid pages can be easily freed
                         */
                        vm_pageout_page_free(m);
                        mycpu->gd_cnt.v_dfree++;
                        --page_shortage;
                } else if (m->dirty == 0) {
                        /*
                         * Clean pages can be placed onto the cache queue.
                         * This effectively frees them.
                         */
                        vm_page_cache(m);
                        --page_shortage;
                } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
                        /*
                         * Dirty pages need to be paged out, but flushing
                         * a page is extremely expensive versus freeing
                         * a clean page.  Rather than artificially limiting
                         * the number of pages we can flush, we instead give
                         * dirty pages extra priority on the inactive queue
                         * by forcing them to be cycled through the queue
                         * twice before being flushed, after which the
                         * (now clean) page will cycle through once more
                         * before being freed.  This significantly extends
                         * the thrash point for a heavily loaded machine.
                         */
                        vm_page_flag_set(m, PG_WINATCFLS);
                        TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                } else if (maxlaunder > 0) {
                        /*
                         * We always want to try to flush some dirty pages if
                         * we encounter them, to keep the system stable.
                         * Normally this number is small, but under extreme
                         * pressure where there are insufficient clean pages
                         * on the inactive queue, we may have to go all out.
                         */
                        int swap_pageouts_ok;
                        struct vnode *vp = NULL;

                        object = m->object;

                        if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
                                swap_pageouts_ok = 1;
                        } else {
                                swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
                                swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
                                    vm_page_count_min());
                        }

                        /*
                         * We don't bother paging objects that are "dead".
                         * Those objects are in a "rundown" state.
                         */
                        if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
                                TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                                TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                                continue;
                        }

                        /*
                         * The object is already known NOT to be dead.  It
                         * is possible for the vget() to block the whole
                         * pageout daemon, but the new low-memory handling
                         * code should prevent it.
                         *
                         * The previous code skipped locked vnodes and, worse,
                         * reordered pages in the queue.  This results in
                         * completely non-deterministic operation because,
                         * quite often, a vm_fault has initiated an I/O and
                         * is holding a locked vnode at just the point where
                         * the pageout daemon is woken up.
                         *
                         * We can't wait forever for the vnode lock, we might
                         * deadlock due to a vn_read() getting stuck in
                         * vm_wait while holding this vnode.  We skip the
                         * vnode if we can't get it in a reasonable amount
                         * of time.
                         */
                        if (object->type == OBJT_VNODE) {
                                vp = object->handle;

                                if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ|LK_TIMELOCK)) {
                                        ++pageout_lock_miss;
                                        if (object->flags & OBJ_MIGHTBEDIRTY)
                                                vnodes_skipped++;
                                        continue;
                                }

                                /*
                                 * The page might have been moved to another
                                 * queue during potential blocking in vget()
                                 * above.  The page might have been freed and
                                 * reused for another vnode.  The object might
                                 * have been reused for another vnode.
                                 */
                                if (m->queue != PQ_INACTIVE ||
                                    m->object != object ||
                                    object->handle != vp) {
                                        if (object->flags & OBJ_MIGHTBEDIRTY)
                                                vnodes_skipped++;
                                        vput(vp);
                                        continue;
                                }

                                /*
                                 * The page may have been busied during the
                                 * blocking in vget().  We don't move the
                                 * page back onto the end of the queue; the
                                 * statistics are more correct if we don't.
                                 */
                                if (m->busy || (m->flags & PG_BUSY)) {
                                        vput(vp);
                                        continue;
                                }

                                /*
                                 * If the page has become held it might
                                 * be undergoing I/O, so skip it
                                 */
                                if (m->hold_count) {
                                        TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                                        if (object->flags & OBJ_MIGHTBEDIRTY)
                                                vnodes_skipped++;
                                        vput(vp);
                                        continue;
                                }
                        }

                        /*
                         * If a page is dirty, then it is either being washed
                         * (but not yet cleaned) or it is still in the
                         * laundry.  If it is still in the laundry, then we
                         * start the cleaning operation.
                         *
                         * This operation may cluster, invalidating the 'next'
                         * pointer.  To prevent an inordinate number of
                         * restarts we use our marker to remember our place.
                         *
                         * Decrement page_shortage on success to account for
                         * the (future) cleaned page.  Otherwise we could wind
                         * up laundering or cleaning too many pages.
                         */
                        TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
                        if (vm_pageout_clean(m) != 0) {
                                --page_shortage;
                                --maxlaunder;
                        }
                        next = TAILQ_NEXT(&marker, pageq);
                        TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
                        if (vp != NULL)
                                vput(vp);
                }
        }

        /*
         * Compute the number of pages we want to try to move from the
         * active queue to the inactive queue.
         */
        page_shortage = vm_paging_target() +
                vmstats.v_inactive_target - vmstats.v_inactive_count;
        page_shortage += addl_page_shortage;

        /*
         * Scan the active queue for things we can deactivate.  We nominally
         * track the per-page activity counter and use it to locate
         * deactivation candidates.
         *
         * NOTE: we are still in a critical section.
         */
        pcount = vmstats.v_active_count;
        m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

        while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
                /*
                 * Give interrupts a chance.
                 */
                crit_exit();
                crit_enter();

                /*
                 * If the page was ripped out from under us, just stop.
                 */
                if (m->queue != PQ_ACTIVE)
                        break;
                next = TAILQ_NEXT(m, pageq);

                /*
                 * Don't deactivate pages that are busy.
                 */
                if ((m->busy != 0) ||
                    (m->flags & PG_BUSY) ||
                    (m->hold_count != 0)) {
                        TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                        m = next;
                        continue;
                }

                /*
                 * The count for pagedaemon pages is done after checking the
                 * page for eligibility...
                 */
                mycpu->gd_cnt.v_pdpages++;

                /*
                 * Check to see "how much" the page has been used.
                 */
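                /*
                 * Roughly, as implemented below: a referenced page gains
                 * ACT_ADVANCE plus one point per pmap reference, capped at
                 * ACT_MAX, while an unreferenced page decays by ACT_DECLINE.
                 * Once act_count falls below the current pass number the
                 * page becomes a deactivation candidate.
                 */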
                actcount = 0;
                if (m->object->ref_count != 0) {
                        if (m->flags & PG_REFERENCED) {
                                actcount += 1;
                        }
                        actcount += pmap_ts_referenced(m);
                        if (actcount) {
                                m->act_count += ACT_ADVANCE + actcount;
                                if (m->act_count > ACT_MAX)
                                        m->act_count = ACT_MAX;
                        }
                }

                /*
                 * Since we have "tested" this bit, we need to clear it now.
                 */
                vm_page_flag_clear(m, PG_REFERENCED);

                /*
                 * Only if an object is currently being used, do we use the
                 * page activation count stats.
                 */
                if (actcount && (m->object->ref_count != 0)) {
                        TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                } else {
                        m->act_count -= min(m->act_count, ACT_DECLINE);
                        if (vm_pageout_algorithm ||
                            m->object->ref_count == 0 ||
                            m->act_count < pass) {
                                page_shortage--;
                                if (m->object->ref_count == 0) {
                                        vm_page_protect(m, VM_PROT_NONE);
                                        if (m->dirty == 0)
                                                vm_page_cache(m);
                                        else
                                                vm_page_deactivate(m);
                                } else {
                                        vm_page_deactivate(m);
                                }
                        } else {
                                TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                                TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                        }
                }
                m = next;
        }

        /*
         * We try to maintain some *really* free pages, this allows interrupt
         * code to be guaranteed space.  Since both cache and free queues
         * are considered basically 'free', moving pages from cache to free
         * does not affect other calculations.
         *
         * NOTE: we are still in a critical section.
         */
        while (vmstats.v_free_count < vmstats.v_free_reserved) {
                static int cache_rover = 0;
                m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
                if (!m)
                        break;
                if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
                    m->busy ||
                    m->hold_count ||
                    m->wire_count) {
#ifdef INVARIANTS
                        printf("Warning: busy page %p found in cache\n", m);
#endif
                        vm_page_deactivate(m);
                        continue;
                }
                cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
                vm_pageout_page_free(m);
                mycpu->gd_cnt.v_dfree++;
        }

        crit_exit();

#if !defined(NO_SWAPPING)
        /*
         * Idle process swapout -- run once per second.
         */
        if (vm_swap_idle_enabled) {
                static long lsec;
                if (time_second != lsec) {
                        vm_pageout_req_swapout |= VM_SWAP_IDLE;
                        vm_req_vmdaemon();
                        lsec = time_second;
                }
        }
#endif

        /*
         * If we didn't get enough free pages, and we have skipped a vnode
         * in a writeable object, wakeup the sync daemon.  And kick swapout
         * if we did not get enough free pages.
         */
        if (vm_paging_target() > 0) {
                if (vnodes_skipped && vm_page_count_min())
                        speedup_syncer();
#if !defined(NO_SWAPPING)
                if (vm_swap_enabled && vm_page_count_target()) {
                        vm_req_vmdaemon();
                        vm_pageout_req_swapout |= VM_SWAP_NORMAL;
                }
#endif
        }

        /*
         * If we are out of swap and were not able to reach our paging
         * target, kill the largest process.
         */
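        /*
         * The victim is the largest process measured by resident plus
         * swapped page count (see vm_pageout_scan_callback() below).  It
         * is given the best scheduling priority so that it can exit, and
         * thereby release its memory, as quickly as possible.
         */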
        if ((vm_swap_size < 64 && vm_page_count_min()) ||
            (swap_pager_full && vm_paging_target() > 0)) {
#if 0
        if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
#endif
                info.bigproc = NULL;
                info.bigsize = 0;
                allproc_scan(vm_pageout_scan_callback, &info);
                if (info.bigproc != NULL) {
                        killproc(info.bigproc, "out of swap space");
                        info.bigproc->p_nice = PRIO_MIN;
                        info.bigproc->p_usched->resetpriority(&info.bigproc->p_lwp);
                        wakeup(&vmstats.v_free_count);
                        PRELE(info.bigproc);
                }
        }
}

static int
vm_pageout_scan_callback(struct proc *p, void *data)
{
        struct vm_pageout_scan_info *info = data;
        vm_offset_t size;

        /*
         * if this is a system process, skip it
         */
        if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
            ((p->p_pid < 48) && (vm_swap_size != 0))) {
                return (0);
        }

        /*
         * if the process is in a non-running type state,
         * don't touch it.
         */
        if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
                return (0);
        }

        /*
         * get the process size
         */
        size = vmspace_resident_count(p->p_vmspace) +
                vmspace_swap_count(p->p_vmspace);

        /*
         * If this process is bigger than the biggest one seen so far,
         * remember it.
         */
        if (size > info->bigsize) {
                if (info->bigproc)
                        PRELE(info->bigproc);
                PHOLD(p);
                info->bigproc = p;
                info->bigsize = size;
        }
        return(0);
}

/*
 * This routine tries to maintain the pseudo-LRU active queue, so that
 * some statistics accumulation still occurs during long periods when
 * there is no other paging activity.  It helps the situation when paging
 * suddenly starts to occur.
 */
static void
vm_pageout_page_stats(void)
{
        vm_page_t m, next;
        int pcount, tpcount;		/* Number of pages to check */
        static int fullintervalcount = 0;
        int page_shortage;

        page_shortage =
            (vmstats.v_inactive_target + vmstats.v_cache_max + vmstats.v_free_min) -
            (vmstats.v_free_count + vmstats.v_inactive_count + vmstats.v_cache_count);

        if (page_shortage <= 0)
                return;

        crit_enter();

        pcount = vmstats.v_active_count;
        fullintervalcount += vm_pageout_stats_interval;
        if (fullintervalcount < vm_pageout_full_stats_interval) {
                /*
                 * On a partial scan, only look at a slice of the active
                 * queue proportional to its share of all pages.
                 */
                tpcount = (vm_pageout_stats_max * vmstats.v_active_count) / vmstats.v_page_count;
                if (pcount > tpcount)
                        pcount = tpcount;
        } else {
                fullintervalcount = 0;
        }

        m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
        while ((m != NULL) && (pcount-- > 0)) {
                int actcount;

                if (m->queue != PQ_ACTIVE) {
                        break;
                }

                next = TAILQ_NEXT(m, pageq);
                /*
                 * Don't deactivate pages that are busy.
                 */
                if ((m->busy != 0) ||
                    (m->flags & PG_BUSY) ||
                    (m->hold_count != 0)) {
                        TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                        m = next;
                        continue;
                }

                actcount = 0;
                if (m->flags & PG_REFERENCED) {
                        vm_page_flag_clear(m, PG_REFERENCED);
                        actcount += 1;
                }

                actcount += pmap_ts_referenced(m);
                if (actcount) {
                        m->act_count += ACT_ADVANCE + actcount;
                        if (m->act_count > ACT_MAX)
                                m->act_count = ACT_MAX;
                        TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                } else {
                        if (m->act_count == 0) {
                                /*
                                 * We turn off page access, so that we have
                                 * more accurate RSS stats.  We don't do this
                                 * in the normal page deactivation when the
                                 * system is loaded VM wise, because the
                                 * cost of the large number of page protect
                                 * operations would be higher than the value
                                 * of doing the operation.
                                 */
                                vm_page_protect(m, VM_PROT_NONE);
                                vm_page_deactivate(m);
                        } else {
                                m->act_count -= min(m->act_count, ACT_DECLINE);
                                TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                                TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                        }
                }

                m = next;
        }
        crit_exit();
}

static int
vm_pageout_free_page_calc(vm_size_t count)
{
        if (count < vmstats.v_page_count)
                return 0;
        /*
         * free_reserved needs to include enough for the largest swap pager
         * structures plus enough for any pv_entry structs when paging.
         */
        if (vmstats.v_page_count > 1024)
                vmstats.v_free_min = 4 + (vmstats.v_page_count - 1024) / 200;
        else
                vmstats.v_free_min = 4;
        vmstats.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
                vmstats.v_interrupt_free_min;
        vmstats.v_free_reserved = vm_pageout_page_count +
                vmstats.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
        vmstats.v_free_severe = vmstats.v_free_min / 2;
        vmstats.v_free_min += vmstats.v_free_reserved;
        vmstats.v_free_severe += vmstats.v_free_reserved;
        return 1;
}
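
/*
 * A worked example of the calculation above (a sketch, assuming 4KB pages,
 * a 64KB MAXBSIZE and a PQ_L2_SIZE of 256): on a machine with 262144 pages
 * (1GB), v_free_min starts at 4 + 261120/200 = 1309 pages and
 * v_pageout_free_min is 32 + v_interrupt_free_min.  v_free_reserved is then
 * 16 + v_pageout_free_min + 341 + 256, and both v_free_min and
 * v_free_severe are finally biased upward by v_free_reserved.
 */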

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout(void)
{
        int pass;

        /*
         * Initialize some paging parameters.
         */
        vmstats.v_interrupt_free_min = 2;
        if (vmstats.v_page_count < 2000)
                vm_pageout_page_count = 8;

        vm_pageout_free_page_calc(vmstats.v_page_count);

        /*
         * v_free_target and v_cache_min control pageout hysteresis.  Note
         * that these are more a measure of the VM cache queue hysteresis
         * than the VM free queue.  Specifically, v_free_target is the
         * high water mark (free+cache pages).
         *
         * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
         * low water mark, while v_free_min is the stop.  v_cache_min must
         * be big enough to handle memory needs while the pageout daemon
         * is signalled and run to free more pages.
         */
        if (vmstats.v_free_count > 6144)
                vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved;
        else
                vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved;

        if (vmstats.v_free_count > 2048) {
                vmstats.v_cache_min = vmstats.v_free_target;
                vmstats.v_cache_max = 2 * vmstats.v_cache_min;
                vmstats.v_inactive_target = (3 * vmstats.v_free_target) / 2;
        } else {
                vmstats.v_cache_min = 0;
                vmstats.v_cache_max = 0;
                vmstats.v_inactive_target = vmstats.v_free_count / 4;
        }
        if (vmstats.v_inactive_target > vmstats.v_free_count / 3)
                vmstats.v_inactive_target = vmstats.v_free_count / 3;

        /* XXX does not really belong here */
        if (vm_page_max_wired == 0)
                vm_page_max_wired = vmstats.v_free_count / 3;

        if (vm_pageout_stats_max == 0)
                vm_pageout_stats_max = vmstats.v_free_target;

        /*
         * Set interval in seconds for stats scan.
         */
        if (vm_pageout_stats_interval == 0)
                vm_pageout_stats_interval = 5;
        if (vm_pageout_full_stats_interval == 0)
                vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

        /*
         * Set maximum free per pass
         */
        if (vm_pageout_stats_free_max == 0)
                vm_pageout_stats_free_max = 5;

        swap_pager_swap_init();
        pass = 0;
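
        /*
         * A sketch of the wakeup protocol used by the loop below:
         * pagedaemon_wakeup() sets vm_pages_needed and wakes us via
         * wakeup(&vm_pages_needed); once the paging target has been met we
         * clear vm_pages_needed and wake any processes sleeping on
         * &vmstats.v_free_count waiting for free memory.
         */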

        /*
         * The pageout daemon is never done, so loop forever.
         */
        while (TRUE) {
                int error;

                /*
                 * If we have enough free memory, wakeup waiters.  Do
                 * not clear vm_pages_needed until we reach our target,
                 * otherwise we may be woken up over and over again and
                 * waste a lot of cpu.
                 */
                crit_enter();
                if (vm_pages_needed && !vm_page_count_min()) {
                        if (vm_paging_needed() <= 0)
                                vm_pages_needed = 0;
                        wakeup(&vmstats.v_free_count);
                }
                if (vm_pages_needed) {
                        /*
                         * Still not done, take a second pass without waiting
                         * (unlimited dirty cleaning), otherwise sleep a bit
                         * and try again.
                         */
                        ++pass;
                        if (pass > 1)
                                tsleep(&vm_pages_needed, 0, "psleep", hz/2);
                } else {
                        /*
                         * Good enough, sleep & handle stats.  Prime the pass
                         * for the next run.
                         */
                        if (pass > 1)
                                pass = 1;
                        else
                                pass = 0;
                        error = tsleep(&vm_pages_needed,
                                0, "psleep", vm_pageout_stats_interval * hz);
                        if (error && !vm_pages_needed) {
                                crit_exit();
                                pass = 0;
                                vm_pageout_page_stats();
                                continue;
                        }
                }

                if (vm_pages_needed)
                        mycpu->gd_cnt.v_pdwakeups++;
                crit_exit();
                vm_pageout_scan(pass);
                vm_pageout_deficit = 0;
        }
}

void
pagedaemon_wakeup(void)
{
        if (!vm_pages_needed && curthread != pagethread) {
                vm_pages_needed++;
                wakeup(&vm_pages_needed);
        }
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon(void)
{
        static int lastrun = 0;

        if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
                wakeup(&vm_daemon_needed);
                lastrun = ticks;
        }
}

static int vm_daemon_callback(struct proc *p, void *data __unused);

static void
vm_daemon(void)
{
        while (TRUE) {
                tsleep(&vm_daemon_needed, 0, "psleep", 0);
                if (vm_pageout_req_swapout) {
                        swapout_procs(vm_pageout_req_swapout);
                        vm_pageout_req_swapout = 0;
                }
                /*
                 * scan the processes for exceeding their rlimits or if
                 * process is swapped out -- deactivate pages
                 */
                allproc_scan(vm_daemon_callback, NULL);
        }
}

static int
vm_daemon_callback(struct proc *p, void *data __unused)
{
        vm_pindex_t limit, size;

        /*
         * if this is a system process or if we have already
         * looked at this process, skip it.
         */
        if (p->p_flag & (P_SYSTEM | P_WEXIT))
                return (0);

        /*
         * if the process is in a non-running type state,
         * don't touch it.
         */
        if (p->p_stat != SRUN && p->p_stat != SSLEEP)
                return (0);

        /*
         * get a limit
         */
        limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
                                p->p_rlimit[RLIMIT_RSS].rlim_max));

        /*
         * let processes that are swapped out really be
         * swapped out.  Set the limit to nothing to get as
         * many pages out to swap as possible.
         */
        if (p->p_flag & P_SWAPPEDOUT)
                limit = 0;

        size = vmspace_resident_count(p->p_vmspace);
        if (limit >= 0 && size >= limit) {
                vm_pageout_map_deactivate_pages(
                    &p->p_vmspace->vm_map, limit);
        }
        return (0);
}

#endif