1 /* 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * The Mach Operating System project at Carnegie-Mellon University. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91 37 * 38 * 39 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40 * All rights reserved. 41 * 42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 43 * 44 * Permission to use, copy, modify and distribute this software and 45 * its documentation is hereby granted, provided that both the copyright 46 * notice and this permission notice appear in all copies of the 47 * software, derivative works or modified versions, and any portions 48 * thereof, and that both notices appear in supporting documentation. 49 * 50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 53 * 54 * Carnegie Mellon requests users of this software to return to 55 * 56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57 * School of Computer Science 58 * Carnegie Mellon University 59 * Pittsburgh PA 15213-3890 60 * 61 * any improvements or extensions that they make and grant Carnegie the 62 * rights to redistribute these changes. 63 * 64 * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $ 65 */ 66 67 /* 68 * The proverbial page-out daemon. 69 */ 70 71 #include "opt_vm.h" 72 #include <sys/param.h> 73 #include <sys/systm.h> 74 #include <sys/kernel.h> 75 #include <sys/proc.h> 76 #include <sys/kthread.h> 77 #include <sys/resourcevar.h> 78 #include <sys/signalvar.h> 79 #include <sys/vnode.h> 80 #include <sys/vmmeter.h> 81 #include <sys/sysctl.h> 82 83 #include <vm/vm.h> 84 #include <vm/vm_param.h> 85 #include <sys/lock.h> 86 #include <vm/vm_object.h> 87 #include <vm/vm_page.h> 88 #include <vm/vm_map.h> 89 #include <vm/vm_pageout.h> 90 #include <vm/vm_pager.h> 91 #include <vm/swap_pager.h> 92 #include <vm/vm_extern.h> 93 94 #include <sys/thread2.h> 95 #include <sys/spinlock2.h> 96 #include <vm/vm_page2.h> 97 98 /* 99 * System initialization 100 */ 101 102 /* the kernel process "vm_pageout"*/ 103 static int vm_pageout_clean (vm_page_t); 104 static int vm_pageout_free_page_calc (vm_size_t count); 105 struct thread *pagethread; 106 107 #if !defined(NO_SWAPPING) 108 /* the kernel process "vm_daemon"*/ 109 static void vm_daemon (void); 110 static struct thread *vmthread; 111 112 static struct kproc_desc vm_kp = { 113 "vmdaemon", 114 vm_daemon, 115 &vmthread 116 }; 117 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp) 118 #endif 119 120 int vm_pages_needed=0; /* Event on which pageout daemon sleeps */ 121 int vm_pageout_deficit=0; /* Estimated number of pages deficit */ 122 int vm_pageout_pages_needed=0; /* pageout daemon needs pages */ 123 int vm_page_free_hysteresis = 16; 124 125 #if !defined(NO_SWAPPING) 126 static int vm_pageout_req_swapout; /* XXX */ 127 static int vm_daemon_needed; 128 #endif 129 static int vm_max_launder = 4096; 130 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0; 131 static int vm_pageout_full_stats_interval = 0; 132 static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0; 133 static int defer_swap_pageouts=0; 134 static int disable_swap_pageouts=0; 135 static u_int vm_anonmem_decline = ACT_DECLINE; 136 static u_int vm_filemem_decline = ACT_DECLINE * 2; 137 138 #if defined(NO_SWAPPING) 139 static int vm_swap_enabled=0; 140 static int vm_swap_idle_enabled=0; 141 #else 142 static int vm_swap_enabled=1; 143 static int vm_swap_idle_enabled=0; 144 #endif 145 146 SYSCTL_UINT(_vm, VM_PAGEOUT_ALGORITHM, anonmem_decline, 147 CTLFLAG_RW, &vm_anonmem_decline, 0, "active->inactive anon memory"); 148 149 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, filemem_decline, 150 CTLFLAG_RW, &vm_filemem_decline, 0, "active->inactive file cache"); 151 152 SYSCTL_INT(_vm, OID_AUTO, page_free_hysteresis, 153 CTLFLAG_RW, &vm_page_free_hysteresis, 0, 154 "Free more pages than the minimum required"); 155 156 SYSCTL_INT(_vm, OID_AUTO, max_launder, 157 CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout"); 158 159 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max, 160 CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length"); 161 162 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval, 163 CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan"); 164 165 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval, 166 CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan"); 167 168 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max, 169 CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented"); 170 171 #if defined(NO_SWAPPING) 172 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, 173 CTLFLAG_RD, &vm_swap_enabled, 0, ""); 174 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled, 175 CTLFLAG_RD, &vm_swap_idle_enabled, 0, ""); 176 #else 177 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, 178 CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout"); 179 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled, 180 CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria"); 181 #endif 182 183 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts, 184 CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem"); 185 186 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts, 187 CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages"); 188 189 static int pageout_lock_miss; 190 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss, 191 CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout"); 192 193 int vm_page_max_wired; /* XXX max # of wired pages system-wide */ 194 195 #if !defined(NO_SWAPPING) 196 typedef void freeer_fcn_t (vm_map_t, vm_object_t, vm_pindex_t, int); 197 static void vm_pageout_map_deactivate_pages (vm_map_t, vm_pindex_t); 198 static freeer_fcn_t vm_pageout_object_deactivate_pages; 199 static void vm_req_vmdaemon (void); 200 #endif 201 static void vm_pageout_page_stats(int q); 202 203 static __inline int 204 PQAVERAGE(int n) 205 { 206 if (n >= 0) 207 return((n + (PQ_L2_SIZE - 1)) / PQ_L2_SIZE + 1); 208 else 209 return((n - (PQ_L2_SIZE - 1)) / PQ_L2_SIZE - 1); 210 } 211 212 /* 213 * vm_pageout_clean: 214 * 215 * Clean the page and remove it from the laundry. The page must not be 216 * busy on-call. 217 * 218 * We set the busy bit to cause potential page faults on this page to 219 * block. Note the careful timing, however, the busy bit isn't set till 220 * late and we cannot do anything that will mess with the page. 221 */ 222 static int 223 vm_pageout_clean(vm_page_t m) 224 { 225 vm_object_t object; 226 vm_page_t mc[BLIST_MAX_ALLOC]; 227 int error; 228 int ib, is, page_base; 229 vm_pindex_t pindex = m->pindex; 230 231 object = m->object; 232 233 /* 234 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP 235 * with the new swapper, but we could have serious problems paging 236 * out other object types if there is insufficient memory. 237 * 238 * Unfortunately, checking free memory here is far too late, so the 239 * check has been moved up a procedural level. 240 */ 241 242 /* 243 * Don't mess with the page if it's busy, held, or special 244 * 245 * XXX do we really need to check hold_count here? hold_count 246 * isn't supposed to mess with vm_page ops except prevent the 247 * page from being reused. 248 */ 249 if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) { 250 vm_page_wakeup(m); 251 return 0; 252 } 253 254 /* 255 * Place page in cluster. Align cluster for optimal swap space 256 * allocation (whether it is swap or not). This is typically ~16-32 257 * pages, which also tends to align the cluster to multiples of the 258 * filesystem block size if backed by a filesystem. 259 */ 260 page_base = pindex % BLIST_MAX_ALLOC; 261 mc[page_base] = m; 262 ib = page_base - 1; 263 is = page_base + 1; 264 265 /* 266 * Scan object for clusterable pages. 267 * 268 * We can cluster ONLY if: ->> the page is NOT 269 * clean, wired, busy, held, or mapped into a 270 * buffer, and one of the following: 271 * 1) The page is inactive, or a seldom used 272 * active page. 273 * -or- 274 * 2) we force the issue. 275 * 276 * During heavy mmap/modification loads the pageout 277 * daemon can really fragment the underlying file 278 * due to flushing pages out of order and not trying 279 * align the clusters (which leave sporatic out-of-order 280 * holes). To solve this problem we do the reverse scan 281 * first and attempt to align our cluster, then do a 282 * forward scan if room remains. 283 */ 284 285 vm_object_hold(object); 286 while (ib >= 0) { 287 vm_page_t p; 288 289 p = vm_page_lookup_busy_try(object, pindex - page_base + ib, 290 TRUE, &error); 291 if (error || p == NULL) 292 break; 293 if ((p->queue - p->pc) == PQ_CACHE || 294 (p->flags & PG_UNMANAGED)) { 295 vm_page_wakeup(p); 296 break; 297 } 298 vm_page_test_dirty(p); 299 if (((p->dirty & p->valid) == 0 && 300 (p->flags & PG_NEED_COMMIT) == 0) || 301 p->queue - p->pc != PQ_INACTIVE || 302 p->wire_count != 0 || /* may be held by buf cache */ 303 p->hold_count != 0) { /* may be undergoing I/O */ 304 vm_page_wakeup(p); 305 break; 306 } 307 mc[ib] = p; 308 --ib; 309 } 310 ++ib; /* fixup */ 311 312 while (is < BLIST_MAX_ALLOC && 313 pindex - page_base + is < object->size) { 314 vm_page_t p; 315 316 p = vm_page_lookup_busy_try(object, pindex - page_base + is, 317 TRUE, &error); 318 if (error || p == NULL) 319 break; 320 if (((p->queue - p->pc) == PQ_CACHE) || 321 (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) { 322 vm_page_wakeup(p); 323 break; 324 } 325 vm_page_test_dirty(p); 326 if (((p->dirty & p->valid) == 0 && 327 (p->flags & PG_NEED_COMMIT) == 0) || 328 p->queue - p->pc != PQ_INACTIVE || 329 p->wire_count != 0 || /* may be held by buf cache */ 330 p->hold_count != 0) { /* may be undergoing I/O */ 331 vm_page_wakeup(p); 332 break; 333 } 334 mc[is] = p; 335 ++is; 336 } 337 338 vm_object_drop(object); 339 340 /* 341 * we allow reads during pageouts... 342 */ 343 return vm_pageout_flush(&mc[ib], is - ib, 0); 344 } 345 346 /* 347 * vm_pageout_flush() - launder the given pages 348 * 349 * The given pages are laundered. Note that we setup for the start of 350 * I/O ( i.e. busy the page ), mark it read-only, and bump the object 351 * reference count all in here rather then in the parent. If we want 352 * the parent to do more sophisticated things we may have to change 353 * the ordering. 354 * 355 * The pages in the array must be busied by the caller and will be 356 * unbusied by this function. 357 */ 358 int 359 vm_pageout_flush(vm_page_t *mc, int count, int flags) 360 { 361 vm_object_t object; 362 int pageout_status[count]; 363 int numpagedout = 0; 364 int i; 365 366 /* 367 * Initiate I/O. Bump the vm_page_t->busy counter. 368 */ 369 for (i = 0; i < count; i++) { 370 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, 371 ("vm_pageout_flush page %p index %d/%d: partially " 372 "invalid page", mc[i], i, count)); 373 vm_page_io_start(mc[i]); 374 } 375 376 /* 377 * We must make the pages read-only. This will also force the 378 * modified bit in the related pmaps to be cleared. The pager 379 * cannot clear the bit for us since the I/O completion code 380 * typically runs from an interrupt. The act of making the page 381 * read-only handles the case for us. 382 * 383 * Then we can unbusy the pages, we still hold a reference by virtue 384 * of our soft-busy. 385 */ 386 for (i = 0; i < count; i++) { 387 vm_page_protect(mc[i], VM_PROT_READ); 388 vm_page_wakeup(mc[i]); 389 } 390 391 object = mc[0]->object; 392 vm_object_pip_add(object, count); 393 394 vm_pager_put_pages(object, mc, count, 395 (flags | ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)), 396 pageout_status); 397 398 for (i = 0; i < count; i++) { 399 vm_page_t mt = mc[i]; 400 401 switch (pageout_status[i]) { 402 case VM_PAGER_OK: 403 numpagedout++; 404 break; 405 case VM_PAGER_PEND: 406 numpagedout++; 407 break; 408 case VM_PAGER_BAD: 409 /* 410 * Page outside of range of object. Right now we 411 * essentially lose the changes by pretending it 412 * worked. 413 */ 414 vm_page_busy_wait(mt, FALSE, "pgbad"); 415 pmap_clear_modify(mt); 416 vm_page_undirty(mt); 417 vm_page_wakeup(mt); 418 break; 419 case VM_PAGER_ERROR: 420 case VM_PAGER_FAIL: 421 /* 422 * A page typically cannot be paged out when we 423 * have run out of swap. We leave the page 424 * marked inactive and will try to page it out 425 * again later. 426 * 427 * Starvation of the active page list is used to 428 * determine when the system is massively memory 429 * starved. 430 */ 431 break; 432 case VM_PAGER_AGAIN: 433 break; 434 } 435 436 /* 437 * If the operation is still going, leave the page busy to 438 * block all other accesses. Also, leave the paging in 439 * progress indicator set so that we don't attempt an object 440 * collapse. 441 * 442 * For any pages which have completed synchronously, 443 * deactivate the page if we are under a severe deficit. 444 * Do not try to enter them into the cache, though, they 445 * might still be read-heavy. 446 */ 447 if (pageout_status[i] != VM_PAGER_PEND) { 448 vm_page_busy_wait(mt, FALSE, "pgouw"); 449 if (vm_page_count_severe()) 450 vm_page_deactivate(mt); 451 #if 0 452 if (!vm_page_count_severe() || !vm_page_try_to_cache(mt)) 453 vm_page_protect(mt, VM_PROT_READ); 454 #endif 455 vm_page_io_finish(mt); 456 vm_page_wakeup(mt); 457 vm_object_pip_wakeup(object); 458 } 459 } 460 return numpagedout; 461 } 462 463 #if !defined(NO_SWAPPING) 464 /* 465 * deactivate enough pages to satisfy the inactive target 466 * requirements or if vm_page_proc_limit is set, then 467 * deactivate all of the pages in the object and its 468 * backing_objects. 469 * 470 * The map must be locked. 471 * The caller must hold the vm_object. 472 */ 473 static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *); 474 475 static void 476 vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object, 477 vm_pindex_t desired, int map_remove_only) 478 { 479 struct rb_vm_page_scan_info info; 480 vm_object_t lobject; 481 vm_object_t tobject; 482 int remove_mode; 483 484 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 485 lobject = object; 486 487 while (lobject) { 488 if (pmap_resident_count(vm_map_pmap(map)) <= desired) 489 break; 490 if (lobject->type == OBJT_DEVICE || lobject->type == OBJT_PHYS) 491 break; 492 if (lobject->paging_in_progress) 493 break; 494 495 remove_mode = map_remove_only; 496 if (lobject->shadow_count > 1) 497 remove_mode = 1; 498 499 /* 500 * scan the objects entire memory queue. We hold the 501 * object's token so the scan should not race anything. 502 */ 503 info.limit = remove_mode; 504 info.map = map; 505 info.desired = desired; 506 vm_page_rb_tree_RB_SCAN(&lobject->rb_memq, NULL, 507 vm_pageout_object_deactivate_pages_callback, 508 &info 509 ); 510 while ((tobject = lobject->backing_object) != NULL) { 511 KKASSERT(tobject != object); 512 vm_object_hold(tobject); 513 if (tobject == lobject->backing_object) 514 break; 515 vm_object_drop(tobject); 516 } 517 if (lobject != object) { 518 if (tobject) 519 vm_object_lock_swap(); 520 vm_object_drop(lobject); 521 /* leaves tobject locked & at top */ 522 } 523 lobject = tobject; 524 } 525 if (lobject != object) 526 vm_object_drop(lobject); /* NULL ok */ 527 } 528 529 /* 530 * The caller must hold the vm_object. 531 */ 532 static int 533 vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data) 534 { 535 struct rb_vm_page_scan_info *info = data; 536 int actcount; 537 538 if (pmap_resident_count(vm_map_pmap(info->map)) <= info->desired) { 539 return(-1); 540 } 541 mycpu->gd_cnt.v_pdpages++; 542 543 if (vm_page_busy_try(p, TRUE)) 544 return(0); 545 if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) { 546 vm_page_wakeup(p); 547 return(0); 548 } 549 if (!pmap_page_exists_quick(vm_map_pmap(info->map), p)) { 550 vm_page_wakeup(p); 551 return(0); 552 } 553 554 actcount = pmap_ts_referenced(p); 555 if (actcount) { 556 vm_page_flag_set(p, PG_REFERENCED); 557 } else if (p->flags & PG_REFERENCED) { 558 actcount = 1; 559 } 560 561 vm_page_and_queue_spin_lock(p); 562 if (p->queue - p->pc != PQ_ACTIVE && (p->flags & PG_REFERENCED)) { 563 vm_page_and_queue_spin_unlock(p); 564 vm_page_activate(p); 565 p->act_count += actcount; 566 vm_page_flag_clear(p, PG_REFERENCED); 567 } else if (p->queue - p->pc == PQ_ACTIVE) { 568 if ((p->flags & PG_REFERENCED) == 0) { 569 p->act_count -= min(p->act_count, ACT_DECLINE); 570 if (!info->limit && 571 (vm_pageout_algorithm || (p->act_count == 0))) { 572 vm_page_and_queue_spin_unlock(p); 573 vm_page_protect(p, VM_PROT_NONE); 574 vm_page_deactivate(p); 575 } else { 576 TAILQ_REMOVE(&vm_page_queues[p->queue].pl, 577 p, pageq); 578 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl, 579 p, pageq); 580 vm_page_and_queue_spin_unlock(p); 581 } 582 } else { 583 vm_page_and_queue_spin_unlock(p); 584 vm_page_activate(p); 585 vm_page_flag_clear(p, PG_REFERENCED); 586 587 vm_page_and_queue_spin_lock(p); 588 if (p->queue - p->pc == PQ_ACTIVE) { 589 if (p->act_count < (ACT_MAX - ACT_ADVANCE)) 590 p->act_count += ACT_ADVANCE; 591 TAILQ_REMOVE(&vm_page_queues[p->queue].pl, 592 p, pageq); 593 TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl, 594 p, pageq); 595 } 596 vm_page_and_queue_spin_unlock(p); 597 } 598 } else if (p->queue - p->pc == PQ_INACTIVE) { 599 vm_page_and_queue_spin_unlock(p); 600 vm_page_protect(p, VM_PROT_NONE); 601 } else { 602 vm_page_and_queue_spin_unlock(p); 603 } 604 vm_page_wakeup(p); 605 return(0); 606 } 607 608 /* 609 * Deactivate some number of pages in a map, try to do it fairly, but 610 * that is really hard to do. 611 */ 612 static void 613 vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t desired) 614 { 615 vm_map_entry_t tmpe; 616 vm_object_t obj, bigobj; 617 int nothingwired; 618 619 if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT)) { 620 return; 621 } 622 623 bigobj = NULL; 624 nothingwired = TRUE; 625 626 /* 627 * first, search out the biggest object, and try to free pages from 628 * that. 629 */ 630 tmpe = map->header.next; 631 while (tmpe != &map->header) { 632 switch(tmpe->maptype) { 633 case VM_MAPTYPE_NORMAL: 634 case VM_MAPTYPE_VPAGETABLE: 635 obj = tmpe->object.vm_object; 636 if ((obj != NULL) && (obj->shadow_count <= 1) && 637 ((bigobj == NULL) || 638 (bigobj->resident_page_count < obj->resident_page_count))) { 639 bigobj = obj; 640 } 641 break; 642 default: 643 break; 644 } 645 if (tmpe->wired_count > 0) 646 nothingwired = FALSE; 647 tmpe = tmpe->next; 648 } 649 650 if (bigobj) { 651 vm_object_hold(bigobj); 652 vm_pageout_object_deactivate_pages(map, bigobj, desired, 0); 653 vm_object_drop(bigobj); 654 } 655 656 /* 657 * Next, hunt around for other pages to deactivate. We actually 658 * do this search sort of wrong -- .text first is not the best idea. 659 */ 660 tmpe = map->header.next; 661 while (tmpe != &map->header) { 662 if (pmap_resident_count(vm_map_pmap(map)) <= desired) 663 break; 664 switch(tmpe->maptype) { 665 case VM_MAPTYPE_NORMAL: 666 case VM_MAPTYPE_VPAGETABLE: 667 obj = tmpe->object.vm_object; 668 if (obj) { 669 vm_object_hold(obj); 670 vm_pageout_object_deactivate_pages(map, obj, desired, 0); 671 vm_object_drop(obj); 672 } 673 break; 674 default: 675 break; 676 } 677 tmpe = tmpe->next; 678 } 679 680 /* 681 * Remove all mappings if a process is swapped out, this will free page 682 * table pages. 683 */ 684 if (desired == 0 && nothingwired) 685 pmap_remove(vm_map_pmap(map), 686 VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS); 687 vm_map_unlock(map); 688 } 689 #endif 690 691 /* 692 * Called when the pageout scan wants to free a page. We no longer 693 * try to cycle the vm_object here with a reference & dealloc, which can 694 * cause a non-trivial object collapse in a critical path. 695 * 696 * It is unclear why we cycled the ref_count in the past, perhaps to try 697 * to optimize shadow chain collapses but I don't quite see why it would 698 * be necessary. An OBJ_DEAD object should terminate any and all vm_pages 699 * synchronously and not have to be kicked-start. 700 */ 701 static void 702 vm_pageout_page_free(vm_page_t m) 703 { 704 vm_page_protect(m, VM_PROT_NONE); 705 vm_page_free(m); 706 } 707 708 /* 709 * vm_pageout_scan does the dirty work for the pageout daemon. 710 */ 711 struct vm_pageout_scan_info { 712 struct proc *bigproc; 713 vm_offset_t bigsize; 714 }; 715 716 static int vm_pageout_scan_callback(struct proc *p, void *data); 717 718 static int 719 vm_pageout_scan_inactive(int pass, int q, int avail_shortage, 720 int *vnodes_skippedp) 721 { 722 vm_page_t m; 723 struct vm_page marker; 724 struct vnode *vpfailed; /* warning, allowed to be stale */ 725 int maxscan; 726 int count; 727 int delta = 0; 728 vm_object_t object; 729 int actcount; 730 int maxlaunder; 731 732 /* 733 * Start scanning the inactive queue for pages we can move to the 734 * cache or free. The scan will stop when the target is reached or 735 * we have scanned the entire inactive queue. Note that m->act_count 736 * is not used to form decisions for the inactive queue, only for the 737 * active queue. 738 * 739 * maxlaunder limits the number of dirty pages we flush per scan. 740 * For most systems a smaller value (16 or 32) is more robust under 741 * extreme memory and disk pressure because any unnecessary writes 742 * to disk can result in extreme performance degredation. However, 743 * systems with excessive dirty pages (especially when MAP_NOSYNC is 744 * used) will die horribly with limited laundering. If the pageout 745 * daemon cannot clean enough pages in the first pass, we let it go 746 * all out in succeeding passes. 747 */ 748 if ((maxlaunder = vm_max_launder) <= 1) 749 maxlaunder = 1; 750 if (pass) 751 maxlaunder = 10000; 752 753 /* 754 * Initialize our marker 755 */ 756 bzero(&marker, sizeof(marker)); 757 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER; 758 marker.queue = PQ_INACTIVE + q; 759 marker.pc = q; 760 marker.wire_count = 1; 761 762 /* 763 * Inactive queue scan. 764 * 765 * NOTE: The vm_page must be spinlocked before the queue to avoid 766 * deadlocks, so it is easiest to simply iterate the loop 767 * with the queue unlocked at the top. 768 */ 769 vpfailed = NULL; 770 771 vm_page_queues_spin_lock(PQ_INACTIVE + q); 772 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq); 773 maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt; 774 vm_page_queues_spin_unlock(PQ_INACTIVE + q); 775 776 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL && 777 maxscan-- > 0 && avail_shortage - delta > 0) 778 { 779 vm_page_and_queue_spin_lock(m); 780 if (m != TAILQ_NEXT(&marker, pageq)) { 781 vm_page_and_queue_spin_unlock(m); 782 ++maxscan; 783 continue; 784 } 785 KKASSERT(m->queue - m->pc == PQ_INACTIVE); 786 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, 787 &marker, pageq); 788 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m, 789 &marker, pageq); 790 mycpu->gd_cnt.v_pdpages++; 791 792 /* 793 * Skip marker pages 794 */ 795 if (m->flags & PG_MARKER) { 796 vm_page_and_queue_spin_unlock(m); 797 continue; 798 } 799 800 /* 801 * Try to busy the page. Don't mess with pages which are 802 * already busy or reorder them in the queue. 803 */ 804 if (vm_page_busy_try(m, TRUE)) { 805 vm_page_and_queue_spin_unlock(m); 806 continue; 807 } 808 vm_page_and_queue_spin_unlock(m); 809 KKASSERT(m->queue - m->pc == PQ_INACTIVE); 810 811 lwkt_yield(); 812 813 /* 814 * The page has been successfully busied and is now no 815 * longer spinlocked. The queue is no longer spinlocked 816 * either. 817 */ 818 819 /* 820 * It is possible for a page to be busied ad-hoc (e.g. the 821 * pmap_collect() code) and wired and race against the 822 * allocation of a new page. vm_page_alloc() may be forced 823 * to deactivate the wired page in which case it winds up 824 * on the inactive queue and must be handled here. We 825 * correct the problem simply by unqueuing the page. 826 */ 827 if (m->wire_count) { 828 vm_page_unqueue_nowakeup(m); 829 vm_page_wakeup(m); 830 kprintf("WARNING: pagedaemon: wired page on " 831 "inactive queue %p\n", m); 832 continue; 833 } 834 835 /* 836 * A held page may be undergoing I/O, so skip it. 837 */ 838 if (m->hold_count) { 839 vm_page_and_queue_spin_lock(m); 840 if (m->queue - m->pc == PQ_INACTIVE) { 841 TAILQ_REMOVE( 842 &vm_page_queues[PQ_INACTIVE + q].pl, 843 m, pageq); 844 TAILQ_INSERT_TAIL( 845 &vm_page_queues[PQ_INACTIVE + q].pl, 846 m, pageq); 847 ++vm_swapcache_inactive_heuristic; 848 } 849 vm_page_and_queue_spin_unlock(m); 850 vm_page_wakeup(m); 851 continue; 852 } 853 854 if (m->object == NULL || m->object->ref_count == 0) { 855 /* 856 * If the object is not being used, we ignore previous 857 * references. 858 */ 859 vm_page_flag_clear(m, PG_REFERENCED); 860 pmap_clear_reference(m); 861 /* fall through to end */ 862 } else if (((m->flags & PG_REFERENCED) == 0) && 863 (actcount = pmap_ts_referenced(m))) { 864 /* 865 * Otherwise, if the page has been referenced while 866 * in the inactive queue, we bump the "activation 867 * count" upwards, making it less likely that the 868 * page will be added back to the inactive queue 869 * prematurely again. Here we check the page tables 870 * (or emulated bits, if any), given the upper level 871 * VM system not knowing anything about existing 872 * references. 873 */ 874 vm_page_activate(m); 875 m->act_count += (actcount + ACT_ADVANCE); 876 vm_page_wakeup(m); 877 continue; 878 } 879 880 /* 881 * (m) is still busied. 882 * 883 * If the upper level VM system knows about any page 884 * references, we activate the page. We also set the 885 * "activation count" higher than normal so that we will less 886 * likely place pages back onto the inactive queue again. 887 */ 888 if ((m->flags & PG_REFERENCED) != 0) { 889 vm_page_flag_clear(m, PG_REFERENCED); 890 actcount = pmap_ts_referenced(m); 891 vm_page_activate(m); 892 m->act_count += (actcount + ACT_ADVANCE + 1); 893 vm_page_wakeup(m); 894 continue; 895 } 896 897 /* 898 * If the upper level VM system doesn't know anything about 899 * the page being dirty, we have to check for it again. As 900 * far as the VM code knows, any partially dirty pages are 901 * fully dirty. 902 * 903 * Pages marked PG_WRITEABLE may be mapped into the user 904 * address space of a process running on another cpu. A 905 * user process (without holding the MP lock) running on 906 * another cpu may be able to touch the page while we are 907 * trying to remove it. vm_page_cache() will handle this 908 * case for us. 909 */ 910 if (m->dirty == 0) { 911 vm_page_test_dirty(m); 912 } else { 913 vm_page_dirty(m); 914 } 915 916 if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) { 917 /* 918 * Invalid pages can be easily freed 919 */ 920 vm_pageout_page_free(m); 921 mycpu->gd_cnt.v_dfree++; 922 ++delta; 923 } else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) { 924 /* 925 * Clean pages can be placed onto the cache queue. 926 * This effectively frees them. 927 */ 928 vm_page_cache(m); 929 ++delta; 930 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) { 931 /* 932 * Dirty pages need to be paged out, but flushing 933 * a page is extremely expensive verses freeing 934 * a clean page. Rather then artificially limiting 935 * the number of pages we can flush, we instead give 936 * dirty pages extra priority on the inactive queue 937 * by forcing them to be cycled through the queue 938 * twice before being flushed, after which the 939 * (now clean) page will cycle through once more 940 * before being freed. This significantly extends 941 * the thrash point for a heavily loaded machine. 942 */ 943 vm_page_flag_set(m, PG_WINATCFLS); 944 vm_page_and_queue_spin_lock(m); 945 if (m->queue - m->pc == PQ_INACTIVE) { 946 TAILQ_REMOVE( 947 &vm_page_queues[PQ_INACTIVE + q].pl, 948 m, pageq); 949 TAILQ_INSERT_TAIL( 950 &vm_page_queues[PQ_INACTIVE + q].pl, 951 m, pageq); 952 ++vm_swapcache_inactive_heuristic; 953 } 954 vm_page_and_queue_spin_unlock(m); 955 vm_page_wakeup(m); 956 } else if (maxlaunder > 0) { 957 /* 958 * We always want to try to flush some dirty pages if 959 * we encounter them, to keep the system stable. 960 * Normally this number is small, but under extreme 961 * pressure where there are insufficient clean pages 962 * on the inactive queue, we may have to go all out. 963 */ 964 int swap_pageouts_ok; 965 struct vnode *vp = NULL; 966 967 swap_pageouts_ok = 0; 968 object = m->object; 969 if (object && 970 (object->type != OBJT_SWAP) && 971 (object->type != OBJT_DEFAULT)) { 972 swap_pageouts_ok = 1; 973 } else { 974 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts); 975 swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts && 976 vm_page_count_min(0)); 977 978 } 979 980 /* 981 * We don't bother paging objects that are "dead". 982 * Those objects are in a "rundown" state. 983 */ 984 if (!swap_pageouts_ok || 985 (object == NULL) || 986 (object->flags & OBJ_DEAD)) { 987 vm_page_and_queue_spin_lock(m); 988 if (m->queue - m->pc == PQ_INACTIVE) { 989 TAILQ_REMOVE( 990 &vm_page_queues[PQ_INACTIVE + q].pl, 991 m, pageq); 992 TAILQ_INSERT_TAIL( 993 &vm_page_queues[PQ_INACTIVE + q].pl, 994 m, pageq); 995 ++vm_swapcache_inactive_heuristic; 996 } 997 vm_page_and_queue_spin_unlock(m); 998 vm_page_wakeup(m); 999 continue; 1000 } 1001 1002 /* 1003 * (m) is still busied. 1004 * 1005 * The object is already known NOT to be dead. It 1006 * is possible for the vget() to block the whole 1007 * pageout daemon, but the new low-memory handling 1008 * code should prevent it. 1009 * 1010 * The previous code skipped locked vnodes and, worse, 1011 * reordered pages in the queue. This results in 1012 * completely non-deterministic operation because, 1013 * quite often, a vm_fault has initiated an I/O and 1014 * is holding a locked vnode at just the point where 1015 * the pageout daemon is woken up. 1016 * 1017 * We can't wait forever for the vnode lock, we might 1018 * deadlock due to a vn_read() getting stuck in 1019 * vm_wait while holding this vnode. We skip the 1020 * vnode if we can't get it in a reasonable amount 1021 * of time. 1022 * 1023 * vpfailed is used to (try to) avoid the case where 1024 * a large number of pages are associated with a 1025 * locked vnode, which could cause the pageout daemon 1026 * to stall for an excessive amount of time. 1027 */ 1028 if (object->type == OBJT_VNODE) { 1029 int flags; 1030 1031 vp = object->handle; 1032 flags = LK_EXCLUSIVE | LK_NOOBJ; 1033 if (vp == vpfailed) 1034 flags |= LK_NOWAIT; 1035 else 1036 flags |= LK_TIMELOCK; 1037 vm_page_hold(m); 1038 vm_page_wakeup(m); 1039 1040 /* 1041 * We have unbusied (m) temporarily so we can 1042 * acquire the vp lock without deadlocking. 1043 * (m) is held to prevent destruction. 1044 */ 1045 if (vget(vp, flags) != 0) { 1046 vpfailed = vp; 1047 ++pageout_lock_miss; 1048 if (object->flags & OBJ_MIGHTBEDIRTY) 1049 ++*vnodes_skippedp; 1050 vm_page_unhold(m); 1051 continue; 1052 } 1053 1054 /* 1055 * The page might have been moved to another 1056 * queue during potential blocking in vget() 1057 * above. The page might have been freed and 1058 * reused for another vnode. The object might 1059 * have been reused for another vnode. 1060 */ 1061 if (m->queue - m->pc != PQ_INACTIVE || 1062 m->object != object || 1063 object->handle != vp) { 1064 if (object->flags & OBJ_MIGHTBEDIRTY) 1065 ++*vnodes_skippedp; 1066 vput(vp); 1067 vm_page_unhold(m); 1068 continue; 1069 } 1070 1071 /* 1072 * The page may have been busied during the 1073 * blocking in vput(); We don't move the 1074 * page back onto the end of the queue so that 1075 * statistics are more correct if we don't. 1076 */ 1077 if (vm_page_busy_try(m, TRUE)) { 1078 vput(vp); 1079 vm_page_unhold(m); 1080 continue; 1081 } 1082 vm_page_unhold(m); 1083 1084 /* 1085 * (m) is busied again 1086 * 1087 * We own the busy bit and remove our hold 1088 * bit. If the page is still held it 1089 * might be undergoing I/O, so skip it. 1090 */ 1091 if (m->hold_count) { 1092 vm_page_and_queue_spin_lock(m); 1093 if (m->queue - m->pc == PQ_INACTIVE) { 1094 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq); 1095 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq); 1096 ++vm_swapcache_inactive_heuristic; 1097 } 1098 vm_page_and_queue_spin_unlock(m); 1099 if (object->flags & OBJ_MIGHTBEDIRTY) 1100 ++*vnodes_skippedp; 1101 vm_page_wakeup(m); 1102 vput(vp); 1103 continue; 1104 } 1105 /* (m) is left busied as we fall through */ 1106 } 1107 1108 /* 1109 * page is busy and not held here. 1110 * 1111 * If a page is dirty, then it is either being washed 1112 * (but not yet cleaned) or it is still in the 1113 * laundry. If it is still in the laundry, then we 1114 * start the cleaning operation. 1115 * 1116 * decrement inactive_shortage on success to account 1117 * for the (future) cleaned page. Otherwise we 1118 * could wind up laundering or cleaning too many 1119 * pages. 1120 */ 1121 count = vm_pageout_clean(m); 1122 delta += count; 1123 maxlaunder -= count; 1124 1125 /* 1126 * Clean ate busy, page no longer accessible 1127 */ 1128 if (vp != NULL) 1129 vput(vp); 1130 } else { 1131 vm_page_wakeup(m); 1132 } 1133 1134 /* 1135 * Systems with a ton of memory can wind up with huge 1136 * deactivation counts. Because the inactive scan is 1137 * doing a lot of flushing, the combination can result 1138 * in excessive paging even in situations where other 1139 * unrelated threads free up sufficient VM. 1140 * 1141 * To deal with this we abort the nominal active->inactive 1142 * scan before we hit the inactive target when free+cache 1143 * levels have already reached their target. 1144 * 1145 * Note that nominally the inactive scan is not freeing or 1146 * caching pages, it is deactivating active pages, so it 1147 * will not by itself cause the abort condition. 1148 */ 1149 if (vm_paging_target() < 0) 1150 break; 1151 } 1152 vm_page_queues_spin_lock(PQ_INACTIVE + q); 1153 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq); 1154 vm_page_queues_spin_unlock(PQ_INACTIVE + q); 1155 1156 return (delta); 1157 } 1158 1159 static int 1160 vm_pageout_scan_active(int pass, int q, 1161 int avail_shortage, int inactive_shortage, 1162 int *recycle_countp) 1163 { 1164 struct vm_page marker; 1165 vm_page_t m; 1166 int actcount; 1167 int delta = 0; 1168 int maxscan; 1169 1170 /* 1171 * We want to move pages from the active queue to the inactive 1172 * queue to get the inactive queue to the inactive target. If 1173 * we still have a page shortage from above we try to directly free 1174 * clean pages instead of moving them. 1175 * 1176 * If we do still have a shortage we keep track of the number of 1177 * pages we free or cache (recycle_count) as a measure of thrashing 1178 * between the active and inactive queues. 1179 * 1180 * If we were able to completely satisfy the free+cache targets 1181 * from the inactive pool we limit the number of pages we move 1182 * from the active pool to the inactive pool to 2x the pages we 1183 * had removed from the inactive pool (with a minimum of 1/5 the 1184 * inactive target). If we were not able to completely satisfy 1185 * the free+cache targets we go for the whole target aggressively. 1186 * 1187 * NOTE: Both variables can end up negative. 1188 * NOTE: We are still in a critical section. 1189 */ 1190 1191 bzero(&marker, sizeof(marker)); 1192 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER; 1193 marker.queue = PQ_ACTIVE + q; 1194 marker.pc = q; 1195 marker.wire_count = 1; 1196 1197 vm_page_queues_spin_lock(PQ_ACTIVE + q); 1198 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq); 1199 maxscan = vm_page_queues[PQ_ACTIVE + q].lcnt; 1200 vm_page_queues_spin_unlock(PQ_ACTIVE + q); 1201 1202 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL && 1203 maxscan-- > 0 && (avail_shortage - delta > 0 || 1204 inactive_shortage > 0)) 1205 { 1206 vm_page_and_queue_spin_lock(m); 1207 if (m != TAILQ_NEXT(&marker, pageq)) { 1208 vm_page_and_queue_spin_unlock(m); 1209 ++maxscan; 1210 continue; 1211 } 1212 KKASSERT(m->queue - m->pc == PQ_ACTIVE); 1213 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, 1214 &marker, pageq); 1215 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m, 1216 &marker, pageq); 1217 1218 /* 1219 * Skip marker pages 1220 */ 1221 if (m->flags & PG_MARKER) { 1222 vm_page_and_queue_spin_unlock(m); 1223 continue; 1224 } 1225 1226 /* 1227 * Try to busy the page. Don't mess with pages which are 1228 * already busy or reorder them in the queue. 1229 */ 1230 if (vm_page_busy_try(m, TRUE)) { 1231 vm_page_and_queue_spin_unlock(m); 1232 continue; 1233 } 1234 1235 /* 1236 * Don't deactivate pages that are held, even if we can 1237 * busy them. (XXX why not?) 1238 */ 1239 if (m->hold_count != 0) { 1240 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, 1241 m, pageq); 1242 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE + q].pl, 1243 m, pageq); 1244 vm_page_and_queue_spin_unlock(m); 1245 vm_page_wakeup(m); 1246 continue; 1247 } 1248 vm_page_and_queue_spin_unlock(m); 1249 lwkt_yield(); 1250 1251 /* 1252 * The page has been successfully busied and the page and 1253 * queue are no longer locked. 1254 */ 1255 1256 /* 1257 * The count for pagedaemon pages is done after checking the 1258 * page for eligibility... 1259 */ 1260 mycpu->gd_cnt.v_pdpages++; 1261 1262 /* 1263 * Check to see "how much" the page has been used and clear 1264 * the tracking access bits. If the object has no references 1265 * don't bother paying the expense. 1266 */ 1267 actcount = 0; 1268 if (m->object && m->object->ref_count != 0) { 1269 if (m->flags & PG_REFERENCED) 1270 ++actcount; 1271 actcount += pmap_ts_referenced(m); 1272 if (actcount) { 1273 m->act_count += ACT_ADVANCE + actcount; 1274 if (m->act_count > ACT_MAX) 1275 m->act_count = ACT_MAX; 1276 } 1277 } 1278 vm_page_flag_clear(m, PG_REFERENCED); 1279 1280 /* 1281 * actcount is only valid if the object ref_count is non-zero. 1282 * If the page does not have an object, actcount will be zero. 1283 */ 1284 if (actcount && m->object->ref_count != 0) { 1285 vm_page_and_queue_spin_lock(m); 1286 if (m->queue - m->pc == PQ_ACTIVE) { 1287 TAILQ_REMOVE( 1288 &vm_page_queues[PQ_ACTIVE + q].pl, 1289 m, pageq); 1290 TAILQ_INSERT_TAIL( 1291 &vm_page_queues[PQ_ACTIVE + q].pl, 1292 m, pageq); 1293 } 1294 vm_page_and_queue_spin_unlock(m); 1295 vm_page_wakeup(m); 1296 } else { 1297 switch(m->object->type) { 1298 case OBJT_DEFAULT: 1299 case OBJT_SWAP: 1300 m->act_count -= min(m->act_count, 1301 vm_anonmem_decline); 1302 break; 1303 default: 1304 m->act_count -= min(m->act_count, 1305 vm_filemem_decline); 1306 break; 1307 } 1308 if (vm_pageout_algorithm || 1309 (m->object == NULL) || 1310 (m->object && (m->object->ref_count == 0)) || 1311 m->act_count < pass + 1 1312 ) { 1313 /* 1314 * Deactivate the page. If we had a 1315 * shortage from our inactive scan try to 1316 * free (cache) the page instead. 1317 * 1318 * Don't just blindly cache the page if 1319 * we do not have a shortage from the 1320 * inactive scan, that could lead to 1321 * gigabytes being moved. 1322 */ 1323 --inactive_shortage; 1324 if (avail_shortage - delta > 0 || 1325 (m->object && (m->object->ref_count == 0))) 1326 { 1327 if (avail_shortage - delta > 0) 1328 ++*recycle_countp; 1329 vm_page_protect(m, VM_PROT_NONE); 1330 if (m->dirty == 0 && 1331 (m->flags & PG_NEED_COMMIT) == 0 && 1332 avail_shortage - delta > 0) { 1333 vm_page_cache(m); 1334 } else { 1335 vm_page_deactivate(m); 1336 vm_page_wakeup(m); 1337 } 1338 } else { 1339 vm_page_deactivate(m); 1340 vm_page_wakeup(m); 1341 } 1342 ++delta; 1343 } else { 1344 vm_page_and_queue_spin_lock(m); 1345 if (m->queue - m->pc == PQ_ACTIVE) { 1346 TAILQ_REMOVE( 1347 &vm_page_queues[PQ_ACTIVE + q].pl, 1348 m, pageq); 1349 TAILQ_INSERT_TAIL( 1350 &vm_page_queues[PQ_ACTIVE + q].pl, 1351 m, pageq); 1352 } 1353 vm_page_and_queue_spin_unlock(m); 1354 vm_page_wakeup(m); 1355 } 1356 } 1357 } 1358 1359 /* 1360 * Clean out our local marker. 1361 */ 1362 vm_page_queues_spin_lock(PQ_ACTIVE + q); 1363 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq); 1364 vm_page_queues_spin_unlock(PQ_ACTIVE + q); 1365 1366 return (delta); 1367 } 1368 1369 /* 1370 * The number of actually free pages can drop down to v_free_reserved, 1371 * we try to build the free count back above v_free_min. Note that 1372 * vm_paging_needed() also returns TRUE if v_free_count is not at 1373 * least v_free_min so that is the minimum we must build the free 1374 * count to. 1375 * 1376 * We use a slightly higher target to improve hysteresis, 1377 * ((v_free_target + v_free_min) / 2). Since v_free_target 1378 * is usually the same as v_cache_min this maintains about 1379 * half the pages in the free queue as are in the cache queue, 1380 * providing pretty good pipelining for pageout operation. 1381 * 1382 * The system operator can manipulate vm.v_cache_min and 1383 * vm.v_free_target to tune the pageout demon. Be sure 1384 * to keep vm.v_free_min < vm.v_free_target. 1385 * 1386 * Note that the original paging target is to get at least 1387 * (free_min + cache_min) into (free + cache). The slightly 1388 * higher target will shift additional pages from cache to free 1389 * without effecting the original paging target in order to 1390 * maintain better hysteresis and not have the free count always 1391 * be dead-on v_free_min. 1392 * 1393 * NOTE: we are still in a critical section. 1394 * 1395 * Pages moved from PQ_CACHE to totally free are not counted in the 1396 * pages_freed counter. 1397 */ 1398 static void 1399 vm_pageout_scan_cache(int avail_shortage, int vnodes_skipped, int recycle_count) 1400 { 1401 struct vm_pageout_scan_info info; 1402 vm_page_t m; 1403 1404 while (vmstats.v_free_count < 1405 (vmstats.v_free_min + vmstats.v_free_target) / 2) { 1406 /* 1407 * This steals some code from vm/vm_page.c 1408 */ 1409 static int cache_rover = 0; 1410 1411 m = vm_page_list_find(PQ_CACHE, cache_rover & PQ_L2_MASK, FALSE); 1412 if (m == NULL) 1413 break; 1414 /* page is returned removed from its queue and spinlocked */ 1415 if (vm_page_busy_try(m, TRUE)) { 1416 vm_page_deactivate_locked(m); 1417 vm_page_spin_unlock(m); 1418 continue; 1419 } 1420 vm_page_spin_unlock(m); 1421 pagedaemon_wakeup(); 1422 lwkt_yield(); 1423 1424 /* 1425 * Page has been successfully busied and it and its queue 1426 * is no longer spinlocked. 1427 */ 1428 if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) || 1429 m->hold_count || 1430 m->wire_count) { 1431 vm_page_deactivate(m); 1432 vm_page_wakeup(m); 1433 continue; 1434 } 1435 KKASSERT((m->flags & PG_MAPPED) == 0); 1436 KKASSERT(m->dirty == 0); 1437 cache_rover += PQ_PRIME2; 1438 vm_pageout_page_free(m); 1439 mycpu->gd_cnt.v_dfree++; 1440 } 1441 1442 #if !defined(NO_SWAPPING) 1443 /* 1444 * Idle process swapout -- run once per second. 1445 */ 1446 if (vm_swap_idle_enabled) { 1447 static long lsec; 1448 if (time_second != lsec) { 1449 vm_pageout_req_swapout |= VM_SWAP_IDLE; 1450 vm_req_vmdaemon(); 1451 lsec = time_second; 1452 } 1453 } 1454 #endif 1455 1456 /* 1457 * If we didn't get enough free pages, and we have skipped a vnode 1458 * in a writeable object, wakeup the sync daemon. And kick swapout 1459 * if we did not get enough free pages. 1460 */ 1461 if (vm_paging_target() > 0) { 1462 if (vnodes_skipped && vm_page_count_min(0)) 1463 speedup_syncer(); 1464 #if !defined(NO_SWAPPING) 1465 if (vm_swap_enabled && vm_page_count_target()) { 1466 vm_req_vmdaemon(); 1467 vm_pageout_req_swapout |= VM_SWAP_NORMAL; 1468 } 1469 #endif 1470 } 1471 1472 /* 1473 * Handle catastrophic conditions. Under good conditions we should 1474 * be at the target, well beyond our minimum. If we could not even 1475 * reach our minimum the system is under heavy stress. 1476 * 1477 * Determine whether we have run out of memory. This occurs when 1478 * swap_pager_full is TRUE and the only pages left in the page 1479 * queues are dirty. We will still likely have page shortages. 1480 * 1481 * - swap_pager_full is set if insufficient swap was 1482 * available to satisfy a requested pageout. 1483 * 1484 * - the inactive queue is bloated (4 x size of active queue), 1485 * meaning it is unable to get rid of dirty pages and. 1486 * 1487 * - vm_page_count_min() without counting pages recycled from the 1488 * active queue (recycle_count) means we could not recover 1489 * enough pages to meet bare minimum needs. This test only 1490 * works if the inactive queue is bloated. 1491 * 1492 * - due to a positive avail_shortage we shifted the remaining 1493 * dirty pages from the active queue to the inactive queue 1494 * trying to find clean ones to free. 1495 */ 1496 if (swap_pager_full && vm_page_count_min(recycle_count)) 1497 kprintf("Warning: system low on memory+swap!\n"); 1498 if (swap_pager_full && vm_page_count_min(recycle_count) && 1499 vmstats.v_inactive_count > vmstats.v_active_count * 4 && 1500 avail_shortage > 0) { 1501 /* 1502 * Kill something. 1503 */ 1504 info.bigproc = NULL; 1505 info.bigsize = 0; 1506 allproc_scan(vm_pageout_scan_callback, &info); 1507 if (info.bigproc != NULL) { 1508 killproc(info.bigproc, "out of swap space"); 1509 info.bigproc->p_nice = PRIO_MIN; 1510 info.bigproc->p_usched->resetpriority( 1511 FIRST_LWP_IN_PROC(info.bigproc)); 1512 wakeup(&vmstats.v_free_count); 1513 PRELE(info.bigproc); 1514 } 1515 } 1516 } 1517 1518 /* 1519 * The caller must hold proc_token. 1520 */ 1521 static int 1522 vm_pageout_scan_callback(struct proc *p, void *data) 1523 { 1524 struct vm_pageout_scan_info *info = data; 1525 vm_offset_t size; 1526 1527 /* 1528 * Never kill system processes or init. If we have configured swap 1529 * then try to avoid killing low-numbered pids. 1530 */ 1531 if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) || 1532 ((p->p_pid < 48) && (vm_swap_size != 0))) { 1533 return (0); 1534 } 1535 1536 /* 1537 * if the process is in a non-running type state, 1538 * don't touch it. 1539 */ 1540 if (p->p_stat != SACTIVE && p->p_stat != SSTOP) 1541 return (0); 1542 1543 /* 1544 * Get the approximate process size. Note that anonymous pages 1545 * with backing swap will be counted twice, but there should not 1546 * be too many such pages due to the stress the VM system is 1547 * under at this point. 1548 */ 1549 size = vmspace_anonymous_count(p->p_vmspace) + 1550 vmspace_swap_count(p->p_vmspace); 1551 1552 /* 1553 * If the this process is bigger than the biggest one 1554 * remember it. 1555 */ 1556 if (info->bigsize < size) { 1557 if (info->bigproc) 1558 PRELE(info->bigproc); 1559 PHOLD(p); 1560 info->bigproc = p; 1561 info->bigsize = size; 1562 } 1563 lwkt_yield(); 1564 return(0); 1565 } 1566 1567 /* 1568 * This routine tries to maintain the pseudo LRU active queue, 1569 * so that during long periods of time where there is no paging, 1570 * that some statistic accumulation still occurs. This code 1571 * helps the situation where paging just starts to occur. 1572 */ 1573 static void 1574 vm_pageout_page_stats(int q) 1575 { 1576 static int fullintervalcount = 0; 1577 struct vm_page marker; 1578 vm_page_t m; 1579 int pcount, tpcount; /* Number of pages to check */ 1580 int page_shortage; 1581 1582 page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max + 1583 vmstats.v_free_min) - 1584 (vmstats.v_free_count + vmstats.v_inactive_count + 1585 vmstats.v_cache_count); 1586 1587 if (page_shortage <= 0) 1588 return; 1589 1590 pcount = vm_page_queues[PQ_ACTIVE + q].lcnt; 1591 fullintervalcount += vm_pageout_stats_interval; 1592 if (fullintervalcount < vm_pageout_full_stats_interval) { 1593 tpcount = (vm_pageout_stats_max * pcount) / 1594 vmstats.v_page_count + 1; 1595 if (pcount > tpcount) 1596 pcount = tpcount; 1597 } else { 1598 fullintervalcount = 0; 1599 } 1600 1601 bzero(&marker, sizeof(marker)); 1602 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER; 1603 marker.queue = PQ_ACTIVE + q; 1604 marker.pc = q; 1605 marker.wire_count = 1; 1606 1607 vm_page_queues_spin_lock(PQ_ACTIVE + q); 1608 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq); 1609 vm_page_queues_spin_unlock(PQ_ACTIVE + q); 1610 1611 while ((m = TAILQ_NEXT(&marker, pageq)) != NULL && 1612 pcount-- > 0) 1613 { 1614 int actcount; 1615 1616 vm_page_and_queue_spin_lock(m); 1617 if (m != TAILQ_NEXT(&marker, pageq)) { 1618 vm_page_and_queue_spin_unlock(m); 1619 ++pcount; 1620 continue; 1621 } 1622 KKASSERT(m->queue - m->pc == PQ_ACTIVE); 1623 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq); 1624 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m, 1625 &marker, pageq); 1626 1627 /* 1628 * Ignore markers 1629 */ 1630 if (m->flags & PG_MARKER) { 1631 vm_page_and_queue_spin_unlock(m); 1632 continue; 1633 } 1634 1635 /* 1636 * Ignore pages we can't busy 1637 */ 1638 if (vm_page_busy_try(m, TRUE)) { 1639 vm_page_and_queue_spin_unlock(m); 1640 continue; 1641 } 1642 vm_page_and_queue_spin_unlock(m); 1643 KKASSERT(m->queue - m->pc == PQ_ACTIVE); 1644 1645 /* 1646 * We now have a safely busied page, the page and queue 1647 * spinlocks have been released. 1648 * 1649 * Ignore held pages 1650 */ 1651 if (m->hold_count) { 1652 vm_page_wakeup(m); 1653 continue; 1654 } 1655 1656 /* 1657 * Calculate activity 1658 */ 1659 actcount = 0; 1660 if (m->flags & PG_REFERENCED) { 1661 vm_page_flag_clear(m, PG_REFERENCED); 1662 actcount += 1; 1663 } 1664 actcount += pmap_ts_referenced(m); 1665 1666 /* 1667 * Update act_count and move page to end of queue. 1668 */ 1669 if (actcount) { 1670 m->act_count += ACT_ADVANCE + actcount; 1671 if (m->act_count > ACT_MAX) 1672 m->act_count = ACT_MAX; 1673 vm_page_and_queue_spin_lock(m); 1674 if (m->queue - m->pc == PQ_ACTIVE) { 1675 TAILQ_REMOVE( 1676 &vm_page_queues[PQ_ACTIVE + q].pl, 1677 m, pageq); 1678 TAILQ_INSERT_TAIL( 1679 &vm_page_queues[PQ_ACTIVE + q].pl, 1680 m, pageq); 1681 } 1682 vm_page_and_queue_spin_unlock(m); 1683 vm_page_wakeup(m); 1684 continue; 1685 } 1686 1687 if (m->act_count == 0) { 1688 /* 1689 * We turn off page access, so that we have 1690 * more accurate RSS stats. We don't do this 1691 * in the normal page deactivation when the 1692 * system is loaded VM wise, because the 1693 * cost of the large number of page protect 1694 * operations would be higher than the value 1695 * of doing the operation. 1696 * 1697 * We use the marker to save our place so 1698 * we can release the spin lock. both (m) 1699 * and (next) will be invalid. 1700 */ 1701 vm_page_protect(m, VM_PROT_NONE); 1702 vm_page_deactivate(m); 1703 } else { 1704 m->act_count -= min(m->act_count, ACT_DECLINE); 1705 vm_page_and_queue_spin_lock(m); 1706 if (m->queue - m->pc == PQ_ACTIVE) { 1707 TAILQ_REMOVE( 1708 &vm_page_queues[PQ_ACTIVE + q].pl, 1709 m, pageq); 1710 TAILQ_INSERT_TAIL( 1711 &vm_page_queues[PQ_ACTIVE + q].pl, 1712 m, pageq); 1713 } 1714 vm_page_and_queue_spin_unlock(m); 1715 } 1716 vm_page_wakeup(m); 1717 } 1718 1719 /* 1720 * Remove our local marker 1721 */ 1722 vm_page_queues_spin_lock(PQ_ACTIVE + q); 1723 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq); 1724 vm_page_queues_spin_unlock(PQ_ACTIVE + q); 1725 } 1726 1727 static int 1728 vm_pageout_free_page_calc(vm_size_t count) 1729 { 1730 if (count < vmstats.v_page_count) 1731 return 0; 1732 /* 1733 * free_reserved needs to include enough for the largest swap pager 1734 * structures plus enough for any pv_entry structs when paging. 1735 * 1736 * v_free_min normal allocations 1737 * v_free_reserved system allocations 1738 * v_pageout_free_min allocations by pageout daemon 1739 * v_interrupt_free_min low level allocations (e.g swap structures) 1740 */ 1741 if (vmstats.v_page_count > 1024) 1742 vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200; 1743 else 1744 vmstats.v_free_min = 64; 1745 vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7; 1746 vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0; 1747 vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7; 1748 vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7; 1749 1750 return 1; 1751 } 1752 1753 1754 /* 1755 * vm_pageout is the high level pageout daemon. 1756 * 1757 * No requirements. 1758 */ 1759 static void 1760 vm_pageout_thread(void) 1761 { 1762 int pass; 1763 int q; 1764 int q1iterator = 0; 1765 int q2iterator = 0; 1766 1767 /* 1768 * Initialize some paging parameters. 1769 */ 1770 curthread->td_flags |= TDF_SYSTHREAD; 1771 1772 vm_pageout_free_page_calc(vmstats.v_page_count); 1773 1774 /* 1775 * v_free_target and v_cache_min control pageout hysteresis. Note 1776 * that these are more a measure of the VM cache queue hysteresis 1777 * then the VM free queue. Specifically, v_free_target is the 1778 * high water mark (free+cache pages). 1779 * 1780 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the 1781 * low water mark, while v_free_min is the stop. v_cache_min must 1782 * be big enough to handle memory needs while the pageout daemon 1783 * is signalled and run to free more pages. 1784 */ 1785 if (vmstats.v_free_count > 6144) 1786 vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved; 1787 else 1788 vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved; 1789 1790 /* 1791 * NOTE: With the new buffer cache b_act_count we want the default 1792 * inactive target to be a percentage of available memory. 1793 * 1794 * The inactive target essentially determines the minimum 1795 * number of 'temporary' pages capable of caching one-time-use 1796 * files when the VM system is otherwise full of pages 1797 * belonging to multi-time-use files or active program data. 1798 * 1799 * NOTE: The inactive target is aggressively persued only if the 1800 * inactive queue becomes too small. If the inactive queue 1801 * is large enough to satisfy page movement to free+cache 1802 * then it is repopulated more slowly from the active queue. 1803 * This allows a general inactive_target default to be set. 1804 * 1805 * There is an issue here for processes which sit mostly idle 1806 * 'overnight', such as sshd, tcsh, and X. Any movement from 1807 * the active queue will eventually cause such pages to 1808 * recycle eventually causing a lot of paging in the morning. 1809 * To reduce the incidence of this pages cycled out of the 1810 * buffer cache are moved directly to the inactive queue if 1811 * they were only used once or twice. 1812 * 1813 * The vfs.vm_cycle_point sysctl can be used to adjust this. 1814 * Increasing the value (up to 64) increases the number of 1815 * buffer recyclements which go directly to the inactive queue. 1816 */ 1817 if (vmstats.v_free_count > 2048) { 1818 vmstats.v_cache_min = vmstats.v_free_target; 1819 vmstats.v_cache_max = 2 * vmstats.v_cache_min; 1820 } else { 1821 vmstats.v_cache_min = 0; 1822 vmstats.v_cache_max = 0; 1823 } 1824 vmstats.v_inactive_target = vmstats.v_free_count / 4; 1825 1826 /* XXX does not really belong here */ 1827 if (vm_page_max_wired == 0) 1828 vm_page_max_wired = vmstats.v_free_count / 3; 1829 1830 if (vm_pageout_stats_max == 0) 1831 vm_pageout_stats_max = vmstats.v_free_target; 1832 1833 /* 1834 * Set interval in seconds for stats scan. 1835 */ 1836 if (vm_pageout_stats_interval == 0) 1837 vm_pageout_stats_interval = 5; 1838 if (vm_pageout_full_stats_interval == 0) 1839 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4; 1840 1841 1842 /* 1843 * Set maximum free per pass 1844 */ 1845 if (vm_pageout_stats_free_max == 0) 1846 vm_pageout_stats_free_max = 5; 1847 1848 swap_pager_swap_init(); 1849 pass = 0; 1850 1851 /* 1852 * The pageout daemon is never done, so loop forever. 1853 */ 1854 while (TRUE) { 1855 int error; 1856 int avail_shortage; 1857 int inactive_shortage; 1858 int vnodes_skipped = 0; 1859 int recycle_count = 0; 1860 int tmp; 1861 1862 /* 1863 * Wait for an action request. If we timeout check to 1864 * see if paging is needed (in case the normal wakeup 1865 * code raced us). 1866 */ 1867 if (vm_pages_needed == 0) { 1868 error = tsleep(&vm_pages_needed, 1869 0, "psleep", 1870 vm_pageout_stats_interval * hz); 1871 if (error && 1872 vm_paging_needed() == 0 && 1873 vm_pages_needed == 0) { 1874 for (q = 0; q < PQ_L2_SIZE; ++q) 1875 vm_pageout_page_stats(q); 1876 continue; 1877 } 1878 vm_pages_needed = 1; 1879 } 1880 1881 mycpu->gd_cnt.v_pdwakeups++; 1882 1883 /* 1884 * Do whatever cleanup that the pmap code can. 1885 */ 1886 pmap_collect(); 1887 1888 /* 1889 * Scan for pageout. Try to avoid thrashing the system 1890 * with activity. 1891 * 1892 * Calculate our target for the number of free+cache pages we 1893 * want to get to. This is higher then the number that causes 1894 * allocations to stall (severe) in order to provide hysteresis, 1895 * and if we don't make it all the way but get to the minimum 1896 * we're happy. Goose it a bit if there are multipler 1897 * requests for memory. 1898 */ 1899 avail_shortage = vm_paging_target() + vm_pageout_deficit; 1900 vm_pageout_deficit = 0; 1901 1902 if (avail_shortage > 0) { 1903 for (q = 0; q < PQ_L2_SIZE; ++q) { 1904 avail_shortage -= 1905 vm_pageout_scan_inactive( 1906 pass, 1907 (q + q1iterator) & PQ_L2_MASK, 1908 PQAVERAGE(avail_shortage), 1909 &vnodes_skipped); 1910 if (avail_shortage <= 0) 1911 break; 1912 } 1913 q1iterator = q + 1; 1914 } 1915 1916 /* 1917 * Figure out how many active pages we must deactivate. If 1918 * we were able to reach our target with just the inactive 1919 * scan above we limit the number of active pages we 1920 * deactivate to reduce unnecessary work. 1921 */ 1922 inactive_shortage = vmstats.v_inactive_target - 1923 vmstats.v_inactive_count; 1924 1925 /* 1926 * If we were unable to free sufficient inactive pages to 1927 * satisfy the free/cache queue requirements then simply 1928 * reaching the inactive target may not be good enough. 1929 * Try to deactivate pages in excess of the target based 1930 * on the shortfall. 1931 * 1932 * However to prevent thrashing the VM system do not 1933 * deactivate more than an additional 1/10 the inactive 1934 * target's worth of active pages. 1935 */ 1936 if (avail_shortage > 0) { 1937 tmp = avail_shortage * 2; 1938 if (tmp > vmstats.v_inactive_target / 10) 1939 tmp = vmstats.v_inactive_target / 10; 1940 inactive_shortage += tmp; 1941 } 1942 1943 if (avail_shortage > 0 || inactive_shortage > 0) { 1944 int delta; 1945 1946 for (q = 0; q < PQ_L2_SIZE; ++q) { 1947 delta = vm_pageout_scan_active( 1948 pass, 1949 (q + q2iterator) & PQ_L2_MASK, 1950 PQAVERAGE(avail_shortage), 1951 PQAVERAGE(inactive_shortage), 1952 &recycle_count); 1953 inactive_shortage -= delta; 1954 avail_shortage -= delta; 1955 if (inactive_shortage <= 0 && 1956 avail_shortage <= 0) { 1957 break; 1958 } 1959 } 1960 q2iterator = q + 1; 1961 } 1962 1963 /* 1964 * Finally free enough cache pages to meet our free page 1965 * requirement and take more drastic measures if we are 1966 * still in trouble. 1967 */ 1968 vm_pageout_scan_cache(avail_shortage, vnodes_skipped, 1969 recycle_count); 1970 1971 /* 1972 * Wait for more work. 1973 */ 1974 if (avail_shortage > 0) { 1975 ++pass; 1976 if (swap_pager_full) { 1977 /* 1978 * Running out of memory, catastrophic back-off 1979 * to one-second intervals. 1980 */ 1981 tsleep(&vm_pages_needed, 0, "pdelay", hz); 1982 } else if (pass < 10 && vm_pages_needed > 1) { 1983 /* 1984 * Normal operation, additional processes 1985 * have already kicked us. Retry immediately. 1986 */ 1987 } else if (pass < 10) { 1988 /* 1989 * Normal operation, fewer processes. Delay 1990 * a bit but allow wakeups. 1991 */ 1992 vm_pages_needed = 0; 1993 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10); 1994 vm_pages_needed = 1; 1995 } else { 1996 /* 1997 * We've taken too many passes, forced delay. 1998 */ 1999 tsleep(&vm_pages_needed, 0, "pdelay", hz / 10); 2000 } 2001 } else if (vm_pages_needed) { 2002 /* 2003 * Interlocked wakeup of waiters (non-optional). 2004 * 2005 * Similar to vm_page_free_wakeup() in vm_page.c, 2006 * wake 2007 */ 2008 pass = 0; 2009 if (!vm_page_count_min(vm_page_free_hysteresis) || 2010 !vm_page_count_target()) { 2011 vm_pages_needed = 0; 2012 wakeup(&vmstats.v_free_count); 2013 } 2014 } else { 2015 pass = 0; 2016 } 2017 } 2018 } 2019 2020 static struct kproc_desc page_kp = { 2021 "pagedaemon", 2022 vm_pageout_thread, 2023 &pagethread 2024 }; 2025 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp) 2026 2027 2028 /* 2029 * Called after allocating a page out of the cache or free queue 2030 * to possibly wake the pagedaemon up to replentish our supply. 2031 * 2032 * We try to generate some hysteresis by waking the pagedaemon up 2033 * when our free+cache pages go below the free_min+cache_min level. 2034 * The pagedaemon tries to get the count back up to at least the 2035 * minimum, and through to the target level if possible. 2036 * 2037 * If the pagedaemon is already active bump vm_pages_needed as a hint 2038 * that there are even more requests pending. 2039 * 2040 * SMP races ok? 2041 * No requirements. 2042 */ 2043 void 2044 pagedaemon_wakeup(void) 2045 { 2046 if (vm_paging_needed() && curthread != pagethread) { 2047 if (vm_pages_needed == 0) { 2048 vm_pages_needed = 1; /* SMP race ok */ 2049 wakeup(&vm_pages_needed); 2050 } else if (vm_page_count_min(0)) { 2051 ++vm_pages_needed; /* SMP race ok */ 2052 } 2053 } 2054 } 2055 2056 #if !defined(NO_SWAPPING) 2057 2058 /* 2059 * SMP races ok? 2060 * No requirements. 2061 */ 2062 static void 2063 vm_req_vmdaemon(void) 2064 { 2065 static int lastrun = 0; 2066 2067 if ((ticks > (lastrun + hz)) || (ticks < lastrun)) { 2068 wakeup(&vm_daemon_needed); 2069 lastrun = ticks; 2070 } 2071 } 2072 2073 static int vm_daemon_callback(struct proc *p, void *data __unused); 2074 2075 /* 2076 * No requirements. 2077 */ 2078 static void 2079 vm_daemon(void) 2080 { 2081 /* 2082 * XXX vm_daemon_needed specific token? 2083 */ 2084 while (TRUE) { 2085 tsleep(&vm_daemon_needed, 0, "psleep", 0); 2086 if (vm_pageout_req_swapout) { 2087 swapout_procs(vm_pageout_req_swapout); 2088 vm_pageout_req_swapout = 0; 2089 } 2090 /* 2091 * scan the processes for exceeding their rlimits or if 2092 * process is swapped out -- deactivate pages 2093 */ 2094 allproc_scan(vm_daemon_callback, NULL); 2095 } 2096 } 2097 2098 /* 2099 * Caller must hold proc_token. 2100 */ 2101 static int 2102 vm_daemon_callback(struct proc *p, void *data __unused) 2103 { 2104 vm_pindex_t limit, size; 2105 2106 /* 2107 * if this is a system process or if we have already 2108 * looked at this process, skip it. 2109 */ 2110 if (p->p_flags & (P_SYSTEM | P_WEXIT)) 2111 return (0); 2112 2113 /* 2114 * if the process is in a non-running type state, 2115 * don't touch it. 2116 */ 2117 if (p->p_stat != SACTIVE && p->p_stat != SSTOP) 2118 return (0); 2119 2120 /* 2121 * get a limit 2122 */ 2123 limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur, 2124 p->p_rlimit[RLIMIT_RSS].rlim_max)); 2125 2126 /* 2127 * let processes that are swapped out really be 2128 * swapped out. Set the limit to nothing to get as 2129 * many pages out to swap as possible. 2130 */ 2131 if (p->p_flags & P_SWAPPEDOUT) 2132 limit = 0; 2133 2134 lwkt_gettoken(&p->p_vmspace->vm_map.token); 2135 size = vmspace_resident_count(p->p_vmspace); 2136 if (limit >= 0 && size >= limit) { 2137 vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map, limit); 2138 } 2139 lwkt_reltoken(&p->p_vmspace->vm_map.token); 2140 return (0); 2141 } 2142 2143 #endif 2144