/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);

static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;

SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, CTLFLAG_RW,
    &vm_swap_enabled, 0,
    "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled, CTLFLAG_RW,
    &vm_swap_idle_enabled, 0,
    "Allow swapout on idle criteria");

/*
 * swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0,
    "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0,
    "Time before a process will be swapped out");

static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);

static int swapped_cnt;
static int swap_inprogress;	/* Pending swap-ins done outside swapper. */
static int last_swapin;

static void swapclear(struct proc *);
static int swapout(struct proc *);
static void vm_swapout_map_deactivate_pages(vm_map_t, long);
static void vm_swapout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void swapout_procs(int action);
static void vm_req_vmdaemon(int req);
static void vm_thread_swapout(struct thread *td);

/*
 * vm_swapout_object_deactivate_pages
 *
 * Deactivate enough pages to satisfy the inactive target
 * requirements.
 *
 * The object and map must be locked.
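 *
 * A rough sketch of the policy implemented below: each resident page's
 * hardware reference information (via pmap_ts_referenced()) and the
 * PGA_REFERENCED soft flag are folded into an activity delta.  Pages
 * showing activity have act_count credited by ACT_ADVANCE and stay
 * active; idle pages have act_count debited by ACT_DECLINE and, once
 * the count reaches zero, are unmapped and moved to the inactive queue
 * for the page daemon to reclaim.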
 */
static void
vm_swapout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	vm_object_t backing_object, object;
	vm_page_t p;
	int act_delta, remove_mode;

	VM_OBJECT_ASSERT_LOCKED(first_object);
	if ((first_object->flags & OBJ_FICTITIOUS) != 0)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		VM_OBJECT_ASSERT_LOCKED(object);
		if ((object->flags & OBJ_UNMANAGED) != 0 ||
		    object->paging_in_progress != 0)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * Scan the object's entire memory queue.
		 */
		TAILQ_FOREACH(p, &object->memq, listq) {
			if (pmap_resident_count(pmap) <= desired)
				goto unlock_return;
			if (should_yield())
				goto unlock_return;
			if (vm_page_busied(p))
				continue;
			VM_CNT_INC(v_pdpages);
			vm_page_lock(p);
			if (vm_page_wired(p) ||
			    !pmap_page_exists_quick(pmap, p)) {
				vm_page_unlock(p);
				continue;
			}
			act_delta = pmap_ts_referenced(p);
			if ((p->aflags & PGA_REFERENCED) != 0) {
				if (act_delta == 0)
					act_delta = 1;
				vm_page_aflag_clear(p, PGA_REFERENCED);
			}
			if (!vm_page_active(p) && act_delta != 0) {
				vm_page_activate(p);
				p->act_count += act_delta;
			} else if (vm_page_active(p)) {
				/*
				 * The page daemon does not requeue pages
				 * after modifying their activation count.
				 */
				if (act_delta == 0) {
					p->act_count -= min(p->act_count,
					    ACT_DECLINE);
					if (!remove_mode &&
					    p->act_count == 0) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					}
				} else {
					vm_page_activate(p);
					if (p->act_count < ACT_MAX -
					    ACT_ADVANCE)
						p->act_count += ACT_ADVANCE;
				}
			} else if (vm_page_inactive(p))
				pmap_remove_all(p);
			vm_page_unlock(p);
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_RLOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_RUNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_RUNLOCK(object);
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_swapout_map_deactivate_pages(vm_map_t map, long desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock_read(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * First, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				    bigobj->resident_page_count <
				    obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_RUNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_RUNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_swapout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_RUNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
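	 * Walking the entries in map order tends to reach the text
	 * segment's object first, even though its pages are among the
	 * likeliest to be faulted right back in; a better ordering would
	 * presumably start with less frequently used mappings.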
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_RLOCK(obj);
				vm_swapout_object_deactivate_pages(map->pmap,
				    obj, desired);
				VM_OBJECT_RUNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	}

	vm_map_unlock_read(map);
}

/*
 * Swap out requests
 */
#define	VM_SWAP_NORMAL	1
#define	VM_SWAP_IDLE	2

void
vm_swapout_run(void)
{

	if (vm_swap_enabled)
		vm_req_vmdaemon(VM_SWAP_NORMAL);
}

/*
 * Idle process swapout -- run once per second when pagedaemons are
 * reclaiming pages.
 */
void
vm_swapout_run_idle(void)
{
	static long lsec;

	if (!vm_swap_idle_enabled || time_second == lsec)
		return;
	vm_req_vmdaemon(VM_SWAP_IDLE);
	lsec = time_second;
}

static void
vm_req_vmdaemon(int req)
{
	static int lastrun = 0;

	mtx_lock(&vm_daemon_mtx);
	vm_pageout_req_swapout |= req;
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
	mtx_unlock(&vm_daemon_mtx);
}

static void
vm_daemon(void)
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	struct vmspace *vm;
	int breakout, swapout_flags, tryagain, attempts;
#ifdef RACCT
	uint64_t rsize, ravailable;
#endif

	while (TRUE) {
		mtx_lock(&vm_daemon_mtx);
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep",
#ifdef RACCT
		    racct_enable ? hz : 0
#else
		    0
#endif
		);
		swapout_flags = vm_pageout_req_swapout;
		vm_pageout_req_swapout = 0;
		mtx_unlock(&vm_daemon_mtx);
		if (swapout_flags != 0) {
			/*
			 * Drain the per-CPU page queue batches as a deadlock
			 * avoidance measure.
			 */
			if ((swapout_flags & VM_SWAP_NORMAL) != 0)
				vm_page_pqbatch_drain();
			swapout_procs(swapout_flags);
		}

		/*
		 * Scan for processes that exceed their RSS rlimit or are
		 * swapped out, and deactivate their pages.
		 */
		tryagain = 0;
		attempts = 0;
again:
		attempts++;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			vm_pindex_t limit, size;

			/*
			 * If this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL ||
			    p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * If the process is in a non-running type state,
			 * don't touch it.
			 */
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td) &&
				    !TD_IS_SUSPENDED(td)) {
					thread_unlock(td);
					breakout = 1;
					break;
				}
				thread_unlock(td);
			}
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * Get a limit.
			 */
			lim_rlimit_proc(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing, which
			 * will force a swap-out of all resident pages.
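			 *
			 * Note that with a limit of 0, the deactivation
			 * pass below also reaches the desired == 0 case
			 * in vm_swapout_map_deactivate_pages(), which,
			 * when nothing is wired, removes all of the
			 * pmap's mappings and so frees its page table
			 * pages as well.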
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */
			vm = vmspace_acquire_ref(p);
			_PHOLD_LITE(p);
			PROC_UNLOCK(p);
			if (vm == NULL) {
				PRELE(p);
				continue;
			}
			sx_sunlock(&allproc_lock);

			size = vmspace_resident_count(vm);
			if (size >= limit) {
				vm_swapout_map_deactivate_pages(
				    &vm->vm_map, limit);
				size = vmspace_resident_count(vm);
			}
#ifdef RACCT
			if (racct_enable) {
				rsize = IDX_TO_OFF(size);
				PROC_LOCK(p);
				if (p->p_state == PRS_NORMAL)
					racct_set(p, RACCT_RSS, rsize);
				ravailable = racct_get_available(p, RACCT_RSS);
				PROC_UNLOCK(p);
				if (rsize > ravailable) {
					/*
					 * Don't be overly aggressive; this
					 * might be an innocent process,
					 * and the limit could've been exceeded
					 * by some memory hog.  Don't try
					 * to deactivate more than 1/4th
					 * of the process's resident set size.
					 */
					if (attempts <= 8) {
						if (ravailable < rsize -
						    (rsize / 4)) {
							ravailable = rsize -
							    (rsize / 4);
						}
					}
					vm_swapout_map_deactivate_pages(
					    &vm->vm_map,
					    OFF_TO_IDX(ravailable));
					/* Update RSS usage after paging out. */
					size = vmspace_resident_count(vm);
					rsize = IDX_TO_OFF(size);
					PROC_LOCK(p);
					if (p->p_state == PRS_NORMAL)
						racct_set(p, RACCT_RSS, rsize);
					PROC_UNLOCK(p);
					if (rsize > ravailable)
						tryagain = 1;
				}
			}
#endif
			vmspace_free(vm);
			sx_slock(&allproc_lock);
			PRELE(p);
		}
		sx_sunlock(&allproc_lock);
		if (tryagain != 0 && attempts <= 10) {
			maybe_yield();
			goto again;
		}
	}
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_dirty(m);
		vm_page_lock(m);
		vm_page_unwire(m, PQ_LAUNDRY);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
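 *
 * A sketch of the approach below: all of the stack's pages are grabbed
 * and wired at once, then each run of not-fully-valid pages is filled
 * with as few pager requests as possible.  vm_pager_has_page() reports
 * how many pages directly follow the requested one in the backing
 * store, so each vm_pager_get_pages() call can pull in a contiguous
 * cluster instead of a single page.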
 */
static void
vm_thread_swapin(struct thread *td, int oom_alloc)
{
	vm_object_t ksobj;
	vm_page_t ma[KSTACK_MAX_PAGES];
	int a, count, i, j, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_WLOCK(ksobj);
	(void)vm_page_grab_pages(ksobj, 0, oom_alloc | VM_ALLOC_WIRED, ma,
	    pages);
	for (i = 0; i < pages;) {
		vm_page_assert_xbusied(ma[i]);
		if (ma[i]->valid == VM_PAGE_BITS_ALL) {
			vm_page_xunbusy(ma[i]);
			i++;
			continue;
		}
		vm_object_pip_add(ksobj, 1);
		for (j = i + 1; j < pages; j++)
			if (ma[j]->valid == VM_PAGE_BITS_ALL)
				break;
		rv = vm_pager_has_page(ksobj, ma[i]->pindex, NULL, &a);
		KASSERT(rv == 1, ("%s: missing page %p", __func__, ma[i]));
		count = min(a + 1, j - i);
		rv = vm_pager_get_pages(ksobj, ma + i, count, NULL, NULL);
		KASSERT(rv == VM_PAGER_OK, ("%s: cannot get kstack for proc %d",
		    __func__, td->td_proc->p_pid));
		vm_object_pip_wakeup(ksobj);
		for (j = i; j < i + count; j++)
			vm_page_xunbusy(ma[j]);
		i += count;
	}
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}

void
faultin(struct proc *p)
{
	struct thread *td;
	int oom_alloc;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}

	if ((p->p_flag & P_INMEM) == 0) {
		oom_alloc = (p->p_flag & P_WKILLED) != 0 ? VM_ALLOC_SYSTEM :
		    VM_ALLOC_NORMAL;

		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);
		sx_xlock(&allproc_lock);
		MPASS(swapped_cnt > 0);
		swapped_cnt--;
		if (curthread != &thread0)
			swap_inprogress++;
		sx_xunlock(&allproc_lock);

		/*
		 * We hold no lock here because the list of threads
		 * cannot change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td, oom_alloc);

		if (curthread != &thread0) {
			sx_xlock(&allproc_lock);
			MPASS(swap_inprogress > 0);
			swap_inprogress--;
			last_swapin = ticks;
			sx_xunlock(&allproc_lock);
		}
		PROC_LOCK(p);
		swapclear(p);
		p->p_swtick = ticks;

		/* Allow other threads to swap p out now. */
		wakeup(&p->p_flag);
		--p->p_lock;
	}
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
static struct proc *
swapper_selector(bool wkilled_only)
{
	struct proc *p, *res;
	struct thread *td;
	int ppri, pri, slptime, swtime;

	sx_assert(&allproc_lock, SA_SLOCKED);
	if (swapped_cnt == 0)
		return (NULL);
	res = NULL;
	ppri = INT_MIN;
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW || (p->p_flag & (P_SWAPPINGOUT |
		    P_SWAPPINGIN | P_INMEM)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		if (p->p_state == PRS_NORMAL && (p->p_flag & P_WKILLED) != 0) {
			/*
			 * A swapped-out process might have mapped a
			 * large portion of the system's pages as
			 * anonymous memory.
			 * There is no other way to release the memory
			 * other than to kill the process, for which we
			 * need to swap it in.
			 */
			return (p);
		}
		if (wkilled_only) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * If this thread has higher priority
				 * and there is enough space, select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					res = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}

	if (res != NULL)
		PROC_LOCK(res);
	return (res);
}

#define	SWAPIN_INTERVAL	(MAXSLP * hz / 2)

/*
 * Limit the swapper to swapping in one non-WKILLED process per MAXSLP/2
 * interval, assuming that there is:
 * - at least one domain that is not suffering from a shortage of free memory;
 * - no parallel swap-ins;
 * - no other swap-ins in the current SWAPIN_INTERVAL.
 */
static bool
swapper_wkilled_only(void)
{

	return (vm_page_count_min_set(&all_domains) || swap_inprogress > 0 ||
	    (u_int)(ticks - last_swapin) < SWAPIN_INTERVAL);
}

void
swapper(void)
{
	struct proc *p;

	for (;;) {
		sx_slock(&allproc_lock);
		p = swapper_selector(swapper_wkilled_only());
		sx_sunlock(&allproc_lock);

		if (p == NULL) {
			tsleep(&proc0, PVM, "swapin", SWAPIN_INTERVAL);
		} else {
			PROC_LOCK_ASSERT(p, MA_OWNED);

			/*
			 * Another process may be bringing or may have
			 * already brought this process in while we
			 * traverse all threads.  Or, this process may
			 * have exited or may even be swapped out
			 * again.
			 */
			if (p->p_state == PRS_NORMAL && (p->p_flag & (P_INMEM |
			    P_SWAPPINGOUT | P_SWAPPINGIN)) == 0) {
				faultin(p);
			}
			PROC_UNLOCK(p);
		}
	}
}

/*
 * First, if any processes have been sleeping or stopped for at least
 * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
 * no such processes exist, then the longest-sleeping or stopped
 * process is swapped out.  Finally, and only as a last resort, if
 * there are no sleeping or stopped processes, the longest-resident
 * process is swapped out.
 */
static void
swapout_procs(int action)
{
	struct proc *p;
	struct thread *td;
	int slptime;
	bool didswap, doswap;

	MPASS((action & (VM_SWAP_NORMAL | VM_SWAP_IDLE)) != 0);

	didswap = false;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Filter out not yet fully constructed processes.  Do
		 * not swap out held processes.  Avoid processes which
		 * are system, exiting, execing, traced, already swapped
		 * out or are in the process of being swapped in or out.
		 */
		PROC_LOCK(p);
		if (p->p_state != PRS_NORMAL || p->p_lock != 0 || (p->p_flag &
		    (P_SYSTEM | P_WEXIT | P_INEXEC | P_STOPPED_SINGLE |
		    P_TRACED | P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) !=
		    P_INMEM) {
			PROC_UNLOCK(p);
			continue;
		}

		/*
		 * Further consideration of this process for swap out
		 * requires iterating over its threads.
		 * We release allproc_lock here so that process creation
		 * and destruction are not blocked while we iterate.
		 *
		 * To later reacquire allproc_lock and resume
		 * iteration over the allproc list, we will first have
		 * to release the lock on the process.  We place a
		 * hold on the process so that it remains in the
		 * allproc list while it is unlocked.
		 */
		_PHOLD_LITE(p);
		sx_sunlock(&allproc_lock);

		/*
		 * Do not swap out a realtime process.
		 * Guarantee swap_idle_threshold1 time in memory.
		 * If the system is under memory stress, or if we are
		 * swapping idle processes >= swap_idle_threshold2,
		 * then swap the process out.
		 */
		doswap = true;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			slptime = (ticks - td->td_slptick) / hz;
			if (PRI_IS_REALTIME(td->td_pri_class) ||
			    slptime < swap_idle_threshold1 ||
			    !thread_safetoswapout(td) ||
			    ((action & VM_SWAP_NORMAL) == 0 &&
			    slptime < swap_idle_threshold2))
				doswap = false;
			thread_unlock(td);
			if (!doswap)
				break;
		}
		if (doswap && swapout(p) == 0)
			didswap = true;
		else
			doswap = false;

		PROC_UNLOCK(p);
		/*
		 * Count only a process actually swapped out on this
		 * iteration; didswap accumulates over the whole scan.
		 */
		if (doswap) {
			sx_xlock(&allproc_lock);
			swapped_cnt++;
			sx_downgrade(&allproc_lock);
		} else
			sx_slock(&allproc_lock);
		PRELE(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * If we swapped something out, and another process needed memory,
	 * then wake up the swapper.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapclear(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td))
			if (setrunnable(td)) {
#ifdef INVARIANTS
				/*
				 * XXX: We just cleared TDI_SWAPPED
				 * above and set TDF_INMEM, so this
				 * should never happen.
				 */
				panic("not waking up swapper");
#endif
			}
		thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN | P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

static int
swapout(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) ==
	    P_INMEM, ("swapout: lost a swapout race?"));

	/*
	 * Remember the resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	p->p_swtick = ticks;
	return (0);
}