xref: /freebsd/sys/vm/vm_swapout.c (revision 780fb4a2)
/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/_kstack_cache.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);

static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;

SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, CTLFLAG_RW,
    &vm_swap_enabled, 0,
    "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled, CTLFLAG_RW,
    &vm_swap_idle_enabled, 0,
    "Allow swapout on idle criteria");

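/*
 * Editorial note: the knobs above surface as the vm.swap_enabled and
 * vm.swap_idle_enabled sysctls.  As a usage sketch (not part of this file),
 * they can be toggled at runtime with sysctl(8):
 *
 *	sysctl vm.swap_enabled=0	# disable whole-process swapout
 *	sysctl vm.swap_idle_enabled=1	# also swap out long-idle processes
 */
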
/*
 * swap_idle_threshold1 is the guaranteed swapped-in time, in seconds, for
 * a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0,
    "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time, in seconds, that a process can be idle
 * before it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0,
    "Time before a process will be swapped out");

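/*
 * Both thresholds are applied in swapout_procs(), which compares them
 * against a thread's sleep time, (ticks - td_slptick) / hz.  Under a
 * VM_SWAP_NORMAL request a thread must have slept at least
 * swap_idle_threshold1 seconds to be eligible; under an idle-only
 * (VM_SWAP_IDLE) request it must additionally have slept at least
 * swap_idle_threshold2 seconds.
 */
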
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);

static void swapclear(struct proc *);
static int swapout(struct proc *);
static void vm_swapout_map_deactivate_pages(vm_map_t, long);
static void vm_swapout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void swapout_procs(int action);
static void vm_req_vmdaemon(int req);
static void vm_thread_swapin(struct thread *td);
static void vm_thread_swapout(struct thread *td);

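/*
 * Overview: the page daemon requests work through vm_swapout_run() and
 * vm_swapout_run_idle(), which wake vm_daemon() via vm_req_vmdaemon().
 * vm_daemon() enforces RSS limits with the two *_deactivate_pages()
 * routines and, through swapout_procs() and swapout(), pushes entire idle
 * processes out of memory, including their kernel stacks.  swapper() and
 * faultin() perform the reverse operation, bringing such processes back in.
 */
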
/*
 *	vm_swapout_object_deactivate_pages
 *
 *	Deactivate enough pages to satisfy the inactive target
 *	requirements.
 *
 *	The object and map must be locked.
 */
static void
vm_swapout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	vm_object_t backing_object, object;
	vm_page_t p;
	int act_delta, remove_mode;

	VM_OBJECT_ASSERT_LOCKED(first_object);
	if ((first_object->flags & OBJ_FICTITIOUS) != 0)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		VM_OBJECT_ASSERT_LOCKED(object);
		if ((object->flags & OBJ_UNMANAGED) != 0 ||
		    object->paging_in_progress != 0)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * Scan the object's entire memory queue.
		 */
		TAILQ_FOREACH(p, &object->memq, listq) {
			if (pmap_resident_count(pmap) <= desired)
				goto unlock_return;
			if (should_yield())
				goto unlock_return;
			if (vm_page_busied(p))
				continue;
			VM_CNT_INC(v_pdpages);
			vm_page_lock(p);
			if (vm_page_held(p) ||
			    !pmap_page_exists_quick(pmap, p)) {
				vm_page_unlock(p);
				continue;
			}
			act_delta = pmap_ts_referenced(p);
			if ((p->aflags & PGA_REFERENCED) != 0) {
				if (act_delta == 0)
					act_delta = 1;
				vm_page_aflag_clear(p, PGA_REFERENCED);
			}
			if (!vm_page_active(p) && act_delta != 0) {
				vm_page_activate(p);
				p->act_count += act_delta;
			} else if (vm_page_active(p)) {
				if (act_delta == 0) {
					p->act_count -= min(p->act_count,
					    ACT_DECLINE);
					if (!remove_mode && p->act_count == 0) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else
						vm_page_requeue(p);
				} else {
					vm_page_activate(p);
					if (p->act_count < ACT_MAX -
					    ACT_ADVANCE)
						p->act_count += ACT_ADVANCE;
					vm_page_requeue(p);
				}
			} else if (vm_page_inactive(p))
				pmap_remove_all(p);
			vm_page_unlock(p);
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_RLOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_RUNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_RUNLOCK(object);
}
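
/*
 * The scan above mirrors the page daemon's aging of the active queue: a
 * page that shows references (pmap_ts_referenced() or PGA_REFERENCED) has
 * its act_count boosted and stays active, while an unreferenced active page
 * decays by ACT_DECLINE and is unmapped and deactivated once act_count
 * reaches zero, unless the object has more than one shadow (remove_mode),
 * in which case it is only requeued.
 */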

/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_swapout_map_deactivate_pages(vm_map_t map, long desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock_read(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * First, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				     bigobj->resident_page_count <
				     obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_RUNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_RUNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_swapout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_RUNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_RLOCK(obj);
				vm_swapout_object_deactivate_pages(map->pmap,
				    obj, desired);
				VM_OBJECT_RUNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	}

	vm_map_unlock_read(map);
}
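
/*
 * In short, this is a two-pass strategy: pages are deactivated from the
 * single largest unshared object first, then from the remaining map entries
 * until the pmap's resident count drops to "desired".  When the goal is a
 * full swapout (desired == 0) and nothing in the map is wired, all mappings
 * are removed so that page table pages can be reclaimed as well.
 */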

/*
 * Swap out requests
 */
#define VM_SWAP_NORMAL 1
#define VM_SWAP_IDLE 2

void
vm_swapout_run(void)
{

	if (vm_swap_enabled)
		vm_req_vmdaemon(VM_SWAP_NORMAL);
}

/*
 * Idle process swapout -- run once per second when pagedaemons are
 * reclaiming pages.
 */
void
vm_swapout_run_idle(void)
{
	static long lsec;

	if (!vm_swap_idle_enabled || time_second == lsec)
		return;
	vm_req_vmdaemon(VM_SWAP_IDLE);
	lsec = time_second;
}

static void
vm_req_vmdaemon(int req)
{
	static int lastrun = 0;

	mtx_lock(&vm_daemon_mtx);
	vm_pageout_req_swapout |= req;
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
	mtx_unlock(&vm_daemon_mtx);
}
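
/*
 * vm_req_vmdaemon() accumulates request bits under vm_daemon_mtx but wakes
 * the daemon at most roughly once per second: a wakeup is posted only if at
 * least hz ticks have elapsed since the previous one, or if the ticks
 * counter has wrapped (ticks < lastrun).
 */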

static void
vm_daemon(void)
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	struct vmspace *vm;
	int breakout, swapout_flags, tryagain, attempts;
#ifdef RACCT
	uint64_t rsize, ravailable;
#endif

	while (TRUE) {
		mtx_lock(&vm_daemon_mtx);
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep",
#ifdef RACCT
		    racct_enable ? hz : 0
#else
		    0
#endif
		);
		swapout_flags = vm_pageout_req_swapout;
		vm_pageout_req_swapout = 0;
		mtx_unlock(&vm_daemon_mtx);
		if (swapout_flags != 0) {
			/*
			 * Drain the per-CPU page queue batches as a deadlock
			 * avoidance measure.
			 */
			if ((swapout_flags & VM_SWAP_NORMAL) != 0)
				vm_page_drain_pqbatch();
			swapout_procs(swapout_flags);
		}

		/*
		 * Scan the processes for those exceeding their rlimits or
		 * that are swapped out -- deactivate pages in those cases.
		 */
		tryagain = 0;
		attempts = 0;
again:
		attempts++;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			vm_pindex_t limit, size;

			/*
			 * If this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL ||
			    p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * If the process is in a non-running type state,
			 * don't touch it.
			 */
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td) &&
				    !TD_IS_SUSPENDED(td)) {
					thread_unlock(td);
					breakout = 1;
					break;
				}
				thread_unlock(td);
			}
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * Get a limit.
			 */
			lim_rlimit_proc(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out).
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */
			vm = vmspace_acquire_ref(p);
			_PHOLD_LITE(p);
			PROC_UNLOCK(p);
			if (vm == NULL) {
				PRELE(p);
				continue;
			}
			sx_sunlock(&allproc_lock);

			size = vmspace_resident_count(vm);
			if (size >= limit) {
				vm_swapout_map_deactivate_pages(
				    &vm->vm_map, limit);
				size = vmspace_resident_count(vm);
			}
#ifdef RACCT
			if (racct_enable) {
				rsize = IDX_TO_OFF(size);
				PROC_LOCK(p);
				if (p->p_state == PRS_NORMAL)
					racct_set(p, RACCT_RSS, rsize);
				ravailable = racct_get_available(p, RACCT_RSS);
				PROC_UNLOCK(p);
				if (rsize > ravailable) {
					/*
					 * Don't be overly aggressive; this
					 * might be an innocent process,
					 * and the limit could've been exceeded
					 * by some memory hog.  Don't try
					 * to deactivate more than 1/4th
					 * of the process' resident set size.
					 */
					if (attempts <= 8) {
						if (ravailable < rsize -
						    (rsize / 4)) {
							ravailable = rsize -
							    (rsize / 4);
						}
					}
					vm_swapout_map_deactivate_pages(
					    &vm->vm_map,
					    OFF_TO_IDX(ravailable));
					/* Update RSS usage after paging out. */
					size = vmspace_resident_count(vm);
					rsize = IDX_TO_OFF(size);
					PROC_LOCK(p);
					if (p->p_state == PRS_NORMAL)
						racct_set(p, RACCT_RSS, rsize);
					PROC_UNLOCK(p);
					if (rsize > ravailable)
						tryagain = 1;
				}
			}
#endif
			vmspace_free(vm);
			sx_slock(&allproc_lock);
			PRELE(p);
		}
		sx_sunlock(&allproc_lock);
		if (tryagain != 0 && attempts <= 10) {
			maybe_yield();
			goto again;
		}
	}
}
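
/*
 * Each pass of the loop above clamps a process to its RLIMIT_RSS by calling
 * vm_swapout_map_deactivate_pages() with that limit as the target resident
 * page count.  With RACCT enabled the daemon also wakes every second,
 * reports the observed RSS, and keeps retrying (up to 10 attempts, trimming
 * at most 1/4 of the resident set per early attempt) while a process
 * remains over its RACCT_RSS allotment.
 */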

/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_dirty(m);
		vm_page_lock(m);
		vm_page_unwire(m, PQ_LAUNDRY);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
}
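
/*
 * Note that the stack pages are not written to swap here.  They are marked
 * dirty, unwired into the laundry queue, and unmapped from the kernel
 * address space; the laundry thread writes them to the swap device later,
 * if and when memory pressure requires it.
 */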

/*
 * Bring the kernel stack for a specified thread back in.
 */
static void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t ma[KSTACK_MAX_PAGES];
	int a, count, i, j, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_WLOCK(ksobj);
	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED, ma,
	    pages);
	for (i = 0; i < pages;) {
		vm_page_assert_xbusied(ma[i]);
		if (ma[i]->valid == VM_PAGE_BITS_ALL) {
			vm_page_xunbusy(ma[i]);
			i++;
			continue;
		}
		vm_object_pip_add(ksobj, 1);
		for (j = i + 1; j < pages; j++)
			if (ma[j]->valid == VM_PAGE_BITS_ALL)
				break;
		rv = vm_pager_has_page(ksobj, ma[i]->pindex, NULL, &a);
		KASSERT(rv == 1, ("%s: missing page %p", __func__, ma[i]));
		count = min(a + 1, j - i);
		rv = vm_pager_get_pages(ksobj, ma + i, count, NULL, NULL);
		KASSERT(rv == VM_PAGER_OK, ("%s: cannot get kstack for proc %d",
		    __func__, td->td_proc->p_pid));
		vm_object_pip_wakeup(ksobj);
		for (j = i; j < i + count; j++)
			vm_page_xunbusy(ma[j]);
		i += count;
	}
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}
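
/*
 * Stack pages that remained resident (still fully valid) are simply rewired
 * by vm_page_grab_pages(); invalid runs are read back from swap in clusters
 * bounded both by the next already-valid page and by the pager's estimate
 * from vm_pager_has_page().  Finally the stack is remapped into the kernel
 * address space with pmap_qenter().
 */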

void
faultin(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}
	if ((p->p_flag & P_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);

		/*
		 * We hold no lock here because the list of threads
		 * can not change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);
		PROC_LOCK(p);
		swapclear(p);
		p->p_swtick = ticks;

		wakeup(&p->p_flag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
}
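
/*
 * faultin() is called with the process lock held, from swapper() below and
 * from code outside this file that must make a swapped-out process resident
 * (e.g., presumably, the PHOLD() path).  Bumping p_lock for the duration of
 * the swap-in keeps swapout_procs() from selecting the process again before
 * it has finished coming back in.
 */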

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
void
swapper(void)
{
	struct proc *p, *pp;
	struct thread *td;
	int ppri, pri, slptime, swtime;

loop:
	if (vm_page_count_min()) {
		vm_wait_min();
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW ||
		    p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * If this thread is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "swapin", MAXSLP * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	/*
	 * We would like to bring someone in.
	 */
	faultin(p);
	PROC_UNLOCK(p);
	goto loop;
}
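
/*
 * The candidate chosen above is the swapped-out process whose swapped-out
 * time plus longest thread sleep time is greatest, biased by the nice value
 * unless a thread has explicitly requested swapin (TDF_SWAPINREQ).  When
 * free memory is below the minimum threshold the swapper waits in
 * vm_wait_min() rather than make the shortage worse.
 */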

/*
 * First, if any processes have been sleeping or stopped for at least
 * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
 * no such processes exist, then the longest-sleeping or stopped
 * process is swapped out.  Finally, and only as a last resort, if
 * there are no sleeping or stopped processes, the longest-resident
 * process is swapped out.
 */
static void
swapout_procs(int action)
{
	struct proc *p;
	struct thread *td;
	int slptime;
	bool didswap, doswap;

	MPASS((action & (VM_SWAP_NORMAL | VM_SWAP_IDLE)) != 0);

	didswap = false;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Filter out not yet fully constructed processes.  Do
		 * not swap out held processes.  Avoid processes which
		 * are system, exiting, execing, traced, already swapped
		 * out or are in the process of being swapped in or out.
		 */
		PROC_LOCK(p);
		if (p->p_state != PRS_NORMAL || p->p_lock != 0 || (p->p_flag &
		    (P_SYSTEM | P_WEXIT | P_INEXEC | P_STOPPED_SINGLE |
		    P_TRACED | P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) !=
		    P_INMEM) {
			PROC_UNLOCK(p);
			continue;
		}

		/*
		 * Further consideration of this process for swap out
		 * requires iterating over its threads.  We release
		 * allproc_lock here so that process creation and
		 * destruction are not blocked while we iterate.
		 *
		 * To later reacquire allproc_lock and resume
		 * iteration over the allproc list, we will first have
		 * to release the lock on the process.  We place a
		 * hold on the process so that it remains in the
		 * allproc list while it is unlocked.
		 */
		_PHOLD_LITE(p);
		sx_sunlock(&allproc_lock);

		/*
		 * Do not swapout a realtime process.
		 * Guarantee swap_idle_threshold1 time in memory.
		 * If the system is under memory stress, or if we are
		 * swapping idle processes >= swap_idle_threshold2,
		 * then swap the process out.
		 */
		doswap = true;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			slptime = (ticks - td->td_slptick) / hz;
			if (PRI_IS_REALTIME(td->td_pri_class) ||
			    slptime < swap_idle_threshold1 ||
			    !thread_safetoswapout(td) ||
			    ((action & VM_SWAP_NORMAL) == 0 &&
			    slptime < swap_idle_threshold2))
				doswap = false;
			thread_unlock(td);
			if (!doswap)
				break;
		}
		if (doswap && swapout(p) == 0)
			didswap = true;

		PROC_UNLOCK(p);
		sx_slock(&allproc_lock);
		PRELE(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapclear(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td))
			if (setrunnable(td)) {
#ifdef INVARIANTS
				/*
				 * XXX: We just cleared TDI_SWAPPED
				 * above and set TDF_INMEM, so this
				 * should never happen.
				 */
				panic("not waking up swapper");
#endif
			}
		thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN | P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

static int
swapout(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) ==
	    P_INMEM, ("swapout: lost a swapout race?"));

	/*
	 * Remember the resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	p->p_swtick = ticks;
	return (0);
}
901