xref: /freebsd/sys/vm/vm_pageout.c (revision 38069501)
1 /*-
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  * Copyright (c) 2005 Yahoo! Technologies Norway AS
9  * All rights reserved.
10  *
11  * This code is derived from software contributed to Berkeley by
12  * The Mach Operating System project at Carnegie-Mellon University.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. All advertising materials mentioning features or use of this software
23  *    must display the following acknowledgement:
24  *	This product includes software developed by the University of
25  *	California, Berkeley and its contributors.
26  * 4. Neither the name of the University nor the names of its contributors
27  *    may be used to endorse or promote products derived from this software
28  *    without specific prior written permission.
29  *
30  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40  * SUCH DAMAGE.
41  *
42  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
43  *
44  *
45  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46  * All rights reserved.
47  *
48  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
49  *
50  * Permission to use, copy, modify and distribute this software and
51  * its documentation is hereby granted, provided that both the copyright
52  * notice and this permission notice appear in all copies of the
53  * software, derivative works or modified versions, and any portions
54  * thereof, and that both notices appear in supporting documentation.
55  *
56  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
57  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
58  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
59  *
60  * Carnegie Mellon requests users of this software to return to
61  *
62  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
63  *  School of Computer Science
64  *  Carnegie Mellon University
65  *  Pittsburgh PA 15213-3890
66  *
67  * any improvements or extensions that they make and grant Carnegie the
68  * rights to redistribute these changes.
69  */
70 
71 /*
72  *	The proverbial page-out daemon.
73  */
74 
75 #include <sys/cdefs.h>
76 __FBSDID("$FreeBSD$");
77 
78 #include "opt_vm.h"
79 
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 #include <sys/eventhandler.h>
84 #include <sys/lock.h>
85 #include <sys/mutex.h>
86 #include <sys/proc.h>
87 #include <sys/kthread.h>
88 #include <sys/ktr.h>
89 #include <sys/mount.h>
90 #include <sys/racct.h>
91 #include <sys/resourcevar.h>
92 #include <sys/sched.h>
93 #include <sys/sdt.h>
94 #include <sys/signalvar.h>
95 #include <sys/smp.h>
96 #include <sys/time.h>
97 #include <sys/vnode.h>
98 #include <sys/vmmeter.h>
99 #include <sys/rwlock.h>
100 #include <sys/sx.h>
101 #include <sys/sysctl.h>
102 
103 #include <vm/vm.h>
104 #include <vm/vm_param.h>
105 #include <vm/vm_object.h>
106 #include <vm/vm_page.h>
107 #include <vm/vm_map.h>
108 #include <vm/vm_pageout.h>
109 #include <vm/vm_pager.h>
110 #include <vm/vm_phys.h>
111 #include <vm/swap_pager.h>
112 #include <vm/vm_extern.h>
113 #include <vm/uma.h>
114 
115 /*
116  * System initialization
117  */
118 
119 /* the kernel process "vm_pageout" */
120 static void vm_pageout(void);
121 static void vm_pageout_init(void);
122 static int vm_pageout_clean(vm_page_t m, int *numpagedout);
123 static int vm_pageout_cluster(vm_page_t m);
124 static bool vm_pageout_scan(struct vm_domain *vmd, int pass);
125 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
126     int starting_page_shortage);
127 
128 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
129     NULL);
130 
131 struct proc *pageproc;
132 
133 static struct kproc_desc page_kp = {
134 	"pagedaemon",
135 	vm_pageout,
136 	&pageproc
137 };
138 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
139     &page_kp);
140 
141 SDT_PROVIDER_DEFINE(vm);
142 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
143 
144 /* Pagedaemon activity rates, in subdivisions of one second. */
145 #define	VM_LAUNDER_RATE		10
146 #define	VM_INACT_SCAN_RATE	2
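/*
 * For example, a VM_LAUNDER_RATE of 10 means the laundry thread pauses for
 * hz / VM_LAUNDER_RATE ticks between laundering runs, giving up to ten runs
 * per second; see the pause() call in vm_pageout_laundry_worker() below.
 */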
147 
148 int vm_pageout_deficit;		/* Estimated number of pages in deficit */
149 u_int vm_pageout_wakeup_thresh;
150 static int vm_pageout_oom_seq = 12;
151 bool vm_pageout_wanted;		/* Event on which pageout daemon sleeps */
152 bool vm_pages_needed;		/* Are threads waiting for free pages? */
153 
154 /* Pending request for dirty page laundering. */
155 static enum {
156 	VM_LAUNDRY_IDLE,
157 	VM_LAUNDRY_BACKGROUND,
158 	VM_LAUNDRY_SHORTFALL
159 } vm_laundry_request = VM_LAUNDRY_IDLE;
160 
161 static int vm_pageout_update_period;
162 static int disable_swap_pageouts;
163 static int lowmem_period = 10;
164 static time_t lowmem_uptime;
165 static int swapdev_enabled;
166 
167 static int vm_panic_on_oom = 0;
168 
169 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
170 	CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
171 	"panic on out of memory instead of killing the largest process");
172 
173 SYSCTL_INT(_vm, OID_AUTO, pageout_wakeup_thresh,
174 	CTLFLAG_RW, &vm_pageout_wakeup_thresh, 0,
175 	"free page threshold for waking up the pageout daemon");
176 
177 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
178 	CTLFLAG_RW, &vm_pageout_update_period, 0,
179 	"Maximum active LRU update period");
180 
181 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RW, &lowmem_period, 0,
182 	"Low memory callback period");
183 
184 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
185 	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
186 
187 static int pageout_lock_miss;
188 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
189 	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
190 
191 SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
192 	CTLFLAG_RW, &vm_pageout_oom_seq, 0,
193 	"back-to-back calls to oom detector to start OOM");
194 
195 static int act_scan_laundry_weight = 3;
196 SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RW,
197     &act_scan_laundry_weight, 0,
198     "weight given to clean vs. dirty pages in active queue scans");
199 
200 static u_int vm_background_launder_target;
201 SYSCTL_UINT(_vm, OID_AUTO, background_launder_target, CTLFLAG_RW,
202     &vm_background_launder_target, 0,
203     "background laundering target, in pages");
204 
205 static u_int vm_background_launder_rate = 4096;
206 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RW,
207     &vm_background_launder_rate, 0,
208     "background laundering rate, in kilobytes per second");
209 
210 static u_int vm_background_launder_max = 20 * 1024;
211 SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RW,
212     &vm_background_launder_max, 0, "background laundering cap, in kilobytes");
213 
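/* Maximum number of pages gathered into a single pageout cluster. */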
214 int vm_pageout_page_count = 32;
215 
216 int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
217 SYSCTL_INT(_vm, OID_AUTO, max_wired,
218 	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
219 
220 static u_int isqrt(u_int num);
221 static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
222 static int vm_pageout_launder(struct vm_domain *vmd, int launder,
223     bool in_shortfall);
224 static void vm_pageout_laundry_worker(void *arg);
225 static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
226 
227 /*
228  * Initialize a dummy page for marking the caller's place in the specified
229  * paging queue.  In principle, this function only needs to set the flag
230  * PG_MARKER.  Nonetheless, it write busies and initializes the hold count
231  * to one as safety precautions.
232  */
233 static void
234 vm_pageout_init_marker(vm_page_t marker, u_short queue)
235 {
236 
237 	bzero(marker, sizeof(*marker));
238 	marker->flags = PG_MARKER;
239 	marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
240 	marker->queue = queue;
241 	marker->hold_count = 1;
242 }
243 
244 /*
245  * vm_pageout_fallback_object_lock:
246  *
247  * Lock vm object currently associated with `m'. VM_OBJECT_TRYWLOCK is
248  * known to have failed and page queue must be either PQ_ACTIVE or
249  * PQ_INACTIVE.  To avoid lock order violation, unlock the page queue
250  * while locking the vm object.  Use marker page to detect page queue
251  * changes and maintain notion of next page on page queue.  Return
252  * TRUE if no changes were detected, FALSE otherwise.  vm object is
253  * locked on return.
254  *
255  * This function depends on both the lock portion of struct vm_object
256  * and normal struct vm_page being type stable.
257  */
258 static boolean_t
259 vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
260 {
261 	struct vm_page marker;
262 	struct vm_pagequeue *pq;
263 	boolean_t unchanged;
264 	u_short queue;
265 	vm_object_t object;
266 
267 	queue = m->queue;
268 	vm_pageout_init_marker(&marker, queue);
269 	pq = vm_page_pagequeue(m);
270 	object = m->object;
271 
272 	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
273 	vm_pagequeue_unlock(pq);
274 	vm_page_unlock(m);
275 	VM_OBJECT_WLOCK(object);
276 	vm_page_lock(m);
277 	vm_pagequeue_lock(pq);
278 
279 	/*
280 	 * The page's object might have changed, and/or the page might
281 	 * have moved from its original position in the queue.  If the
282 	 * page's object has changed, then the caller should abandon
283 	 * processing the page because the wrong object lock was
284 	 * acquired.  Use the marker's plinks.q, not the page's, to
285 	 * determine if the page has been moved.  The state of the
286 	 * page's plinks.q can be indeterminate; whereas, the marker's
287 	 * plinks.q must be valid.
288 	 */
289 	*next = TAILQ_NEXT(&marker, plinks.q);
290 	unchanged = m->object == object &&
291 	    m == TAILQ_PREV(&marker, pglist, plinks.q);
292 	KASSERT(!unchanged || m->queue == queue,
293 	    ("page %p queue %d %d", m, queue, m->queue));
294 	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
295 	return (unchanged);
296 }
297 
298 /*
299  * Lock the page while holding the page queue lock.  Use marker page
300  * to detect page queue changes and maintain notion of next page on
301  * page queue.  Return TRUE if no changes were detected, FALSE
302  * otherwise.  The page is locked on return. The page queue lock might
303  * be dropped and reacquired.
304  *
305  * This function depends on normal struct vm_page being type stable.
306  */
307 static boolean_t
308 vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
309 {
310 	struct vm_page marker;
311 	struct vm_pagequeue *pq;
312 	boolean_t unchanged;
313 	u_short queue;
314 
315 	vm_page_lock_assert(m, MA_NOTOWNED);
316 	if (vm_page_trylock(m))
317 		return (TRUE);
318 
319 	queue = m->queue;
320 	vm_pageout_init_marker(&marker, queue);
321 	pq = vm_page_pagequeue(m);
322 
323 	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
324 	vm_pagequeue_unlock(pq);
325 	vm_page_lock(m);
326 	vm_pagequeue_lock(pq);
327 
328 	/* Page queue might have changed. */
329 	*next = TAILQ_NEXT(&marker, plinks.q);
330 	unchanged = m == TAILQ_PREV(&marker, pglist, plinks.q);
331 	KASSERT(!unchanged || m->queue == queue,
332 	    ("page %p queue %d %d", m, queue, m->queue));
333 	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
334 	return (unchanged);
335 }
336 
337 /*
338  * Scan for pages at adjacent offsets within the given page's object that are
339  * eligible for laundering, form a cluster of these pages and the given page,
340  * and launder that cluster.
341  */
342 static int
343 vm_pageout_cluster(vm_page_t m)
344 {
345 	vm_object_t object;
346 	vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
347 	vm_pindex_t pindex;
348 	int ib, is, page_base, pageout_count;
349 
350 	vm_page_assert_locked(m);
351 	object = m->object;
352 	VM_OBJECT_ASSERT_WLOCKED(object);
353 	pindex = m->pindex;
354 
355 	/*
356 	 * We can't clean the page if it is busy or held.
357 	 */
358 	vm_page_assert_unbusied(m);
359 	KASSERT(m->hold_count == 0, ("page %p is held", m));
360 
361 	pmap_remove_write(m);
362 	vm_page_unlock(m);
363 
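	/*
	 * mc[] holds the cluster being formed: the target page is placed at
	 * index vm_pageout_page_count, the backward scan below fills slots
	 * beneath it and the forward scan fills slots above it, so the final
	 * cluster occupies mc[page_base .. page_base + pageout_count - 1] in
	 * ascending pindex order.
	 */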
364 	mc[vm_pageout_page_count] = pb = ps = m;
365 	pageout_count = 1;
366 	page_base = vm_pageout_page_count;
367 	ib = 1;
368 	is = 1;
369 
370 	/*
371 	 * We can cluster only if the page is not clean, busy, or held, and
372 	 * the page is in the laundry queue.
373 	 *
374 	 * During heavy mmap/modification loads the pageout
375 	 * daemon can really fragment the underlying file
376 	 * due to flushing pages out of order and not trying to
377 	 * align the clusters (which leaves sporadic out-of-order
378 	 * holes).  To solve this problem we do the reverse scan
379 	 * first and attempt to align our cluster, then do a
380 	 * forward scan if room remains.
381 	 */
382 more:
383 	while (ib != 0 && pageout_count < vm_pageout_page_count) {
384 		if (ib > pindex) {
385 			ib = 0;
386 			break;
387 		}
388 		if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
389 			ib = 0;
390 			break;
391 		}
392 		vm_page_test_dirty(p);
393 		if (p->dirty == 0) {
394 			ib = 0;
395 			break;
396 		}
397 		vm_page_lock(p);
398 		if (!vm_page_in_laundry(p) ||
399 		    p->hold_count != 0) {	/* may be undergoing I/O */
400 			vm_page_unlock(p);
401 			ib = 0;
402 			break;
403 		}
404 		pmap_remove_write(p);
405 		vm_page_unlock(p);
406 		mc[--page_base] = pb = p;
407 		++pageout_count;
408 		++ib;
409 
410 		/*
411 		 * We are at an alignment boundary.  Stop here, and switch
412 		 * directions.  Do not clear ib.
413 		 */
414 		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
415 			break;
416 	}
417 	while (pageout_count < vm_pageout_page_count &&
418 	    pindex + is < object->size) {
419 		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
420 			break;
421 		vm_page_test_dirty(p);
422 		if (p->dirty == 0)
423 			break;
424 		vm_page_lock(p);
425 		if (!vm_page_in_laundry(p) ||
426 		    p->hold_count != 0) {	/* may be undergoing I/O */
427 			vm_page_unlock(p);
428 			break;
429 		}
430 		pmap_remove_write(p);
431 		vm_page_unlock(p);
432 		mc[page_base + pageout_count] = ps = p;
433 		++pageout_count;
434 		++is;
435 	}
436 
437 	/*
438 	 * If we exhausted our forward scan, continue with the reverse scan
439 	 * when possible, even past an alignment boundary.  This catches
440 	 * boundary conditions.
441 	 */
442 	if (ib != 0 && pageout_count < vm_pageout_page_count)
443 		goto more;
444 
445 	return (vm_pageout_flush(&mc[page_base], pageout_count,
446 	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
447 }
448 
449 /*
450  * vm_pageout_flush() - launder the given pages
451  *
452  *	The given pages are laundered.  Note that we set up for the start of
453  *	I/O (i.e., busy the page), mark it read-only, and bump the object
454  *	reference count all in here rather than in the parent.  If we want
455  *	the parent to do more sophisticated things we may have to change
456  *	the ordering.
457  *
458  *	The returned runlen is the count of pages between mreq and the first
459  *	page after mreq with status VM_PAGER_AGAIN.
460  *	*eio is set to TRUE if the pager returned VM_PAGER_ERROR or
461  *	VM_PAGER_FAIL for any page within that run.
462  */
463 int
464 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
465     boolean_t *eio)
466 {
467 	vm_object_t object = mc[0]->object;
468 	int pageout_status[count];
469 	int numpagedout = 0;
470 	int i, runlen;
471 
472 	VM_OBJECT_ASSERT_WLOCKED(object);
473 
474 	/*
475 	 * Initiate I/O.  Mark the pages busy and verify that they're valid
476 	 * and read-only.
477 	 *
478 	 * We do not have to fix up the clean/dirty bits here... we can
479 	 * allow the pager to do it after the I/O completes.
480 	 *
481 	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
482 	 * edge case with file fragments.
483 	 */
484 	for (i = 0; i < count; i++) {
485 		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
486 		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
487 			mc[i], i, count));
488 		KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
489 		    ("vm_pageout_flush: writeable page %p", mc[i]));
490 		vm_page_sbusy(mc[i]);
491 	}
492 	vm_object_pip_add(object, count);
493 
494 	vm_pager_put_pages(object, mc, count, flags, pageout_status);
495 
496 	runlen = count - mreq;
497 	if (eio != NULL)
498 		*eio = FALSE;
499 	for (i = 0; i < count; i++) {
500 		vm_page_t mt = mc[i];
501 
502 		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
503 		    !pmap_page_is_write_mapped(mt),
504 		    ("vm_pageout_flush: page %p is not write protected", mt));
505 		switch (pageout_status[i]) {
506 		case VM_PAGER_OK:
507 			vm_page_lock(mt);
508 			if (vm_page_in_laundry(mt))
509 				vm_page_deactivate_noreuse(mt);
510 			vm_page_unlock(mt);
511 			/* FALLTHROUGH */
512 		case VM_PAGER_PEND:
513 			numpagedout++;
514 			break;
515 		case VM_PAGER_BAD:
516 			/*
517 			 * The page is outside the object's range.  We pretend
518 			 * that the page out worked and clean the page, so the
519 			 * changes will be lost if the page is reclaimed by
520 			 * the page daemon.
521 			 */
522 			vm_page_undirty(mt);
523 			vm_page_lock(mt);
524 			if (vm_page_in_laundry(mt))
525 				vm_page_deactivate_noreuse(mt);
526 			vm_page_unlock(mt);
527 			break;
528 		case VM_PAGER_ERROR:
529 		case VM_PAGER_FAIL:
530 			/*
531 			 * If the page couldn't be paged out to swap because the
532 			 * pager wasn't able to find space, place the page in
533 			 * the PQ_UNSWAPPABLE holding queue.  This is an
534 			 * optimization that prevents the page daemon from
535 			 * wasting CPU cycles on pages that cannot be reclaimed
536 			 * because no swap device is configured.
537 			 *
538 			 * Otherwise, reactivate the page so that it doesn't
539 			 * clog the laundry and inactive queues.  (We will try
540 			 * paging it out again later.)
541 			 */
542 			vm_page_lock(mt);
543 			if (object->type == OBJT_SWAP &&
544 			    pageout_status[i] == VM_PAGER_FAIL) {
545 				vm_page_unswappable(mt);
546 				numpagedout++;
547 			} else
548 				vm_page_activate(mt);
549 			vm_page_unlock(mt);
550 			if (eio != NULL && i >= mreq && i - mreq < runlen)
551 				*eio = TRUE;
552 			break;
553 		case VM_PAGER_AGAIN:
554 			if (i >= mreq && i - mreq < runlen)
555 				runlen = i - mreq;
556 			break;
557 		}
558 
559 		/*
560 		 * If the operation is still going, leave the page busy to
561 		 * block all other accesses. Also, leave the paging in
562 		 * progress indicator set so that we don't attempt an object
563 		 * collapse.
564 		 */
565 		if (pageout_status[i] != VM_PAGER_PEND) {
566 			vm_object_pip_wakeup(object);
567 			vm_page_sunbusy(mt);
568 		}
569 	}
570 	if (prunlen != NULL)
571 		*prunlen = runlen;
572 	return (numpagedout);
573 }
574 
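/*
 * Note that a swap device has been added.  swapdev_enabled lets the laundry
 * and pageout threads cheaply check whether any swap device is configured,
 * e.g. when deciding whether PQ_UNSWAPPABLE pages are worth revisiting.
 */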
575 static void
576 vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
577 {
578 
579 	atomic_store_rel_int(&swapdev_enabled, 1);
580 }
581 
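/*
 * Note that a swap device is being removed.  If it appears to be the last
 * configured device, clear swapdev_enabled.
 */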
582 static void
583 vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
584 {
585 
586 	if (swap_pager_nswapdev() == 1)
587 		atomic_store_rel_int(&swapdev_enabled, 0);
588 }
589 
590 /*
591  * Attempt to acquire all of the necessary locks to launder a page and
592  * then call through the clustering layer to PUTPAGES.  Wait a short
593  * time for a vnode lock.
594  *
595  * Requires the page and object locks on entry; releases both before return.
596  * Returns 0 on success and an errno otherwise.
597  */
598 static int
599 vm_pageout_clean(vm_page_t m, int *numpagedout)
600 {
601 	struct vnode *vp;
602 	struct mount *mp;
603 	vm_object_t object;
604 	vm_pindex_t pindex;
605 	int error, lockmode;
606 
607 	vm_page_assert_locked(m);
608 	object = m->object;
609 	VM_OBJECT_ASSERT_WLOCKED(object);
610 	error = 0;
611 	vp = NULL;
612 	mp = NULL;
613 
614 	/*
615 	 * The object is already known NOT to be dead.   It
616 	 * is possible for the vget() to block the whole
617 	 * pageout daemon, but the new low-memory handling
618 	 * code should prevent it.
619 	 *
620 	 * We can't wait forever for the vnode lock; we might
621 	 * deadlock due to a vn_read() getting stuck in
622 	 * vm_wait while holding this vnode.  We skip the
623 	 * vnode if we can't get it in a reasonable amount
624 	 * of time.
625 	 */
626 	if (object->type == OBJT_VNODE) {
627 		vm_page_unlock(m);
628 		vp = object->handle;
629 		if (vp->v_type == VREG &&
630 		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
631 			mp = NULL;
632 			error = EDEADLK;
633 			goto unlock_all;
634 		}
635 		KASSERT(mp != NULL,
636 		    ("vp %p with NULL v_mount", vp));
637 		vm_object_reference_locked(object);
638 		pindex = m->pindex;
639 		VM_OBJECT_WUNLOCK(object);
640 		lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
641 		    LK_SHARED : LK_EXCLUSIVE;
642 		if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
643 			vp = NULL;
644 			error = EDEADLK;
645 			goto unlock_mp;
646 		}
647 		VM_OBJECT_WLOCK(object);
648 		vm_page_lock(m);
649 		/*
650 		 * While the object and page were unlocked, the page
651 		 * may have been:
652 		 * (1) moved to a different queue,
653 		 * (2) reallocated to a different object,
654 		 * (3) reallocated to a different offset, or
655 		 * (4) cleaned.
656 		 */
657 		if (!vm_page_in_laundry(m) || m->object != object ||
658 		    m->pindex != pindex || m->dirty == 0) {
659 			vm_page_unlock(m);
660 			error = ENXIO;
661 			goto unlock_all;
662 		}
663 
664 		/*
665 		 * The page may have been busied or held while the object
666 		 * and page locks were released.
667 		 */
668 		if (vm_page_busied(m) || m->hold_count != 0) {
669 			vm_page_unlock(m);
670 			error = EBUSY;
671 			goto unlock_all;
672 		}
673 	}
674 
675 	/*
676 	 * If a page is dirty, then it is either being washed
677 	 * (but not yet cleaned) or it is still in the
678 	 * laundry.  If it is still in the laundry, then we
679 	 * start the cleaning operation.
680 	 */
681 	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
682 		error = EIO;
683 
684 unlock_all:
685 	VM_OBJECT_WUNLOCK(object);
686 
687 unlock_mp:
688 	vm_page_lock_assert(m, MA_NOTOWNED);
689 	if (mp != NULL) {
690 		if (vp != NULL)
691 			vput(vp);
692 		vm_object_deallocate(object);
693 		vn_finished_write(mp);
694 	}
695 
696 	return (error);
697 }
698 
699 /*
700  * Attempt to launder the specified number of pages.
701  *
702  * Returns the number of pages successfully laundered.
703  */
704 static int
705 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
706 {
707 	struct vm_pagequeue *pq;
708 	vm_object_t object;
709 	vm_page_t m, next;
710 	int act_delta, error, maxscan, numpagedout, starting_target;
711 	int vnodes_skipped;
712 	bool pageout_ok, queue_locked;
713 
714 	starting_target = launder;
715 	vnodes_skipped = 0;
716 
717 	/*
718 	 * Scan the laundry queues for pages eligible to be laundered.  We stop
719 	 * once the target number of dirty pages has been laundered, or once
720 	 * we've reached the end of the queue.  A single iteration of this loop
721 	 * may cause more than one page to be laundered because of clustering.
722 	 *
723 	 * maxscan ensures that we don't re-examine requeued pages.  Any
724 	 * additional pages written as part of a cluster are subtracted from
725 	 * maxscan since they must be taken from the laundry queue.
726 	 *
727 	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
728 	 * swap devices are configured.
729 	 */
730 	if (atomic_load_acq_int(&swapdev_enabled))
731 		pq = &vmd->vmd_pagequeues[PQ_UNSWAPPABLE];
732 	else
733 		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
734 
735 scan:
736 	vm_pagequeue_lock(pq);
737 	maxscan = pq->pq_cnt;
738 	queue_locked = true;
739 	for (m = TAILQ_FIRST(&pq->pq_pl);
740 	    m != NULL && maxscan-- > 0 && launder > 0;
741 	    m = next) {
742 		vm_pagequeue_assert_locked(pq);
743 		KASSERT(queue_locked, ("unlocked laundry queue"));
744 		KASSERT(vm_page_in_laundry(m),
745 		    ("page %p has an inconsistent queue", m));
746 		next = TAILQ_NEXT(m, plinks.q);
747 		if ((m->flags & PG_MARKER) != 0)
748 			continue;
749 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
750 		    ("PG_FICTITIOUS page %p cannot be in laundry queue", m));
751 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
752 		    ("VPO_UNMANAGED page %p cannot be in laundry queue", m));
753 		if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
754 			vm_page_unlock(m);
755 			continue;
756 		}
757 		object = m->object;
758 		if ((!VM_OBJECT_TRYWLOCK(object) &&
759 		    (!vm_pageout_fallback_object_lock(m, &next) ||
760 		    m->hold_count != 0)) || vm_page_busied(m)) {
761 			VM_OBJECT_WUNLOCK(object);
762 			vm_page_unlock(m);
763 			continue;
764 		}
765 
766 		/*
767 		 * Unlock the laundry queue, invalidating the 'next' pointer.
768 		 * Use a marker to remember our place in the laundry queue.
769 		 */
770 		TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_laundry_marker,
771 		    plinks.q);
772 		vm_pagequeue_unlock(pq);
773 		queue_locked = false;
774 
775 		/*
776 		 * Invalid pages can be easily freed.  They cannot be
777 		 * mapped; vm_page_free() asserts this.
778 		 */
779 		if (m->valid == 0)
780 			goto free_page;
781 
782 		/*
783 		 * If the page has been referenced and the object is not dead,
784 		 * reactivate or requeue the page depending on whether the
785 		 * object is mapped.
786 		 */
787 		if ((m->aflags & PGA_REFERENCED) != 0) {
788 			vm_page_aflag_clear(m, PGA_REFERENCED);
789 			act_delta = 1;
790 		} else
791 			act_delta = 0;
792 		if (object->ref_count != 0)
793 			act_delta += pmap_ts_referenced(m);
794 		else {
795 			KASSERT(!pmap_page_is_mapped(m),
796 			    ("page %p is mapped", m));
797 		}
798 		if (act_delta != 0) {
799 			if (object->ref_count != 0) {
800 				VM_CNT_INC(v_reactivated);
801 				vm_page_activate(m);
802 
803 				/*
804 				 * Increase the activation count if the page
805 				 * was referenced while in the laundry queue.
806 				 * This makes it less likely that the page will
807 				 * be returned prematurely to the inactive
808 				 * queue.
809  				 */
810 				m->act_count += act_delta + ACT_ADVANCE;
811 
812 				/*
813 				 * If this was a background laundering, count
814 				 * activated pages towards our target.  The
815 				 * purpose of background laundering is to ensure
816 				 * that pages are eventually cycled through the
817 				 * laundry queue, and an activation is a valid
818 				 * way out.
819 				 */
820 				if (!in_shortfall)
821 					launder--;
822 				goto drop_page;
823 			} else if ((object->flags & OBJ_DEAD) == 0)
824 				goto requeue_page;
825 		}
826 
827 		/*
828 		 * If the page appears to be clean at the machine-independent
829 		 * layer, then remove all of its mappings from the pmap in
830 		 * anticipation of freeing it.  If, however, any of the page's
831 		 * mappings allow write access, then the page may still be
832 	 * modified until the last of those mappings is removed.
833 		 */
834 		if (object->ref_count != 0) {
835 			vm_page_test_dirty(m);
836 			if (m->dirty == 0)
837 				pmap_remove_all(m);
838 		}
839 
840 		/*
841 		 * Clean pages are freed, and dirty pages are paged out unless
842 		 * they belong to a dead object.  Requeueing dirty pages from
843 		 * dead objects is pointless, as they are being paged out and
844 		 * freed by the thread that destroyed the object.
845 		 */
846 		if (m->dirty == 0) {
847 free_page:
848 			vm_page_free(m);
849 			VM_CNT_INC(v_dfree);
850 		} else if ((object->flags & OBJ_DEAD) == 0) {
851 			if (object->type != OBJT_SWAP &&
852 			    object->type != OBJT_DEFAULT)
853 				pageout_ok = true;
854 			else if (disable_swap_pageouts)
855 				pageout_ok = false;
856 			else
857 				pageout_ok = true;
858 			if (!pageout_ok) {
859 requeue_page:
860 				vm_pagequeue_lock(pq);
861 				queue_locked = true;
862 				vm_page_requeue_locked(m);
863 				goto drop_page;
864 			}
865 
866 			/*
867 			 * Form a cluster with adjacent, dirty pages from the
868 			 * same object, and page out that entire cluster.
869 			 *
870 			 * The adjacent, dirty pages must also be in the
871 			 * laundry.  However, their mappings are not checked
872 			 * for new references.  Consequently, a recently
873 			 * referenced page may be paged out.  However, that
874 			 * page will not be prematurely reclaimed.  After page
875 			 * out, the page will be placed in the inactive queue,
876 			 * where any new references will be detected and the
877 			 * page reactivated.
878 			 */
879 			error = vm_pageout_clean(m, &numpagedout);
880 			if (error == 0) {
881 				launder -= numpagedout;
882 				maxscan -= numpagedout - 1;
883 			} else if (error == EDEADLK) {
884 				pageout_lock_miss++;
885 				vnodes_skipped++;
886 			}
887 			goto relock_queue;
888 		}
889 drop_page:
890 		vm_page_unlock(m);
891 		VM_OBJECT_WUNLOCK(object);
892 relock_queue:
893 		if (!queue_locked) {
894 			vm_pagequeue_lock(pq);
895 			queue_locked = true;
896 		}
897 		next = TAILQ_NEXT(&vmd->vmd_laundry_marker, plinks.q);
898 		TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_laundry_marker, plinks.q);
899 	}
900 	vm_pagequeue_unlock(pq);
901 
902 	if (launder > 0 && pq == &vmd->vmd_pagequeues[PQ_UNSWAPPABLE]) {
903 		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
904 		goto scan;
905 	}
906 
907 	/*
908  * Wake up the sync daemon if we skipped a vnode in a writeable object
909 	 * and we didn't launder enough pages.
910 	 */
911 	if (vnodes_skipped > 0 && launder > 0)
912 		(void)speedup_syncer();
913 
914 	return (starting_target - launder);
915 }
916 
917 /*
918  * Compute the integer square root.
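 * This is the standard bit-by-bit (binary digit-by-digit) method, working
 * from the highest even bit position downward; e.g., isqrt(10) == 3.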
919  */
920 static u_int
921 isqrt(u_int num)
922 {
923 	u_int bit, root, tmp;
924 
925 	bit = 1u << ((NBBY * sizeof(u_int)) - 2);
926 	while (bit > num)
927 		bit >>= 2;
928 	root = 0;
929 	while (bit != 0) {
930 		tmp = root + bit;
931 		root >>= 1;
932 		if (num >= tmp) {
933 			num -= tmp;
934 			root += bit;
935 		}
936 		bit >>= 2;
937 	}
938 	return (root);
939 }
940 
941 /*
942  * Perform the work of the laundry thread: periodically wake up and determine
943  * whether any pages need to be laundered.  If so, determine the number of pages
944  * that need to be laundered, and launder them.
945  */
946 static void
947 vm_pageout_laundry_worker(void *arg)
948 {
949 	struct vm_domain *domain;
950 	struct vm_pagequeue *pq;
951 	uint64_t nclean, ndirty;
952 	u_int last_launder, wakeups;
953 	int domidx, last_target, launder, shortfall, shortfall_cycle, target;
954 	bool in_shortfall;
955 
956 	domidx = (uintptr_t)arg;
957 	domain = &vm_dom[domidx];
958 	pq = &domain->vmd_pagequeues[PQ_LAUNDRY];
959 	KASSERT(domain->vmd_segs != 0, ("domain without segments"));
960 	vm_pageout_init_marker(&domain->vmd_laundry_marker, PQ_LAUNDRY);
961 
962 	shortfall = 0;
963 	in_shortfall = false;
964 	shortfall_cycle = 0;
965 	target = 0;
966 	last_launder = 0;
967 
968 	/*
969 	 * Calls to these handlers are serialized by the swap syscall lock.
970 	 */
971 	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, domain,
972 	    EVENTHANDLER_PRI_ANY);
973 	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, domain,
974 	    EVENTHANDLER_PRI_ANY);
975 
976 	/*
977 	 * The pageout laundry worker is never done, so loop forever.
978 	 */
979 	for (;;) {
980 		KASSERT(target >= 0, ("negative target %d", target));
981 		KASSERT(shortfall_cycle >= 0,
982 		    ("negative cycle %d", shortfall_cycle));
983 		launder = 0;
984 		wakeups = VM_CNT_FETCH(v_pdwakeups);
985 
986 		/*
987 		 * First determine whether we need to launder pages to meet a
988 		 * shortage of free pages.
989 		 */
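		/*
		 * A shortfall target is spread over VM_LAUNDER_RATE /
		 * VM_INACT_SCAN_RATE laundering runs (five with the default
		 * rates), i.e. roughly one inactive queue scan period.
		 */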
990 		if (shortfall > 0) {
991 			in_shortfall = true;
992 			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
993 			target = shortfall;
994 		} else if (!in_shortfall)
995 			goto trybackground;
996 		else if (shortfall_cycle == 0 || vm_laundry_target() <= 0) {
997 			/*
998 			 * We recently entered shortfall and began laundering
999 			 * pages.  If we have completed that laundering run
1000 			 * (and we are no longer in shortfall) or we have met
1001 			 * our laundry target through other activity, then we
1002 			 * can stop laundering pages.
1003 			 */
1004 			in_shortfall = false;
1005 			target = 0;
1006 			goto trybackground;
1007 		}
1008 		last_launder = wakeups;
1009 		launder = target / shortfall_cycle--;
1010 		goto dolaundry;
1011 
1012 		/*
1013 		 * There's no immediate need to launder any pages; see if we
1014 		 * meet the conditions to perform background laundering:
1015 		 *
1016 		 * 1. The ratio of dirty to clean inactive pages exceeds the
1017 		 *    background laundering threshold and the pagedaemon has
1018 		 *    been woken up to reclaim pages since our last
1019 		 *    laundering, or
1020 		 * 2. we haven't yet reached the target of the current
1021 		 *    background laundering run.
1022 		 *
1023 		 * The background laundering threshold is not a constant.
1024 		 * Instead, it is a slowly growing function of the number of
1025 		 * page daemon wakeups since the last laundering.  Thus, as the
1026 		 * ratio of dirty to clean inactive pages grows, the amount of
1027 		 * memory pressure required to trigger laundering decreases.
1028 		 */
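		/*
		 * For example, if four page daemon wakeups have occurred
		 * since the last laundering (isqrt(4) == 2), background
		 * laundering begins once the dirty (laundry) pages number at
		 * least half of the clean (inactive + free) pages.
		 */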
1029 trybackground:
1030 		nclean = vm_cnt.v_inactive_count + vm_cnt.v_free_count;
1031 		ndirty = vm_cnt.v_laundry_count;
1032 		if (target == 0 && wakeups != last_launder &&
1033 		    ndirty * isqrt(wakeups - last_launder) >= nclean) {
1034 			target = vm_background_launder_target;
1035 		}
1036 
1037 		/*
1038 		 * We have a non-zero background laundering target.  If we've
1039 		 * laundered up to our maximum without observing a page daemon
1040 		 * wakeup, just stop.  This is a safety belt that ensures we
1041 		 * don't launder an excessive amount if memory pressure is low
1042 		 * and the ratio of dirty to clean pages is large.  Otherwise,
1043 		 * proceed at the background laundering rate.
1044 		 */
1045 		if (target > 0) {
1046 			if (wakeups != last_launder) {
1047 				last_launder = wakeups;
1048 				last_target = target;
1049 			} else if (last_target - target >=
1050 			    vm_background_launder_max * PAGE_SIZE / 1024) {
1051 				target = 0;
1052 			}
1053 			launder = vm_background_launder_rate * PAGE_SIZE / 1024;
1054 			launder /= VM_LAUNDER_RATE;
1055 			if (launder > target)
1056 				launder = target;
1057 		}
1058 
1059 dolaundry:
1060 		if (launder > 0) {
1061 			/*
1062 			 * Because of I/O clustering, the number of laundered
1063 			 * pages could exceed "target" by the maximum size of
1064 			 * a cluster minus one.
1065 			 */
1066 			target -= min(vm_pageout_launder(domain, launder,
1067 			    in_shortfall), target);
1068 			pause("laundp", hz / VM_LAUNDER_RATE);
1069 		}
1070 
1071 		/*
1072 		 * If we're not currently laundering pages and the page daemon
1073 		 * hasn't posted a new request, sleep until the page daemon
1074 		 * kicks us.
1075 		 */
1076 		vm_pagequeue_lock(pq);
1077 		if (target == 0 && vm_laundry_request == VM_LAUNDRY_IDLE)
1078 			(void)mtx_sleep(&vm_laundry_request,
1079 			    vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1080 
1081 		/*
1082 		 * If the pagedaemon has indicated that it's in shortfall, start
1083 		 * a shortfall laundering unless we're already in the middle of
1084 		 * one.  This may preempt a background laundering.
1085 		 */
1086 		if (vm_laundry_request == VM_LAUNDRY_SHORTFALL &&
1087 		    (!in_shortfall || shortfall_cycle == 0)) {
1088 			shortfall = vm_laundry_target() + vm_pageout_deficit;
1089 			target = 0;
1090 		} else
1091 			shortfall = 0;
1092 
1093 		if (target == 0)
1094 			vm_laundry_request = VM_LAUNDRY_IDLE;
1095 		vm_pagequeue_unlock(pq);
1096 	}
1097 }
1098 
1099 /*
1100  *	vm_pageout_scan does the dirty work for the pageout daemon.
1101  *
1102  *	pass == 0: Update active LRU/deactivate pages
1103  *	pass >= 1: Free inactive pages
1104  *
1105  * Returns true if pass was zero or enough pages were freed by the inactive
1106  * queue scan to meet the target.
1107  */
1108 static bool
1109 vm_pageout_scan(struct vm_domain *vmd, int pass)
1110 {
1111 	vm_page_t m, next;
1112 	struct vm_pagequeue *pq;
1113 	vm_object_t object;
1114 	long min_scan;
1115 	int act_delta, addl_page_shortage, deficit, inactq_shortage, maxscan;
1116 	int page_shortage, scan_tick, scanned, starting_page_shortage;
1117 	boolean_t queue_locked;
1118 
1119 	/*
1120 	 * If we need to reclaim memory, ask kernel caches to return
1121 	 * some.  We rate limit to avoid thrashing.
1122 	 */
1123 	if (vmd == &vm_dom[0] && pass > 0 &&
1124 	    (time_uptime - lowmem_uptime) >= lowmem_period) {
1125 		/*
1126 		 * Decrease registered cache sizes.
1127 		 */
1128 		SDT_PROBE0(vm, , , vm__lowmem_scan);
1129 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
1130 		/*
1131 		 * We do this explicitly after the caches have been
1132 		 * drained above.
1133 		 */
1134 		uma_reclaim();
1135 		lowmem_uptime = time_uptime;
1136 	}
1137 
1138 	/*
1139 	 * The addl_page_shortage is the number of temporarily
1140 	 * stuck pages in the inactive queue.  In other words, the
1141 	 * number of pages from the inactive count that should be
1142 	 * discounted in setting the target for the active queue scan.
1143 	 */
1144 	addl_page_shortage = 0;
1145 
1146 	/*
1147 	 * Calculate the number of pages that we want to free.  This number
1148 	 * can be negative if many pages are freed between the wakeup call to
1149 	 * the page daemon and this calculation.
1150 	 */
1151 	if (pass > 0) {
1152 		deficit = atomic_readandclear_int(&vm_pageout_deficit);
1153 		page_shortage = vm_paging_target() + deficit;
1154 	} else
1155 		page_shortage = deficit = 0;
1156 	starting_page_shortage = page_shortage;
1157 
1158 	/*
1159 	 * Start scanning the inactive queue for pages that we can free.  The
1160 	 * scan will stop when we reach the target or we have scanned the
1161 	 * entire queue.  (Note that m->act_count is not used to make
1162 	 * decisions for the inactive queue, only for the active queue.)
1163 	 */
1164 	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
1165 	maxscan = pq->pq_cnt;
1166 	vm_pagequeue_lock(pq);
1167 	queue_locked = TRUE;
1168 	for (m = TAILQ_FIRST(&pq->pq_pl);
1169 	     m != NULL && maxscan-- > 0 && page_shortage > 0;
1170 	     m = next) {
1171 		vm_pagequeue_assert_locked(pq);
1172 		KASSERT(queue_locked, ("unlocked inactive queue"));
1173 		KASSERT(vm_page_inactive(m), ("Inactive queue %p", m));
1174 
1175 		VM_CNT_INC(v_pdpages);
1176 		next = TAILQ_NEXT(m, plinks.q);
1177 
1178 		/*
1179 		 * skip marker pages
1180 		 */
1181 		if (m->flags & PG_MARKER)
1182 			continue;
1183 
1184 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
1185 		    ("Fictitious page %p cannot be in inactive queue", m));
1186 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1187 		    ("Unmanaged page %p cannot be in inactive queue", m));
1188 
1189 		/*
1190 		 * The page or object lock acquisitions fail if the
1191 		 * page was removed from the queue or moved to a
1192 		 * different position within the queue.  In either
1193 		 * case, addl_page_shortage should not be incremented.
1194 		 */
1195 		if (!vm_pageout_page_lock(m, &next))
1196 			goto unlock_page;
1197 		else if (m->hold_count != 0) {
1198 			/*
1199 			 * Held pages are essentially stuck in the
1200 			 * queue.  So, they ought to be discounted
1201 			 * from the inactive count.  See the
1202 			 * calculation of inactq_shortage before the
1203 			 * loop over the active queue below.
1204 			 */
1205 			addl_page_shortage++;
1206 			goto unlock_page;
1207 		}
1208 		object = m->object;
1209 		if (!VM_OBJECT_TRYWLOCK(object)) {
1210 			if (!vm_pageout_fallback_object_lock(m, &next))
1211 				goto unlock_object;
1212 			else if (m->hold_count != 0) {
1213 				addl_page_shortage++;
1214 				goto unlock_object;
1215 			}
1216 		}
1217 		if (vm_page_busied(m)) {
1218 			/*
1219 			 * Don't mess with busy pages.  Leave them at
1220 			 * the front of the queue.  Most likely, they
1221 			 * are being paged out and will leave the
1222 			 * queue shortly after the scan finishes.  So,
1223 			 * they ought to be discounted from the
1224 			 * inactive count.
1225 			 */
1226 			addl_page_shortage++;
1227 unlock_object:
1228 			VM_OBJECT_WUNLOCK(object);
1229 unlock_page:
1230 			vm_page_unlock(m);
1231 			continue;
1232 		}
1233 		KASSERT(m->hold_count == 0, ("Held page %p", m));
1234 
1235 		/*
1236 		 * Dequeue the inactive page and unlock the inactive page
1237 		 * queue, invalidating the 'next' pointer.  Dequeueing the
1238 		 * page here avoids a later reacquisition (and release) of
1239 		 * the inactive page queue lock when vm_page_activate(),
1240 		 * vm_page_free(), or vm_page_launder() is called.  Use a
1241 		 * marker to remember our place in the inactive queue.
1242 		 */
1243 		TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q);
1244 		vm_page_dequeue_locked(m);
1245 		vm_pagequeue_unlock(pq);
1246 		queue_locked = FALSE;
1247 
1248 		/*
1249 		 * Invalid pages can be easily freed. They cannot be
1250 		 * mapped; vm_page_free() asserts this.
1251 		 */
1252 		if (m->valid == 0)
1253 			goto free_page;
1254 
1255 		/*
1256 		 * If the page has been referenced and the object is not dead,
1257 		 * reactivate or requeue the page depending on whether the
1258 		 * object is mapped.
1259 		 */
1260 		if ((m->aflags & PGA_REFERENCED) != 0) {
1261 			vm_page_aflag_clear(m, PGA_REFERENCED);
1262 			act_delta = 1;
1263 		} else
1264 			act_delta = 0;
1265 		if (object->ref_count != 0) {
1266 			act_delta += pmap_ts_referenced(m);
1267 		} else {
1268 			KASSERT(!pmap_page_is_mapped(m),
1269 			    ("vm_pageout_scan: page %p is mapped", m));
1270 		}
1271 		if (act_delta != 0) {
1272 			if (object->ref_count != 0) {
1273 				VM_CNT_INC(v_reactivated);
1274 				vm_page_activate(m);
1275 
1276 				/*
1277 				 * Increase the activation count if the page
1278 				 * was referenced while in the inactive queue.
1279 				 * This makes it less likely that the page will
1280 				 * be returned prematurely to the inactive
1281 				 * queue.
1282  				 */
1283 				m->act_count += act_delta + ACT_ADVANCE;
1284 				goto drop_page;
1285 			} else if ((object->flags & OBJ_DEAD) == 0) {
1286 				vm_pagequeue_lock(pq);
1287 				queue_locked = TRUE;
1288 				m->queue = PQ_INACTIVE;
1289 				TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
1290 				vm_pagequeue_cnt_inc(pq);
1291 				goto drop_page;
1292 			}
1293 		}
1294 
1295 		/*
1296 		 * If the page appears to be clean at the machine-independent
1297 		 * layer, then remove all of its mappings from the pmap in
1298 		 * anticipation of freeing it.  If, however, any of the page's
1299 		 * mappings allow write access, then the page may still be
1300 		 * modified until the last of those mappings is removed.
1301 		 */
1302 		if (object->ref_count != 0) {
1303 			vm_page_test_dirty(m);
1304 			if (m->dirty == 0)
1305 				pmap_remove_all(m);
1306 		}
1307 
1308 		/*
1309 		 * Clean pages can be freed, but dirty pages must be sent back
1310 		 * to the laundry, unless they belong to a dead object.
1311 		 * Requeueing dirty pages from dead objects is pointless, as
1312 		 * they are being paged out and freed by the thread that
1313 		 * destroyed the object.
1314 		 */
1315 		if (m->dirty == 0) {
1316 free_page:
1317 			vm_page_free(m);
1318 			VM_CNT_INC(v_dfree);
1319 			--page_shortage;
1320 		} else if ((object->flags & OBJ_DEAD) == 0)
1321 			vm_page_launder(m);
1322 drop_page:
1323 		vm_page_unlock(m);
1324 		VM_OBJECT_WUNLOCK(object);
1325 		if (!queue_locked) {
1326 			vm_pagequeue_lock(pq);
1327 			queue_locked = TRUE;
1328 		}
1329 		next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q);
1330 		TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
1331 	}
1332 	vm_pagequeue_unlock(pq);
1333 
1334 	/*
1335 	 * Wake up the laundry thread so that it can perform any needed
1336 	 * laundering.  If we didn't meet our target, we're in shortfall and
1337 	 * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1338 	 * swap devices are configured, the laundry thread has no work to do, so
1339 	 * don't bother waking it up.
1340 	 */
1341 	if (vm_laundry_request == VM_LAUNDRY_IDLE &&
1342 	    starting_page_shortage > 0) {
1343 		pq = &vm_dom[0].vmd_pagequeues[PQ_LAUNDRY];
1344 		vm_pagequeue_lock(pq);
1345 		if (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled)) {
1346 			if (page_shortage > 0) {
1347 				vm_laundry_request = VM_LAUNDRY_SHORTFALL;
1348 				VM_CNT_INC(v_pdshortfalls);
1349 			} else if (vm_laundry_request != VM_LAUNDRY_SHORTFALL)
1350 				vm_laundry_request = VM_LAUNDRY_BACKGROUND;
1351 			wakeup(&vm_laundry_request);
1352 		}
1353 		vm_pagequeue_unlock(pq);
1354 	}
1355 
1356 	/*
1357 	 * Wake up the swapout daemon if we didn't free the targeted number of
1358 	 * pages.
1359 	 */
1360 	if (page_shortage > 0)
1361 		vm_swapout_run();
1362 
1363 	/*
1364 	 * If the inactive queue scan fails repeatedly to meet its
1365 	 * target, kill the largest process.
1366 	 */
1367 	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
1368 
1369 	/*
1370 	 * Compute the number of pages we want to try to move from the
1371 	 * active queue to either the inactive or laundry queue.
1372 	 *
1373 	 * When scanning active pages, we make clean pages count more heavily
1374 	 * towards the page shortage than dirty pages.  This is because dirty
1375 	 * pages must be laundered before they can be reused and thus have less
1376 	 * utility when attempting to quickly alleviate a shortage.  However,
1377 	 * this weighting also causes the scan to deactivate dirty pages
1378 	 * more aggressively, improving the effectiveness of clustering and
1379 	 * ensuring that they can eventually be reused.
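	 * For example, with the default weight of three, deactivating one
	 * clean page counts as much toward meeting the shortage as moving
	 * three dirty pages to the laundry.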
1380 	 */
1381 	inactq_shortage = vm_cnt.v_inactive_target - (vm_cnt.v_inactive_count +
1382 	    vm_cnt.v_laundry_count / act_scan_laundry_weight) +
1383 	    vm_paging_target() + deficit + addl_page_shortage;
1384 	page_shortage *= act_scan_laundry_weight;
1385 
1386 	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1387 	vm_pagequeue_lock(pq);
1388 	maxscan = pq->pq_cnt;
1389 
1390 	/*
1391 	 * If we're just idle polling, attempt to visit every
1392 	 * active page within 'update_period' seconds.
1393 	 */
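	/*
	 * min_scan is the number of active pages that must be visited on
	 * this pass for the entire queue to be covered once per update
	 * period: pq_cnt * elapsed ticks / (hz * vm_pageout_update_period).
	 */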
1394 	scan_tick = ticks;
1395 	if (vm_pageout_update_period != 0) {
1396 		min_scan = pq->pq_cnt;
1397 		min_scan *= scan_tick - vmd->vmd_last_active_scan;
1398 		min_scan /= hz * vm_pageout_update_period;
1399 	} else
1400 		min_scan = 0;
1401 	if (min_scan > 0 || (inactq_shortage > 0 && maxscan > 0))
1402 		vmd->vmd_last_active_scan = scan_tick;
1403 
1404 	/*
1405 	 * Scan the active queue for pages that can be deactivated.  Update
1406 	 * the per-page activity counter and use it to identify deactivation
1407 	 * candidates.  Held pages may be deactivated.
1408 	 */
1409 	for (m = TAILQ_FIRST(&pq->pq_pl), scanned = 0; m != NULL && (scanned <
1410 	    min_scan || (inactq_shortage > 0 && scanned < maxscan)); m = next,
1411 	    scanned++) {
1412 		KASSERT(m->queue == PQ_ACTIVE,
1413 		    ("vm_pageout_scan: page %p isn't active", m));
1414 		next = TAILQ_NEXT(m, plinks.q);
1415 		if ((m->flags & PG_MARKER) != 0)
1416 			continue;
1417 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
1418 		    ("Fictitious page %p cannot be in active queue", m));
1419 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1420 		    ("Unmanaged page %p cannot be in active queue", m));
1421 		if (!vm_pageout_page_lock(m, &next)) {
1422 			vm_page_unlock(m);
1423 			continue;
1424 		}
1425 
1426 		/*
1427 		 * The count for page daemon pages is updated after checking
1428 		 * the page for eligibility.
1429 		 */
1430 		VM_CNT_INC(v_pdpages);
1431 
1432 		/*
1433 		 * Check to see "how much" the page has been used.
1434 		 */
1435 		if ((m->aflags & PGA_REFERENCED) != 0) {
1436 			vm_page_aflag_clear(m, PGA_REFERENCED);
1437 			act_delta = 1;
1438 		} else
1439 			act_delta = 0;
1440 
1441 		/*
1442 		 * Perform an unsynchronized object ref count check.  While
1443 		 * the page lock ensures that the page is not reallocated to
1444 		 * another object, in particular, one with unmanaged mappings
1445 		 * that cannot support pmap_ts_referenced(), two races are,
1446 		 * nonetheless, possible:
1447 		 * 1) The count was transitioning to zero, but we saw a non-
1448 		 *    zero value.  pmap_ts_referenced() will return zero
1449 		 *    because the page is not mapped.
1450 		 * 2) The count was transitioning to one, but we saw zero.
1451 		 *    This race delays the detection of a new reference.  At
1452 		 *    worst, we will deactivate and reactivate the page.
1453 		 */
1454 		if (m->object->ref_count != 0)
1455 			act_delta += pmap_ts_referenced(m);
1456 
1457 		/*
1458 		 * Advance or decay the act_count based on recent usage.
1459 		 */
1460 		if (act_delta != 0) {
1461 			m->act_count += ACT_ADVANCE + act_delta;
1462 			if (m->act_count > ACT_MAX)
1463 				m->act_count = ACT_MAX;
1464 		} else
1465 			m->act_count -= min(m->act_count, ACT_DECLINE);
1466 
1467 		/*
1468 		 * Move this page to the tail of the active, inactive or laundry
1469 		 * queue depending on usage.
1470 		 */
1471 		if (m->act_count == 0) {
1472 			/* Dequeue to avoid later lock recursion. */
1473 			vm_page_dequeue_locked(m);
1474 
1475 			/*
1476 			 * When not short of inactive pages, let dirty pages go
1477 			 * through the inactive queue before moving to the
1478 			 * laundry queues.  This gives them some extra time to
1479 			 * be reactivated, potentially avoiding an expensive
1480 			 * pageout.  During a page shortage, the inactive queue
1481 			 * is necessarily small, so we may move dirty pages
1482 			 * directly to the laundry queue.
1483 			 */
1484 			if (inactq_shortage <= 0)
1485 				vm_page_deactivate(m);
1486 			else {
1487 				/*
1488 				 * Calling vm_page_test_dirty() here would
1489 				 * require acquisition of the object's write
1490 				 * lock.  However, during a page shortage,
1491 				 * directing dirty pages into the laundry
1492 				 * queue is only an optimization and not a
1493 				 * requirement.  Therefore, we simply rely on
1494 				 * the opportunistic updates to the page's
1495 				 * dirty field by the pmap.
1496 				 */
1497 				if (m->dirty == 0) {
1498 					vm_page_deactivate(m);
1499 					inactq_shortage -=
1500 					    act_scan_laundry_weight;
1501 				} else {
1502 					vm_page_launder(m);
1503 					inactq_shortage--;
1504 				}
1505 			}
1506 		} else
1507 			vm_page_requeue_locked(m);
1508 		vm_page_unlock(m);
1509 	}
1510 	vm_pagequeue_unlock(pq);
1511 	if (pass > 0)
1512 		vm_swapout_run_idle();
1513 	return (page_shortage <= 0);
1514 }
1515 
1516 static int vm_pageout_oom_vote;
1517 
1518 /*
1519  * The pagedaemon threads randomly select one to perform the
1520  * OOM.  Trying to kill processes before all pagedaemons have
1521  * failed to reach the free page target is premature.
1522  */
1523 static void
1524 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
1525     int starting_page_shortage)
1526 {
1527 	int old_vote;
1528 
1529 	if (starting_page_shortage <= 0 || starting_page_shortage !=
1530 	    page_shortage)
1531 		vmd->vmd_oom_seq = 0;
1532 	else
1533 		vmd->vmd_oom_seq++;
1534 	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1535 		if (vmd->vmd_oom) {
1536 			vmd->vmd_oom = FALSE;
1537 			atomic_subtract_int(&vm_pageout_oom_vote, 1);
1538 		}
1539 		return;
1540 	}
1541 
1542 	/*
1543 	 * Do not follow the call sequence until the OOM condition is
1544 	 * cleared.
1545 	 */
1546 	vmd->vmd_oom_seq = 0;
1547 
1548 	if (vmd->vmd_oom)
1549 		return;
1550 
1551 	vmd->vmd_oom = TRUE;
1552 	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1553 	if (old_vote != vm_ndomains - 1)
1554 		return;
1555 
1556 	/*
1557 	 * The current pagedaemon thread is the last in the quorum to
1558 	 * start OOM.  Initiate the selection and signaling of the
1559 	 * victim.
1560 	 */
1561 	vm_pageout_oom(VM_OOM_MEM);
1562 
1563 	/*
1564 	 * After one round of OOM terror, recall our vote.  On the
1565 	 * next pass, the current pagedaemon will vote again if the low
1566 	 * memory condition is still there, due to vmd_oom being
1567 	 * false.
1568 	 */
1569 	vmd->vmd_oom = FALSE;
1570 	atomic_subtract_int(&vm_pageout_oom_vote, 1);
1571 }
1572 
1573 /*
1574  * The OOM killer is the page daemon's action of last resort when
1575  * memory allocation requests have been stalled for a prolonged period
1576  * of time because it cannot reclaim memory.  This function computes
1577  * the approximate number of physical pages that could be reclaimed if
1578  * the specified address space is destroyed.
1579  *
1580  * Private, anonymous memory owned by the address space is the
1581  * principal resource that we expect to recover after an OOM kill.
1582  * Since the physical pages mapped by the address space's COW entries
1583  * are typically shared pages, they are unlikely to be released and so
1584  * they are not counted.
1585  *
1586  * To get to the point where the page daemon runs the OOM killer, its
1587  * efforts to write-back vnode-backed pages may have stalled.  This
1588  * could be caused by a memory allocation deadlock in the write path
1589  * that might be resolved by an OOM kill.  Therefore, physical pages
1590  * belonging to vnode-backed objects are counted, because they might
1591  * be freed without being written out first if the address space holds
1592  * the last reference to an unlinked vnode.
1593  *
1594  * Similarly, physical pages belonging to OBJT_PHYS objects are
1595  * counted because the address space might hold the last reference to
1596  * the object.
1597  */
1598 static long
1599 vm_pageout_oom_pagecount(struct vmspace *vmspace)
1600 {
1601 	vm_map_t map;
1602 	vm_map_entry_t entry;
1603 	vm_object_t obj;
1604 	long res;
1605 
1606 	map = &vmspace->vm_map;
1607 	KASSERT(!map->system_map, ("system map"));
1608 	sx_assert(&map->lock, SA_LOCKED);
1609 	res = 0;
1610 	for (entry = map->header.next; entry != &map->header;
1611 	    entry = entry->next) {
1612 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
1613 			continue;
1614 		obj = entry->object.vm_object;
1615 		if (obj == NULL)
1616 			continue;
1617 		if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
1618 		    obj->ref_count != 1)
1619 			continue;
1620 		switch (obj->type) {
1621 		case OBJT_DEFAULT:
1622 		case OBJT_SWAP:
1623 		case OBJT_PHYS:
1624 		case OBJT_VNODE:
1625 			res += obj->resident_page_count;
1626 			break;
1627 		}
1628 	}
1629 	return (res);
1630 }
1631 
1632 void
1633 vm_pageout_oom(int shortage)
1634 {
1635 	struct proc *p, *bigproc;
1636 	vm_offset_t size, bigsize;
1637 	struct thread *td;
1638 	struct vmspace *vm;
1639 	bool breakout;
1640 
1641 	/*
1642 	 * We keep the process bigproc locked once we find it to keep anyone
1643 	 * from messing with it; however, there is a possibility of
1644 	 * deadlock if process B is bigproc and one of its child processes
1645 	 * attempts to propagate a signal to B while we are waiting for the
1646 	 * lock of another process on this list.  To avoid this, we don't block
1647 	 * on the process lock but just skip a process if it is already locked.
1648 	 */
1649 	bigproc = NULL;
1650 	bigsize = 0;
1651 	sx_slock(&allproc_lock);
1652 	FOREACH_PROC_IN_SYSTEM(p) {
1653 		PROC_LOCK(p);
1654 
1655 		/*
1656 		 * If this is a system, protected or killed process, skip it.
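		 * As a heuristic, processes with very low PIDs are also
		 * skipped as long as swap space remains available.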
1657 		 */
1658 		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
1659 		    P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
1660 		    p->p_pid == 1 || P_KILLED(p) ||
1661 		    (p->p_pid < 48 && swap_pager_avail != 0)) {
1662 			PROC_UNLOCK(p);
1663 			continue;
1664 		}
1665 		/*
1666 		 * If the process has a thread in an unexpected, transient
1667 		 * state, don't touch it.  Check all the threads individually.
1668 		 */
1669 		breakout = false;
1670 		FOREACH_THREAD_IN_PROC(p, td) {
1671 			thread_lock(td);
1672 			if (!TD_ON_RUNQ(td) &&
1673 			    !TD_IS_RUNNING(td) &&
1674 			    !TD_IS_SLEEPING(td) &&
1675 			    !TD_IS_SUSPENDED(td) &&
1676 			    !TD_IS_SWAPPED(td)) {
1677 				thread_unlock(td);
1678 				breakout = true;
1679 				break;
1680 			}
1681 			thread_unlock(td);
1682 		}
1683 		if (breakout) {
1684 			PROC_UNLOCK(p);
1685 			continue;
1686 		}
1687 		/*
1688 		 * get the process size
1689 		 */
1690 		vm = vmspace_acquire_ref(p);
1691 		if (vm == NULL) {
1692 			PROC_UNLOCK(p);
1693 			continue;
1694 		}
1695 		_PHOLD_LITE(p);
1696 		PROC_UNLOCK(p);
1697 		sx_sunlock(&allproc_lock);
1698 		if (!vm_map_trylock_read(&vm->vm_map)) {
1699 			vmspace_free(vm);
1700 			sx_slock(&allproc_lock);
1701 			PRELE(p);
1702 			continue;
1703 		}
1704 		size = vmspace_swap_count(vm);
1705 		if (shortage == VM_OOM_MEM)
1706 			size += vm_pageout_oom_pagecount(vm);
1707 		vm_map_unlock_read(&vm->vm_map);
1708 		vmspace_free(vm);
1709 		sx_slock(&allproc_lock);
1710 
1711 		/*
1712 		 * If this process is bigger than the biggest one,
1713 		 * remember it.
1714 		 */
1715 		if (size > bigsize) {
1716 			if (bigproc != NULL)
1717 				PRELE(bigproc);
1718 			bigproc = p;
1719 			bigsize = size;
1720 		} else {
1721 			PRELE(p);
1722 		}
1723 	}
1724 	sx_sunlock(&allproc_lock);
1725 	if (bigproc != NULL) {
1726 		if (vm_panic_on_oom != 0)
1727 			panic("out of swap space");
1728 		PROC_LOCK(bigproc);
1729 		killproc(bigproc, "out of swap space");
1730 		sched_nice(bigproc, PRIO_MIN);
1731 		_PRELE(bigproc);
1732 		PROC_UNLOCK(bigproc);
1733 		wakeup(&vm_cnt.v_free_count);
1734 	}
1735 }
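
/*
 * An illustrative example of the victim selection above, again with
 * hypothetical numbers: if process P1 has 20000 swapped-out pages and
 * 50000 reclaimable resident pages while P2 has 60000 and 5000
 * respectively, then under VM_OOM_MEM P1 scores 70000 against P2's 65000
 * and is chosen; for any other shortage value only the swap counts are
 * compared, so P2 would be chosen instead.
 */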
1736 
1737 static void
1738 vm_pageout_worker(void *arg)
1739 {
1740 	struct vm_domain *domain;
1741 	int domidx, pass;
1742 	bool target_met;
1743 
1744 	domidx = (uintptr_t)arg;
1745 	domain = &vm_dom[domidx];
1746 	pass = 0;
1747 	target_met = true;
1748 
1749 	/*
1750 	 * XXXKIB It could be useful to bind pageout daemon threads to
1751 	 * the cores belonging to the domain, from which vm_page_array
1752 	 * is allocated.
1753 	 */
1754 
1755 	KASSERT(domain->vmd_segs != 0, ("domain without segments"));
1756 	domain->vmd_last_active_scan = ticks;
1757 	vm_pageout_init_marker(&domain->vmd_marker, PQ_INACTIVE);
1758 	vm_pageout_init_marker(&domain->vmd_inacthead, PQ_INACTIVE);
1759 	TAILQ_INSERT_HEAD(&domain->vmd_pagequeues[PQ_INACTIVE].pq_pl,
1760 	    &domain->vmd_inacthead, plinks.q);
1761 
1762 	/*
1763 	 * The pageout daemon worker is never done, so loop forever.
1764 	 */
1765 	while (TRUE) {
1766 		mtx_lock(&vm_page_queue_free_mtx);
1767 
1768 		/*
1769 		 * Generally, after a level >= 1 scan, if there are enough
1770 		 * free pages to wake up the waiters, then they are already
1771 		 * awake.  A call to vm_page_free() during the scan awakened
1772 		 * them.  However, in the following case, this wakeup serves
1773 		 * to bound the amount of time that a thread might wait.
1774 		 * Suppose a thread's call to vm_page_alloc() fails, but
1775 		 * before that thread calls VM_WAIT, enough pages are freed by
1776 		 * other threads to alleviate the free page shortage.  The
1777 		 * thread will, nonetheless, wait until another page is freed
1778 		 * or this wakeup is performed.
1779 		 */
1780 		if (vm_pages_needed && !vm_page_count_min()) {
1781 			vm_pages_needed = false;
1782 			wakeup(&vm_cnt.v_free_count);
1783 		}
1784 
1785 		/*
1786 		 * Do not clear vm_pageout_wanted until we reach our free page
1787 		 * target.  Otherwise, we may be awakened over and over again,
1788 		 * wasting CPU time.
1789 		 */
1790 		if (vm_pageout_wanted && target_met)
1791 			vm_pageout_wanted = false;
1792 
1793 		/*
1794 		 * Might the page daemon receive a wakeup call?
1795 		 */
1796 		if (vm_pageout_wanted) {
1797 			/*
1798 			 * No.  Either vm_pageout_wanted was set by another
1799 			 * thread during the previous scan, which must have
1800 			 * been a level 0 scan, or vm_pageout_wanted was
1801 			 * already set and the scan failed to free enough
1802 			 * pages.  If we haven't yet performed a level >= 1
1803 			 * (page reclamation) scan, then increase the level
1804 			 * and scan again now.  Otherwise, sleep a bit and
1805 			 * try again later.
1806 			 */
1807 			mtx_unlock(&vm_page_queue_free_mtx);
1808 			if (pass >= 1)
1809 				pause("psleep", hz / VM_INACT_SCAN_RATE);
1810 			pass++;
1811 		} else {
1812 			/*
1813 			 * Yes.  Sleep until pages need to be reclaimed or
1814 			 * have their reference stats updated.
1815 			 */
1816 			if (mtx_sleep(&vm_pageout_wanted,
1817 			    &vm_page_queue_free_mtx, PDROP | PVM, "psleep",
1818 			    hz) == 0) {
1819 				VM_CNT_INC(v_pdwakeups);
1820 				pass = 1;
1821 			} else
1822 				pass = 0;
1823 		}
1824 
1825 		target_met = vm_pageout_scan(domain, pass);
1826 	}
1827 }
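
/*
 * A summary of the pass values used above, restating the comments in the
 * loop rather than adding new policy: pass 0 follows a timed wakeup and
 * the scan only updates reference statistics; pass 1 follows an explicit
 * wakeup and the scan reclaims pages; pass > 1 means a previous
 * reclamation scan missed its target, so further scans are paced by the
 * pause() above.
 */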
1828 
1829 /*
1830  *	vm_pageout_init initializes basic pageout daemon settings.
1831  */
1832 static void
1833 vm_pageout_init(void)
1834 {
1835 	/*
1836 	 * Initialize some paging parameters.
1837 	 */
1838 	vm_cnt.v_interrupt_free_min = 2;
1839 	if (vm_cnt.v_page_count < 2000)
1840 		vm_pageout_page_count = 8;
1841 
1842 	/*
1843 	 * v_free_reserved needs to include enough for the largest
1844 	 * swap pager structures plus enough for any pv_entry structs
1845 	 * when paging.
1846 	 */
1847 	if (vm_cnt.v_page_count > 1024)
1848 		vm_cnt.v_free_min = 4 + (vm_cnt.v_page_count - 1024) / 200;
1849 	else
1850 		vm_cnt.v_free_min = 4;
1851 	vm_cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1852 	    vm_cnt.v_interrupt_free_min;
1853 	vm_cnt.v_free_reserved = vm_pageout_page_count +
1854 	    vm_cnt.v_pageout_free_min + (vm_cnt.v_page_count / 768);
1855 	vm_cnt.v_free_severe = vm_cnt.v_free_min / 2;
1856 	vm_cnt.v_free_target = 4 * vm_cnt.v_free_min + vm_cnt.v_free_reserved;
1857 	vm_cnt.v_free_min += vm_cnt.v_free_reserved;
1858 	vm_cnt.v_free_severe += vm_cnt.v_free_reserved;
1859 	vm_cnt.v_inactive_target = (3 * vm_cnt.v_free_target) / 2;
1860 	if (vm_cnt.v_inactive_target > vm_cnt.v_free_count / 3)
1861 		vm_cnt.v_inactive_target = vm_cnt.v_free_count / 3;
1862 
1863 	/*
1864 	 * Set the default wakeup threshold to be 10% above the minimum
1865 	 * page limit.  This keeps the steady state out of shortfall.
1866 	 */
1867 	vm_pageout_wakeup_thresh = (vm_cnt.v_free_min / 10) * 11;
1868 
1869 	/*
1870 	 * Set the active scan interval in seconds.  We want to visit each
1871 	 * page at least once every ten minutes.  This is to prevent worst
1872 	 * case paging behaviors with stale active LRU.
1873 	 */
1874 	if (vm_pageout_update_period == 0)
1875 		vm_pageout_update_period = 600;
1876 
1877 	/* XXX does not really belong here */
1878 	if (vm_page_max_wired == 0)
1879 		vm_page_max_wired = vm_cnt.v_free_count / 3;
1880 
1881 	/*
1882 	 * Target amount of memory to move out of the laundry queue during a
1883 	 * background laundering.  This is proportional to the amount of system
1884 	 * memory.
1885 	 */
1886 	vm_background_launder_target = (vm_cnt.v_free_target -
1887 	    vm_cnt.v_free_min) / 10;
1888 }
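
/*
 * A rough worked example of the sizing above, assuming 4 KB pages, a
 * 64 KB MAXBSIZE, vm_pageout_page_count left at 32, and about 4 GB of
 * managed memory (v_page_count = 1048576).  These inputs are illustrative
 * assumptions, not measurements from a real machine:
 *
 *	v_free_min (initial)	= 4 + (1048576 - 1024) / 200 = 5241
 *	v_pageout_free_min	= (2 * 65536) / 4096 + 2 = 34
 *	v_free_reserved		= 32 + 34 + 1048576 / 768 = 1431
 *	v_free_severe		= 5241 / 2 + 1431 = 4051 (~16 MB)
 *	v_free_target		= 4 * 5241 + 1431 = 22395 (~87 MB)
 *	v_free_min (final)	= 5241 + 1431 = 6672 (~26 MB)
 *	v_inactive_target	= 3 * 22395 / 2 = 33592 (~131 MB, then
 *				  capped at v_free_count / 3)
 *	vm_pageout_wakeup_thresh = (6672 / 10) * 11 = 7337 (~29 MB)
 *	vm_background_launder_target = (22395 - 6672) / 10 = 1572 (~6 MB)
 */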
1889 
1890 /*
1891  *	vm_pageout is the high-level pageout daemon.
1892  */
1893 static void
1894 vm_pageout(void)
1895 {
1896 	int error;
1897 #ifdef VM_NUMA_ALLOC
1898 	int i;
1899 #endif
1900 
1901 	swap_pager_swap_init();
1902 	error = kthread_add(vm_pageout_laundry_worker, NULL, curproc, NULL,
1903 	    0, 0, "laundry: dom0");
1904 	if (error != 0)
1905 		panic("starting laundry for domain 0, error %d", error);
1906 #ifdef VM_NUMA_ALLOC
1907 	for (i = 1; i < vm_ndomains; i++) {
1908 		error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i,
1909 		    curproc, NULL, 0, 0, "dom%d", i);
1910 		if (error != 0) {
1911 			panic("starting pageout for domain %d, error %d",
1912 			    i, error);
1913 		}
1914 	}
1915 #endif
1916 	error = kthread_add(uma_reclaim_worker, NULL, curproc, NULL,
1917 	    0, 0, "uma");
1918 	if (error != 0)
1919 		panic("starting uma_reclaim helper, error %d", error);
1920 	vm_pageout_worker((void *)(uintptr_t)0);
1921 }
1922 
1923 /*
1924  * Unless the free page queue lock is held by the caller, this function
1925  * should be regarded as advisory.  Specifically, the caller should
1926  * not msleep() on &vm_cnt.v_free_count following this function unless
1927  * the free page queue lock is held until the msleep() is performed.
1928  */
1929 void
1930 pagedaemon_wakeup(void)
1931 {
1932 
1933 	if (!vm_pageout_wanted && curthread->td_proc != pageproc) {
1934 		vm_pageout_wanted = true;
1935 		wakeup(&vm_pageout_wanted);
1936 	}
1937 }
1938
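/*
 * A minimal sketch of the waiting pattern that the comment above assumes;
 * it is illustrative only and is not a copy of the real sleep path in
 * vm_page.c.  The point is that the free queue lock is taken before the
 * shortage is re-checked and is only released by msleep() itself, so a
 * vm_page_free() that replenishes the queue cannot slip in between the
 * check and the sleep:
 *
 *	mtx_lock(&vm_page_queue_free_mtx);
 *	while (vm_page_count_min()) {
 *		vm_pages_needed = true;
 *		pagedaemon_wakeup();
 *		msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx,
 *		    PVM, "vmwait", 0);
 *	}
 *	mtx_unlock(&vm_page_queue_free_mtx);
 */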