xref: /dragonfly/sys/vm/vm_pageout.c (revision dc71b7ab)
1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * The Mach Operating System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $FreeBSD: src/sys/vm/vm_pageout.c,v 1.151.2.15 2002/12/29 18:21:04 dillon Exp $
65  */
66 
67 /*
68  *	The proverbial page-out daemon.
69  */
70 
71 #include "opt_vm.h"
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/kernel.h>
75 #include <sys/proc.h>
76 #include <sys/kthread.h>
77 #include <sys/resourcevar.h>
78 #include <sys/signalvar.h>
79 #include <sys/vnode.h>
80 #include <sys/vmmeter.h>
81 #include <sys/sysctl.h>
82 
83 #include <vm/vm.h>
84 #include <vm/vm_param.h>
85 #include <sys/lock.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_pager.h>
91 #include <vm/swap_pager.h>
92 #include <vm/vm_extern.h>
93 
94 #include <sys/thread2.h>
95 #include <sys/spinlock2.h>
96 #include <vm/vm_page2.h>
97 
98 /*
99  * System initialization
100  */
101 
102 /* the kernel process "vm_pageout" */
103 static int vm_pageout_clean (vm_page_t);
104 static int vm_pageout_free_page_calc (vm_size_t count);
105 struct thread *pagethread;
106 
107 #if !defined(NO_SWAPPING)
108 /* the kernel process "vm_daemon" */
109 static void vm_daemon (void);
110 static struct	thread *vmthread;
111 
112 static struct kproc_desc vm_kp = {
113 	"vmdaemon",
114 	vm_daemon,
115 	&vmthread
116 };
117 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
118 #endif
119 
120 int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
121 int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
122 int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */
123 
124 #if !defined(NO_SWAPPING)
125 static int vm_pageout_req_swapout;	/* XXX */
126 static int vm_daemon_needed;
127 #endif
128 static int vm_max_launder = 32;
129 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
130 static int vm_pageout_full_stats_interval = 0;
131 static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
132 static int defer_swap_pageouts=0;
133 static int disable_swap_pageouts=0;
134 static u_int vm_anonmem_decline = ACT_DECLINE;
135 static u_int vm_filemem_decline = ACT_DECLINE * 2;
136 
137 #if defined(NO_SWAPPING)
138 static int vm_swap_enabled=0;
139 static int vm_swap_idle_enabled=0;
140 #else
141 static int vm_swap_enabled=1;
142 static int vm_swap_idle_enabled=0;
143 #endif
144 
145 SYSCTL_UINT(_vm, VM_PAGEOUT_ALGORITHM, anonmem_decline,
146 	CTLFLAG_RW, &vm_anonmem_decline, 0, "active->inactive anon memory");
147 
148 SYSCTL_UINT(_vm, VM_PAGEOUT_ALGORITHM, filemem_decline,
149 	CTLFLAG_RW, &vm_filemem_decline, 0, "active->inactive file cache");
150 
151 SYSCTL_INT(_vm, OID_AUTO, max_launder,
152 	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
153 
154 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
155 	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
156 
157 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
158 	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
159 
160 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
161 	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
162 
163 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
164 	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
165 
166 #if defined(NO_SWAPPING)
167 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
168 	CTLFLAG_RD, &vm_swap_enabled, 0, "");
169 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
170 	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
171 #else
172 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
173 	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
174 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
175 	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
176 #endif
177 
178 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
179 	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
180 
181 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
182 	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
183 
184 static int pageout_lock_miss;
185 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
186 	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
187 
188 #define VM_PAGEOUT_PAGE_COUNT 16
189 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
190 
191 int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
192 
193 #if !defined(NO_SWAPPING)
194 typedef void freeer_fcn_t (vm_map_t, vm_object_t, vm_pindex_t, int);
195 static void vm_pageout_map_deactivate_pages (vm_map_t, vm_pindex_t);
196 static freeer_fcn_t vm_pageout_object_deactivate_pages;
197 static void vm_req_vmdaemon (void);
198 #endif
199 static void vm_pageout_page_stats(int q);
200 
201 static __inline int
202 PQAVERAGE(int n)
203 {
204 	if (n >= 0)
205 		return((n + (PQ_L2_SIZE - 1)) / PQ_L2_SIZE + 1);
206 	else
207 		return((n - (PQ_L2_SIZE - 1)) / PQ_L2_SIZE - 1);
208 }
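/*
 * Editor's sketch (illustrative, not compiled): how a caller might use
 * PQAVERAGE() to split a global page shortage across the PQ_L2_SIZE
 * per-queue scans.  The round-away-from-zero behavior guarantees each
 * queue is asked to recover a nonzero share whenever any shortage
 * exists, at the cost of slightly overshooting the global target.
 * The function name below is hypothetical.
 */
#if 0
static void
example_distribute_shortage(int global_shortage)
{
	int vnodes_skipped = 0;
	int recovered = 0;
	int q;

	for (q = 0; q < PQ_L2_SIZE; ++q) {
		recovered += vm_pageout_scan_inactive(
				0, q, PQAVERAGE(global_shortage),
				&vnodes_skipped);
	}
	kprintf("recovered %d of %d pages\n", recovered, global_shortage);
}
#endif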
209 
210 /*
211  * vm_pageout_clean:
212  *
213  * Clean the page and remove it from the laundry.  The page must be
214  * busied on call; the busy state is consumed by this routine.
215  *
216  * The busy bit causes potential page faults on this page to
217  * block.  Note the careful timing, however: we cannot do anything
218  * that will mess with the page.
219  */
220 static int
221 vm_pageout_clean(vm_page_t m)
222 {
223 	vm_object_t object;
224 	vm_page_t mc[2*vm_pageout_page_count];
225 	int pageout_count;
226 	int error;
227 	int ib, is, page_base;
228 	vm_pindex_t pindex = m->pindex;
229 
230 	object = m->object;
231 
232 	/*
233 	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
234 	 * with the new swapper, but we could have serious problems paging
235 	 * out other object types if there is insufficient memory.
236 	 *
237 	 * Unfortunately, checking free memory here is far too late, so the
238 	 * check has been moved up a procedural level.
239 	 */
240 
241 	/*
242 	 * Don't mess with the page if it's busy, held, or special
243 	 *
244 	 * XXX do we really need to check hold_count here?  hold_count
245 	 * isn't supposed to mess with vm_page ops except to prevent the
246 	 * page from being reused.
247 	 */
248 	if (m->hold_count != 0 || (m->flags & PG_UNMANAGED)) {
249 		vm_page_wakeup(m);
250 		return 0;
251 	}
252 
253 	mc[vm_pageout_page_count] = m;
254 	pageout_count = 1;
255 	page_base = vm_pageout_page_count;
256 	ib = 1;
257 	is = 1;
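	/*
	 * Editor's note on the cluster array layout from here on:
	 *
	 *   mc[page_base] == m                      the pivot page
	 *   mc[page_base-1], mc[page_base-2], ...   backward scan (ib)
	 *   mc[page_base+1], mc[page_base+2], ...   forward scan (is)
	 *
	 * page_base starts at vm_pageout_page_count and only moves down,
	 * so the finished cluster is always the contiguous run
	 * mc[page_base] through mc[page_base + pageout_count - 1].
	 */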
258 
259 	/*
260 	 * Scan object for clusterable pages.
261 	 *
262 	 * We can cluster ONLY if the page is NOT
263 	 * clean, wired, busy, held, or mapped into a
264 	 * buffer, and one of the following holds:
265 	 * 1) The page is inactive, or a seldom-used
266 	 *    active page.
267 	 * -or-
268 	 * 2) we force the issue.
269 	 *
270 	 * During heavy mmap/modification loads the pageout
271 	 * daemon can really fragment the underlying file
272 	 * due to flushing pages out of order and not trying to
273 	 * align the clusters (which leaves sporadic out-of-order
274 	 * holes).  To solve this problem we do the reverse scan
275 	 * first and attempt to align our cluster, then do a
276 	 * forward scan if room remains.
277 	 */
278 
279 	vm_object_hold(object);
280 more:
281 	while (ib && pageout_count < vm_pageout_page_count) {
282 		vm_page_t p;
283 
284 		if (ib > pindex) {
285 			ib = 0;
286 			break;
287 		}
288 
289 		p = vm_page_lookup_busy_try(object, pindex - ib, TRUE, &error);
290 		if (error || p == NULL) {
291 			ib = 0;
292 			break;
293 		}
294 		if ((p->queue - p->pc) == PQ_CACHE ||
295 		    (p->flags & PG_UNMANAGED)) {
296 			vm_page_wakeup(p);
297 			ib = 0;
298 			break;
299 		}
300 		vm_page_test_dirty(p);
301 		if (((p->dirty & p->valid) == 0 &&
302 		     (p->flags & PG_NEED_COMMIT) == 0) ||
303 		    p->queue - p->pc != PQ_INACTIVE ||
304 		    p->wire_count != 0 ||	/* may be held by buf cache */
305 		    p->hold_count != 0) {	/* may be undergoing I/O */
306 			vm_page_wakeup(p);
307 			ib = 0;
308 			break;
309 		}
310 		mc[--page_base] = p;
311 		++pageout_count;
312 		++ib;
313 		/*
314 		 * alignment boundary, stop here and switch directions.  Do
315 		 * not clear ib.
316 		 */
317 		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
318 			break;
319 	}
320 
321 	while (pageout_count < vm_pageout_page_count &&
322 	    pindex + is < object->size) {
323 		vm_page_t p;
324 
325 		p = vm_page_lookup_busy_try(object, pindex + is, TRUE, &error);
326 		if (error || p == NULL)
327 			break;
328 		if (((p->queue - p->pc) == PQ_CACHE) ||
329 		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
330 			vm_page_wakeup(p);
331 			break;
332 		}
333 		vm_page_test_dirty(p);
334 		if (((p->dirty & p->valid) == 0 &&
335 		     (p->flags & PG_NEED_COMMIT) == 0) ||
336 		    p->queue - p->pc != PQ_INACTIVE ||
337 		    p->wire_count != 0 ||	/* may be held by buf cache */
338 		    p->hold_count != 0) {	/* may be undergoing I/O */
339 			vm_page_wakeup(p);
340 			break;
341 		}
342 		mc[page_base + pageout_count] = p;
343 		++pageout_count;
344 		++is;
345 	}
346 
347 	/*
348 	 * If we exhausted our forward scan, continue with the reverse scan
349 	 * when possible, even past a page boundary.  This catches boundary
350 	 * conditions.
351 	 */
352 	if (ib && pageout_count < vm_pageout_page_count)
353 		goto more;
354 
355 	vm_object_drop(object);
356 
357 	/*
358 	 * we allow reads during pageouts...
359 	 */
360 	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
361 }
362 
363 /*
364  * vm_pageout_flush() - launder the given pages
365  *
366  *	The given pages are laundered.  Note that we set up for the start of
367  *	I/O (i.e. busy the page), mark it read-only, and bump the object
368  *	reference count all in here rather than in the parent.  If we want
369  *	the parent to do more sophisticated things we may have to change
370  *	the ordering.
371  *
372  *	The pages in the array must be busied by the caller and will be
373  *	unbusied by this function.
374  */
375 int
376 vm_pageout_flush(vm_page_t *mc, int count, int flags)
377 {
378 	vm_object_t object;
379 	int pageout_status[count];
380 	int numpagedout = 0;
381 	int i;
382 
383 	/*
384 	 * Initiate I/O.  Bump the vm_page_t->busy counter.
385 	 */
386 	for (i = 0; i < count; i++) {
387 		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
388 			("vm_pageout_flush page %p index %d/%d: partially "
389 			 "invalid page", mc[i], i, count));
390 		vm_page_io_start(mc[i]);
391 	}
392 
393 	/*
394 	 * We must make the pages read-only.  This will also force the
395 	 * modified bit in the related pmaps to be cleared.  The pager
396 	 * cannot clear the bit for us since the I/O completion code
397 	 * typically runs from an interrupt.  The act of making the page
398 	 * read-only handles the case for us.
399 	 *
400 	 * Then we can unbusy the pages, we still hold a reference by virtue
401 	 * of our soft-busy.
402 	 */
403 	for (i = 0; i < count; i++) {
404 		vm_page_protect(mc[i], VM_PROT_READ);
405 		vm_page_wakeup(mc[i]);
406 	}
407 
408 	object = mc[0]->object;
409 	vm_object_pip_add(object, count);
410 
411 	vm_pager_put_pages(object, mc, count,
412 	    (flags | ((object == &kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
413 	    pageout_status);
414 
415 	for (i = 0; i < count; i++) {
416 		vm_page_t mt = mc[i];
417 
418 		switch (pageout_status[i]) {
419 		case VM_PAGER_OK:
420 			numpagedout++;
421 			break;
422 		case VM_PAGER_PEND:
423 			numpagedout++;
424 			break;
425 		case VM_PAGER_BAD:
426 			/*
427 			 * Page outside of range of object. Right now we
428 			 * essentially lose the changes by pretending it
429 			 * worked.
430 			 */
431 			vm_page_busy_wait(mt, FALSE, "pgbad");
432 			pmap_clear_modify(mt);
433 			vm_page_undirty(mt);
434 			vm_page_wakeup(mt);
435 			break;
436 		case VM_PAGER_ERROR:
437 		case VM_PAGER_FAIL:
438 			/*
439 			 * A page typically cannot be paged out when we
440 			 * have run out of swap.  We leave the page
441 			 * marked inactive and will try to page it out
442 			 * again later.
443 			 *
444 			 * Starvation of the active page list is used to
445 			 * determine when the system is massively memory
446 			 * starved.
447 			 */
448 			break;
449 		case VM_PAGER_AGAIN:
450 			break;
451 		}
452 
453 		/*
454 		 * If the operation is still going, leave the page busy to
455 		 * block all other accesses. Also, leave the paging in
456 		 * progress indicator set so that we don't attempt an object
457 		 * collapse.
458 		 *
459 		 * For any pages which have completed synchronously,
460 		 * deactivate the page if we are under a severe deficit.
461 		 * Do not try to enter them into the cache, though, they
462 		 * might still be read-heavy.
463 		 */
464 		if (pageout_status[i] != VM_PAGER_PEND) {
465 			vm_page_busy_wait(mt, FALSE, "pgouw");
466 			if (vm_page_count_severe())
467 				vm_page_deactivate(mt);
468 #if 0
469 			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
470 				vm_page_protect(mt, VM_PROT_READ);
471 #endif
472 			vm_page_io_finish(mt);
473 			vm_page_wakeup(mt);
474 			vm_object_pip_wakeup(object);
475 		}
476 	}
477 	return numpagedout;
478 }
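/*
 * Editor's sketch (illustrative, not compiled): the caller contract of
 * vm_pageout_flush() for a single page.  The caller busies the page
 * and guarantees it is fully valid; vm_pageout_flush() marks it
 * read-only, starts I/O, and unbusies it.  The return value counts
 * pages whose pageout completed or is pending.  The function name
 * below is hypothetical.
 */
#if 0
static int
example_flush_one(vm_page_t m)
{
	vm_page_t mc[1];

	/* (m) must already be busied, with m->valid == VM_PAGE_BITS_ALL */
	mc[0] = m;
	return (vm_pageout_flush(mc, 1, 0));
}
#endif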
479 
480 #if !defined(NO_SWAPPING)
481 /*
482  * Deactivate enough pages to satisfy the inactive target
483  * requirements, or, if vm_page_proc_limit is set,
484  * deactivate all of the pages in the object and its
485  * backing_objects.
486  *
487  * The map must be locked.
488  * The caller must hold the vm_object.
489  */
490 static int vm_pageout_object_deactivate_pages_callback(vm_page_t, void *);
491 
492 static void
493 vm_pageout_object_deactivate_pages(vm_map_t map, vm_object_t object,
494 				   vm_pindex_t desired, int map_remove_only)
495 {
496 	struct rb_vm_page_scan_info info;
497 	vm_object_t lobject;
498 	vm_object_t tobject;
499 	int remove_mode;
500 
501 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
502 	lobject = object;
503 
504 	while (lobject) {
505 		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
506 			break;
507 		if (lobject->type == OBJT_DEVICE || lobject->type == OBJT_PHYS)
508 			break;
509 		if (lobject->paging_in_progress)
510 			break;
511 
512 		remove_mode = map_remove_only;
513 		if (lobject->shadow_count > 1)
514 			remove_mode = 1;
515 
516 		/*
517 		 * scan the object's entire memory queue.  We hold the
518 		 * object's token so the scan should not race anything.
519 		 */
520 		info.limit = remove_mode;
521 		info.map = map;
522 		info.desired = desired;
523 		vm_page_rb_tree_RB_SCAN(&lobject->rb_memq, NULL,
524 				vm_pageout_object_deactivate_pages_callback,
525 				&info
526 		);
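		/*
		 * Editor's note: hold-then-recheck.  Holding lobject's
		 * token does not pin lobject->backing_object, and
		 * vm_object_hold() may block, so the pointer is re-read
		 * after the hold; if it changed underneath us the hold
		 * is dropped and the read retried.
		 */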
527 		while ((tobject = lobject->backing_object) != NULL) {
528 			KKASSERT(tobject != object);
529 			vm_object_hold(tobject);
530 			if (tobject == lobject->backing_object)
531 				break;
532 			vm_object_drop(tobject);
533 		}
534 		if (lobject != object) {
535 			if (tobject)
536 				vm_object_lock_swap();
537 			vm_object_drop(lobject);
538 			/* leaves tobject locked & at top */
539 		}
540 		lobject = tobject;
541 	}
542 	if (lobject != object)
543 		vm_object_drop(lobject);	/* NULL ok */
544 }
545 
546 /*
547  * The caller must hold the vm_object.
548  */
549 static int
550 vm_pageout_object_deactivate_pages_callback(vm_page_t p, void *data)
551 {
552 	struct rb_vm_page_scan_info *info = data;
553 	int actcount;
554 
555 	if (pmap_resident_count(vm_map_pmap(info->map)) <= info->desired) {
556 		return(-1);
557 	}
558 	mycpu->gd_cnt.v_pdpages++;
559 
560 	if (vm_page_busy_try(p, TRUE))
561 		return(0);
562 	if (p->wire_count || p->hold_count || (p->flags & PG_UNMANAGED)) {
563 		vm_page_wakeup(p);
564 		return(0);
565 	}
566 	if (!pmap_page_exists_quick(vm_map_pmap(info->map), p)) {
567 		vm_page_wakeup(p);
568 		return(0);
569 	}
570 
571 	actcount = pmap_ts_referenced(p);
572 	if (actcount) {
573 		vm_page_flag_set(p, PG_REFERENCED);
574 	} else if (p->flags & PG_REFERENCED) {
575 		actcount = 1;
576 	}
577 
578 	vm_page_and_queue_spin_lock(p);
579 	if (p->queue - p->pc != PQ_ACTIVE && (p->flags & PG_REFERENCED)) {
580 		vm_page_and_queue_spin_unlock(p);
581 		vm_page_activate(p);
582 		p->act_count += actcount;
583 		vm_page_flag_clear(p, PG_REFERENCED);
584 	} else if (p->queue - p->pc == PQ_ACTIVE) {
585 		if ((p->flags & PG_REFERENCED) == 0) {
586 			p->act_count -= min(p->act_count, ACT_DECLINE);
587 			if (!info->limit &&
588 			    (vm_pageout_algorithm || (p->act_count == 0))) {
589 				vm_page_and_queue_spin_unlock(p);
590 				vm_page_protect(p, VM_PROT_NONE);
591 				vm_page_deactivate(p);
592 			} else {
593 				TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
594 					     p, pageq);
595 				TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
596 						  p, pageq);
597 				vm_page_and_queue_spin_unlock(p);
598 			}
599 		} else {
600 			vm_page_and_queue_spin_unlock(p);
601 			vm_page_activate(p);
602 			vm_page_flag_clear(p, PG_REFERENCED);
603 
604 			vm_page_and_queue_spin_lock(p);
605 			if (p->queue - p->pc == PQ_ACTIVE) {
606 				if (p->act_count < (ACT_MAX - ACT_ADVANCE))
607 					p->act_count += ACT_ADVANCE;
608 				TAILQ_REMOVE(&vm_page_queues[p->queue].pl,
609 					     p, pageq);
610 				TAILQ_INSERT_TAIL(&vm_page_queues[p->queue].pl,
611 						  p, pageq);
612 			}
613 			vm_page_and_queue_spin_unlock(p);
614 		}
615 	} else if (p->queue - p->pc == PQ_INACTIVE) {
616 		vm_page_and_queue_spin_unlock(p);
617 		vm_page_protect(p, VM_PROT_NONE);
618 	} else {
619 		vm_page_and_queue_spin_unlock(p);
620 	}
621 	vm_page_wakeup(p);
622 	return(0);
623 }
624 
625 /*
626  * Deactivate some number of pages in a map.  We try to do it fairly,
627  * but that is really hard to do.
628  */
629 static void
630 vm_pageout_map_deactivate_pages(vm_map_t map, vm_pindex_t desired)
631 {
632 	vm_map_entry_t tmpe;
633 	vm_object_t obj, bigobj;
634 	int nothingwired;
635 
636 	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT)) {
637 		return;
638 	}
639 
640 	bigobj = NULL;
641 	nothingwired = TRUE;
642 
643 	/*
644 	 * first, search out the biggest object, and try to free pages from
645 	 * that.
646 	 */
647 	tmpe = map->header.next;
648 	while (tmpe != &map->header) {
649 		switch(tmpe->maptype) {
650 		case VM_MAPTYPE_NORMAL:
651 		case VM_MAPTYPE_VPAGETABLE:
652 			obj = tmpe->object.vm_object;
653 			if ((obj != NULL) && (obj->shadow_count <= 1) &&
654 				((bigobj == NULL) ||
655 				 (bigobj->resident_page_count < obj->resident_page_count))) {
656 				bigobj = obj;
657 			}
658 			break;
659 		default:
660 			break;
661 		}
662 		if (tmpe->wired_count > 0)
663 			nothingwired = FALSE;
664 		tmpe = tmpe->next;
665 	}
666 
667 	if (bigobj)  {
668 		vm_object_hold(bigobj);
669 		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);
670 		vm_object_drop(bigobj);
671 	}
672 
673 	/*
674 	 * Next, hunt around for other pages to deactivate.  We actually
675 	 * do this search sort of wrong -- .text first is not the best idea.
676 	 */
677 	tmpe = map->header.next;
678 	while (tmpe != &map->header) {
679 		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
680 			break;
681 		switch(tmpe->maptype) {
682 		case VM_MAPTYPE_NORMAL:
683 		case VM_MAPTYPE_VPAGETABLE:
684 			obj = tmpe->object.vm_object;
685 			if (obj) {
686 				vm_object_hold(obj);
687 				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
688 				vm_object_drop(obj);
689 			}
690 			break;
691 		default:
692 			break;
693 		}
694 		tmpe = tmpe->next;
695 	}
696 
697 	/*
698 	 * Remove all mappings if a process is swapped out; this will free page
699 	 * table pages.
700 	 */
701 	if (desired == 0 && nothingwired)
702 		pmap_remove(vm_map_pmap(map),
703 			    VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
704 	vm_map_unlock(map);
705 }
706 #endif
707 
708 /*
709  * Called when the pageout scan wants to free a page.  We no longer
710  * try to cycle the vm_object here with a reference & dealloc, which can
711  * cause a non-trivial object collapse in a critical path.
712  *
713  * It is unclear why we cycled the ref_count in the past, perhaps to try
714  * to optimize shadow chain collapses but I don't quite see why it would
715  * be necessary.  An OBJ_DEAD object should terminate any and all vm_pages
716  * synchronously and not have to be kick-started.
717  */
718 static void
719 vm_pageout_page_free(vm_page_t m)
720 {
721 	vm_page_protect(m, VM_PROT_NONE);
722 	vm_page_free(m);
723 }
724 
725 /*
726  * vm_pageout_scan does the dirty work for the pageout daemon.
727  */
728 struct vm_pageout_scan_info {
729 	struct proc *bigproc;
730 	vm_offset_t bigsize;
731 };
732 
733 static int vm_pageout_scan_callback(struct proc *p, void *data);
734 
735 static int
736 vm_pageout_scan_inactive(int pass, int q, int avail_shortage,
737 			 int *vnodes_skippedp)
738 {
739 	vm_page_t m;
740 	struct vm_page marker;
741 	struct vnode *vpfailed;		/* warning, allowed to be stale */
742 	int maxscan;
743 	int delta = 0;
744 	vm_object_t object;
745 	int actcount;
746 	int maxlaunder;
747 
748 	/*
749 	 * Start scanning the inactive queue for pages we can move to the
750 	 * cache or free.  The scan will stop when the target is reached or
751 	 * we have scanned the entire inactive queue.  Note that m->act_count
752 	 * is not used to form decisions for the inactive queue, only for the
753 	 * active queue.
754 	 *
755 	 * maxlaunder limits the number of dirty pages we flush per scan.
756 	 * For most systems a smaller value (16 or 32) is more robust under
757 	 * extreme memory and disk pressure because any unnecessary writes
758 	 * to disk can result in extreme performance degradation.  However,
759 	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
760 	 * used) will die horribly with limited laundering.  If the pageout
761 	 * daemon cannot clean enough pages in the first pass, we let it go
762 	 * all out in succeeding passes.
763 	 */
764 	if ((maxlaunder = vm_max_launder) <= 1)
765 		maxlaunder = 1;
766 	if (pass)
767 		maxlaunder = 10000;
768 
769 	/*
770 	 * Initialize our marker
771 	 */
772 	bzero(&marker, sizeof(marker));
773 	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
774 	marker.queue = PQ_INACTIVE + q;
775 	marker.pc = q;
776 	marker.wire_count = 1;
777 
778 	/*
779 	 * Inactive queue scan.
780 	 *
781 	 * NOTE: The vm_page must be spinlocked before the queue to avoid
782 	 *	 deadlocks, so it is easiest to simply iterate the loop
783 	 *	 with the queue unlocked at the top.
784 	 */
785 	vpfailed = NULL;
786 
787 	vm_page_queues_spin_lock(PQ_INACTIVE + q);
788 	TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
789 	maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt;
790 	vm_page_queues_spin_unlock(PQ_INACTIVE + q);
791 
792 	while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
793 	       maxscan-- > 0 && avail_shortage - delta > 0)
794 	{
795 		vm_page_and_queue_spin_lock(m);
796 		if (m != TAILQ_NEXT(&marker, pageq)) {
797 			vm_page_and_queue_spin_unlock(m);
798 			++maxscan;
799 			continue;
800 		}
801 		KKASSERT(m->queue - m->pc == PQ_INACTIVE);
802 		TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
803 			     &marker, pageq);
804 		TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl, m,
805 				   &marker, pageq);
806 		mycpu->gd_cnt.v_pdpages++;
807 
808 		/*
809 		 * Skip marker pages
810 		 */
811 		if (m->flags & PG_MARKER) {
812 			vm_page_and_queue_spin_unlock(m);
813 			continue;
814 		}
815 
816 		/*
817 		 * Try to busy the page.  Don't mess with pages which are
818 		 * already busy or reorder them in the queue.
819 		 */
820 		if (vm_page_busy_try(m, TRUE)) {
821 			vm_page_and_queue_spin_unlock(m);
822 			continue;
823 		}
824 		vm_page_and_queue_spin_unlock(m);
825 		KKASSERT(m->queue - m->pc == PQ_INACTIVE);
826 
827 		lwkt_yield();
828 
829 		/*
830 		 * The page has been successfully busied and is now no
831 		 * longer spinlocked.  The queue is no longer spinlocked
832 		 * either.
833 		 */
834 
835 		/*
836 		 * It is possible for a page to be busied ad-hoc (e.g. the
837 		 * pmap_collect() code) and wired and race against the
838 		 * allocation of a new page.  vm_page_alloc() may be forced
839 		 * to deactivate the wired page in which case it winds up
840 		 * on the inactive queue and must be handled here.  We
841 		 * correct the problem simply by unqueuing the page.
842 		 */
843 		if (m->wire_count) {
844 			vm_page_unqueue_nowakeup(m);
845 			vm_page_wakeup(m);
846 			kprintf("WARNING: pagedaemon: wired page on "
847 				"inactive queue %p\n", m);
848 			continue;
849 		}
850 
851 		/*
852 		 * A held page may be undergoing I/O, so skip it.
853 		 */
854 		if (m->hold_count) {
855 			vm_page_and_queue_spin_lock(m);
856 			if (m->queue - m->pc == PQ_INACTIVE) {
857 				TAILQ_REMOVE(
858 					&vm_page_queues[PQ_INACTIVE + q].pl,
859 					m, pageq);
860 				TAILQ_INSERT_TAIL(
861 					&vm_page_queues[PQ_INACTIVE + q].pl,
862 					m, pageq);
863 				++vm_swapcache_inactive_heuristic;
864 			}
865 			vm_page_and_queue_spin_unlock(m);
866 			vm_page_wakeup(m);
867 			continue;
868 		}
869 
870 		if (m->object == NULL || m->object->ref_count == 0) {
871 			/*
872 			 * If the object is not being used, we ignore previous
873 			 * references.
874 			 */
875 			vm_page_flag_clear(m, PG_REFERENCED);
876 			pmap_clear_reference(m);
877 			/* fall through to end */
878 		} else if (((m->flags & PG_REFERENCED) == 0) &&
879 			    (actcount = pmap_ts_referenced(m))) {
880 			/*
881 			 * Otherwise, if the page has been referenced while
882 			 * in the inactive queue, we bump the "activation
883 			 * count" upwards, making it less likely that the
884 			 * page will be added back to the inactive queue
885 			 * prematurely again.  Here we check the page tables
886 			 * (or emulated bits, if any), since the upper level
887 			 * VM system knows nothing about existing
888 			 * references.
889 			 */
890 			vm_page_activate(m);
891 			m->act_count += (actcount + ACT_ADVANCE);
892 			vm_page_wakeup(m);
893 			continue;
894 		}
895 
896 		/*
897 		 * (m) is still busied.
898 		 *
899 		 * If the upper level VM system knows about any page
900 		 * references, we activate the page.  We also set the
901 		 * "activation count" higher than normal so that we are less
902 		 * likely to place the page back onto the inactive queue again.
903 		 */
904 		if ((m->flags & PG_REFERENCED) != 0) {
905 			vm_page_flag_clear(m, PG_REFERENCED);
906 			actcount = pmap_ts_referenced(m);
907 			vm_page_activate(m);
908 			m->act_count += (actcount + ACT_ADVANCE + 1);
909 			vm_page_wakeup(m);
910 			continue;
911 		}
912 
913 		/*
914 		 * If the upper level VM system doesn't know anything about
915 		 * the page being dirty, we have to check for it again.  As
916 		 * far as the VM code knows, any partially dirty pages are
917 		 * fully dirty.
918 		 *
919 		 * Pages marked PG_WRITEABLE may be mapped into the user
920 		 * address space of a process running on another cpu.  A
921 		 * user process (without holding the MP lock) running on
922 		 * another cpu may be able to touch the page while we are
923 		 * trying to remove it.  vm_page_cache() will handle this
924 		 * case for us.
925 		 */
926 		if (m->dirty == 0) {
927 			vm_page_test_dirty(m);
928 		} else {
929 			vm_page_dirty(m);
930 		}
931 
932 		if (m->valid == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
933 			/*
934 			 * Invalid pages can be easily freed
935 			 */
936 			vm_pageout_page_free(m);
937 			mycpu->gd_cnt.v_dfree++;
938 			++delta;
939 		} else if (m->dirty == 0 && (m->flags & PG_NEED_COMMIT) == 0) {
940 			/*
941 			 * Clean pages can be placed onto the cache queue.
942 			 * This effectively frees them.
943 			 */
944 			vm_page_cache(m);
945 			++delta;
946 		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
947 			/*
948 			 * Dirty pages need to be paged out, but flushing
949 			 * a page is extremely expensive versus freeing
950 			 * a clean page.  Rather than artificially limiting
951 			 * the number of pages we can flush, we instead give
952 			 * dirty pages extra priority on the inactive queue
953 			 * by forcing them to be cycled through the queue
954 			 * twice before being flushed, after which the
955 			 * (now clean) page will cycle through once more
956 			 * before being freed.  This significantly extends
957 			 * the thrash point for a heavily loaded machine.
958 			 */
959 			vm_page_flag_set(m, PG_WINATCFLS);
960 			vm_page_and_queue_spin_lock(m);
961 			if (m->queue - m->pc == PQ_INACTIVE) {
962 				TAILQ_REMOVE(
963 					&vm_page_queues[PQ_INACTIVE + q].pl,
964 					m, pageq);
965 				TAILQ_INSERT_TAIL(
966 					&vm_page_queues[PQ_INACTIVE + q].pl,
967 					m, pageq);
968 				++vm_swapcache_inactive_heuristic;
969 			}
970 			vm_page_and_queue_spin_unlock(m);
971 			vm_page_wakeup(m);
972 		} else if (maxlaunder > 0) {
973 			/*
974 			 * We always want to try to flush some dirty pages if
975 			 * we encounter them, to keep the system stable.
976 			 * Normally this number is small, but under extreme
977 			 * pressure where there are insufficient clean pages
978 			 * on the inactive queue, we may have to go all out.
979 			 */
980 			int swap_pageouts_ok;
981 			struct vnode *vp = NULL;
982 
983 			swap_pageouts_ok = 0;
984 			object = m->object;
985 			if (object &&
986 			    (object->type != OBJT_SWAP) &&
987 			    (object->type != OBJT_DEFAULT)) {
988 				swap_pageouts_ok = 1;
989 			} else {
990 				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
991 				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
992 				vm_page_count_min(0));
993 
994 			}
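			/*
			 * Editor's note on the two sysctls above: for
			 * swap-backed objects, pageout is allowed only
			 * if neither defer_swap_pageouts nor
			 * disable_swap_pageouts is set, except that a
			 * deferral (never a disable) is overridden once
			 * the system falls below its minimum page
			 * target.
			 */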
995 
996 			/*
997 			 * We don't bother paging objects that are "dead".
998 			 * Those objects are in a "rundown" state.
999 			 */
1000 			if (!swap_pageouts_ok ||
1001 			    (object == NULL) ||
1002 			    (object->flags & OBJ_DEAD)) {
1003 				vm_page_and_queue_spin_lock(m);
1004 				if (m->queue - m->pc == PQ_INACTIVE) {
1005 					TAILQ_REMOVE(
1006 					    &vm_page_queues[PQ_INACTIVE + q].pl,
1007 					    m, pageq);
1008 					TAILQ_INSERT_TAIL(
1009 					    &vm_page_queues[PQ_INACTIVE + q].pl,
1010 					    m, pageq);
1011 					++vm_swapcache_inactive_heuristic;
1012 				}
1013 				vm_page_and_queue_spin_unlock(m);
1014 				vm_page_wakeup(m);
1015 				continue;
1016 			}
1017 
1018 			/*
1019 			 * (m) is still busied.
1020 			 *
1021 			 * The object is already known NOT to be dead.   It
1022 			 * is possible for the vget() to block the whole
1023 			 * pageout daemon, but the new low-memory handling
1024 			 * code should prevent it.
1025 			 *
1026 			 * The previous code skipped locked vnodes and, worse,
1027 			 * reordered pages in the queue.  This results in
1028 			 * completely non-deterministic operation because,
1029 			 * quite often, a vm_fault has initiated an I/O and
1030 			 * is holding a locked vnode at just the point where
1031 			 * the pageout daemon is woken up.
1032 			 *
1033 			 * We can't wait forever for the vnode lock; we might
1034 			 * deadlock due to a vn_read() getting stuck in
1035 			 * vm_wait while holding this vnode.  We skip the
1036 			 * vnode if we can't get it in a reasonable amount
1037 			 * of time.
1038 			 *
1039 			 * vpfailed is used to (try to) avoid the case where
1040 			 * a large number of pages are associated with a
1041 			 * locked vnode, which could cause the pageout daemon
1042 			 * to stall for an excessive amount of time.
1043 			 */
1044 			if (object->type == OBJT_VNODE) {
1045 				int flags;
1046 
1047 				vp = object->handle;
1048 				flags = LK_EXCLUSIVE | LK_NOOBJ;
1049 				if (vp == vpfailed)
1050 					flags |= LK_NOWAIT;
1051 				else
1052 					flags |= LK_TIMELOCK;
1053 				vm_page_hold(m);
1054 				vm_page_wakeup(m);
1055 
1056 				/*
1057 				 * We have unbusied (m) temporarily so we can
1058 				 * acquire the vp lock without deadlocking.
1059 				 * (m) is held to prevent destruction.
1060 				 */
1061 				if (vget(vp, flags) != 0) {
1062 					vpfailed = vp;
1063 					++pageout_lock_miss;
1064 					if (object->flags & OBJ_MIGHTBEDIRTY)
1065 						    ++*vnodes_skippedp;
1066 					vm_page_unhold(m);
1067 					continue;
1068 				}
1069 
1070 				/*
1071 				 * The page might have been moved to another
1072 				 * queue during potential blocking in vget()
1073 				 * above.  The page might have been freed and
1074 				 * reused for another vnode.  The object might
1075 				 * have been reused for another vnode.
1076 				 */
1077 				if (m->queue - m->pc != PQ_INACTIVE ||
1078 				    m->object != object ||
1079 				    object->handle != vp) {
1080 					if (object->flags & OBJ_MIGHTBEDIRTY)
1081 						++*vnodes_skippedp;
1082 					vput(vp);
1083 					vm_page_unhold(m);
1084 					continue;
1085 				}
1086 
1087 				/*
1088 				 * The page may have been busied during the
1089 				 * blocking in vget() above.  We don't move
1090 				 * the page back onto the end of the queue,
1091 				 * so that the statistics remain more accurate.
1092 				 */
1093 				if (vm_page_busy_try(m, TRUE)) {
1094 					vput(vp);
1095 					vm_page_unhold(m);
1096 					continue;
1097 				}
1098 				vm_page_unhold(m);
1099 
1100 				/*
1101 				 * (m) is busied again
1102 				 *
1103 				 * We own the busy bit and remove our hold
1104 				 * bit.  If the page is still held it
1105 				 * might be undergoing I/O, so skip it.
1106 				 */
1107 				if (m->hold_count) {
1108 					vm_page_and_queue_spin_lock(m);
1109 					if (m->queue - m->pc == PQ_INACTIVE) {
1110 						TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
1111 						TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE + q].pl, m, pageq);
1112 						++vm_swapcache_inactive_heuristic;
1113 					}
1114 					vm_page_and_queue_spin_unlock(m);
1115 					if (object->flags & OBJ_MIGHTBEDIRTY)
1116 						++*vnodes_skippedp;
1117 					vm_page_wakeup(m);
1118 					vput(vp);
1119 					continue;
1120 				}
1121 				/* (m) is left busied as we fall through */
1122 			}
1123 
1124 			/*
1125 			 * page is busy and not held here.
1126 			 *
1127 			 * If a page is dirty, then it is either being washed
1128 			 * (but not yet cleaned) or it is still in the
1129 			 * laundry.  If it is still in the laundry, then we
1130 			 * start the cleaning operation.
1131 			 *
1132 			 * Count the (future) cleaned page against our
1133 			 * shortage (via delta) on success.  Otherwise we
1134 			 * could wind up laundering or cleaning too many
1135 			 * pages.
1136 			 */
1137 			if (vm_pageout_clean(m) != 0) {
1138 				++delta;
1139 				--maxlaunder;
1140 			}
1141 			/* clean ate busy, page no longer accessible */
1142 			if (vp != NULL)
1143 				vput(vp);
1144 		} else {
1145 			vm_page_wakeup(m);
1146 		}
1147 
1148 		/*
1149 		 * Systems with a ton of memory can wind up with huge
1150 		 * deactivation counts.  Because the inactive scan is
1151 		 * doing a lot of flushing, the combination can result
1152 		 * in excessive paging even in situations where other
1153 		 * unrelated threads free up sufficient VM.
1154 		 *
1155 		 * To deal with this we abort the nominal active->inactive
1156 		 * scan before we hit the inactive target when free+cache
1157 		 * levels have already reached their target.
1158 		 *
1159 		 * Note that nominally the inactive scan is not freeing or
1160 		 * caching pages, it is deactivating active pages, so it
1161 		 * will not by itself cause the abort condition.
1162 		 */
1163 		if (vm_paging_target() < 0)
1164 			break;
1165 	}
1166 	vm_page_queues_spin_lock(PQ_INACTIVE + q);
1167 	TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
1168 	vm_page_queues_spin_unlock(PQ_INACTIVE + q);
1169 	return (delta);
1170 }
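/*
 * Editor's sketch (illustrative, not compiled): the marker-based queue
 * walk used by the scans in this file, reduced to its skeleton.  The
 * marker is a fake PG_MARKER page kept on the queue so that the scan
 * position survives the queue spinlock being dropped while each real
 * page is processed.  The function name below is hypothetical.
 */
#if 0
static void
example_queue_walk(int q)
{
	struct vm_page marker;
	vm_page_t m;
	int maxscan;

	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE + q;
	marker.pc = q;
	marker.wire_count = 1;

	vm_page_queues_spin_lock(PQ_INACTIVE + q);
	TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE + q].pl,
			  &marker, pageq);
	maxscan = vm_page_queues[PQ_INACTIVE + q].lcnt;
	vm_page_queues_spin_unlock(PQ_INACTIVE + q);

	while ((m = TAILQ_NEXT(&marker, pageq)) != NULL && maxscan-- > 0) {
		vm_page_and_queue_spin_lock(m);
		if (m != TAILQ_NEXT(&marker, pageq)) {
			/* lost a race; retry from the marker */
			vm_page_and_queue_spin_unlock(m);
			++maxscan;
			continue;
		}
		/* move the marker past (m) to record our position */
		TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl,
			     &marker, pageq);
		TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE + q].pl,
				   m, &marker, pageq);
		vm_page_and_queue_spin_unlock(m);
		/* ... busy and process (m), possibly blocking ... */
	}

	vm_page_queues_spin_lock(PQ_INACTIVE + q);
	TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE + q].pl, &marker, pageq);
	vm_page_queues_spin_unlock(PQ_INACTIVE + q);
}
#endif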
1171 
1172 static int
1173 vm_pageout_scan_active(int pass, int q,
1174 		       int avail_shortage, int inactive_shortage,
1175 		       int *recycle_countp)
1176 {
1177 	struct vm_page marker;
1178 	vm_page_t m;
1179 	int actcount;
1180 	int delta = 0;
1181 	int maxscan;
1182 
1183 	/*
1184 	 * We want to move pages from the active queue to the inactive
1185 	 * queue to get the inactive queue to the inactive target.  If
1186 	 * we still have a page shortage from above we try to directly free
1187 	 * clean pages instead of moving them.
1188 	 *
1189 	 * If we do still have a shortage we keep track of the number of
1190 	 * pages we free or cache (recycle_count) as a measure of thrashing
1191 	 * between the active and inactive queues.
1192 	 *
1193 	 * If we were able to completely satisfy the free+cache targets
1194 	 * from the inactive pool we limit the number of pages we move
1195 	 * from the active pool to the inactive pool to 2x the pages we
1196 	 * had removed from the inactive pool (with a minimum of 1/5 the
1197 	 * inactive target).  If we were not able to completely satisfy
1198 	 * the free+cache targets we go for the whole target aggressively.
1199 	 *
1200 	 * NOTE: Both variables can end up negative.
1201 	 * NOTE: We are still in a critical section.
1202 	 */
1203 
1204 	bzero(&marker, sizeof(marker));
1205 	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1206 	marker.queue = PQ_ACTIVE + q;
1207 	marker.pc = q;
1208 	marker.wire_count = 1;
1209 
1210 	vm_page_queues_spin_lock(PQ_ACTIVE + q);
1211 	TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1212 	maxscan = vm_page_queues[PQ_ACTIVE + q].lcnt;
1213 	vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1214 
1215 	while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1216 	       maxscan-- > 0 && (avail_shortage - delta > 0 ||
1217 				inactive_shortage > 0))
1218 	{
1219 		vm_page_and_queue_spin_lock(m);
1220 		if (m != TAILQ_NEXT(&marker, pageq)) {
1221 			vm_page_and_queue_spin_unlock(m);
1222 			++maxscan;
1223 			continue;
1224 		}
1225 		KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1226 		TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1227 			     &marker, pageq);
1228 		TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1229 				   &marker, pageq);
1230 
1231 		/*
1232 		 * Skip marker pages
1233 		 */
1234 		if (m->flags & PG_MARKER) {
1235 			vm_page_and_queue_spin_unlock(m);
1236 			continue;
1237 		}
1238 
1239 		/*
1240 		 * Try to busy the page.  Don't mess with pages which are
1241 		 * already busy or reorder them in the queue.
1242 		 */
1243 		if (vm_page_busy_try(m, TRUE)) {
1244 			vm_page_and_queue_spin_unlock(m);
1245 			continue;
1246 		}
1247 
1248 		/*
1249 		 * Don't deactivate pages that are held, even if we can
1250 		 * busy them.  (XXX why not?)
1251 		 */
1252 		if (m->hold_count != 0) {
1253 			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl,
1254 				     m, pageq);
1255 			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE + q].pl,
1256 					  m, pageq);
1257 			vm_page_and_queue_spin_unlock(m);
1258 			vm_page_wakeup(m);
1259 			continue;
1260 		}
1261 		vm_page_and_queue_spin_unlock(m);
1262 		lwkt_yield();
1263 
1264 		/*
1265 		 * The page has been successfully busied and the page and
1266 		 * queue are no longer locked.
1267 		 */
1268 
1269 		/*
1270 		 * The count for pagedaemon pages is done after checking the
1271 		 * page for eligibility...
1272 		 */
1273 		mycpu->gd_cnt.v_pdpages++;
1274 
1275 		/*
1276 		 * Check to see "how much" the page has been used and clear
1277 		 * the tracking access bits.  If the object has no references
1278 		 * don't bother paying the expense.
1279 		 */
1280 		actcount = 0;
1281 		if (m->object && m->object->ref_count != 0) {
1282 			if (m->flags & PG_REFERENCED)
1283 				++actcount;
1284 			actcount += pmap_ts_referenced(m);
1285 			if (actcount) {
1286 				m->act_count += ACT_ADVANCE + actcount;
1287 				if (m->act_count > ACT_MAX)
1288 					m->act_count = ACT_MAX;
1289 			}
1290 		}
1291 		vm_page_flag_clear(m, PG_REFERENCED);
1292 
1293 		/*
1294 		 * actcount is only valid if the object ref_count is non-zero.
1295 		 * If the page does not have an object, actcount will be zero.
1296 		 */
1297 		if (actcount && m->object->ref_count != 0) {
1298 			vm_page_and_queue_spin_lock(m);
1299 			if (m->queue - m->pc == PQ_ACTIVE) {
1300 				TAILQ_REMOVE(
1301 					&vm_page_queues[PQ_ACTIVE + q].pl,
1302 					m, pageq);
1303 				TAILQ_INSERT_TAIL(
1304 					&vm_page_queues[PQ_ACTIVE + q].pl,
1305 					m, pageq);
1306 			}
1307 			vm_page_and_queue_spin_unlock(m);
1308 			vm_page_wakeup(m);
1309 		} else {
1310 			switch(m->object->type) {
1311 			case OBJT_DEFAULT:
1312 			case OBJT_SWAP:
1313 				m->act_count -= min(m->act_count,
1314 						    vm_anonmem_decline);
1315 				break;
1316 			default:
1317 				m->act_count -= min(m->act_count,
1318 						    vm_filemem_decline);
1319 				break;
1320 			}
1321 			if (vm_pageout_algorithm ||
1322 			    (m->object == NULL) ||
1323 			    (m->object && (m->object->ref_count == 0)) ||
1324 			    m->act_count < pass + 1
1325 			) {
1326 				/*
1327 				 * Deactivate the page.  If we had a
1328 				 * shortage from our inactive scan try to
1329 				 * free (cache) the page instead.
1330 				 *
1331 				 * Don't just blindly cache the page if
1332 				 * we do not have a shortage from the
1333 				 * inactive scan, that could lead to
1334 				 * gigabytes being moved.
1335 				 */
1336 				--inactive_shortage;
1337 				if (avail_shortage - delta > 0 ||
1338 				    (m->object && (m->object->ref_count == 0)))
1339 				{
1340 					if (avail_shortage - delta > 0)
1341 						++*recycle_countp;
1342 					vm_page_protect(m, VM_PROT_NONE);
1343 					if (m->dirty == 0 &&
1344 					    (m->flags & PG_NEED_COMMIT) == 0 &&
1345 					    avail_shortage - delta > 0) {
1346 						vm_page_cache(m);
1347 					} else {
1348 						vm_page_deactivate(m);
1349 						vm_page_wakeup(m);
1350 					}
1351 				} else {
1352 					vm_page_deactivate(m);
1353 					vm_page_wakeup(m);
1354 				}
1355 				++delta;
1356 			} else {
1357 				vm_page_and_queue_spin_lock(m);
1358 				if (m->queue - m->pc == PQ_ACTIVE) {
1359 					TAILQ_REMOVE(
1360 					    &vm_page_queues[PQ_ACTIVE + q].pl,
1361 					    m, pageq);
1362 					TAILQ_INSERT_TAIL(
1363 					    &vm_page_queues[PQ_ACTIVE + q].pl,
1364 					    m, pageq);
1365 				}
1366 				vm_page_and_queue_spin_unlock(m);
1367 				vm_page_wakeup(m);
1368 			}
1369 		}
1370 	}
1371 
1372 	/*
1373 	 * Clean out our local marker.
1374 	 */
1375 	vm_page_queues_spin_lock(PQ_ACTIVE + q);
1376 	TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1377 	vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1378 
1379 	return (delta);
1380 }
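/*
 * Editor's note on the act_count lifecycle driven by the scan above:
 * each scan that observes references adds ACT_ADVANCE (historically 3)
 * plus the number of references seen, capped at ACT_MAX (historically
 * 64), while each pass without references subtracts vm_anonmem_decline
 * or vm_filemem_decline.  A page becomes eligible for deactivation
 * once act_count falls below pass + 1, so under sustained pressure
 * (higher pass values) progressively "warmer" pages are deactivated.
 */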
1381 
1382 /*
1383  * The number of actually free pages can drop down to v_free_reserved;
1384  * we try to build the free count back above v_free_min.  Note that
1385  * vm_paging_needed() also returns TRUE if v_free_count is not at
1386  * least v_free_min so that is the minimum we must build the free
1387  * count to.
1388  *
1389  * We use a slightly higher target to improve hysteresis,
1390  * ((v_free_target + v_free_min) / 2).  Since v_free_target
1391  * is usually the same as v_cache_min this maintains about
1392  * half the pages in the free queue as are in the cache queue,
1393  * providing pretty good pipelining for pageout operation.
1394  *
1395  * The system operator can manipulate vm.v_cache_min and
1396  * vm.v_free_target to tune the pageout daemon.  Be sure
1397  * to keep vm.v_free_min < vm.v_free_target.
1398  *
1399  * Note that the original paging target is to get at least
1400  * (free_min + cache_min) into (free + cache).  The slightly
1401  * higher target will shift additional pages from cache to free
1402  * without affecting the original paging target in order to
1403  * maintain better hysteresis and not have the free count always
1404  * be dead-on v_free_min.
1405  *
1406  * NOTE: we are still in a critical section.
1407  *
1408  * Pages moved from PQ_CACHE to totally free are not counted in the
1409  * pages_freed counter.
1410  */
1411 static void
1412 vm_pageout_scan_cache(int avail_shortage, int vnodes_skipped, int recycle_count)
1413 {
1414 	struct vm_pageout_scan_info info;
1415 	vm_page_t m;
1416 
1417 	while (vmstats.v_free_count <
1418 	       (vmstats.v_free_min + vmstats.v_free_target) / 2) {
1419 		/*
1420 		 * This steals some code from vm/vm_page.c
1421 		 */
1422 		static int cache_rover = 0;
1423 
1424 		m = vm_page_list_find(PQ_CACHE, cache_rover & PQ_L2_MASK, FALSE);
1425 		if (m == NULL)
1426 			break;
1427 		/* page is returned removed from its queue and spinlocked */
1428 		if (vm_page_busy_try(m, TRUE)) {
1429 			vm_page_deactivate_locked(m);
1430 			vm_page_spin_unlock(m);
1431 #ifdef INVARIANTS
1432 			kprintf("Warning: busy page %p found in cache\n", m);
1433 #endif
1434 			continue;
1435 		}
1436 		vm_page_spin_unlock(m);
1437 		pagedaemon_wakeup();
1438 		lwkt_yield();
1439 
1440 		/*
1441 		 * Page has been successfully busied; it and its queue
1442 		 * are no longer spinlocked.
1443 		 */
1444 		if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
1445 		    m->hold_count ||
1446 		    m->wire_count) {
1447 			vm_page_deactivate(m);
1448 			vm_page_wakeup(m);
1449 			continue;
1450 		}
1451 		KKASSERT((m->flags & PG_MAPPED) == 0);
1452 		KKASSERT(m->dirty == 0);
1453 		cache_rover += PQ_PRIME2;
1454 		vm_pageout_page_free(m);
1455 		mycpu->gd_cnt.v_dfree++;
1456 	}
1457 
1458 #if !defined(NO_SWAPPING)
1459 	/*
1460 	 * Idle process swapout -- run once per second.
1461 	 */
1462 	if (vm_swap_idle_enabled) {
1463 		static long lsec;
1464 		if (time_second != lsec) {
1465 			vm_pageout_req_swapout |= VM_SWAP_IDLE;
1466 			vm_req_vmdaemon();
1467 			lsec = time_second;
1468 		}
1469 	}
1470 #endif
1471 
1472 	/*
1473 	 * If we didn't get enough free pages, and we have skipped a vnode
1474 	 * in a writeable object, wakeup the sync daemon.  And kick swapout
1475 	 * if we did not get enough free pages.
1476 	 */
1477 	if (vm_paging_target() > 0) {
1478 		if (vnodes_skipped && vm_page_count_min(0))
1479 			speedup_syncer();
1480 #if !defined(NO_SWAPPING)
1481 		if (vm_swap_enabled && vm_page_count_target()) {
1482 			vm_req_vmdaemon();
1483 			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
1484 		}
1485 #endif
1486 	}
1487 
1488 	/*
1489 	 * Handle catastrophic conditions.  Under good conditions we should
1490 	 * be at the target, well beyond our minimum.  If we could not even
1491 	 * reach our minimum the system is under heavy stress.
1492 	 *
1493 	 * Determine whether we have run out of memory.  This occurs when
1494 	 * swap_pager_full is TRUE and the only pages left in the page
1495 	 * queues are dirty.  We will still likely have page shortages.
1496 	 *
1497 	 * - swap_pager_full is set if insufficient swap was
1498 	 *   available to satisfy a requested pageout.
1499 	 *
1500 	 * - the inactive queue is bloated (4 x size of active queue),
1501 	 *   meaning the system is unable to get rid of dirty pages.
1502 	 *
1503 	 * - vm_page_count_min() without counting pages recycled from the
1504 	 *   active queue (recycle_count) means we could not recover
1505 	 *   enough pages to meet bare minimum needs.  This test only
1506 	 *   works if the inactive queue is bloated.
1507 	 *
1508 	 * - due to a positive avail_shortage we shifted the remaining
1509 	 *   dirty pages from the active queue to the inactive queue
1510 	 *   trying to find clean ones to free.
1511 	 */
1512 	if (swap_pager_full && vm_page_count_min(recycle_count))
1513 		kprintf("Warning: system low on memory+swap!\n");
1514 	if (swap_pager_full && vm_page_count_min(recycle_count) &&
1515 	    vmstats.v_inactive_count > vmstats.v_active_count * 4 &&
1516 	    avail_shortage > 0) {
1517 		/*
1518 		 * Kill something.
1519 		 */
1520 		info.bigproc = NULL;
1521 		info.bigsize = 0;
1522 		allproc_scan(vm_pageout_scan_callback, &info);
1523 		if (info.bigproc != NULL) {
1524 			killproc(info.bigproc, "out of swap space");
1525 			info.bigproc->p_nice = PRIO_MIN;
1526 			info.bigproc->p_usched->resetpriority(
1527 				FIRST_LWP_IN_PROC(info.bigproc));
1528 			wakeup(&vmstats.v_free_count);
1529 			PRELE(info.bigproc);
1530 		}
1531 	}
1532 }
1533 
1534 /*
1535  * The caller must hold proc_token.
1536  */
1537 static int
1538 vm_pageout_scan_callback(struct proc *p, void *data)
1539 {
1540 	struct vm_pageout_scan_info *info = data;
1541 	vm_offset_t size;
1542 
1543 	/*
1544 	 * Never kill system processes or init.  If we have configured swap
1545 	 * then try to avoid killing low-numbered pids.
1546 	 */
1547 	if ((p->p_flags & P_SYSTEM) || (p->p_pid == 1) ||
1548 	    ((p->p_pid < 48) && (vm_swap_size != 0))) {
1549 		return (0);
1550 	}
1551 
1552 	/*
1553 	 * If the process is in a non-running state,
1554 	 * don't touch it.
1555 	 */
1556 	if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
1557 		return (0);
1558 
1559 	/*
1560 	 * Get the approximate process size.  Note that anonymous pages
1561 	 * with backing swap will be counted twice, but there should not
1562 	 * be too many such pages due to the stress the VM system is
1563 	 * under at this point.
1564 	 */
1565 	size = vmspace_anonymous_count(p->p_vmspace) +
1566 		vmspace_swap_count(p->p_vmspace);
1567 
1568 	/*
1569 	 * If this process is bigger than the biggest one
1570 	 * remember it.
1571 	 */
1572 	if (info->bigsize < size) {
1573 		if (info->bigproc)
1574 			PRELE(info->bigproc);
1575 		PHOLD(p);
1576 		info->bigproc = p;
1577 		info->bigsize = size;
1578 	}
1579 	lwkt_yield();
1580 	return(0);
1581 }
1582 
1583 /*
1584  * This routine tries to maintain the pseudo LRU active queue,
1585  * so that during long periods of time when there is no paging,
1586  * some statistic accumulation still occurs.  This code
1587  * helps the situation where paging just starts to occur.
1588  */
1589 static void
1590 vm_pageout_page_stats(int q)
1591 {
1592 	static int fullintervalcount = 0;
1593 	struct vm_page marker;
1594 	vm_page_t m;
1595 	int pcount, tpcount;		/* Number of pages to check */
1596 	int page_shortage;
1597 
1598 	page_shortage = (vmstats.v_inactive_target + vmstats.v_cache_max +
1599 			 vmstats.v_free_min) -
1600 			(vmstats.v_free_count + vmstats.v_inactive_count +
1601 			 vmstats.v_cache_count);
1602 
1603 	if (page_shortage <= 0)
1604 		return;
1605 
1606 	pcount = vm_page_queues[PQ_ACTIVE + q].lcnt;
1607 	fullintervalcount += vm_pageout_stats_interval;
1608 	if (fullintervalcount < vm_pageout_full_stats_interval) {
1609 		tpcount = (vm_pageout_stats_max * pcount) /
1610 			  vmstats.v_page_count + 1;
1611 		if (pcount > tpcount)
1612 			pcount = tpcount;
1613 	} else {
1614 		fullintervalcount = 0;
1615 	}
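	/*
	 * Editor's example: with vm_pageout_stats_max pages budgeted per
	 * partial pass, a queue holding pcount of the system's
	 * v_page_count pages is scanned proportionally, i.e. at most
	 * roughly vm_pageout_stats_max * (pcount / v_page_count) + 1
	 * pages, so the combined partial passes over all queues stay
	 * near the configured budget.
	 */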
1616 
1617 	bzero(&marker, sizeof(marker));
1618 	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
1619 	marker.queue = PQ_ACTIVE + q;
1620 	marker.pc = q;
1621 	marker.wire_count = 1;
1622 
1623 	vm_page_queues_spin_lock(PQ_ACTIVE + q);
1624 	TAILQ_INSERT_HEAD(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1625 	vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1626 
1627 	while ((m = TAILQ_NEXT(&marker, pageq)) != NULL &&
1628 	       pcount-- > 0)
1629 	{
1630 		int actcount;
1631 
1632 		vm_page_and_queue_spin_lock(m);
1633 		if (m != TAILQ_NEXT(&marker, pageq)) {
1634 			vm_page_and_queue_spin_unlock(m);
1635 			++pcount;
1636 			continue;
1637 		}
1638 		KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1639 		TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1640 		TAILQ_INSERT_AFTER(&vm_page_queues[PQ_ACTIVE + q].pl, m,
1641 				   &marker, pageq);
1642 
1643 		/*
1644 		 * Ignore markers
1645 		 */
1646 		if (m->flags & PG_MARKER) {
1647 			vm_page_and_queue_spin_unlock(m);
1648 			continue;
1649 		}
1650 
1651 		/*
1652 		 * Ignore pages we can't busy
1653 		 */
1654 		if (vm_page_busy_try(m, TRUE)) {
1655 			vm_page_and_queue_spin_unlock(m);
1656 			continue;
1657 		}
1658 		vm_page_and_queue_spin_unlock(m);
1659 		KKASSERT(m->queue - m->pc == PQ_ACTIVE);
1660 
1661 		/*
1662 		 * We now have a safely busied page, the page and queue
1663 		 * spinlocks have been released.
1664 		 *
1665 		 * Ignore held pages
1666 		 */
1667 		if (m->hold_count) {
1668 			vm_page_wakeup(m);
1669 			continue;
1670 		}
1671 
1672 		/*
1673 		 * Calculate activity
1674 		 */
1675 		actcount = 0;
1676 		if (m->flags & PG_REFERENCED) {
1677 			vm_page_flag_clear(m, PG_REFERENCED);
1678 			actcount += 1;
1679 		}
1680 		actcount += pmap_ts_referenced(m);
1681 
1682 		/*
1683 		 * Update act_count and move page to end of queue.
1684 		 */
1685 		if (actcount) {
1686 			m->act_count += ACT_ADVANCE + actcount;
1687 			if (m->act_count > ACT_MAX)
1688 				m->act_count = ACT_MAX;
1689 			vm_page_and_queue_spin_lock(m);
1690 			if (m->queue - m->pc == PQ_ACTIVE) {
1691 				TAILQ_REMOVE(
1692 					&vm_page_queues[PQ_ACTIVE + q].pl,
1693 					m, pageq);
1694 				TAILQ_INSERT_TAIL(
1695 					&vm_page_queues[PQ_ACTIVE + q].pl,
1696 					m, pageq);
1697 			}
1698 			vm_page_and_queue_spin_unlock(m);
1699 			vm_page_wakeup(m);
1700 			continue;
1701 		}
1702 
1703 		if (m->act_count == 0) {
1704 			/*
1705 			 * We turn off page access, so that we have
1706 			 * more accurate RSS stats.  We don't do this
1707 			 * in the normal page deactivation when the
1708 			 * system is loaded VM wise, because the
1709 			 * cost of the large number of page protect
1710 			 * operations would be higher than the value
1711 			 * of doing the operation.
1712 			 *
1713 			 * We use the marker to save our place so
1714 			 * we can release the spin lock.  both (m)
1715 			 * we can release the spin lock.  Both (m)
1716 			 */
1717 			vm_page_protect(m, VM_PROT_NONE);
1718 			vm_page_deactivate(m);
1719 		} else {
1720 			m->act_count -= min(m->act_count, ACT_DECLINE);
1721 			vm_page_and_queue_spin_lock(m);
1722 			if (m->queue - m->pc == PQ_ACTIVE) {
1723 				TAILQ_REMOVE(
1724 					&vm_page_queues[PQ_ACTIVE + q].pl,
1725 					m, pageq);
1726 				TAILQ_INSERT_TAIL(
1727 					&vm_page_queues[PQ_ACTIVE + q].pl,
1728 					m, pageq);
1729 			}
1730 			vm_page_and_queue_spin_unlock(m);
1731 		}
1732 		vm_page_wakeup(m);
1733 	}
1734 
1735 	/*
1736 	 * Remove our local marker
1737 	 */
1738 	vm_page_queues_spin_lock(PQ_ACTIVE + q);
1739 	TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE + q].pl, &marker, pageq);
1740 	vm_page_queues_spin_unlock(PQ_ACTIVE + q);
1741 }
1742 
1743 static int
1744 vm_pageout_free_page_calc(vm_size_t count)
1745 {
1746 	if (count < vmstats.v_page_count)
1747 		return 0;
1748 	/*
1749 	 * free_reserved needs to include enough for the largest swap pager
1750 	 * structures plus enough for any pv_entry structs when paging.
1751 	 *
1752 	 * v_free_min		normal allocations
1753 	 * v_free_reserved	system allocations
1754 	 * v_pageout_free_min	allocations by pageout daemon
1755 	 * v_interrupt_free_min	low-level allocations (e.g. swap structures)
1756 	 */
1757 	if (vmstats.v_page_count > 1024)
1758 		vmstats.v_free_min = 64 + (vmstats.v_page_count - 1024) / 200;
1759 	else
1760 		vmstats.v_free_min = 64;
1761 	vmstats.v_free_reserved = vmstats.v_free_min * 4 / 8 + 7;
1762 	vmstats.v_free_severe = vmstats.v_free_min * 4 / 8 + 0;
1763 	vmstats.v_pageout_free_min = vmstats.v_free_min * 2 / 8 + 7;
1764 	vmstats.v_interrupt_free_min = vmstats.v_free_min * 1 / 8 + 7;
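	/*
	 * For example, assuming 4KiB pages, a machine with ~1GB of RAM
	 * (262144 pages) works out to:
	 *
	 * v_free_min           = 64 + (262144 - 1024) / 200 = 1369 (~5.3MB)
	 * v_free_reserved      = 1369 * 4 / 8 + 7 = 691
	 * v_free_severe        = 1369 * 4 / 8     = 684
	 * v_pageout_free_min   = 1369 * 2 / 8 + 7 = 349
	 * v_interrupt_free_min = 1369 * 1 / 8 + 7 = 178
	 */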
1765 
1766 	return 1;
1767 }
1768 
1769 
1770 /*
1771  * vm_pageout is the high level pageout daemon.
1772  *
1773  * No requirements.
1774  */
1775 static void
1776 vm_pageout_thread(void)
1777 {
1778 	int pass;
1779 	int q;
1780 
1781 	/*
1782 	 * Initialize some paging parameters.
1783 	 */
1784 	curthread->td_flags |= TDF_SYSTHREAD;
1785 
1786 	if (vmstats.v_page_count < 2000)
1787 		vm_pageout_page_count = 8;
1788 
1789 	vm_pageout_free_page_calc(vmstats.v_page_count);
1790 
1791 	/*
1792 	 * v_free_target and v_cache_min control pageout hysteresis.  Note
1793 	 * that these are more a measure of the VM cache queue hysteresis
1794 	 * than the VM free queue.  Specifically, v_free_target is the
1795 	 * high water mark (free+cache pages).
1796 	 *
1797 	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1798 	 * low water mark, while v_free_min is the stop.  v_cache_min must
1799 	 * be big enough to handle memory needs while the pageout daemon
1800 	 * is signalled and run to free more pages.
1801 	 */
1802 	if (vmstats.v_free_count > 6144)
1803 		vmstats.v_free_target = 4 * vmstats.v_free_min + vmstats.v_free_reserved;
1804 	else
1805 		vmstats.v_free_target = 2 * vmstats.v_free_min + vmstats.v_free_reserved;
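	/*
	 * (With 4KiB pages the 6144-page threshold is 24MB.  Continuing
	 *  the example above, a ~1GB machine gets v_free_target =
	 *  4 * 1369 + 691 = 6167 pages, roughly 24MB of free+cache
	 *  headroom.)
	 */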
1806 
1807 	/*
1808 	 * NOTE: With the new buffer cache b_act_count we want the default
1809 	 *	 inactive target to be a percentage of available memory.
1810 	 *
1811 	 *	 The inactive target essentially determines the minimum
1812 	 *	 number of 'temporary' pages capable of caching one-time-use
1813 	 *	 files when the VM system is otherwise full of pages
1814 	 *	 belonging to multi-time-use files or active program data.
1815 	 *
1816 	 * NOTE: The inactive target is aggressively pursued only if the
1817 	 *	 inactive queue becomes too small.  If the inactive queue
1818 	 *	 is large enough to satisfy page movement to free+cache
1819 	 *	 then it is repopulated more slowly from the active queue.
1820 	 *	 This allows a general inactive_target default to be set.
1821 	 *
1822 	 *	 There is an issue here for processes which sit mostly idle
1823 	 *	 'overnight', such as sshd, tcsh, and X.  Any movement from
1824 	 *	 the active queue will eventually cause such pages to
1825 	 *	 be recycled, causing a lot of paging in the morning.
1826 	 *	 To reduce the incidence of this, pages cycled out of the
1827 	 *	 buffer cache are moved directly to the inactive queue if
1828 	 *	 they were only used once or twice.
1829 	 *
1830 	 *	 The vfs.vm_cycle_point sysctl can be used to adjust this.
1831 	 *	 Increasing the value (up to 64) increases the number of
1832 	 *	 buffer recyclements which go directly to the inactive queue.
1833 	 */
1834 	if (vmstats.v_free_count > 2048) {
1835 		vmstats.v_cache_min = vmstats.v_free_target;
1836 		vmstats.v_cache_max = 2 * vmstats.v_cache_min;
1837 	} else {
1838 		vmstats.v_cache_min = 0;
1839 		vmstats.v_cache_max = 0;
1840 	}
1841 	vmstats.v_inactive_target = vmstats.v_free_count / 4;
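	/*
	 * (Nearly all pages are still free when the daemon starts up,
	 *  so the inactive target works out to roughly 1/4 of RAM.)
	 */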
1842 
1843 	/* XXX does not really belong here */
1844 	if (vm_page_max_wired == 0)
1845 		vm_page_max_wired = vmstats.v_free_count / 3;
1846 
1847 	if (vm_pageout_stats_max == 0)
1848 		vm_pageout_stats_max = vmstats.v_free_target;
1849 
1850 	/*
1851 	 * Set interval in seconds for stats scan.
1852 	 */
1853 	if (vm_pageout_stats_interval == 0)
1854 		vm_pageout_stats_interval = 5;
1855 	if (vm_pageout_full_stats_interval == 0)
1856 		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1857 
1858 
1859 	/*
1860 	 * Set maximum free per pass
1861 	 */
1862 	if (vm_pageout_stats_free_max == 0)
1863 		vm_pageout_stats_free_max = 5;
1864 
1865 	swap_pager_swap_init();
1866 	pass = 0;
1867 
1868 	/*
1869 	 * The pageout daemon is never done, so loop forever.
1870 	 */
1871 	while (TRUE) {
1872 		int error;
1873 		int delta1;
1874 		int delta2;
1875 		int avail_shortage;
1876 		int inactive_shortage;
1877 		int vnodes_skipped = 0;
1878 		int recycle_count = 0;
1879 		int tmp;
1880 
1881 		/*
1882 		 * Wait for an action request.  If we time out, check to
1883 		 * see if paging is needed (in case the normal wakeup
1884 		 * code raced us).
1885 		 */
1886 		if (vm_pages_needed == 0) {
1887 			error = tsleep(&vm_pages_needed,
1888 				       0, "psleep",
1889 				       vm_pageout_stats_interval * hz);
1890 			if (error &&
1891 			    vm_paging_needed() == 0 &&
1892 			    vm_pages_needed == 0) {
1893 				for (q = 0; q < PQ_L2_SIZE; ++q)
1894 					vm_pageout_page_stats(q);
1895 				continue;
1896 			}
1897 			vm_pages_needed = 1;
1898 		}
1899 
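		/*
		 * Record the wakeup in the per-cpu v_pdwakeups statistic.
		 */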
1900 		mycpu->gd_cnt.v_pdwakeups++;
1901 
1902 		/*
1903 		 * Do whatever cleanup that the pmap code can.
1904 		 */
1905 		pmap_collect();
1906 
1907 		/*
1908 		 * Scan for pageout.  Try to avoid thrashing the system
1909 		 * with activity.
1910 		 *
1911 		 * Calculate our target for the number of free+cache pages we
1912 		 * want to get to.  This is higher than the number that causes
1913 		 * allocations to stall (severe) in order to provide hysteresis,
1914 		 * and if we don't make it all the way but get to the minimum
1915 		 * we're happy.  Goose it a bit if there are multiple
1916 		 * requests for memory.
1917 		 */
1918 		avail_shortage = vm_paging_target() + vm_pageout_deficit;
1919 		vm_pageout_deficit = 0;
1920 		delta1 = 0;
1921 		if (avail_shortage > 0) {
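			/*
			 * Ask each of the PQ_L2_SIZE inactive queues to
			 * handle its share (PQAVERAGE) of the shortage.
			 */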
1922 			for (q = 0; q < PQ_L2_SIZE; ++q) {
1923 				delta1 += vm_pageout_scan_inactive(
1924 					    pass, q,
1925 					    PQAVERAGE(avail_shortage),
1926 					    &vnodes_skipped);
1927 			}
1928 			avail_shortage -= delta1;
1929 		}
1930 
1931 		/*
1932 		 * Figure out how many active pages we must deactivate.  If
1933 		 * we were able to reach our target with just the inactive
1934 		 * scan above we limit the number of active pages we
1935 		 * deactivate to reduce unnecessary work.
1936 		 */
1937 		inactive_shortage = vmstats.v_inactive_target -
1938 				    vmstats.v_inactive_count;
1939 
1940 		/*
1941 		 * If we were unable to free sufficient inactive pages to
1942 		 * satisfy the free/cache queue requirements then simply
1943 		 * reaching the inactive target may not be good enough.
1944 		 * Try to deactivate pages in excess of the target based
1945 		 * on the shortfall.
1946 		 *
1947 		 * However to prevent thrashing the VM system do not
1948 		 * deactivate more than an additional 1/10 the inactive
1949 		 * target's worth of active pages.
1950 		 */
1951 		if (avail_shortage > 0) {
1952 			tmp = avail_shortage * 2;
1953 			if (tmp > vmstats.v_inactive_target / 10)
1954 				tmp = vmstats.v_inactive_target / 10;
1955 			inactive_shortage += tmp;
1956 		}
1957 
1958 		if (avail_shortage > 0 || inactive_shortage > 0) {
1959 			delta2 = 0;
1960 			for (q = 0; q < PQ_L2_SIZE; ++q) {
1961 				delta2 += vm_pageout_scan_active(
1962 						pass, q,
1963 						PQAVERAGE(avail_shortage),
1964 						PQAVERAGE(inactive_shortage),
1965 						&recycle_count);
1966 			}
1967 			inactive_shortage -= delta2;
1968 			avail_shortage -= delta2;
1969 		}
1970 
1971 		/*
1972 		 * Finally free enough cache pages to meet our free page
1973 		 * requirement and take more drastic measures if we are
1974 		 * still in trouble.
1975 		 */
1976 		vm_pageout_scan_cache(avail_shortage, vnodes_skipped,
1977 				      recycle_count);
1978 
1979 		/*
1980 		 * Wait for more work.
1981 		 */
1982 		if (avail_shortage > 0) {
1983 			++pass;
1984 			if (swap_pager_full) {
1985 				/*
1986 				 * Running out of memory, catastrophic back-off
1987 				 * to one-second intervals.
1988 				 */
1989 				tsleep(&vm_pages_needed, 0, "pdelay", hz);
1990 			} else if (pass < 10 && vm_pages_needed > 1) {
1991 				/*
1992 				 * Normal operation, additional processes
1993 				 * have already kicked us.  Retry immediately.
1994 				 */
1995 			} else if (pass < 10) {
1996 				/*
1997 				 * Normal operation, fewer processes.  Delay
1998 				 * a bit but allow wakeups.
1999 				 */
2000 				vm_pages_needed = 0;
2001 				tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2002 				vm_pages_needed = 1;
2003 			} else {
2004 				/*
2005 				 * We've taken too many passes, forced delay.
2006 				 */
2007 				tsleep(&vm_pages_needed, 0, "pdelay", hz / 10);
2008 			}
2009 		} else {
2010 			/*
2011 			 * Interlocked wakeup of waiters (non-optional)
2012 			 */
2013 			pass = 0;
2014 			if (vm_pages_needed && !vm_page_count_min(0)) {
2015 				wakeup(&vmstats.v_free_count);
2016 				vm_pages_needed = 0;
2017 			}
2018 		}
2019 	}
2020 }
2021 
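/*
 * Register the pagedaemon kernel thread; kproc_start() creates it from
 * this descriptor during the SI_SUB_KTHREAD_PAGE stage of boot.
 */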
2022 static struct kproc_desc page_kp = {
2023 	"pagedaemon",
2024 	vm_pageout_thread,
2025 	&pagethread
2026 };
2027 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
2028 
2029 
2030 /*
2031  * Called after allocating a page out of the cache or free queue
2032  * to possibly wake the pagedaemon up to replenish our supply.
2033  *
2034  * We try to generate some hysteresis by waking the pagedaemon up
2035  * when our free+cache pages go below the free_min+cache_min level.
2036  * The pagedaemon tries to get the count back up to at least the
2037  * minimum, and through to the target level if possible.
2038  *
2039  * If the pagedaemon is already active bump vm_pages_needed as a hint
2040  * that there are even more requests pending.
2041  *
2042  * SMP races ok?
2043  * No requirements.
2044  */
2045 void
2046 pagedaemon_wakeup(void)
2047 {
2048 	if (vm_paging_needed() && curthread != pagethread) {
2049 		if (vm_pages_needed == 0) {
2050 			vm_pages_needed = 1;	/* SMP race ok */
2051 			wakeup(&vm_pages_needed);
2052 		} else if (vm_page_count_min(0)) {
2053 			++vm_pages_needed;	/* SMP race ok */
2054 		}
2055 	}
2056 }
2057 
2058 #if !defined(NO_SWAPPING)
2059 
2060 /*
2061  * SMP races ok?
2062  * No requirements.
2063  */
2064 static void
2065 vm_req_vmdaemon(void)
2066 {
2067 	static int lastrun = 0;
2068 
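	/*
	 * Wake the vm_daemon at most once per second.  The second test
	 * handles the tick counter wrapping around.
	 */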
2069 	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2070 		wakeup(&vm_daemon_needed);
2071 		lastrun = ticks;
2072 	}
2073 }
2074 
2075 static int vm_daemon_callback(struct proc *p, void *data __unused);
2076 
2077 /*
2078  * No requirements.
2079  */
2080 static void
2081 vm_daemon(void)
2082 {
2083 	/*
2084 	 * XXX vm_daemon_needed specific token?
2085 	 */
2086 	while (TRUE) {
2087 		tsleep(&vm_daemon_needed, 0, "psleep", 0);
2088 		if (vm_pageout_req_swapout) {
2089 			swapout_procs(vm_pageout_req_swapout);
2090 			vm_pageout_req_swapout = 0;
2091 		}
2092 		/*
2093 		 * Scan processes which exceed their RSS rlimit or which
2094 		 * are swapped out, and deactivate their pages.
2095 		 */
2096 		allproc_scan(vm_daemon_callback, NULL);
2097 	}
2098 }
2099 
2100 /*
2101  * Caller must hold proc_token.
2102  */
2103 static int
2104 vm_daemon_callback(struct proc *p, void *data __unused)
2105 {
2106 	vm_pindex_t limit, size;
2107 
2108 	/*
2109 	 * If this is a system process or a process which is
2110 	 * exiting, skip it.
2111 	 */
2112 	if (p->p_flags & (P_SYSTEM | P_WEXIT))
2113 		return (0);
2114 
2115 	/*
2116 	 * if the process is in a non-running type state,
2117 	 * don't touch it.
2118 	 */
2119 	if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
2120 		return (0);
2121 
2122 	/*
2123 	 * Get the RSS limit, converted from bytes to a page count.
2124 	 */
2125 	limit = OFF_TO_IDX(qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
2126 			        p->p_rlimit[RLIMIT_RSS].rlim_max));
2127 
2128 	/*
2129 	 * let processes that are swapped out really be
2130 	 * swapped out.  Set the limit to nothing to get as
2131 	 * many pages out to swap as possible.
2132 	 */
2133 	if (p->p_flags & P_SWAPPEDOUT)
2134 		limit = 0;
2135 
2136 	lwkt_gettoken(&p->p_vmspace->vm_map.token);
2137 	size = vmspace_resident_count(p->p_vmspace);
2138 	if (size >= limit) {
2139 		vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map, limit);
2140 	}
2141 	lwkt_reltoken(&p->p_vmspace->vm_map.token);
2142 	return (0);
2143 }
2144 
2145 #endif
2146