/*
 * (MPSAFE)
 *
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implement the swapcache daemon.  When enabled, swap is assumed to be
 * configured on a fast storage device such as an SSD.  Swap is assigned
 * to clean vnode-backed pages in the inactive queue, clustered by object
 * if possible, and written out.  The swap assignment sticks around even
 * after the underlying pages have been recycled.
 *
 * The daemon manages write bandwidth based on sysctl settings to control
 * wear on the SSD.
 *
 * The vnode strategy code will check for the swap assignments and divert
 * reads to the swap device when the data is present in the swapcache.
 *
 * This operates on both regular files and the block device vnodes used by
 * filesystems to manage meta-data.
 */
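
/*
 * Example (illustrative): with swap configured on an SSD the cache is
 * typically enabled via the sysctls defined below, e.g.:
 *
 *	sysctl vm.swapcache.data_enable=1	# cache clean file data
 *	sysctl vm.swapcache.meta_enable=1	# cache filesystem meta-data
 *
 * Note that with vm.swapcache.use_chflags at its default of 1, file
 * data is only cached for vnodes marked VSWAPCACHE via chflags.
 */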

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <vm/vm_page2.h>

#define INACTIVE_LIST	(&vm_page_queues[PQ_INACTIVE].pl)

/* the kernel thread "swapcached" */
static int vm_swapcached_flush(vm_page_t m, int isblkdev);
static int vm_swapcache_test(vm_page_t m);
static void vm_swapcache_writing(vm_page_t marker);
static void vm_swapcache_cleaning(vm_object_t marker);
struct thread *swapcached_thread;

SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);

int vm_swapcache_read_enable;
int vm_swapcache_inactive_heuristic;
static int vm_swapcache_sleep;
static int vm_swapcache_maxlaunder = 256;
static int vm_swapcache_data_enable = 0;
static int vm_swapcache_meta_enable = 0;
static int vm_swapcache_maxswappct = 75;
static int vm_swapcache_hysteresis;
static int vm_swapcache_use_chflags = 1;	/* require chflags cache */
static int64_t vm_swapcache_minburst = 10000000LL;	/* 10MB */
static int64_t vm_swapcache_curburst = 4000000000LL;	/* 4G after boot */
static int64_t vm_swapcache_maxburst = 2000000000LL;	/* 2G nominal max */
static int64_t vm_swapcache_accrate = 100000LL;		/* 100K/s */
static int64_t vm_swapcache_write_count;
static int64_t vm_swapcache_maxfilesize;

SYSCTL_INT(_vm_swapcache, OID_AUTO, maxlaunder,
	CTLFLAG_RW, &vm_swapcache_maxlaunder, 0, "");

SYSCTL_INT(_vm_swapcache, OID_AUTO, data_enable,
	CTLFLAG_RW, &vm_swapcache_data_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, meta_enable,
	CTLFLAG_RW, &vm_swapcache_meta_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
	CTLFLAG_RW, &vm_swapcache_read_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxswappct,
	CTLFLAG_RW, &vm_swapcache_maxswappct, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, hysteresis,
	CTLFLAG_RW, &vm_swapcache_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, use_chflags,
	CTLFLAG_RW, &vm_swapcache_use_chflags, 0, "");

SYSCTL_QUAD(_vm_swapcache, OID_AUTO, minburst,
	CTLFLAG_RW, &vm_swapcache_minburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, curburst,
	CTLFLAG_RW, &vm_swapcache_curburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxburst,
	CTLFLAG_RW, &vm_swapcache_maxburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxfilesize,
	CTLFLAG_RW, &vm_swapcache_maxfilesize, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, accrate,
	CTLFLAG_RW, &vm_swapcache_accrate, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, write_count,
	CTLFLAG_RW, &vm_swapcache_write_count, 0, "");

#define SWAPMAX(adj)	\
	((int64_t)vm_swap_max * (vm_swapcache_maxswappct + (adj)) / 100)
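
/*
 * e.g. with the default vm_swapcache_maxswappct of 75, SWAPMAX(0) is
 * 75% of total swap and SWAPMAX(-5) is 70%; the main loop below uses
 * these two points as its write/clean hysteresis.
 */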

/*
 * vm_swapcached is the high level swapcache daemon.
 *
 * No requirements.
 */
static void
vm_swapcached_thread(void)
{
	enum { SWAPC_WRITING, SWAPC_CLEANING } state = SWAPC_WRITING;
	enum { SWAPB_BURSTING, SWAPB_RECOVERING } burst = SWAPB_BURSTING;
	struct vm_page page_marker;
	struct vm_object object_marker;

	/*
	 * Thread setup
	 */
	curthread->td_flags |= TDF_SYSTHREAD;

	lwkt_gettoken(&vm_token);
	crit_enter();

	/*
	 * Initialize our marker for the inactive scan (SWAPC_WRITING)
	 */
	bzero(&page_marker, sizeof(page_marker));
	page_marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	page_marker.queue = PQ_INACTIVE;
	page_marker.wire_count = 1;
	TAILQ_INSERT_HEAD(INACTIVE_LIST, &page_marker, pageq);
	vm_swapcache_hysteresis = vmstats.v_inactive_target / 2;
	vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
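	/*
	 * The heuristic starts out negative (-hysteresis) and
	 * vm_swapcache_writing() does no work until the VM system has
	 * bumped it back up to zero, batching pageouts into bursts.
	 */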

	/*
	 * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
	 */
	bzero(&object_marker, sizeof(object_marker));
	object_marker.type = OBJT_MARKER;
	lwkt_gettoken(&vmobj_token);
	TAILQ_INSERT_HEAD(&vm_object_list, &object_marker, object_list);
	lwkt_reltoken(&vmobj_token);

	for (;;) {
		/*
		 * Check every 5 seconds when not enabled or if no swap
		 * is present.
		 */
		if ((vm_swapcache_data_enable == 0 &&
		     vm_swapcache_meta_enable == 0) ||
		    vm_swap_max == 0) {
			tsleep(&vm_swapcache_sleep, 0, "csleep", hz * 5);
			continue;
		}

		/*
		 * Polling rate when enabled is approximately 10 hz.
		 */
		tsleep(&vm_swapcache_sleep, 0, "csleep", hz / 10);

		/*
		 * State hysteresis.  Generate write activity up to 75% of
		 * swap, then clean out swap assignments down to 70%, then
		 * repeat.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swap_cache_use > SWAPMAX(0))
				state = SWAPC_CLEANING;
		} else {
			if (vm_swap_cache_use < SWAPMAX(-5))
				state = SWAPC_WRITING;
		}

		/*
		 * We are allowed to continue accumulating burst value
		 * in either state.  Allow the user to set curburst > maxburst
		 * for the initial load-in.
		 */
		if (vm_swapcache_curburst < vm_swapcache_maxburst) {
			vm_swapcache_curburst += vm_swapcache_accrate / 10;
			if (vm_swapcache_curburst > vm_swapcache_maxburst)
				vm_swapcache_curburst = vm_swapcache_maxburst;
		}

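		/*
		 * At the default accrate of 100KB/s the ~10hz poll above
		 * adds accrate/10 bytes per tick, i.e. curburst recovers
		 * roughly 100KB of write budget per second.
		 */
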
		/*
		 * We don't want to nickel-and-dime the scan as that will
		 * create unnecessary fragmentation.  The minimum burst
		 * is one second's worth of accumulation.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swapcache_curburst >= vm_swapcache_accrate) {
				if (burst == SWAPB_BURSTING) {
					vm_swapcache_writing(&page_marker);
					if (vm_swapcache_curburst <= 0)
						burst = SWAPB_RECOVERING;
				} else if (vm_swapcache_curburst >
					   vm_swapcache_minburst) {
					vm_swapcache_writing(&page_marker);
					burst = SWAPB_BURSTING;
				}
			}
		} else {
			vm_swapcache_cleaning(&object_marker);
		}
	}

	/*
	 * Cleanup (NOT REACHED)
	 */
	TAILQ_REMOVE(INACTIVE_LIST, &page_marker, pageq);
	crit_exit();
	lwkt_reltoken(&vm_token);

	lwkt_gettoken(&vmobj_token);
	TAILQ_REMOVE(&vm_object_list, &object_marker, object_list);
	lwkt_reltoken(&vmobj_token);
}

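/*
 * Boot-time glue: launch the daemon above as the kernel thread
 * "swapcached" via the standard kproc_desc/SYSINIT mechanism.
 */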
static struct kproc_desc swpc_kp = {
	"swapcached",
	vm_swapcached_thread,
	&swapcached_thread
};
SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp)

/*
 * The caller must hold vm_token.
 */
static void
vm_swapcache_writing(vm_page_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	vm_page_t m;
	int count;
	int isblkdev;

	/*
	 * Deal with an overflow of the heuristic counter, or with the
	 * user manually changing the hysteresis.
	 *
	 * Try to avoid small incremental pageouts by waiting for enough
	 * pages to build up in the inactive queue to hopefully get a good
	 * burst in.  This heuristic is bumped by the VM system and reset
	 * when our scan hits the end of the queue.
	 */
	if (vm_swapcache_inactive_heuristic < -vm_swapcache_hysteresis)
		vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
	if (vm_swapcache_inactive_heuristic < 0)
		return;

	/*
	 * Scan the inactive queue from our marker to locate
	 * suitable pages to push to the swap cache.
	 *
	 * We are looking for clean vnode-backed pages.
	 *
	 * NOTE: PG_SWAPPED pages in particular are not part of
	 *	 our count because once the cache stabilizes we
	 *	 can end up with a very high datarate of VM pages
	 *	 cycling from it.
	 */
	m = marker;
	count = vm_swapcache_maxlaunder;

	while ((m = TAILQ_NEXT(m, pageq)) != NULL && count--) {
		if (m->flags & (PG_MARKER | PG_SWAPPED)) {
			++count;
			continue;
		}
		if (vm_swapcache_curburst < 0)
			break;
		if (vm_swapcache_test(m))
			continue;
		object = m->object;
		vp = object->handle;
		if (vp == NULL)
			continue;

		switch(vp->v_type) {
		case VREG:
			/*
			 * If data_enable is 0 do not try to swapcache data.
			 * If use_chflags is set then only swapcache data for
			 * VSWAPCACHE marked vnodes, otherwise any vnode.
			 */
			if (vm_swapcache_data_enable == 0 ||
			    ((vp->v_flag & VSWAPCACHE) == 0 &&
			     vm_swapcache_use_chflags)) {
				continue;
			}
			if (vm_swapcache_maxfilesize &&
			    object->size >
			    (vm_swapcache_maxfilesize >> PAGE_SHIFT)) {
				continue;
			}
			isblkdev = 0;
			break;
		case VCHR:
			/*
			 * The PG_NOTMETA flag only applies to pages
			 * associated with block devices.
			 */
			if (m->flags & PG_NOTMETA)
				continue;
			if (vm_swapcache_meta_enable == 0)
				continue;
			isblkdev = 1;
			break;
		default:
			continue;
		}

		/*
		 * Ok, move the marker and soft-busy the page.
		 */
		TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
		TAILQ_INSERT_AFTER(INACTIVE_LIST, m, marker, pageq);

		/*
		 * Assign swap and initiate I/O.
		 *
		 * (adjust for the --count which also occurs in the loop)
		 */
		count -= vm_swapcached_flush(m, isblkdev) - 1;

		/*
		 * Setup for next loop using marker.
		 */
		m = marker;
	}

	/*
	 * Cleanup marker position.  If we hit the end of the
	 * list the marker is placed at the tail.  Newly deactivated
	 * pages will be placed after it.
	 *
	 * Earlier inactive pages that were dirty and become clean
	 * are typically moved to the end of PQ_INACTIVE by virtue
	 * of vfs_vmio_release() when they become unwired from the
	 * buffer cache.
	 */
	TAILQ_REMOVE(INACTIVE_LIST, marker, pageq);
	if (m) {
		TAILQ_INSERT_BEFORE(m, marker, pageq);
	} else {
		TAILQ_INSERT_TAIL(INACTIVE_LIST, marker, pageq);
		vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;
	}
}

/*
 * Flush the specified page using the swap_pager.
 *
 * Try to collect surrounding pages, including pages which may
 * have already been assigned swap.  Try to cluster within a
 * contiguous aligned SWAP_META_PAGES (typ 16 x PAGE_SIZE) block
 * to match what swap_pager_putpages() can do.
 *
 * We also want to try to match against the buffer cache blocksize
 * but we don't really know what it is here.  Since the buffer cache
 * wires and unwires pages in groups the fact that we skip wired pages
 * should be sufficient.
 *
 * Returns a count of pages we might have flushed (minimum 1)
 *
 * The caller must hold vm_token.
 */
static
int
vm_swapcached_flush(vm_page_t m, int isblkdev)
{
	vm_object_t object;
	vm_page_t marray[SWAP_META_PAGES];
	vm_pindex_t basei;
	int rtvals[SWAP_META_PAGES];
	int x;
	int i;
	int j;
	int count;

	vm_page_io_start(m);
	vm_page_protect(m, VM_PROT_READ);
	object = m->object;

	/*
	 * Try to cluster around (m), keeping in mind that the swap pager
	 * can only do SWAP_META_PAGES worth of contiguous write.
	 */
	x = (int)m->pindex & SWAP_META_MASK;
	marray[x] = m;
	basei = m->pindex;

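	/*
	 * marray[] is indexed by offset within the aligned
	 * SWAP_META_PAGES block; (basei - x) is the pindex of the
	 * block base, so slot n corresponds to pindex (basei - x + n).
	 */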
	for (i = x - 1; i >= 0; --i) {
		m = vm_page_lookup(object, basei - x + i);
		if (m == NULL)
			break;
		if (vm_swapcache_test(m))
			break;
		if (isblkdev && (m->flags & PG_NOTMETA))
			break;
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[i] = m;
	}
	++i;

	for (j = x + 1; j < SWAP_META_PAGES; ++j) {
		m = vm_page_lookup(object, basei - x + j);
		if (m == NULL)
			break;
		if (vm_swapcache_test(m))
			break;
		if (isblkdev && (m->flags & PG_NOTMETA))
			break;
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[j] = m;
	}

	count = j - i;
	vm_object_pip_add(object, count);
	swap_pager_putpages(object, marray + i, count, FALSE, rtvals + i);
	vm_swapcache_write_count += count * PAGE_SIZE;
	vm_swapcache_curburst -= count * PAGE_SIZE;

	while (i < j) {
		if (rtvals[i] != VM_PAGER_PEND) {
			vm_page_io_finish(marray[i]);
			vm_object_pip_wakeup(object);
		}
		++i;
	}
	return(count);
}

/*
 * Test whether a VM page is suitable for writing to the swapcache.
 * Does not test m->queue, PG_MARKER, or PG_SWAPPED.
 *
 * Returns 0 on success, 1 on failure
 *
 * The caller must hold vm_token.
 */
static int
vm_swapcache_test(vm_page_t m)
{
	vm_object_t object;

	if (m->flags & (PG_BUSY | PG_UNMANAGED))
		return(1);
	if (m->busy || m->hold_count || m->wire_count)
		return(1);
	if (m->valid != VM_PAGE_BITS_ALL)
		return(1);
	if (m->dirty & m->valid)
		return(1);
	if ((object = m->object) == NULL)
		return(1);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_DEAD)) {
		return(1);
	}
	vm_page_test_dirty(m);
	if (m->dirty & m->valid)
		return(1);
	return(0);
}

/*
 * Cleaning pass
 *
 * The caller must hold vm_token.
 */
static
void
vm_swapcache_cleaning(vm_object_t marker)
{
	vm_object_t object;
	struct vnode *vp;
	int count;
	int n;

	object = marker;
	count = vm_swapcache_maxlaunder;

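	/*
	 * marker->size is reused as the swblock scan index within the
	 * current object, and marker->backing_object remembers which
	 * object that index belongs to, so the scan can resume where
	 * it left off on the next pass.
	 */
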
	/*
	 * Look for vnode objects
	 */
	lwkt_gettoken(&vm_token);
	lwkt_gettoken(&vmobj_token);

	while ((object = TAILQ_NEXT(object, object_list)) != NULL && count--) {
		if (object->type != OBJT_VNODE)
			continue;
		if ((object->flags & OBJ_DEAD) || object->swblock_count == 0)
			continue;
		if ((vp = object->handle) == NULL)
			continue;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			continue;

		/*
		 * Adjust iterator.
		 */
		if (marker->backing_object != object)
			marker->size = 0;

		/*
		 * Move the marker so we can work on the VM object
		 */
		TAILQ_REMOVE(&vm_object_list, marker, object_list);
		TAILQ_INSERT_AFTER(&vm_object_list, object,
				   marker, object_list);

		/*
		 * Look for swblocks starting at our iterator.
		 *
		 * The swap_pager_condfree() function attempts to free
		 * swap space starting at the specified index.  The index
		 * will be updated on return.  The function will return
		 * a scan factor (NOT the number of blocks freed).
		 *
		 * If it must cut its scan of the object short due to an
		 * excessive number of swblocks, or is able to free the
		 * requested number of blocks, it will return n >= count
		 * and we break and pick it back up on a future attempt.
		 */
		n = swap_pager_condfree(object, &marker->size, count);
		count -= n;
		if (count < 0)
			break;

		/*
		 * Setup for loop.
		 */
		marker->size = 0;
		object = marker;
	}

	/*
	 * Adjust marker so we continue the scan from where we left off.
	 * When we reach the end we start back at the beginning.
	 */
	TAILQ_REMOVE(&vm_object_list, marker, object_list);
	if (object)
		TAILQ_INSERT_BEFORE(object, marker, object_list);
	else
		TAILQ_INSERT_HEAD(&vm_object_list, marker, object_list);
	marker->backing_object = object;

	lwkt_reltoken(&vmobj_token);
	lwkt_reltoken(&vm_token);
}