xref: /dragonfly/sys/vm/vm_swapcache.c (revision 1de864f0)
/*
 * (MPSAFE)
 *
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implement the swapcache daemon.  When enabled, swap is assumed to be
 * configured on a fast storage device such as an SSD.  Swap is assigned
 * to clean vnode-backed pages in the inactive queue, clustered by object
 * if possible, and written out.  The swap assignment sticks around even
 * after the underlying pages have been recycled.
 *
 * The daemon manages write bandwidth based on sysctl settings to control
 * wear on the SSD.
 *
 * The vnode strategy code will check for the swap assignments and divert
 * reads to the swap device when the data is present in the swapcache.
 *
 * This operates on both regular files and the block device vnodes used by
 * filesystems to manage meta-data.
 */
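
/*
 * Example usage (a sketch, not part of the original source): operation
 * is driven entirely by the sysctls declared below, along the lines of:
 *
 *	sysctl vm.swapcache.data_enable=1
 *	sysctl vm.swapcache.meta_enable=1
 *	sysctl vm.swapcache.read_enable=1
 *
 * With vm.swapcache.use_chflags at its default of 1, regular file data
 * is only swapcached for vnodes marked VSWAPCACHE via 'chflags cache'.
 */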

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

/* the kernel process "swapcached" */
static int vm_swapcached_flush (vm_page_t m, int isblkdev);
static int vm_swapcache_test(vm_page_t m);
static int vm_swapcache_writing_heuristic(void);
static int vm_swapcache_writing(vm_page_t marker, int count, int scount);
static void vm_swapcache_cleaning(vm_object_t marker, int *swindexp);
static void vm_swapcache_movemarker(vm_object_t marker, int swindex,
				vm_object_t object);
struct thread *swapcached_thread;

SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);

int vm_swapcache_read_enable;
int vm_swapcache_inactive_heuristic;
static int vm_swapcache_sleep;
static int vm_swapcache_maxscan = PQ_L2_SIZE * 8;
static int vm_swapcache_maxlaunder = PQ_L2_SIZE * 4;
static int vm_swapcache_data_enable = 0;
static int vm_swapcache_meta_enable = 0;
static int vm_swapcache_maxswappct = 75;
static int vm_swapcache_hysteresis;
static int vm_swapcache_min_hysteresis;
int vm_swapcache_use_chflags = 1;	/* require chflags cache */
static int64_t vm_swapcache_minburst = 10000000LL;	/* 10MB */
static int64_t vm_swapcache_curburst = 4000000000LL;	/* 4G after boot */
static int64_t vm_swapcache_maxburst = 2000000000LL;	/* 2G nominal max */
static int64_t vm_swapcache_accrate = 100000LL;		/* 100K/s */
static int64_t vm_swapcache_write_count;
static int64_t vm_swapcache_maxfilesize;
static int64_t vm_swapcache_cleanperobj = 16*1024*1024;

SYSCTL_INT(_vm_swapcache, OID_AUTO, maxlaunder,
	CTLFLAG_RW, &vm_swapcache_maxlaunder, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxscan,
	CTLFLAG_RW, &vm_swapcache_maxscan, 0, "");

SYSCTL_INT(_vm_swapcache, OID_AUTO, data_enable,
	CTLFLAG_RW, &vm_swapcache_data_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, meta_enable,
	CTLFLAG_RW, &vm_swapcache_meta_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
	CTLFLAG_RW, &vm_swapcache_read_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxswappct,
	CTLFLAG_RW, &vm_swapcache_maxswappct, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, hysteresis,
	CTLFLAG_RD, &vm_swapcache_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, min_hysteresis,
	CTLFLAG_RW, &vm_swapcache_min_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, use_chflags,
	CTLFLAG_RW, &vm_swapcache_use_chflags, 0, "");

SYSCTL_QUAD(_vm_swapcache, OID_AUTO, minburst,
	CTLFLAG_RW, &vm_swapcache_minburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, curburst,
	CTLFLAG_RW, &vm_swapcache_curburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxburst,
	CTLFLAG_RW, &vm_swapcache_maxburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxfilesize,
	CTLFLAG_RW, &vm_swapcache_maxfilesize, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, accrate,
	CTLFLAG_RW, &vm_swapcache_accrate, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, write_count,
	CTLFLAG_RW, &vm_swapcache_write_count, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, cleanperobj,
	CTLFLAG_RW, &vm_swapcache_cleanperobj, 0, "");

#define SWAPMAX(adj)	\
	((int64_t)vm_swap_max * (vm_swapcache_maxswappct + (adj)) / 100)
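
/*
 * Worked example (defaults assumed): with vm_swapcache_maxswappct = 75,
 * SWAPMAX(0) is 75% of configured swap and SWAPMAX(-10) is 65%, which
 * bounds the write/clean hysteresis band used by the main loop below.
 */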

/*
 * When shutting down the machine we want to stop swapcache operation
 * immediately so swap is not accessed after devices have been shuttered.
 */
static void
shutdown_swapcache(void *arg __unused)
{
	vm_swapcache_read_enable = 0;
	vm_swapcache_data_enable = 0;
	vm_swapcache_meta_enable = 0;
	wakeup(&vm_swapcache_sleep);	/* shortcut 5-second wait */
}

/*
 * vm_swapcached is the high level swapcache daemon.
 *
 * No requirements.
 */
static void
vm_swapcached_thread(void)
{
	enum { SWAPC_WRITING, SWAPC_CLEANING } state = SWAPC_WRITING;
	enum { SWAPB_BURSTING, SWAPB_RECOVERING } burst = SWAPB_BURSTING;
	static struct vm_page page_marker[PQ_L2_SIZE];
	static struct vm_object swmarker;
	static int swindex;
	int q;

	/*
	 * Thread setup
	 */
	curthread->td_flags |= TDF_SYSTHREAD;
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc,
			      swapcached_thread, SHUTDOWN_PRI_FIRST);
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_swapcache,
			      NULL, SHUTDOWN_PRI_SECOND);

	/*
	 * Initialize our marker for the inactive scan (SWAPC_WRITING)
	 */
	bzero(&page_marker, sizeof(page_marker));
	for (q = 0; q < PQ_L2_SIZE; ++q) {
		page_marker[q].flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
		page_marker[q].queue = PQ_INACTIVE + q;
		page_marker[q].pc = q;
		page_marker[q].wire_count = 1;
		vm_page_queues_spin_lock(PQ_INACTIVE + q);
		TAILQ_INSERT_HEAD(
			&vm_page_queues[PQ_INACTIVE + q].pl,
			&page_marker[q], pageq);
		vm_page_queues_spin_unlock(PQ_INACTIVE + q);
	}

	vm_swapcache_min_hysteresis = 1024;
	vm_swapcache_hysteresis = vm_swapcache_min_hysteresis;
	vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;

	/*
	 * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
	 */
	bzero(&swmarker, sizeof(swmarker));
	swmarker.type = OBJT_MARKER;
	swindex = 0;
	lwkt_gettoken(&vmobj_tokens[swindex]);
	TAILQ_INSERT_HEAD(&vm_object_lists[swindex],
			  &swmarker, object_list);
	lwkt_reltoken(&vmobj_tokens[swindex]);

	for (;;) {
		int reached_end;
		int scount;
		int count;

		/*
		 * Handle shutdown
		 */
		kproc_suspend_loop();

		/*
		 * Check every 5 seconds when not enabled or if no swap
		 * is present.
		 */
		if ((vm_swapcache_data_enable == 0 &&
		     vm_swapcache_meta_enable == 0 &&
		     vm_swap_cache_use <= SWAPMAX(0)) ||
		    vm_swap_max == 0) {
			tsleep(&vm_swapcache_sleep, 0, "csleep", hz * 5);
			continue;
		}

		/*
		 * Polling rate when enabled is approximately 10 hz.
		 */
		tsleep(&vm_swapcache_sleep, 0, "csleep", hz / 10);

		/*
		 * State hysteresis.  Generate write activity up to 75% of
		 * swap (SWAPMAX(0)), then clean out swap assignments down
		 * to 65% (SWAPMAX(-10)), then repeat.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swap_cache_use > SWAPMAX(0))
				state = SWAPC_CLEANING;
		} else {
			if (vm_swap_cache_use < SWAPMAX(-10))
				state = SWAPC_WRITING;
		}

		/*
		 * We are allowed to continue accumulating burst value
		 * in either state.  Allow the user to set curburst > maxburst
		 * for the initial load-in.
		 */
		if (vm_swapcache_curburst < vm_swapcache_maxburst) {
			vm_swapcache_curburst += vm_swapcache_accrate / 10;
			if (vm_swapcache_curburst > vm_swapcache_maxburst)
				vm_swapcache_curburst = vm_swapcache_maxburst;
		}

		/*
		 * We don't want to nickel-and-dime the scan as that will
		 * create unnecessary fragmentation.  The minimum burst
		 * is one second's worth of accumulation (vm_swapcache_accrate,
		 * 100KB with the defaults).
		 */
		if (state != SWAPC_WRITING) {
			vm_swapcache_cleaning(&swmarker, &swindex);
			continue;
		}
		if (vm_swapcache_curburst < vm_swapcache_accrate)
			continue;

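		/*
		 * Split the launder and scan budgets evenly across the
		 * PQ_L2_SIZE inactive sub-queues; the +2 keeps each
		 * per-queue budget non-zero even for small sysctl values.
		 */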
		reached_end = 0;
		count = vm_swapcache_maxlaunder / PQ_L2_SIZE + 2;
		scount = vm_swapcache_maxscan / PQ_L2_SIZE + 2;

		if (burst == SWAPB_BURSTING) {
			if (vm_swapcache_writing_heuristic()) {
				for (q = 0; q < PQ_L2_SIZE; ++q) {
					reached_end +=
						vm_swapcache_writing(
							&page_marker[q],
							count,
							scount);
				}
			}
			if (vm_swapcache_curburst <= 0)
				burst = SWAPB_RECOVERING;
		} else if (vm_swapcache_curburst > vm_swapcache_minburst) {
			if (vm_swapcache_writing_heuristic()) {
				for (q = 0; q < PQ_L2_SIZE; ++q) {
					reached_end +=
						vm_swapcache_writing(
							&page_marker[q],
							count,
							scount);
				}
			}
			burst = SWAPB_BURSTING;
		}
		if (reached_end == PQ_L2_SIZE) {
			vm_swapcache_inactive_heuristic =
				-vm_swapcache_hysteresis;
		}
	}

	/*
	 * Cleanup (NOT REACHED)
	 */
	for (q = 0; q < PQ_L2_SIZE; ++q) {
		vm_page_queues_spin_lock(PQ_INACTIVE + q);
		TAILQ_REMOVE(
			&vm_page_queues[PQ_INACTIVE + q].pl,
			&page_marker[q], pageq);
		vm_page_queues_spin_unlock(PQ_INACTIVE + q);
	}

	lwkt_gettoken(&vmobj_tokens[swindex]);
	TAILQ_REMOVE(&vm_object_lists[swindex], &swmarker, object_list);
	lwkt_reltoken(&vmobj_tokens[swindex]);
}

static struct kproc_desc swpc_kp = {
	"swapcached",
	vm_swapcached_thread,
	&swapcached_thread
};
SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp);

/*
 * Deal with an overflow of the heuristic counter, or with the user
 * manually changing the hysteresis.
 *
 * Try to avoid small incremental pageouts by waiting for enough
 * pages to build up in the inactive queue to hopefully get a good
 * burst in.  This heuristic is bumped by the VM system and reset
 * when our scan hits the end of the queue.
 *
 * Return TRUE if we need to take a writing pass.
 */
static int
vm_swapcache_writing_heuristic(void)
{
	int hyst;

	hyst = vmstats.v_inactive_count / 4;
	if (hyst < vm_swapcache_min_hysteresis)
		hyst = vm_swapcache_min_hysteresis;
	cpu_ccfence();
	vm_swapcache_hysteresis = hyst;

	if (vm_swapcache_inactive_heuristic < -hyst)
		vm_swapcache_inactive_heuristic = -hyst;

	return (vm_swapcache_inactive_heuristic >= 0);
}
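
/*
 * Worked example (defaults assumed): with 200000 pages on the inactive
 * queues the hysteresis becomes 50000 pages, so after a full scan resets
 * the heuristic to -50000 the VM system must bump it by roughly that
 * many pages before the next writing pass is taken.
 */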

/*
 * Take a writing pass on one of the inactive queues, return non-zero if
 * we hit the end of the queue.
 */
static int
vm_swapcache_writing(vm_page_t marker, int count, int scount)
{
	vm_object_t object;
	struct vnode *vp;
	vm_page_t m;
	int isblkdev;

	/*
	 * Scan the inactive queue from our marker to locate
	 * suitable pages to push to the swap cache.
	 *
	 * We are looking for clean vnode-backed pages.
	 */
	vm_page_queues_spin_lock(marker->queue);
	while ((m = TAILQ_NEXT(marker, pageq)) != NULL &&
	       count > 0 && scount-- > 0) {
		KKASSERT(m->queue == marker->queue);

		/*
		 * Stop using swap if panicked, dumping, or dumped.
		 * Don't try to write if our curburst has been exhausted.
		 */
		if (panicstr || dumping)
			break;
		if (vm_swapcache_curburst < 0)
			break;

		/*
		 * Move marker
		 */
		TAILQ_REMOVE(
			&vm_page_queues[marker->queue].pl, marker, pageq);
		TAILQ_INSERT_AFTER(
			&vm_page_queues[marker->queue].pl, m, marker, pageq);

		/*
		 * Ignore markers and ignore pages that already have a swap
		 * assignment.
		 */
		if (m->flags & (PG_MARKER | PG_SWAPPED))
			continue;
		if (vm_page_busy_try(m, TRUE))
			continue;
		vm_page_queues_spin_unlock(marker->queue);

		if ((object = m->object) == NULL) {
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}
		vm_object_hold(object);
		if (m->object != object) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}
		if (vm_swapcache_test(m)) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		vp = object->handle;
		if (vp == NULL) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		switch(vp->v_type) {
		case VREG:
			/*
			 * PG_NOTMETA generically means 'don't swapcache this',
			 * and HAMMER will set this for regular data buffers
			 * (and leave it unset for meta-data buffers) as
			 * appropriate when double buffering is enabled.
			 */
			if (m->flags & PG_NOTMETA) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}

			/*
			 * If data_enable is 0 do not try to swapcache data.
			 * If use_chflags is set then only swapcache data for
			 * VSWAPCACHE marked vnodes, otherwise any vnode.
			 */
			if (vm_swapcache_data_enable == 0 ||
			    ((vp->v_flag & VSWAPCACHE) == 0 &&
			     vm_swapcache_use_chflags)) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			if (vm_swapcache_maxfilesize &&
			    object->size >
			    (vm_swapcache_maxfilesize >> PAGE_SHIFT)) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			isblkdev = 0;
			break;
		case VCHR:
			/*
			 * PG_NOTMETA generically means 'don't swapcache this',
			 * and HAMMER will set this for regular data buffers
			 * (and leave it unset for meta-data buffers) as
			 * appropriate when double buffering is enabled.
			 */
			if (m->flags & PG_NOTMETA) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			if (vm_swapcache_meta_enable == 0) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			isblkdev = 1;
			break;
		default:
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		/*
		 * Assign swap and initiate I/O, adjusting the launder
		 * budget by the number of pages flushed (minimum 1).
		 */
		count -= vm_swapcached_flush(m, isblkdev);

		/*
		 * Setup for next loop using marker.
		 */
		vm_object_drop(object);
		vm_page_queues_spin_lock(marker->queue);
	}

	/*
	 * The marker could wind up at the end, which is ok.  If we hit the
	 * end of the list adjust the heuristic.
	 *
	 * Earlier inactive pages that were dirty and become clean
	 * are typically moved to the end of PQ_INACTIVE by virtue
	 * of vfs_vmio_release() when they become unwired from the
	 * buffer cache.
	 */
	vm_page_queues_spin_unlock(marker->queue);

	/*
	 * m invalid but can be used to test for NULL
	 */
	return (m == NULL);
}

/*
 * Flush the specified page using the swap_pager.  The page
 * must be busied by the caller and its disposition will become
 * the responsibility of this function.
 *
 * Try to collect surrounding pages, including pages which may
 * have already been assigned swap.  Try to cluster within a
 * contiguous aligned SWAP_META_PAGES (typ 16 x PAGE_SIZE) block
 * to match what swap_pager_putpages() can do.
 *
 * We also want to try to match against the buffer cache blocksize
 * but we don't really know what it is here.  Since the buffer cache
 * wires and unwires pages in groups the fact that we skip wired pages
 * should be sufficient.
 *
 * Returns a count of pages we might have flushed (minimum 1)
 */
static
int
vm_swapcached_flush(vm_page_t m, int isblkdev)
{
	vm_object_t object;
	vm_page_t marray[SWAP_META_PAGES];
	vm_pindex_t basei;
	int rtvals[SWAP_META_PAGES];
	int x;
	int i;
	int j;
	int count;
	int error;

	vm_page_io_start(m);
	vm_page_protect(m, VM_PROT_READ);
	object = m->object;
	vm_object_hold(object);

	/*
	 * Try to cluster around (m), keeping in mind that the swap pager
	 * can only do SWAP_META_PAGES worth of contiguous write.
	 */
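	/*
	 * Worked example: with SWAP_META_PAGES = 16, a page at pindex 37
	 * occupies slot x = 37 & 15 = 5 of the aligned 16-page window
	 * spanning pindexes 32-47; the loops below probe backwards from
	 * slot 4 down to 0 and forwards from slot 6 up to 15.
	 */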
	x = (int)m->pindex & SWAP_META_MASK;
	marray[x] = m;
	basei = m->pindex;
	vm_page_wakeup(m);

	for (i = x - 1; i >= 0; --i) {
		m = vm_page_lookup_busy_try(object, basei - x + i,
					    TRUE, &error);
		if (error || m == NULL)
			break;
		if (vm_swapcache_test(m)) {
			vm_page_wakeup(m);
			break;
		}
		if (isblkdev && (m->flags & PG_NOTMETA)) {
			vm_page_wakeup(m);
			break;
		}
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[i] = m;
		vm_page_wakeup(m);
	}
	++i;

	for (j = x + 1; j < SWAP_META_PAGES; ++j) {
		m = vm_page_lookup_busy_try(object, basei - x + j,
					    TRUE, &error);
		if (error || m == NULL)
			break;
		if (vm_swapcache_test(m)) {
			vm_page_wakeup(m);
			break;
		}
		if (isblkdev && (m->flags & PG_NOTMETA)) {
			vm_page_wakeup(m);
			break;
		}
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[j] = m;
		vm_page_wakeup(m);
	}

	count = j - i;
	vm_object_pip_add(object, count);
	swap_pager_putpages(object, marray + i, count, FALSE, rtvals + i);
	vm_swapcache_write_count += count * PAGE_SIZE;
	vm_swapcache_curburst -= count * PAGE_SIZE;

	while (i < j) {
		if (rtvals[i] != VM_PAGER_PEND) {
			vm_page_busy_wait(marray[i], FALSE, "swppgfd");
			vm_page_io_finish(marray[i]);
			vm_page_wakeup(marray[i]);
			vm_object_pip_wakeup(object);
		}
		++i;
	}
	vm_object_drop(object);
	return(count);
}

/*
 * Test whether a VM page is suitable for writing to the swapcache.
 * Does not test m->queue, PG_MARKER, or PG_SWAPPED.
 *
 * Returns 0 on success, 1 on failure
 */
static int
vm_swapcache_test(vm_page_t m)
{
	vm_object_t object;

	if (m->flags & PG_UNMANAGED)
		return(1);
	if (m->hold_count || m->wire_count)
		return(1);
	if (m->valid != VM_PAGE_BITS_ALL)
		return(1);
	if (m->dirty & m->valid)
		return(1);
	if ((object = m->object) == NULL)
		return(1);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_DEAD)) {
		return(1);
	}
	vm_page_test_dirty(m);
	if (m->dirty & m->valid)
		return(1);
	return(0);
}

/*
 * Cleaning pass.
 *
 * We clean whole objects, up to vm_swapcache_cleanperobj (default 16MB)
 * per object per pass.
 */
static
void
vm_swapcache_cleaning(vm_object_t marker, int *swindexp)
{
	vm_object_t object;
	struct vnode *vp;
	int count;
	int scount;
	int n;

	count = vm_swapcache_maxlaunder;
	scount = vm_swapcache_maxscan;

	/*
	 * Look for vnode objects
	 */
	lwkt_gettoken(&vmobj_tokens[*swindexp]);

outerloop:
	while ((object = TAILQ_NEXT(marker, object_list)) != NULL) {
		/*
		 * We have to skip markers.  We cannot hold/drop marker
		 * objects!
		 */
		if (object->type == OBJT_MARKER) {
			vm_swapcache_movemarker(marker, *swindexp, object);
			continue;
		}

		/*
		 * Safety, or in case there are millions of VM objects
		 * without swapcache backing.
		 */
		if (--scount <= 0)
			goto breakout;

		/*
		 * We must hold the object before potentially yielding.
		 */
		vm_object_hold(object);
		lwkt_yield();

		/*
		 * Only operate on live VNODE objects that are either
		 * VREG or VCHR (VCHR for meta-data).
		 */
		if ((object->type != OBJT_VNODE) ||
		    ((object->flags & OBJ_DEAD) ||
		     object->swblock_count == 0) ||
		    ((vp = object->handle) == NULL) ||
		    (vp->v_type != VREG && vp->v_type != VCHR)) {
			vm_object_drop(object);
			/* object may be invalid now */
			vm_swapcache_movemarker(marker, *swindexp, object);
			continue;
		}

		/*
		 * Reset the object pindex stored in the marker if the
		 * working object has changed.
		 */
		if (marker->backing_object != object) {
			marker->size = 0;
			marker->backing_object_offset = 0;
			marker->backing_object = object;
		}

		/*
		 * Look for swblocks starting at our iterator.
		 *
		 * The swap_pager_condfree() function attempts to free
		 * swap space starting at the specified index.  The index
		 * will be updated on return.  The function will return
		 * a scan factor (NOT the number of blocks freed).
		 *
		 * If it must cut its scan of the object short due to an
		 * excessive number of swblocks, or is able to free the
		 * requested number of blocks, it will return n >= count
		 * and we break and pick it back up on a future attempt.
		 *
		 * Scan the object linearly and try to batch large sets of
		 * blocks that are likely to clean out entire swap radix
		 * tree leaves.
		 */
		lwkt_token_swap();
		lwkt_reltoken(&vmobj_tokens[*swindexp]);

		n = swap_pager_condfree(object, &marker->size,
				    (count + SWAP_META_MASK) & ~SWAP_META_MASK);

		vm_object_drop(object);		/* object may be invalid now */
		lwkt_gettoken(&vmobj_tokens[*swindexp]);

		/*
		 * If we have exhausted the object or hit our per-object
		 * page limit then move on to the next object.  Note that
		 * the current object may no longer be on the vm_object_lists[].
		 */
		if (n <= 0 ||
		    marker->backing_object_offset > vm_swapcache_cleanperobj) {
			vm_swapcache_movemarker(marker, *swindexp, object);
		}

		/*
		 * If we have exhausted our max-launder, stop for now.
		 */
		count -= n;
		marker->backing_object_offset += n * PAGE_SIZE;
		if (count < 0)
			goto breakout;
	}

	/*
	 * Iterate vm_object_lists[] hash table
	 */
	TAILQ_REMOVE(&vm_object_lists[*swindexp], marker, object_list);
	lwkt_reltoken(&vmobj_tokens[*swindexp]);
	if (++*swindexp >= VMOBJ_HSIZE)
		*swindexp = 0;
	lwkt_gettoken(&vmobj_tokens[*swindexp]);
	TAILQ_INSERT_HEAD(&vm_object_lists[*swindexp], marker, object_list);

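	/*
	 * A single call scans at most the remainder of the hash table:
	 * stop once the iterator wraps back to bucket 0, otherwise move
	 * on to the next bucket (budget permitting).
	 */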
	if (*swindexp != 0)
		goto outerloop;

breakout:
	lwkt_reltoken(&vmobj_tokens[*swindexp]);
}

/*
 * Move the marker past the current object.  Object can be stale, but we
 * still need it to determine if the marker has to be moved.  If the object
 * is still the 'current object' (object after the marker), we hop-scotch
 * the marker past it.
 */
static void
vm_swapcache_movemarker(vm_object_t marker, int swindex, vm_object_t object)
{
	if (TAILQ_NEXT(marker, object_list) == object) {
		TAILQ_REMOVE(&vm_object_lists[swindex], marker, object_list);
		TAILQ_INSERT_AFTER(&vm_object_lists[swindex], object,
				   marker, object_list);
	}
}
833