/*
 * (MPSAFE)
 *
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implement the swapcache daemon.  When enabled swap is assumed to be
 * configured on a fast storage device such as a SSD.  Swap is assigned
 * to clean vnode-backed pages in the inactive queue, clustered by object
 * if possible, and written out.  The swap assignment sticks around even
 * after the underlying pages have been recycled.
 *
 * The daemon manages write bandwidth based on sysctl settings to control
 * wear on the SSD.
 *
 * The vnode strategy code will check for the swap assignments and divert
 * reads to the swap device when the data is present in the swapcache.
 *
 * This operates on both regular files and the block device vnodes used by
 * filesystems to manage meta-data.
 */
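/*
 * Illustrative usage sketch (see swapcache(8)): the vm.swapcache.*
 * sysctls defined below are what an administrator typically flips to
 * activate the daemon, e.g.:
 *
 *	sysctl vm.swapcache.data_enable=1	# swapcache file data
 *	sysctl vm.swapcache.meta_enable=1	# swapcache filesystem meta-data
 *	sysctl vm.swapcache.read_enable=1	# divert reads to the swapcache
 *
 * With the default use_chflags=1, file data is only cached for vnodes
 * marked 'cache' via chflags(1).
 */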
#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

/* the kernel process "swapcached" */
static int vm_swapcached_flush (vm_page_t m, int isblkdev);
static int vm_swapcache_test(vm_page_t m);
static int vm_swapcache_writing_heuristic(void);
static int vm_swapcache_writing(vm_page_t marker, int count, int scount);
static void vm_swapcache_cleaning(vm_object_t marker, int *swindexp);
static void vm_swapcache_movemarker(vm_object_t marker, int swindex,
			vm_object_t object);
struct thread *swapcached_thread;

SYSCTL_NODE(_vm, OID_AUTO, swapcache, CTLFLAG_RW, NULL, NULL);

int vm_swapcache_read_enable;
int vm_swapcache_inactive_heuristic;
static int vm_swapcache_sleep;
static int vm_swapcache_maxscan = PQ_L2_SIZE * 8;
static int vm_swapcache_maxlaunder = PQ_L2_SIZE * 4;
static int vm_swapcache_data_enable = 0;
static int vm_swapcache_meta_enable = 0;
static int vm_swapcache_maxswappct = 75;
static int vm_swapcache_hysteresis;
static int vm_swapcache_min_hysteresis;
int vm_swapcache_use_chflags = 1;	/* require chflags cache */
static int64_t vm_swapcache_minburst = 10000000LL;	/* 10MB */
static int64_t vm_swapcache_curburst = 4000000000LL;	/* 4G after boot */
static int64_t vm_swapcache_maxburst = 2000000000LL;	/* 2G nominal max */
static int64_t vm_swapcache_accrate = 100000LL;		/* 100K/s */
static int64_t vm_swapcache_write_count;
static int64_t vm_swapcache_maxfilesize;
static int64_t vm_swapcache_cleanperobj = 16*1024*1024;

SYSCTL_INT(_vm_swapcache, OID_AUTO, maxlaunder,
	CTLFLAG_RW, &vm_swapcache_maxlaunder, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxscan,
	CTLFLAG_RW, &vm_swapcache_maxscan, 0, "");

SYSCTL_INT(_vm_swapcache, OID_AUTO, data_enable,
	CTLFLAG_RW, &vm_swapcache_data_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, meta_enable,
	CTLFLAG_RW, &vm_swapcache_meta_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, read_enable,
	CTLFLAG_RW, &vm_swapcache_read_enable, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, maxswappct,
	CTLFLAG_RW, &vm_swapcache_maxswappct, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, hysteresis,
	CTLFLAG_RD, &vm_swapcache_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, min_hysteresis,
	CTLFLAG_RW, &vm_swapcache_min_hysteresis, 0, "");
SYSCTL_INT(_vm_swapcache, OID_AUTO, use_chflags,
	CTLFLAG_RW, &vm_swapcache_use_chflags, 0, "");

SYSCTL_QUAD(_vm_swapcache, OID_AUTO, minburst,
	CTLFLAG_RW, &vm_swapcache_minburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, curburst,
	CTLFLAG_RW, &vm_swapcache_curburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxburst,
	CTLFLAG_RW, &vm_swapcache_maxburst, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, maxfilesize,
	CTLFLAG_RW, &vm_swapcache_maxfilesize, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, accrate,
	CTLFLAG_RW, &vm_swapcache_accrate, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, write_count,
	CTLFLAG_RW, &vm_swapcache_write_count, 0, "");
SYSCTL_QUAD(_vm_swapcache, OID_AUTO, cleanperobj,
	CTLFLAG_RW, &vm_swapcache_cleanperobj, 0, "");

#define SWAPMAX(adj)	\
	((int64_t)vm_swap_max * (vm_swapcache_maxswappct + (adj)) / 100)
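/*
 * Worked example of the SWAPMAX() arithmetic (illustrative numbers):
 * with vm_swapcache_maxswappct at its default of 75, SWAPMAX(0) is 75%
 * of vm_swap_max and SWAPMAX(-10) is 65%.  On a machine with 16GB of
 * configured swap the state machine below therefore switches from
 * writing to cleaning at ~12GB of swapcache use and back to writing
 * once use falls below ~10.4GB.
 */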
/*
 * When shutting down the machine we want to stop swapcache operation
 * immediately so swap is not accessed after devices have been shuttered.
 */
static void
shutdown_swapcache(void *arg __unused)
{
	vm_swapcache_read_enable = 0;
	vm_swapcache_data_enable = 0;
	vm_swapcache_meta_enable = 0;
	wakeup(&vm_swapcache_sleep); /* shortcut 5-second wait */
}

/*
 * vm_swapcached is the high level pageout daemon.
 *
 * No requirements.
 */
static void
vm_swapcached_thread(void)
{
	enum { SWAPC_WRITING, SWAPC_CLEANING } state = SWAPC_WRITING;
	enum { SWAPB_BURSTING, SWAPB_RECOVERING } burst = SWAPB_BURSTING;
	static struct vm_page page_marker[PQ_L2_SIZE];
	static struct vm_object swmarker;
	static int swindex;
	int q;

	/*
	 * Thread setup
	 */
	curthread->td_flags |= TDF_SYSTHREAD;
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc,
			      swapcached_thread, SHUTDOWN_PRI_FIRST);
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_swapcache,
			      NULL, SHUTDOWN_PRI_SECOND);

	/*
	 * Initialize our marker for the inactive scan (SWAPC_WRITING)
	 */
	bzero(&page_marker, sizeof(page_marker));
	for (q = 0; q < PQ_L2_SIZE; ++q) {
		page_marker[q].flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
		page_marker[q].queue = PQ_INACTIVE + q;
		page_marker[q].pc = q;
		page_marker[q].wire_count = 1;
		vm_page_queues_spin_lock(PQ_INACTIVE + q);
		TAILQ_INSERT_HEAD(
			&vm_page_queues[PQ_INACTIVE + q].pl,
			&page_marker[q], pageq);
		vm_page_queues_spin_unlock(PQ_INACTIVE + q);
	}

	vm_swapcache_min_hysteresis = 1024;
	vm_swapcache_hysteresis = vm_swapcache_min_hysteresis;
	vm_swapcache_inactive_heuristic = -vm_swapcache_hysteresis;

	/*
	 * Initialize our marker for the vm_object scan (SWAPC_CLEANING)
	 */
	bzero(&swmarker, sizeof(swmarker));
	swmarker.type = OBJT_MARKER;
	swindex = 0;
	lwkt_gettoken(&vmobj_tokens[swindex]);
	TAILQ_INSERT_HEAD(&vm_object_lists[swindex],
			  &swmarker, object_list);
	lwkt_reltoken(&vmobj_tokens[swindex]);
	for (;;) {
		int reached_end;
		int scount;
		int count;

		/*
		 * Handle shutdown
		 */
		kproc_suspend_loop();

		/*
		 * Check every 5 seconds when not enabled or if no swap
		 * is present.
		 */
		if ((vm_swapcache_data_enable == 0 &&
		     vm_swapcache_meta_enable == 0) ||
		    vm_swap_max == 0) {
			tsleep(&vm_swapcache_sleep, 0, "csleep", hz * 5);
			continue;
		}

		/*
		 * Polling rate when enabled is approximately 10 hz.
		 */
		tsleep(&vm_swapcache_sleep, 0, "csleep", hz / 10);

		/*
		 * State hysteresis.  Generate write activity up to 75% of
		 * swap, then clean out swap assignments down to 65%, then
		 * repeat.
		 */
		if (state == SWAPC_WRITING) {
			if (vm_swap_cache_use > SWAPMAX(0))
				state = SWAPC_CLEANING;
		} else {
			if (vm_swap_cache_use < SWAPMAX(-10))
				state = SWAPC_WRITING;
		}

		/*
		 * We are allowed to continue accumulating burst value
		 * in either state.  Allow the user to set curburst > maxburst
		 * for the initial load-in.
		 */
		if (vm_swapcache_curburst < vm_swapcache_maxburst) {
			vm_swapcache_curburst += vm_swapcache_accrate / 10;
			if (vm_swapcache_curburst > vm_swapcache_maxburst)
				vm_swapcache_curburst = vm_swapcache_maxburst;
		}
		/*
		 * We don't want to nickel-and-dime the scan as that will
		 * create unnecessary fragmentation.  The minimum burst
		 * is one second's worth of accumulation.
		 */
		if (state != SWAPC_WRITING) {
			vm_swapcache_cleaning(&swmarker, &swindex);
			continue;
		}
		if (vm_swapcache_curburst < vm_swapcache_accrate)
			continue;

		reached_end = 0;
		count = vm_swapcache_maxlaunder / PQ_L2_SIZE + 2;
		scount = vm_swapcache_maxscan / PQ_L2_SIZE + 2;

		if (burst == SWAPB_BURSTING) {
			if (vm_swapcache_writing_heuristic()) {
				for (q = 0; q < PQ_L2_SIZE; ++q) {
					reached_end +=
						vm_swapcache_writing(
							&page_marker[q],
							count,
							scount);
				}
			}
			if (vm_swapcache_curburst <= 0)
				burst = SWAPB_RECOVERING;
		} else if (vm_swapcache_curburst > vm_swapcache_minburst) {
			if (vm_swapcache_writing_heuristic()) {
				for (q = 0; q < PQ_L2_SIZE; ++q) {
					reached_end +=
						vm_swapcache_writing(
							&page_marker[q],
							count,
							scount);
				}
			}
			burst = SWAPB_BURSTING;
		}
		if (reached_end == PQ_L2_SIZE) {
			vm_swapcache_inactive_heuristic =
				-vm_swapcache_hysteresis;
		}
	}
	/*
	 * Cleanup (NOT REACHED)
	 */
	for (q = 0; q < PQ_L2_SIZE; ++q) {
		vm_page_queues_spin_lock(PQ_INACTIVE + q);
		TAILQ_REMOVE(
			&vm_page_queues[PQ_INACTIVE + q].pl,
			&page_marker[q], pageq);
		vm_page_queues_spin_unlock(PQ_INACTIVE + q);
	}

	lwkt_gettoken(&vmobj_tokens[swindex]);
	TAILQ_REMOVE(&vm_object_lists[swindex], &swmarker, object_list);
	lwkt_reltoken(&vmobj_tokens[swindex]);
}

static struct kproc_desc swpc_kp = {
	"swapcached",
	vm_swapcached_thread,
	&swapcached_thread
};
SYSINIT(swapcached, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start, &swpc_kp);
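/*
 * Burst arithmetic, for illustration with the defaults above: the
 * daemon polls at roughly 10hz and credits vm_swapcache_accrate / 10
 * bytes per poll, i.e. the full 100KB/s accrate over one second.  A
 * write pass requires at least one second's worth of accumulation
 * (curburst >= accrate), and once curburst is exhausted the daemon
 * sits in SWAPB_RECOVERING until the budget climbs back above
 * vm_swapcache_minburst (10MB) before bursting again.
 */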
/*
 * Deal with an overflow of the heuristic counter or if the user
 * manually changes the hysteresis.
 *
 * Try to avoid small incremental pageouts by waiting for enough
 * pages to build up in the inactive queue to hopefully get a good
 * burst in.  This heuristic is bumped by the VM system and reset
 * when our scan hits the end of the queue.
 *
 * Return TRUE if we need to take a writing pass.
 */
static int
vm_swapcache_writing_heuristic(void)
{
	int hyst;

	hyst = vmstats.v_inactive_count / 4;
	if (hyst < vm_swapcache_min_hysteresis)
		hyst = vm_swapcache_min_hysteresis;
	cpu_ccfence();
	vm_swapcache_hysteresis = hyst;

	if (vm_swapcache_inactive_heuristic < -hyst)
		vm_swapcache_inactive_heuristic = -hyst;

	return (vm_swapcache_inactive_heuristic >= 0);
}
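/*
 * Example with illustrative numbers: with 100000 pages on the inactive
 * queues the hysteresis computes to 25000 pages (well above the 1024
 * page minimum), so after a complete scan the VM system must deactivate
 * roughly 25000 more pages before vm_swapcache_inactive_heuristic
 * climbs back to zero and another writing pass is taken.
 */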
/*
 * Take a writing pass on one of the inactive queues, return non-zero if
 * we hit the end of the queue.
 */
static int
vm_swapcache_writing(vm_page_t marker, int count, int scount)
{
	vm_object_t object;
	struct vnode *vp;
	vm_page_t m;
	int isblkdev;

	/*
	 * Scan the inactive queue from our marker to locate
	 * suitable pages to push to the swap cache.
	 *
	 * We are looking for clean vnode-backed pages.
	 */
	vm_page_queues_spin_lock(marker->queue);
	while ((m = TAILQ_NEXT(marker, pageq)) != NULL &&
	       count > 0 && scount-- > 0) {
		KKASSERT(m->queue == marker->queue);

		if (vm_swapcache_curburst < 0)
			break;
		TAILQ_REMOVE(
			&vm_page_queues[marker->queue].pl, marker, pageq);
		TAILQ_INSERT_AFTER(
			&vm_page_queues[marker->queue].pl, m, marker, pageq);

		/*
		 * Ignore markers and ignore pages that already have a swap
		 * assignment.
		 */
		if (m->flags & (PG_MARKER | PG_SWAPPED))
			continue;
		if (vm_page_busy_try(m, TRUE))
			continue;
		vm_page_queues_spin_unlock(marker->queue);

		if ((object = m->object) == NULL) {
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}
		vm_object_hold(object);
		if (m->object != object) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}
		if (vm_swapcache_test(m)) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		vp = object->handle;
		if (vp == NULL) {
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		switch(vp->v_type) {
		case VREG:
			/*
			 * PG_NOTMETA generically means 'don't swapcache this',
			 * and HAMMER will set this for regular data buffers
			 * (and leave it unset for meta-data buffers) as
			 * appropriate when double buffering is enabled.
			 */
			if (m->flags & PG_NOTMETA) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			/*
			 * If data_enable is 0 do not try to swapcache data.
			 * If use_chflags is set then only swapcache data for
			 * VSWAPCACHE marked vnodes, otherwise any vnode.
			 */
			if (vm_swapcache_data_enable == 0 ||
			    ((vp->v_flag & VSWAPCACHE) == 0 &&
			     vm_swapcache_use_chflags)) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			if (vm_swapcache_maxfilesize &&
			    object->size >
			    (vm_swapcache_maxfilesize >> PAGE_SHIFT)) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			isblkdev = 0;
			break;
		case VCHR:
			/*
			 * PG_NOTMETA generically means 'don't swapcache this',
			 * and HAMMER will set this for regular data buffers
			 * (and leave it unset for meta-data buffers) as
			 * appropriate when double buffering is enabled.
			 */
			if (m->flags & PG_NOTMETA) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			if (vm_swapcache_meta_enable == 0) {
				vm_object_drop(object);
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker->queue);
				continue;
			}
			isblkdev = 1;
			break;
		default:
			vm_object_drop(object);
			vm_page_wakeup(m);
			vm_page_queues_spin_lock(marker->queue);
			continue;
		}

		/*
		 * Assign swap and initiate I/O.
		 *
		 * count is adjusted by the number of pages we may have
		 * flushed (vm_swapcached_flush() returns at least 1).
		 */
		count -= vm_swapcached_flush(m, isblkdev);

		/*
		 * Setup for next loop using marker.
		 */
		vm_object_drop(object);
		vm_page_queues_spin_lock(marker->queue);
	}

	/*
	 * The marker could wind up at the end, which is ok.  If we hit the
	 * end of the list adjust the heuristic.
	 *
	 * Earlier inactive pages that were dirty and become clean
	 * are typically moved to the end of PQ_INACTIVE by virtue
	 * of vfs_vmio_release() when they become unwired from the
	 * buffer cache.
	 */
	vm_page_queues_spin_unlock(marker->queue);

	/*
	 * m invalid but can be used to test for NULL
	 */
	return (m == NULL);
}

/*
 * Flush the specified page using the swap_pager.  The page
 * must be busied by the caller and its disposition will become
 * the responsibility of this function.
 *
 * Try to collect surrounding pages, including pages which may
 * have already been assigned swap.  Try to cluster within a
 * contiguous aligned SWAP_META_PAGES (typ 16 x PAGE_SIZE) block
 * to match what swap_pager_putpages() can do.
 *
 * We also want to try to match against the buffer cache blocksize
 * but we don't really know what it is here.  Since the buffer cache
 * wires and unwires pages in groups the fact that we skip wired pages
 * should be sufficient.
 *
 * Returns a count of pages we might have flushed (minimum 1)
 */
static
int
vm_swapcached_flush(vm_page_t m, int isblkdev)
{
	vm_object_t object;
	vm_page_t marray[SWAP_META_PAGES];
	vm_pindex_t basei;
	int rtvals[SWAP_META_PAGES];
	int x;
	int i;
	int j;
	int count;
	int error;

	vm_page_io_start(m);
	vm_page_protect(m, VM_PROT_READ);
	object = m->object;
	vm_object_hold(object);

	/*
	 * Try to cluster around (m), keeping in mind that the swap pager
	 * can only do SWAP_META_PAGES worth of contiguous write.
	 */
	x = (int)m->pindex & SWAP_META_MASK;
	marray[x] = m;
	basei = m->pindex;
	vm_page_wakeup(m);

	for (i = x - 1; i >= 0; --i) {
		m = vm_page_lookup_busy_try(object, basei - x + i,
					    TRUE, &error);
		if (error || m == NULL)
			break;
		if (vm_swapcache_test(m)) {
			vm_page_wakeup(m);
			break;
		}
		if (isblkdev && (m->flags & PG_NOTMETA)) {
			vm_page_wakeup(m);
			break;
		}
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[i] = m;
		vm_page_wakeup(m);
	}
	++i;

	for (j = x + 1; j < SWAP_META_PAGES; ++j) {
		m = vm_page_lookup_busy_try(object, basei - x + j,
					    TRUE, &error);
		if (error || m == NULL)
			break;
		if (vm_swapcache_test(m)) {
			vm_page_wakeup(m);
			break;
		}
		if (isblkdev && (m->flags & PG_NOTMETA)) {
			vm_page_wakeup(m);
			break;
		}
		vm_page_io_start(m);
		vm_page_protect(m, VM_PROT_READ);
		if (m->queue - m->pc == PQ_CACHE) {
			vm_page_unqueue_nowakeup(m);
			vm_page_deactivate(m);
		}
		marray[j] = m;
		vm_page_wakeup(m);
	}

	count = j - i;
	vm_object_pip_add(object, count);
	swap_pager_putpages(object, marray + i, count, FALSE, rtvals + i);
	vm_swapcache_write_count += count * PAGE_SIZE;
	vm_swapcache_curburst -= count * PAGE_SIZE;

	while (i < j) {
		if (rtvals[i] != VM_PAGER_PEND) {
			vm_page_busy_wait(marray[i], FALSE, "swppgfd");
			vm_page_io_finish(marray[i]);
			vm_page_wakeup(marray[i]);
			vm_object_pip_wakeup(object);
		}
		++i;
	}
	vm_object_drop(object);
	return(count);
}
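/*
 * Clustering example (illustrative): with SWAP_META_PAGES typically 16,
 * a page at pindex 37 has in-block offset x = 37 & 15 = 5 and the
 * candidate cluster is the aligned block covering pindex 32..47.  The
 * loops above walk outward from the starting page within that block and
 * stop at the first missing or unsuitable page, so only a contiguous
 * run surrounding the original page is handed to swap_pager_putpages().
 */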
/*
 * Test whether a VM page is suitable for writing to the swapcache.
 * Does not test m->queue, PG_MARKER, or PG_SWAPPED.
 *
 * Returns 0 on success, 1 on failure
 */
static int
vm_swapcache_test(vm_page_t m)
{
	vm_object_t object;

	if (m->flags & PG_UNMANAGED)
		return(1);
	if (m->hold_count || m->wire_count)
		return(1);
	if (m->valid != VM_PAGE_BITS_ALL)
		return(1);
	if (m->dirty & m->valid)
		return(1);
	if ((object = m->object) == NULL)
		return(1);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_DEAD)) {
		return(1);
	}
	vm_page_test_dirty(m);
	if (m->dirty & m->valid)
		return(1);
	return(0);
}

/*
 * Cleaning pass.
 *
 * We clean whole objects up to 16MB
 */
static
void
vm_swapcache_cleaning(vm_object_t marker, int *swindexp)
{
	vm_object_t object;
	struct vnode *vp;
	int count;
	int scount;
	int n;

	count = vm_swapcache_maxlaunder;
	scount = vm_swapcache_maxscan;

outerloop:
	/*
	 * Look for vnode objects
	 */
	lwkt_gettoken(&vmobj_tokens[*swindexp]);

	while ((object = TAILQ_NEXT(marker, object_list)) != NULL) {
		/*
		 * We have to skip markers.  We cannot hold/drop marker
		 * objects!
		 */
		if (object->type == OBJT_MARKER) {
			vm_swapcache_movemarker(marker, *swindexp, object);
			continue;
		}

		/*
		 * Safety, or in case there are millions of VM objects
		 * without swapcache backing.
		 */
		if (--scount <= 0)
			goto breakout;

		/*
		 * We must hold the object before potentially yielding.
		 */
		vm_object_hold(object);
		lwkt_yield();

		/*
		 * Only operate on live VNODE objects that are either
		 * VREG or VCHR (VCHR for meta-data).
		 */
		if ((object->type != OBJT_VNODE) ||
		    ((object->flags & OBJ_DEAD) ||
		     object->swblock_count == 0) ||
		    ((vp = object->handle) == NULL) ||
		    (vp->v_type != VREG && vp->v_type != VCHR)) {
			vm_object_drop(object);
			/* object may be invalid now */
			vm_swapcache_movemarker(marker, *swindexp, object);
			continue;
		}

		/*
		 * Reset the object pindex stored in the marker if the
		 * working object has changed.
		 */
		if (marker->backing_object != object) {
			marker->size = 0;
			marker->backing_object_offset = 0;
			marker->backing_object = object;
		}

		/*
		 * Look for swblocks starting at our iterator.
		 *
		 * The swap_pager_condfree() function attempts to free
		 * swap space starting at the specified index.  The index
		 * will be updated on return.  The function will return
		 * a scan factor (NOT the number of blocks freed).
		 *
		 * If it must cut its scan of the object short due to an
		 * excessive number of swblocks, or is able to free the
		 * requested number of blocks, it will return n >= count
		 * and we break and pick it back up on a future attempt.
		 *
		 * Scan the object linearly and try to batch large sets of
		 * blocks that are likely to clean out entire swap radix
		 * tree leafs.
		 */
		lwkt_token_swap();
		lwkt_reltoken(&vmobj_tokens[*swindexp]);

		n = swap_pager_condfree(object, &marker->size,
				    (count + SWAP_META_MASK) & ~SWAP_META_MASK);

		vm_object_drop(object);		/* object may be invalid now */
		lwkt_gettoken(&vmobj_tokens[*swindexp]);

		/*
		 * If we have exhausted the object or deleted our per-pass
		 * page limit then move us to the next object.  Note that
		 * the current object may no longer be on the vm_object_list.
		 */
		if (n <= 0 ||
		    marker->backing_object_offset > vm_swapcache_cleanperobj) {
			vm_swapcache_movemarker(marker, *swindexp, object);
		}

		/*
		 * If we have exhausted our max-launder stop for now.
		 */
		count -= n;
		marker->backing_object_offset += n * PAGE_SIZE;
		if (count < 0)
			goto breakout;
	}

	/*
	 * Iterate vm_object_lists[] hash table
	 */
	TAILQ_REMOVE(&vm_object_lists[*swindexp], marker, object_list);
	lwkt_reltoken(&vmobj_tokens[*swindexp]);
	if (++*swindexp >= VMOBJ_HSIZE)
		*swindexp = 0;
	lwkt_gettoken(&vmobj_tokens[*swindexp]);
	TAILQ_INSERT_HEAD(&vm_object_lists[*swindexp], marker, object_list);

	if (*swindexp != 0)
		goto outerloop;

breakout:
	lwkt_reltoken(&vmobj_tokens[*swindexp]);
}

/*
 * Move the marker past the current object.  Object can be stale, but we
 * still need it to determine if the marker has to be moved.  If the object
 * is still the 'current object' (object after the marker), we hop-scotch
 * the marker past it.
 */
static void
vm_swapcache_movemarker(vm_object_t marker, int swindex, vm_object_t object)
{
	if (TAILQ_NEXT(marker, object_list) == object) {
		TAILQ_REMOVE(&vm_object_lists[swindex], marker, object_list);
		TAILQ_INSERT_AFTER(&vm_object_lists[swindex], object,
				   marker, object_list);
	}
}