xref: /dragonfly/sys/vm/swap_pager.c (revision e98bdfd3)
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1998-2010 The DragonFly Project.  All rights reserved.
5  *
6  * This code is derived from software contributed to The DragonFly Project
7  * by Matthew Dillon <dillon@backplane.com>
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * Copyright (c) 1994 John S. Dyson
37  * Copyright (c) 1990 University of Utah.
38  * Copyright (c) 1991, 1993
39  *	The Regents of the University of California.  All rights reserved.
40  *
41  * This code is derived from software contributed to Berkeley by
42  * the Systems Programming Group of the University of Utah Computer
43  * Science Department.
44  *
45  * Redistribution and use in source and binary forms, with or without
46  * modification, are permitted provided that the following conditions
47  * are met:
48  * 1. Redistributions of source code must retain the above copyright
49  *    notice, this list of conditions and the following disclaimer.
50  * 2. Redistributions in binary form must reproduce the above copyright
51  *    notice, this list of conditions and the following disclaimer in the
52  *    documentation and/or other materials provided with the distribution.
53  * 3. Neither the name of the University nor the names of its contributors
54  *    may be used to endorse or promote products derived from this software
55  *    without specific prior written permission.
56  *
57  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
58  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
61  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
62  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
63  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
64  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
65  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
66  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
67  * SUCH DAMAGE.
68  *
69  *				New Swap System
70  *				Matthew Dillon
71  *
72  * Radix Bitmap 'blists'.
73  *
74  *	- The new swapper uses the new radix bitmap code.  This should scale
75  *	  to arbitrarily small or arbitrarily large swap spaces and an almost
76  *	  arbitrary degree of fragmentation.
77  *
78  * Features:
79  *
80  *	- on the fly reallocation of swap during putpages.  The new system
81  *	  does not try to keep previously allocated swap blocks for dirty
82  *	  pages.
83  *
84  *	- on the fly deallocation of swap
85  *
86  *	- No more garbage collection required.  Unnecessarily allocated swap
87  *	  blocks only exist for dirty vm_page_t's now and these are already
88  *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
89  *	  removal of invalidated swap blocks when a page is destroyed
90  *	  or renamed.
91  *
92  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
93  * @(#)swap_pager.c	8.9 (Berkeley) 3/21/94
94  * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
95  */
96 
97 #include <sys/param.h>
98 #include <sys/systm.h>
99 #include <sys/conf.h>
100 #include <sys/kernel.h>
101 #include <sys/proc.h>
102 #include <sys/buf.h>
103 #include <sys/vnode.h>
104 #include <sys/malloc.h>
105 #include <sys/vmmeter.h>
106 #include <sys/sysctl.h>
107 #include <sys/blist.h>
108 #include <sys/lock.h>
109 #include <sys/thread2.h>
110 
111 #include "opt_swap.h"
112 #include <vm/vm.h>
113 #include <vm/vm_object.h>
114 #include <vm/vm_page.h>
115 #include <vm/vm_pager.h>
116 #include <vm/vm_pageout.h>
117 #include <vm/swap_pager.h>
118 #include <vm/vm_extern.h>
119 #include <vm/vm_zone.h>
120 #include <vm/vnode_pager.h>
121 
122 #include <sys/buf2.h>
123 #include <vm/vm_page2.h>
124 
125 #ifndef MAX_PAGEOUT_CLUSTER
126 #define MAX_PAGEOUT_CLUSTER	SWB_NPAGES
127 #endif
128 
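/*
 * Flags passed to swp_pager_meta_ctl().  SWM_FREE asks that the
 * underlying swap block be released back to the bitmap; SWM_POP asks
 * that the assignment be removed from the meta-data and returned to
 * the caller without freeing the block.
 */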
129 #define SWM_FREE	0x02	/* free, period			*/
130 #define SWM_POP		0x04	/* pop out			*/
131 
132 #define SWBIO_READ	0x01
133 #define SWBIO_WRITE	0x02
134 #define SWBIO_SYNC	0x04
135 
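/*
 * Scan bookkeeping handed to the swblock RB-tree scan callbacks
 * (see rb_swblock_scancmp(), rb_swblock_condcmp() and
 * swap_pager_condfree_callback()).
 */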
136 struct swfreeinfo {
137 	vm_object_t	object;
138 	vm_pindex_t	basei;
139 	vm_pindex_t	begi;
140 	vm_pindex_t	endi;	/* inclusive */
141 };
142 
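/*
 * Per-object argument block used when removing swap assignments
 * backed by a specific swap device (the swapoff path).
 */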
143 struct swswapoffinfo {
144 	vm_object_t	object;
145 	int		devidx;
146 	int		shared;
147 };
148 
149 /*
150  * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
151  * in the old system.
152  */
153 
154 int swap_pager_full;		/* swap space exhaustion (task killing) */
155 int swap_fail_ticks;		/* when we became exhausted */
156 int swap_pager_almost_full;	/* swap space exhaustion (w/ hysteresis)*/
157 int vm_swap_cache_use;
158 int vm_swap_anon_use;
159 static int vm_report_swap_allocs;
160 
161 static int nsw_rcount;		/* free read buffers			*/
162 static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
163 static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
164 static int nsw_wcount_async_max;/* assigned maximum			*/
165 static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
166 
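/*
 * Radix bitmap of free swap blocks, maintained in PAGE_SIZE units.
 */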
167 struct blist *swapblist;
168 static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
169 static int swap_burst_read = 0;	/* allow burst reading */
170 static swblk_t swapiterator;	/* linearize allocations */
171 
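/*
 * Protects the cluster buffer chain linkage while swap I/O
 * completions are in flight (see swap_chain_iodone()).
 */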
172 static struct spinlock swapbp_spin = SPINLOCK_INITIALIZER(&swapbp_spin, "swapbp_spin");
173 
174 /* from vm_swap.c */
175 extern struct vnode *swapdev_vp;
176 extern struct swdevt *swdevt;
177 extern int nswdev;
178 
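/*
 * Convert a swap block number to the index of the swap device backing
 * it, based on the dmmax interleave stripe.
 */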
179 #define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0)
180 
181 SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
182         CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
183 SYSCTL_INT(_vm, OID_AUTO, swap_burst_read,
184         CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins");
185 
186 SYSCTL_INT(_vm, OID_AUTO, swap_cache_use,
187         CTLFLAG_RD, &vm_swap_cache_use, 0, "");
188 SYSCTL_INT(_vm, OID_AUTO, swap_anon_use,
189         CTLFLAG_RD, &vm_swap_anon_use, 0, "");
190 SYSCTL_INT(_vm, OID_AUTO, swap_size,
191         CTLFLAG_RD, &vm_swap_size, 0, "");
192 SYSCTL_INT(_vm, OID_AUTO, report_swap_allocs,
193         CTLFLAG_RW, &vm_report_swap_allocs, 0, "");
194 
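/*
 * Zone for struct swblock meta-data entries, sized in
 * swap_pager_swap_init().
 */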
195 vm_zone_t		swap_zone;
196 
197 /*
198  * Red-Black tree for swblock entries
199  *
200  * The caller must hold vm_token
201  */
202 RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare,
203 	     vm_pindex_t, swb_index);
204 
205 int
206 rb_swblock_compare(struct swblock *swb1, struct swblock *swb2)
207 {
208 	if (swb1->swb_index < swb2->swb_index)
209 		return(-1);
210 	if (swb1->swb_index > swb2->swb_index)
211 		return(1);
212 	return(0);
213 }
214 
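/*
 * Range comparison for RB-tree scans: match swblocks whose index
 * falls within [basei, endi] inclusive.
 */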
215 static
216 int
217 rb_swblock_scancmp(struct swblock *swb, void *data)
218 {
219 	struct swfreeinfo *info = data;
220 
221 	if (swb->swb_index < info->basei)
222 		return(-1);
223 	if (swb->swb_index > info->endi)
224 		return(1);
225 	return(0);
226 }
227 
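/*
 * Conditional comparison: match any swblock at or above basei so a
 * scan can proceed from basei to the end of the tree.
 */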
228 static
229 int
230 rb_swblock_condcmp(struct swblock *swb, void *data)
231 {
232 	struct swfreeinfo *info = data;
233 
234 	if (swb->swb_index < info->basei)
235 		return(-1);
236 	return(0);
237 }
238 
239 /*
240  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
241  * calls hooked from other parts of the VM system and do not appear here.
242  * (see vm/swap_pager.h).
243  */
244 
245 static void	swap_pager_dealloc (vm_object_t object);
246 static int	swap_pager_getpage (vm_object_t, vm_page_t *, int);
247 static void	swap_chain_iodone(struct bio *biox);
248 
249 struct pagerops swappagerops = {
250 	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
251 	swap_pager_getpage,	/* pagein				*/
252 	swap_pager_putpages,	/* pageout				*/
253 	swap_pager_haspage	/* get backing store status for page	*/
254 };
255 
256 /*
257  * dmmax is in page-sized chunks with the new swap system.  It was
258  * dev-bsized chunks in the old.  dmmax is always a power of 2.
259  *
260  * swap_*() routines are externally accessible.  swp_*() routines are
261  * internal.
262  */
263 
264 int dmmax;
265 static int dmmax_mask;
266 int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
267 int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */
268 
269 static __inline void	swp_sizecheck (void);
270 static void	swp_pager_async_iodone (struct bio *bio);
271 
272 /*
273  * Swap bitmap functions
274  */
275 
276 static __inline void	swp_pager_freeswapspace(vm_object_t object,
277 						swblk_t blk, int npages);
278 static __inline swblk_t	swp_pager_getswapspace(vm_object_t object, int npages);
279 
280 /*
281  * Metadata functions
282  */
283 
284 static void swp_pager_meta_convert(vm_object_t);
285 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t);
286 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
287 static void swp_pager_meta_free_all(vm_object_t);
288 static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
289 
290 /*
291  * SWP_SIZECHECK() -	update swap_pager_full indication
292  *
293  *	update the swap_pager_almost_full indication and warn when we are
294  *	about to run out of swap space, using lowat/hiwat hysteresis.
295  *
296  *	Clear swap_pager_full ( task killing ) indication when lowat is met.
297  *
298  * No restrictions on call
299  * This routine may not block.
300  * SMP races are ok.
301  */
302 static __inline void
303 swp_sizecheck(void)
304 {
305 	if (vm_swap_size < nswap_lowat) {
306 		if (swap_pager_almost_full == 0) {
307 			kprintf("swap_pager: out of swap space\n");
308 			swap_pager_almost_full = 1;
309 			swap_fail_ticks = ticks;
310 		}
311 	} else {
312 		swap_pager_full = 0;
313 		if (vm_swap_size > nswap_hiwat)
314 			swap_pager_almost_full = 0;
315 	}
316 }
317 
318 /*
319  * SWAP_PAGER_INIT() -	initialize the swap pager!
320  *
321  *	Expected to be started from system init.  NOTE:  This code is run
322  *	before much else so be careful what you depend on.  Most of the VM
323  *	system has yet to be initialized at this point.
324  *
325  * Called from the low level boot code only.
326  */
327 static void
328 swap_pager_init(void *arg __unused)
329 {
330 	/*
331 	 * Device Stripe, in PAGE_SIZE'd blocks
332 	 */
333 	dmmax = SWB_NPAGES * 2;
334 	dmmax_mask = ~(dmmax - 1);
335 }
336 SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL);
337 
338 /*
339  * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
340  *
341  *	Expected to be started from pageout process once, prior to entering
342  *	its main loop.
343  *
344  * Called from the low level boot code only.
345  */
346 void
347 swap_pager_swap_init(void)
348 {
349 	int n, n2;
350 
351 	/*
352 	 * Number of in-transit swap bp operations.  Don't
353 	 * exhaust the pbufs completely.  Make sure we
354 	 * initialize workable values (0 will work for hysteresis
355 	 * but it isn't very efficient).
356 	 *
357 	 * The nsw_cluster_max is constrained by the number of pages an XIO
358 	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
359 	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
360 	 * constrained by the swap device interleave stripe size.
361 	 *
362 	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
363 	 * designed to prevent other I/O from having high latencies due to
364 	 * our pageout I/O.  The value 4 works well for one or two active swap
365 	 * devices but is probably a little low if you have more.  Even so,
366 	 * a higher value would probably generate only a limited improvement
367 	 * with three or four active swap devices since the system does not
368 	 * typically have to pageout at extreme bandwidths.   We will want
369 	 * at least 2 per swap devices, and 4 is a pretty good value if you
370 	 * at least 2 per swap device, and 4 is a pretty good value if you
371 	 * So it all works out pretty well.
372 	 */
373 
374 	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
375 
376 	nsw_rcount = (nswbuf + 1) / 2;
377 	nsw_wcount_sync = (nswbuf + 3) / 4;
378 	nsw_wcount_async = 4;
379 	nsw_wcount_async_max = nsw_wcount_async;
380 
381 	/*
382 	 * The zone is dynamically allocated so generally size it to
383 	 * maxswzone (32MB to 512MB of KVM).  Set a minimum size based
384 	 * on physical memory of around 8x (each swblock can hold 16 pages).
385 	 *
386 	 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
387 	 * has increased dramatically.
388 	 */
389 	n = vmstats.v_page_count / 2;
390 	if (maxswzone && n < maxswzone / sizeof(struct swblock))
391 		n = maxswzone / sizeof(struct swblock);
392 	n2 = n;
393 
394 	do {
395 		swap_zone = zinit(
396 			"SWAPMETA",
397 			sizeof(struct swblock),
398 			n,
399 			ZONE_INTERRUPT,
400 			1);
401 		if (swap_zone != NULL)
402 			break;
403 		/*
404 		 * if the allocation failed, try a zone two thirds the
405 		 * size of the previous attempt.
406 		 */
407 		n -= ((n + 2) / 3);
408 	} while (n > 0);
409 
410 	if (swap_zone == NULL)
411 		panic("swap_pager_swap_init: swap_zone == NULL");
412 	if (n2 != n)
413 		kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
414 }
415 
416 /*
417  * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
418  *			its metadata structures.
419  *
420  *	This routine is called from the mmap and fork code to create a new
421  *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
422  *	and then converting it with swp_pager_meta_convert().
423  *
424  *	We only support unnamed objects.
425  *
426  * No restrictions.
427  */
428 vm_object_t
429 swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
430 {
431 	vm_object_t object;
432 
433 	KKASSERT(handle == NULL);
434 	object = vm_object_allocate_hold(OBJT_DEFAULT,
435 					 OFF_TO_IDX(offset + PAGE_MASK + size));
436 	swp_pager_meta_convert(object);
437 	vm_object_drop(object);
438 
439 	return (object);
440 }
441 
442 /*
443  * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
444  *
445  *	The swap backing for the object is destroyed.  The code is
446  *	designed such that we can reinstantiate it later, but this
447  *	routine is typically called only when the entire object is
448  *	about to be destroyed.
449  *
450  * The object must be locked or unreferenceable.
451  * No other requirements.
452  */
453 static void
454 swap_pager_dealloc(vm_object_t object)
455 {
456 	vm_object_hold(object);
457 	vm_object_pip_wait(object, "swpdea");
458 
459 	/*
460 	 * Free all remaining metadata.  We only bother to free it from
461 	 * the swap meta data.  We do not attempt to free swapblk's still
462 	 * associated with vm_page_t's for this object.  We do not care
463 	 * if paging is still in progress on some objects.
464 	 */
465 	swp_pager_meta_free_all(object);
466 	vm_object_drop(object);
467 }
468 
469 /************************************************************************
470  *			SWAP PAGER BITMAP ROUTINES			*
471  ************************************************************************/
472 
473 /*
474  * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
475  *
476  *	Allocate swap for the requested number of pages.  The starting
477  *	swap block number (a page index) is returned or SWAPBLK_NONE
478  *	if the allocation failed.
479  *
480  *	Also has the side effect of advising that somebody made a mistake
481  *	when they configured swap and didn't configure enough.
482  *
483  * The caller must hold the object.
484  * This routine may not block.
485  */
486 static __inline swblk_t
487 swp_pager_getswapspace(vm_object_t object, int npages)
488 {
489 	swblk_t blk;
490 
491 	lwkt_gettoken(&vm_token);
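	/*
	 * Try to allocate starting from the iterator hint first, then
	 * retry from the beginning of the bitmap.
	 */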
492 	blk = blist_allocat(swapblist, npages, swapiterator);
493 	if (blk == SWAPBLK_NONE)
494 		blk = blist_allocat(swapblist, npages, 0);
495 	if (blk == SWAPBLK_NONE) {
496 		if (swap_pager_full != 2) {
497 			kprintf("swap_pager_getswapspace: failed alloc=%d\n",
498 				npages);
499 			swap_pager_full = 2;
500 			if (swap_pager_almost_full == 0)
501 				swap_fail_ticks = ticks;
502 			swap_pager_almost_full = 1;
503 		}
504 	} else {
505 		/* swapiterator = blk; disable for now, doesn't work well */
506 		swapacctspace(blk, -npages);
507 		if (object->type == OBJT_SWAP)
508 			vm_swap_anon_use += npages;
509 		else
510 			vm_swap_cache_use += npages;
511 		swp_sizecheck();
512 	}
513 	lwkt_reltoken(&vm_token);
514 	return(blk);
515 }
516 
517 /*
518  * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
519  *
520  *	This routine returns the specified swap blocks back to the bitmap.
521  *
522  *	Note:  This routine may not block (it could in the old swap code),
523  *	and through the use of the new blist routines it does not block.
524  *
525  *	We must be called at splvm() to avoid races with bitmap frees from
526  *	vm_page_remove() aka swap_pager_page_removed().
527  *
528  * This routine may not block.
529  */
530 
531 static __inline void
532 swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages)
533 {
534 	struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)];
535 
536 	lwkt_gettoken(&vm_token);
537 	sp->sw_nused -= npages;
538 	if (object->type == OBJT_SWAP)
539 		vm_swap_anon_use -= npages;
540 	else
541 		vm_swap_cache_use -= npages;
542 
543 	if (sp->sw_flags & SW_CLOSING) {
544 		lwkt_reltoken(&vm_token);
545 		return;
546 	}
547 
548 	blist_free(swapblist, blk, npages);
549 	vm_swap_size += npages;
550 	swp_sizecheck();
551 	lwkt_reltoken(&vm_token);
552 }
553 
554 /*
555  * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
556  *				range within an object.
557  *
558  *	This is a globally accessible routine.
559  *
560  *	This routine removes swapblk assignments from swap metadata.
561  *
562  *	The external callers of this routine typically have already destroyed
563  *	or renamed vm_page_t's associated with this range in the object so
564  *	we should be ok.
565  *
566  * No requirements.
567  */
568 void
569 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size)
570 {
571 	vm_object_hold(object);
572 	swp_pager_meta_free(object, start, size);
573 	vm_object_drop(object);
574 }
575 
576 /*
577  * No requirements.
578  */
579 void
580 swap_pager_freespace_all(vm_object_t object)
581 {
582 	vm_object_hold(object);
583 	swp_pager_meta_free_all(object);
584 	vm_object_drop(object);
585 }
586 
587 /*
588  * This function conditionally frees swap cache swap starting at
589  * (*basei) in the object.  (count) swap blocks will be nominally freed.
590  * The actual number of blocks freed can be more or less than the
591  * requested number.
592  *
593  * This function nominally returns the number of blocks freed.  However,
594  * the actual number of blocks freed may be less than the returned value.
595  * If the function is unable to exhaust the object or if it is able to
596  * free (approximately) the requested number of blocks it returns
597  * a value n > count.
598  *
599  * If we exhaust the object we will return a value n <= count.
600  *
601  * The caller must hold the object.
602  *
603  * WARNING!  If count == 0 then -1 can be returned as a degenerate case,
604  *	     callers should always pass a count value > 0.
605  */
606 static int swap_pager_condfree_callback(struct swblock *swap, void *data);
607 
608 int
609 swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count)
610 {
611 	struct swfreeinfo info;
612 	int n;
613 	int t;
614 
615 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
616 
617 	info.object = object;
618 	info.basei = *basei;	/* skip up to this page index */
619 	info.begi = count;	/* max swap pages to destroy */
620 	info.endi = count * 8;	/* max swblocks to scan */
621 
622 	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp,
623 				swap_pager_condfree_callback, &info);
624 	*basei = info.basei;
625 
626 	/*
627 	 * Take the higher difference swblocks vs pages
628 	 */
629 	n = count - (int)info.begi;
630 	t = count * 8 - (int)info.endi;
631 	if (n < t)
632 		n = t;
633 	if (n < 1)
634 		n = 1;
635 	return(n);
636 }
637 
638 /*
639  * The idea is to free whole meta-blocks to avoid fragmenting
640  * the swap space or disk I/O.  We only do this if NO VM pages
641  * are present.
642  *
643  * We do not have to deal with clearing PG_SWAPPED in related VM
644  * pages because there are no related VM pages.
645  *
646  * The caller must hold the object.
647  */
648 static int
649 swap_pager_condfree_callback(struct swblock *swap, void *data)
650 {
651 	struct swfreeinfo *info = data;
652 	vm_object_t object = info->object;
653 	int i;
654 
655 	for (i = 0; i < SWAP_META_PAGES; ++i) {
656 		if (vm_page_lookup(object, swap->swb_index + i))
657 			break;
658 	}
659 	info->basei = swap->swb_index + SWAP_META_PAGES;
660 	if (i == SWAP_META_PAGES) {
661 		info->begi -= swap->swb_count;
662 		swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES);
663 	}
664 	--info->endi;
665 	if ((int)info->begi < 0 || (int)info->endi < 0)
666 		return(-1);
667 	lwkt_yield();
668 	return(0);
669 }
670 
671 /*
672  * Called by vm_page_alloc() when a new VM page is inserted
673  * into a VM object.  Checks whether swap has been assigned to
674  * the page and sets PG_SWAPPED as necessary.
675  *
676  * No requirements.
677  */
678 void
679 swap_pager_page_inserted(vm_page_t m)
680 {
681 	if (m->object->swblock_count) {
682 		vm_object_hold(m->object);
683 		if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE)
684 			vm_page_flag_set(m, PG_SWAPPED);
685 		vm_object_drop(m->object);
686 	}
687 }
688 
689 /*
690  * SWAP_PAGER_RESERVE() - reserve swap blocks in object
691  *
692  *	Assigns swap blocks to the specified range within the object.  The
693  *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
694  *
695  *	Returns 0 on success, -1 on failure.
696  *
697  * The caller is responsible for avoiding races in the specified range.
698  * No other requirements.
699  */
700 int
701 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
702 {
703 	int n = 0;
704 	swblk_t blk = SWAPBLK_NONE;
705 	vm_pindex_t beg = start;	/* save start index */
706 
707 	vm_object_hold(object);
708 
709 	while (size) {
710 		if (n == 0) {
711 			n = BLIST_MAX_ALLOC;
712 			while ((blk = swp_pager_getswapspace(object, n)) ==
713 			       SWAPBLK_NONE)
714 			{
715 				n >>= 1;
716 				if (n == 0) {
717 					swp_pager_meta_free(object, beg,
718 							    start - beg);
719 					vm_object_drop(object);
720 					return(-1);
721 				}
722 			}
723 		}
724 		swp_pager_meta_build(object, start, blk);
725 		--size;
726 		++start;
727 		++blk;
728 		--n;
729 	}
730 	swp_pager_meta_free(object, start, n);
731 	vm_object_drop(object);
732 	return(0);
733 }
734 
735 /*
736  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
737  *			and destroy the source.
738  *
739  *	Copy any valid swapblks from the source to the destination.  In
740  *	cases where both the source and destination have a valid swapblk,
741  *	we keep the destination's.
742  *
743  *	This routine is allowed to block.  It may block allocating metadata
744  *	indirectly through swp_pager_meta_build() or if paging is still in
745  *	progress on the source.
746  *
747  *	XXX vm_page_collapse() kinda expects us not to block because we
748  *	supposedly do not need to allocate memory, but for the moment we
749  *	*may* have to get a little memory from the zone allocator, but
750  *	it is taken from the interrupt memory.  We should be ok.
751  *
752  *	The source object contains no vm_page_t's (which is just as well)
753  *	The source object is of type OBJT_SWAP.
754  *
755  *	The source and destination objects must be held by the caller.
756  */
757 void
758 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
759 		vm_pindex_t base_index, int destroysource)
760 {
761 	vm_pindex_t i;
762 
763 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject));
764 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject));
765 
766 	/*
767 	 * transfer source to destination.
768 	 */
769 	for (i = 0; i < dstobject->size; ++i) {
770 		swblk_t dstaddr;
771 
772 		/*
773 		 * Locate (without changing) the swapblk on the destination,
774 		 * unless it is invalid in which case free it silently, or
775 		 * if the destination is a resident page, in which case the
776 		 * source is thrown away.
777 		 */
778 		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
779 
780 		if (dstaddr == SWAPBLK_NONE) {
781 			/*
782 			 * Destination has no swapblk and is not resident,
783 			 * copy source.
784 			 */
785 			swblk_t srcaddr;
786 
787 			srcaddr = swp_pager_meta_ctl(srcobject,
788 						     base_index + i, SWM_POP);
789 
790 			if (srcaddr != SWAPBLK_NONE)
791 				swp_pager_meta_build(dstobject, i, srcaddr);
792 		} else {
793 			/*
794 			 * Destination has valid swapblk or it is represented
795 			 * by a resident page.  We destroy the sourceblock.
796 			 */
797 			swp_pager_meta_ctl(srcobject, base_index + i, SWM_FREE);
798 		}
799 	}
800 
801 	/*
802 	 * Free left over swap blocks in source.
803 	 *
804  * We have to revert the type to OBJT_DEFAULT so we do not accidentally
805 	 * double-remove the object from the swap queues.
806 	 */
807 	if (destroysource) {
808 		/*
809 		 * Reverting the type is not necessary, the caller is going
810 		 * to destroy srcobject directly, but I'm doing it here
811 		 * for consistency since we've removed the object from its
812 		 * queues.
813 		 */
814 		swp_pager_meta_free_all(srcobject);
815 		if (srcobject->type == OBJT_SWAP)
816 			srcobject->type = OBJT_DEFAULT;
817 	}
818 }
819 
820 /*
821  * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
822  *				the requested page.
823  *
824  *	We determine whether good backing store exists for the requested
825  *	page and return TRUE if it does, FALSE if it doesn't.
826  *
827  *	If TRUE, we also try to determine how much valid, contiguous backing
828  *	store exists before and after the requested page within a reasonable
829  *	distance.  We do not try to restrict it to the swap device stripe
830  *	(that is handled in getpages/putpages).  It probably isn't worth
831  *	doing here.
832  *
833  * No requirements.
834  */
835 boolean_t
836 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex)
837 {
838 	swblk_t blk0;
839 
840 	/*
841 	 * do we have good backing store at the requested index ?
842 	 */
843 	vm_object_hold(object);
844 	blk0 = swp_pager_meta_ctl(object, pindex, 0);
845 
846 	if (blk0 == SWAPBLK_NONE) {
847 		vm_object_drop(object);
848 		return (FALSE);
849 	}
850 	vm_object_drop(object);
851 	return (TRUE);
852 }
853 
854 /*
855  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
856  *
857  * This removes any associated swap backing store, whether valid or
858  * not, from the page.  This operates on any VM object, not just OBJT_SWAP
859  * objects.
860  *
861  * This routine is typically called when a page is made dirty, at
862  * which point any associated swap can be freed.  MADV_FREE also
863  * calls us in a special-case situation.
864  *
865  * NOTE!!!  If the page is clean and the swap was valid, the caller
866  * should make the page dirty before calling this routine.  This routine
867  * does NOT change the m->dirty status of the page.  Also: MADV_FREE
868  * depends on it.
869  *
870  * The page must be busied or soft-busied.
871  * The caller can hold the object to avoid blocking, else we might block.
872  * No other requirements.
873  */
874 void
875 swap_pager_unswapped(vm_page_t m)
876 {
877 	if (m->flags & PG_SWAPPED) {
878 		vm_object_hold(m->object);
879 		KKASSERT(m->flags & PG_SWAPPED);
880 		swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
881 		vm_page_flag_clear(m, PG_SWAPPED);
882 		vm_object_drop(m->object);
883 	}
884 }
885 
886 /*
887  * SWAP_PAGER_STRATEGY() - read, write, free blocks
888  *
889  * This implements a VM OBJECT strategy function using swap backing store.
890  * This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP
891  * types.
892  *
893  * This is intended to be a cacheless interface (i.e. caching occurs at
894  * higher levels), and is also used as a swap-based SSD cache for vnode
895  * and device objects.
896  *
897  * All I/O goes directly to and from the swap device.
898  *
899  * We currently attempt to run I/O synchronously or asynchronously as
900  * the caller requests.  This isn't perfect because we loose error
901  * the caller requests.  This isn't perfect because we lose error
902  * But this is swap, so we let it all hang out.
903  *
904  * No requirements.
905  */
906 void
907 swap_pager_strategy(vm_object_t object, struct bio *bio)
908 {
909 	struct buf *bp = bio->bio_buf;
910 	struct bio *nbio;
911 	vm_pindex_t start;
912 	vm_pindex_t biox_blkno = 0;
913 	int count;
914 	char *data;
915 	struct bio *biox;
916 	struct buf *bufx;
917 #if 0
918 	struct bio_track *track;
919 #endif
920 
921 #if 0
922 	/*
923 	 * tracking for swapdev vnode I/Os
924 	 */
925 	if (bp->b_cmd == BUF_CMD_READ)
926 		track = &swapdev_vp->v_track_read;
927 	else
928 		track = &swapdev_vp->v_track_write;
929 #endif
930 
931 	if (bp->b_bcount & PAGE_MASK) {
932 		bp->b_error = EINVAL;
933 		bp->b_flags |= B_ERROR | B_INVAL;
934 		biodone(bio);
935 		kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
936 			"not page bounded\n",
937 			bp, (long long)bio->bio_offset, (int)bp->b_bcount);
938 		return;
939 	}
940 
941 	/*
942 	 * Clear error indication, initialize page index, count, data pointer.
943 	 */
944 	bp->b_error = 0;
945 	bp->b_flags &= ~B_ERROR;
946 	bp->b_resid = bp->b_bcount;
947 
948 	start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
949 	count = howmany(bp->b_bcount, PAGE_SIZE);
950 	data = bp->b_data;
951 
952 	/*
953 	 * Deal with BUF_CMD_FREEBLKS
954 	 */
955 	if (bp->b_cmd == BUF_CMD_FREEBLKS) {
956 		/*
957 		 * FREE PAGE(s) - destroy underlying swap that is no longer
958 		 *		  needed.
959 		 */
960 		vm_object_hold(object);
961 		swp_pager_meta_free(object, start, count);
962 		vm_object_drop(object);
963 		bp->b_resid = 0;
964 		biodone(bio);
965 		return;
966 	}
967 
968 	/*
969 	 * We need to be able to create a new cluster of I/O's.  We cannot
970 	 * use the caller fields of the passed bio so push a new one.
971 	 *
972 	 * Because nbio is just a placeholder for the cluster links,
973 	 * we can biodone() the original bio instead of nbio to make
974 	 * things a bit more efficient.
975 	 */
976 	nbio = push_bio(bio);
977 	nbio->bio_offset = bio->bio_offset;
978 	nbio->bio_caller_info1.cluster_head = NULL;
979 	nbio->bio_caller_info2.cluster_tail = NULL;
980 
981 	biox = NULL;
982 	bufx = NULL;
983 
984 	/*
985 	 * Execute read or write
986 	 */
987 	vm_object_hold(object);
988 
989 	while (count > 0) {
990 		swblk_t blk;
991 
992 		/*
993 		 * Obtain block.  If block not found and writing, allocate a
994 		 * new block and build it into the object.
995 		 */
996 		blk = swp_pager_meta_ctl(object, start, 0);
997 		if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) {
998 			blk = swp_pager_getswapspace(object, 1);
999 			if (blk == SWAPBLK_NONE) {
1000 				bp->b_error = ENOMEM;
1001 				bp->b_flags |= B_ERROR;
1002 				break;
1003 			}
1004 			swp_pager_meta_build(object, start, blk);
1005 		}
1006 
1007 		/*
1008 		 * Do we have to flush our current collection?  Yes if:
1009 		 *
1010 		 *	- no swap block at this index
1011 		 *	- swap block is not contiguous
1012 		 *	- we cross a physical disk boundary in the
1013 		 *	  stripe.
1014 		 */
1015 		if (
1016 		    biox && (biox_blkno + btoc(bufx->b_bcount) != blk ||
1017 		     ((biox_blkno ^ blk) & dmmax_mask)
1018 		    )
1019 		) {
1020 			if (bp->b_cmd == BUF_CMD_READ) {
1021 				++mycpu->gd_cnt.v_swapin;
1022 				mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
1023 			} else {
1024 				++mycpu->gd_cnt.v_swapout;
1025 				mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
1026 				bufx->b_dirtyend = bufx->b_bcount;
1027 			}
1028 
1029 			/*
1030 			 * Finished with this buf.
1031 			 */
1032 			KKASSERT(bufx->b_bcount != 0);
1033 			if (bufx->b_cmd != BUF_CMD_READ)
1034 				bufx->b_dirtyend = bufx->b_bcount;
1035 			biox = NULL;
1036 			bufx = NULL;
1037 		}
1038 
1039 		/*
1040 		 * Add new swapblk to biox, instantiating biox if necessary.
1041 		 * Zero-fill reads are able to take a shortcut.
1042 		 */
1043 		if (blk == SWAPBLK_NONE) {
1044 			/*
1045 			 * We can only get here if we are reading.  Since
1046 			 * we are at splvm() we can safely modify b_resid,
1047 			 * even if chain ops are in progress.
1048 			 */
1049 			bzero(data, PAGE_SIZE);
1050 			bp->b_resid -= PAGE_SIZE;
1051 		} else {
1052 			if (biox == NULL) {
1053 				/* XXX chain count > 4, wait to <= 4 */
1054 
1055 				bufx = getpbuf(NULL);
1056 				biox = &bufx->b_bio1;
1057 				cluster_append(nbio, bufx);
1058 				bufx->b_flags |= (bp->b_flags & B_ORDERED);
1059 				bufx->b_cmd = bp->b_cmd;
1060 				biox->bio_done = swap_chain_iodone;
1061 				biox->bio_offset = (off_t)blk << PAGE_SHIFT;
1062 				biox->bio_caller_info1.cluster_parent = nbio;
1063 				biox_blkno = blk;
1064 				bufx->b_bcount = 0;
1065 				bufx->b_data = data;
1066 			}
1067 			bufx->b_bcount += PAGE_SIZE;
1068 		}
1069 		--count;
1070 		++start;
1071 		data += PAGE_SIZE;
1072 	}
1073 
1074 	vm_object_drop(object);
1075 
1076 	/*
1077 	 *  Flush out last buffer
1078 	 */
1079 	if (biox) {
1080 		if (bufx->b_cmd == BUF_CMD_READ) {
1081 			++mycpu->gd_cnt.v_swapin;
1082 			mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
1083 		} else {
1084 			++mycpu->gd_cnt.v_swapout;
1085 			mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
1086 			bufx->b_dirtyend = bufx->b_bcount;
1087 		}
1088 		KKASSERT(bufx->b_bcount);
1089 		if (bufx->b_cmd != BUF_CMD_READ)
1090 			bufx->b_dirtyend = bufx->b_bcount;
1091 		/* biox, bufx = NULL */
1092 	}
1093 
1094 	/*
1095 	 * Now initiate all the I/O.  Be careful looping on our chain as
1096 	 * I/O's may complete while we are still initiating them.
1097 	 *
1098 	 * If the request is a 100% sparse read no bios will be present
1099 	 * and we just biodone() the buffer.
1100 	 */
1101 	nbio->bio_caller_info2.cluster_tail = NULL;
1102 	bufx = nbio->bio_caller_info1.cluster_head;
1103 
1104 	if (bufx) {
1105 		while (bufx) {
1106 			biox = &bufx->b_bio1;
1107 			BUF_KERNPROC(bufx);
1108 			bufx = bufx->b_cluster_next;
1109 			vn_strategy(swapdev_vp, biox);
1110 		}
1111 	} else {
1112 		biodone(bio);
1113 	}
1114 
1115 	/*
1116 	 * Completion of the cluster will also call biodone_chain(nbio).
1117 	 * We never call biodone(nbio) so we don't have to worry about
1118 	 * setting up a bio_done callback.  It's handled in the sub-IO.
1119 	 */
1120 	/**/
1121 }
1122 
1123 /*
1124  * biodone callback
1125  *
1126  * No requirements.
1127  */
1128 static void
1129 swap_chain_iodone(struct bio *biox)
1130 {
1131 	struct buf **nextp;
1132 	struct buf *bufx;	/* chained sub-buffer */
1133 	struct bio *nbio;	/* parent nbio with chain glue */
1134 	struct buf *bp;		/* original bp associated with nbio */
1135 	int chain_empty;
1136 
1137 	bufx = biox->bio_buf;
1138 	nbio = biox->bio_caller_info1.cluster_parent;
1139 	bp = nbio->bio_buf;
1140 
1141 	/*
1142 	 * Update the original buffer
1143 	 */
1144         KKASSERT(bp != NULL);
1145 	if (bufx->b_flags & B_ERROR) {
1146 		atomic_set_int(&bp->b_flags, B_ERROR);
1147 		bp->b_error = bufx->b_error;	/* race ok */
1148 	} else if (bufx->b_resid != 0) {
1149 		atomic_set_int(&bp->b_flags, B_ERROR);
1150 		bp->b_error = EINVAL;		/* race ok */
1151 	} else {
1152 		atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
1153 	}
1154 
1155 	/*
1156 	 * Remove us from the chain.
1157 	 */
1158 	spin_lock(&swapbp_spin);
1159 	nextp = &nbio->bio_caller_info1.cluster_head;
1160 	while (*nextp != bufx) {
1161 		KKASSERT(*nextp != NULL);
1162 		nextp = &(*nextp)->b_cluster_next;
1163 	}
1164 	*nextp = bufx->b_cluster_next;
1165 	chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
1166 	spin_unlock(&swapbp_spin);
1167 
1168 	/*
1169 	 * Clean up bufx.  If the chain is now empty we finish out
1170 	 * the parent.  Note that we may be racing other completions
1171 	 * so we must use the chain_empty status from above.
1172 	 */
1173 	if (chain_empty) {
1174 		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
1175 			atomic_set_int(&bp->b_flags, B_ERROR);
1176 			bp->b_error = EINVAL;
1177 		}
1178 		biodone_chain(nbio);
1179         }
1180         relpbuf(bufx, NULL);
1181 }
1182 
1183 /*
1184  * SWAP_PAGER_GETPAGES() - bring page in from swap
1185  *
1186  * The requested page may have to be brought in from swap.  Calculate the
1187  * swap block and bring in additional pages if possible.  All pages must
1188  * have contiguous swap block assignments and reside in the same object.
1189  *
1190  * The caller has a single vm_object_pip_add() reference prior to
1191  * calling us and we should return with the same.
1192  *
1193  * The caller has BUSY'd the page.  We should return with (*mpp) left busy,
1194  * and any additional pages unbusied.
1195  *
1196  * If the caller encounters a PG_RAM page it will pass it to us even though
1197  * it may be valid and dirty.  We cannot overwrite the page in this case!
1198  * This case is used to allow us to issue pure read-aheads.
1199  *
1200  * NOTE! XXX This code does not entirely pipeline yet due to the fact that
1201  *       the PG_RAM page is validated at the same time as mreq.  What we
1202  *	 really need to do is issue a separate read-ahead pbuf.
1203  *
1204  * No requirements.
1205  */
1206 static int
1207 swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
1208 {
1209 	struct buf *bp;
1210 	struct bio *bio;
1211 	vm_page_t mreq;
1212 	vm_page_t m;
1213 	vm_offset_t kva;
1214 	swblk_t blk;
1215 	int i;
1216 	int j;
1217 	int raonly;
1218 	int error;
1219 	u_int32_t flags;
1220 	vm_page_t marray[XIO_INTERNAL_PAGES];
1221 
1222 	mreq = *mpp;
1223 
1224 	vm_object_hold(object);
1225 	if (mreq->object != object) {
1226 		panic("swap_pager_getpages: object mismatch %p/%p",
1227 		    object,
1228 		    mreq->object
1229 		);
1230 	}
1231 
1232 	/*
1233 	 * We don't want to overwrite a fully valid page as it might be
1234 	 * dirty.  This case can occur when e.g. vm_fault hits a perfectly
1235 	 * valid page with PG_RAM set.
1236 	 *
1237 	 * In this case we see if the next page is a suitable page-in
1238 	 * candidate and if it is we issue read-ahead.  PG_RAM will be
1239 	 * set on the last page of the read-ahead to continue the pipeline.
1240 	 */
1241 	if (mreq->valid == VM_PAGE_BITS_ALL) {
1242 		if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size) {
1243 			vm_object_drop(object);
1244 			return(VM_PAGER_OK);
1245 		}
1246 		blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0);
1247 		if (blk == SWAPBLK_NONE) {
1248 			vm_object_drop(object);
1249 			return(VM_PAGER_OK);
1250 		}
1251 		m = vm_page_lookup_busy_try(object, mreq->pindex + 1,
1252 					    TRUE, &error);
1253 		if (error) {
1254 			vm_object_drop(object);
1255 			return(VM_PAGER_OK);
1256 		} else if (m == NULL) {
1257 			/*
1258 			 * Use VM_ALLOC_QUICK to avoid blocking on cache
1259 			 * page reuse.
1260 			 */
1261 			m = vm_page_alloc(object, mreq->pindex + 1,
1262 					  VM_ALLOC_QUICK);
1263 			if (m == NULL) {
1264 				vm_object_drop(object);
1265 				return(VM_PAGER_OK);
1266 			}
1267 		} else {
1268 			if (m->valid) {
1269 				vm_page_wakeup(m);
1270 				vm_object_drop(object);
1271 				return(VM_PAGER_OK);
1272 			}
1273 			vm_page_unqueue_nowakeup(m);
1274 		}
1275 		/* page is busy */
1276 		mreq = m;
1277 		raonly = 1;
1278 	} else {
1279 		raonly = 0;
1280 	}
1281 
1282 	/*
1283 	 * Try to block-read contiguous pages from swap if sequential,
1284 	 * otherwise just read one page.  Contiguous pages from swap must
1285 	 * reside within a single device stripe because the I/O cannot be
1286 	 * broken up across multiple stripes.
1287 	 *
1288 	 * Note that blk and iblk can be SWAPBLK_NONE but the loop is
1289 	 * set up such that the case(s) are handled implicitly.
1290 	 */
1291 	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
1292 	marray[0] = mreq;
1293 
1294 	for (i = 1; swap_burst_read &&
1295 		    i < XIO_INTERNAL_PAGES &&
1296 		    mreq->pindex + i < object->size; ++i) {
1297 		swblk_t iblk;
1298 
1299 		iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0);
1300 		if (iblk != blk + i)
1301 			break;
1302 		if ((blk ^ iblk) & dmmax_mask)
1303 			break;
1304 		m = vm_page_lookup_busy_try(object, mreq->pindex + i,
1305 					    TRUE, &error);
1306 		if (error) {
1307 			break;
1308 		} else if (m == NULL) {
1309 			/*
1310 			 * Use VM_ALLOC_QUICK to avoid blocking on cache
1311 			 * page reuse.
1312 			 */
1313 			m = vm_page_alloc(object, mreq->pindex + i,
1314 					  VM_ALLOC_QUICK);
1315 			if (m == NULL)
1316 				break;
1317 		} else {
1318 			if (m->valid) {
1319 				vm_page_wakeup(m);
1320 				break;
1321 			}
1322 			vm_page_unqueue_nowakeup(m);
1323 		}
1324 		/* page is busy */
1325 		marray[i] = m;
1326 	}
1327 	if (i > 1)
1328 		vm_page_flag_set(marray[i - 1], PG_RAM);
1329 
1330 	/*
1331 	 * If mreq is the requested page and we have nothing to do return
1332 	 * VM_PAGER_FAIL.  If raonly is set mreq is just another read-ahead
1333 	 * page and must be cleaned up.
1334 	 */
1335 	if (blk == SWAPBLK_NONE) {
1336 		KKASSERT(i == 1);
1337 		if (raonly) {
1338 			vnode_pager_freepage(mreq);
1339 			vm_object_drop(object);
1340 			return(VM_PAGER_OK);
1341 		} else {
1342 			vm_object_drop(object);
1343 			return(VM_PAGER_FAIL);
1344 		}
1345 	}
1346 
1347 	/*
1348 	 * map our page(s) into kva for input
1349 	 */
1350 	bp = getpbuf_kva(&nsw_rcount);
1351 	bio = &bp->b_bio1;
1352 	kva = (vm_offset_t) bp->b_kvabase;
1353 	bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t));
1354 	pmap_qenter(kva, bp->b_xio.xio_pages, i);
1355 
1356 	bp->b_data = (caddr_t)kva;
1357 	bp->b_bcount = PAGE_SIZE * i;
1358 	bp->b_xio.xio_npages = i;
1359 	bio->bio_done = swp_pager_async_iodone;
1360 	bio->bio_offset = (off_t)blk << PAGE_SHIFT;
1361 	bio->bio_caller_info1.index = SWBIO_READ;
1362 
1363 	/*
1364 	 * Set index.  If raonly set the index beyond the array so all
1365 	 * the pages are treated the same, otherwise the original mreq is
1366 	 * at index 0.
1367 	 */
1368 	if (raonly)
1369 		bio->bio_driver_info = (void *)(intptr_t)i;
1370 	else
1371 		bio->bio_driver_info = (void *)(intptr_t)0;
1372 
1373 	for (j = 0; j < i; ++j)
1374 		vm_page_flag_set(bp->b_xio.xio_pages[j], PG_SWAPINPROG);
1375 
1376 	mycpu->gd_cnt.v_swapin++;
1377 	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;
1378 
1379 	/*
1380 	 * We still hold the lock on mreq, and our automatic completion routine
1381 	 * does not remove it.
1382 	 */
1383 	vm_object_pip_add(object, bp->b_xio.xio_npages);
1384 
1385 	/*
1386 	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
1387 	 * this point because we automatically release it on completion.
1388 	 * Instead, we look at the one page we are interested in which we
1389 	 * still hold a lock on even through the I/O completion.
1390 	 *
1391 	 * The other pages in our m[] array are also released on completion,
1392 	 * so we cannot assume they are valid anymore either.
1393 	 */
1394 	bp->b_cmd = BUF_CMD_READ;
1395 	BUF_KERNPROC(bp);
1396 	vn_strategy(swapdev_vp, bio);
1397 
1398 	/*
1399 	 * Wait for the page we want to complete.  PG_SWAPINPROG is always
1400 	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
1401 	 * is set in the meta-data.
1402 	 *
1403 	 * If this is a read-ahead only we return immediately without
1404 	 * waiting for I/O.
1405 	 */
1406 	if (raonly) {
1407 		vm_object_drop(object);
1408 		return(VM_PAGER_OK);
1409 	}
1410 
1411 	/*
1412 	 * Read-ahead includes originally requested page case.
1413 	 */
1414 	for (;;) {
1415 		flags = mreq->flags;
1416 		cpu_ccfence();
1417 		if ((flags & PG_SWAPINPROG) == 0)
1418 			break;
1419 		tsleep_interlock(mreq, 0);
1420 		if (!atomic_cmpset_int(&mreq->flags, flags,
1421 				       flags | PG_WANTED | PG_REFERENCED)) {
1422 			continue;
1423 		}
1424 		mycpu->gd_cnt.v_intrans++;
1425 		if (tsleep(mreq, PINTERLOCKED, "swread", hz*20)) {
1426 			kprintf(
1427 			    "swap_pager: indefinite wait buffer: "
1428 				" offset: %lld, size: %ld\n",
1429 			    (long long)bio->bio_offset,
1430 			    (long)bp->b_bcount
1431 			);
1432 		}
1433 	}
1434 
1435 	/*
1436  * mreq is left busied after completion, but all the other pages
1437 	 * are freed.  If we had an unrecoverable read error the page will
1438 	 * not be valid.
1439 	 */
1440 	vm_object_drop(object);
1441 	if (mreq->valid != VM_PAGE_BITS_ALL)
1442 		return(VM_PAGER_ERROR);
1443 	else
1444 		return(VM_PAGER_OK);
1445 
1446 	/*
1447 	 * A final note: in a low swap situation, we cannot deallocate swap
1448 	 * and mark a page dirty here because the caller is likely to mark
1449 	 * the page clean when we return, causing the page to possibly revert
1450 	 * to all-zero's later.
1451 	 */
1452 }
1453 
1454 /*
1455  *	swap_pager_putpages:
1456  *
1457  *	Assign swap (if necessary) and initiate I/O on the specified pages.
1458  *
1459  *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
1460  *	are automatically converted to SWAP objects.
1461  *
1462  *	In a low memory situation we may block in vn_strategy(), but the new
1463  *	vm_page reservation system coupled with properly written VFS devices
1464  *	should ensure that no low-memory deadlock occurs.  This is an area
1465  *	which needs work.
1466  *
1467  *	The parent has N vm_object_pip_add() references prior to
1468  *	calling us and will remove references for rtvals[] that are
1469  *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
1470  *	completion.
1471  *
1472  *	The parent has soft-busy'd the pages it passes us and will unbusy
1473  *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1474  *	We need to unbusy the rest on I/O completion.
1475  *
1476  * No requirements.
1477  */
1478 void
1479 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
1480 		    int sync, int *rtvals)
1481 {
1482 	int i;
1483 	int n = 0;
1484 
1485 	vm_object_hold(object);
1486 
1487 	if (count && m[0]->object != object) {
1488 		panic("swap_pager_putpages: object mismatch %p/%p",
1489 		    object,
1490 		    m[0]->object
1491 		);
1492 	}
1493 
1494 	/*
1495 	 * Step 1
1496 	 *
1497 	 * Turn object into OBJT_SWAP
1498 	 * check for bogus sysops
1499 	 * force sync if not pageout process
1500 	 */
1501 	if (object->type == OBJT_DEFAULT) {
1502 		if (object->type == OBJT_DEFAULT)
1503 			swp_pager_meta_convert(object);
1504 	}
1505 
1506 	if (curthread != pagethread)
1507 		sync = TRUE;
1508 
1509 	/*
1510 	 * Step 2
1511 	 *
1512 	 * Update nsw parameters from swap_async_max sysctl values.
1513 	 * Do not let the sysop crash the machine with bogus numbers.
1514 	 */
1515 	if (swap_async_max != nsw_wcount_async_max) {
1516 		int n;
1517 
1518 		/*
1519 		 * limit range
1520 		 */
1521 		if ((n = swap_async_max) > nswbuf / 2)
1522 			n = nswbuf / 2;
1523 		if (n < 1)
1524 			n = 1;
1525 		swap_async_max = n;
1526 
1527 		/*
1528 		 * Adjust difference ( if possible ).  If the current async
1529 		 * count is too low, we may not be able to make the adjustment
1530 		 * at this time.
1531 		 *
1532 		 * vm_token needed for nsw_wcount sleep interlock
1533 		 */
1534 		lwkt_gettoken(&vm_token);
1535 		n -= nsw_wcount_async_max;
1536 		if (nsw_wcount_async + n >= 0) {
1537 			nsw_wcount_async_max += n;
1538 			pbuf_adjcount(&nsw_wcount_async, n);
1539 		}
1540 		lwkt_reltoken(&vm_token);
1541 	}
1542 
1543 	/*
1544 	 * Step 3
1545 	 *
1546 	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
1547 	 * The page is left dirty until the pageout operation completes
1548 	 * successfully.
1549 	 */
1550 
1551 	for (i = 0; i < count; i += n) {
1552 		struct buf *bp;
1553 		struct bio *bio;
1554 		swblk_t blk;
1555 		int j;
1556 
1557 		/*
1558 		 * Maximum I/O size is limited by a number of factors.
1559 		 */
1560 
1561 		n = min(BLIST_MAX_ALLOC, count - i);
1562 		n = min(n, nsw_cluster_max);
1563 
1564 		lwkt_gettoken(&vm_token);
1565 
1566 		/*
1567 		 * Get biggest block of swap we can.  If we fail, fall
1568 		 * back and try to allocate a smaller block.  Don't go
1569 		 * overboard trying to allocate space if it would overly
1570 		 * fragment swap.
1571 		 */
1572 		while (
1573 		    (blk = swp_pager_getswapspace(object, n)) == SWAPBLK_NONE &&
1574 		    n > 4
1575 		) {
1576 			n >>= 1;
1577 		}
1578 		if (blk == SWAPBLK_NONE) {
1579 			for (j = 0; j < n; ++j)
1580 				rtvals[i+j] = VM_PAGER_FAIL;
1581 			lwkt_reltoken(&vm_token);
1582 			continue;
1583 		}
1584 		if (vm_report_swap_allocs > 0) {
1585 			kprintf("swap_alloc %08jx,%d\n", (intmax_t)blk, n);
1586 			--vm_report_swap_allocs;
1587 		}
1588 
1589 		/*
1590 		 * The I/O we are constructing cannot cross a physical
1591 		 * disk boundary in the swap stripe.  Note: we are still
1592 		 * at splvm().
1593 		 */
1594 		if ((blk ^ (blk + n)) & dmmax_mask) {
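			/*
			 * j is the number of pages remaining in the current
			 * stripe; free the excess blocks and shorten the
			 * request to fit.
			 */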
1595 			j = ((blk + dmmax) & dmmax_mask) - blk;
1596 			swp_pager_freeswapspace(object, blk + j, n - j);
1597 			n = j;
1598 		}
1599 
1600 		/*
1601 		 * All I/O parameters have been satisfied, build the I/O
1602 		 * request and assign the swap space.
1603 		 */
1604 		if (sync == TRUE)
1605 			bp = getpbuf_kva(&nsw_wcount_sync);
1606 		else
1607 			bp = getpbuf_kva(&nsw_wcount_async);
1608 		bio = &bp->b_bio1;
1609 
1610 		lwkt_reltoken(&vm_token);
1611 
1612 		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
1613 
1614 		bp->b_bcount = PAGE_SIZE * n;
1615 		bio->bio_offset = (off_t)blk << PAGE_SHIFT;
1616 
1617 		for (j = 0; j < n; ++j) {
1618 			vm_page_t mreq = m[i+j];
1619 
1620 			swp_pager_meta_build(mreq->object, mreq->pindex,
1621 					     blk + j);
1622 			if (object->type == OBJT_SWAP)
1623 				vm_page_dirty(mreq);
1624 			rtvals[i+j] = VM_PAGER_OK;
1625 
1626 			vm_page_flag_set(mreq, PG_SWAPINPROG);
1627 			bp->b_xio.xio_pages[j] = mreq;
1628 		}
1629 		bp->b_xio.xio_npages = n;
1630 
1631 		mycpu->gd_cnt.v_swapout++;
1632 		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;
1633 
1634 		bp->b_dirtyoff = 0;		/* req'd for NFS */
1635 		bp->b_dirtyend = bp->b_bcount;	/* req'd for NFS */
1636 		bp->b_cmd = BUF_CMD_WRITE;
1637 		bio->bio_caller_info1.index = SWBIO_WRITE;
1638 
1639 		/*
1640 		 * asynchronous
1641 		 */
1642 		if (sync == FALSE) {
1643 			bio->bio_done = swp_pager_async_iodone;
1644 			BUF_KERNPROC(bp);
1645 			vn_strategy(swapdev_vp, bio);
1646 
1647 			for (j = 0; j < n; ++j)
1648 				rtvals[i+j] = VM_PAGER_PEND;
1649 			continue;
1650 		}
1651 
1652 		/*
1653 		 * Issue synchronously.
1654 		 *
1655 		 * Wait for the sync I/O to complete, then update rtvals.
1656 		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1657 		 * our async completion routine at the end, thus avoiding a
1658 		 * double-free.
1659 		 */
1660 		bio->bio_caller_info1.index |= SWBIO_SYNC;
1661 		bio->bio_done = biodone_sync;
1662 		bio->bio_flags |= BIO_SYNC;
1663 		vn_strategy(swapdev_vp, bio);
1664 		biowait(bio, "swwrt");
1665 
1666 		for (j = 0; j < n; ++j)
1667 			rtvals[i+j] = VM_PAGER_PEND;
1668 
1669 		/*
1670 		 * Now that we are through with the bp, we can call the
1671 		 * normal async completion, which frees everything up.
1672 		 */
1673 		swp_pager_async_iodone(bio);
1674 	}
1675 	vm_object_drop(object);
1676 }
1677 
1678 /*
1679  * No requirements.
1680  *
1681  * Recalculate the low and high-water marks.
1682  */
1683 void
1684 swap_pager_newswap(void)
1685 {
1686 	if (vm_swap_max) {
1687 		nswap_lowat = vm_swap_max * 4 / 100;	/* 4% left */
1688 		nswap_hiwat = vm_swap_max * 6 / 100;	/* 6% left */
1689 		kprintf("swap low/high-water marks set to %d/%d\n",
1690 			nswap_lowat, nswap_hiwat);
1691 	} else {
1692 		nswap_lowat = 128;
1693 		nswap_hiwat = 512;
1694 	}
1695 	swp_sizecheck();
1696 }
1697 
1698 /*
1699  *	swp_pager_async_iodone:
1700  *
1701  *	Completion routine for asynchronous reads and writes from/to swap.
1702  *	Also called manually by synchronous code to finish up a bp.
1703  *
1704  *	For READ operations the pages are hard-busied (PG_BUSY'd); on
1705  *	completion we unbusy all pages except the 'main' request page.
1706  *	For WRITE operations the pages are soft-busied (vm_page_t->busy);
1707  *	on completion we unbusy all of them ( we can do this because we
1708  *	marked them all VM_PAGER_PEND on return from putpages ).
1709  *
1710  *	This routine may not block.
1711  *
1712  * No requirements.
1713  */
1714 static void
1715 swp_pager_async_iodone(struct bio *bio)
1716 {
1717 	struct buf *bp = bio->bio_buf;
1718 	vm_object_t object = NULL;
1719 	int i;
1720 	int *nswptr;
1721 
1722 	/*
1723 	 * report error
1724 	 */
1725 	if (bp->b_flags & B_ERROR) {
1726 		kprintf(
1727 		    "swap_pager: I/O error - %s failed; offset %lld,"
1728 			"size %ld, error %d\n",
1729 		    ((bio->bio_caller_info1.index & SWBIO_READ) ?
1730 			"pagein" : "pageout"),
1731 		    (long long)bio->bio_offset,
1732 		    (long)bp->b_bcount,
1733 		    bp->b_error
1734 		);
1735 	}
1736 
1737 	/*
1738 	 * set object, raise to splvm().
1739 	 */
1740 	if (bp->b_xio.xio_npages)
1741 		object = bp->b_xio.xio_pages[0]->object;
1742 
1743 	/*
1744 	 * remove the mapping for kernel virtual
1745 	 */
1746 	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);
1747 
1748 	/*
1749 	 * cleanup pages.  If an error occurs writing to swap, we are in
1750 	 * very serious trouble.  If it happens to be a disk error, though,
1751 	 * we may be able to recover by reassigning the swap later on.  So
1752 	 * in this case we remove the m->swapblk assignment for the page
1753  * but do not free it in the rlist.  The erroneous block(s) are thus
1754 	 * never reallocated as swap.  Redirty the page and continue.
1755 	 */
1756 	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
1757 		vm_page_t m = bp->b_xio.xio_pages[i];
1758 
1759 		if (bp->b_flags & B_ERROR) {
1760 			/*
1761 			 * If an error occurs I'd love to throw the swapblk
1762 			 * away without freeing it back to swapspace, so it
1763 			 * can never be used again.  But I can't from an
1764 			 * interrupt.
1765 			 */
1766 
1767 			if (bio->bio_caller_info1.index & SWBIO_READ) {
1768 				/*
1769 				 * When reading, reqpage needs to stay
1770 				 * locked for the parent, but all other
1771 				 * pages can be freed.  We still want to
1772 				 * wakeup the parent waiting on the page,
1773 				 * though.  ( also: pg_reqpage can be -1 and
1774 				 * not match anything ).
1775 				 *
1776 				 * We have to wake specifically requested pages
1777 				 * up too because we cleared PG_SWAPINPROG and
1778 				 * someone may be waiting for that.
1779 				 *
1780 				 * NOTE: for reads, m->dirty will probably
1781 				 * be overridden by the original caller of
1782 				 * getpages so don't play cute tricks here.
1783 				 *
1784 				 * NOTE: We can't actually free the page from
1785 				 * here, because this is an interrupt.  It
1786 				 * is not legal to mess with object->memq
1787 				 * from an interrupt.  Deactivate the page
1788 				 * instead.
1789 				 */
1790 
1791 				m->valid = 0;
1792 				vm_page_flag_clear(m, PG_ZERO);
1793 				vm_page_flag_clear(m, PG_SWAPINPROG);
1794 
1795 				/*
1796 				 * bio_driver_info holds the requested page
1797 				 * index.
1798 				 */
1799 				if (i != (int)(intptr_t)bio->bio_driver_info) {
1800 					vm_page_deactivate(m);
1801 					vm_page_wakeup(m);
1802 				} else {
1803 					vm_page_flash(m);
1804 				}
1805 				/*
1806 				 * The requested page is left busied;
1807 				 * vm_page_flash() just wakes up its waiters.
1808 				 */
1809 			} else {
1810 				/*
1811 				 * If a write error occurs remove the swap
1812 				 * assignment (note that PG_SWAPPED may or
1813 				 * may not be set depending on prior activity).
1814 				 *
1815 				 * Re-dirty OBJT_SWAP pages as there is no
1816 				 * other backing store, we can't throw the
1817 				 * page away.
1818 				 *
1819 				 * Non-OBJT_SWAP pages (aka swapcache) must
1820 				 * not be dirtied since they may not have
1821 				 * been dirty in the first place, and they
1822 				 * do have backing store (the vnode).
1823 				 */
1824 				vm_page_busy_wait(m, FALSE, "swadpg");
1825 				swp_pager_meta_ctl(m->object, m->pindex,
1826 						   SWM_FREE);
1827 				vm_page_flag_clear(m, PG_SWAPPED);
1828 				if (m->object->type == OBJT_SWAP) {
1829 					vm_page_dirty(m);
1830 					vm_page_activate(m);
1831 				}
1832 				vm_page_flag_clear(m, PG_SWAPINPROG);
1833 				vm_page_io_finish(m);
1834 				vm_page_wakeup(m);
1835 			}
1836 		} else if (bio->bio_caller_info1.index & SWBIO_READ) {
1837 			/*
1838 			 * NOTE: for reads, m->dirty will probably be
1839 			 * overridden by the original caller of getpages so
1840 			 * we cannot set them in order to free the underlying
1841 			 * swap in a low-swap situation.  I don't think we'd
1842 			 * want to do that anyway, but it was an optimization
1843 			 * that existed in the old swapper for a time before
1844 			 * it got ripped out due to precisely this problem.
1845 			 *
1846 			 * clear PG_ZERO in page.
1847 			 *
1848 			 * If not the requested page then deactivate it.
1849 			 *
1850 			 * Note that the requested page, reqpage, is left
1851 			 * busied, but we still have to wake it up.  The
1852 			 * other pages are released (unbusied) by
1853 			 * vm_page_wakeup().  We do not set reqpage's
1854 			 * valid bits here, it is up to the caller.
1855 			 */
1856 
1857 			/*
1858 			 * NOTE: can't call pmap_clear_modify(m) from an
1859 			 * interrupt thread, the pmap code may have to map
1860 			 * non-kernel pmaps and currently asserts the case.
1861 			 */
1862 			/*pmap_clear_modify(m);*/
1863 			m->valid = VM_PAGE_BITS_ALL;
1864 			vm_page_undirty(m);
1865 			vm_page_flag_clear(m, PG_ZERO | PG_SWAPINPROG);
1866 			vm_page_flag_set(m, PG_SWAPPED);
1867 
1868 			/*
1869 			 * We have to wake the specifically requested page
1870 			 * up too because we cleared PG_SWAPINPROG and
1871 			 * someone may be waiting on it in getpages.
1872 			 * However, be sure not to unbusy the page that
1873 			 * getpages specifically requested - getpages
1874 			 * expects it to be left busy.
1875 			 *
1876 			 * bio_driver_info holds the requested page index.
1877 			 */
1878 			if (i != (int)(intptr_t)bio->bio_driver_info) {
1879 				vm_page_deactivate(m);
1880 				vm_page_wakeup(m);
1881 			} else {
1882 				vm_page_flash(m);
1883 			}
1884 		} else {
1885 			/*
1886 			 * Mark the page clean but do not mess with the
1887 			 * pmap-layer's modified state.  That state should
1888 			 * also be clear since the caller protected the
1889 			 * page VM_PROT_READ, but allow the case.
1890 			 *
1891 			 * We are in an interrupt, avoid pmap operations.
1892 			 *
1893 			 * If we have a severe page deficit, deactivate the
1894 			 * page.  Do not try to cache it (which would also
1895 			 * involve a pmap op), because the page might still
1896 			 * be read-heavy.
1897 			 *
1898 			 * When using the swap to cache clean vnode pages
1899 			 * we do not mess with the page dirty bits.
1900 			 */
1901 			vm_page_busy_wait(m, FALSE, "swadpg");
1902 			if (m->object->type == OBJT_SWAP)
1903 				vm_page_undirty(m);
1904 			vm_page_flag_clear(m, PG_SWAPINPROG);
1905 			vm_page_flag_set(m, PG_SWAPPED);
1906 			if (vm_page_count_severe())
1907 				vm_page_deactivate(m);
1908 #if 0
1909 			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
1910 				vm_page_protect(m, VM_PROT_READ);
1911 #endif
1912 			vm_page_io_finish(m);
1913 			vm_page_wakeup(m);
1914 		}
1915 	}
1916 
1917 	/*
1918 	 * adjust pip.  NOTE: the original parent may still have its own
1919 	 * pip refs on the object.
1920 	 */
1921 
1922 	if (object)
1923 		vm_object_pip_wakeup_n(object, bp->b_xio.xio_npages);
1924 
1925 	/*
1926 	 * Release the physical I/O buffer.
1927 	 *
1928 	 * NOTE: Due to synchronous operations in the write case b_cmd may
1929 	 *	 already be set to BUF_CMD_DONE and BIO_SYNC may have already
1930 	 *	 been cleared.
1931 	 *
1932 	 * Use vm_token to interlock nsw_rcount/wcount wakeup?
1933 	 */
1934 	lwkt_gettoken(&vm_token);
1935 	if (bio->bio_caller_info1.index & SWBIO_READ)
1936 		nswptr = &nsw_rcount;
1937 	else if (bio->bio_caller_info1.index & SWBIO_SYNC)
1938 		nswptr = &nsw_wcount_sync;
1939 	else
1940 		nswptr = &nsw_wcount_async;
1941 	bp->b_cmd = BUF_CMD_DONE;
1942 	relpbuf(bp, nswptr);
1943 	lwkt_reltoken(&vm_token);
1944 }
1945 
1946 /*
1947  * Fault-in a potentially swapped page and remove the swap reference.
1948  * (used by swapoff code)
1949  *
1950  * object must be held.
1951  */
1952 static __inline void
1953 swp_pager_fault_page(vm_object_t object, int *sharedp, vm_pindex_t pindex)
1954 {
1955 	struct vnode *vp;
1956 	vm_page_t m;
1957 	int error;
1958 
1959 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1960 
1961 	if (object->type == OBJT_VNODE) {
1962 		/*
1963 		 * Any swap related to a vnode is due to swapcache.  We must
1964 		 * vget() the vnode in case it is not active (otherwise
1965 		 * vref() will panic).  Calling vm_object_page_remove() will
1966 		 * ensure that any swap ref is removed interlocked with the
1967 		 * page.  clean_only is set to TRUE so we don't throw away
1968 		 * dirty pages.
1969 		 */
1970 		vp = object->handle;
1971 		error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
1972 		if (error == 0) {
1973 			vm_object_page_remove(object, pindex, pindex + 1, TRUE);
1974 			vput(vp);
1975 		}
1976 	} else {
1977 		/*
1978 		 * Otherwise it is a normal OBJT_SWAP object and we can
1979 		 * fault the page in and remove the swap.
1980 		 */
1981 		m = vm_fault_object_page(object, IDX_TO_OFF(pindex),
1982 					 VM_PROT_NONE,
1983 					 VM_FAULT_DIRTY | VM_FAULT_UNSWAP,
1984 					 sharedp, &error);
1985 		if (m)
1986 			vm_page_unhold(m);
1987 	}
1988 }
1989 
1990 /*
1991  * This removes all swap blocks related to a particular device.  We have
1992  * to be careful of ripups during the scan.
1993  */
1994 static int swp_pager_swapoff_callback(struct swblock *swap, void *data);
1995 
1996 int
1997 swap_pager_swapoff(int devidx)
1998 {
1999 	struct swswapoffinfo info;
2000 	struct vm_object marker;
2001 	vm_object_t object;
2002 	int n;
2003 
2004 	bzero(&marker, sizeof(marker));
2005 	marker.type = OBJT_MARKER;
2006 
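	/*
	 * Walk each vm_object list using the marker so the list can change
	 * while we block; after each object is processed the marker is
	 * moved past it (see the bottom of the inner loop).
	 */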
2007 	for (n = 0; n < VMOBJ_HSIZE; ++n) {
2008 		lwkt_gettoken(&vmobj_tokens[n]);
2009 		TAILQ_INSERT_HEAD(&vm_object_lists[n], &marker, object_list);
2010 
2011 		while ((object = TAILQ_NEXT(&marker, object_list)) != NULL) {
2012 			if (object->type == OBJT_MARKER)
2013 				goto skip;
2014 			if (object->type != OBJT_SWAP &&
2015 			    object->type != OBJT_VNODE)
2016 				goto skip;
2017 			vm_object_hold(object);
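			/*
			 * Re-check the object type; it may have changed
			 * while we blocked acquiring the hold.
			 */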
2018 			if (object->type != OBJT_SWAP &&
2019 			    object->type != OBJT_VNODE) {
2020 				vm_object_drop(object);
2021 				goto skip;
2022 			}
2023 			info.object = object;
2024 			info.shared = 0;
2025 			info.devidx = devidx;
2026 			swblock_rb_tree_RB_SCAN(&object->swblock_root,
2027 					    NULL, swp_pager_swapoff_callback,
2028 					    &info);
2029 			vm_object_drop(object);
2030 skip:
2031 			if (object == TAILQ_NEXT(&marker, object_list)) {
2032 				TAILQ_REMOVE(&vm_object_lists[n],
2033 					     &marker, object_list);
2034 				TAILQ_INSERT_AFTER(&vm_object_lists[n], object,
2035 						   &marker, object_list);
2036 			}
2037 		}
2038 		TAILQ_REMOVE(&vm_object_lists[n], &marker, object_list);
2039 		lwkt_reltoken(&vmobj_tokens[n]);
2040 	}
2041 
2042 	/*
2043 	 * If we fail to locate all swblocks we just fail gracefully and
2044 	 * do not bother to restore paging on the swap device.  The user
2045 	 * can simply retry the swapoff.
2046 	 */
2047 	if (swdevt[devidx].sw_nused)
2048 		return (1);
2049 	else
2050 		return (0);
2051 }
2052 
2053 static
2054 int
2055 swp_pager_swapoff_callback(struct swblock *swap, void *data)
2056 {
2057 	struct swswapoffinfo *info = data;
2058 	vm_object_t object = info->object;
2059 	vm_pindex_t index;
2060 	swblk_t v;
2061 	int i;
2062 
2063 	index = swap->swb_index;
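	/*
	 * Remember the swblock's base index so we can verify, after each
	 * fault (which may block), that the swblock still exists.
	 */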
2064 	for (i = 0; i < SWAP_META_PAGES; ++i) {
2065 		/*
2066 		 * Make sure we don't race a dying object.  This will
2067 		 * kill the scan of the object's swap blocks entirely.
2068 		 */
2069 		if (object->flags & OBJ_DEAD)
2070 			return(-1);
2071 
2072 		/*
2073 		 * Fault the page, which can obviously block.  If the swap
2074 		 * structure disappears break out.
2075 		 */
2076 		v = swap->swb_pages[i];
2077 		if (v != SWAPBLK_NONE && BLK2DEVIDX(v) == info->devidx) {
2078 			swp_pager_fault_page(object, &info->shared,
2079 					     swap->swb_index + i);
2080 			/* swap ptr might go away */
2081 			if (RB_LOOKUP(swblock_rb_tree,
2082 				      &object->swblock_root, index) != swap) {
2083 				break;
2084 			}
2085 		}
2086 	}
2087 	return(0);
2088 }
2089 
2090 /************************************************************************
2091  *				SWAP META DATA 				*
2092  ************************************************************************
2093  *
2094  *	These routines manipulate the swap metadata stored in the
2095  *	OBJT_SWAP object.  All swp_*() routines must be called with the
2096  *	VM object held, since swap can be freed up asynchronously by the
2097  *	low level vm_page code.
2098  *
2099  *	Swap metadata is kept in a per-object red-black tree of swblock
2100  *	structures (object->swblock_root); the object also maintains a
2101  *	count of them in object->swblock_count.
2102  */
2103 
2104 /*
2105  * Lookup the swblock containing the specified swap block index.
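 *
 * swblocks are keyed by the first page index they cover, so the low
 * SWAP_META_MASK bits are masked off before the RB tree lookup and any
 * page index within a block resolves to the same swblock.  (Purely as
 * an illustration, assuming SWAP_META_PAGES were 16: page index 0x23
 * would map to the swblock keyed at 0x20.)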
2106  *
2107  * The caller must hold the object.
2108  */
2109 static __inline
2110 struct swblock *
2111 swp_pager_lookup(vm_object_t object, vm_pindex_t index)
2112 {
2113 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2114 	index &= ~(vm_pindex_t)SWAP_META_MASK;
2115 	return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
2116 }
2117 
2118 /*
2119  * Remove a swblock from the RB tree.
2120  *
2121  * The caller must hold the object.
2122  */
2123 static __inline
2124 void
2125 swp_pager_remove(vm_object_t object, struct swblock *swap)
2126 {
2127 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2128 	RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap);
2129 }
2130 
2131 /*
2132  * Convert default object to swap object if necessary
2133  *
2134  * The caller must hold the object.
2135  */
2136 static void
2137 swp_pager_meta_convert(vm_object_t object)
2138 {
2139 	if (object->type == OBJT_DEFAULT) {
2140 		object->type = OBJT_SWAP;
2141 		KKASSERT(object->swblock_count == 0);
2142 	}
2143 }
2144 
2145 /*
2146  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
2147  *
2148  *	We first convert the object to a swap object if it is a default
2149  *	object.  Vnode objects do not need to be converted.
2150  *
2151  *	The specified swapblk is added to the object's swap metadata.  If
2152  *	the swapblk is not valid, it is freed instead.  Any previously
2153  *	assigned swapblk is freed.
2154  *
2155  * The caller must hold the object.
2156  */
2157 static void
2158 swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk)
2159 {
2160 	struct swblock *swap;
2161 	struct swblock *oswap;
2162 	swblk_t v;
2163 
2164 	KKASSERT(swapblk != SWAPBLK_NONE);
2165 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2166 
2167 	/*
2168 	 * Convert object if necessary
2169 	 */
2170 	if (object->type == OBJT_DEFAULT)
2171 		swp_pager_meta_convert(object);
2172 
2173 	/*
2174 	 * Locate the swblock, creating it if it does not already exist.
2175 	 * If the zone allocation fails we wait for memory to free up and
2176 	 * retry, since the tree may have changed while we slept.
2177 	 */
2178 retry:
2179 	swap = swp_pager_lookup(object, index);
2180 
2181 	if (swap == NULL) {
2182 		int i;
2183 
2184 		swap = zalloc(swap_zone);
2185 		if (swap == NULL) {
2186 			vm_wait(0);
2187 			goto retry;
2188 		}
2189 		swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK;
2190 		swap->swb_count = 0;
2191 
2192 		++object->swblock_count;
2193 
2194 		for (i = 0; i < SWAP_META_PAGES; ++i)
2195 			swap->swb_pages[i] = SWAPBLK_NONE;
2196 		oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap);
2197 		KKASSERT(oswap == NULL);
2198 	}
2199 
2200 	/*
2201 	 * Delete prior contents of metadata.
2202 	 *
2203 	 * NOTE: Decrement swb_count after the freeing operation (which
2204 	 *	 might block) to prevent racing destruction of the swblock.
2205 	 */
2206 	index &= SWAP_META_MASK;
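	/* (index) is now the slot within the swblock, not the page index */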
2207 
2208 	while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) {
2209 		swap->swb_pages[index] = SWAPBLK_NONE;
2210 		/* can block */
2211 		swp_pager_freeswapspace(object, v, 1);
2212 		--swap->swb_count;
2213 		--mycpu->gd_vmtotal.t_vm;
2214 	}
2215 
2216 	/*
2217 	 * Enter block into metadata
2218 	 */
2219 	swap->swb_pages[index] = swapblk;
2220 	if (swapblk != SWAPBLK_NONE) {
2221 		++swap->swb_count;
2222 		++mycpu->gd_vmtotal.t_vm;
2223 	}
2224 }
2225 
2226 /*
2227  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
2228  *
2229  *	The requested range of blocks is freed, with any associated swap
2230  *	returned to the swap bitmap.
2231  *
2232  *	This routine will free swap metadata structures as they are cleaned
2233  *	out.  This routine does *NOT* operate on swap metadata associated
2234  *	with resident pages.
2235  *
2236  * The caller must hold the object.
2237  */
2238 static int swp_pager_meta_free_callback(struct swblock *swb, void *data);
2239 
2240 static void
2241 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count)
2242 {
2243 	struct swfreeinfo info;
2244 
2245 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2246 
2247 	/*
2248 	 * Nothing to do
2249 	 */
2250 	if (object->swblock_count == 0) {
2251 		KKASSERT(RB_EMPTY(&object->swblock_root));
2252 		return;
2253 	}
2254 	if (count == 0)
2255 		return;
2256 
2257 	/*
2258 	 * Setup for RB tree scan.  Note that the pindex range can be huge
2259 	 * due to the 64 bit page index space so we cannot safely iterate.
2260 	 */
2261 	info.object = object;
2262 	info.basei = index & ~(vm_pindex_t)SWAP_META_MASK;
2263 	info.begi = index;
2264 	info.endi = index + count - 1;
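	/* begi/endi are inclusive page indices; basei is block-aligned */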
2265 	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp,
2266 				swp_pager_meta_free_callback, &info);
2267 }
2268 
2269 /*
2270  * The caller must hold the object.
2271  */
2272 static
2273 int
2274 swp_pager_meta_free_callback(struct swblock *swap, void *data)
2275 {
2276 	struct swfreeinfo *info = data;
2277 	vm_object_t object = info->object;
2278 	int index;
2279 	int eindex;
2280 
2281 	/*
2282 	 * Figure out the range within the swblock.  The wider scan may
2283 	 * return edge-case swap blocks when the start and/or end points
2284 	 * are in the middle of a block.
2285 	 */
2286 	if (swap->swb_index < info->begi)
2287 		index = (int)info->begi & SWAP_META_MASK;
2288 	else
2289 		index = 0;
2290 
2291 	if (swap->swb_index + SWAP_META_PAGES > info->endi)
2292 		eindex = (int)info->endi & SWAP_META_MASK;
2293 	else
2294 		eindex = SWAP_META_MASK;
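	/*
	 * Illustration only (assuming SWAP_META_PAGES were 16): freeing
	 * pindexes 5..36 would visit three swblocks, touching slots 5-15
	 * of the first, 0-15 of the second, and 0-4 of the third.
	 */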
2295 
2296 	/*
2297 	 * Scan and free the blocks.  The loop terminates early if
2298 	 * (swap)'s block count drops to zero and the swblock is freed.
2299 	 *
2300 	 * NOTE: Decrement swb_count after swp_pager_freeswapspace()
2301 	 *	 to deal with a zfree race.
2302 	 */
2303 	while (index <= eindex) {
2304 		swblk_t v = swap->swb_pages[index];
2305 
2306 		if (v != SWAPBLK_NONE) {
2307 			swap->swb_pages[index] = SWAPBLK_NONE;
2308 			/* can block */
2309 			swp_pager_freeswapspace(object, v, 1);
2310 			--mycpu->gd_vmtotal.t_vm;
2311 			if (--swap->swb_count == 0) {
2312 				swp_pager_remove(object, swap);
2313 				zfree(swap_zone, swap);
2314 				--object->swblock_count;
2315 				break;
2316 			}
2317 		}
2318 		++index;
2319 	}
2320 
2321 	/* swap may be invalid here due to zfree above */
2322 	lwkt_yield();
2323 
2324 	return(0);
2325 }
2326 
2327 /*
2328  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
2329  *
2330  *	This routine locates and destroys all swap metadata associated with
2331  *	an object.
2332  *
2333  * NOTE: Decrement swb_count after the freeing operation (which
2334  *	 might block) to prevent racing destruction of the swblock.
2335  *
2336  * The caller must hold the object.
2337  */
2338 static void
2339 swp_pager_meta_free_all(vm_object_t object)
2340 {
2341 	struct swblock *swap;
2342 	int i;
2343 
2344 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2345 
2346 	while ((swap = RB_ROOT(&object->swblock_root)) != NULL) {
2347 		swp_pager_remove(object, swap);
2348 		for (i = 0; i < SWAP_META_PAGES; ++i) {
2349 			swblk_t v = swap->swb_pages[i];
2350 			if (v != SWAPBLK_NONE) {
2351 				/* can block */
2352 				swp_pager_freeswapspace(object, v, 1);
2353 				--swap->swb_count;
2354 				--mycpu->gd_vmtotal.t_vm;
2355 			}
2356 		}
2357 		if (swap->swb_count != 0)
2358 			panic("swap_pager_meta_free_all: swb_count != 0");
2359 		zfree(swap_zone, swap);
2360 		--object->swblock_count;
2361 		lwkt_yield();
2362 	}
2363 	KKASSERT(object->swblock_count == 0);
2364 }
2365 
2366 /*
2367  * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
2368  *
2369  *	This routine is capable of looking up, popping, or freeing
2370  *	swapblk assignments in the swap meta data or in the vm_page_t.
2371  *	The routine typically returns the swapblk being looked-up or
2372  *	popped, or SWAPBLK_NONE if the block was freed or was invalid.
2373  *	This routine will automatically free any invalid meta-data
2374  *	swapblks.
2375  *
2376  *	It is not possible to store invalid swapblks in the swap meta data
2377  *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
2378  *
2379  *	When acting on a busy resident page and paging is in progress, we
2380  *	have to wait until paging is complete but otherwise can act on the
2381  *	busy page.
2382  *
2383  *	SWM_FREE	remove and free swap block from metadata
2384  *	SWM_POP		remove from meta data but do not free.. pop it out
2385  *
2386  * The caller must hold the object.
2387  */
2388 static swblk_t
2389 swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
2390 {
2391 	struct swblock *swap;
2392 	swblk_t r1;
2393 
2394 	if (object->swblock_count == 0)
2395 		return(SWAPBLK_NONE);
2396 
2397 	r1 = SWAPBLK_NONE;
2398 	swap = swp_pager_lookup(object, index);
2399 
2400 	if (swap != NULL) {
2401 		index &= SWAP_META_MASK;
2402 		r1 = swap->swb_pages[index];
2403 
2404 		if (r1 != SWAPBLK_NONE) {
2405 			if (flags & (SWM_FREE|SWM_POP)) {
2406 				swap->swb_pages[index] = SWAPBLK_NONE;
2407 				--mycpu->gd_vmtotal.t_vm;
2408 				if (--swap->swb_count == 0) {
2409 					swp_pager_remove(object, swap);
2410 					zfree(swap_zone, swap);
2411 					--object->swblock_count;
2412 				}
2413 			}
2414 			/* swap ptr may be invalid */
2415 			if (flags & SWM_FREE) {
2416 				swp_pager_freeswapspace(object, r1, 1);
2417 				r1 = SWAPBLK_NONE;
2418 			}
2419 		}
2420 		/* swap ptr may be invalid */
2421 	}
2422 	return(r1);
2423 }
2424