xref: /dragonfly/sys/vm/swap_pager.c (revision a1626531)
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1998-2010 The DragonFly Project.  All rights reserved.
5  *
6  * This code is derived from software contributed to The DragonFly Project
7  * by Matthew Dillon <dillon@backplane.com>
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * Copyright (c) 1994 John S. Dyson
37  * Copyright (c) 1990 University of Utah.
38  * Copyright (c) 1991, 1993
39  *	The Regents of the University of California.  All rights reserved.
40  *
41  * This code is derived from software contributed to Berkeley by
42  * the Systems Programming Group of the University of Utah Computer
43  * Science Department.
44  *
45  * Redistribution and use in source and binary forms, with or without
46  * modification, are permitted provided that the following conditions
47  * are met:
48  * 1. Redistributions of source code must retain the above copyright
49  *    notice, this list of conditions and the following disclaimer.
50  * 2. Redistributions in binary form must reproduce the above copyright
51  *    notice, this list of conditions and the following disclaimer in the
52  *    documentation and/or other materials provided with the distribution.
53  * 3. Neither the name of the University nor the names of its contributors
54  *    may be used to endorse or promote products derived from this software
55  *    without specific prior written permission.
56  *
57  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
58  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
61  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
62  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
63  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
64  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
65  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
66  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
67  * SUCH DAMAGE.
68  *
69  *				New Swap System
70  *				Matthew Dillon
71  *
72  * Radix Bitmap 'blists'.
73  *
74  *	- The new swapper uses the new radix bitmap code.  This should scale
75  *	  to arbitrarily small or arbitrarily large swap spaces and an almost
76  *	  arbitrary degree of fragmentation.
77  *
78  * Features:
79  *
80  *	- on the fly reallocation of swap during putpages.  The new system
81  *	  does not try to keep previously allocated swap blocks for dirty
82  *	  pages.
83  *
84  *	- on the fly deallocation of swap
85  *
86  *	- No more garbage collection required.  Unnecessarily allocated swap
87  *	  blocks only exist for dirty vm_page_t's now and these are already
88  *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
89  *	  removal of invalidated swap blocks when a page is destroyed
90  *	  or renamed.
91  *
92  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
93  * @(#)swap_pager.c	8.9 (Berkeley) 3/21/94
94  * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
95  */
96 
97 #include "opt_swap.h"
98 #include <sys/param.h>
99 #include <sys/systm.h>
100 #include <sys/conf.h>
101 #include <sys/kernel.h>
102 #include <sys/proc.h>
103 #include <sys/buf.h>
104 #include <sys/vnode.h>
105 #include <sys/malloc.h>
106 #include <sys/vmmeter.h>
107 #include <sys/sysctl.h>
108 #include <sys/blist.h>
109 #include <sys/lock.h>
110 #include <sys/kcollect.h>
111 
112 #include <vm/vm.h>
113 #include <vm/vm_object.h>
114 #include <vm/vm_page.h>
115 #include <vm/vm_pager.h>
116 #include <vm/vm_pageout.h>
117 #include <vm/swap_pager.h>
118 #include <vm/vm_extern.h>
119 #include <vm/vm_zone.h>
120 #include <vm/vnode_pager.h>
121 
122 #include <sys/buf2.h>
123 #include <vm/vm_page2.h>
124 
125 #ifndef MAX_PAGEOUT_CLUSTER
126 #define MAX_PAGEOUT_CLUSTER	SWB_NPAGES
127 #endif
128 
129 #define SWM_FREE	0x02	/* free, period			*/
130 #define SWM_POP		0x04	/* pop out			*/
131 
132 #define SWBIO_READ	0x01
133 #define SWBIO_WRITE	0x02
134 #define SWBIO_SYNC	0x04
135 #define SWBIO_TTC	0x08	/* for OBJPC_TRY_TO_CACHE */
136 
137 struct swfreeinfo {
138 	vm_object_t	object;
139 	vm_pindex_t	basei;
140 	vm_pindex_t	begi;
141 	vm_pindex_t	endi;	/* inclusive */
142 };
143 
144 struct swswapoffinfo {
145 	vm_object_t	object;
146 	int		devidx;
147 	int		shared;
148 };
149 
150 /*
151  * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
152  * in the old system.
153  */
154 
155 int swap_pager_full;		/* swap space exhaustion (task killing) */
156 int swap_fail_ticks;		/* when we became exhausted */
157 int swap_pager_almost_full;	/* swap space exhaustion (w/ hysteresis)*/
158 swblk_t vm_swap_cache_use;
159 swblk_t vm_swap_anon_use;
160 static int vm_report_swap_allocs;
161 
162 static struct krate kswaprate = { 1 };
163 static int nsw_rcount;		/* free read buffers			*/
164 static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
165 static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
166 static int nsw_wcount_async_max;/* assigned maximum			*/
167 static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
168 
169 struct blist *swapblist;
170 static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
171 static int swap_burst_read = 0;	/* allow burst reading */
172 static swblk_t swapiterator;	/* linearize allocations */
173 int swap_user_async = 0;	/* user swap pager operation can be async */
174 
175 static struct spinlock swapbp_spin = SPINLOCK_INITIALIZER(&swapbp_spin, "swapbp_spin");
176 
177 /* from vm_swap.c */
178 extern struct vnode *swapdev_vp;
179 extern struct swdevt *swdevt;
180 extern int nswdev;
181 
182 #define BLK2DEVIDX(blk) (nswdev > 1 ? blk / SWB_DMMAX % nswdev : 0)
183 
184 SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
185         CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
186 SYSCTL_INT(_vm, OID_AUTO, swap_burst_read,
187         CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins");
188 SYSCTL_INT(_vm, OID_AUTO, swap_user_async,
189         CTLFLAG_RW, &swap_user_async, 0, "Allow async user swap write I/O");
190 
191 #if SWBLK_BITS == 64
192 SYSCTL_LONG(_vm, OID_AUTO, swap_cache_use,
193         CTLFLAG_RD, &vm_swap_cache_use, 0, "");
194 SYSCTL_LONG(_vm, OID_AUTO, swap_anon_use,
195         CTLFLAG_RD, &vm_swap_anon_use, 0, "");
196 SYSCTL_LONG(_vm, OID_AUTO, swap_free,
197         CTLFLAG_RD, &vm_swap_size, 0, "");
198 SYSCTL_LONG(_vm, OID_AUTO, swap_size,
199         CTLFLAG_RD, &vm_swap_max, 0, "");
200 #else
201 SYSCTL_INT(_vm, OID_AUTO, swap_cache_use,
202         CTLFLAG_RD, &vm_swap_cache_use, 0, "");
203 SYSCTL_INT(_vm, OID_AUTO, swap_anon_use,
204         CTLFLAG_RD, &vm_swap_anon_use, 0, "");
205 SYSCTL_INT(_vm, OID_AUTO, swap_free,
206         CTLFLAG_RD, &vm_swap_size, 0, "");
207 SYSCTL_INT(_vm, OID_AUTO, swap_size,
208         CTLFLAG_RD, &vm_swap_max, 0, "");
209 #endif
210 SYSCTL_INT(_vm, OID_AUTO, report_swap_allocs,
211         CTLFLAG_RW, &vm_report_swap_allocs, 0, "");
212 
213 __read_mostly vm_zone_t	swap_zone;
214 
215 /*
216  * Red-Black tree for swblock entries
217  *
218  * The caller must hold vm_token
219  */
220 RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare,
221 	     vm_pindex_t, swb_index);
222 
223 int
224 rb_swblock_compare(struct swblock *swb1, struct swblock *swb2)
225 {
226 	if (swb1->swb_index < swb2->swb_index)
227 		return(-1);
228 	if (swb1->swb_index > swb2->swb_index)
229 		return(1);
230 	return(0);
231 }
232 
233 static
234 int
235 rb_swblock_scancmp(struct swblock *swb, void *data)
236 {
237 	struct swfreeinfo *info = data;
238 
239 	if (swb->swb_index < info->basei)
240 		return(-1);
241 	if (swb->swb_index > info->endi)
242 		return(1);
243 	return(0);
244 }
245 
246 static
247 int
248 rb_swblock_condcmp(struct swblock *swb, void *data)
249 {
250 	struct swfreeinfo *info = data;
251 
252 	if (swb->swb_index < info->basei)
253 		return(-1);
254 	return(0);
255 }
256 
257 /*
258  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
259  * calls hooked from other parts of the VM system and do not appear here.
260  * (see vm/swap_pager.h).
261  */
262 
263 static void	swap_pager_dealloc (vm_object_t object);
264 static int	swap_pager_getpage (vm_object_t, vm_page_t *, int);
265 static void	swap_chain_iodone(struct bio *biox);
266 
267 struct pagerops swappagerops = {
268 	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
269 	swap_pager_getpage,	/* pagein				*/
270 	swap_pager_putpages,	/* pageout				*/
271 	swap_pager_haspage	/* get backing store status for page	*/
272 };
273 
274 /*
275  * SWB_DMMAX is in page-sized chunks with the new swap system.  It was
276  * dev-bsized chunks in the old.  SWB_DMMAX is always a power of 2.
277  *
278  * swap_*() routines are externally accessible.  swp_*() routines are
279  * internal.
280  */
281 
282 int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
283 int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */
284 
285 static __inline void	swp_sizecheck (void);
286 static void	swp_pager_async_iodone (struct bio *bio);
287 
288 /*
289  * Swap bitmap functions
290  */
291 
292 static __inline void	swp_pager_freeswapspace(vm_object_t object,
293 						swblk_t blk, int npages);
294 static __inline swblk_t	swp_pager_getswapspace(vm_object_t object, int npages);
295 
296 /*
297  * Metadata functions
298  */
299 
300 static void swp_pager_meta_convert(vm_object_t);
301 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t);
302 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
303 static void swp_pager_meta_free_all(vm_object_t);
304 static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
305 
306 /*
307  * SWP_SIZECHECK() -	update swap_pager_full indication
308  *
309  *	update the swap_pager_almost_full indication and warn when we are
310  *	about to run out of swap space, using lowat/hiwat hysteresis.
311  *
312  *	Clear swap_pager_full ( task killing ) indication when lowat is met.
313  *
314  * No restrictions on call
315  * This routine may not block.
316  * SMP races are ok.
317  */
318 static __inline void
319 swp_sizecheck(void)
320 {
321 	if (vm_swap_size < nswap_lowat) {
322 		if (swap_pager_almost_full == 0) {
323 			kprintf("swap_pager: out of swap space\n");
324 			swap_pager_almost_full = 1;
325 			swap_fail_ticks = ticks;
326 		}
327 	} else {
328 		swap_pager_full = 0;
329 		if (vm_swap_size > nswap_hiwat)
330 			swap_pager_almost_full = 0;
331 	}
332 }
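
/*
 * Hysteresis sketch, using the initial defaults (nswap_lowat = 128,
 * nswap_hiwat = 512; swap_pager_newswap() recalculates these once swap
 * is configured): the almost-full indication latches when free swap drops
 * below 128 pages and is only cleared after free swap climbs back above
 * 512 pages, so the warning does not flap while free swap hovers near
 * the low-water mark.
 */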
333 
334 /*
335  * Long-term data collection on 10-second interval.  Return the value
336  * for KCOLLECT_SWAPPCT and set the values for SWAPANO and SWAPCAC.
337  *
338  * Return total swap in the scale field.  This can change if swap is
339  * regularly added or removed and may cause some historical confusion
340  * in that case, but SWAPPCT will always be historically accurate.
341  */
342 
343 #define PTOB(value)	((uint64_t)(value) << PAGE_SHIFT)
344 
345 static uint64_t
346 collect_swap_callback(int n)
347 {
348 	uint64_t total = vm_swap_max;
349 	uint64_t anon = vm_swap_anon_use;
350 	uint64_t cache = vm_swap_cache_use;
351 
352 	if (total == 0)		/* avoid divide by zero */
353 		total = 1;
354 	kcollect_setvalue(KCOLLECT_SWAPANO, PTOB(anon));
355 	kcollect_setvalue(KCOLLECT_SWAPCAC, PTOB(cache));
356 	kcollect_setscale(KCOLLECT_SWAPANO,
357 			  KCOLLECT_SCALE(KCOLLECT_SWAPANO_FORMAT, PTOB(total)));
358 	kcollect_setscale(KCOLLECT_SWAPCAC,
359 			  KCOLLECT_SCALE(KCOLLECT_SWAPCAC_FORMAT, PTOB(total)));
360 	return (((anon + cache) * 10000 + (total >> 1)) / total);
361 }
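
/*
 * The value returned above feeds KCOLLECT_SWAPPCT and is the in-use
 * fraction of total swap in hundredths of a percent, with (total >> 1)
 * added for round-to-nearest.  E.g. with 1024 of 4096 swap pages in use
 * the callback returns 2500, i.e. 25.00%.
 */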
362 
363 /*
364  * SWAP_PAGER_INIT() -	initialize the swap pager!
365  *
366  *	Expected to be started from system init.  NOTE:  This code is run
367  *	before much else so be careful what you depend on.  Most of the VM
368  *	system has yet to be initialized at this point.
369  *
370  * Called from the low level boot code only.
371  */
372 static void
373 swap_pager_init(void *arg __unused)
374 {
375 	kcollect_register(KCOLLECT_SWAPPCT, "swapuse", collect_swap_callback,
376 			  KCOLLECT_SCALE(KCOLLECT_SWAPPCT_FORMAT, 0));
377 	kcollect_register(KCOLLECT_SWAPANO, "swapano", NULL,
378 			  KCOLLECT_SCALE(KCOLLECT_SWAPANO_FORMAT, 0));
379 	kcollect_register(KCOLLECT_SWAPCAC, "swapcac", NULL,
380 			  KCOLLECT_SCALE(KCOLLECT_SWAPCAC_FORMAT, 0));
381 }
382 SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL);
383 
384 /*
385  * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
386  *
387  *	Expected to be started from pageout process once, prior to entering
388  *	its main loop.
389  *
390  * Called from the low level boot code only.
391  */
392 void
393 swap_pager_swap_init(void)
394 {
395 	int n, n2;
396 
397 	/*
398 	 * Number of in-transit swap bp operations.  Don't
399 	 * exhaust the pbufs completely.  Make sure we
400 	 * initialize workable values (0 will work for hysteresis
401 	 * but it isn't very efficient).
402 	 *
403 	 * The nsw_cluster_max is constrained by the number of pages an XIO
404 	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
405 	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
406 	 * constrained by the swap device interleave stripe size.
407 	 *
408 	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
409 	 * designed to prevent other I/O from having high latencies due to
410 	 * our pageout I/O.  The value 4 works well for one or two active swap
411 	 * devices but is probably a little low if you have more.  Even so,
412 	 * a higher value would probably generate only a limited improvement
413 	 * with three or four active swap devices since the system does not
414 	 * typically have to pageout at extreme bandwidths.   We will want
415 	 * at least 2 per swap devices, and 4 is a pretty good value if you
416 	 * at least 2 per swap device, and 4 is a pretty good value if you
417 	 * So it all works out pretty well.
418 	 */
419 
420 	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
421 
422 	nsw_rcount = (nswbuf_kva + 1) / 2;
423 	nsw_wcount_sync = (nswbuf_kva + 3) / 4;
424 	nsw_wcount_async = 4;
425 	nsw_wcount_async_max = nsw_wcount_async;
426 
427 	/*
428 	 * The zone is dynamically allocated so generally size it to
429 	 * maxswzone (32MB to 256GB of KVM).  Set a minimum size based
430 	 * on physical memory of around 8x (each swblock can hold 16 pages).
431 	 *
432 	 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
433 	 * has increased dramatically.
434 	 */
435 	n = vmstats.v_page_count / 2;
436 	if (maxswzone && n < maxswzone / sizeof(struct swblock))
437 		n = maxswzone / sizeof(struct swblock);
438 	n2 = n;
439 
440 	do {
441 		swap_zone = zinit(
442 			"SWAPMETA",
443 			sizeof(struct swblock),
444 			n,
445 			ZONE_INTERRUPT);
446 		if (swap_zone != NULL)
447 			break;
448 		/*
449 		 * if the allocation failed, try a zone two thirds the
450 		 * size of the previous attempt.
451 		 */
452 		n -= ((n + 2) / 3);
453 	} while (n > 0);
454 
455 	if (swap_zone == NULL)
456 		panic("swap_pager_swap_init: swap_zone == NULL");
457 	if (n2 != n)
458 		kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
459 }
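
/*
 * Sizing sketch: n starts at half the physical page count (or the
 * maxswzone-derived figure if that is larger) and each zinit() failure
 * retries with roughly two-thirds of the previous request, e.g.
 * n = 600000 -> 400000 -> 266666 -> ..., until the zone is successfully
 * created or n reaches zero and we panic.
 */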
460 
461 /*
462  * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
463  *			its metadata structures.
464  *
465  *	This routine is called from the mmap and fork code to create a new
466  *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
467  *	and then converting it with swp_pager_meta_convert().
468  *
469  *	We only support unnamed objects.
470  *
471  * No restrictions.
472  */
473 vm_object_t
474 swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
475 {
476 	vm_object_t object;
477 
478 	KKASSERT(handle == NULL);
479 	object = vm_object_allocate_hold(OBJT_DEFAULT,
480 					 OFF_TO_IDX(offset + PAGE_MASK + size));
481 	swp_pager_meta_convert(object);
482 	vm_object_drop(object);
483 
484 	return (object);
485 }
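
/*
 * Typical call, e.g. from the anonymous mmap/fork paths (illustrative):
 *
 *	object = swap_pager_alloc(NULL, size, prot, 0);
 *
 * The handle must be NULL (unnamed objects only); the object is created
 * as OBJT_DEFAULT and converted by swp_pager_meta_convert() before it is
 * returned.
 */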
486 
487 /*
488  * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
489  *
490  *	The swap backing for the object is destroyed.  The code is
491  *	designed such that we can reinstantiate it later, but this
492  *	routine is typically called only when the entire object is
493  *	about to be destroyed.
494  *
495  * The object must be locked or unreferenceable.
496  * No other requirements.
497  */
498 static void
499 swap_pager_dealloc(vm_object_t object)
500 {
501 	vm_object_hold(object);
502 	vm_object_pip_wait(object, "swpdea");
503 
504 	/*
505 	 * Free all remaining metadata.  We only bother to free it from
506 	 * the swap meta data.  We do not attempt to free swapblk's still
507 	 * associated with vm_page_t's for this object.  We do not care
508 	 * if paging is still in progress on some objects.
509 	 */
510 	swp_pager_meta_free_all(object);
511 	vm_object_drop(object);
512 }
513 
514 /************************************************************************
515  *			SWAP PAGER BITMAP ROUTINES			*
516  ************************************************************************/
517 
518 /*
519  * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
520  *
521  *	Allocate swap for the requested number of pages.  The starting
522  *	swap block number (a page index) is returned or SWAPBLK_NONE
523  *	if the allocation failed.
524  *
525  *	Also has the side effect of advising that somebody made a mistake
526  *	when they configured swap and didn't configure enough.
527  *
528  * The caller must hold the object.
529  * This routine may not block.
530  */
531 static __inline swblk_t
532 swp_pager_getswapspace(vm_object_t object, int npages)
533 {
534 	swblk_t blk;
535 
536 	lwkt_gettoken(&vm_token);
537 	blk = blist_allocat(swapblist, npages, swapiterator);
538 	if (blk == SWAPBLK_NONE)
539 		blk = blist_allocat(swapblist, npages, 0);
540 	if (blk == SWAPBLK_NONE) {
541 		if (swap_pager_full != 2) {
542 			if (vm_swap_max == 0) {
543 				krateprintf(&kswaprate,
544 					"Warning: The system would like to "
545 					"page to swap but no swap space "
546 					"is configured!\n");
547 			} else {
548 				krateprintf(&kswaprate,
549 					"swap_pager_getswapspace: "
550 					"swap full allocating %d pages\n",
551 					npages);
552 			}
553 			swap_pager_full = 2;
554 			if (swap_pager_almost_full == 0)
555 				swap_fail_ticks = ticks;
556 			swap_pager_almost_full = 1;
557 		}
558 	} else {
559 		/* swapiterator = blk; disable for now, doesn't work well */
560 		swapacctspace(blk, -npages);
561 		if (object->type == OBJT_SWAP)
562 			vm_swap_anon_use += npages;
563 		else
564 			vm_swap_cache_use += npages;
565 		swp_sizecheck();
566 	}
567 	lwkt_reltoken(&vm_token);
568 	return(blk);
569 }
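
/*
 * Allocation sketch: the first blist_allocat() pass starts the radix
 * search at 'swapiterator' to bias allocations forward, and a second
 * pass retries from block 0 before we conclude swap is full.  Since the
 * iterator update is currently disabled, both passes effectively start
 * from 0.
 */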
570 
571 /*
572  * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
573  *
574  *	This routine returns the specified swap blocks back to the bitmap.
575  *
576  *	Note:  This routine may not block (it could in the old swap code),
577  *	and through the use of the new blist routines it does not block.
578  *
579  * This routine may not block.
580  */
581 
582 static __inline void
583 swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages)
584 {
585 	struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)];
586 
587 	lwkt_gettoken(&vm_token);
588 	sp->sw_nused -= npages;
589 	if (object->type == OBJT_SWAP)
590 		vm_swap_anon_use -= npages;
591 	else
592 		vm_swap_cache_use -= npages;
593 
594 	if (sp->sw_flags & SW_CLOSING) {
595 		lwkt_reltoken(&vm_token);
596 		return;
597 	}
598 
599 	blist_free(swapblist, blk, npages);
600 	vm_swap_size += npages;
601 	swp_sizecheck();
602 	lwkt_reltoken(&vm_token);
603 }
604 
605 /*
606  * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
607  *				range within an object.
608  *
609  *	This is a globally accessible routine.
610  *
611  *	This routine removes swapblk assignments from swap metadata.
612  *
613  *	The external callers of this routine typically have already destroyed
614  *	or renamed vm_page_t's associated with this range in the object so
615  *	we should be ok.
616  *
617  * No requirements.
618  */
619 void
620 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size)
621 {
622 	if (object->swblock_count == 0)
623 		return;
624 	vm_object_hold(object);
625 	swp_pager_meta_free(object, start, size);
626 	vm_object_drop(object);
627 }
628 
629 /*
630  * No requirements.
631  */
632 void
633 swap_pager_freespace_all(vm_object_t object)
634 {
635 	if (object->swblock_count == 0)
636 		return;
637 	vm_object_hold(object);
638 	swp_pager_meta_free_all(object);
639 	vm_object_drop(object);
640 }
641 
642 /*
643  * This function conditionally frees swap cache swap starting at
644  * (*basei) in the object.  (count) swap blocks will be nominally freed.
645  * The actual number of blocks freed can be more or less than the
646  * requested number.
647  *
648  * This function nominally returns the number of blocks freed.  However,
649  * the actual number of blocks freed may be less than the returned value.
650  * If the function is unable to exhaust the object or if it is able to
651  * free (approximately) the requested number of blocks it returns
652  * a value n > count.
653  *
654  * If we exhaust the object we will return a value n <= count.
655  *
656  * The caller must hold the object.
657  *
658  * WARNING!  If count == 0 then -1 can be returned as a degenerate case,
659  *	     callers should always pass a count value > 0.
660  */
661 static int swap_pager_condfree_callback(struct swblock *swap, void *data);
662 
663 int
664 swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count)
665 {
666 	struct swfreeinfo info;
667 	int n;
668 	int t;
669 
670 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
671 
672 	info.object = object;
673 	info.basei = *basei;	/* skip up to this page index */
674 	info.begi = count;	/* max swap pages to destroy */
675 	info.endi = count * 8;	/* max swblocks to scan */
676 
677 	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp,
678 				swap_pager_condfree_callback, &info);
679 	*basei = info.basei;
680 
681 	/*
682 	 * Return the larger of the two deltas (swap pages freed vs swblocks scanned)
683 	 */
684 	n = count - (int)info.begi;
685 	t = count * 8 - (int)info.endi;
686 	if (n < t)
687 		n = t;
688 	if (n < 1)
689 		n = 1;
690 	return(n);
691 }
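
/*
 * Worked example (illustrative): with count = 16 the budgets are 16 swap
 * pages to destroy (info.begi) and 128 swblocks to visit (info.endi).
 * If the callback frees 20 pages while visiting 3 swblocks before the
 * page budget goes negative, n = 16 - (-4) = 20 and t = 128 - 125 = 3,
 * so 20 is returned (> count).  If instead the scan runs off the end of
 * the object after visiting 5 swblocks and freeing nothing, n = 0 and
 * t = 5, so 5 is returned (<= count); with nothing at all to scan the
 * final clamp returns 1.
 */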
692 
693 /*
694  * The idea is to free whole meta-block to avoid fragmenting
695  * the swap space or disk I/O.  We only do this if NO VM pages
696  * are present.
697  *
698  * We do not have to deal with clearing PG_SWAPPED in related VM
699  * pages because there are no related VM pages.
700  *
701  * The caller must hold the object.
702  */
703 static int
704 swap_pager_condfree_callback(struct swblock *swap, void *data)
705 {
706 	struct swfreeinfo *info = data;
707 	vm_object_t object = info->object;
708 	int i;
709 
710 	for (i = 0; i < SWAP_META_PAGES; ++i) {
711 		if (vm_page_lookup(object, swap->swb_index + i))
712 			break;
713 	}
714 	info->basei = swap->swb_index + SWAP_META_PAGES;
715 	if (i == SWAP_META_PAGES) {
716 		info->begi -= swap->swb_count;
717 		swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES);
718 	}
719 	--info->endi;
720 	if ((int)info->begi < 0 || (int)info->endi < 0)
721 		return(-1);
722 	lwkt_yield();
723 	return(0);
724 }
725 
726 /*
727  * Called by vm_page_alloc() when a new VM page is inserted
728  * into a VM object.  Checks whether swap has been assigned to
729  * the page and sets PG_SWAPPED as necessary.
730  *
731  * (m) must be busied by caller and remains busied on return.
732  */
733 void
734 swap_pager_page_inserted(vm_page_t m)
735 {
736 	if (m->object->swblock_count) {
737 		vm_object_hold(m->object);
738 		if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE)
739 			vm_page_flag_set(m, PG_SWAPPED);
740 		vm_object_drop(m->object);
741 	}
742 }
743 
744 /*
745  * SWAP_PAGER_RESERVE() - reserve swap blocks in object
746  *
747  *	Assigns swap blocks to the specified range within the object.  The
748  *	swap blocks are not zerod.  Any previous swap assignment is destroyed.
749  *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
750  *	Returns 0 on success, -1 on failure.
751  *
752  * The caller is responsible for avoiding races in the specified range.
753  * No other requirements.
754  */
755 int
756 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
757 {
758 	int n = 0;
759 	swblk_t blk = SWAPBLK_NONE;
760 	vm_pindex_t beg = start;	/* save start index */
761 
762 	vm_object_hold(object);
763 
764 	while (size) {
765 		if (n == 0) {
766 			n = BLIST_MAX_ALLOC;
767 			while ((blk = swp_pager_getswapspace(object, n)) ==
768 			       SWAPBLK_NONE)
769 			{
770 				n >>= 1;
771 				if (n == 0) {
772 					swp_pager_meta_free(object, beg,
773 							    start - beg);
774 					vm_object_drop(object);
775 					return(-1);
776 				}
777 			}
778 		}
779 		swp_pager_meta_build(object, start, blk);
780 		--size;
781 		++start;
782 		++blk;
783 		--n;
784 	}
785 	swp_pager_meta_free(object, start, n);
786 	vm_object_drop(object);
787 	return(0);
788 }
789 
790 /*
791  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
792  *			and destroy the source.
793  *
794  *	Copy any valid swapblks from the source to the destination.  In
795  *	cases where both the source and destination have a valid swapblk,
796  *	we keep the destination's.
797  *
798  *	This routine is allowed to block.  It may block allocating metadata
799  *	indirectly through swp_pager_meta_build() or if paging is still in
800  *	progress on the source.
801  *
802  *	XXX vm_page_collapse() kinda expects us not to block because we
803  *	supposedly do not need to allocate memory, but for the moment we
804  *	*may* have to get a little memory from the zone allocator, but
805  *	it is taken from the interrupt memory.  We should be ok.
806  *
807  *	The source object contains no vm_page_t's (which is just as well)
808  *	The source object is of type OBJT_SWAP.
809  *
810  *	The source and destination objects must be held by the caller.
811  */
812 void
813 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
814 		vm_pindex_t base_index, int destroysource)
815 {
816 	vm_pindex_t i;
817 
818 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject));
819 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject));
820 
821 	/*
822 	 * transfer source to destination.
823 	 */
824 	for (i = 0; i < dstobject->size; ++i) {
825 		swblk_t dstaddr;
826 
827 		/*
828 		 * Locate (without changing) the swapblk on the destination,
829 		 * unless it is invalid in which case free it silently, or
830 		 * if the destination is a resident page, in which case the
831 		 * source is thrown away.
832 		 */
833 		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
834 
835 		if (dstaddr == SWAPBLK_NONE) {
836 			/*
837 			 * Destination has no swapblk and is not resident,
838 			 * copy source.
839 			 */
840 			swblk_t srcaddr;
841 
842 			srcaddr = swp_pager_meta_ctl(srcobject,
843 						     base_index + i, SWM_POP);
844 
845 			if (srcaddr != SWAPBLK_NONE)
846 				swp_pager_meta_build(dstobject, i, srcaddr);
847 		} else {
848 			/*
849 			 * Destination has valid swapblk or it is represented
850 			 * by a resident page.  We destroy the sourceblock.
851 			 */
852 			swp_pager_meta_ctl(srcobject, base_index + i, SWM_FREE);
853 		}
854 	}
855 
856 	/*
857 	 * Free left over swap blocks in source.
858 	 *
859 	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
860 	 * double-remove the object from the swap queues.
861 	 */
862 	if (destroysource) {
863 		/*
864 		 * Reverting the type is not necessary, the caller is going
865 		 * to destroy srcobject directly, but I'm doing it here
866 		 * for consistency since we've removed the object from its
867 		 * queues.
868 		 */
869 		swp_pager_meta_free_all(srcobject);
870 		if (srcobject->type == OBJT_SWAP)
871 			srcobject->type = OBJT_DEFAULT;
872 	}
873 }
874 
875 /*
876  * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
877  *				the requested page.
878  *
879  *	We determine whether good backing store exists for the requested
880  *	page and return TRUE if it does, FALSE if it doesn't.
881  *
882  *	If TRUE, we also try to determine how much valid, contiguous backing
883  *	store exists before and after the requested page within a reasonable
884  *	distance.  We do not try to restrict it to the swap device stripe
885  *	(that is handled in getpages/putpages).  It probably isn't worth
886  *	doing here.
887  *
888  * No requirements.
889  */
890 boolean_t
891 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex)
892 {
893 	swblk_t blk0;
894 
895 	/*
896 	 * do we have good backing store at the requested index ?
897 	 */
898 	vm_object_hold(object);
899 	blk0 = swp_pager_meta_ctl(object, pindex, 0);
900 
901 	if (blk0 == SWAPBLK_NONE) {
902 		vm_object_drop(object);
903 		return (FALSE);
904 	}
905 	vm_object_drop(object);
906 	return (TRUE);
907 }
908 
909 /*
910  * Object must be held exclusive or shared by the caller.
911  */
912 boolean_t
913 swap_pager_haspage_locked(vm_object_t object, vm_pindex_t pindex)
914 {
915 	if (swp_pager_meta_ctl(object, pindex, 0) == SWAPBLK_NONE)
916 		return FALSE;
917 	return TRUE;
918 }
919 
920 /*
921  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
922  *
923  * This removes any associated swap backing store, whether valid or
924  * not, from the page.  This operates on any VM object, not just OBJT_SWAP
925  * objects.
926  *
927  * This routine is typically called when a page is made dirty, at
928  * which point any associated swap can be freed.  MADV_FREE also
929  * calls us in a special-case situation
930  * calls us in a special-case situation.
931  * NOTE!!!  If the page is clean and the swap was valid, the caller
932  *	    should make the page dirty before calling this routine.
933  *	    This routine does NOT change the m->dirty status of the page.
934  *	    Also: MADV_FREE depends on it.
935  *
936  * The page must be busied.
937  * The caller can hold the object to avoid blocking, else we might block.
938  * No other requirements.
939  */
940 void
941 swap_pager_unswapped(vm_page_t m)
942 {
943 	if (m->flags & PG_SWAPPED) {
944 		vm_object_hold(m->object);
945 		KKASSERT(m->flags & PG_SWAPPED);
946 		swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
947 		vm_page_flag_clear(m, PG_SWAPPED);
948 		vm_object_drop(m->object);
949 	}
950 }
951 
952 /*
953  * SWAP_PAGER_STRATEGY() - read, write, free blocks
954  *
955  * This implements a VM OBJECT strategy function using swap backing store.
956  * This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP
957  * types.  Only BUF_CMD_{READ,WRITE,FREEBLKS} is supported, any other
958  * requests will return EINVAL.
959  *
960  * This is intended to be a cacheless interface (i.e. caching occurs at
961  * higher levels), and is also used as a swap-based SSD cache for vnode
962  * and device objects.
963  *
964  * All I/O goes directly to and from the swap device.
965  *
966  * We currently attempt to run I/O synchronously or asynchronously as
967  * the caller requests.  This isn't perfect because we lose error
968  * sequencing when we run multiple ops in parallel to satisfy a request.
969  * But this is swap, so we let it all hang out.
970  *
971  * NOTE: This function supports the KVABIO API wherein bp->b_data might
972  *	 not be synchronized to the current cpu.
973  *
974  * No requirements.
975  */
976 void
977 swap_pager_strategy(vm_object_t object, struct bio *bio)
978 {
979 	struct buf *bp = bio->bio_buf;
980 	struct bio *nbio;
981 	vm_pindex_t start;
982 	vm_pindex_t biox_blkno = 0;
983 	int count;
984 	char *data;
985 	struct bio *biox;
986 	struct buf *bufx;
987 #if 0
988 	struct bio_track *track;
989 #endif
990 
991 #if 0
992 	/*
993 	 * tracking for swapdev vnode I/Os
994 	 */
995 	if (bp->b_cmd == BUF_CMD_READ)
996 		track = &swapdev_vp->v_track_read;
997 	else
998 		track = &swapdev_vp->v_track_write;
999 #endif
1000 
1001 	/*
1002 	 * Only supported commands
1003 	 */
1004 	if (bp->b_cmd != BUF_CMD_FREEBLKS &&
1005 	    bp->b_cmd != BUF_CMD_READ &&
1006 	    bp->b_cmd != BUF_CMD_WRITE) {
1007 		bp->b_error = EINVAL;
1008 		bp->b_flags |= B_ERROR | B_INVAL;
1009 		biodone(bio);
1010 		return;
1011 	}
1012 
1013 	/*
1014 	 * bcount must be an integral number of pages.
1015 	 */
1016 	if (bp->b_bcount & PAGE_MASK) {
1017 		bp->b_error = EINVAL;
1018 		bp->b_flags |= B_ERROR | B_INVAL;
1019 		biodone(bio);
1020 		kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
1021 			"not page bounded\n",
1022 			bp, (long long)bio->bio_offset, (int)bp->b_bcount);
1023 		return;
1024 	}
1025 
1026 	/*
1027 	 * Clear error indication, initialize page index, count, data pointer.
1028 	 */
1029 	bp->b_error = 0;
1030 	bp->b_flags &= ~B_ERROR;
1031 	bp->b_resid = bp->b_bcount;
1032 
1033 	start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
1034 	count = howmany(bp->b_bcount, PAGE_SIZE);
1035 
1036 	/*
1037 	 * WARNING!  Do not dereference *data without issuing a bkvasync()
1038 	 */
1039 	data = bp->b_data;
1040 
1041 	/*
1042 	 * Deal with BUF_CMD_FREEBLKS
1043 	 */
1044 	if (bp->b_cmd == BUF_CMD_FREEBLKS) {
1045 		/*
1046 		 * FREE PAGE(s) - destroy underlying swap that is no longer
1047 		 *		  needed.
1048 		 */
1049 		vm_object_hold(object);
1050 		swp_pager_meta_free(object, start, count);
1051 		vm_object_drop(object);
1052 		bp->b_resid = 0;
1053 		biodone(bio);
1054 		return;
1055 	}
1056 
1057 	/*
1058 	 * We need to be able to create a new cluster of I/O's.  We cannot
1059 	 * use the caller fields of the passed bio so push a new one.
1060 	 *
1061 	 * Because nbio is just a placeholder for the cluster links,
1062 	 * we can biodone() the original bio instead of nbio to make
1063 	 * things a bit more efficient.
1064 	 */
1065 	nbio = push_bio(bio);
1066 	nbio->bio_offset = bio->bio_offset;
1067 	nbio->bio_caller_info1.cluster_head = NULL;
1068 	nbio->bio_caller_info2.cluster_tail = NULL;
1069 
1070 	biox = NULL;
1071 	bufx = NULL;
1072 
1073 	/*
1074 	 * Execute read or write
1075 	 */
1076 	vm_object_hold(object);
1077 
1078 	while (count > 0) {
1079 		swblk_t blk;
1080 
1081 		/*
1082 		 * Obtain block.  If block not found and writing, allocate a
1083 		 * new block and build it into the object.
1084 		 */
1085 		blk = swp_pager_meta_ctl(object, start, 0);
1086 		if ((blk == SWAPBLK_NONE) && bp->b_cmd == BUF_CMD_WRITE) {
1087 			blk = swp_pager_getswapspace(object, 1);
1088 			if (blk == SWAPBLK_NONE) {
1089 				bp->b_error = ENOMEM;
1090 				bp->b_flags |= B_ERROR;
1091 				break;
1092 			}
1093 			swp_pager_meta_build(object, start, blk);
1094 		}
1095 
1096 		/*
1097 		 * Do we have to flush our current collection?  Yes if:
1098 		 *
1099 		 *	- no swap block at this index
1100 		 *	- swap block is not contiguous
1101 		 *	- we cross a physical disk boundary in the
1102 		 *	  stripe.
1103 		 */
1104 		if (biox &&
1105 		    (biox_blkno + btoc(bufx->b_bcount) != blk ||
1106 		     ((biox_blkno ^ blk) & ~SWB_DMMASK))) {
1107 			switch(bp->b_cmd) {
1108 			case BUF_CMD_READ:
1109 				++mycpu->gd_cnt.v_swapin;
1110 				mycpu->gd_cnt.v_swappgsin +=
1111 					btoc(bufx->b_bcount);
1112 				break;
1113 			case BUF_CMD_WRITE:
1114 				++mycpu->gd_cnt.v_swapout;
1115 				mycpu->gd_cnt.v_swappgsout +=
1116 					btoc(bufx->b_bcount);
1117 				bufx->b_dirtyend = bufx->b_bcount;
1118 				break;
1119 			default:
1120 				/* NOT REACHED */
1121 				break;
1122 			}
1123 
1124 			/*
1125 			 * Finished with this buf.
1126 			 */
1127 			KKASSERT(bufx->b_bcount != 0);
1128 			if (bufx->b_cmd != BUF_CMD_READ)
1129 				bufx->b_dirtyend = bufx->b_bcount;
1130 			biox = NULL;
1131 			bufx = NULL;
1132 		}
1133 
1134 		/*
1135 		 * Add new swapblk to biox, instantiating biox if necessary.
1136 		 * Zero-fill reads are able to take a shortcut.
1137 		 */
1138 		if (blk == SWAPBLK_NONE) {
1139 			/*
1140 			 * We can only get here if we are reading.
1141 			 */
1142 			bkvasync(bp);
1143 			bzero(data, PAGE_SIZE);
1144 			bp->b_resid -= PAGE_SIZE;
1145 		} else {
1146 			if (biox == NULL) {
1147 				/* XXX chain count > 4, wait to <= 4 */
1148 
1149 				bufx = getpbuf(NULL);
1150 				bufx->b_flags |= B_KVABIO;
1151 				biox = &bufx->b_bio1;
1152 				cluster_append(nbio, bufx);
1153 				bufx->b_cmd = bp->b_cmd;
1154 				biox->bio_done = swap_chain_iodone;
1155 				biox->bio_offset = (off_t)blk << PAGE_SHIFT;
1156 				biox->bio_caller_info1.cluster_parent = nbio;
1157 				biox_blkno = blk;
1158 				bufx->b_bcount = 0;
1159 				bufx->b_data = data;
1160 			}
1161 			bufx->b_bcount += PAGE_SIZE;
1162 		}
1163 		--count;
1164 		++start;
1165 		data += PAGE_SIZE;
1166 	}
1167 
1168 	vm_object_drop(object);
1169 
1170 	/*
1171 	 *  Flush out last buffer
1172 	 */
1173 	if (biox) {
1174 		if (bufx->b_cmd == BUF_CMD_READ) {
1175 			++mycpu->gd_cnt.v_swapin;
1176 			mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
1177 		} else {
1178 			++mycpu->gd_cnt.v_swapout;
1179 			mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
1180 			bufx->b_dirtyend = bufx->b_bcount;
1181 		}
1182 		KKASSERT(bufx->b_bcount);
1183 		if (bufx->b_cmd != BUF_CMD_READ)
1184 			bufx->b_dirtyend = bufx->b_bcount;
1185 		/* biox, bufx = NULL */
1186 	}
1187 
1188 	/*
1189 	 * Now initiate all the I/O.  Be careful looping on our chain as
1190 	 * I/O's may complete while we are still initiating them.
1191 	 *
1192 	 * If the request is a 100% sparse read no bios will be present
1193 	 * and we just biodone() the buffer.
1194 	 */
1195 	nbio->bio_caller_info2.cluster_tail = NULL;
1196 	bufx = nbio->bio_caller_info1.cluster_head;
1197 
1198 	if (bufx) {
1199 		while (bufx) {
1200 			biox = &bufx->b_bio1;
1201 			BUF_KERNPROC(bufx);
1202 			bufx = bufx->b_cluster_next;
1203 			vn_strategy(swapdev_vp, biox);
1204 		}
1205 	} else {
1206 		biodone(bio);
1207 	}
1208 
1209 	/*
1210 	 * Completion of the cluster will also call biodone_chain(nbio).
1211 	 * We never call biodone(nbio) so we don't have to worry about
1212 	 * setting up a bio_done callback.  It's handled in the sub-IO.
1213 	 */
1214 	/**/
1215 }
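
/*
 * Clustering example (illustrative, assuming a stripe size SWB_DMMAX of
 * 512 pages): a run of contiguous swap blocks 510, 511, 512 is split
 * into two sub-bios, {510, 511} and {512}, because 511 -> 512 crosses
 * the ~SWB_DMMASK stripe boundary even though the blocks themselves are
 * contiguous.
 */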
1216 
1217 /*
1218  * biodone callback
1219  *
1220  * No requirements.
1221  */
1222 static void
1223 swap_chain_iodone(struct bio *biox)
1224 {
1225 	struct buf **nextp;
1226 	struct buf *bufx;	/* chained sub-buffer */
1227 	struct bio *nbio;	/* parent nbio with chain glue */
1228 	struct buf *bp;		/* original bp associated with nbio */
1229 	int chain_empty;
1230 
1231 	bufx = biox->bio_buf;
1232 	nbio = biox->bio_caller_info1.cluster_parent;
1233 	bp = nbio->bio_buf;
1234 
1235 	/*
1236 	 * Update the original buffer
1237 	 */
1238         KKASSERT(bp != NULL);
1239 	if (bufx->b_flags & B_ERROR) {
1240 		atomic_set_int(&bufx->b_flags, B_ERROR);
1241 		bp->b_error = bufx->b_error;	/* race ok */
1242 	} else if (bufx->b_resid != 0) {
1243 		atomic_set_int(&bufx->b_flags, B_ERROR);
1244 		bp->b_error = EINVAL;		/* race ok */
1245 	} else {
1246 		atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
1247 	}
1248 
1249 	/*
1250 	 * Remove us from the chain.
1251 	 */
1252 	spin_lock(&swapbp_spin);
1253 	nextp = &nbio->bio_caller_info1.cluster_head;
1254 	while (*nextp != bufx) {
1255 		KKASSERT(*nextp != NULL);
1256 		nextp = &(*nextp)->b_cluster_next;
1257 	}
1258 	*nextp = bufx->b_cluster_next;
1259 	chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
1260 	spin_unlock(&swapbp_spin);
1261 
1262 	/*
1263 	 * Clean up bufx.  If the chain is now empty we finish out
1264 	 * the parent.  Note that we may be racing other completions
1265 	 * so we must use the chain_empty status from above.
1266 	 */
1267 	if (chain_empty) {
1268 		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
1269 			atomic_set_int(&bp->b_flags, B_ERROR);
1270 			bp->b_error = EINVAL;
1271 		}
1272 		biodone_chain(nbio);
1273         }
1274         relpbuf(bufx, NULL);
1275 }
1276 
1277 /*
1278  * SWAP_PAGER_GETPAGES() - bring page in from swap
1279  *
1280  * The requested page may have to be brought in from swap.  Calculate the
1281  * swap block and bring in additional pages if possible.  All pages must
1282  * have contiguous swap block assignments and reside in the same object.
1283  *
1284  * The caller has a single vm_object_pip_add() reference prior to
1285  * calling us and we should return with the same.
1286  *
1287  * The caller has BUSY'd the page.  We should return with (*mpp) left busy,
1288  * and any additinal pages unbusied.
1289  * and any additional pages unbusied.
1290  * If the caller encounters a PG_RAM page it will pass it to us even though
1291  * it may be valid and dirty.  We cannot overwrite the page in this case!
1292  * The case is used to allow us to issue pure read-aheads.
1293  *
1294  * NOTE! XXX This code does not entirely pipeline yet due to the fact that
1295  *       the PG_RAM page is validated at the same time as mreq.  What we
1296  *	 really need to do is issue a separate read-ahead pbuf.
1297  *
1298  * No requirements.
1299  */
1300 static int
1301 swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
1302 {
1303 	struct buf *bp;
1304 	struct bio *bio;
1305 	vm_page_t mreq;
1306 	vm_page_t m;
1307 	vm_offset_t kva;
1308 	swblk_t blk;
1309 	int i;
1310 	int j;
1311 	int raonly;
1312 	int error;
1313 	u_int32_t busy_count;
1314 	vm_page_t marray[XIO_INTERNAL_PAGES];
1315 
1316 	mreq = *mpp;
1317 
1318 	vm_object_hold(object);
1319 	if (mreq->object != object) {
1320 		panic("swap_pager_getpages: object mismatch %p/%p",
1321 		    object,
1322 		    mreq->object
1323 		);
1324 	}
1325 
1326 	/*
1327 	 * We don't want to overwrite a fully valid page as it might be
1328 	 * dirty.  This case can occur when e.g. vm_fault hits a perfectly
1329 	 * valid page with PG_RAM set.
1330 	 *
1331 	 * In this case we see if the next page is a suitable page-in
1332 	 * candidate and if it is we issue read-ahead.  PG_RAM will be
1333 	 * set on the last page of the read-ahead to continue the pipeline.
1334 	 */
1335 	if (mreq->valid == VM_PAGE_BITS_ALL) {
1336 		if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size) {
1337 			vm_object_drop(object);
1338 			return(VM_PAGER_OK);
1339 		}
1340 		blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0);
1341 		if (blk == SWAPBLK_NONE) {
1342 			vm_object_drop(object);
1343 			return(VM_PAGER_OK);
1344 		}
1345 		m = vm_page_lookup_busy_try(object, mreq->pindex + 1,
1346 					    TRUE, &error);
1347 		if (error) {
1348 			vm_object_drop(object);
1349 			return(VM_PAGER_OK);
1350 		} else if (m == NULL) {
1351 			/*
1352 			 * Use VM_ALLOC_QUICK to avoid blocking on cache
1353 			 * page reuse.
1354 			 */
1355 			m = vm_page_alloc(object, mreq->pindex + 1,
1356 					  VM_ALLOC_QUICK);
1357 			if (m == NULL) {
1358 				vm_object_drop(object);
1359 				return(VM_PAGER_OK);
1360 			}
1361 		} else {
1362 			if (m->valid) {
1363 				vm_page_wakeup(m);
1364 				vm_object_drop(object);
1365 				return(VM_PAGER_OK);
1366 			}
1367 			vm_page_unqueue_nowakeup(m);
1368 		}
1369 		/* page is busy */
1370 		mreq = m;
1371 		raonly = 1;
1372 	} else {
1373 		raonly = 0;
1374 	}
1375 
1376 	/*
1377 	 * Try to block-read contiguous pages from swap if sequential,
1378 	 * otherwise just read one page.  Contiguous pages from swap must
1379 	 * reside within a single device stripe because the I/O cannot be
1380 	 * broken up across multiple stripes.
1381 	 *
1382 	 * Note that blk and iblk can be SWAPBLK_NONE but the loop is
1383 	 * set up such that the case(s) are handled implicitly.
1384 	 */
1385 	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
1386 	marray[0] = mreq;
1387 
1388 	for (i = 1; i <= swap_burst_read &&
1389 		    i < XIO_INTERNAL_PAGES &&
1390 		    mreq->pindex + i < object->size; ++i) {
1391 		swblk_t iblk;
1392 
1393 		iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0);
1394 		if (iblk != blk + i)
1395 			break;
1396 		if ((blk ^ iblk) & ~SWB_DMMASK)
1397 			break;
1398 		m = vm_page_lookup_busy_try(object, mreq->pindex + i,
1399 					    TRUE, &error);
1400 		if (error) {
1401 			break;
1402 		} else if (m == NULL) {
1403 			/*
1404 			 * Use VM_ALLOC_QUICK to avoid blocking on cache
1405 			 * page reuse.
1406 			 */
1407 			m = vm_page_alloc(object, mreq->pindex + i,
1408 					  VM_ALLOC_QUICK);
1409 			if (m == NULL)
1410 				break;
1411 		} else {
1412 			if (m->valid) {
1413 				vm_page_wakeup(m);
1414 				break;
1415 			}
1416 			vm_page_unqueue_nowakeup(m);
1417 		}
1418 		/* page is busy */
1419 		marray[i] = m;
1420 	}
1421 	if (i > 1)
1422 		vm_page_flag_set(marray[i - 1], PG_RAM);
1423 
1424 	/*
1425 	 * If mreq is the requested page and we have nothing to do return
1426 	 * VM_PAGER_FAIL.  If raonly is set mreq is just another read-ahead
1427 	 * page and must be cleaned up.
1428 	 */
1429 	if (blk == SWAPBLK_NONE) {
1430 		KKASSERT(i == 1);
1431 		if (raonly) {
1432 			vnode_pager_freepage(mreq);
1433 			vm_object_drop(object);
1434 			return(VM_PAGER_OK);
1435 		} else {
1436 			vm_object_drop(object);
1437 			return(VM_PAGER_FAIL);
1438 		}
1439 	}
1440 
1441 	/*
1442 	 * Map our page(s) into kva for input
1443 	 *
1444 	 * Use the KVABIO API to avoid synchronizing the pmap.
1445 	 */
1446 	bp = getpbuf_kva(&nsw_rcount);
1447 	bio = &bp->b_bio1;
1448 	kva = (vm_offset_t) bp->b_kvabase;
1449 	bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t));
1450 	pmap_qenter_noinval(kva, bp->b_xio.xio_pages, i);
1451 
1452 	bp->b_data = (caddr_t)kva;
1453 	bp->b_bcount = PAGE_SIZE * i;
1454 	bp->b_xio.xio_npages = i;
1455 	bp->b_flags |= B_KVABIO;
1456 	bio->bio_done = swp_pager_async_iodone;
1457 	bio->bio_offset = (off_t)blk << PAGE_SHIFT;
1458 	bio->bio_caller_info1.index = SWBIO_READ;
1459 
1460 	/*
1461 	 * Set index.  If raonly is set, put the index beyond the array so all
1462 	 * the pages are treated the same, otherwise the original mreq is
1463 	 * at index 0.
1464 	 */
1465 	if (raonly)
1466 		bio->bio_driver_info = (void *)(intptr_t)i;
1467 	else
1468 		bio->bio_driver_info = (void *)(intptr_t)0;
1469 
1470 	for (j = 0; j < i; ++j) {
1471 		atomic_set_int(&bp->b_xio.xio_pages[j]->busy_count,
1472 			       PBUSY_SWAPINPROG);
1473 	}
1474 
1475 	mycpu->gd_cnt.v_swapin++;
1476 	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;
1477 
1478 	/*
1479 	 * We still hold the lock on mreq, and our automatic completion routine
1480 	 * does not remove it.
1481 	 */
1482 	vm_object_pip_add(object, bp->b_xio.xio_npages);
1483 
1484 	/*
1485 	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
1486 	 * this point because we automatically release it on completion.
1487 	 * Instead, we look at the one page we are interested in which we
1488 	 * still hold a lock on even through the I/O completion.
1489 	 *
1490 	 * The other pages in our m[] array are also released on completion,
1491 	 * so we cannot assume they are valid anymore either.
1492 	 */
1493 	bp->b_cmd = BUF_CMD_READ;
1494 	BUF_KERNPROC(bp);
1495 	vn_strategy(swapdev_vp, bio);
1496 
1497 	/*
1498 	 * Wait for the page we want to complete.  PBUSY_SWAPINPROG is always
1499 	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
1500 	 * is set in the meta-data.
1501 	 *
1502 	 * If this is a read-ahead only we return immediately without
1503 	 * waiting for I/O.
1504 	 */
1505 	if (raonly) {
1506 		vm_object_drop(object);
1507 		return(VM_PAGER_OK);
1508 	}
1509 
1510 	/*
1511 	 * Read-ahead includes originally requested page case.
1512 	 */
1513 	for (;;) {
1514 		busy_count = mreq->busy_count;
1515 		cpu_ccfence();
1516 		if ((busy_count & PBUSY_SWAPINPROG) == 0)
1517 			break;
1518 		tsleep_interlock(mreq, 0);
1519 		if (!atomic_cmpset_int(&mreq->busy_count, busy_count,
1520 				       busy_count |
1521 				        PBUSY_SWAPINPROG | PBUSY_WANTED)) {
1522 			continue;
1523 		}
1524 		atomic_set_int(&mreq->flags, PG_REFERENCED);
1525 		mycpu->gd_cnt.v_intrans++;
1526 		if (tsleep(mreq, PINTERLOCKED, "swread", hz*20)) {
1527 			kprintf(
1528 			    "swap_pager: indefinite wait buffer: "
1529 			    " bp %p offset: %lld, size: %ld "
1530 			    " m=%p busy=%08x flags=%08x\n",
1531 			    bp,
1532 			    (long long)bio->bio_offset,
1533 			    (long)bp->b_bcount,
1534 			    mreq, mreq->busy_count, mreq->flags);
1535 		}
1536 	}
1537 
1538 	/*
1539 	 * Disallow speculative reads prior to the SWAPINPROG test.
1540 	 */
1541 	cpu_lfence();
1542 
1543 	/*
1544 	 * mreq is left busied after completion, but all the other pages
1545 	 * are freed.  If we had an unrecoverable read error the page will
1546 	 * not be valid.
1547 	 */
1548 	vm_object_drop(object);
1549 	if (mreq->valid != VM_PAGE_BITS_ALL)
1550 		return(VM_PAGER_ERROR);
1551 	else
1552 		return(VM_PAGER_OK);
1553 
1554 	/*
1555 	 * A final note: in a low swap situation, we cannot deallocate swap
1556 	 * and mark a page dirty here because the caller is likely to mark
1557 	 * the page clean when we return, causing the page to possibly revert
1558 	 * to all-zero's later.
1559 	 */
1560 }
1561 
1562 /*
1563  *	swap_pager_putpages:
1564  *
1565  *	Assign swap (if necessary) and initiate I/O on the specified pages.
1566  *
1567  *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
1568  *	are automatically converted to SWAP objects.
1569  *
1570  *	In a low memory situation we may block in vn_strategy(), but the new
1571  *	vm_page reservation system coupled with properly written VFS devices
1572  *	should ensure that no low-memory deadlock occurs.  This is an area
1573  *	which needs work.
1574  *
1575  *	The parent has N vm_object_pip_add() references prior to
1576  *	calling us and will remove references for rtvals[] that are
1577  *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
1578  *	completion.
1579  *
1580  *	The parent has soft-busy'd the pages it passes us and will unbusy
1581  *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1582  *	We need to unbusy the rest on I/O completion.
1583  *
1584  * No requirements.
1585  */
1586 void
1587 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
1588 		    int flags, int *rtvals)
1589 {
1590 	int i;
1591 	int n = 0;
1592 
1593 	vm_object_hold(object);
1594 
1595 	if (count && m[0]->object != object) {
1596 		panic("swap_pager_putpages: object mismatch %p/%p",
1597 		    object,
1598 		    m[0]->object
1599 		);
1600 	}
1601 
1602 	/*
1603 	 * Step 1
1604 	 *
1605 	 * Turn object into OBJT_SWAP
1606 	 * Check for bogus sysops
1607 	 *
1608 	 * Force sync if not the pageout process; we don't want any single
1609 	 * non-pageout process to be able to hog the I/O subsystem!  This
1610 	 * can be overridden by setting the vm.swap_user_async sysctl.
1611 	 */
1612 	if (object->type == OBJT_DEFAULT)
1613 		swp_pager_meta_convert(object);
1616 
1617 	/*
1618 	 * Normally we force synchronous swap I/O if this is not the
1619 	 * pageout daemon to prevent any single user process limited
1620 	 * via RLIMIT_RSS from hogging swap write bandwidth.
1621 	 */
1622 	if (curthread != pagethread &&
1623 	    curthread != emergpager &&
1624 	    swap_user_async == 0) {
1625 		flags |= OBJPC_SYNC;
1626 	}
1627 
1628 	/*
1629 	 * Step 2
1630 	 *
1631 	 * Update nsw parameters from swap_async_max sysctl values.
1632 	 * Do not let the sysop crash the machine with bogus numbers.
1633 	 */
1634 	if (swap_async_max != nsw_wcount_async_max) {
1635 		int n;
1636 
1637 		/*
1638 		 * limit range
1639 		 */
1640 		if ((n = swap_async_max) > nswbuf_kva / 2)
1641 			n = nswbuf_kva / 2;
1642 		if (n < 1)
1643 			n = 1;
1644 		swap_async_max = n;
1645 
1646 		/*
1647 		 * Adjust difference ( if possible ).  If the current async
1648 		 * count is too low, we may not be able to make the adjustment
1649 		 * at this time.
1650 		 *
1651 		 * vm_token needed for nsw_wcount sleep interlock
1652 		 */
1653 		lwkt_gettoken(&vm_token);
1654 		n -= nsw_wcount_async_max;
1655 		if (nsw_wcount_async + n >= 0) {
1656 			nsw_wcount_async_max += n;
1657 			pbuf_adjcount(&nsw_wcount_async, n);
1658 		}
1659 		lwkt_reltoken(&vm_token);
1660 	}
1661 
1662 	/*
1663 	 * Step 3
1664 	 *
1665 	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
1666 	 * The page is left dirty until the pageout operation completes
1667 	 * successfully.
1668 	 */
1669 
1670 	for (i = 0; i < count; i += n) {
1671 		struct buf *bp;
1672 		struct bio *bio;
1673 		swblk_t blk;
1674 		int j;
1675 
1676 		/*
1677 		 * Maximum I/O size is limited by a number of factors.
1678 		 */
1679 
1680 		n = min(BLIST_MAX_ALLOC, count - i);
1681 		n = min(n, nsw_cluster_max);
1682 
1683 		lwkt_gettoken(&vm_token);
1684 
1685 		/*
1686 		 * Get biggest block of swap we can.  If we fail, fall
1687 		 * back and try to allocate a smaller block.  Don't go
1688 		 * overboard trying to allocate space if it would overly
1689 		 * fragment swap.
1690 		 */
1691 		while (
1692 		    (blk = swp_pager_getswapspace(object, n)) == SWAPBLK_NONE &&
1693 		    n > 4
1694 		) {
1695 			n >>= 1;
1696 		}
1697 		if (blk == SWAPBLK_NONE) {
1698 			for (j = 0; j < n; ++j)
1699 				rtvals[i+j] = VM_PAGER_FAIL;
1700 			lwkt_reltoken(&vm_token);
1701 			continue;
1702 		}
1703 		if (vm_report_swap_allocs > 0) {
1704 			kprintf("swap_alloc %08jx,%d\n", (intmax_t)blk, n);
1705 			--vm_report_swap_allocs;
1706 		}
1707 
1708 		/*
1709 		 * The I/O we are constructing cannot cross a physical
1710 		 * disk boundry in the swap stripe.
1711 		 * disk boundary in the swap stripe.
1712 		if ((blk ^ (blk + n)) & ~SWB_DMMASK) {
1713 			j = ((blk + SWB_DMMAX) & ~SWB_DMMASK) - blk;
1714 			swp_pager_freeswapspace(object, blk + j, n - j);
1715 			n = j;
1716 		}
1717 
1718 		/*
1719 		 * All I/O parameters have been satisfied, build the I/O
1720 		 * request and assign the swap space.
1721 		 *
1722 		 * Use the KVABIO API to avoid synchronizing the pmap.
1723 		 */
1724 		if ((flags & OBJPC_SYNC))
1725 			bp = getpbuf_kva(&nsw_wcount_sync);
1726 		else
1727 			bp = getpbuf_kva(&nsw_wcount_async);
1728 		bio = &bp->b_bio1;
1729 
1730 		lwkt_reltoken(&vm_token);
1731 
1732 		pmap_qenter_noinval((vm_offset_t)bp->b_data, &m[i], n);
1733 
1734 		bp->b_flags |= B_KVABIO;
1735 		bp->b_bcount = PAGE_SIZE * n;
1736 		bio->bio_offset = (off_t)blk << PAGE_SHIFT;
1737 
1738 		for (j = 0; j < n; ++j) {
1739 			vm_page_t mreq = m[i+j];
1740 
1741 			swp_pager_meta_build(mreq->object, mreq->pindex,
1742 					     blk + j);
1743 			if (object->type == OBJT_SWAP)
1744 				vm_page_dirty(mreq);
1745 			rtvals[i+j] = VM_PAGER_OK;
1746 
1747 			atomic_set_int(&mreq->busy_count, PBUSY_SWAPINPROG);
1748 			bp->b_xio.xio_pages[j] = mreq;
1749 		}
1750 		bp->b_xio.xio_npages = n;
1751 
1752 		mycpu->gd_cnt.v_swapout++;
1753 		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;
1754 
1755 		bp->b_dirtyoff = 0;		/* req'd for NFS */
1756 		bp->b_dirtyend = bp->b_bcount;	/* req'd for NFS */
1757 		bp->b_cmd = BUF_CMD_WRITE;
1758 		bio->bio_caller_info1.index = SWBIO_WRITE;
1759 
1760 		/*
1761 		 * asynchronous
1762 		 */
1763 		if ((flags & OBJPC_SYNC) == 0) {
1764 			bio->bio_done = swp_pager_async_iodone;
1765 			BUF_KERNPROC(bp);
1766 			vn_strategy(swapdev_vp, bio);
1767 
1768 			for (j = 0; j < n; ++j)
1769 				rtvals[i+j] = VM_PAGER_PEND;
1770 			continue;
1771 		}
1772 
1773 		/*
1774 		 * Issue synchronously.
1775 		 *
1776 		 * Wait for the sync I/O to complete, then update rtvals.
1777 		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1778 		 * our async completion routine at the end, thus avoiding a
1779 		 * double-free.
1780 		 */
1781 		bio->bio_caller_info1.index |= SWBIO_SYNC;
1782 		if (flags & OBJPC_TRY_TO_CACHE)
1783 			bio->bio_caller_info1.index |= SWBIO_TTC;
1784 		bio->bio_done = biodone_sync;
1785 		bio->bio_flags |= BIO_SYNC;
1786 		vn_strategy(swapdev_vp, bio);
1787 		biowait(bio, "swwrt");
1788 
1789 		for (j = 0; j < n; ++j)
1790 			rtvals[i+j] = VM_PAGER_PEND;
1791 
1792 		/*
1793 		 * Now that we are through with the bp, we can call the
1794 		 * normal async completion, which frees everything up.
1795 		 */
1796 		swp_pager_async_iodone(bio);
1797 	}
1798 	vm_object_drop(object);
1799 }
1800 
1801 /*
1802  * No requirements.
1803  *
1804  * Recalculate the low and high-water marks.
1805  */
1806 void
1807 swap_pager_newswap(void)
1808 {
1809 	/*
1810 	 * NOTE: vm_swap_max cannot exceed 1 billion blocks, which is the
1811 	 *	 limitation imposed by the blist code.  Remember that this
1812 	 *	 will be divided by NSWAP_MAX (4), so each swap device is
1813 	 *	 limited to around a terabyte.
1814 	 */
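	/*
	 * Example (assuming 4KB pages): a 256GB swap configuration has a
	 * vm_swap_max of roughly 67 million blocks, yielding a low-water
	 * mark of about 2.7 million blocks (4%) and a high-water mark of
	 * about 4 million blocks (6%).
	 */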
1815 	if (vm_swap_max) {
1816 		nswap_lowat = (int64_t)vm_swap_max * 4 / 100;	/* 4% left */
1817 		nswap_hiwat = (int64_t)vm_swap_max * 6 / 100;	/* 6% left */
1818 		kprintf("swap low/high-water marks set to %d/%d\n",
1819 			nswap_lowat, nswap_hiwat);
1820 	} else {
1821 		nswap_lowat = 128;
1822 		nswap_hiwat = 512;
1823 	}
1824 	swp_sizecheck();
1825 }
1826 
1827 /*
1828  *	swp_pager_async_iodone:
1829  *
1830  *	Completion routine for asynchronous reads and writes from/to swap.
1831  *	Also called manually by synchronous code to finish up a bp.
1832  *
1833  *	For READ operations the pages are BUSY'd; for WRITE operations the
1834  *	pages are vm_page_t->busy'd.  On read completion we unbusy all pages
1835  *	except the 'main' request page.  On write completion we unbusy all
1836  *	pages ( we can do this because we marked them all VM_PAGER_PEND on
1837  *	return from putpages ).
1838  *
1839  *	This routine may not block.
1840  *
1841  * No requirements.
1842  */
1843 static void
1844 swp_pager_async_iodone(struct bio *bio)
1845 {
1846 	struct buf *bp = bio->bio_buf;
1847 	vm_object_t object = NULL;
1848 	int i;
1849 	int *nswptr;
1850 
1851 	/*
1852 	 * report error
1853 	 */
1854 	if (bp->b_flags & B_ERROR) {
1855 		kprintf(
1856 		    "swap_pager: I/O error - %s failed; offset %lld, "
1857 			"size %ld, error %d\n",
1858 		    ((bio->bio_caller_info1.index & SWBIO_READ) ?
1859 			"pagein" : "pageout"),
1860 		    (long long)bio->bio_offset,
1861 		    (long)bp->b_bcount,
1862 		    bp->b_error
1863 		);
1864 	}
1865 
1866 	/*
1867 	 * set object.
1868 	 */
1869 	if (bp->b_xio.xio_npages)
1870 		object = bp->b_xio.xio_pages[0]->object;
1871 
1872 #if 0
1873 	/* PMAP TESTING CODE (useful, keep it in but #if 0'd) */
1874 	if (bio->bio_caller_info1.index & SWBIO_WRITE) {
1875 		if (bio->bio_crc != iscsi_crc32(bp->b_data, bp->b_bcount)) {
1876 			kprintf("SWAPOUT: BADCRC %08x %08x\n",
1877 				bio->bio_crc,
1878 				iscsi_crc32(bp->b_data, bp->b_bcount));
1879 			for (i = 0; i < bp->b_xio.xio_npages; ++i) {
1880 				vm_page_t m = bp->b_xio.xio_pages[i];
1881 				if ((m->flags & PG_WRITEABLE) &&
1882 				    (pmap_mapped_sync(m) & PG_WRITEABLE)) {
1883 					kprintf("SWAPOUT: "
1884 						"%d/%d %p writable\n",
1885 						i, bp->b_xio.xio_npages, m);
1886 				}
1887 			}
1888 		}
1889 	}
1890 #endif
1891 
1892 	/*
1893 	 * remove the mapping for kernel virtual
1894 	 */
1895 	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);
1896 
1897 	/*
1898 	 * cleanup pages.  If an error occurs writing to swap, we are in
1899 	 * very serious trouble.  If it happens to be a disk error, though,
1900 	 * we may be able to recover by reassigning the swap later on.  So
1901 	 * in this case we remove the m->swapblk assignment for the page
1902 	 * but do not return it to the swap bitmap.  The erroneous block(s) are thus
1903 	 * never reallocated as swap.  Redirty the page and continue.
1904 	 */
1905 	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
1906 		vm_page_t m = bp->b_xio.xio_pages[i];
1907 
1908 		if (bp->b_flags & B_ERROR) {
1909 			/*
1910 			 * If an error occurs I'd love to throw the swapblk
1911 			 * away without freeing it back to swapspace, so it
1912 			 * can never be used again.  But I can't from an
1913 			 * interrupt.
1914 			 */
1915 
1916 			if (bio->bio_caller_info1.index & SWBIO_READ) {
1917 				/*
1918 				 * When reading, reqpage needs to stay
1919 				 * locked for the parent, but all other
1920 				 * pages can be freed.  We still want to
1921 				 * wakeup the parent waiting on the page,
1922 				 * though.  ( also: pg_reqpage can be -1 and
1923 				 * not match anything ).
1924 				 *
1925 				 * We have to wake specifically requested pages
1926 				 * up too because we cleared SWAPINPROG and
1927 				 * someone may be waiting for that.
1928 				 *
1929 				 * NOTE: For reads, m->dirty will probably
1930 				 *	 be overridden by the original caller
1931 				 *	 of getpages so don't play cute tricks
1932 				 *	 here.
1933 				 *
1934 				 * NOTE: We can't actually free the page from
1935 				 *	 here, because this is an interrupt.
1936 				 *	 It is not legal to mess with
1937 				 *	 object->memq from an interrupt.
1938 				 *	 Deactivate the page instead.
1939 				 *
1940 				 * WARNING! The instant SWAPINPROG is
1941 				 *	    cleared another cpu may start
1942 				 *	    using the mreq page (it will
1943 				 *	    check m->valid immediately).
1944 				 */
1945 
1946 				m->valid = 0;
1947 				atomic_clear_int(&m->busy_count,
1948 						 PBUSY_SWAPINPROG);
1949 
1950 				/*
1951 				 * bio_driver_info holds the requested page
1952 				 * index.
1953 				 */
1954 				if (i != (int)(intptr_t)bio->bio_driver_info) {
1955 					vm_page_deactivate(m);
1956 					vm_page_wakeup(m);
1957 				} else {
1958 					vm_page_flash(m);
1959 				}
1960 				/*
1961 				 * If i == bp->b_pager.pg_reqpage, do not wake
1962 				 * the page up.  The caller needs to.
1963 				 */
1964 			} else {
1965 				/*
1966 				 * If a write error occurs remove the swap
1967 				 * assignment (note that PG_SWAPPED may or
1968 				 * may not be set depending on prior activity).
1969 				 *
1970 				 * Re-dirty OBJT_SWAP pages as there is no
1971 				 * other backing store, we can't throw the
1972 				 * page away.
1973 				 *
1974 				 * Non-OBJT_SWAP pages (aka swapcache) must
1975 				 * not be dirtied since they may not have
1976 				 * been dirty in the first place, and they
1977 				 * do have backing store (the vnode).
1978 				 */
1979 				vm_page_busy_wait(m, FALSE, "swadpg");
1980 				vm_object_hold(m->object);
1981 				swp_pager_meta_ctl(m->object, m->pindex,
1982 						   SWM_FREE);
1983 				vm_page_flag_clear(m, PG_SWAPPED);
1984 				vm_object_drop(m->object);
1985 				if (m->object->type == OBJT_SWAP) {
1986 					vm_page_dirty(m);
1987 					vm_page_activate(m);
1988 				}
1989 				vm_page_io_finish(m);
1990 				atomic_clear_int(&m->busy_count,
1991 						 PBUSY_SWAPINPROG);
1992 				vm_page_wakeup(m);
1993 			}
1994 		} else if (bio->bio_caller_info1.index & SWBIO_READ) {
1995 			/*
1996 			 * NOTE: for reads, m->dirty will probably be
1997 			 * overridden by the original caller of getpages so
1998 			 * we cannot set them in order to free the underlying
1999 			 * swap in a low-swap situation.  I don't think we'd
2000 			 * want to do that anyway, but it was an optimization
2001 			 * that existed in the old swapper for a time before
2002 			 * it got ripped out due to precisely this problem.
2003 			 *
2004 			 * If not the requested page then deactivate it.
2005 			 *
2006 			 * Note that the requested page, reqpage, is left
2007 			 * busied, but we still have to wake it up.  The
2008 			 * other pages are released (unbusied) by
2009 			 * vm_page_wakeup().  We do not set reqpage's
2010 			 * valid bits here, it is up to the caller.
2011 			 */
2012 
2013 			/*
2014 			 * NOTE: Can't call pmap_clear_modify(m) from an
2015 			 *	 interrupt thread, the pmap code may have to
2016 			 *	 map non-kernel pmaps and currently asserts
2017 			 *	 the case.
2018 			 *
2019 			 * WARNING! The instant SWAPINPROG is
2020 			 *	    cleared another cpu may start
2021 			 *	    using the mreq page (it will
2022 			 *	    check m->valid immediately).
2023 			 */
2024 			/*pmap_clear_modify(m);*/
2025 			m->valid = VM_PAGE_BITS_ALL;
2026 			vm_page_undirty(m);
2027 			vm_page_flag_set(m, PG_SWAPPED);
2028 			atomic_clear_int(&m->busy_count, PBUSY_SWAPINPROG);
2029 
2030 			/*
2031 			 * We have to wake the specifically requested page
2032 			 * up too because we cleared SWAPINPROG and
2033 			 * getpages may be waiting on it.  However, be
2034 			 * sure not to unbusy getpages' specifically
2035 			 * requested page - getpages expects it to be
2036 			 * left busy.
2037 			 *
2038 			 * bio_driver_info holds the requested page
2039 			 */
2040 			if (i != (int)(intptr_t)bio->bio_driver_info) {
2041 				vm_page_deactivate(m);
2042 				vm_page_wakeup(m);
2043 			} else {
2044 				vm_page_flash(m);
2045 			}
2046 		} else {
2047 			/*
2048 			 * Mark the page clean but do not mess with the
2049 			 * pmap-layer's modified state.  That state should
2050 			 * also be clear since the caller protected the
2051 			 * page VM_PROT_READ, but allow the case.
2052 			 *
2053 			 * We are in an interrupt, avoid pmap operations.
2054 			 *
2055 			 * If we have a severe page deficit, deactivate the
2056 			 * page.  Do not try to cache it (which would also
2057 			 * involve a pmap op), because the page might still
2058 			 * be read-heavy.
2059 			 *
2060 			 * When using the swap to cache clean vnode pages
2061 			 * we do not mess with the page dirty bits.
2062 			 *
2063 			 * NOTE! Nobody is waiting for the key mreq page
2064 			 *	 on write completion.
2065 			 */
2066 			vm_page_busy_wait(m, FALSE, "swadpg");
2067 			if (m->object->type == OBJT_SWAP)
2068 				vm_page_undirty(m);
2069 			vm_page_flag_set(m, PG_SWAPPED);
2070 			atomic_clear_int(&m->busy_count, PBUSY_SWAPINPROG);
2071 			if (vm_page_count_severe())
2072 				vm_page_deactivate(m);
2073 			vm_page_io_finish(m);
2074 			if (bio->bio_caller_info1.index & SWBIO_TTC)
2075 				vm_page_try_to_cache(m);
2076 			else
2077 				vm_page_wakeup(m);
2078 		}
2079 	}
2080 
2081 	/*
2082 	 * adjust pip.  NOTE: the original parent may still have its own
2083 	 * pip refs on the object.
2084 	 */
2085 
2086 	if (object)
2087 		vm_object_pip_wakeup_n(object, bp->b_xio.xio_npages);
2088 
2089 	/*
2090 	 * Release the physical I/O buffer.
2091 	 *
2092 	 * NOTE: Due to synchronous operations in the write case b_cmd may
2093 	 *	 already be set to BUF_CMD_DONE and BIO_SYNC may have already
2094 	 *	 been cleared.
2095 	 *
2096 	 * Use vm_token to interlock nsw_rcount/wcount wakeup?
2097 	 */
2098 	lwkt_gettoken(&vm_token);
2099 	if (bio->bio_caller_info1.index & SWBIO_READ)
2100 		nswptr = &nsw_rcount;
2101 	else if (bio->bio_caller_info1.index & SWBIO_SYNC)
2102 		nswptr = &nsw_wcount_sync;
2103 	else
2104 		nswptr = &nsw_wcount_async;
2105 	bp->b_cmd = BUF_CMD_DONE;
2106 	relpbuf(bp, nswptr);
2107 	lwkt_reltoken(&vm_token);
2108 }
2109 
2110 /*
2111  * Fault-in a potentially swapped page and remove the swap reference.
2112  * (used by swapoff code)
2113  *
2114  * object must be held.
2115  */
2116 static __inline void
2117 swp_pager_fault_page(vm_object_t object, int *sharedp, vm_pindex_t pindex)
2118 {
2119 	struct vnode *vp;
2120 	vm_page_t m;
2121 	int error;
2122 
2123 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2124 
2125 	if (object->type == OBJT_VNODE) {
2126 		/*
2127 		 * Any swap related to a vnode is due to swapcache.  We must
2128 		 * vget() the vnode in case it is not active (otherwise
2129 		 * vref() will panic).  Calling vm_object_page_remove() will
2130 		 * ensure that any swap ref is removed interlocked with the
2131 		 * page.  clean_only is set to TRUE so we don't throw away
2132 		 * dirty pages.
2133 		 */
2134 		vp = object->handle;
2135 		error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
2136 		if (error == 0) {
2137 			vm_object_page_remove(object, pindex, pindex + 1, TRUE);
2138 			vput(vp);
2139 		}
2140 	} else {
2141 		/*
2142 		 * Otherwise it is a normal OBJT_SWAP object and we can
2143 		 * fault the page in and remove the swap.
2144 		 */
2145 		m = vm_fault_object_page(object, IDX_TO_OFF(pindex),
2146 					 VM_PROT_NONE,
2147 					 VM_FAULT_DIRTY | VM_FAULT_UNSWAP,
2148 					 sharedp, &error);
2149 		if (m)
2150 			vm_page_unhold(m);
2151 	}
2152 }
2153 
2154 /*
2155  * This removes all swap blocks related to a particular device.  We have
2156  * to be careful of ripups during the scan.
2157  */
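/*
 * The scan below inserts a dummy OBJT_MARKER object into each vm_object
 * hash list to record its position.  Because swp_pager_fault_page() can
 * block, objects may be inserted into or removed from the list while we
 * are busy; the marker survives such ripups, and after each object is
 * processed the marker is advanced past it.
 */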
2158 static int swp_pager_swapoff_callback(struct swblock *swap, void *data);
2159 
2160 int
2161 swap_pager_swapoff(int devidx)
2162 {
2163 	struct vm_object_hash *hash;
2164 	struct swswapoffinfo info;
2165 	struct vm_object marker;
2166 	vm_object_t object;
2167 	int n;
2168 
2169 	bzero(&marker, sizeof(marker));
2170 	marker.type = OBJT_MARKER;
2171 
2172 	for (n = 0; n < VMOBJ_HSIZE; ++n) {
2173 		hash = &vm_object_hash[n];
2174 
2175 		lwkt_gettoken(&hash->token);
2176 		TAILQ_INSERT_HEAD(&hash->list, &marker, object_entry);
2177 
2178 		while ((object = TAILQ_NEXT(&marker, object_entry)) != NULL) {
2179 			if (object->type == OBJT_MARKER)
2180 				goto skip;
2181 			if (object->type != OBJT_SWAP &&
2182 			    object->type != OBJT_VNODE)
2183 				goto skip;
2184 			vm_object_hold(object);
2185 			if (object->type != OBJT_SWAP &&
2186 			    object->type != OBJT_VNODE) {
2187 				vm_object_drop(object);
2188 				goto skip;
2189 			}
2190 
2191 			/*
2192 			 * Object is special in that we can't just pagein
2193 			 * into vm_page's in it (tmpfs, vn).
2194 			 */
2195 			if ((object->flags & OBJ_NOPAGEIN) &&
2196 			    RB_ROOT(&object->swblock_root)) {
2197 				vm_object_drop(object);
2198 				goto skip;
2199 			}
2200 
2201 			info.object = object;
2202 			info.shared = 0;
2203 			info.devidx = devidx;
2204 			swblock_rb_tree_RB_SCAN(&object->swblock_root,
2205 					    NULL, swp_pager_swapoff_callback,
2206 					    &info);
2207 			vm_object_drop(object);
2208 skip:
2209 			if (object == TAILQ_NEXT(&marker, object_entry)) {
2210 				TAILQ_REMOVE(&hash->list, &marker,
2211 					     object_entry);
2212 				TAILQ_INSERT_AFTER(&hash->list, object,
2213 						   &marker, object_entry);
2214 			}
2215 		}
2216 		TAILQ_REMOVE(&hash->list, &marker, object_entry);
2217 		lwkt_reltoken(&hash->token);
2218 	}
2219 
2220 	/*
2221 	 * If we fail to locate all swblocks we just fail gracefully and
2222 	 * do not bother to restore paging on the swap device.  The user
2223 	 * can simply retry the swapoff if desired.
2224 	 */
2225 	if (swdevt[devidx].sw_nused)
2226 		return (1);
2227 	else
2228 		return (0);
2229 }
2230 
2231 static
2232 int
2233 swp_pager_swapoff_callback(struct swblock *swap, void *data)
2234 {
2235 	struct swswapoffinfo *info = data;
2236 	vm_object_t object = info->object;
2237 	vm_pindex_t index;
2238 	swblk_t v;
2239 	int i;
2240 
2241 	index = swap->swb_index;
2242 	for (i = 0; i < SWAP_META_PAGES; ++i) {
2243 		/*
2244 		 * Make sure we don't race a dying object.  This will
2245 		 * kill the scan of the object's swap blocks entirely.
2246 		 */
2247 		if (object->flags & OBJ_DEAD)
2248 			return(-1);
2249 
2250 		/*
2251 		 * Fault the page, which can obviously block.  If the swap
2252 		 * structure disappears break out.
2253 		 */
2254 		v = swap->swb_pages[i];
2255 		if (v != SWAPBLK_NONE && BLK2DEVIDX(v) == info->devidx) {
2256 			swp_pager_fault_page(object, &info->shared,
2257 					     swap->swb_index + i);
2258 			/* swap ptr might go away */
2259 			if (RB_LOOKUP(swblock_rb_tree,
2260 				      &object->swblock_root, index) != swap) {
2261 				break;
2262 			}
2263 		}
2264 	}
2265 	return(0);
2266 }
2267 
2268 /************************************************************************
2269  *				SWAP META DATA 				*
2270  ************************************************************************
2271  *
2272  *	These routines manipulate the swap metadata stored in the
2273  *	OBJT_SWAP object.
2274  *
2275  *	Swap metadata is kept in a per-object red-black tree of swblock
2276  *	structures (object->swblock_root) rather than being linked into
2277  *	the page structures; the object also keeps a count (swblock_count).
2278  */
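/*
 * For reference, the swblock structure manipulated below looks roughly
 * like the following (see vm/swap_pager.h for the authoritative
 * definition; the fields listed here are inferred from their use in
 * this file):
 *
 *	struct swblock {
 *		RB_ENTRY(swblock) swb_entry;		RB tree linkage
 *		vm_pindex_t	swb_index;		base page index, aligned
 *							to SWAP_META_PAGES
 *		int		swb_count;		number of assigned blocks
 *		swblk_t		swb_pages[SWAP_META_PAGES];
 *	};
 */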
2279 
2280 /*
2281  * Lookup the swblock containing the specified swap block index.
2282  *
2283  * The caller must hold the object.
2284  */
2285 static __inline
2286 struct swblock *
2287 swp_pager_lookup(vm_object_t object, vm_pindex_t index)
2288 {
2289 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2290 	index &= ~(vm_pindex_t)SWAP_META_MASK;
2291 	return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
2292 }
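/*
 * For example, with SWAP_META_PAGES at a typical power-of-2 value of 32
 * (illustrative; see the header for the real constant), a lookup of page
 * index 0x47 masks down to a swb_index of 0x40, and the page's slot within
 * that swblock is 0x47 & SWAP_META_MASK == 7.
 */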
2293 
2294 /*
2295  * Remove a swblock from the RB tree.
2296  *
2297  * The caller must hold the object.
2298  */
2299 static __inline
2300 void
2301 swp_pager_remove(vm_object_t object, struct swblock *swap)
2302 {
2303 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2304 	RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap);
2305 }
2306 
2307 /*
2308  * Convert default object to swap object if necessary
2309  *
2310  * The caller must hold the object.
2311  */
2312 static void
2313 swp_pager_meta_convert(vm_object_t object)
2314 {
2315 	if (object->type == OBJT_DEFAULT) {
2316 		object->type = OBJT_SWAP;
2317 		KKASSERT(object->swblock_count == 0);
2318 	}
2319 }
2320 
2321 /*
2322  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
2323  *
2324  *	We first convert the object to a swap object if it is a default
2325  *	object.  Vnode objects do not need to be converted.
2326  *
2327  *	The specified swapblk is added to the object's swap metadata and
2328  *	must be valid (this is asserted).  Any swapblk previously assigned
2329  *	to the index is freed.
2330  *
2331  * The caller must hold the object.
2332  */
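/*
 * This is the call made from swap_pager_putpages() above for each page
 * written, e.g. swp_pager_meta_build(mreq->object, mreq->pindex, blk + j).
 */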
2333 static void
2334 swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk)
2335 {
2336 	struct swblock *swap;
2337 	struct swblock *oswap;
2338 	vm_pindex_t v;
2339 
2340 	KKASSERT(swapblk != SWAPBLK_NONE);
2341 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2342 
2343 	/*
2344 	 * Convert object if necessary
2345 	 */
2346 	if (object->type == OBJT_DEFAULT)
2347 		swp_pager_meta_convert(object);
2348 
2349 	/*
2350 	 * Locate the swblock, creating it if it does not exist.  If the
2351 	 * zone allocation fails we wait for memory and retry, since the
2352 	 * tree may have changed while we slept.
2353 	 */
2354 retry:
2355 	swap = swp_pager_lookup(object, index);
2356 
2357 	if (swap == NULL) {
2358 		int i;
2359 
2360 		swap = zalloc(swap_zone);
2361 		if (swap == NULL) {
2362 			vm_wait(0);
2363 			goto retry;
2364 		}
2365 		swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK;
2366 		swap->swb_count = 0;
2367 
2368 		++object->swblock_count;
2369 
2370 		for (i = 0; i < SWAP_META_PAGES; ++i)
2371 			swap->swb_pages[i] = SWAPBLK_NONE;
2372 		oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap);
2373 		KKASSERT(oswap == NULL);
2374 	}
2375 
2376 	/*
2377 	 * Delete prior contents of metadata.
2378 	 *
2379 	 * NOTE: Decrement swb_count after the freeing operation (which
2380 	 *	 might block) to prevent racing destruction of the swblock.
2381 	 */
2382 	index &= SWAP_META_MASK;
2383 
2384 	while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) {
2385 		swap->swb_pages[index] = SWAPBLK_NONE;
2386 		/* can block */
2387 		swp_pager_freeswapspace(object, v, 1);
2388 		--swap->swb_count;
2389 		--mycpu->gd_vmtotal.t_vm;
2390 	}
2391 
2392 	/*
2393 	 * Enter block into metadata
2394 	 */
2395 	swap->swb_pages[index] = swapblk;
2396 	if (swapblk != SWAPBLK_NONE) {
2397 		++swap->swb_count;
2398 		++mycpu->gd_vmtotal.t_vm;
2399 	}
2400 }
2401 
2402 /*
2403  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
2404  *
2405  *	The requested range of blocks is freed, with any associated swap
2406  *	returned to the swap bitmap.
2407  *
2408  *	This routine will free swap metadata structures as they are cleaned
2409  *	out.  This routine does *NOT* operate on swap metadata associated
2410  *	with resident pages.
2411  *
2412  * The caller must hold the object.
2413  */
2414 static int swp_pager_meta_free_callback(struct swblock *swb, void *data);
2415 
2416 static void
2417 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count)
2418 {
2419 	struct swfreeinfo info;
2420 
2421 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2422 
2423 	/*
2424 	 * Nothing to do
2425 	 */
2426 	if (object->swblock_count == 0) {
2427 		KKASSERT(RB_EMPTY(&object->swblock_root));
2428 		return;
2429 	}
2430 	if (count == 0)
2431 		return;
2432 
2433 	/*
2434 	 * Setup for RB tree scan.  Note that the pindex range can be huge
2435 	 * due to the 64 bit page index space so we cannot safely iterate.
2436 	 */
2437 	info.object = object;
2438 	info.basei = index & ~(vm_pindex_t)SWAP_META_MASK;
2439 	info.begi = index;
2440 	info.endi = index + count - 1;
2441 	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp,
2442 				swp_pager_meta_free_callback, &info);
2443 }
2444 
2445 /*
2446  * The caller must hold the object.
2447  */
2448 static
2449 int
2450 swp_pager_meta_free_callback(struct swblock *swap, void *data)
2451 {
2452 	struct swfreeinfo *info = data;
2453 	vm_object_t object = info->object;
2454 	int index;
2455 	int eindex;
2456 
2457 	/*
2458 	 * Figure out the range within the swblock.  The wider scan may
2459 	 * return edge-case swap blocks when the start and/or end points
2460 	 * are in the middle of a block.
2461 	 */
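	/*
	 * For example (assuming SWAP_META_PAGES == 32), freeing the range
	 * begi = 0x45 through endi = 0x52 against the swblock whose
	 * swb_index is 0x40 clamps index to 0x45 & SWAP_META_MASK == 5 and
	 * eindex to 0x52 & SWAP_META_MASK == 0x12, so only slots 5 through
	 * 0x12 are scanned and freed.
	 */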
2462 	if (swap->swb_index < info->begi)
2463 		index = (int)info->begi & SWAP_META_MASK;
2464 	else
2465 		index = 0;
2466 
2467 	if (swap->swb_index + SWAP_META_PAGES > info->endi)
2468 		eindex = (int)info->endi & SWAP_META_MASK;
2469 	else
2470 		eindex = SWAP_META_MASK;
2471 
2472 	/*
2473 	 * Scan and free the blocks.  The loop terminates early if (swap)
2474 	 * runs out of assigned blocks and is itself freed.
2475 	 *
2476 	 * NOTE: Decrement swb_count after swp_pager_freeswapspace()
2477 	 *	 to deal with a zfree race.
2478 	 */
2479 	while (index <= eindex) {
2480 		swblk_t v = swap->swb_pages[index];
2481 
2482 		if (v != SWAPBLK_NONE) {
2483 			swap->swb_pages[index] = SWAPBLK_NONE;
2484 			/* can block */
2485 			swp_pager_freeswapspace(object, v, 1);
2486 			--mycpu->gd_vmtotal.t_vm;
2487 			if (--swap->swb_count == 0) {
2488 				swp_pager_remove(object, swap);
2489 				zfree(swap_zone, swap);
2490 				--object->swblock_count;
2491 				break;
2492 			}
2493 		}
2494 		++index;
2495 	}
2496 
2497 	/* swap may be invalid here due to zfree above */
2498 	lwkt_yield();
2499 
2500 	return(0);
2501 }
2502 
2503 /*
2504  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
2505  *
2506  *	This routine locates and destroys all swap metadata associated with
2507  *	an object.
2508  *
2509  * NOTE: Decrement swb_count after the freeing operation (which
2510  *	 might block) to prevent racing destruction of the swblock.
2511  *
2512  * The caller must hold the object.
2513  */
2514 static void
2515 swp_pager_meta_free_all(vm_object_t object)
2516 {
2517 	struct swblock *swap;
2518 	int i;
2519 
2520 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2521 
2522 	while ((swap = RB_ROOT(&object->swblock_root)) != NULL) {
2523 		swp_pager_remove(object, swap);
2524 		for (i = 0; i < SWAP_META_PAGES; ++i) {
2525 			swblk_t v = swap->swb_pages[i];
2526 			if (v != SWAPBLK_NONE) {
2527 				/* can block */
2528 				swp_pager_freeswapspace(object, v, 1);
2529 				--swap->swb_count;
2530 				--mycpu->gd_vmtotal.t_vm;
2531 			}
2532 		}
2533 		if (swap->swb_count != 0)
2534 			panic("swap_pager_meta_free_all: swb_count != 0");
2535 		zfree(swap_zone, swap);
2536 		--object->swblock_count;
2537 		lwkt_yield();
2538 	}
2539 	KKASSERT(object->swblock_count == 0);
2540 }
2541 
2542 /*
2543  * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
2544  *
2545  *	This routine is capable of looking up, popping, or freeing
2546  *	swapblk assignments in the swap meta data or in the vm_page_t.
2547  *	The routine typically returns the swapblk being looked up or popped,
2548  *	or SWAPBLK_NONE if the block was freed or was never assigned.
2549  *	This routine will automatically free any invalid meta-data
2550  *	swapblks.
2551  *
2552  *	It is not possible to store invalid swapblks in the swap meta data
2553  *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
2554  *
2555  *	When acting on a busy resident page and paging is in progress, we
2556  *	have to wait until paging is complete but otherwise can act on the
2557  *	busy page.
2558  *
2559  *	SWM_FREE	remove and free swap block from metadata
2560  *	SWM_POP		remove from meta data but do not free.. pop it out
2561  *	SWM_POP		remove from meta data but do not free; just pop it out
2562  * The caller must hold the object.
2563  */
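/*
 * For example, the write-error path in swp_pager_async_iodone() above uses
 * swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE) to strip and free a
 * page's swap assignment, while a caller that wants to take over ownership
 * of the swap block uses SWM_POP and gets the swapblk back without it being
 * freed.
 */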
2564 static swblk_t
2565 swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
2566 {
2567 	struct swblock *swap;
2568 	swblk_t r1;
2569 
2570 	if (object->swblock_count == 0)
2571 		return(SWAPBLK_NONE);
2572 
2573 	r1 = SWAPBLK_NONE;
2574 	swap = swp_pager_lookup(object, index);
2575 
2576 	if (swap != NULL) {
2577 		index &= SWAP_META_MASK;
2578 		r1 = swap->swb_pages[index];
2579 
2580 		if (r1 != SWAPBLK_NONE) {
2581 			if (flags & (SWM_FREE|SWM_POP)) {
2582 				swap->swb_pages[index] = SWAPBLK_NONE;
2583 				--mycpu->gd_vmtotal.t_vm;
2584 				if (--swap->swb_count == 0) {
2585 					swp_pager_remove(object, swap);
2586 					zfree(swap_zone, swap);
2587 					--object->swblock_count;
2588 				}
2589 			}
2590 			/* swap ptr may be invalid */
2591 			if (flags & SWM_FREE) {
2592 				swp_pager_freeswapspace(object, r1, 1);
2593 				r1 = SWAPBLK_NONE;
2594 			}
2595 		}
2596 		/* swap ptr may be invalid */
2597 	}
2598 	return(r1);
2599 }
2600