xref: /dragonfly/sys/vm/swap_pager.c (revision 956939d5)
1 /*
2  * Copyright (c) 1998,2004 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * Copyright (c) 1994 John S. Dyson
35  * Copyright (c) 1990 University of Utah.
36  * Copyright (c) 1991, 1993
37  *	The Regents of the University of California.  All rights reserved.
38  *
39  * This code is derived from software contributed to Berkeley by
40  * the Systems Programming Group of the University of Utah Computer
41  * Science Department.
42  *
43  * Redistribution and use in source and binary forms, with or without
44  * modification, are permitted provided that the following conditions
45  * are met:
46  * 1. Redistributions of source code must retain the above copyright
47  *    notice, this list of conditions and the following disclaimer.
48  * 2. Redistributions in binary form must reproduce the above copyright
49  *    notice, this list of conditions and the following disclaimer in the
50  *    documentation and/or other materials provided with the distribution.
51  * 3. All advertising materials mentioning features or use of this software
52  *    must display the following acknowledgement:
53  *	This product includes software developed by the University of
54  *	California, Berkeley and its contributors.
55  * 4. Neither the name of the University nor the names of its contributors
56  *    may be used to endorse or promote products derived from this software
57  *    without specific prior written permission.
58  *
59  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
60  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
63  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
65  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69  * SUCH DAMAGE.
70  *
71  *				New Swap System
72  *				Matthew Dillon
73  *
74  * Radix Bitmap 'blists'.
75  *
76  *	- The new swapper uses the new radix bitmap code.  This should scale
77  *	  to arbitrarily small or arbitrarily large swap spaces and an almost
78  *	  arbitrary degree of fragmentation.
79  *
80  * Features:
81  *
82  *	- on the fly reallocation of swap during putpages.  The new system
83  *	  does not try to keep previously allocated swap blocks for dirty
84  *	  pages.
85  *
86  *	- on the fly deallocation of swap
87  *
88  *	- No more garbage collection required.  Unnecessarily allocated swap
89  *	  blocks only exist for dirty vm_page_t's now and these are already
90  *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
91  *	  removal of invalidated swap blocks when a page is destroyed
92  *	  or renamed.
93  *
94  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
95  *
96  *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
97  *
98  * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
99  * $DragonFly: src/sys/vm/swap_pager.c,v 1.32 2008/07/01 02:02:56 dillon Exp $
100  */
101 
102 #include <sys/param.h>
103 #include <sys/systm.h>
104 #include <sys/conf.h>
105 #include <sys/kernel.h>
106 #include <sys/proc.h>
107 #include <sys/buf.h>
108 #include <sys/vnode.h>
109 #include <sys/malloc.h>
110 #include <sys/vmmeter.h>
111 #include <sys/sysctl.h>
112 #include <sys/blist.h>
113 #include <sys/lock.h>
114 #include <sys/thread2.h>
115 
116 #ifndef MAX_PAGEOUT_CLUSTER
117 #define MAX_PAGEOUT_CLUSTER 16
118 #endif
119 
120 #define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
121 
122 #include "opt_swap.h"
123 #include <vm/vm.h>
124 #include <vm/vm_object.h>
125 #include <vm/vm_page.h>
126 #include <vm/vm_pager.h>
127 #include <vm/vm_pageout.h>
128 #include <vm/swap_pager.h>
129 #include <vm/vm_extern.h>
130 #include <vm/vm_zone.h>
131 
132 #include <sys/buf2.h>
133 #include <vm/vm_page2.h>
134 
135 #define SWM_FREE	0x02	/* free, period			*/
136 #define SWM_POP		0x04	/* pop out			*/
137 
138 #define SWBIO_READ	0x01
139 #define SWBIO_WRITE	0x02
140 #define SWBIO_SYNC	0x04
141 
142 /*
143  * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
144  * in the old system.
145  */
146 
147 extern int vm_swap_size;	/* number of free swap blocks, in pages */
148 
149 int swap_pager_full;		/* swap space exhaustion (task killing) */
150 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
151 static int nsw_rcount;		/* free read buffers			*/
152 static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
153 static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
154 static int nsw_wcount_async_max;/* assigned maximum			*/
155 static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
156 static int sw_alloc_interlock;	/* swap pager allocation interlock	*/
157 
158 struct blist *swapblist;
159 static struct swblock **swhash;
160 static int swhash_mask;
161 static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
162 
163 extern struct vnode *swapdev_vp;	/* from vm_swap.c */
164 
165 SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
166         CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
167 
168 /*
169  * "named" and "unnamed" anon region objects.  Try to reduce the overhead
170  * of searching a named list by hashing it just a little.
171  */
172 
173 #define NOBJLISTS		8
174 
175 #define NOBJLIST(handle)	\
176 	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
177 
178 static struct pagerlst	swap_pager_object_list[NOBJLISTS];
179 struct pagerlst		swap_pager_un_object_list;
180 vm_zone_t		swap_zone;
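
/*
 * Illustrative sketch (not compiled): how NOBJLIST() above folds a
 * handle into one of the NOBJLISTS buckets.  Kernel pointers are
 * typically at least 16-byte aligned, so the >> 4 discards the
 * always-zero low bits before masking.  The address below is an
 * arbitrary example, not a real handle.
 */
#if 0
	void *handle = (void *)(intptr_t)0xdeadbee0;
	int bucket = ((int)(intptr_t)handle >> 4) & (NOBJLISTS - 1);
	/* 0xdeadbee0 >> 4 ends in 0xe; 0xe & 7 == 6, so bucket == 6 */
	struct pagerlst *list = NOBJLIST(handle);
#endif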
181 
182 /*
183  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
184  * calls hooked from other parts of the VM system and do not appear here.
185  * (see vm/swap_pager.h).
186  */
187 
188 static vm_object_t
189 		swap_pager_alloc (void *handle, off_t size,
190 				  vm_prot_t prot, off_t offset);
191 static void	swap_pager_dealloc (vm_object_t object);
192 static int	swap_pager_getpages (vm_object_t, vm_page_t *, int, int);
193 static void	swap_pager_init (void);
194 static void	swap_pager_unswapped (vm_page_t);
195 static void	swap_pager_strategy (vm_object_t, struct bio *);
196 static void	swap_chain_iodone(struct bio *biox);
197 
198 struct pagerops swappagerops = {
199 	swap_pager_init,	/* early system initialization of pager	*/
200 	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
201 	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
202 	swap_pager_getpages,	/* pagein				*/
203 	swap_pager_putpages,	/* pageout				*/
204 	swap_pager_haspage,	/* get backing store status for page	*/
205 	swap_pager_unswapped,	/* remove swap related to page		*/
206 	swap_pager_strategy	/* pager strategy call			*/
207 };
208 
209 /*
210  * dmmax is in page-sized chunks with the new swap system.  It was
211  * dev-bsized chunks in the old.  dmmax is always a power of 2.
212  *
213  * swap_*() routines are externally accessible.  swp_*() routines are
214  * internal.
215  */
216 
217 int dmmax;
218 static int dmmax_mask;
219 int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
220 int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */
221 
222 static __inline void	swp_sizecheck (void);
223 static void	swp_pager_async_iodone (struct bio *bio);
224 
225 /*
226  * Swap bitmap functions
227  */
228 
229 static __inline void	swp_pager_freeswapspace (daddr_t blk, int npages);
230 static __inline daddr_t	swp_pager_getswapspace (int npages);
231 
232 /*
233  * Metadata functions
234  */
235 
236 static void swp_pager_meta_build (vm_object_t, vm_pindex_t, daddr_t);
237 static void swp_pager_meta_free (vm_object_t, vm_pindex_t, daddr_t);
238 static void swp_pager_meta_free_all (vm_object_t);
239 static daddr_t swp_pager_meta_ctl (vm_object_t, vm_pindex_t, int);
240 
241 /*
242  * SWP_SIZECHECK() -	update swap_pager_full indication
243  *
244  *	update the swap_pager_almost_full indication and warn when we are
245  *	about to run out of swap space, using lowat/hiwat hysteresis.
246  *
247  *	Clear swap_pager_full ( task killing ) indication when lowat is met.
248  *
249  *	No restrictions on call
250  *	This routine may not block.
251  *	This routine must be called at splvm()
252  */
253 
254 static __inline void
255 swp_sizecheck(void)
256 {
257 	if (vm_swap_size < nswap_lowat) {
258 		if (swap_pager_almost_full == 0) {
259 			kprintf("swap_pager: out of swap space\n");
260 			swap_pager_almost_full = 1;
261 		}
262 	} else {
263 		swap_pager_full = 0;
264 		if (vm_swap_size > nswap_hiwat)
265 			swap_pager_almost_full = 0;
266 	}
267 }
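
/*
 * Worked example of the hysteresis above using the default thresholds
 * (nswap_lowat = 128, nswap_hiwat = 512, in pages):
 *
 *	vm_swap_size 200 -> 100		almost_full 0 -> 1, warn once
 *	vm_swap_size 100 -> 300		almost_full stays 1 (<= hiwat)
 *	vm_swap_size 300 -> 600		almost_full 1 -> 0
 *
 * The gap between lowat and hiwat keeps a value oscillating near one
 * threshold from generating a stream of console messages.
 */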
268 
269 /*
270  * SWAP_PAGER_INIT() -	initialize the swap pager!
271  *
272  *	Expected to be started from system init.  NOTE:  This code is run
273  *	before much else so be careful what you depend on.  Most of the VM
274  *	system has yet to be initialized at this point.
275  */
276 
277 static void
278 swap_pager_init(void)
279 {
280 	/*
281 	 * Initialize object lists
282 	 */
283 	int i;
284 
285 	for (i = 0; i < NOBJLISTS; ++i)
286 		TAILQ_INIT(&swap_pager_object_list[i]);
287 	TAILQ_INIT(&swap_pager_un_object_list);
288 
289 	/*
290 	 * Device Stripe, in PAGE_SIZE'd blocks
291 	 */
292 
293 	dmmax = SWB_NPAGES * 2;
294 	dmmax_mask = ~(dmmax - 1);
295 }
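
/*
 * Worked example of the stripe arithmetic, assuming the default
 * MAX_PAGEOUT_CLUSTER of 16: dmmax = 32 pages, dmmax_mask = ~31.
 * Two block numbers lie in the same interleave stripe exactly when
 * they agree in every bit above the low five, which is the
 * ((blk0 ^ blk1) & dmmax_mask) test used by the clustering code:
 *
 *	(60 ^ 63) & ~31 == 3 & ~31 == 0		same stripe
 *	(63 ^ 64) & ~31 == 127 & ~31 != 0	stripe crossed
 */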
296 
297 /*
298  * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
299  *
300  *	Expected to be started from pageout process once, prior to entering
301  *	its main loop.
302  */
303 
304 void
305 swap_pager_swap_init(void)
306 {
307 	int n, n2;
308 
309 	/*
310 	 * Number of in-transit swap bp operations.  Don't
311 	 * exhaust the pbufs completely.  Make sure we
312 	 * initialize workable values (0 will work for hysteresis
313 	 * but it isn't very efficient).
314 	 *
315 	 * The nsw_cluster_max is constrained by the number of pages an XIO
316 	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
317 	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
318 	 * constrained by the swap device interleave stripe size.
319 	 *
320 	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
321 	 * designed to prevent other I/O from having high latencies due to
322 	 * our pageout I/O.  The value 4 works well for one or two active swap
323 	 * devices but is probably a little low if you have more.  Even so,
324 	 * a higher value would probably generate only a limited improvement
325 	 * with three or four active swap devices since the system does not
326 	 * typically have to pageout at extreme bandwidths.   We will want

327  *	at least 2 per swap device, and 4 is a pretty good value if you
328 	 * have one NFS swap device due to the command/ack latency over NFS.
329 	 * So it all works out pretty well.
330 	 */
331 
332 	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
333 
334 	nsw_rcount = (nswbuf + 1) / 2;
335 	nsw_wcount_sync = (nswbuf + 3) / 4;
336 	nsw_wcount_async = 4;
337 	nsw_wcount_async_max = nsw_wcount_async;
338 
339 	/*
340 	 * The zone is dynamically allocated so generally size it to
341 	 * maxswzone (32MB to 512MB of KVM).  Set a minimum size based
342 	 * on physical memory of around 8x (each swblock can hold 16 pages).
343 	 *
344 	 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
345 	 * has increased dramatically.
346 	 */
347 	n = vmstats.v_page_count / 2;
348 	if (maxswzone && n < maxswzone / sizeof(struct swblock))
349 		n = maxswzone / sizeof(struct swblock);
350 	n2 = n;
351 
352 	do {
353 		swap_zone = zinit(
354 			"SWAPMETA",
355 			sizeof(struct swblock),
356 			n,
357 			ZONE_INTERRUPT,
358 			1);
359 		if (swap_zone != NULL)
360 			break;
361 		/*
362 		 * if the allocation failed, try a zone two thirds the
363 		 * size of the previous attempt.
364 		 */
365 		n -= ((n + 2) / 3);
366 	} while (n > 0);
367 
368 	if (swap_zone == NULL)
369 		panic("swap_pager_swap_init: swap_zone == NULL");
370 	if (n2 != n)
371 		kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
372 	n2 = n;
373 
374 	/*
375 	 * Initialize our meta-data hash table.  The swapper does not need to
376 	 * be quite as efficient as the VM system, so we do not use an
377 	 * oversized hash table.
378 	 *
379 	 * 	n: 		size of hash table, must be power of 2
380 	 *	swhash_mask:	hash table index mask
381 	 */
382 
383 	for (n = 1; n < n2 / 8; n *= 2)
384 		;
385 
386 	swhash = kmalloc(sizeof(struct swblock *) * n, M_VMPGDATA,
387 	    M_WAITOK | M_ZERO);
388 
389 	swhash_mask = n - 1;
390 }
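
/*
 * Sizing sketch for the code above, assuming a machine with 1M vm
 * pages, maxswzone left at 0, and a successful first zinit() attempt:
 * n2 == 524288 swblock entries, and the doubling loop picks the
 * smallest power of 2 >= n2 / 8:
 *
 *	for (n = 1; n < 524288 / 8; n *= 2)
 *		;
 *	n == 65536, swhash_mask == 0xffff
 *
 * Each swblock covers SWAP_META_PAGES page indices, so a modest table
 * still keeps the hash chains short.
 */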
391 
392 /*
393  * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
394  *			its metadata structures.
395  *
396  *	This routine is called from the mmap and fork code to create a new
397  *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
398  *	and then converting it with swp_pager_meta_build().
399  *
400  *	This routine may block in vm_object_allocate() and create a named
401  *	object lookup race, so we must interlock.   We must also run at
402  *	splvm() for the object lookup to handle races with interrupts, but
403  *	we do not have to maintain splvm() in between the lookup and the
404  *	add because (I believe) it is not possible to attempt to create
405  *	a new swap object w/handle when a default object with that handle
406  *	already exists.
407  */
408 
409 static vm_object_t
410 swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
411 {
412 	vm_object_t object;
413 
414 	if (handle) {
415 		/*
416 		 * Reference existing named region or allocate new one.  There
417 		 * should not be a race here against swp_pager_meta_build()
418 		 * as called from vm_page_remove() in regards to the lookup
419 		 * of the handle.
420 		 */
421 
422 		while (sw_alloc_interlock) {
423 			sw_alloc_interlock = -1;
424 			tsleep(&sw_alloc_interlock, 0, "swpalc", 0);
425 		}
426 		sw_alloc_interlock = 1;
427 
428 		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
429 
430 		if (object != NULL) {
431 			vm_object_reference(object);
432 		} else {
433 			object = vm_object_allocate(OBJT_DEFAULT,
434 				OFF_TO_IDX(offset + PAGE_MASK + size));
435 			object->handle = handle;
436 
437 			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
438 		}
439 
440 		if (sw_alloc_interlock < 0)
441 			wakeup(&sw_alloc_interlock);
442 
443 		sw_alloc_interlock = 0;
444 	} else {
445 		object = vm_object_allocate(OBJT_DEFAULT,
446 			OFF_TO_IDX(offset + PAGE_MASK + size));
447 
448 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
449 	}
450 
451 	return (object);
452 }
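
/*
 * The sw_alloc_interlock above is a minimal sleep lock built from a
 * plain int: 0 unlocked, 1 locked, -1 locked with a waiter.  The same
 * pattern in isolation, as hypothetical helpers (not part of this
 * file):
 */
#if 0
static void
sw_alloc_lock(void)
{
	while (sw_alloc_interlock) {
		sw_alloc_interlock = -1;	/* note that a waiter exists */
		tsleep(&sw_alloc_interlock, 0, "swpalc", 0);
	}
	sw_alloc_interlock = 1;
}

static void
sw_alloc_unlock(void)
{
	if (sw_alloc_interlock < 0)
		wakeup(&sw_alloc_interlock);
	sw_alloc_interlock = 0;
}
#endif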
453 
454 /*
455  * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
456  *
457  *	The swap backing for the object is destroyed.  The code is
458  *	designed such that we can reinstantiate it later, but this
459  *	routine is typically called only when the entire object is
460  *	about to be destroyed.
461  *
462  *	This routine may block, but no longer does.
463  *
464  *	The object must be locked or unreferenceable.
465  */
466 
467 static void
468 swap_pager_dealloc(vm_object_t object)
469 {
470 	/*
471 	 * Remove from list right away so lookups will fail if we block for
472 	 * pageout completion.
473 	 */
474 
475 	if (object->handle == NULL) {
476 		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
477 	} else {
478 		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
479 	}
480 
481 	vm_object_pip_wait(object, "swpdea");
482 
483 	/*
484 	 * Free all remaining metadata.  We only bother to free it from
485 	 * the swap meta data.  We do not attempt to free swapblk's still
486 	 * associated with vm_page_t's for this object.  We do not care
487 	 * if paging is still in progress on some objects.
488 	 */
489 	crit_enter();
490 	swp_pager_meta_free_all(object);
491 	crit_exit();
492 }
493 
494 /************************************************************************
495  *			SWAP PAGER BITMAP ROUTINES			*
496  ************************************************************************/
497 
498 /*
499  * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
500  *
501  *	Allocate swap for the requested number of pages.  The starting
502  *	swap block number (a page index) is returned or SWAPBLK_NONE
503  *	if the allocation failed.
504  *
505  *	Also has the side effect of advising that somebody made a mistake
506  *	when they configured swap and didn't configure enough.
507  *
508  *	Must be called at splvm() to avoid races with bitmap frees from
509  *	vm_page_remove() aka swap_pager_page_removed().
510  *
511  *	This routine may not block
512  *	This routine must be called at splvm().
513  */
514 
515 static __inline daddr_t
516 swp_pager_getswapspace(int npages)
517 {
518 	daddr_t blk;
519 
520 	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
521 		if (swap_pager_full != 2) {
522 			kprintf("swap_pager_getswapspace: failed\n");
523 			swap_pager_full = 2;
524 			swap_pager_almost_full = 1;
525 		}
526 	} else {
527 		vm_swap_size -= npages;
528 		swp_sizecheck();
529 	}
530 	return(blk);
531 }
532 
533 /*
534  * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
535  *
536  *	This routine returns the specified swap blocks back to the bitmap.
537  *
538  *	Note:  This routine may not block (it could in the old swap code),
539  *	and through the use of the new blist routines it does not block.
540  *
541  *	We must be called at splvm() to avoid races with bitmap frees from
542  *	vm_page_remove() aka swap_pager_page_removed().
543  *
544  *	This routine may not block
545  *	This routine must be called at splvm().
546  */
547 
548 static __inline void
549 swp_pager_freeswapspace(daddr_t blk, int npages)
550 {
551 	blist_free(swapblist, blk, npages);
552 	vm_swap_size += npages;
553 	swp_sizecheck();
554 }
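
/*
 * Together the two routines above act as a simple allocator over the
 * radix bitmap.  A hypothetical round trip at splvm() (illustration
 * only; the real callers are elsewhere in this file):
 */
#if 0
	daddr_t blk;

	crit_enter();
	blk = swp_pager_getswapspace(4);	/* 4 contiguous pages */
	if (blk != SWAPBLK_NONE)
		swp_pager_freeswapspace(blk, 4);
	crit_exit();
#endif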
555 
556 /*
557  * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
558  *				range within an object.
559  *
560  *	This is a globally accessible routine.
561  *
562  *	This routine removes swapblk assignments from swap metadata.
563  *
564  *	The external callers of this routine typically have already destroyed
565  *	or renamed vm_page_t's associated with this range in the object so
566  *	we should be ok.
567  *
568  *	This routine may be called at any spl.  We up our spl to splvm temporarily
569  *	in order to perform the metadata removal.
570  */
571 
572 void
573 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
574 {
575 	crit_enter();
576 	swp_pager_meta_free(object, start, size);
577 	crit_exit();
578 }
579 
580 /*
581  * SWAP_PAGER_RESERVE() - reserve swap blocks in object
582  *
583  *	Assigns swap blocks to the specified range within the object.  The
584  *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
585  *
586  *	Returns 0 on success, -1 on failure.
587  */
588 
589 int
590 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
591 {
592 	int n = 0;
593 	daddr_t blk = SWAPBLK_NONE;
594 	vm_pindex_t beg = start;	/* save start index */
595 
596 	crit_enter();
597 	while (size) {
598 		if (n == 0) {
599 			n = BLIST_MAX_ALLOC;
600 			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
601 				n >>= 1;
602 				if (n == 0) {
603 					swp_pager_meta_free(object, beg, start - beg);
604 					crit_exit();
605 					return(-1);
606 				}
607 			}
608 		}
609 		swp_pager_meta_build(object, start, blk);
610 		--size;
611 		++start;
612 		++blk;
613 		--n;
614 	}
615 	swp_pager_meta_free(object, start, n);
616 	crit_exit();
617 	return(0);
618 }
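
/*
 * Hypothetical caller, pre-assigning backing store for the first 32
 * pages of an existing object (sketch only):
 */
#if 0
	if (swap_pager_reserve(object, 0, 32) < 0)
		kprintf("swap_pager_reserve: swap space exhausted\n");
#endif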
619 
620 /*
621  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
622  *			and destroy the source.
623  *
624  *	Copy any valid swapblks from the source to the destination.  In
625  *	cases where both the source and destination have a valid swapblk,
626  *	we keep the destination's.
627  *
628  *	This routine is allowed to block.  It may block allocating metadata
629  *	indirectly through swp_pager_meta_build() or if paging is still in
630  *	progress on the source.
631  *
632  *	This routine can be called at any spl
633  *
634  *	XXX vm_page_collapse() kinda expects us not to block because we
635  *	supposedly do not need to allocate memory, but for the moment we
636  *	*may* have to get a little memory from the zone allocator, but
637  *	it is taken from the interrupt memory.  We should be ok.
638  *
639  *	The source object contains no vm_page_t's (which is just as well)
640  *
641  *	The source object is of type OBJT_SWAP.
642  *
643  *	The source and destination objects must be locked or
644  *	inaccessible (XXX are they ?)
645  */
646 
647 void
648 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
649     vm_pindex_t offset, int destroysource)
650 {
651 	vm_pindex_t i;
652 
653 	crit_enter();
654 
655 	/*
656 	 * If destroysource is set, we remove the source object from the
657 	 * swap_pager internal queue now.
658 	 */
659 
660 	if (destroysource) {
661 		if (srcobject->handle == NULL) {
662 			TAILQ_REMOVE(
663 			    &swap_pager_un_object_list,
664 			    srcobject,
665 			    pager_object_list
666 			);
667 		} else {
668 			TAILQ_REMOVE(
669 			    NOBJLIST(srcobject->handle),
670 			    srcobject,
671 			    pager_object_list
672 			);
673 		}
674 	}
675 
676 	/*
677 	 * transfer source to destination.
678 	 */
679 
680 	for (i = 0; i < dstobject->size; ++i) {
681 		daddr_t dstaddr;
682 
683 		/*
684 		 * Locate (without changing) the swapblk on the destination,
685 		 * unless it is invalid in which case free it silently, or
686 		 * if the destination is a resident page, in which case the
687 		 * source is thrown away.
688 		 */
689 
690 		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
691 
692 		if (dstaddr == SWAPBLK_NONE) {
693 			/*
694 			 * Destination has no swapblk and is not resident,
695 			 * copy source.
696 			 */
697 			daddr_t srcaddr;
698 
699 			srcaddr = swp_pager_meta_ctl(
700 			    srcobject,
701 			    i + offset,
702 			    SWM_POP
703 			);
704 
705 			if (srcaddr != SWAPBLK_NONE)
706 				swp_pager_meta_build(dstobject, i, srcaddr);
707 		} else {
708 			/*
709 			 * Destination has valid swapblk or it is represented
710 			 * by a resident page.  We destroy the sourceblock.
711 			 */
712 
713 			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
714 		}
715 	}
716 
717 	/*
718 	 * Free left over swap blocks in source.
719 	 *
720 	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
721 	 * double-remove the object from the swap queues.
722 	 */
723 
724 	if (destroysource) {
725 		swp_pager_meta_free_all(srcobject);
726 		/*
727 		 * Reverting the type is not necessary, the caller is going
728 		 * to destroy srcobject directly, but I'm doing it here
729 		 * for consistency since we've removed the object from its
730 		 * queues.
731 		 */
732 		srcobject->type = OBJT_DEFAULT;
733 	}
734 	crit_exit();
735 }
736 
737 /*
738  * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
739  *				the requested page.
740  *
741  *	We determine whether good backing store exists for the requested
742  *	page and return TRUE if it does, FALSE if it doesn't.
743  *
744  *	If TRUE, we also try to determine how much valid, contiguous backing
745  *	store exists before and after the requested page within a reasonable
746  *	distance.  We do not try to restrict it to the swap device stripe
747  *	(that is handled in getpages/putpages).  It probably isn't worth
748  *	doing here.
749  */
750 
751 boolean_t
752 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
753     int *after)
754 {
755 	daddr_t blk0;
756 
757 	/*
758 	 * do we have good backing store at the requested index ?
759 	 */
760 
761 	crit_enter();
762 	blk0 = swp_pager_meta_ctl(object, pindex, 0);
763 
764 	if (blk0 == SWAPBLK_NONE) {
765 		crit_exit();
766 		if (before)
767 			*before = 0;
768 		if (after)
769 			*after = 0;
770 		return (FALSE);
771 	}
772 
773 	/*
774 	 * find backwards-looking contiguous good backing store
775 	 */
776 
777 	if (before != NULL) {
778 		int i;
779 
780 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
781 			daddr_t blk;
782 
783 			if (i > pindex)
784 				break;
785 			blk = swp_pager_meta_ctl(object, pindex - i, 0);
786 			if (blk != blk0 - i)
787 				break;
788 		}
789 		*before = (i - 1);
790 	}
791 
792 	/*
793 	 * find forward-looking contiguous good backing store
794 	 */
795 
796 	if (after != NULL) {
797 		int i;
798 
799 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
800 			daddr_t blk;
801 
802 			blk = swp_pager_meta_ctl(object, pindex + i, 0);
803 			if (blk != blk0 + i)
804 				break;
805 		}
806 		*after = (i - 1);
807 	}
808 	crit_exit();
809 	return (TRUE);
810 }
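
/*
 * Sketch of a pager client sizing a read cluster around pindex with
 * the routine above (hypothetical variables):
 */
#if 0
	int before, after;

	if (swap_pager_haspage(object, pindex, &before, &after)) {
		/*
		 * Contiguous backing store covers the page range
		 * [pindex - before, pindex + after], so up to
		 * before + after + 1 pages can be read in one I/O.
		 */
	}
#endif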
811 
812 /*
813  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
814  *
815  *	This removes any associated swap backing store, whether valid or
816  *	not, from the page.
817  *
818  *	This routine is typically called when a page is made dirty, at
819  *	which point any associated swap can be freed.  MADV_FREE also
820  *	calls us in a special-case situation
821  *
822  *	NOTE!!!  If the page is clean and the swap was valid, the caller
823  *	should make the page dirty before calling this routine.  This routine
824  *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
825  *	depends on it.
826  *
827  *	This routine may not block
828  *	This routine must be called at splvm()
829  */
830 
831 static void
832 swap_pager_unswapped(vm_page_t m)
833 {
834 	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
835 }
836 
837 /*
838  * SWAP_PAGER_STRATEGY() - read, write, free blocks
839  *
840  *	This implements the vm_pager_strategy() interface to swap and allows
841  *	other parts of the system to directly access swap as backing store
842  *	through vm_objects of type OBJT_SWAP.  This is intended to be a
843  *	cacheless interface ( i.e. caching occurs at higher levels ).
844  *	Therefore we do not maintain any resident pages.  All I/O goes
845  *	directly to and from the swap device.
846  *
847  *	We currently attempt to run I/O synchronously or asynchronously as
848  *	the caller requests.  This isn't perfect because we lose error
849  *	sequencing when we run multiple ops in parallel to satisfy a request.
850  *	But this is swap, so we let it all hang out.
851  */
852 
853 static void
854 swap_pager_strategy(vm_object_t object, struct bio *bio)
855 {
856 	struct buf *bp = bio->bio_buf;
857 	struct bio *nbio;
858 	vm_pindex_t start;
859 	vm_pindex_t biox_blkno = 0;
860 	int count;
861 	char *data;
862 	struct bio *biox;
863 	struct buf *bufx;
864 	struct bio_track *track;
865 
866 	/*
867 	 * tracking for swapdev vnode I/Os
868 	 */
869 	if (bp->b_cmd == BUF_CMD_READ)
870 		track = &swapdev_vp->v_track_read;
871 	else
872 		track = &swapdev_vp->v_track_write;
873 
874 	if (bp->b_bcount & PAGE_MASK) {
875 		bp->b_error = EINVAL;
876 		bp->b_flags |= B_ERROR | B_INVAL;
877 		biodone(bio);
878 		kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
879 			"not page bounded\n",
880 			bp, (long long)bio->bio_offset, (int)bp->b_bcount);
881 		return;
882 	}
883 
884 	/*
885 	 * Clear error indication, initialize page index, count, data pointer.
886 	 */
887 	bp->b_error = 0;
888 	bp->b_flags &= ~B_ERROR;
889 	bp->b_resid = bp->b_bcount;
890 
891 	start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
892 	count = howmany(bp->b_bcount, PAGE_SIZE);
893 	data = bp->b_data;
894 
895 	/*
896 	 * Deal with BUF_CMD_FREEBLKS
897 	 */
898 	if (bp->b_cmd == BUF_CMD_FREEBLKS) {
899 		/*
900 		 * FREE PAGE(s) - destroy underlying swap that is no longer
901 		 *		  needed.
902 		 */
903 		swp_pager_meta_free(object, start, count);
904 		bp->b_resid = 0;
905 		biodone(bio);
906 		return;
907 	}
908 
909 	/*
910 	 * We need to be able to create a new cluster of I/O's.  We cannot
911 	 * use the caller fields of the passed bio so push a new one.
912 	 *
913 	 * Because nbio is just a placeholder for the cluster links,
914 	 * we can biodone() the original bio instead of nbio to make
915 	 * things a bit more efficient.
916 	 */
917 	nbio = push_bio(bio);
918 	nbio->bio_offset = bio->bio_offset;
919 	nbio->bio_caller_info1.cluster_head = NULL;
920 	nbio->bio_caller_info2.cluster_tail = NULL;
921 
922 	biox = NULL;
923 	bufx = NULL;
924 
925 	/*
926 	 * Execute read or write
927 	 */
928 	while (count > 0) {
929 		daddr_t blk;
930 
931 		/*
932 		 * Obtain block.  If block not found and writing, allocate a
933 		 * new block and build it into the object.
934 		 */
935 		blk = swp_pager_meta_ctl(object, start, 0);
936 		if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) {
937 			blk = swp_pager_getswapspace(1);
938 			if (blk == SWAPBLK_NONE) {
939 				bp->b_error = ENOMEM;
940 				bp->b_flags |= B_ERROR;
941 				break;
942 			}
943 			swp_pager_meta_build(object, start, blk);
944 		}
945 
946 		/*
947 		 * Do we have to flush our current collection?  Yes if:
948 		 *
949 		 *	- no swap block at this index
950 		 *	- swap block is not contiguous
951 		 *	- we cross a physical disk boundary in the
952 		 *	  stripe.
953 		 */
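		/*
		 * Worked example with dmmax = 32: a collection that began
		 * at biox_blkno = 31 and holds one page could contiguously
		 * extend to blk = 32, but (31 ^ 32) & dmmax_mask != 0, so
		 * the chain is flushed rather than letting one physical
		 * bio span two interleave stripes.
		 */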
954 		if (
955 		    biox && (biox_blkno + btoc(bufx->b_bcount) != blk ||
956 		     ((biox_blkno ^ blk) & dmmax_mask)
957 		    )
958 		) {
959 			if (bp->b_cmd == BUF_CMD_READ) {
960 				++mycpu->gd_cnt.v_swapin;
961 				mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
962 			} else {
963 				++mycpu->gd_cnt.v_swapout;
964 				mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
965 				bufx->b_dirtyend = bufx->b_bcount;
966 			}
967 
968 			/*
969 			 * Finished with this buf.
970 			 */
971 			KKASSERT(bufx->b_bcount != 0);
972 			if (bufx->b_cmd != BUF_CMD_READ)
973 				bufx->b_dirtyend = bufx->b_bcount;
974 			biox = NULL;
975 			bufx = NULL;
976 		}
977 
978 		/*
979 		 * Add new swapblk to biox, instantiating biox if necessary.
980 		 * Zero-fill reads are able to take a shortcut.
981 		 */
982 		if (blk == SWAPBLK_NONE) {
983 			/*
984 			 * We can only get here if we are reading.  Since
985 			 * we are at splvm() we can safely modify b_resid,
986 			 * even if chain ops are in progress.
987 			 */
988 			bzero(data, PAGE_SIZE);
989 			bp->b_resid -= PAGE_SIZE;
990 		} else {
991 			if (biox == NULL) {
992 				/* XXX chain count > 4, wait to <= 4 */
993 
994 				bufx = getpbuf(NULL);
995 				biox = &bufx->b_bio1;
996 				cluster_append(nbio, bufx);
997 				bufx->b_flags |= (bp->b_flags & B_ORDERED);
998 				bufx->b_cmd = bp->b_cmd;
999 				biox->bio_done = swap_chain_iodone;
1000 				biox->bio_offset = (off_t)blk << PAGE_SHIFT;
1001 				biox->bio_caller_info1.cluster_parent = nbio;
1002 				biox_blkno = blk;
1003 				bufx->b_bcount = 0;
1004 				bufx->b_data = data;
1005 			}
1006 			bufx->b_bcount += PAGE_SIZE;
1007 		}
1008 		--count;
1009 		++start;
1010 		data += PAGE_SIZE;
1011 	}
1012 
1013 	/*
1014 	 *  Flush out last buffer
1015 	 */
1016 	if (biox) {
1017 		if (bufx->b_cmd == BUF_CMD_READ) {
1018 			++mycpu->gd_cnt.v_swapin;
1019 			mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
1020 		} else {
1021 			++mycpu->gd_cnt.v_swapout;
1022 			mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
1023 			bufx->b_dirtyend = bufx->b_bcount;
1024 		}
1025 		KKASSERT(bufx->b_bcount);
1026 		if (bufx->b_cmd != BUF_CMD_READ)
1027 			bufx->b_dirtyend = bufx->b_bcount;
1028 		/* biox, bufx = NULL */
1029 	}
1030 
1031 	/*
1032 	 * Now initiate all the I/O.  Be careful looping on our chain as
1033 	 * I/O's may complete while we are still initiating them.
1034 	 */
1035 	nbio->bio_caller_info2.cluster_tail = NULL;
1036 	bufx = nbio->bio_caller_info1.cluster_head;
1037 
1038 	while (bufx) {
1039 		biox = &bufx->b_bio1;
1040 		BUF_KERNPROC(bufx);
1041 		bufx = bufx->b_cluster_next;
1042 		vn_strategy(swapdev_vp, biox);
1043 	}
1044 
1045 	/*
1046 	 * Completion of the cluster will also call biodone_chain(nbio).
1047 	 * We never call biodone(nbio) so we don't have to worry about
1048 	 * setting up a bio_done callback.  It's handled in the sub-IO.
1049 	 */
1050 	/**/
1051 }
1052 
1053 static void
1054 swap_chain_iodone(struct bio *biox)
1055 {
1056 	struct buf **nextp;
1057 	struct buf *bufx;	/* chained sub-buffer */
1058 	struct bio *nbio;	/* parent nbio with chain glue */
1059 	struct buf *bp;		/* original bp associated with nbio */
1060 	int chain_empty;
1061 
1062 	bufx = biox->bio_buf;
1063 	nbio = biox->bio_caller_info1.cluster_parent;
1064 	bp = nbio->bio_buf;
1065 
1066 	/*
1067 	 * Update the original buffer
1068 	 */
1069 	KKASSERT(bp != NULL);
1070 	if (bufx->b_flags & B_ERROR) {
1071 		atomic_set_int(&bufx->b_flags, B_ERROR);
1072 		bp->b_error = bufx->b_error;
1073 	} else if (bufx->b_resid != 0) {
1074 		atomic_set_int(&bufx->b_flags, B_ERROR);
1075 		bp->b_error = EINVAL;
1076 	} else {
1077 		atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
1078 	}
1079 
1080 	/*
1081 	 * Remove us from the chain.
1082 	 */
1083 	spin_lock_wr(&bp->b_lock.lk_spinlock);
1084 	nextp = &nbio->bio_caller_info1.cluster_head;
1085 	while (*nextp != bufx) {
1086 		KKASSERT(*nextp != NULL);
1087 		nextp = &(*nextp)->b_cluster_next;
1088 	}
1089 	*nextp = bufx->b_cluster_next;
1090 	chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
1091 	spin_unlock_wr(&bp->b_lock.lk_spinlock);
1092 
1093 	/*
1094 	 * Clean up bufx.  If the chain is now empty we finish out
1095 	 * the parent.  Note that we may be racing other completions
1096 	 * so we must use the chain_empty status from above.
1097 	 */
1098 	if (chain_empty) {
1099 		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
1100 			atomic_set_int(&bp->b_flags, B_ERROR);
1101 			bp->b_error = EINVAL;
1102 		}
1103 		biodone_chain(nbio);
1104 	}
1105 	relpbuf(bufx, NULL);
1106 }
1107 
1108 /*
1109  * SWAP_PAGER_GETPAGES() - bring pages in from swap
1110  *
1111  *	Attempt to retrieve (m, count) pages from backing store, but make
1112  *	sure we retrieve at least m[reqpage].  We try to load in as large
1113  *	a chunk surrounding m[reqpage] as is contiguous in swap and which
1114  *	belongs to the same object.
1115  *
1116  *	The code is designed for asynchronous operation and
1117  *	immediate-notification of 'reqpage' but tends not to be
1118  *	used that way.  Please do not optimize-out this algorithmic
1119  *	feature, I intend to improve on it in the future.
1120  *
1121  *	The parent has a single vm_object_pip_add() reference prior to
1122  *	calling us and we should return with the same.
1123  *
1124  *	The parent has BUSY'd the pages.  We should return with 'm'
1125  *	left busy, but the others adjusted.
1126  */
1127 
1128 static int
1129 swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
1130 {
1131 	struct buf *bp;
1132 	struct bio *bio;
1133 	vm_page_t mreq;
1134 	int i;
1135 	int j;
1136 	daddr_t blk;
1137 	vm_offset_t kva;
1138 	vm_pindex_t lastpindex;
1139 
1140 	mreq = m[reqpage];
1141 
1142 	if (mreq->object != object) {
1143 		panic("swap_pager_getpages: object mismatch %p/%p",
1144 		    object,
1145 		    mreq->object
1146 		);
1147 	}
1148 
1149 	/*
1150 	 * Calculate range to retrieve.  The pages have already been assigned
1151 	 * their swapblks.  We require a *contiguous* range that falls entirely
1152 	 * within a single device stripe.   If we do not supply it, bad things
1153 	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
1154 	 * loops are set up such that the case(s) are handled implicitly.
1155 	 *
1156 	 * The swp_*() calls must be made at splvm().  vm_page_free() does
1157 	 * not need to be, but it will go a little faster if it is.
1158 	 */
1159 	crit_enter();
1160 	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
1161 
1162 	for (i = reqpage - 1; i >= 0; --i) {
1163 		daddr_t iblk;
1164 
1165 		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
1166 		if (blk != iblk + (reqpage - i))
1167 			break;
1168 		if ((blk ^ iblk) & dmmax_mask)
1169 			break;
1170 	}
1171 	++i;
1172 
1173 	for (j = reqpage + 1; j < count; ++j) {
1174 		daddr_t jblk;
1175 
1176 		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
1177 		if (blk != jblk - (j - reqpage))
1178 			break;
1179 		if ((blk ^ jblk) & dmmax_mask)
1180 			break;
1181 	}
1182 
1183 	/*
1184 	 * free pages outside our collection range.   Note: we never free
1185 	 * mreq, it must remain busy throughout.
1186 	 */
1187 
1188 	{
1189 		int k;
1190 
1191 		for (k = 0; k < i; ++k)
1192 			vm_page_free(m[k]);
1193 		for (k = j; k < count; ++k)
1194 			vm_page_free(m[k]);
1195 	}
1196 	crit_exit();
1197 
1198 
1199 	/*
1200 	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
1201 	 * still busy, but the others unbusied.
1202 	 */
1203 
1204 	if (blk == SWAPBLK_NONE)
1205 		return(VM_PAGER_FAIL);
1206 
1207 	/*
1208 	 * Get a swap buffer header to perform the IO
1209 	 */
1210 
1211 	bp = getpbuf(&nsw_rcount);
1212 	bio = &bp->b_bio1;
1213 	kva = (vm_offset_t) bp->b_data;
1214 
1215 	/*
1216 	 * map our page(s) into kva for input
1217 	 */
1218 
1219 	pmap_qenter(kva, m + i, j - i);
1220 
1221 	bp->b_data = (caddr_t) kva;
1222 	bp->b_bcount = PAGE_SIZE * (j - i);
1223 	bio->bio_done = swp_pager_async_iodone;
1224 	bio->bio_offset = (off_t)(blk - (reqpage - i)) << PAGE_SHIFT;
1225 	bio->bio_driver_info = (void *)(intptr_t)(reqpage - i);
1226 	bio->bio_caller_info1.index = SWBIO_READ;
1227 
1228 	{
1229 		int k;
1230 
1231 		for (k = i; k < j; ++k) {
1232 			bp->b_xio.xio_pages[k - i] = m[k];
1233 			vm_page_flag_set(m[k], PG_SWAPINPROG);
1234 		}
1235 	}
1236 	bp->b_xio.xio_npages = j - i;
1237 
1238 	mycpu->gd_cnt.v_swapin++;
1239 	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;
1240 
1241 	/*
1242 	 * We still hold the lock on mreq, and our automatic completion routine
1243 	 * does not remove it.
1244 	 */
1245 
1246 	vm_object_pip_add(mreq->object, bp->b_xio.xio_npages);
1247 	lastpindex = m[j-1]->pindex;
1248 
1249 	/*
1250 	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
1251 	 * this point because we automatically release it on completion.
1252 	 * Instead, we look at the one page we are interested in which we
1253 	 * still hold a lock on even through the I/O completion.
1254 	 *
1255 	 * The other pages in our m[] array are also released on completion,
1256 	 * so we cannot assume they are valid anymore either.
1257 	 */
1258 
1259 	bp->b_cmd = BUF_CMD_READ;
1260 	BUF_KERNPROC(bp);
1261 	vn_strategy(swapdev_vp, bio);
1262 
1263 	/*
1264 	 * wait for the page we want to complete.  PG_SWAPINPROG is always
1265 	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
1266 	 * is set in the meta-data.
1267 	 */
1268 
1269 	crit_enter();
1270 
1271 	while ((mreq->flags & PG_SWAPINPROG) != 0) {
1272 		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
1273 		mycpu->gd_cnt.v_intrans++;
1274 		if (tsleep(mreq, 0, "swread", hz*20)) {
1275 			kprintf(
1276 			    "swap_pager: indefinite wait buffer: "
1277 				"offset: %lld, size: %ld\n",
1278 			    (long long)bio->bio_offset,
1279 			    (long)bp->b_bcount
1280 			);
1281 		}
1282 	}
1283 
1284 	crit_exit();
1285 
1286 	/*
1287 	 * mreq is left busied after completion, but all the other pages
1288 	 * are freed.  If we had an unrecoverable read error the page will
1289 	 * not be valid.
1290 	 */
1291 
1292 	if (mreq->valid != VM_PAGE_BITS_ALL) {
1293 		return(VM_PAGER_ERROR);
1294 	} else {
1295 		return(VM_PAGER_OK);
1296 	}
1297 
1298 	/*
1299 	 * A final note: in a low swap situation, we cannot deallocate swap
1300 	 * and mark a page dirty here because the caller is likely to mark
1301 	 * the page clean when we return, causing the page to possibly revert
1302 	 * to all-zero's later.
1303 	 */
1304 }
1305 
1306 /*
1307  *	swap_pager_putpages:
1308  *
1309  *	Assign swap (if necessary) and initiate I/O on the specified pages.
1310  *
1311  *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
1312  *	are automatically converted to SWAP objects.
1313  *
1314  *	In a low memory situation we may block in vn_strategy(), but the new
1315  *	vm_page reservation system coupled with properly written VFS devices
1316  *	should ensure that no low-memory deadlock occurs.  This is an area
1317  *	which needs work.
1318  *
1319  *	The parent has N vm_object_pip_add() references prior to
1320  *	calling us and will remove references for rtvals[] that are
1321  *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
1322  *	completion.
1323  *
1324  *	The parent has soft-busy'd the pages it passes us and will unbusy
1325  *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1326  *	We need to unbusy the rest on I/O completion.
1327  */
1328 void
1329 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
1330 		    boolean_t sync, int *rtvals)
1331 {
1332 	int i;
1333 	int n = 0;
1334 
1335 	if (count && m[0]->object != object) {
1336 		panic("swap_pager_putpages: object mismatch %p/%p",
1337 		    object,
1338 		    m[0]->object
1339 		);
1340 	}
1341 
1342 	/*
1343 	 * Step 1
1344 	 *
1345 	 * Turn object into OBJT_SWAP
1346 	 * check for bogus sysops
1347 	 * force sync if not pageout process
1348 	 */
1349 
1350 	if (object->type != OBJT_SWAP)
1351 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1352 
1353 	if (curthread != pagethread)
1354 		sync = TRUE;
1355 
1356 	/*
1357 	 * Step 2
1358 	 *
1359 	 * Update nsw parameters from swap_async_max sysctl values.
1360 	 * Do not let the sysop crash the machine with bogus numbers.
1361 	 */
1362 
1363 	if (swap_async_max != nsw_wcount_async_max) {
1364 		int n;
1365 
1366 		/*
1367 		 * limit range
1368 		 */
1369 		if ((n = swap_async_max) > nswbuf / 2)
1370 			n = nswbuf / 2;
1371 		if (n < 1)
1372 			n = 1;
1373 		swap_async_max = n;
1374 
1375 		/*
1376 		 * Adjust difference ( if possible ).  If the current async
1377 		 * count is too low, we may not be able to make the adjustment
1378 		 * at this time.
1379 		 */
1380 		crit_enter();
1381 		n -= nsw_wcount_async_max;
1382 		if (nsw_wcount_async + n >= 0) {
1383 			nsw_wcount_async += n;
1384 			nsw_wcount_async_max += n;
1385 			wakeup(&nsw_wcount_async);
1386 		}
1387 		crit_exit();
1388 	}
1389 
1390 	/*
1391 	 * Step 3
1392 	 *
1393 	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
1394 	 * The page is left dirty until the pageout operation completes
1395 	 * successfully.
1396 	 */
1397 
1398 	for (i = 0; i < count; i += n) {
1399 		struct buf *bp;
1400 		struct bio *bio;
1401 		daddr_t blk;
1402 		int j;
1403 
1404 		/*
1405 		 * Maximum I/O size is limited by a number of factors.
1406 		 */
1407 
1408 		n = min(BLIST_MAX_ALLOC, count - i);
1409 		n = min(n, nsw_cluster_max);
1410 
1411 		crit_enter();
1412 
1413 		/*
1414 		 * Get biggest block of swap we can.  If we fail, fall
1415 		 * back and try to allocate a smaller block.  Don't go
1416 		 * overboard trying to allocate space if it would overly
1417 		 * fragment swap.
1418 		 */
1419 		while (
1420 		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
1421 		    n > 4
1422 		) {
1423 			n >>= 1;
1424 		}
1425 		if (blk == SWAPBLK_NONE) {
1426 			for (j = 0; j < n; ++j)
1427 				rtvals[i+j] = VM_PAGER_FAIL;
1428 			crit_exit();
1429 			continue;
1430 		}
1431 
1432 		/*
1433 		 * The I/O we are constructing cannot cross a physical
1434 		 * disk boundary in the swap stripe.  Note: we are still
1435 		 * at splvm().
1436 		 */
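		/*
		 * Worked example with dmmax = 32: blk = 60, n = 8 would
		 * span blocks 60-67 and cross the stripe edge at 64.
		 * j = ((60 + 32) & ~31) - 60 == 4, so blocks 64-67 are
		 * returned to the bitmap and this write is trimmed to
		 * the four pages 60-63.
		 */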
1437 		if ((blk ^ (blk + n)) & dmmax_mask) {
1438 			j = ((blk + dmmax) & dmmax_mask) - blk;
1439 			swp_pager_freeswapspace(blk + j, n - j);
1440 			n = j;
1441 		}
1442 
1443 		/*
1444 		 * All I/O parameters have been satisfied, build the I/O
1445 		 * request and assign the swap space.
1446 		 */
1447 
1448 		if (sync == TRUE)
1449 			bp = getpbuf(&nsw_wcount_sync);
1450 		else
1451 			bp = getpbuf(&nsw_wcount_async);
1452 		bio = &bp->b_bio1;
1453 
1454 		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
1455 
1456 		bp->b_bcount = PAGE_SIZE * n;
1457 		bio->bio_offset = (off_t)blk << PAGE_SHIFT;
1458 
1459 		for (j = 0; j < n; ++j) {
1460 			vm_page_t mreq = m[i+j];
1461 
1462 			swp_pager_meta_build(
1463 			    mreq->object,
1464 			    mreq->pindex,
1465 			    blk + j
1466 			);
1467 			vm_page_dirty(mreq);
1468 			rtvals[i+j] = VM_PAGER_OK;
1469 
1470 			vm_page_flag_set(mreq, PG_SWAPINPROG);
1471 			bp->b_xio.xio_pages[j] = mreq;
1472 		}
1473 		bp->b_xio.xio_npages = n;
1474 
1475 		mycpu->gd_cnt.v_swapout++;
1476 		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;
1477 
1478 		crit_exit();
1479 
1480 		bp->b_dirtyoff = 0;		/* req'd for NFS */
1481 		bp->b_dirtyend = bp->b_bcount;	/* req'd for NFS */
1482 		bp->b_cmd = BUF_CMD_WRITE;
1483 		bio->bio_caller_info1.index = SWBIO_WRITE;
1484 
1485 		/*
1486 		 * asynchronous
1487 		 */
1488 		if (sync == FALSE) {
1489 			bio->bio_done = swp_pager_async_iodone;
1490 			BUF_KERNPROC(bp);
1491 			vn_strategy(swapdev_vp, bio);
1492 
1493 			for (j = 0; j < n; ++j)
1494 				rtvals[i+j] = VM_PAGER_PEND;
1495 			continue;
1496 		}
1497 
1498 		/*
1499 		 * Issue synchronously.
1500 		 *
1501 		 * Wait for the sync I/O to complete, then update rtvals.
1502 		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1503 		 * our async completion routine at the end, thus avoiding a
1504 		 * double-free.
1505 		 */
1506 		bio->bio_caller_info1.index |= SWBIO_SYNC;
1507 		bio->bio_done = biodone_sync;
1508 		bio->bio_flags |= BIO_SYNC;
1509 		vn_strategy(swapdev_vp, bio);
1510 		biowait(bio, "swwrt");
1511 
1512 		for (j = 0; j < n; ++j)
1513 			rtvals[i+j] = VM_PAGER_PEND;
1514 
1515 		/*
1516 		 * Now that we are through with the bp, we can call the
1517 		 * normal async completion, which frees everything up.
1518 		 */
1519 		swp_pager_async_iodone(bio);
1520 	}
1521 }
1522 
1523 void
1524 swap_pager_newswap(void)
1525 {
1526 	swp_sizecheck();
1527 }
1528 
1529 /*
1530  *	swp_pager_async_iodone:
1531  *
1532  *	Completion routine for asynchronous reads and writes from/to swap.
1533  *	Also called manually by synchronous code to finish up a bp.
1534  *
1535  *	For READ operations the pages are PG_BUSY'd; for WRITE operations
1536  *	they are vm_page_t->busy'd.  On READ completion we unbusy (PG_BUSY)
1537  *	all pages except the 'main' request page.  On WRITE completion we
1538  *	unbusy (vm_page_t->busy) all pages ( we can do this because we
1539  *	marked them all VM_PAGER_PEND on return from putpages ).
1540  *
1541  *	This routine may not block.
1542  */
1543 static void
1544 swp_pager_async_iodone(struct bio *bio)
1545 {
1546 	struct buf *bp = bio->bio_buf;
1547 	vm_object_t object = NULL;
1548 	int i;
1549 	int *nswptr;
1550 
1551 	/*
1552 	 * report error
1553 	 */
1554 	if (bp->b_flags & B_ERROR) {
1555 		kprintf(
1556 		    "swap_pager: I/O error - %s failed; offset %lld,"
1557 			" size %ld, error %d\n",
1558 		    ((bio->bio_caller_info1.index & SWBIO_READ) ?
1559 			"pagein" : "pageout"),
1560 		    (long long)bio->bio_offset,
1561 		    (long)bp->b_bcount,
1562 		    bp->b_error
1563 		);
1564 	}
1565 
1566 	/*
1567 	 * set object, raise to splvm().
1568 	 */
1569 	if (bp->b_xio.xio_npages)
1570 		object = bp->b_xio.xio_pages[0]->object;
1571 	crit_enter();
1572 
1573 	/*
1574 	 * remove the mapping for kernel virtual
1575 	 */
1576 	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);
1577 
1578 	/*
1579 	 * cleanup pages.  If an error occurs writing to swap, we are in
1580 	 * very serious trouble.  If it happens to be a disk error, though,
1581 	 * we may be able to recover by reassigning the swap later on.  So
1582 	 * in this case we remove the m->swapblk assignment for the page
1583 	 * but do not free it in the rlist.  The erroneous block(s) are thus
1584 	 * never reallocated as swap.  Redirty the page and continue.
1585 	 */
1586 	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
1587 		vm_page_t m = bp->b_xio.xio_pages[i];
1588 
1589 		vm_page_flag_clear(m, PG_SWAPINPROG);
1590 
1591 		if (bp->b_flags & B_ERROR) {
1592 			/*
1593 			 * If an error occurs I'd love to throw the swapblk
1594 			 * away without freeing it back to swapspace, so it
1595 			 * can never be used again.  But I can't from an
1596 			 * interrupt.
1597 			 */
1598 
1599 			if (bio->bio_caller_info1.index & SWBIO_READ) {
1600 				/*
1601 				 * When reading, reqpage needs to stay
1602 				 * locked for the parent, but all other
1603 				 * pages can be freed.  We still want to
1604 				 * wakeup the parent waiting on the page,
1605 				 * though.  ( also: pg_reqpage can be -1 and
1606 				 * not match anything ).
1607 				 *
1608 				 * We have to wake specifically requested pages
1609 				 * up too because we cleared PG_SWAPINPROG and
1610 				 * someone may be waiting for that.
1611 				 *
1612 				 * NOTE: for reads, m->dirty will probably
1613 				 * be overridden by the original caller of
1614 				 * getpages so don't play cute tricks here.
1615 				 *
1616 				 * NOTE: We can't actually free the page from
1617 				 * here, because this is an interrupt.  It
1618 				 * is not legal to mess with object->memq
1619 				 * from an interrupt.  Deactivate the page
1620 				 * instead.
1621 				 */
1622 
1623 				m->valid = 0;
1624 				vm_page_flag_clear(m, PG_ZERO);
1625 
1626 				/*
1627 				 * bio_driver_info holds the requested page
1628 				 * index.
1629 				 */
1630 				if (i != (int)(intptr_t)bio->bio_driver_info) {
1631 					vm_page_deactivate(m);
1632 					vm_page_wakeup(m);
1633 				} else {
1634 					vm_page_flash(m);
1635 				}
1636 				/*
1637 				 * If i == bp->b_pager.pg_reqpage, do not wake
1638 				 * the page up.  The caller needs to.
1639 				 */
1640 			} else {
1641 				/*
1642 				 * If a write error occurs, reactivate page
1643 				 * so it doesn't clog the inactive list,
1644 				 * then finish the I/O.
1645 				 */
1646 				vm_page_dirty(m);
1647 				vm_page_activate(m);
1648 				vm_page_io_finish(m);
1649 			}
1650 		} else if (bio->bio_caller_info1.index & SWBIO_READ) {
1651 			/*
1652 			 * NOTE: for reads, m->dirty will probably be
1653 			 * overridden by the original caller of getpages so
1654 			 * we cannot set them in order to free the underlying
1655 			 * swap in a low-swap situation.  I don't think we'd
1656 			 * want to do that anyway, but it was an optimization
1657 			 * that existed in the old swapper for a time before
1658 			 * it got ripped out due to precisely this problem.
1659 			 *
1660 			 * clear PG_ZERO in page.
1661 			 *
1662 			 * If not the requested page then deactivate it.
1663 			 *
1664 			 * Note that the requested page, reqpage, is left
1665 			 * busied, but we still have to wake it up.  The
1666 			 * other pages are released (unbusied) by
1667 			 * vm_page_wakeup().  We do not set reqpage's
1668 			 * valid bits here, it is up to the caller.
1669 			 */
1670 
1671 			/*
1672 			 * NOTE: can't call pmap_clear_modify(m) from an
1673 			 * interrupt thread, the pmap code may have to map
1674 			 * non-kernel pmaps and currently asserts the case.
1675 			 */
1676 			/*pmap_clear_modify(m);*/
1677 			m->valid = VM_PAGE_BITS_ALL;
1678 			vm_page_undirty(m);
1679 			vm_page_flag_clear(m, PG_ZERO);
1680 
1681 			/*
1682 			 * We have to wake specifically requested pages
1683 			 * up too because we cleared PG_SWAPINPROG and
1684 			 * could be waiting for it in getpages.  However,
1685 			 * be sure to not unbusy getpages specifically
1686 			 * requested page - getpages expects it to be
1687 			 * left busy.
1688 			 *
1689 			 * bio_driver_info holds the requested page
1690 			 */
1691 			if (i != (int)(intptr_t)bio->bio_driver_info) {
1692 				vm_page_deactivate(m);
1693 				vm_page_wakeup(m);
1694 			} else {
1695 				vm_page_flash(m);
1696 			}
1697 		} else {
1698 			/*
1699 			 * Mark the page clean but do not mess with the
1700 			 * pmap-layer's modified state.  That state should
1701 			 * also be clear since the caller protected the
1702 			 * page VM_PROT_READ, but allow the case.
1703 			 *
1704 			 * We are in an interrupt, avoid pmap operations.
1705 			 *
1706 			 * If we have a severe page deficit, deactivate the
1707 			 * page.  Do not try to cache it (which would also
1708 			 * involve a pmap op), because the page might still
1709 			 * be read-heavy.
1710 			 */
1711 			vm_page_undirty(m);
1712 			vm_page_io_finish(m);
1713 			if (vm_page_count_severe())
1714 				vm_page_deactivate(m);
1715 #if 0
1716 			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
1717 				vm_page_protect(m, VM_PROT_READ);
1718 #endif
1719 		}
1720 	}
1721 
1722 	/*
1723 	 * adjust pip.  NOTE: the original parent may still have its own
1724 	 * pip refs on the object.
1725 	 */
1726 
1727 	if (object)
1728 		vm_object_pip_wakeupn(object, bp->b_xio.xio_npages);
1729 
1730 	/*
1731 	 * Release the physical I/O buffer.
1732 	 *
1733 	 * NOTE: Due to synchronous operations in the write case b_cmd may
1734 	 *	 already be set to BUF_CMD_DONE and BIO_SYNC may have already
1735 	 *	 been cleared.
1736 	 */
1737 	if (bio->bio_caller_info1.index & SWBIO_READ)
1738 		nswptr = &nsw_rcount;
1739 	else if (bio->bio_caller_info1.index & SWBIO_SYNC)
1740 		nswptr = &nsw_wcount_sync;
1741 	else
1742 		nswptr = &nsw_wcount_async;
1743 	bp->b_cmd = BUF_CMD_DONE;
1744 	relpbuf(bp, nswptr);
1745 	crit_exit();
1746 }
1747 
1748 /************************************************************************
1749  *				SWAP META DATA 				*
1750  ************************************************************************
1751  *
1752  *	These routines manipulate the swap metadata stored in the
1753  *	OBJT_SWAP object.  All swp_*() routines must be called at
1754  *	splvm() because swap can be freed up by the low level vm_page
1755  *	code which might be called from interrupts beyond what splbio() covers.
1756  *
1757  *	Swap metadata is implemented with a global hash and not directly
1758  *	linked into the object.  Instead the object simply contains
1759  *	appropriate tracking counters.
1760  */
1761 
1762 /*
1763  * SWP_PAGER_HASH() -	hash swap meta data
1764  *
1765  *	This is an inline helper function which hashes the swapblk given
1766  *	the object and page index.  It returns a pointer to a pointer
1767  *	to the object, or a pointer to a NULL pointer if it could not
1768  *	find a swapblk.
1769  *
1770  *	This routine must be called at splvm().
1771  */
1772 
1773 static __inline struct swblock **
1774 swp_pager_hash(vm_object_t object, vm_pindex_t index)
1775 {
1776 	struct swblock **pswap;
1777 	struct swblock *swap;
1778 
1779 	index &= ~SWAP_META_MASK;
1780 	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
1781 
1782 	while ((swap = *pswap) != NULL) {
1783 		if (swap->swb_object == object &&
1784 		    swap->swb_index == index) {
1786 			break;
1787 		}
1788 		pswap = &swap->swb_hnext;
1789 	}
1790 	return(pswap);
1791 }
1792 
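/*
 * Illustrative sketch (not compiled): the pointer-to-pointer return
 * value lets a caller unlink an emptied swblock from its hash chain
 * without rescanning, as the meta_free routines below do:
 *
 *	pswap = swp_pager_hash(object, index);
 *	if ((swap = *pswap) != NULL && swap->swb_count == 0) {
 *		*pswap = swap->swb_hnext;
 *		zfree(swap_zone, swap);
 *	}
 */
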
1793 /*
1794  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
1795  *
1796  *	We first convert the object to a swap object if it is a default
1797  *	object.
1798  *
1799  *	The specified swapblk is added to the object's swap metadata.  If
1800  *	the swapblk is not valid, it is freed instead.  Any previously
1801  *	assigned swapblk is freed.
1802  *
1803  *	This routine must be called at splvm(), except when used to convert
1804  *	an OBJT_DEFAULT object into an OBJT_SWAP object.
1805  */
1807 
1808 static void
1809 swp_pager_meta_build(vm_object_t object, vm_pindex_t index, daddr_t swapblk)
1810 {
1814 	struct swblock *swap;
1815 	struct swblock **pswap;
1816 
1817 	/*
1818 	 * Convert default object to swap object if necessary
1819 	 */
1820 
1821 	if (object->type != OBJT_SWAP) {
1822 		object->type = OBJT_SWAP;
1823 		object->un_pager.swp.swp_bcount = 0;
1824 
1825 		if (object->handle != NULL) {
1826 			TAILQ_INSERT_TAIL(NOBJLIST(object->handle), object,
1827 			    pager_object_list);
1828 		} else {
1829 			TAILQ_INSERT_TAIL(&swap_pager_un_object_list, object,
1830 			    pager_object_list);
1831 		}
1838 	}
1839 
1840 	/*
1841 	 * Locate the hash entry.  If not found, create one, but if we
1842 	 * aren't adding anything just return.  If the zone allocation fails
1843 	 * we wait for memory and, since the hash chain may have changed, retry.
1844 	 */
1845 
1846 retry:
1847 	pswap = swp_pager_hash(object, index);
1848 
1849 	if ((swap = *pswap) == NULL) {
1850 		int i;
1851 
1852 		if (swapblk == SWAPBLK_NONE)
1853 			return;
1854 
1855 		swap = *pswap = zalloc(swap_zone);
1856 		if (swap == NULL) {
1857 			vm_wait(0);
1858 			goto retry;
1859 		}
1860 		swap->swb_hnext = NULL;
1861 		swap->swb_object = object;
1862 		swap->swb_index = index & ~SWAP_META_MASK;
1863 		swap->swb_count = 0;
1864 
1865 		++object->un_pager.swp.swp_bcount;
1866 
1867 		for (i = 0; i < SWAP_META_PAGES; ++i)
1868 			swap->swb_pages[i] = SWAPBLK_NONE;
1869 	}
1870 
1871 	/*
1872 	 * Delete prior contents of metadata
1873 	 */
1874 
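	/* reduce index to the slot offset within this swblock */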
1875 	index &= SWAP_META_MASK;
1876 
1877 	if (swap->swb_pages[index] != SWAPBLK_NONE) {
1878 		swp_pager_freeswapspace(swap->swb_pages[index], 1);
1879 		--swap->swb_count;
1880 	}
1881 
1882 	/*
1883 	 * Enter block into metadata
1884 	 */
1885 
1886 	swap->swb_pages[index] = swapblk;
1887 	if (swapblk != SWAPBLK_NONE)
1888 		++swap->swb_count;
1889 }
1890 
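/*
 * Illustrative sketch (not compiled): a caller that has allocated a
 * swap block 'blk' for page index 'pindex' of 'object' (hypothetical
 * names) would record the assignment, under the splvm-style protection
 * required above, roughly as:
 *
 *	crit_enter();
 *	swp_pager_meta_build(object, pindex, blk);
 *	crit_exit();
 *
 * Passing SWAPBLK_NONE instead frees any previously assigned block.
 */
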
1891 /*
1892  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
1893  *
1894  *	The requested range of blocks is freed, with any associated swap
1895  *	returned to the swap bitmap.
1896  *
1897  *	This routine will free swap metadata structures as they are cleaned
1898  *	out.  This routine does *NOT* operate on swap metadata associated
1899  *	with resident pages.
1900  *
1901  *	This routine must be called at splvm()
1902  */
1903 
1904 static void
1905 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
1906 {
1907 	if (object->type != OBJT_SWAP)
1908 		return;
1909 
1910 	while (count > 0) {
1911 		struct swblock **pswap;
1912 		struct swblock *swap;
1913 
1914 		pswap = swp_pager_hash(object, index);
1915 
1916 		if ((swap = *pswap) != NULL) {
1917 			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
1918 
1919 			if (v != SWAPBLK_NONE) {
1920 				swp_pager_freeswapspace(v, 1);
1921 				swap->swb_pages[index & SWAP_META_MASK] =
1922 					SWAPBLK_NONE;
1923 				if (--swap->swb_count == 0) {
1924 					*pswap = swap->swb_hnext;
1925 					zfree(swap_zone, swap);
1926 					--object->un_pager.swp.swp_bcount;
1927 				}
1928 			}
1929 			--count;
1930 			++index;
1931 		} else {
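			/*
			 * No swblock covers this index.  Skip ahead to
			 * the next SWAP_META_PAGES-aligned group.
			 */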
1932 			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
1933 			count -= n;
1934 			index += n;
1935 		}
1936 	}
1937 }
1938 
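/*
 * Illustrative sketch (not compiled): releasing the backing store for
 * a contiguous range of an object (hypothetical 'start'/'npages')
 * under the required splvm-style protection:
 *
 *	crit_enter();
 *	swp_pager_meta_free(object, start, npages);
 *	crit_exit();
 */
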
1939 /*
1940  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
1941  *
1942  *	This routine locates and destroys all swap metadata associated with
1943  *	an object.
1944  *
1945  *	This routine must be called at splvm()
1946  */
1947 
1948 static void
1949 swp_pager_meta_free_all(vm_object_t object)
1950 {
1951 	daddr_t index = 0;
1952 
1953 	if (object->type != OBJT_SWAP)
1954 		return;
1955 
1956 	while (object->un_pager.swp.swp_bcount) {
1957 		struct swblock **pswap;
1958 		struct swblock *swap;
1959 
1960 		pswap = swp_pager_hash(object, index);
1961 		if ((swap = *pswap) != NULL) {
1962 			int i;
1963 
1964 			for (i = 0; i < SWAP_META_PAGES; ++i) {
1965 				daddr_t v = swap->swb_pages[i];
1966 				if (v != SWAPBLK_NONE) {
1967 					--swap->swb_count;
1968 					swp_pager_freeswapspace(v, 1);
1969 				}
1970 			}
1971 			if (swap->swb_count != 0)
1972 				panic("swp_pager_meta_free_all: swb_count != 0");
1973 			*pswap = swap->swb_hnext;
1974 			zfree(swap_zone, swap);
1975 			--object->un_pager.swp.swp_bcount;
1976 		}
1977 		index += SWAP_META_PAGES;
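		/* sanity bound: metadata should be exhausted long before this */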
1978 		if (index > 0x20000000)
1979 			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
1980 	}
1981 }
1982 
1983 /*
1984  * SWP_PAGER_META_CTL() - misc control of swap and vm_page_t meta data.
1985  *
1986  *	This routine is capable of looking up, popping, or freeing
1987  *	swapblk assignments in the swap meta data or in the vm_page_t.
1988  *	The routine returns the swapblk being looked up or popped, or
1989  *	SWAPBLK_NONE if the block was freed or was invalid.  This routine
1990  *	will automatically free any invalid meta-data swapblks.
1992  *
1993  *	It is not possible to store invalid swapblks in the swap meta data
1994  *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
1995  *
1996  *	When acting on a busy resident page for which paging is in
1997  *	progress, we have to wait until paging is complete; otherwise
1998  *	we can act on the busy page.
1999  *
2000  *	This routine must be called at splvm().
2001  *
2002  *	SWM_FREE	remove and free swap block from metadata
2003  *	SWM_POP		remove from meta data but do not free - pop it out
2004  */
2005 
2006 static daddr_t
2007 swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
2008 {
2012 	struct swblock **pswap;
2013 	struct swblock *swap;
2014 	daddr_t r1;
2015 
2016 	/*
2017 	 * The meta data only exists if the object is OBJT_SWAP
2018 	 * and even then might not be allocated yet.
2019 	 */
2020 
2021 	if (object->type != OBJT_SWAP)
2022 		return(SWAPBLK_NONE);
2023 
2024 	r1 = SWAPBLK_NONE;
2025 	pswap = swp_pager_hash(object, index);
2026 
2027 	if ((swap = *pswap) != NULL) {
2028 		index &= SWAP_META_MASK;
2029 		r1 = swap->swb_pages[index];
2030 
2031 		if (r1 != SWAPBLK_NONE) {
2032 			if (flags & SWM_FREE) {
2033 				swp_pager_freeswapspace(r1, 1);
2034 				r1 = SWAPBLK_NONE;
2035 			}
2036 			if (flags & (SWM_FREE|SWM_POP)) {
2037 				swap->swb_pages[index] = SWAPBLK_NONE;
2038 				if (--swap->swb_count == 0) {
2039 					*pswap = swap->swb_hnext;
2040 					zfree(swap_zone, swap);
2041 					--object->un_pager.swp.swp_bcount;
2042 				}
2043 			}
2044 		}
2045 	}
2046 	return(r1);
2047 }
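
/*
 * Illustrative sketch (not compiled): typical uses of the control
 * flags.  A plain lookup passes 0; discarding a backing store
 * assignment passes SWM_FREE:
 *
 *	blk = swp_pager_meta_ctl(object, pindex, 0);
 *	(void)swp_pager_meta_ctl(object, pindex, SWM_FREE);
 */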
2048