/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pager.c,v 1.54.2.2 2001/11/18 07:11:00 dillon Exp $
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/dsched.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
extern struct pagerops physpagerops;

static int dead_pager_getpage (vm_object_t, vm_page_t *, int);
static void dead_pager_putpages (vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage (vm_object_t, vm_pindex_t);
static void dead_pager_dealloc (vm_object_t);

/*
 * No requirements.
 */
static int
dead_pager_getpage(vm_object_t obj, vm_page_t *mpp, int seqaccess)
{
	return VM_PAGER_FAIL;
}

/*
 * No requirements.
 */
static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
		    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}
}

/*
 * No requirements.
 */
static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	return FALSE;
}

/*
 * No requirements.
 */
static void
dead_pager_dealloc(vm_object_t object)
{
	KKASSERT(object->swblock_count == 0);
	return;
}

static struct pagerops deadpagerops = {
	dead_pager_dealloc,
	dead_pager_getpage,
	dead_pager_putpages,
	dead_pager_haspage
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&devicepagerops,	/* OBJT_MGTDEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops		/* OBJT_DEAD */
};

int npagers = NELEM(pagertab);
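
/*
 * Dispatch example (illustrative): a VM object's type enum indexes
 * pagertab directly, so a pager operation is invoked as, e.g.,
 *
 *	(*pagertab[object->type]->pgo_dealloc)(object);
 *
 * which is exactly what vm_pager_deallocate() does below.
 */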

/*
 *	Kernel address space for mapping pages.
 *	Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)
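
/*
 * Worked sizing check (illustrative): NPENDINGIO (64) pending requests
 * times a MAXPHYS (64KB) swap cluster each comes to 4MB, so the 8MB
 * reservation above leaves roughly 2x headroom.
 */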

#define BSWHSIZE	16
#define BSWHMASK	(BSWHSIZE - 1)

TAILQ_HEAD(swqueue, buf);

int pager_map_size = PAGER_MAP_SIZE;
struct vm_map pager_map;

static vm_offset_t swapbkva_mem;	/* kva for pbufs with preallocated memory */
static vm_offset_t swapbkva_kva;	/* kva for pbufs with kva reservation only */
static struct swqueue bswlist_mem[BSWHSIZE];	/* with preallocated memory */
static struct swqueue bswlist_kva[BSWHSIZE];	/* with kva */
static struct swqueue bswlist_raw[BSWHSIZE];	/* without kva */
static struct spinlock bswspin_mem[BSWHSIZE];
static struct spinlock bswspin_kva[BSWHSIZE];
static struct spinlock bswspin_raw[BSWHSIZE];
static int pbuf_raw_count;
static int pbuf_kva_count;
static int pbuf_mem_count;

SYSCTL_INT(_vfs, OID_AUTO, pbuf_raw_count, CTLFLAG_RD, &pbuf_raw_count, 0,
    "Kernel pbuf raw reservations");
SYSCTL_INT(_vfs, OID_AUTO, pbuf_kva_count, CTLFLAG_RD, &pbuf_kva_count, 0,
    "Kernel pbuf kva reservations");
SYSCTL_INT(_vfs, OID_AUTO, pbuf_mem_count, CTLFLAG_RD, &pbuf_mem_count, 0,
    "Kernel pbuf mem reservations");

/*
 * Initialize the swap buffer list.
 *
 * Called from the low level boot code only.
 */
static void
vm_pager_init(void *arg __unused)
{
	int i;

	for (i = 0; i < BSWHSIZE; ++i) {
		TAILQ_INIT(&bswlist_mem[i]);
		TAILQ_INIT(&bswlist_kva[i]);
		TAILQ_INIT(&bswlist_raw[i]);
		spin_init(&bswspin_mem[i], "bswmem");
		spin_init(&bswspin_kva[i], "bswkva");
		spin_init(&bswspin_raw[i], "bswraw");
	}
}
SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_SECOND, vm_pager_init, NULL);

/*
 * Called from the low level boot code only.
 */
static
void
vm_pager_bufferinit(void *dummy __unused)
{
	struct buf *bp;
	long i;

	/*
	 * Reserve KVM space for pbuf data.
	 */
	swapbkva_mem = kmem_alloc_pageable(&pager_map, nswbuf_mem * MAXPHYS);
	if (!swapbkva_mem)
		panic("Not enough pager_map VM space for physical buffers");
	swapbkva_kva = kmem_alloc_pageable(&pager_map, nswbuf_kva * MAXPHYS);
	if (!swapbkva_kva)
		panic("Not enough pager_map VM space for physical buffers");

	/*
	 * Initial pbuf setup.
	 *
	 * mem - These pbufs have permanently allocated memory
	 * kva - These pbufs have unallocated kva reservations
	 * raw - These pbufs have no kva reservations
	 */

	/*
	 * Buffers with pre-allocated kernel memory can be convenient for
	 * copyin/copyout because no SMP page invalidation or other pmap
	 * operations are needed.
	 */
#if 1
	bp = swbuf_mem;
	for (i = 0; i < nswbuf_mem; ++i, ++bp) {
		vm_page_t m;
		vm_pindex_t pg;
		int j;

		bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva_mem;
		bp->b_kvasize = MAXPHYS;
		bp->b_swindex = i & BSWHMASK;
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_mem[i & BSWHMASK], bp, b_freelist);
		atomic_add_int(&pbuf_mem_count, 1);
		bp->b_data = bp->b_kvabase;
		bp->b_bcount = MAXPHYS;
		bp->b_xio.xio_pages = bp->b_xio.xio_internal_pages;

		pg = (vm_offset_t)bp->b_kvabase >> PAGE_SHIFT;
		vm_object_hold(&kernel_object);
		for (j = 0; j < MAXPHYS / PAGE_SIZE; ++j) {
			m = vm_page_alloc(&kernel_object, pg, VM_ALLOC_NORMAL |
							      VM_ALLOC_SYSTEM);
			KKASSERT(m != NULL);
			bp->b_xio.xio_internal_pages[j] = m;
			vm_page_wire(m);
			/* early boot, no other cpus running yet */
			pmap_kenter_noinval(pg * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
			cpu_invlpg((void *)(pg * PAGE_SIZE));
			vm_page_wakeup(m);
			++pg;
		}
		vm_object_drop(&kernel_object);
		bp->b_xio.xio_npages = j;
	}
#endif

	/*
	 * Buffers with pre-assigned KVA bases.  The KVA has no memory pages
	 * assigned to it.  Saves the caller from having to reserve KVA for
	 * the page map.
	 */
	bp = swbuf_kva;
	for (i = 0; i < nswbuf_kva; ++i, ++bp) {
		bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva_kva;
		bp->b_kvasize = MAXPHYS;
		bp->b_swindex = i & BSWHMASK;
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_kva[i & BSWHMASK], bp, b_freelist);
		atomic_add_int(&pbuf_kva_count, 1);
	}

	/*
	 * RAW buffers with no KVA mappings.
	 *
	 * NOTE: We use KM_NOTLBSYNC here to reduce unnecessary IPIs
	 *	 during startup, which can really slow down emulated
	 *	 systems.
	 */
	nswbuf_raw = nbuf * 2;
	swbuf_raw = (void *)kmem_alloc3(&kernel_map,
				round_page(nswbuf_raw * sizeof(struct buf)),
				KM_NOTLBSYNC);
	smp_invltlb();
	bp = swbuf_raw;
	for (i = 0; i < nswbuf_raw; ++i, ++bp) {
		bp->b_swindex = i & BSWHMASK;
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_raw[i & BSWHMASK], bp, b_freelist);
		atomic_add_int(&pbuf_raw_count, 1);
	}
}

SYSINIT(do_vmpg, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, vm_pager_bufferinit, NULL);

/*
 * No requirements.
 */
void
vm_pager_deallocate(vm_object_t object)
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}

/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */

/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list) {
		if (object->handle == handle) {
			VM_OBJECT_LOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				vm_object_reference_locked(object);
				VM_OBJECT_UNLOCK(object);
				break;
			}
			VM_OBJECT_UNLOCK(object);
		}
	}
	return (object);
}
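
/*
 * Usage sketch (illustrative only, compiled out): a pager that keeps
 * one VM object per handle would try the lookup first and allocate on
 * a miss.  The list head "example_pager_list" and the allocation step
 * are hypothetical, not part of this file.
 */
#if 0
static struct pagerlst example_pager_list =
	TAILQ_HEAD_INITIALIZER(example_pager_list);

static vm_object_t
example_lookup_ref(void *handle)
{
	vm_object_t object;

	/* caller must hold the lock governing example_pager_list */
	object = vm_pager_object_lookup(&example_pager_list, handle);
	if (object == NULL) {
		/* ... allocate a fresh object and insert it here ... */
	}
	return (object);	/* referenced by the lookup, or NULL */
}
#endif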

/*
 * Initialize a physical buffer.
 *
 * No requirements.
 */
static void
initpbuf(struct buf *bp)
{
	bp->b_qindex = 0;		/* BQUEUE_NONE */
	bp->b_data = bp->b_kvabase;	/* NULL if pbuf sans kva */
	bp->b_flags = B_PAGING;
	bp->b_cmd = BUF_CMD_DONE;
	bp->b_error = 0;
	bp->b_bcount = 0;
	bp->b_bufsize = MAXPHYS;
	initbufbio(bp);
	xio_init(&bp->b_xio);
	BUF_LOCK(bp, LK_EXCLUSIVE);
}

/*
 * Allocate a physical buffer
 *
 * If (pfreecnt != NULL) then *pfreecnt will be decremented on return and
 * the function will block while it is <= 0.
 *
 * Physical buffers can be with or without KVA space reserved.  There
 * are severe limitations on the ones with KVA reserved, and fewer
 * limitations on the ones without.  getpbuf() gets one without,
 * getpbuf_kva() gets one with.
 *
 * No requirements.
 */
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;
	int iter;
	int loops;

	for (;;) {
		while (pfreecnt && *pfreecnt <= 0) {
			tsleep_interlock(pfreecnt, 0);
			if ((int)atomic_fetchadd_int(pfreecnt, 0) <= 0)
				tsleep(pfreecnt, PINTERLOCKED, "wswbuf0", 0);
		}
		if (pbuf_raw_count <= 0) {
			tsleep_interlock(&pbuf_raw_count, 0);
			if ((int)atomic_fetchadd_int(&pbuf_raw_count, 0) <= 0)
				tsleep(&pbuf_raw_count, PINTERLOCKED,
				       "wswbuf1", 0);
			continue;
		}
		iter = mycpuid & BSWHMASK;
		for (loops = BSWHSIZE; loops; --loops) {
			if (TAILQ_FIRST(&bswlist_raw[iter]) == NULL) {
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			spin_lock(&bswspin_raw[iter]);
			if ((bp = TAILQ_FIRST(&bswlist_raw[iter])) == NULL) {
				spin_unlock(&bswspin_raw[iter]);
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			TAILQ_REMOVE(&bswlist_raw[iter], bp, b_freelist);
			atomic_add_int(&pbuf_raw_count, -1);
			if (pfreecnt)
				atomic_add_int(pfreecnt, -1);
			spin_unlock(&bswspin_raw[iter]);
			initpbuf(bp);

			return bp;
		}
	}
	/* not reached */
}
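
/*
 * Usage sketch (illustrative only, compiled out): the typical pbuf
 * life cycle against a private reservation counter.  The counter
 * "example_pbuf_limit" is hypothetical; it would be sized once via
 * pbuf_adjcount() (see below) and then bounds the get/rel cycle.
 */
#if 0
static int example_pbuf_limit;

static void
example_pbuf_io(void)
{
	struct buf *bp;

	/* blocks while example_pbuf_limit <= 0, decrements on success */
	bp = getpbuf(&example_pbuf_limit);

	/* raw pbuf: no KVA, so pages are attached via bp->b_xio */

	relpbuf(bp, &example_pbuf_limit);  /* re-increments, wakes waiters */
}
#endif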

struct buf *
getpbuf_kva(int *pfreecnt)
{
	struct buf *bp;
	int iter;
	int loops;

	for (;;) {
		while (pfreecnt && *pfreecnt <= 0) {
			tsleep_interlock(pfreecnt, 0);
			if ((int)atomic_fetchadd_int(pfreecnt, 0) <= 0)
				tsleep(pfreecnt, PINTERLOCKED, "wswbuf2", 0);
		}
		if (pbuf_kva_count <= 0) {
			tsleep_interlock(&pbuf_kva_count, 0);
			if ((int)atomic_fetchadd_int(&pbuf_kva_count, 0) <= 0)
				tsleep(&pbuf_kva_count, PINTERLOCKED,
				       "wswbuf3", 0);
			continue;
		}
		iter = mycpuid & BSWHMASK;
		for (loops = BSWHSIZE; loops; --loops) {
			if (TAILQ_FIRST(&bswlist_kva[iter]) == NULL) {
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			spin_lock(&bswspin_kva[iter]);
			if ((bp = TAILQ_FIRST(&bswlist_kva[iter])) == NULL) {
				spin_unlock(&bswspin_kva[iter]);
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			TAILQ_REMOVE(&bswlist_kva[iter], bp, b_freelist);
			atomic_add_int(&pbuf_kva_count, -1);
			if (pfreecnt)
				atomic_add_int(pfreecnt, -1);
			spin_unlock(&bswspin_kva[iter]);
			initpbuf(bp);

			return bp;
		}
	}
	/* not reached */
}

/*
 * Allocate a pbuf with kernel memory already preallocated.  Caller must
 * not change the mapping.
 */
struct buf *
getpbuf_mem(int *pfreecnt)
{
	struct buf *bp;
	int iter;
	int loops;

	for (;;) {
		while (pfreecnt && *pfreecnt <= 0) {
			tsleep_interlock(pfreecnt, 0);
			if ((int)atomic_fetchadd_int(pfreecnt, 0) <= 0)
				tsleep(pfreecnt, PINTERLOCKED, "wswbuf4", 0);
		}
		if (pbuf_mem_count <= 0) {
			tsleep_interlock(&pbuf_mem_count, 0);
			if ((int)atomic_fetchadd_int(&pbuf_mem_count, 0) <= 0)
				tsleep(&pbuf_mem_count, PINTERLOCKED,
				       "wswbuf5", 0);
			continue;
		}
		iter = mycpuid & BSWHMASK;
		for (loops = BSWHSIZE; loops; --loops) {
			if (TAILQ_FIRST(&bswlist_mem[iter]) == NULL) {
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			spin_lock(&bswspin_mem[iter]);
			if ((bp = TAILQ_FIRST(&bswlist_mem[iter])) == NULL) {
				spin_unlock(&bswspin_mem[iter]);
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			TAILQ_REMOVE(&bswlist_mem[iter], bp, b_freelist);
			atomic_add_int(&pbuf_mem_count, -1);
			if (pfreecnt)
				atomic_add_int(pfreecnt, -1);
			spin_unlock(&bswspin_mem[iter]);
			initpbuf(bp);

			return bp;
		}
	}
	/* not reached */
}

/*
 * Allocate a physical buffer, if one is available.
 *
 * Note that there is no NULL hack here - all subsystems using this
 * call are required to use a non-NULL pfreecnt.
 *
 * No requirements.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;
	int iter = mycpuid & BSWHMASK;
	int loops;

	for (loops = BSWHSIZE; loops; --loops) {
		if (*pfreecnt <= 0 || TAILQ_FIRST(&bswlist_raw[iter]) == NULL) {
			iter = (iter + 1) & BSWHMASK;
			continue;
		}
		spin_lock(&bswspin_raw[iter]);
		if (*pfreecnt <= 0 ||
		    (bp = TAILQ_FIRST(&bswlist_raw[iter])) == NULL) {
			spin_unlock(&bswspin_raw[iter]);
			iter = (iter + 1) & BSWHMASK;
			continue;
		}
		TAILQ_REMOVE(&bswlist_raw[iter], bp, b_freelist);
		atomic_add_int(&pbuf_raw_count, -1);
		atomic_add_int(pfreecnt, -1);

		spin_unlock(&bswspin_raw[iter]);

		initpbuf(bp);

		return bp;
	}
	return NULL;
}

struct buf *
trypbuf_kva(int *pfreecnt)
{
	struct buf *bp;
	int iter = mycpuid & BSWHMASK;
	int loops;

	for (loops = BSWHSIZE; loops; --loops) {
		if (*pfreecnt <= 0 || TAILQ_FIRST(&bswlist_kva[iter]) == NULL) {
			iter = (iter + 1) & BSWHMASK;
			continue;
		}
		spin_lock(&bswspin_kva[iter]);
		if (*pfreecnt <= 0 ||
		    (bp = TAILQ_FIRST(&bswlist_kva[iter])) == NULL) {
			spin_unlock(&bswspin_kva[iter]);
			iter = (iter + 1) & BSWHMASK;
			continue;
		}
		TAILQ_REMOVE(&bswlist_kva[iter], bp, b_freelist);
		atomic_add_int(&pbuf_kva_count, -1);
		atomic_add_int(pfreecnt, -1);

		spin_unlock(&bswspin_kva[iter]);

		initpbuf(bp);

		return bp;
	}
	return NULL;
}

/*
 * Release a physical buffer
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * No requirements.
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{
	int wake = 0;
	int wake_free = 0;
	int iter = bp->b_swindex;

	KKASSERT(bp->b_flags & B_PAGING);
	dsched_buf_exit(bp);

	BUF_UNLOCK(bp);

	if (bp >= swbuf_mem && bp < &swbuf_mem[nswbuf_mem]) {
		KKASSERT(bp->b_kvabase);
		spin_lock(&bswspin_mem[iter]);
		TAILQ_INSERT_HEAD(&bswlist_mem[iter], bp, b_freelist);
		if (atomic_fetchadd_int(&pbuf_mem_count, 1) == nswbuf_mem / 4)
			wake = 1;
		if (pfreecnt) {
			if (atomic_fetchadd_int(pfreecnt, 1) == 1)
				wake_free = 1;
		}
		spin_unlock(&bswspin_mem[iter]);
		if (wake)
			wakeup(&pbuf_mem_count);
	} else if (bp >= swbuf_kva && bp < &swbuf_kva[nswbuf_kva]) {
		KKASSERT(bp->b_kvabase);
		spin_lock(&bswspin_kva[iter]);
		TAILQ_INSERT_HEAD(&bswlist_kva[iter], bp, b_freelist);
		if (atomic_fetchadd_int(&pbuf_kva_count, 1) == nswbuf_kva / 4)
			wake = 1;
		if (pfreecnt) {
			if (atomic_fetchadd_int(pfreecnt, 1) == 1)
				wake_free = 1;
		}
		spin_unlock(&bswspin_kva[iter]);
		if (wake)
			wakeup(&pbuf_kva_count);
	} else {
		KKASSERT(bp->b_kvabase == NULL);
		KKASSERT(bp >= swbuf_raw && bp < &swbuf_raw[nswbuf_raw]);
		spin_lock(&bswspin_raw[iter]);
		TAILQ_INSERT_HEAD(&bswlist_raw[iter], bp, b_freelist);
		if (atomic_fetchadd_int(&pbuf_raw_count, 1) == nswbuf_raw / 4)
			wake = 1;
		if (pfreecnt) {
			if (atomic_fetchadd_int(pfreecnt, 1) == 1)
				wake_free = 1;
		}
		spin_unlock(&bswspin_raw[iter]);
		if (wake)
			wakeup(&pbuf_raw_count);
	}
	if (wake_free)
		wakeup(pfreecnt);
}

void
pbuf_adjcount(int *pfreecnt, int n)
{
	if (n) {
		atomic_add_int(pfreecnt, n);
		wakeup(pfreecnt);
	}
}

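/*
 * Initialization sketch (illustrative): a subsystem typically grants
 * itself a share of the pbuf pool once at boot, e.g.
 *
 *	pbuf_adjcount(&example_pbuf_limit, nswbuf_kva / 2);
 *
 * after which the getpbuf*()/relpbuf() cycle enforces that limit via
 * the counter ("example_pbuf_limit" is hypothetical, as above).
 */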