xref: /dragonfly/sys/vm/vm_pager.c (revision a1626531)
/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pager.c,v 1.54.2.2 2001/11/18 07:11:00 dillon Exp $
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/dsched.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

static	pgo_dealloc_t		dead_pager_dealloc;
static	pgo_getpage_t		dead_pager_getpage;
static	pgo_putpages_t		dead_pager_putpages;
static	pgo_haspage_t		dead_pager_haspage;

static struct pagerops deadpagerops = {
	.pgo_dealloc =		dead_pager_dealloc,
	.pgo_getpage =		dead_pager_getpage,
	.pgo_putpages =		dead_pager_putpages,
	.pgo_haspage =		dead_pager_haspage
};

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
extern struct pagerops physpagerops;

/*
 * No requirements.
 */
static int
dead_pager_getpage(vm_object_t obj, vm_page_t *mpp, int seqaccess)
{
	return VM_PAGER_FAIL;
}

/*
 * No requirements.
 */
static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
		    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}
}

/*
 * No requirements.
 */
static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	return FALSE;
}

/*
 * No requirements.
 */
static void
dead_pager_dealloc(vm_object_t object)
{
	KKASSERT(object->swblock_count == 0);
}

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&devicepagerops,	/* OBJT_MGTDEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops		/* OBJT_DEAD */
};

int npagers = NELEM(pagertab);

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.  That works
 * out to 64 * 64KB = 4MB, so the 8MB reserved here leaves roughly 2x
 * headroom.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)

#define BSWHSIZE	16
#define BSWHMASK	(BSWHSIZE - 1)
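
/*
 * pbufs of each flavor are spread across BSWHSIZE freelist buckets, each
 * protected by its own spinlock.  Allocators below start at the bucket
 * indexed by (mycpuid & BSWHMASK) and scan forward, so concurrent
 * allocations on different cpus tend to hit different locks.
 */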

TAILQ_HEAD(swqueue, buf);

int pager_map_size = PAGER_MAP_SIZE;
struct vm_map pager_map;

static vm_offset_t swapbkva_mem;	/* pbuf kva, preallocated memory */
static vm_offset_t swapbkva_kva;	/* pbuf kva, unmapped reservation */
static struct swqueue bswlist_mem[BSWHSIZE];	/* with preallocated memory */
static struct swqueue bswlist_kva[BSWHSIZE];	/* with kva */
static struct swqueue bswlist_raw[BSWHSIZE];	/* without kva */
static struct spinlock bswspin_mem[BSWHSIZE];
static struct spinlock bswspin_kva[BSWHSIZE];
static struct spinlock bswspin_raw[BSWHSIZE];
static int pbuf_raw_count;
static int pbuf_kva_count;
static int pbuf_mem_count;

SYSCTL_INT(_vm, OID_AUTO, pbuf_raw_count, CTLFLAG_RD, &pbuf_raw_count, 0,
    "Kernel pbuf raw reservations");
SYSCTL_INT(_vm, OID_AUTO, pbuf_kva_count, CTLFLAG_RD, &pbuf_kva_count, 0,
    "Kernel pbuf kva reservations");
SYSCTL_INT(_vm, OID_AUTO, pbuf_mem_count, CTLFLAG_RD, &pbuf_mem_count, 0,
    "Kernel pbuf mem reservations");

/*
 * Initialize the swap buffer list.
 *
 * Called from the low level boot code only.
 */
static void
vm_pager_init(void *arg __unused)
{
	int i;

	for (i = 0; i < BSWHSIZE; ++i) {
		TAILQ_INIT(&bswlist_mem[i]);
		TAILQ_INIT(&bswlist_kva[i]);
		TAILQ_INIT(&bswlist_raw[i]);
		spin_init(&bswspin_mem[i], "bswmem");
		spin_init(&bswspin_kva[i], "bswkva");
		spin_init(&bswspin_raw[i], "bswraw");
	}
}
SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_SECOND, vm_pager_init, NULL);

/*
 * Called from the low level boot code only.
 */
static
void
vm_pager_bufferinit(void *dummy __unused)
{
	struct buf *bp;
	long i;

	/*
	 * Reserve KVM space for pbuf data.
	 */
	swapbkva_mem = kmem_alloc_pageable(&pager_map, nswbuf_mem * MAXPHYS,
					   VM_SUBSYS_BUFDATA);
	if (!swapbkva_mem)
		panic("Not enough pager_map VM space for physical buffers");
	swapbkva_kva = kmem_alloc_pageable(&pager_map, nswbuf_kva * MAXPHYS,
					   VM_SUBSYS_BUFDATA);
	if (!swapbkva_kva)
		panic("Not enough pager_map VM space for physical buffers");

	/*
	 * Initial pbuf setup.
	 *
	 * mem - These pbufs have permanently allocated memory
	 * kva - These pbufs have unallocated kva reservations
	 * raw - These pbufs have no kva reservations
	 */

	/*
	 * Buffers with pre-allocated kernel memory can be convenient for
	 * copyin/copyout because no SMP page invalidation or other pmap
	 * operations are needed.
	 */
	bp = swbuf_mem;
	for (i = 0; i < nswbuf_mem; ++i, ++bp) {
		vm_page_t m;
		vm_pindex_t pg;
		int j;

		bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva_mem;
		bp->b_kvasize = MAXPHYS;
		bp->b_swindex = i & BSWHMASK;
		bp->b_cpumask = smp_active_mask;
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_mem[i & BSWHMASK], bp, b_freelist);
		atomic_add_int(&pbuf_mem_count, 1);
		bp->b_data = bp->b_kvabase;
		bp->b_bcount = MAXPHYS;
		bp->b_xio.xio_pages = bp->b_xio.xio_internal_pages;

		pg = (vm_offset_t)bp->b_kvabase >> PAGE_SHIFT;
		vm_object_hold(&kernel_object);
		for (j = 0; j < MAXPHYS / PAGE_SIZE; ++j) {
			m = vm_page_alloc(&kernel_object, pg, VM_ALLOC_NORMAL |
							      VM_ALLOC_SYSTEM);
			KKASSERT(m != NULL);
			bp->b_xio.xio_internal_pages[j] = m;
			vm_page_wire(m);
			/* early boot, no other cpus running yet */
			pmap_kenter_noinval(pg * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
			cpu_invlpg((void *)(pg * PAGE_SIZE));
			vm_page_wakeup(m);
			++pg;
		}
		vm_object_drop(&kernel_object);
		bp->b_xio.xio_npages = j;
	}

	/*
	 * Buffers with pre-assigned KVA bases.  The KVA has no memory pages
	 * assigned to it.  Saves the caller from having to reserve KVA for
	 * the page map.
	 */
	bp = swbuf_kva;
	for (i = 0; i < nswbuf_kva; ++i, ++bp) {
		bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva_kva;
		bp->b_kvasize = MAXPHYS;
		bp->b_swindex = i & BSWHMASK;
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_kva[i & BSWHMASK], bp, b_freelist);
		atomic_add_int(&pbuf_kva_count, 1);
	}

	/*
	 * RAW buffers with no KVA mappings.
	 *
	 * NOTE: We use KM_NOTLBSYNC here to reduce unnecessary IPIs
	 *	 during startup, which can really slow down emulated
	 *	 systems.
	 */
	nswbuf_raw = nbuf * 2;
	swbuf_raw = (void *)kmem_alloc3(&kernel_map,
				round_page(nswbuf_raw * sizeof(struct buf)),
				VM_SUBSYS_BUFDATA,
				KM_NOTLBSYNC);
	smp_invltlb();
	bp = swbuf_raw;
	for (i = 0; i < nswbuf_raw; ++i, ++bp) {
		bp->b_swindex = i & BSWHMASK;
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_raw[i & BSWHMASK], bp, b_freelist);
		atomic_add_int(&pbuf_raw_count, 1);
	}
}

SYSINIT(do_vmpg, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, vm_pager_bufferinit, NULL);

/*
 * No requirements.
 */
void
vm_pager_deallocate(vm_object_t object)
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}

/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */
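
/*
 * Those inline wrappers dispatch through pagertab[] the same way
 * vm_pager_deallocate() does above.  A rough sketch of the pattern
 * (illustrative only -- the real definitions live in vm/vm_pager.h and
 * may differ in detail):
 *
 *	static __inline boolean_t
 *	vm_pager_has_page(vm_object_t object, vm_pindex_t pindex)
 *	{
 *		return ((*pagertab[object->type]->pgo_haspage)
 *			(object, pindex));
 *	}
 */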

/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_entry) {
		if (object->handle == handle) {
			VM_OBJECT_LOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				vm_object_reference_locked(object);
				VM_OBJECT_UNLOCK(object);
				break;
			}
			VM_OBJECT_UNLOCK(object);
		}
	}
	return (object);
}
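
/*
 * Illustrative caller sketch (hypothetical names -- real callers must
 * serialize the list with their own lock or token before scanning, and
 * must drop the reference the lookup added when done):
 *
 *	vm_object_t obj;
 *
 *	lwkt_gettoken(&my_pager_token);
 *	obj = vm_pager_object_lookup(&my_pager_object_list, handle);
 *	lwkt_reltoken(&my_pager_token);
 *	if (obj) {
 *		... use obj ...
 *		vm_object_deallocate(obj);
 *	}
 */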

/*
 * Initialize a physical buffer.
 *
 * No requirements.
 */
static void
initpbuf(struct buf *bp)
{
	bp->b_qindex = 0;		/* BQUEUE_NONE */
	bp->b_data = bp->b_kvabase;	/* NULL if pbuf sans kva */
	bp->b_flags = B_PAGING;
	bp->b_cmd = BUF_CMD_DONE;
	bp->b_error = 0;
	bp->b_bcount = 0;
	bp->b_bufsize = MAXPHYS;
	initbufbio(bp);
	xio_init(&bp->b_xio);
	BUF_LOCK(bp, LK_EXCLUSIVE);
}
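
/*
 * Note that the pbuf is handed back to the caller already exclusively
 * locked (the BUF_LOCK above); the matching BUF_UNLOCK is performed by
 * relpbuf().
 */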

/*
 * Allocate a physical buffer
 *
 * If (pfreecnt != NULL) then *pfreecnt will be decremented on return and
 * the function will block while it is <= 0.
 *
 * Physical buffers can be with or without KVA space reserved.  There
 * are severe limitations on the ones with KVA reserved, and fewer
 * limitations on the ones without.  getpbuf() gets one without,
 * getpbuf_kva() gets one with.
 *
 * No requirements.
 */
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;
	int iter;
	int loops;

	for (;;) {
		while (pfreecnt && *pfreecnt <= 0) {
			tsleep_interlock(pfreecnt, 0);
			if ((int)atomic_fetchadd_int(pfreecnt, 0) <= 0)
				tsleep(pfreecnt, PINTERLOCKED, "wswbuf0", 0);
		}
		if (pbuf_raw_count <= 0) {
			tsleep_interlock(&pbuf_raw_count, 0);
			if ((int)atomic_fetchadd_int(&pbuf_raw_count, 0) <= 0)
				tsleep(&pbuf_raw_count, PINTERLOCKED,
				       "wswbuf1", 0);
			continue;
		}
		iter = mycpuid & BSWHMASK;
		for (loops = BSWHSIZE; loops; --loops) {
			if (TAILQ_FIRST(&bswlist_raw[iter]) == NULL) {
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			spin_lock(&bswspin_raw[iter]);
			if ((bp = TAILQ_FIRST(&bswlist_raw[iter])) == NULL) {
				spin_unlock(&bswspin_raw[iter]);
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			TAILQ_REMOVE(&bswlist_raw[iter], bp, b_freelist);
			atomic_add_int(&pbuf_raw_count, -1);
			if (pfreecnt)
				atomic_add_int(pfreecnt, -1);
			spin_unlock(&bswspin_raw[iter]);
			initpbuf(bp);

			return bp;
		}
	}
	/* not reached */
}

struct buf *
getpbuf_kva(int *pfreecnt)
{
	struct buf *bp;
	int iter;
	int loops;

	for (;;) {
		while (pfreecnt && *pfreecnt <= 0) {
			tsleep_interlock(pfreecnt, 0);
			if ((int)atomic_fetchadd_int(pfreecnt, 0) <= 0)
				tsleep(pfreecnt, PINTERLOCKED, "wswbuf2", 0);
		}
		if (pbuf_kva_count <= 0) {
			tsleep_interlock(&pbuf_kva_count, 0);
			if ((int)atomic_fetchadd_int(&pbuf_kva_count, 0) <= 0)
				tsleep(&pbuf_kva_count, PINTERLOCKED,
				       "wswbuf3", 0);
			continue;
		}
		iter = mycpuid & BSWHMASK;
		for (loops = BSWHSIZE; loops; --loops) {
			if (TAILQ_FIRST(&bswlist_kva[iter]) == NULL) {
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			spin_lock(&bswspin_kva[iter]);
			if ((bp = TAILQ_FIRST(&bswlist_kva[iter])) == NULL) {
				spin_unlock(&bswspin_kva[iter]);
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			TAILQ_REMOVE(&bswlist_kva[iter], bp, b_freelist);
			atomic_add_int(&pbuf_kva_count, -1);
			if (pfreecnt)
				atomic_add_int(pfreecnt, -1);
			spin_unlock(&bswspin_kva[iter]);
			initpbuf(bp);

			return bp;
		}
	}
	/* not reached */
}

/*
 * Allocate a pbuf with kernel memory already preallocated.  Caller must
 * not change the mapping.
 */
struct buf *
getpbuf_mem(int *pfreecnt)
{
	struct buf *bp;
	int iter;
	int loops;

	for (;;) {
		while (pfreecnt && *pfreecnt <= 0) {
			tsleep_interlock(pfreecnt, 0);
			if ((int)atomic_fetchadd_int(pfreecnt, 0) <= 0)
				tsleep(pfreecnt, PINTERLOCKED, "wswbuf4", 0);
		}
		if (pbuf_mem_count <= 0) {
			tsleep_interlock(&pbuf_mem_count, 0);
			if ((int)atomic_fetchadd_int(&pbuf_mem_count, 0) <= 0)
				tsleep(&pbuf_mem_count, PINTERLOCKED,
				       "wswbuf5", 0);
			continue;
		}
		iter = mycpuid & BSWHMASK;
		for (loops = BSWHSIZE; loops; --loops) {
			if (TAILQ_FIRST(&bswlist_mem[iter]) == NULL) {
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			spin_lock(&bswspin_mem[iter]);
			if ((bp = TAILQ_FIRST(&bswlist_mem[iter])) == NULL) {
				spin_unlock(&bswspin_mem[iter]);
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			TAILQ_REMOVE(&bswlist_mem[iter], bp, b_freelist);
			atomic_add_int(&pbuf_mem_count, -1);
			if (pfreecnt)
				atomic_add_int(pfreecnt, -1);
			spin_unlock(&bswspin_mem[iter]);
			initpbuf(bp);

			return bp;
		}
	}
	/* not reached */
}

/*
 * Allocate a physical buffer, if one is available.
 *
 * Note that there is no NULL hack here - all subsystems using this
 * call are required to use a non-NULL pfreecnt.
 *
 * No requirements.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;
	int iter = mycpuid & BSWHMASK;
	int loops;

	for (loops = BSWHSIZE; loops; --loops) {
		if (*pfreecnt <= 0 || TAILQ_FIRST(&bswlist_raw[iter]) == NULL) {
			iter = (iter + 1) & BSWHMASK;
			continue;
		}
		spin_lock(&bswspin_raw[iter]);
		if (*pfreecnt <= 0 ||
		    (bp = TAILQ_FIRST(&bswlist_raw[iter])) == NULL) {
			spin_unlock(&bswspin_raw[iter]);
			iter = (iter + 1) & BSWHMASK;
			continue;
		}
		TAILQ_REMOVE(&bswlist_raw[iter], bp, b_freelist);
		atomic_add_int(&pbuf_raw_count, -1);
		atomic_add_int(pfreecnt, -1);

		spin_unlock(&bswspin_raw[iter]);

		initpbuf(bp);

		return bp;
	}
	return NULL;
}

struct buf *
trypbuf_kva(int *pfreecnt)
{
	struct buf *bp;
	int iter = mycpuid & BSWHMASK;
	int loops;

	for (loops = BSWHSIZE; loops; --loops) {
		if (*pfreecnt <= 0 || TAILQ_FIRST(&bswlist_kva[iter]) == NULL) {
			iter = (iter + 1) & BSWHMASK;
			continue;
		}
		spin_lock(&bswspin_kva[iter]);
		if (*pfreecnt <= 0 ||
		    (bp = TAILQ_FIRST(&bswlist_kva[iter])) == NULL) {
			spin_unlock(&bswspin_kva[iter]);
			iter = (iter + 1) & BSWHMASK;
			continue;
		}
		TAILQ_REMOVE(&bswlist_kva[iter], bp, b_freelist);
		atomic_add_int(&pbuf_kva_count, -1);
		atomic_add_int(pfreecnt, -1);

		spin_unlock(&bswspin_kva[iter]);

		initpbuf(bp);

		return bp;
	}
	return NULL;
}
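
/*
 * Typical non-blocking usage (an illustrative sketch; my_pbuf_quota is a
 * hypothetical per-subsystem counter set up with pbuf_adjcount()):
 *
 *	bp = trypbuf_kva(&my_pbuf_quota);
 *	if (bp == NULL)
 *		return (EAGAIN);	(defer the I/O instead of sleeping)
 */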

/*
 * Release a physical buffer
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * No requirements.
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{
	int wake = 0;
	int wake_free = 0;
	int iter = bp->b_swindex;

	KKASSERT(bp->b_flags & B_PAGING);
	dsched_buf_exit(bp);

	BUF_UNLOCK(bp);

	if (bp >= swbuf_mem && bp < &swbuf_mem[nswbuf_mem]) {
		KKASSERT(bp->b_kvabase);
		spin_lock(&bswspin_mem[iter]);
		TAILQ_INSERT_HEAD(&bswlist_mem[iter], bp, b_freelist);
		if (atomic_fetchadd_int(&pbuf_mem_count, 1) == nswbuf_mem / 4)
			wake = 1;
		if (pfreecnt) {
			if (atomic_fetchadd_int(pfreecnt, 1) == 1)
				wake_free = 1;
		}
		spin_unlock(&bswspin_mem[iter]);
		if (wake)
			wakeup(&pbuf_mem_count);
	} else if (bp >= swbuf_kva && bp < &swbuf_kva[nswbuf_kva]) {
		KKASSERT(bp->b_kvabase);
		CPUMASK_ASSZERO(bp->b_cpumask);
		spin_lock(&bswspin_kva[iter]);
		TAILQ_INSERT_HEAD(&bswlist_kva[iter], bp, b_freelist);
		if (atomic_fetchadd_int(&pbuf_kva_count, 1) == nswbuf_kva / 4)
			wake = 1;
		if (pfreecnt) {
			if (atomic_fetchadd_int(pfreecnt, 1) == 1)
				wake_free = 1;
		}
		spin_unlock(&bswspin_kva[iter]);
		if (wake)
			wakeup(&pbuf_kva_count);
	} else {
		KKASSERT(bp->b_kvabase == NULL);
		KKASSERT(bp >= swbuf_raw && bp < &swbuf_raw[nswbuf_raw]);
		CPUMASK_ASSZERO(bp->b_cpumask);
		spin_lock(&bswspin_raw[iter]);
		TAILQ_INSERT_HEAD(&bswlist_raw[iter], bp, b_freelist);
		if (atomic_fetchadd_int(&pbuf_raw_count, 1) == nswbuf_raw / 4)
			wake = 1;
		if (pfreecnt) {
			if (atomic_fetchadd_int(pfreecnt, 1) == 1)
				wake_free = 1;
		}
		spin_unlock(&bswspin_raw[iter]);
		if (wake)
			wakeup(&pbuf_raw_count);
	}
	if (wake_free)
		wakeup(pfreecnt);
}
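
/*
 * Putting it together: an illustrative (hypothetical) pbuf lifecycle,
 * assuming a caller-maintained quota counter initialized via
 * pbuf_adjcount().  Names prefixed my_ are assumptions, not part of
 * this file:
 *
 *	struct buf *bp;
 *
 *	bp = getpbuf_kva(&my_pbuf_quota);	(may sleep; returns locked)
 *	pmap_qenter((vm_offset_t)bp->b_data, my_pages, my_npages);
 *	bp->b_bcount = my_npages * PAGE_SIZE;
 *	... set up b_cmd and the bio, issue the I/O, wait for it ...
 *	pmap_qremove((vm_offset_t)bp->b_data, my_npages);
 *	relpbuf(bp, &my_pbuf_quota);		(unlocks and requeues)
 */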

void
pbuf_adjcount(int *pfreecnt, int n)
{
	if (n) {
		atomic_add_int(pfreecnt, n);
		wakeup(pfreecnt);
	}
}
695