xref: /dragonfly/sys/vm/vm_pager.c (revision 678e8cc6)
/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pager.c,v 1.54.2.2 2001/11/18 07:11:00 dillon Exp $
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/dsched.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/buf2.h>

MALLOC_DEFINE(M_VMPGDATA, "VM pgdata", "XXX: VM pager private data");

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
extern struct pagerops physpagerops;

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

static int dead_pager_getpage (vm_object_t, vm_page_t *, int);
static void dead_pager_putpages (vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage (vm_object_t, vm_pindex_t);
static void dead_pager_dealloc (vm_object_t);

/*
 * No requirements.
 */
static int
dead_pager_getpage(vm_object_t obj, vm_page_t *mpp, int seqaccess)
{
	return VM_PAGER_FAIL;
}

/*
 * No requirements.
 */
static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
		    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}
}

/*
 * No requirements.
 */
static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	return FALSE;
}

/*
 * No requirements.
 */
static void
dead_pager_dealloc(vm_object_t object)
{
	KKASSERT(object->swblock_count == 0);
	return;
}

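/*
 * Positional initializer; the member order must match struct pagerops
 * as declared in <vm/vm_pager.h>.
 */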
static struct pagerops deadpagerops = {
	dead_pager_dealloc,
	dead_pager_getpage,
	dead_pager_putpages,
	dead_pager_haspage
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops		/* OBJT_DEAD */
};

int npagers = NELEM(pagertab);

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)
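
/*
 * Illustrative arithmetic from the XXX note above: NPENDINGIO (64)
 * pending requests * MAXPHYS (64KB) per cluster = 4MB of simultaneous
 * mappings in the worst case, comfortably inside the 8MB reservation.
 */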

TAILQ_HEAD(swqueue, buf);

int pager_map_size = PAGER_MAP_SIZE;
struct vm_map pager_map;

static int bswneeded_raw;
static int bswneeded_kva;
static int nswbuf_raw;
static struct buf *swbuf_raw;
static vm_offset_t swapbkva;		/* swap buffers kva */
static struct swqueue bswlist_raw;	/* without kva */
static struct swqueue bswlist_kva;	/* with kva */
static struct spinlock bswspin = SPINLOCK_INITIALIZER(&bswspin);
static int pbuf_raw_count;
static int pbuf_kva_count;

SYSCTL_INT(_vfs, OID_AUTO, pbuf_raw_count, CTLFLAG_RD, &pbuf_raw_count, 0,
    "Kernel raw address space reservations");
SYSCTL_INT(_vfs, OID_AUTO, pbuf_kva_count, CTLFLAG_RD, &pbuf_kva_count, 0,
    "Kernel virtual address space reservations");

/*
 * Initialize the swap buffer list.
 *
 * Called from the low level boot code only.
 */
static void
vm_pager_init(void *arg __unused)
{
	TAILQ_INIT(&bswlist_raw);
	TAILQ_INIT(&bswlist_kva);
}
SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_SECOND, vm_pager_init, NULL);

/*
 * Called from the low level boot code only.
 */
void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	/*
	 * Reserve KVM space for pbuf data.
	 */
	swapbkva = kmem_alloc_pageable(&pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");

	/*
	 * Initial pbuf setup.  These pbufs have KVA reservations.
	 */
	bp = swbuf;
	for (i = 0; i < nswbuf; ++i, ++bp) {
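		/*
		 * Each pbuf is given a dedicated MAXPHYS-sized KVA window
		 * carved out of the swapbkva reservation made above.
		 */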
		bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva;
		bp->b_kvasize = MAXPHYS;
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_kva, bp, b_freelist);
		++pbuf_kva_count;
	}

	/*
	 * Initial pbuf setup.  These pbufs do not have KVA reservations,
	 * so we can have a lot more of them.  These are typically used
	 * to massage low level buf/bio requests.
	 */
	nswbuf_raw = nbuf * 2;
	swbuf_raw = (void *)kmem_alloc(&kernel_map,
				round_page(nswbuf_raw * sizeof(struct buf)));
	bp = swbuf_raw;
	for (i = 0; i < nswbuf_raw; ++i, ++bp) {
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_raw, bp, b_freelist);
		++pbuf_raw_count;
	}

	/*
	 * Allow the clustering code to use half of our pbufs.
	 */
	cluster_pbuf_freecnt = nswbuf / 2;
}

/*
 * No requirements.
 */
void
vm_pager_deallocate(vm_object_t object)
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}

/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */

#if 0
/*
 *	vm_pager_sync:
 *
 *	Called by pageout daemon before going back to sleep.
 *	Gives pagers a chance to clean up any completed async paging
 *	operations.
 */
void
vm_pager_sync(void)
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_sync != NULL))
			(*(*pgops)->pgo_sync) ();
}

#endif

/*
 * Initialize a physical buffer.
 *
 * No requirements.
 */
static void
initpbuf(struct buf *bp)
{
	bp->b_qindex = 0;		/* BQUEUE_NONE */
	bp->b_data = bp->b_kvabase;	/* NULL if pbuf sans kva */
	bp->b_flags = B_PAGING;
	bp->b_cmd = BUF_CMD_DONE;
	bp->b_error = 0;
	bp->b_bcount = 0;
	bp->b_bufsize = MAXPHYS;
	initbufbio(bp);
	xio_init(&bp->b_xio);
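	/* handed to the caller exclusively locked; relpbuf() unlocks it */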
	BUF_LOCK(bp, LK_EXCLUSIVE);
}

/*
 * Allocate a physical buffer
 *
 *	There are a limited number (nswbuf) of physical buffers.  We need
 *	to make sure that no single subsystem is able to hog all of them,
 *	so each subsystem implements a counter which is typically initialized
 *	to 1/2 nswbuf.  getpbuf() decrements this counter on allocation and
 *	blocks while it is zero; relpbuf() increments it on release.  A
 *	subsystem may initialize the counter to -1 to disable the feature,
 *	but it must still be sure to match up all uses of getpbuf() with
 *	relpbuf() using the same variable.
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 *
 *	Physical buffers can be with or without KVA space reserved.  There
 *	are severe limitations on the ones with KVA reserved, and fewer
 *	limitations on the ones without.  getpbuf() gets one without,
 *	getpbuf_kva() gets one with.
 *
 * No requirements.
 */
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0)
				ssleep(pfreecnt, &bswspin, 0, "wswbuf0", 0);
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist_raw)) != NULL)
			break;
		bswneeded_raw = 1;
		ssleep(&bswneeded_raw, &bswspin, 0, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist_raw, bp, b_freelist);
	--pbuf_raw_count;
	if (pfreecnt)
		--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);
	KKASSERT(dsched_is_clear_buf_priv(bp));

	return (bp);
}

struct buf *
getpbuf_kva(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0)
				ssleep(pfreecnt, &bswspin, 0, "wswbuf0", 0);
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist_kva)) != NULL)
			break;
		bswneeded_kva = 1;
		ssleep(&bswneeded_kva, &bswspin, 0, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist_kva, bp, b_freelist);
	--pbuf_kva_count;
	if (pfreecnt)
		--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);
	KKASSERT(dsched_is_clear_buf_priv(bp));

	return (bp);
}
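
/*
 * Usage sketch (illustrative only; the counter name is hypothetical):
 *
 *	static int mysub_pbuf_freecnt = -1;
 *
 *	mysub_pbuf_freecnt = nswbuf / 2;	(typical subsystem init)
 *	...
 *	bp = getpbuf_kva(&mysub_pbuf_freecnt);	(may sleep until available)
 *	(set up b_data/b_bcount and issue the I/O)
 *	relpbuf(bp, &mysub_pbuf_freecnt);	(must pass the same counter)
 */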

/*
 * Allocate a physical buffer, if one is available.
 *
 *	Note that there is no NULL hack here - all subsystems using this
 *	call understand how to use pfreecnt.
 *
 * No requirements.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist_raw)) == NULL) {
		spin_unlock(&bswspin);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist_raw, bp, b_freelist);
	--pbuf_raw_count;
	--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);

	return bp;
}

struct buf *
trypbuf_kva(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist_kva)) == NULL) {
		spin_unlock(&bswspin);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist_kva, bp, b_freelist);
	--pbuf_kva_count;
	--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);

	return bp;
}
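
/*
 * Non-blocking sketch (illustrative): callers that must not sleep can
 * poll trypbuf()/trypbuf_kva() and defer the work on NULL:
 *
 *	if ((bp = trypbuf_kva(&mysub_pbuf_freecnt)) == NULL)
 *		(requeue or fail the request instead of blocking)
 */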

/*
 * Release a physical buffer
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * No requirements.
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{
	int wake_bsw_kva = 0;
	int wake_bsw_raw = 0;
	int wake_freecnt = 0;

	KKASSERT(bp->b_flags & B_PAGING);
	dsched_exit_buf(bp);

	BUF_UNLOCK(bp);

	spin_lock(&bswspin);
	if (bp->b_kvabase) {
		TAILQ_INSERT_HEAD(&bswlist_kva, bp, b_freelist);
		++pbuf_kva_count;
	} else {
		TAILQ_INSERT_HEAD(&bswlist_raw, bp, b_freelist);
		++pbuf_raw_count;
	}
	if (bswneeded_kva) {
		bswneeded_kva = 0;
		wake_bsw_kva = 1;
	}
	if (bswneeded_raw) {
		bswneeded_raw = 0;
		wake_bsw_raw = 1;
	}
	if (pfreecnt) {
		if (++*pfreecnt == 1)
			wake_freecnt = 1;
	}
	spin_unlock(&bswspin);

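	/*
	 * Wakeups are issued only after the spinlock has been dropped so
	 * awakened threads do not immediately contend on it.
	 */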
	if (wake_bsw_kva)
		wakeup(&bswneeded_kva);
	if (wake_bsw_raw)
		wakeup(&bswneeded_raw);
	if (wake_freecnt)
		wakeup(pfreecnt);
}