/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

struct buf *swbuf;

static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);

static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int *rbehind,
    int *rahead)
{

	return (VM_PAGER_FAIL);
}

static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off, struct ucred *cred)
{

	return (NULL);
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    int flags, int *rtvals)
{
	int i;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev, int *next)
{

	if (prev != NULL)
		*prev = 0;
	if (next != NULL)
		*next = 0;
	return (FALSE);
}

static void
dead_pager_dealloc(vm_object_t object)
{

}

static struct pagerops deadpagerops = {
	.pgo_alloc =	dead_pager_alloc,
	.pgo_dealloc =	dead_pager_dealloc,
	.pgo_getpages =	dead_pager_getpages,
	.pgo_putpages =	dead_pager_putpages,
	.pgo_haspage =	dead_pager_haspage,
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops,		/* OBJT_DEAD */
	&sgpagerops,		/* OBJT_SG */
	&mgtdevicepagerops,	/* OBJT_MGTDEVICE */
};
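
/*
 * Note that pagertab is indexed by objtype_t: the rows above must stay in
 * the same order as the OBJT_* constants defined in vm/vm_object.h, since
 * vm_pager_allocate(), vm_pager_deallocate() and friends dispatch by
 * indexing this table with the object type directly.
 */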

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
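/*
 * With those defaults that works out to 64 * 64KB = 4MB of pbuf KVA;
 * nswbuf itself is sized at boot (see kern_vfs_bio_buffer_alloc() in
 * vfs_bio.c).
 */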
struct mtx_padalign __exclusive_cache_line pbuf_mtx;
static TAILQ_HEAD(swqueue, buf) bswlist;
static int bswneeded;
vm_offset_t swapbkva;		/* swap buffers kva */

void
vm_pager_init(void)
{
	struct pagerops **pgops;

	TAILQ_INIT(&bswlist);
	/*
	 * Initialize known pagers
	 */
	for (pgops = pagertab; pgops < &pagertab[nitems(pagertab)]; pgops++)
		if ((*pgops)->pgo_init != NULL)
			(*(*pgops)->pgo_init)();
}

void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	mtx_init(&pbuf_mtx, "pbuf mutex", NULL, MTX_DEF);
	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		BUF_LOCKINIT(bp);
		LIST_INIT(&bp->b_dep);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_xflags = 0;
	}

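	/*
	 * The per-subsystem quotas below may sum to more than nswbuf;
	 * they are soft limits, and the bswlist free list initialized
	 * above is what ultimately bounds the number of pbufs in use.
	 */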
	cluster_pbuf_freecnt = nswbuf / 2;
	vnode_pbuf_freecnt = nswbuf / 2 + 1;
	vnode_async_pbuf_freecnt = nswbuf / 2;
}

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
	vm_object_t ret;
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops != NULL)
		ret = (*ops->pgo_alloc)(handle, size, prot, off, cred);
	else
		ret = NULL;
	return (ret);
}
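
/*
 * For example, POSIX shared memory obtains an anonymous, swap-backed
 * object with a call along the lines of (illustrative sketch, not a
 * verbatim quote of the shm code):
 *
 *	obj = vm_pager_allocate(OBJT_SWAP, NULL, size, VM_PROT_DEFAULT,
 *	    0, cred);
 *
 * which dispatches through pagertab[OBJT_SWAP] to the swap pager's
 * pgo_alloc method.
 */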

/*
 *	The object must be write-locked.
 */
void
vm_pager_deallocate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	(*pagertab[object->type]->pgo_dealloc)(object);
}

static void
vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
{
#ifdef INVARIANTS

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(count > 0, ("%s: 0 count", __func__));
	/*
	 * All pages must be busied, not mapped, not fully valid,
	 * not dirty and belong to the proper object.
	 */
	for (int i = 0; i < count; i++) {
		if (m[i] == bogus_page)
			continue;
		vm_page_assert_xbusied(m[i]);
		KASSERT(!pmap_page_is_mapped(m[i]),
		    ("%s: page %p is mapped", __func__, m[i]));
		KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
		    ("%s: request for a valid page %p", __func__, m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("%s: page %p is dirty", __func__, m[i]));
		KASSERT(m[i]->object == object,
		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
	}
#endif
}

/*
 * Page in the pages for the object using its associated pager.
 * The requested page must be fully valid on successful return.
 */
int
vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
#ifdef INVARIANTS
	vm_pindex_t pindex = m[0]->pindex;
#endif
	int r;

	vm_pager_assert_in(object, m, count);

	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, rbehind,
	    rahead);
	if (r != VM_PAGER_OK)
		return (r);

	for (int i = 0; i < count; i++) {
		/*
		 * If the pager has replaced a page, assert that it has
		 * updated the array.
		 */
		KASSERT(m[i] == vm_page_lookup(object, pindex++),
		    ("%s: mismatch page %p pindex %ju", __func__,
		    m[i], (uintmax_t)pindex - 1));
		/*
		 * Zero out partially filled data.
		 */
		if (m[i]->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(m[i], TRUE);
	}
	return (VM_PAGER_OK);
}
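
/*
 * A minimal caller sketch (hypothetical; real callers such as the fault
 * handler do considerably more bookkeeping).  The page handed in must be
 * exclusively busied and not fully valid, per vm_pager_assert_in():
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 *	if (m->valid != VM_PAGE_BITS_ALL &&
 *	    vm_pager_get_pages(object, &m, 1, NULL, NULL) != VM_PAGER_OK) {
 *		vm_page_lock(m);
 *		vm_page_free(m);
 *		vm_page_unlock(m);
 *		m = NULL;
 *	}
 *	if (m != NULL)
 *		vm_page_xunbusy(m);
 *	VM_OBJECT_WUNLOCK(object);
 */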

int
vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
{

	vm_pager_assert_in(object, m, count);

	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
	    count, rbehind, rahead, iodone, arg));
}
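
/*
 * The completion callback has the pgo_getpages_iodone_t shape declared
 * in vm/vm_pager.h, i.e. roughly (illustrative):
 *
 *	static void
 *	my_iodone(void *arg, vm_page_t *m, int count, int error)
 *	{
 *		... inspect error, unbusy or queue the pages, wakeup(arg) ...
 *	}
 *
 * and is invoked by the pager once the paging I/O completes, with error
 * set to 0 on success.
 */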

/*
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 */

/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list) {
		if (object->handle == handle) {
			VM_OBJECT_WLOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				break;
			}
			VM_OBJECT_WUNLOCK(object);
		}
	}
	return (object);
}
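
/*
 * The swap and device pagers use this from their pgo_alloc methods to
 * reuse an existing object for a handle instead of creating a duplicate,
 * e.g. (illustrative; the list lock is held by the caller):
 *
 *	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
 */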

/*
 * initialize a physical buffer
 */

/*
 * XXX This probably belongs in vfs_bio.c
 */
static void
initpbuf(struct buf *bp)
{

	KASSERT(bp->b_bufobj == NULL, ("initpbuf with bufobj"));
	KASSERT(bp->b_vp == NULL, ("initpbuf with vp"));
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = 0;	/* On no queue (QUEUE_NONE) */
	bp->b_kvabase = (caddr_t)(MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_data = bp->b_kvabase;
	bp->b_kvasize = MAXPHYS;
	bp->b_flags = 0;
	bp->b_xflags = 0;
	bp->b_ioflags = 0;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
	buf_track(bp, __func__);
}

/*
 * allocate a physical buffer
 *
 *	There are a limited number (nswbuf) of physical buffers.  We need
 *	to make sure that no single subsystem is able to hog all of them,
 *	so each subsystem implements a counter which is typically initialized
 *	to 1/2 nswbuf.  getpbuf() decrements this counter on allocation and
 *	increments it on release, and blocks if the counter hits zero.  A
 *	subsystem may initialize the counter to -1 to disable the feature,
 *	but it must still be sure to match up all uses of getpbuf() with
 *	relpbuf() using the same variable.
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;

	mtx_lock(&pbuf_mtx);
	for (;;) {
		if (pfreecnt != NULL) {
			while (*pfreecnt == 0) {
				msleep(pfreecnt, &pbuf_mtx, PVM, "wswbuf0", 0);
			}
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
			break;

		bswneeded = 1;
		msleep(&bswneeded, &pbuf_mtx, PVM, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	if (pfreecnt != NULL)
		--*pfreecnt;
	mtx_unlock(&pbuf_mtx);
	initpbuf(bp);
	return (bp);
}
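
/*
 * Typical usage pairs getpbuf() and relpbuf() on the same counter,
 * e.g. (hypothetical consumer; the counter name is illustrative):
 *
 *	static int foo_pbuf_freecnt = -1;	(sized to nswbuf / 2 at init)
 *
 *	bp = getpbuf(&foo_pbuf_freecnt);
 *	... fill bp->b_data, issue and await the I/O ...
 *	relpbuf(bp, &foo_pbuf_freecnt);
 */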

/*
 * allocate a physical buffer, if one is available.
 *
 *	Note that there is no NULL hack here - all subsystems using this
 *	call understand how to use pfreecnt.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;

	mtx_lock(&pbuf_mtx);
	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
		mtx_unlock(&pbuf_mtx);
		return (NULL);
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	--*pfreecnt;
	mtx_unlock(&pbuf_mtx);
	initpbuf(bp);
	return (bp);
}

/*
 * release a physical buffer
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	KASSERT(bp->b_vp == NULL, ("relpbuf with vp"));
	KASSERT(bp->b_bufobj == NULL, ("relpbuf with bufobj"));

	buf_track(bp, __func__);
	BUF_UNLOCK(bp);

	mtx_lock(&pbuf_mtx);
	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	if (pfreecnt != NULL) {
		if (++*pfreecnt == 1)
			wakeup(pfreecnt);
	}
	mtx_unlock(&pbuf_mtx);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets the B_PAGING flag to indicate that the vnode is not fully
 * associated with the buffer, i.e. the bp has not been linked into the
 * vnode or ref-counted.
 */
void
pbgetvp(struct vnode *vp, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_bufobj = &vp->v_bufobj;
}

/*
 * Associate a p-buffer with a bufobj.
 *
 * Also sets the B_PAGING flag to indicate that the bufobj is not fully
 * associated with the buffer, i.e. the bp has not been linked into the
 * bufobj or ref-counted.
 */
void
pbgetbo(struct bufobj *bo, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));

	bp->b_flags |= B_PAGING;
	bp->b_bufobj = bo;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(struct buf *bp)
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelvp: pager buf on vnode list."));

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}
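
/*
 * A pager doing synchronous vnode I/O through a pbuf follows roughly
 * this pattern (hypothetical sketch; see the swap and vnode pagers for
 * real examples):
 *
 *	bp = getpbuf(&foo_pbuf_freecnt);
 *	pbgetvp(vp, bp);
 *	bp->b_iocmd = BIO_READ;
 *	... set b_blkno, b_bcount and copy out of / into bp->b_data ...
 *	bstrategy(bp);
 *	bwait(bp, PVM, "foord");
 *	pbrelvp(bp);
 *	relpbuf(bp, &foo_pbuf_freecnt);
 */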

/*
 * Disassociate a p-buffer from a bufobj.
 */
void
pbrelbo(struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelbo: pager buf on vnode list."));

	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}
551