/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

uma_zone_t pbuf_zone;
static int	pbuf_init(void *, int, int);
static int	pbuf_ctor(void *, int, void *, int);
static void	pbuf_dtor(void *, int, void *);

static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);
static void dead_pager_getvp(vm_object_t, struct vnode **, bool *);

static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int *rbehind,
    int *rahead)
{

	return (VM_PAGER_FAIL);
}

static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off, struct ucred *cred)
{

	return (NULL);
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    int flags, int *rtvals)
{
	int i;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev, int *next)
{

	if (prev != NULL)
		*prev = 0;
	if (next != NULL)
		*next = 0;
	return (FALSE);
}

static void
dead_pager_dealloc(vm_object_t object)
{

}

static void
dead_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
{
	/*
	 * For OBJT_DEAD objects, v_writecount was handled in
	 * vnode_pager_dealloc().
	 */
}

static const struct pagerops deadpagerops = {
	.pgo_kvme_type = KVME_TYPE_DEAD,
	.pgo_alloc =	dead_pager_alloc,
	.pgo_dealloc =	dead_pager_dealloc,
	.pgo_getpages =	dead_pager_getpages,
	.pgo_putpages =	dead_pager_putpages,
	.pgo_haspage =	dead_pager_haspage,
	.pgo_getvp =	dead_pager_getvp,
};

const struct pagerops *pagertab[16] __read_mostly = {
	[OBJT_DEFAULT] =	&defaultpagerops,
	[OBJT_SWAP] =		&swappagerops,
	[OBJT_VNODE] =		&vnodepagerops,
	[OBJT_DEVICE] =		&devicepagerops,
	[OBJT_PHYS] =		&physpagerops,
	[OBJT_DEAD] =		&deadpagerops,
	[OBJT_SG] =		&sgpagerops,
	[OBJT_MGTDEVICE] =	&mgtdevicepagerops,
};
static struct mtx pagertab_lock;

void
vm_pager_init(void)
{
	const struct pagerops **pgops;
	int i;

	mtx_init(&pagertab_lock, "dynpag", NULL, MTX_DEF);

	/*
	 * Initialize known pagers
	 */
	for (i = 0; i < OBJT_FIRST_DYN; i++) {
		pgops = &pagertab[i];
		if ((*pgops)->pgo_init != NULL)
			(*(*pgops)->pgo_init)();
	}
}

static int nswbuf_max;

void
vm_pager_bufferinit(void)
{

	/* Main zone for paging bufs. */
	pbuf_zone = uma_zcreate("pbuf",
	    sizeof(struct buf) + PBUF_PAGES * sizeof(vm_page_t),
	    pbuf_ctor, pbuf_dtor, pbuf_init, NULL, UMA_ALIGN_CACHE,
	    UMA_ZONE_NOFREE);
	/* A few subsystems may still use this zone directly, so it needs a limit. */
	nswbuf_max += uma_zone_set_max(pbuf_zone, NSWBUF_MIN);
}

uma_zone_t
pbuf_zsecond_create(const char *name, int max)
{
	uma_zone_t zone;

	zone = uma_zsecond_create(name, pbuf_ctor, pbuf_dtor, NULL, NULL,
	    pbuf_zone);

#ifdef KMSAN
	/*
	 * Shrink the size of the pbuf pools if KMSAN is enabled, otherwise the
	 * shadows of the large KVA allocations eat up too much memory.
	 */
	max /= 3;
#endif

	/*
	 * uma_prealloc() rounds up to items per slab.  If we preallocated
	 * immediately on every pbuf_zsecond_create(), the gap between the
	 * hard limit and the preallocated items could grow too large,
	 * wasting memory.
	 */
	if (nswbuf_max > 0)
		nswbuf_max += uma_zone_set_max(zone, max);
	else
		uma_prealloc(pbuf_zone, uma_zone_set_max(zone, max));

	return (zone);
}
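
/*
 * Example (sketch): a paging subsystem typically creates its own limited
 * sub-zone backed by pbuf_zone and allocates from it.  The zone name and
 * sizing below are illustrative, not taken from a particular caller:
 *
 *	uma_zone_t my_pbuf_zone;	(hypothetical)
 *	struct buf *bp;
 *
 *	my_pbuf_zone = pbuf_zsecond_create("mypbuf", nswbuf / 2);
 *	bp = uma_zalloc(my_pbuf_zone, M_WAITOK);
 *	...do paging I/O with bp...
 *	uma_zfree(my_pbuf_zone, bp);
 */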

static void
pbuf_prealloc(void *arg __unused)
{

	uma_prealloc(pbuf_zone, nswbuf_max);
	nswbuf_max = -1;
}

SYSINIT(pbuf, SI_SUB_KTHREAD_BUF, SI_ORDER_ANY, pbuf_prealloc, NULL);

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
	MPASS(type < nitems(pagertab));

	return ((*pagertab[type]->pgo_alloc)(handle, size, prot, off, cred));
}
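
/*
 * Example (sketch, hypothetical values): a device mmap path might ask the
 * device pager for an object covering "size" bytes at offset "off":
 *
 *	vm_object_t obj;
 *
 *	obj = vm_pager_allocate(OBJT_DEVICE, cdev, size, prot, off,
 *	    curthread->td_ucred);
 *	if (obj == NULL)
 *		...the pager refused the request...
 */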

/*
 *	The object must be locked.
 */
void
vm_pager_deallocate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(object->type < nitems(pagertab));
	(*pagertab[object->type]->pgo_dealloc)(object);
}

static void
vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
{
#ifdef INVARIANTS

	/*
	 * All pages must be consecutive, busied, not mapped, not fully valid,
	 * not dirty and belong to the proper object.  Some pages may be the
	 * bogus page, but the first and last pages must be real ones.
	 */

	VM_OBJECT_ASSERT_UNLOCKED(object);
	VM_OBJECT_ASSERT_PAGING(object);
	KASSERT(count > 0, ("%s: 0 count", __func__));
	for (int i = 0; i < count; i++) {
		if (m[i] == bogus_page) {
			KASSERT(i != 0 && i != count - 1,
			    ("%s: page %d is the bogus page", __func__, i));
			continue;
		}
		vm_page_assert_xbusied(m[i]);
		KASSERT(!pmap_page_is_mapped(m[i]),
		    ("%s: page %p is mapped", __func__, m[i]));
		KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
		    ("%s: request for a valid page %p", __func__, m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("%s: page %p is dirty", __func__, m[i]));
		KASSERT(m[i]->object == object,
		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
		KASSERT(m[i]->pindex == m[0]->pindex + i,
		    ("%s: page %p isn't consecutive", __func__, m[i]));
	}
#endif
}

/*
 * Page in the pages for the object using its associated pager.
 * The requested page must be fully valid on successful return.
 */
int
vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
#ifdef INVARIANTS
	vm_pindex_t pindex = m[0]->pindex;
#endif
	int r;

	MPASS(object->type < nitems(pagertab));
	vm_pager_assert_in(object, m, count);

	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, rbehind,
	    rahead);
	if (r != VM_PAGER_OK)
		return (r);

	for (int i = 0; i < count; i++) {
		/*
		 * If the pager has replaced a page, assert that it has
		 * updated the array.
		 */
#ifdef INVARIANTS
		KASSERT(m[i] == vm_page_relookup(object, pindex++),
		    ("%s: mismatch page %p pindex %ju", __func__,
		    m[i], (uintmax_t)pindex - 1));
#endif

		/*
		 * Zero out partially filled data.
		 */
		if (m[i]->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(m[i], TRUE);
	}
	return (VM_PAGER_OK);
}
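
/*
 * Example (sketch): a typical synchronous single-page read.  Per
 * vm_pager_assert_in(), the page must be exclusively busied, not fully
 * valid and unmapped, and the object must be unlocked around the call:
 *
 *	vm_page_t m;	(already xbusied)
 *	int rv;
 *
 *	rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
 *	if (rv != VM_PAGER_OK)
 *		...free or reactivate the page and fail...
 */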

int
vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
{

	MPASS(object->type < nitems(pagertab));
	vm_pager_assert_in(object, m, count);

	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
	    count, rbehind, rahead, iodone, arg));
}

/*
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 */

/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list) {
		if (object->handle == handle) {
			VM_OBJECT_WLOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				break;
			}
			VM_OBJECT_WUNLOCK(object);
		}
	}
	return (object);
}
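
/*
 * Example (sketch): the device pager looks up an existing object for a
 * handle while holding the mutex protecting its list, roughly:
 *
 *	mtx_lock(&dev_pager_mtx);
 *	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
 *	mtx_unlock(&dev_pager_mtx);
 *	if (object == NULL)
 *		...no live object for this handle; create one...
 */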

int
vm_pager_alloc_dyn_type(struct pagerops *ops, int base_type)
{
	int res;

	mtx_lock(&pagertab_lock);
	MPASS(base_type == -1 ||
	    (base_type >= OBJT_DEFAULT && base_type < nitems(pagertab)));
	for (res = OBJT_FIRST_DYN; res < nitems(pagertab); res++) {
		if (pagertab[res] == NULL)
			break;
	}
	if (res == nitems(pagertab)) {
		mtx_unlock(&pagertab_lock);
		return (-1);
	}
	if (base_type != -1) {
		MPASS(pagertab[base_type] != NULL);
#define	FIX(n)								\
		if (ops->pgo_##n == NULL)				\
			ops->pgo_##n = pagertab[base_type]->pgo_##n
		FIX(init);
		FIX(alloc);
		FIX(dealloc);
		FIX(getpages);
		FIX(getpages_async);
		FIX(putpages);
		FIX(haspage);
		FIX(populate);
		FIX(pageunswapped);
		FIX(update_writecount);
		FIX(release_writecount);
		FIX(set_writeable_dirty);
		FIX(mightbedirty);
		FIX(getvp);
		FIX(freespace);
#undef FIX
	}
	pagertab[res] = ops;	/* XXXKIB should be rel, but acq is too much */
	mtx_unlock(&pagertab_lock);
	return (res);
}
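
/*
 * Example (sketch, hypothetical pager): a module can register its own
 * object type, inheriting every method it does not override from a base
 * pager, and must release the slot on unload:
 *
 *	static struct pagerops my_pagerops = {	(hypothetical)
 *		.pgo_kvme_type = KVME_TYPE_DEVICE,
 *		.pgo_getpages = my_getpages,	(hypothetical)
 *	};
 *	static int my_objtype;
 *
 *	my_objtype = vm_pager_alloc_dyn_type(&my_pagerops, OBJT_DEVICE);
 *	if (my_objtype == -1)
 *		...pagertab is full...
 *	...on unload:
 *	vm_pager_free_dyn_type(my_objtype);
 */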

void
vm_pager_free_dyn_type(objtype_t type)
{
	MPASS(type >= OBJT_FIRST_DYN && type < nitems(pagertab));

	mtx_lock(&pagertab_lock);
	MPASS(pagertab[type] != NULL);
	pagertab[type] = NULL;
	mtx_unlock(&pagertab_lock);
}

static int
pbuf_ctor(void *mem, int size, void *arg, int flags)
{
	struct buf *bp = mem;

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;

	/* copied from initpbuf() */
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = 0;	/* On no queue (QUEUE_NONE) */
	bp->b_data = bp->b_kvabase;
	bp->b_xflags = 0;
	bp->b_flags = B_MAXPHYS;
	bp->b_ioflags = 0;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	BUF_LOCK(bp, LK_EXCLUSIVE, NULL);

	return (0);
}

static void
pbuf_dtor(void *mem, int size, void *arg)
{
	struct buf *bp = mem;

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	BUF_UNLOCK(bp);
}

static int
pbuf_init(void *mem, int size, int flags)
{
	struct buf *bp = mem;

	bp->b_kvabase = (void *)kva_alloc(ptoa(PBUF_PAGES));
	if (bp->b_kvabase == NULL)
		return (ENOMEM);
	bp->b_kvasize = ptoa(PBUF_PAGES);
	BUF_LOCKINIT(bp);
	LIST_INIT(&bp->b_dep);
	bp->b_rcred = bp->b_wcred = NOCRED;
	bp->b_xflags = 0;

	return (0);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets the B_PAGING flag to indicate that the vnode is not fully
 * associated with the buffer, i.e. the bp has not been linked into the
 * vnode or ref-counted.
 */
void
pbgetvp(struct vnode *vp, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_bufobj = &vp->v_bufobj;
}

/*
 * Associate a p-buffer with a bufobj.
 *
 * Also sets the B_PAGING flag to indicate that the bufobj is not fully
 * associated with the buffer, i.e. the bp has not been linked into a
 * vnode or ref-counted.
 */
void
pbgetbo(struct bufobj *bo, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));

	bp->b_flags |= B_PAGING;
	bp->b_bufobj = bo;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(struct buf *bp)
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelvp: pager buf on vnode list."));

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}
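
/*
 * Example (sketch): a typical pbuf I/O lifecycle using the zone and the
 * association helpers above; the wait message is illustrative:
 *
 *	bp = uma_zalloc(pbuf_zone, M_WAITOK);	(returned BUF_LOCK'ed)
 *	pbgetvp(vp, bp);
 *	...set up b_iocmd, b_data, b_bcount, etc. and issue the I/O...
 *	bwait(bp, PVM, "pbufwt");		(hypothetical wmesg)
 *	pbrelvp(bp);
 *	uma_zfree(pbuf_zone, bp);
 */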

/*
 * Disassociate a p-buffer from a bufobj.
 */
void
pbrelbo(struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelbo: pager buf on vnode list."));

	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}

void
vm_object_set_writeable_dirty(vm_object_t object)
{
	pgo_set_writeable_dirty_t *method;

	MPASS(object->type < nitems(pagertab));

	method = pagertab[object->type]->pgo_set_writeable_dirty;
	if (method != NULL)
		method(object);
}

bool
vm_object_mightbedirty(vm_object_t object)
{
	pgo_mightbedirty_t *method;

	MPASS(object->type < nitems(pagertab));

	method = pagertab[object->type]->pgo_mightbedirty;
	if (method == NULL)
		return (false);
	return (method(object));
}

/*
 * Return the kvme type of the given object.
 * If vpp is not NULL, set it to the object's vm_object_vnode() or NULL.
 */
int
vm_object_kvme_type(vm_object_t object, struct vnode **vpp)
{
	VM_OBJECT_ASSERT_LOCKED(object);
	MPASS(object->type < nitems(pagertab));

	if (vpp != NULL)
		*vpp = vm_object_vnode(object);
	return (pagertab[object->type]->pgo_kvme_type);
}