xref: /freebsd/sys/vm/phys_pager.c (revision d411c1d6)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>

/* list of phys pager objects */
static struct pagerlst phys_pager_object_list;
/* protect access to phys_pager_object_list */
static struct mtx phys_pager_mtx;

static int default_phys_pager_getpages(vm_object_t object, vm_page_t *m,
    int count, int *rbehind, int *rahead);
static int default_phys_pager_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last);
static boolean_t default_phys_pager_haspage(vm_object_t object,
    vm_pindex_t pindex, int *before, int *after);
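/*
 * Default ops vector, used by phys_pager_alloc() when a caller does not
 * supply its own: pages are zero-filled on demand and there is no
 * per-object constructor or destructor.
 */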
const struct phys_pager_ops default_phys_pg_ops = {
	.phys_pg_getpages = default_phys_pager_getpages,
	.phys_pg_populate = default_phys_pager_populate,
	.phys_pg_haspage = default_phys_pager_haspage,
	.phys_pg_ctor = NULL,
	.phys_pg_dtor = NULL,
};

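/*
 * Pager initialization: set up the list of named phys pager objects and the
 * mutex protecting it.  Invoked once via the pgo_init hook when the pager
 * subsystem starts.
 */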
static void
phys_pager_init(void)
{

	TAILQ_INIT(&phys_pager_object_list);
	mtx_init(&phys_pager_mtx, "phys_pager list", NULL, MTX_DEF);
}

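/*
 * Allocate an OBJT_PHYS object large enough to cover foff + size.  When a
 * handle is given, the object is shared: it is looked up on (and, if
 * necessary, inserted into) phys_pager_object_list, and an existing object
 * may simply be grown.  Anonymous (NULL handle) objects are always created
 * fresh.  The constructor from "ops" runs only for newly created objects.
 *
 * Illustrative call from a hypothetical consumer ("sc" and "my_pg_ops" are
 * not part of this file):
 *
 *	obj = phys_pager_allocate(sc, &my_pg_ops, sc, size,
 *	    VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
 */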
vm_object_t
phys_pager_allocate(void *handle, const struct phys_pager_ops *ops, void *data,
    vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred)
{
	vm_object_t object, object1;
	vm_pindex_t pindex;
	bool init;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	pindex = OFF_TO_IDX(foff + PAGE_MASK + size);
	init = true;

	if (handle != NULL) {
		mtx_lock(&phys_pager_mtx);
		/*
		 * Look up pager, creating as necessary.
		 */
		object1 = NULL;
		object = vm_pager_object_lookup(&phys_pager_object_list, handle);
		if (object == NULL) {
			/*
			 * Allocate object and associate it with the pager.
			 */
			mtx_unlock(&phys_pager_mtx);
			object1 = vm_object_allocate(OBJT_PHYS, pindex);
			mtx_lock(&phys_pager_mtx);
			object = vm_pager_object_lookup(&phys_pager_object_list,
			    handle);
			if (object != NULL) {
				/*
				 * We raced with another thread while
				 * allocating the object.
				 */
				if (pindex > object->size)
					object->size = pindex;
				init = false;
			} else {
				object = object1;
				object1 = NULL;
				object->handle = handle;
				object->un_pager.phys.ops = ops;
				object->un_pager.phys.data_ptr = data;
				if (ops->phys_pg_populate != NULL)
					vm_object_set_flag(object, OBJ_POPULATE);
				TAILQ_INSERT_TAIL(&phys_pager_object_list,
				    object, pager_object_list);
			}
		} else {
			if (pindex > object->size)
				object->size = pindex;
		}
		mtx_unlock(&phys_pager_mtx);
		vm_object_deallocate(object1);
	} else {
		object = vm_object_allocate(OBJT_PHYS, pindex);
		object->un_pager.phys.ops = ops;
		object->un_pager.phys.data_ptr = data;
		if (ops->phys_pg_populate != NULL)
			vm_object_set_flag(object, OBJ_POPULATE);
	}
	if (init && ops->phys_pg_ctor != NULL)
		ops->phys_pg_ctor(object, prot, foff, cred);

	return (object);
}

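/*
 * pgo_alloc hook: wraps phys_pager_allocate() with the default ops vector
 * and no per-object data.
 */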
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *ucred)
{
	return (phys_pager_allocate(handle, &default_phys_pg_ops, NULL,
	    size, prot, foff, ucred));
}

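/*
 * Tear down a phys pager object: unlink named objects from the global list
 * (temporarily dropping the object lock around the list mutex), mark the
 * object dead, and run the per-object destructor, if any.
 */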
static void
phys_pager_dealloc(vm_object_t object)
{

	if (object->handle != NULL) {
		VM_OBJECT_WUNLOCK(object);
		mtx_lock(&phys_pager_mtx);
		TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
		mtx_unlock(&phys_pager_mtx);
		VM_OBJECT_WLOCK(object);
	}
	object->type = OBJT_DEAD;
	if (object->un_pager.phys.ops->phys_pg_dtor != NULL)
		object->un_pager.phys.ops->phys_pg_dtor(object);
	object->handle = NULL;
}

/*
 * Fill as many pages as vm_fault has allocated for us.
 */
static int
default_phys_pager_getpages(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead)
{
	int i;

	for (i = 0; i < count; i++) {
		if (vm_page_none_valid(m[i])) {
			if ((m[i]->flags & PG_ZERO) == 0)
				pmap_zero_page(m[i]);
			vm_page_valid(m[i]);
		}
		KASSERT(vm_page_all_valid(m[i]),
		    ("phys_pager_getpages: partially valid page %p", m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("phys_pager_getpages: dirty page %p", m[i]));
	}
	if (rbehind)
		*rbehind = 0;
	if (rahead)
		*rahead = 0;
	return (VM_PAGER_OK);
}

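/*
 * The pgo_getpages, pgo_populate and pgo_haspage hooks below simply dispatch
 * to the ops vector installed on the object, so consumers that supplied
 * their own ops can override the default behavior.
 */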
static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
	return (object->un_pager.phys.ops->phys_pg_getpages(object, m,
	    count, rbehind, rahead));
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define PHYSCLUSTER 1024
#endif
static int phys_pager_cluster = PHYSCLUSTER;
SYSCTL_INT(_vm, OID_AUTO, phys_pager_cluster, CTLFLAG_RWTUN,
    &phys_pager_cluster, 0,
    "prefault window size for phys pager");

/*
 * Max hint to vm_page_alloc() about the further allocation needs
 * inside the phys_pager_populate() loop.  The number of bits used to
 * implement VM_ALLOC_COUNT() determines the hard limit on this value.
 * That limit is currently 65535.
 */
#define	PHYSALLOC	16

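/*
 * Default populate method: grab and zero-fill every page of the cluster
 * containing the faulting index, clamped both to the object size and to the
 * range the fault handler is willing to map, and report the resulting range
 * back through *first and *last.
 */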
static int
default_phys_pager_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type __unused, vm_prot_t max_prot __unused, vm_pindex_t *first,
    vm_pindex_t *last)
{
	vm_page_t m;
	vm_pindex_t base, end, i;
	int ahead;

	base = rounddown(pidx, phys_pager_cluster);
	end = base + phys_pager_cluster - 1;
	if (end >= object->size)
		end = object->size - 1;
	if (*first > base)
		base = *first;
	if (end > *last)
		end = *last;
	*first = base;
	*last = end;

	for (i = base; i <= end; i++) {
		ahead = MIN(end - i, PHYSALLOC);
		m = vm_page_grab(object, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_COUNT(ahead));
		if (!vm_page_all_valid(m))
			vm_page_zero_invalid(m, TRUE);
		KASSERT(m->dirty == 0,
		    ("phys_pager_populate: dirty page %p", m));
	}
	return (VM_PAGER_OK);
}

static int
phys_pager_populate(vm_object_t object, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	return (object->un_pager.phys.ops->phys_pg_populate(object, pidx,
	    fault_type, max_prot, first, last));
}

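/*
 * A phys object's pages are never paged out, so putpages should never be
 * reached; treat any call as a bug.
 */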
static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
    int *rtvals)
{

	panic("phys_pager_putpage called");
}

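/*
 * Default haspage method: every page of a phys object can be materialized
 * on demand, so report the remainder of the surrounding cluster as available
 * before and after the requested index.
 */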
static boolean_t
default_phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	vm_pindex_t base, end;

	base = rounddown(pindex, phys_pager_cluster);
	end = base + phys_pager_cluster - 1;
	if (before != NULL)
		*before = pindex - base;
	if (after != NULL)
		*after = end - pindex;
	return (TRUE);
}

static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	return (object->un_pager.phys.ops->phys_pg_haspage(object, pindex,
	    before, after));
}

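/*
 * Pager method table through which the VM system invokes the phys pager.
 */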
const struct pagerops physpagerops = {
	.pgo_kvme_type = KVME_TYPE_PHYS,
	.pgo_init =	phys_pager_init,
	.pgo_alloc =	phys_pager_alloc,
	.pgo_dealloc = 	phys_pager_dealloc,
	.pgo_getpages =	phys_pager_getpages,
	.pgo_putpages =	phys_pager_putpages,
	.pgo_haspage =	phys_pager_haspage,
	.pgo_populate =	phys_pager_populate,
};