xref: /dragonfly/sys/vm/device_pager.c (revision db299a73)
/*
 * (MPSAFE)
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)device_pager.c	8.1 (Berkeley) 6/11/93
 * $FreeBSD: src/sys/vm/device_pager.c,v 1.46.2.1 2000/08/02 21:54:37 peter Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/thread2.h>
#include <sys/mutex2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_page2.h>

static void dev_pager_dealloc (vm_object_t);
static int dev_pager_getpage (vm_object_t, vm_page_t *, int);
static void dev_pager_putpages (vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dev_pager_haspage (vm_object_t, vm_pindex_t);

/* list of fake pages used for device mappings */
static TAILQ_HEAD(, vm_page) dev_freepages_list =
		TAILQ_HEAD_INITIALIZER(dev_freepages_list);
static MALLOC_DEFINE(M_FICTITIOUS_PAGES, "device-mapped pages",
		"Device mapped pages");

static vm_page_t dev_pager_getfake (vm_paddr_t, int);
static void dev_pager_putfake (vm_page_t);

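/*
 * Pager operations vector installed for device-backed VM objects.
 */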
struct pagerops devicepagerops = {
	dev_pager_dealloc,
	dev_pager_getpage,
	dev_pager_putpages,
	dev_pager_haspage
};

/* list of device pager objects */
static struct pagerlst dev_pager_object_list =
		TAILQ_HEAD_INITIALIZER(dev_pager_object_list);
/* protect list manipulation */
static struct mtx dev_pager_mtx = MTX_INITIALIZER;

static int old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *pg_color);
static void old_dev_pager_dtor(void *handle);
static int old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres);

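/*
 * Legacy constructor/destructor/fault methods, used when a device is
 * mapped through dev_pager_alloc() rather than supplying its own
 * cdev_pager_ops.
 */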
static struct cdev_pager_ops old_dev_pager_ops = {
	.cdev_pg_ctor = old_dev_pager_ctor,
	.cdev_pg_dtor = old_dev_pager_dtor,
	.cdev_pg_fault = old_dev_pager_fault
};

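/*
 * Look up the device pager object associated with the given handle,
 * if one exists.  The object list is searched under dev_pager_mtx.
 */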
vm_object_t
cdev_pager_lookup(void *handle)
{
	vm_object_t object;

	mtx_lock(&dev_pager_mtx);
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	mtx_unlock(&dev_pager_mtx);
	return (object);
}

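/*
 * Create a device pager object for the given handle, or add a reference
 * to the existing one.  The offset must be page-aligned and the size is
 * rounded up to a page boundary; the supplied constructor is called
 * before the object list is consulted.
 */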
vm_object_t
cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
	vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred)
{
	cdev_t dev;
	vm_object_t object;
	u_short color;

	dev = handle;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	size = round_page64(size);

	if (ops->cdev_pg_ctor(handle, size, prot, foff, cred, &color) != 0)
		return (NULL);

	/*
	 * Look up pager, creating as necessary.
	 */
	mtx_lock(&dev_pager_mtx);
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	if (object == NULL) {
		/*
		 * Allocate object and associate it with the pager.
		 */
		object = vm_object_allocate_hold(tp,
						 OFF_TO_IDX(foff + size));
		object->handle = handle;
		object->un_pager.devp.ops = ops;
		object->un_pager.devp.dev = handle;
		TAILQ_INIT(&object->un_pager.devp.devp_pglist);
		dev->si_object = object;

		TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
		    pager_object_list);

		vm_object_drop(object);
	} else {
		/*
		 * Gain a reference to the object.
		 */
		vm_object_hold(object);
		vm_object_reference_locked(object);
		if (OFF_TO_IDX(foff + size) > object->size)
			object->size = OFF_TO_IDX(foff + size);
		vm_object_drop(object);
	}
	mtx_unlock(&dev_pager_mtx);

	return (object);
}

/*
 * No requirements.
 */
vm_object_t
dev_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t foff)
{
	return (cdev_pager_allocate(handle, OBJT_DEVICE, &old_dev_pager_ops,
	    size, prot, foff, NULL));
}

/*
 * Release a device pager page: unmap and remove it for managed device
 * objects, or free the fake page for unmanaged device objects.  XXX
 */
void
cdev_pager_free_page(vm_object_t object, vm_page_t m)
{
	if (object->type == OBJT_MGTDEVICE) {
		KKASSERT((m->flags & PG_FICTITIOUS) != 0);
		pmap_page_protect(m, VM_PROT_NONE);
		vm_page_remove(m);
		vm_page_wakeup(m);
	} else if (object->type == OBJT_DEVICE) {
		TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
		dev_pager_putfake(m);
	}
}

/*
 * No requirements.
 */
static void
dev_pager_dealloc(vm_object_t object)
{
	vm_page_t m;

	mtx_lock(&dev_pager_mtx);
	object->un_pager.devp.ops->cdev_pg_dtor(object->un_pager.devp.dev);

	TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);

	if (object->type == OBJT_DEVICE) {
		/*
		 * Free up our fake pages.
		 */
		while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist)) !=
		       NULL) {
			TAILQ_REMOVE(&object->un_pager.devp.devp_pglist,
				     m, pageq);
			dev_pager_putfake(m);
		}
	}
	mtx_unlock(&dev_pager_mtx);
}

/*
 * No requirements.
 */
static int
dev_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
	vm_page_t page;
	int error;

	mtx_lock(&dev_pager_mtx);

	page = *mpp;

	error = object->un_pager.devp.ops->cdev_pg_fault(object,
	    IDX_TO_OFF(page->pindex), PROT_READ, mpp);

	mtx_unlock(&dev_pager_mtx);

	return (error);
}

/*
 * No requirements.
 */
static void
dev_pager_putpages(vm_object_t object, vm_page_t *m,
		   int count, int sync, int *rtvals)
{
	panic("dev_pager_putpage called");
}

/*
 * No requirements.
 */
static boolean_t
dev_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	return (TRUE);
}

/*
 * The caller must hold dev_pager_mtx
 */
static vm_page_t
dev_pager_getfake(vm_paddr_t paddr, int pat_mode)
{
	vm_page_t m;

	m = kmalloc(sizeof(*m), M_FICTITIOUS_PAGES, M_WAITOK|M_ZERO);

	pmap_page_init(m);

	m->flags = PG_BUSY | PG_FICTITIOUS;
	m->valid = VM_PAGE_BITS_ALL;
	m->dirty = 0;
	m->busy = 0;
	m->queue = PQ_NONE;
	m->object = NULL;

	m->wire_count = 1;
	m->hold_count = 0;
	m->phys_addr = paddr;
	m->pat_mode = pat_mode;

	return (m);
}

/*
 * Synthesized VM pages must be structurally stable for lockless lookups to
 * work properly.
 *
 * The caller must hold dev_pager_mtx
 */
static void
dev_pager_putfake(vm_page_t m)
{
	if (!(m->flags & PG_FICTITIOUS))
		panic("dev_pager_putfake: bad page");
	KKASSERT(m->object == NULL);
	KKASSERT(m->hold_count == 0);
	kfree(m, M_FICTITIOUS_PAGES);
}

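/*
 * Default constructor: verify that every page in the requested range can
 * be mapped with the desired protection by probing the device's mmap
 * routine.
 */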
static int
old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	unsigned int npages;
	vm_offset_t off;
	cdev_t dev;

	dev = handle;

	/*
	 * Check that the specified range of the device allows the desired
	 * protection.
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
	npages = OFF_TO_IDX(size);
	for (off = foff; npages--; off += PAGE_SIZE) {
		if (dev_dmmap(dev, off, (int)prot, NULL) == -1)
			return (EINVAL);
	}

	return (0);
}

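/*
 * Default destructor: detach the VM object from the device.
 */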
static void
old_dev_pager_dtor(void *handle)
{
	cdev_t dev;

	dev = handle;
	if (dev != NULL) {
		KKASSERT(dev->si_object);
		dev->si_object = NULL;
	}
}

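/*
 * Default fault handler: translate the faulting offset to a physical
 * address via the device's mmap routine and return a fictitious page
 * mapping that address.
 */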
static int
old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
	vm_paddr_t paddr;
	vm_page_t page;
	vm_offset_t pidx = OFF_TO_IDX(offset);
	cdev_t dev;

	page = *mres;
	dev = object->handle;

	paddr = pmap_phys_address(
		    dev_dmmap(dev, offset, prot, NULL));
	KASSERT(paddr != -1, ("dev_pager_getpage: map function returns error"));
	KKASSERT(object->type == OBJT_DEVICE);

	if (page->flags & PG_FICTITIOUS) {
		/*
		 * If the passed in reqpage page is already a fake page,
		 * update it with the new physical address.
		 */
		page->phys_addr = paddr;
		page->valid = VM_PAGE_BITS_ALL;
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page
		 * and free the original page.
		 */
		page = dev_pager_getfake(paddr, object->memattr);
		TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
				  page, pageq);
		vm_object_hold(object);
		vm_page_free(*mres);
		if (vm_page_insert(page, object, pidx) == FALSE) {
			panic("dev_pager_getpage: page (%p,%016jx) exists",
			      object, (uintmax_t)pidx);
		}
		vm_object_drop(object);
	}

	return (VM_PAGER_OK);
}