/*
 * (MPSAFE)
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)device_pager.c	8.1 (Berkeley) 6/11/93
 * $FreeBSD: src/sys/vm/device_pager.c,v 1.46.2.1 2000/08/02 21:54:37 peter Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/thread2.h>
#include <sys/mutex2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_page2.h>

static void dev_pager_dealloc (vm_object_t);
static int dev_pager_getpage (vm_object_t, vm_page_t *, int);
static void dev_pager_putpages (vm_object_t, vm_page_t *, int,
		boolean_t, int *);
static boolean_t dev_pager_haspage (vm_object_t, vm_pindex_t);

/* freelist of fictitious device-mapped pages */
static TAILQ_HEAD(, vm_page) dev_freepages_list =
		TAILQ_HEAD_INITIALIZER(dev_freepages_list);
static MALLOC_DEFINE(M_FICTITIOUS_PAGES, "device-mapped pages",
		"Device mapped pages");

static vm_page_t dev_pager_getfake (vm_paddr_t, int);
static void dev_pager_putfake (vm_page_t);

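/*
 * Pager operations vector for device-backed VM objects.
 */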
struct pagerops devicepagerops = {
	dev_pager_dealloc,
	dev_pager_getpage,
	dev_pager_putpages,
	dev_pager_haspage
};

/* list of device pager objects */
static struct pagerlst dev_pager_object_list =
		TAILQ_HEAD_INITIALIZER(dev_pager_object_list);
/* protect list manipulation */
static struct mtx dev_pager_mtx = MTX_INITIALIZER;

static int old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *pg_color);
static void old_dev_pager_dtor(void *handle);
static int old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres);

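/*
 * Default pager ops used by dev_pager_alloc() for traditional device
 * mappings backed by the driver's mmap entry point.
 */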
static struct cdev_pager_ops old_dev_pager_ops = {
	.cdev_pg_ctor = old_dev_pager_ctor,
	.cdev_pg_dtor = old_dev_pager_dtor,
	.cdev_pg_fault = old_dev_pager_fault
};

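/*
 * Look up the device pager VM object associated with the given handle.
 * Returns NULL if no pager object exists for the handle.
 */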
vm_object_t
cdev_pager_lookup(void *handle)
{
	vm_object_t object;

	mtx_lock(&dev_pager_mtx);
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	mtx_unlock(&dev_pager_mtx);
	return (object);
}

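/*
 * Allocate a device pager VM object for the given handle, or find the
 * existing object and add a reference to it.  The device-specific
 * constructor is invoked before the object list is consulted; the file
 * offset must be page aligned.
 */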
vm_object_t
cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
	vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred)
{
	cdev_t dev;
	vm_object_t object;
	u_short color;

	dev = handle;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	size = round_page64(size);

	if (ops->cdev_pg_ctor(handle, size, prot, foff, cred, &color) != 0)
		return (NULL);

	/*
	 * Look up the pager, creating it as necessary.
	 */
	mtx_lock(&dev_pager_mtx);
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	if (object == NULL) {
		/*
		 * Allocate object and associate it with the pager.
		 */
		object = vm_object_allocate_hold(tp,
						 OFF_TO_IDX(foff + size));
		object->handle = handle;
		object->un_pager.devp.ops = ops;
		object->un_pager.devp.dev = handle;
		TAILQ_INIT(&object->un_pager.devp.devp_pglist);
		dev->si_object = object;

		TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
		    pager_object_list);

		vm_object_drop(object);
	} else {
		/*
		 * Gain a reference to the object and extend its size
		 * if the new mapping requires it.
		 */
		vm_object_hold(object);
		vm_object_reference_locked(object);
		if (OFF_TO_IDX(foff + size) > object->size)
			object->size = OFF_TO_IDX(foff + size);
		vm_object_drop(object);
	}
	mtx_unlock(&dev_pager_mtx);

	return (object);
}

/*
 * Compatibility wrapper: allocate an OBJT_DEVICE pager object using the
 * traditional device pager ops.
 *
 * No requirements.
 */
vm_object_t
dev_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t foff)
{
	return (cdev_pager_allocate(handle, OBJT_DEVICE, &old_dev_pager_ops,
	    size, prot, foff, NULL));
}

/*
 * Free a single page that was instantiated by the device pager.  Managed
 * device pages are unmapped and removed from the object; OBJT_DEVICE
 * fictitious pages are unlinked from the object's page list and freed.
 *
 * XXX
 */
void
cdev_pager_free_page(vm_object_t object, vm_page_t m)
{
	if (object->type == OBJT_MGTDEVICE) {
		KKASSERT((m->flags & PG_FICTITIOUS) != 0);
		pmap_page_protect(m, VM_PROT_NONE);
		vm_page_remove(m);
		vm_page_wakeup(m);
	} else if (object->type == OBJT_DEVICE) {
		TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
		dev_pager_putfake(m);
	}
}

/*
 * No requirements.
 */
static void
dev_pager_dealloc(vm_object_t object)
{
	vm_page_t m;

	mtx_lock(&dev_pager_mtx);
	object->un_pager.devp.ops->cdev_pg_dtor(object->un_pager.devp.dev);

	TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);

	if (object->type == OBJT_DEVICE) {
		/*
		 * Free up our fake pages.
		 */
		while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist)) !=
		       NULL) {
			TAILQ_REMOVE(&object->un_pager.devp.devp_pglist,
				     m, pageq);
			dev_pager_putfake(m);
		}
	}
	mtx_unlock(&dev_pager_mtx);
}

/*
 * No requirements.
 */
static int
dev_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
	vm_page_t page;
	int error;

	mtx_lock(&dev_pager_mtx);

	page = *mpp;

	/*
	 * Delegate to the device-specific fault handler, which may replace
	 * the passed-in page with a fictitious one of its own.
	 */
	error = object->un_pager.devp.ops->cdev_pg_fault(object,
	    IDX_TO_OFF(page->pindex), PROT_READ, mpp);

	mtx_unlock(&dev_pager_mtx);

	return (error);
}

/*
 * No requirements.
 */
static void
dev_pager_putpages(vm_object_t object, vm_page_t *m,
		   int count, boolean_t sync, int *rtvals)
{
	panic("dev_pager_putpages called");
}

/*
 * No requirements.
 */
static boolean_t
dev_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	return (TRUE);
}

/*
 * Allocate a fictitious vm_page describing the given physical address and
 * memory attribute.
 *
 * The caller must hold dev_pager_mtx.
 */
static vm_page_t
dev_pager_getfake(vm_paddr_t paddr, int pat_mode)
{
	vm_page_t m;

	m = kmalloc(sizeof(*m), M_FICTITIOUS_PAGES, M_WAITOK|M_ZERO);

	pmap_page_init(m);

	m->flags = PG_BUSY | PG_FICTITIOUS;
	m->valid = VM_PAGE_BITS_ALL;
	m->dirty = 0;
	m->busy = 0;
	m->queue = PQ_NONE;
	m->object = NULL;

	m->wire_count = 1;
	m->hold_count = 0;
	m->phys_addr = paddr;
	m->pat_mode = pat_mode;

	return (m);
}

/*
 * Synthesized VM pages must be structurally stable for lockless lookups to
 * work properly.
 *
 * The caller must hold dev_pager_mtx.
 */
static void
dev_pager_putfake(vm_page_t m)
{
	if (!(m->flags & PG_FICTITIOUS))
		panic("dev_pager_putfake: bad page");
	KKASSERT(m->object == NULL);
	KKASSERT(m->hold_count == 0);
	kfree(m, M_FICTITIOUS_PAGES);
}

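/*
 * Constructor for the traditional device pager.  Verify that the driver's
 * mmap entry point accepts every page of the requested range with the
 * requested protection.
 */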
static int
old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	unsigned int npages;
	vm_offset_t off;
	cdev_t dev;

	dev = handle;

	/*
	 * Check that the specified range of the device allows the desired
	 * protection.
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
	npages = OFF_TO_IDX(size);
	for (off = foff; npages--; off += PAGE_SIZE) {
		if (dev_dmmap(dev, off, (int)prot) == -1)
			return (EINVAL);
	}

	return (0);
}

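/*
 * Destructor for the traditional device pager.  Disassociate the VM object
 * from the device.
 */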
static void
old_dev_pager_dtor(void *handle)
{
	cdev_t dev;

	dev = handle;
	if (dev != NULL) {
		KKASSERT(dev->si_object);
		dev->si_object = NULL;
	}
}

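/*
 * Fault handler for the traditional device pager.  Query the driver's mmap
 * entry point for the physical address backing the faulting offset and
 * return a fictitious page describing it, reusing the passed-in page when
 * it is already fictitious.
 */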
static int
old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
	vm_paddr_t paddr;
	vm_page_t page;
	vm_offset_t pidx = OFF_TO_IDX(offset);
	cdev_t dev;

	page = *mres;
	dev = object->handle;

	paddr = pmap_phys_address(dev_dmmap(dev, offset, prot));
	KASSERT(paddr != -1,
	    ("dev_pager_getpage: map function returns error"));
	KKASSERT(object->type == OBJT_DEVICE);

	if (page->flags & PG_FICTITIOUS) {
		/*
		 * If the passed in reqpage page is already a fake page,
		 * update it with the new physical address.
		 */
		page->phys_addr = paddr;
		page->valid = VM_PAGE_BITS_ALL;
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page
		 * and free up all the original pages.
		 */
		page = dev_pager_getfake(paddr, object->memattr);
		TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
				  page, pageq);
		vm_object_hold(object);
		vm_page_free(*mres);
		if (vm_page_insert(page, object, pidx) == FALSE) {
			panic("dev_pager_getpage: page (%p,%016jx) exists",
			      object, (uintmax_t)pidx);
		}
		vm_object_drop(object);
	}

	return (VM_PAGER_OK);
}