/*	$OpenBSD: uvm_device.c,v 1.68 2024/12/15 11:02:59 mpi Exp $	*/
/*	$NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_device.c,v 1.1.2.9 1998/02/06 05:11:47 chs Exp
 */

/*
 * uvm_device.c: the device pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

#include "drm.h"

/*
 * private global data structure
 *
 * we keep a list of active device objects in the system.
 */

LIST_HEAD(, uvm_device) udv_list = LIST_HEAD_INITIALIZER(udv_list);
struct mutex udv_lock = MUTEX_INITIALIZER(IPL_NONE);
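
/*
 * The fields used below imply a uvm_device layout roughly like the
 * sketch that follows; this is only a reminder, the authoritative
 * definition lives in <uvm/uvm_device.h>:
 *
 *	struct uvm_device {
 *		struct uvm_object	u_obj;		uvm object proper
 *		int			u_flags;	UVM_DEVICE_{HOLD,WANTED}
 *		dev_t			u_device;	device backing the object
 *		LIST_ENTRY(uvm_device)	u_list;		entry on udv_list
 *	};
 *
 * u_flags and u_list are protected by udv_lock; the reference count
 * inside u_obj is protected by the object's vmobjlock.
 */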

/*
 * functions
 */
static void	udv_reference(struct uvm_object *);
static void	udv_detach(struct uvm_object *);
static int	udv_fault(struct uvm_faultinfo *, vaddr_t,
		    vm_page_t *, int, int, vm_fault_t,
		    vm_prot_t, int);
static boolean_t	udv_flush(struct uvm_object *, voff_t, voff_t,
		    int);

/*
 * master pager structure
 */
const struct uvm_pagerops uvm_deviceops = {
	.pgo_reference = udv_reference,
	.pgo_detach = udv_detach,
	.pgo_fault = udv_fault,
	.pgo_flush = udv_flush,
};
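
/*
 * Generic UVM code does not call the udv_* functions above by name;
 * it dispatches through the object's pager-op table.  A minimal
 * sketch of that dispatch, given some device-backed "uobj":
 *
 *	uobj->pgops->pgo_reference(uobj);	add a reference
 *	...
 *	uobj->pgops->pgo_detach(uobj);		drop it again
 *
 * For a device object both calls land in udv_reference() and
 * udv_detach() below; faults against the mapping go through
 * pgo_fault, i.e. udv_fault().
 */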

/*
 * the ops!
 */


/*
 * udv_attach
 *
 * get a VM object that is associated with a device. allocate a new
 * one if needed.
 *
 * => nothing should be locked so that we can sleep here.
 *
 * The last two arguments (off and size) are only used for access checking.
 */
struct uvm_object *
udv_attach(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct uvm_device *udv, *lcv;
	paddr_t (*mapfn)(dev_t, off_t, int);
#if NDRM > 0
	struct uvm_object *obj;
#endif

	/*
	 * before we do anything, ensure this device supports mmap
	 */
	mapfn = cdevsw[major(device)].d_mmap;
	if (mapfn == NULL ||
	    mapfn == (paddr_t (*)(dev_t, off_t, int)) enodev ||
	    mapfn == (paddr_t (*)(dev_t, off_t, int)) nullop)
		return(NULL);

	/*
	 * Negative offsets on the object are not allowed.
	 */
	if (off < 0)
		return(NULL);

#if NDRM > 0
	obj = udv_attach_drm(device, accessprot, off, size);
	if (obj)
		return(obj);
#endif

	/*
	 * Check that the specified range of the device allows the
	 * desired protection.
	 *
	 * XXX clobbers off and size, but nothing else here needs them.
	 */
	while (size != 0) {
		if ((*mapfn)(device, off, accessprot) == -1)
			return (NULL);
		off += PAGE_SIZE; size -= PAGE_SIZE;
	}
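
	/*
	 * The check above relies on the cdevsw d_mmap convention: a
	 * driver returns the physical address backing "off" when the
	 * access is allowed and -1 when it is not.  A sketch of a
	 * driver-side implementation (names are illustrative only):
	 *
	 *	paddr_t
	 *	mydev_mmap(dev_t dev, off_t off, int prot)
	 *	{
	 *		struct mydev_softc *sc = mydev_lookup(dev);
	 *
	 *		if (sc == NULL || off < 0 || off >= sc->sc_memsize)
	 *			return (-1);
	 *		return (sc->sc_membase + off);
	 *	}
	 */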

	/*
	 * keep looping until we get it
	 */
	for (;;) {
		/*
		 * first, attempt to find it on the main list
		 */
		mtx_enter(&udv_lock);
		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * got it on main list. put a hold on it and unlock udv_lock.
		 */
		if (lcv) {
			/*
			 * if someone else has a hold on it, sleep and start
			 * over again.  Otherwise, set the HOLD flag so we
			 * don't have to re-order locking here.
			 */
			if (lcv->u_flags & UVM_DEVICE_HOLD) {
				lcv->u_flags |= UVM_DEVICE_WANTED;
				msleep_nsec(lcv, &udv_lock, PVM | PNORELOCK,
				    "udv_attach", INFSLP);
				continue;
			}

			/* we are now holding it */
			lcv->u_flags |= UVM_DEVICE_HOLD;
			mtx_leave(&udv_lock);

			/*
			 * bump reference count, unhold, return.
			 */
			rw_enter(lcv->u_obj.vmobjlock, RW_WRITE);
			lcv->u_obj.uo_refs++;
			rw_exit(lcv->u_obj.vmobjlock);

			mtx_enter(&udv_lock);
			if (lcv->u_flags & UVM_DEVICE_WANTED)
				wakeup(lcv);
			lcv->u_flags &= ~(UVM_DEVICE_WANTED|UVM_DEVICE_HOLD);
			mtx_leave(&udv_lock);
			return(&lcv->u_obj);
		}

		/*
		 * Did not find it on main list. Need to allocate a new one.
		 */
		mtx_leave(&udv_lock);
		/* NOTE: we could sleep in the following malloc() */
		udv = malloc(sizeof(*udv), M_TEMP, M_WAITOK);
		uvm_obj_init(&udv->u_obj, &uvm_deviceops, 1);
		mtx_enter(&udv_lock);

		/*
		 * now we have to double check to make sure no one added it
		 * to the list while we were sleeping...
		 */
		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * did we lose a race to someone else?
		 * free our memory and retry.
		 */
		if (lcv) {
			mtx_leave(&udv_lock);
			uvm_obj_destroy(&udv->u_obj);
			free(udv, M_TEMP, sizeof(*udv));
			continue;
		}

		/*
		 * we have it! init the data structures, add to list
		 * and return.
		 */
		udv->u_flags = 0;
		udv->u_device = device;
		LIST_INSERT_HEAD(&udv_list, udv, u_list);
		mtx_leave(&udv_lock);
		return(&udv->u_obj);
	}
	/*NOTREACHED*/
}
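
/*
 * A sketch of how a caller on the mmap path might use udv_attach();
 * this is illustrative only and not a copy of the real uvm_mmap()
 * logic:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = udv_attach(dev, prot, foff, size);
 *	if (uobj == NULL)
 *		return (EINVAL);		device cannot be mapped
 *	...
 *	enter uobj into the target map; if that fails, drop the
 *	reference udv_attach() handed us:
 *
 *	uobj->pgops->pgo_detach(uobj);
 */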

/*
 * udv_reference
 *
 * add a reference to a VM object. Note that the reference count must
 * already be one (the passed in reference) so there is no chance of the
 * udv being released or locked out here.
 */
static void
udv_reference(struct uvm_object *uobj)
{
	rw_enter(uobj->vmobjlock, RW_WRITE);
	uobj->uo_refs++;
	rw_exit(uobj->vmobjlock);
}

/*
 * udv_detach
 *
 * remove a reference to a VM object.
 */
static void
udv_detach(struct uvm_object *uobj)
{
	struct uvm_device *udv = (struct uvm_device *)uobj;

	/*
	 * loop until done
	 */
again:
	rw_enter(uobj->vmobjlock, RW_WRITE);
	if (uobj->uo_refs > 1) {
		uobj->uo_refs--;
		rw_exit(uobj->vmobjlock);
		return;
	}
	KASSERT(uobj->uo_npages == 0 && RBT_EMPTY(uvm_objtree, &uobj->memt));

	/*
	 * is it being held? if so, wait until others are done.
	 */
	mtx_enter(&udv_lock);
	if (udv->u_flags & UVM_DEVICE_HOLD) {
		udv->u_flags |= UVM_DEVICE_WANTED;
		rw_exit(uobj->vmobjlock);
		msleep_nsec(udv, &udv_lock, PVM | PNORELOCK, "udv_detach",
		    INFSLP);
		goto again;
	}

	/*
	 * got it! nuke it now.
	 */
	LIST_REMOVE(udv, u_list);
	if (udv->u_flags & UVM_DEVICE_WANTED)
		wakeup(udv);
	mtx_leave(&udv_lock);
	rw_exit(uobj->vmobjlock);

	uvm_obj_destroy(uobj);
	free(udv, M_TEMP, sizeof(*udv));
}


/*
 * udv_flush
 *
 * flush pages out of a uvm object. a no-op for devices.
 */
static boolean_t
udv_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{

	return(TRUE);
}

/*
 * udv_fault: non-standard fault routine for device "pages"
 *
 * => rather than having a "get" function, we have a fault routine:
 *	since we don't return vm_pages, we need full control over the
 *	pmap_enter() map-in
 * => on return, we unlock all fault data structures
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 *    XXX: currently PGO_LOCKED is always required ... consider removing
 *	it as a flag
 * => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
 */
static int
udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps, int npages,
    int centeridx, vm_fault_t fault_type, vm_prot_t access_type, int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct uvm_device *udv = (struct uvm_device *)uobj;
	vaddr_t curr_va;
	off_t curr_offset;
	paddr_t paddr;
	int lcv, retval;
	dev_t device;
	paddr_t (*mapfn)(dev_t, off_t, int);
	vm_prot_t mapprot;

	KERNEL_ASSERT_LOCKED();

	/*
	 * we do not allow device mappings to be mapped copy-on-write
	 * so we kill any attempt to do so here.
	 */
	if (UVM_ET_ISCOPYONWRITE(entry)) {
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return EACCES;
	}

	/*
	 * get device map function.
	 */
	device = udv->u_device;
	mapfn = cdevsw[major(device)].d_mmap;

	/*
	 * now we must determine the offset in udv to use and the VA to
	 * use for pmap_enter. note that we always use orig_map's pmap
	 * for pmap_enter (even if we have a submap). since virtual
	 * addresses in a submap must match the main map, this is ok.
	 */
	/* udv offset = (offset from start of entry) + entry's offset */
	curr_offset = entry->offset + (vaddr - entry->start);
	/* pmap va = vaddr (virtual address of pps[0]) */
	curr_va = vaddr;
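
	/*
	 * Worked example (numbers are illustrative): if the entry maps
	 * device offset 0x2000 at VA 0x1000 (entry->offset == 0x2000,
	 * entry->start == 0x1000) and vaddr (the VA of pps[0]) is
	 * 0x3000, then curr_offset starts at 0x2000 + (0x3000 - 0x1000)
	 * = 0x4000; both curr_offset and curr_va then advance by
	 * PAGE_SIZE per page in the loop below.
	 */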

	/*
	 * loop over the page range entering in as needed
	 */
	retval = 0;
	for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;

		if (pps[lcv] == PGO_DONTCARE)
			continue;

		paddr = (*mapfn)(device, curr_offset, access_type);
		if (paddr == -1) {
			retval = EACCES; /* XXX */
			break;
		}
		mapprot = ufi->entry->protection;
		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr,
		    mapprot, PMAP_CANFAIL | mapprot) != 0) {
			/*
			 * pmap_enter() didn't have the resource to
			 * enter this mapping. Unlock everything,
			 * wait for the pagedaemon to free up some
			 * pages, and then tell uvm_fault() to start
			 * the fault again.
			 *
			 * XXX Needs some rethinking for the PGO_ALLPAGES
			 * XXX case.
			 */
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
			    uobj);

			/* sync what we have so far */
			pmap_update(ufi->orig_map->pmap);
			uvm_wait("udv_fault");
			return ERESTART;
		}
	}

	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
	pmap_update(ufi->orig_map->pmap);
	return retval;
}