/*
 * (MPSAFE)
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)device_pager.c	8.1 (Berkeley) 6/11/93
 * $FreeBSD: src/sys/vm/device_pager.c,v 1.46.2.1 2000/08/02 21:54:37 peter Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/mutex2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_page2.h>

static pgo_dealloc_t	dev_pager_dealloc;
static pgo_getpage_t	dev_pager_getpage;
static pgo_putpages_t	dev_pager_putpages;
static pgo_haspage_t	dev_pager_haspage;

struct pagerops devicepagerops = {
	.pgo_dealloc =		dev_pager_dealloc,
	.pgo_getpage =		dev_pager_getpage,
	.pgo_putpages =		dev_pager_putpages,
	.pgo_haspage =		dev_pager_haspage
};

/* list of free fictitious device pages */
static TAILQ_HEAD(, vm_page) dev_freepages_list =
		TAILQ_HEAD_INITIALIZER(dev_freepages_list);
static MALLOC_DEFINE(M_FICTITIOUS_PAGES, "device-mapped pages",
		"Device mapped pages");

static vm_page_t dev_pager_getfake (vm_paddr_t, int);
static void dev_pager_putfake (vm_page_t);

/* list of device pager objects */
static struct pagerlst dev_pager_object_list =
		TAILQ_HEAD_INITIALIZER(dev_pager_object_list);
/* protect list manipulation */
static struct mtx dev_pager_mtx = MTX_INITIALIZER("devpgr");

static int old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
		vm_ooffset_t foff, struct ucred *cred, u_short *pg_color);
static void old_dev_pager_dtor(void *handle);
static int old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
		int prot, vm_page_t *mres);

static struct cdev_pager_ops old_dev_pager_ops = {
	.cdev_pg_ctor = old_dev_pager_ctor,
	.cdev_pg_dtor = old_dev_pager_dtor,
	.cdev_pg_fault = old_dev_pager_fault
};

/*
 * Look up the VM object associated with a device pager handle.
 */
vm_object_t
cdev_pager_lookup(void *handle)
{
	vm_object_t object;

	mtx_lock(&dev_pager_mtx);
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	mtx_unlock(&dev_pager_mtx);

	return (object);
}

/*
 * Allocate or reference the VM object backing a device pager handle.
 * The constructor callback validates the requested mapping before the
 * object is looked up or created.
 */
vm_object_t
cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
		    vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff,
		    struct ucred *cred)
{
	cdev_t dev;
	vm_object_t object;
	u_short color;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	size = round_page64(size);

	if (ops->cdev_pg_ctor(handle, size, prot, foff, cred, &color) != 0)
		return (NULL);

	/*
	 * Look up pager, creating as necessary.
	 */
	mtx_lock(&dev_pager_mtx);
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	if (object == NULL) {
		/*
		 * Allocate object and associate it with the pager.
		 */
		object = vm_object_allocate_hold(tp,
						 OFF_TO_IDX(foff + size));
		object->handle = handle;
		object->un_pager.devp.ops = ops;
		object->un_pager.devp.dev = handle;
		TAILQ_INIT(&object->un_pager.devp.devp_pglist);

		/*
		 * handle is only a device for old_dev_pager_ctor.
		 */
		if (ops->cdev_pg_ctor == old_dev_pager_ctor) {
			dev = handle;
			dev->si_object = object;
		}

		TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
				  pager_object_entry);

		vm_object_drop(object);
	} else {
		/*
		 * Gain a reference to the object.
		 */
		vm_object_hold(object);
		vm_object_reference_locked(object);
		if (OFF_TO_IDX(foff + size) > object->size)
			object->size = OFF_TO_IDX(foff + size);
		vm_object_drop(object);
	}
	mtx_unlock(&dev_pager_mtx);

	return (object);
}

/*
 * No requirements.
 */
vm_object_t
dev_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t foff)
{
	return (cdev_pager_allocate(handle, OBJT_DEVICE, &old_dev_pager_ops,
				    size, prot, foff, NULL));
}

/*
 * Caller must hold object lock.
 */
void
cdev_pager_free_page(vm_object_t object, vm_page_t m)
{
	if (object->type == OBJT_MGTDEVICE) {
		KKASSERT((m->flags & PG_FICTITIOUS) != 0);
		pmap_page_protect(m, VM_PROT_NONE);
		vm_page_remove(m);
		vm_page_wakeup(m);
	} else if (object->type == OBJT_DEVICE) {
		TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
		dev_pager_putfake(m);
	}
}

/*
 * No requirements.
 */
static void
dev_pager_dealloc(vm_object_t object)
{
	vm_page_t m;

	/*
	 * NOTE: Callback may recurse into the device pager so do not
	 *	 obtain dev_pager_mtx until after it returns.
	 *
	 *	 The mutex should only be needed when manipulating the list.
	 */
	object->un_pager.devp.ops->cdev_pg_dtor(object->un_pager.devp.dev);

	mtx_lock(&dev_pager_mtx);
	TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_entry);
	mtx_unlock(&dev_pager_mtx);

	if (object->type == OBJT_DEVICE) {
		/*
		 * Free up our fake pages.
		 */
		while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist)) !=
		       NULL) {
			TAILQ_REMOVE(&object->un_pager.devp.devp_pglist,
				     m, pageq);
			dev_pager_putfake(m);
		}
	}
}

/*
 * No requirements.
 *
 * WARNING! Do not obtain dev_pager_mtx here; doing so will cause a
 *	    deadlock in DRM's VM paging code.
 */
static int
dev_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
	vm_page_t page;
	int error;

	page = *mpp;

	error = object->un_pager.devp.ops->cdev_pg_fault(
			object, IDX_TO_OFF(page->pindex),
			PROT_READ, mpp);

	return (error);
}

/*
 * No requirements.
 */
static void
dev_pager_putpages(vm_object_t object, vm_page_t *m,
		   int count, int flags, int *rtvals)
{
	panic("dev_pager_putpage called");
}

/*
 * No requirements.
 */
static boolean_t
dev_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	return (TRUE);
}

/*
 * The caller does not need to hold dev_pager_mtx() but caller must ensure
 * no page-use collision.
 */
static vm_page_t
dev_pager_getfake(vm_paddr_t paddr, int pat_mode)
{
	vm_page_t m;

	m = kmalloc(sizeof(*m), M_FICTITIOUS_PAGES, M_WAITOK|M_ZERO);

	pmap_page_init(m);

	m->flags = PG_FICTITIOUS | PG_UNQUEUED;
	m->valid = VM_PAGE_BITS_ALL;
	m->dirty = 0;
	m->queue = PQ_NONE;
	m->object = NULL;

	m->busy_count = PBUSY_LOCKED;
	m->wire_count = 1;
	m->hold_count = 0;
	m->phys_addr = paddr;
	m->pat_mode = pat_mode;

	spin_init(&m->spin, "dev_page");

	return (m);
}

/*
 * The caller does not need to hold dev_pager_mtx() but caller must ensure
 * no page-use collision within the object.
 */
static void
dev_pager_putfake(vm_page_t m)
{
	if (!(m->flags & PG_FICTITIOUS))
		panic("dev_pager_putfake: bad page");
	KKASSERT(m->object == NULL);
	KKASSERT(m->hold_count == 0);
	kfree(m, M_FICTITIOUS_PAGES);
}

/*
 * Legacy constructor: verify that the device accepts a mapping for
 * every page in the requested range at the requested protection.
 */
static int
old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
		   vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	unsigned int npages;
	vm_offset_t off;
	cdev_t dev;

	dev = handle;

	/*
	 * Check that the specified range of the device allows the desired
	 * protection.
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
	npages = OFF_TO_IDX(size);
	for (off = foff; npages--; off += PAGE_SIZE) {
		if (dev_dmmap(dev, off, (int)prot, NULL) == -1)
			return (EINVAL);
	}

	return (0);
}

/*
 * Legacy destructor: detach the VM object from the device.
 */
static void
old_dev_pager_dtor(void *handle)
{
	cdev_t dev;

	dev = handle;
	if (dev != NULL) {
		KKASSERT(dev->si_object);
		dev->si_object = NULL;
	}
}

/*
 * Legacy fault handler: translate the object offset into a device
 * physical address and return a fictitious page mapping it.
 */
static int
old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
		    int prot, vm_page_t *mres)
{
	vm_paddr_t paddr;
	vm_page_t page;
	vm_offset_t pidx = OFF_TO_IDX(offset);
	cdev_t dev;

	page = *mres;
	dev = object->handle;

	paddr = pmap_phys_address(dev_dmmap(dev, offset, prot, NULL));
	KASSERT(paddr != -1,
		("dev_pager_getpage: map function returns error"));
	KKASSERT(object->type == OBJT_DEVICE);

	if (page->flags & PG_FICTITIOUS) {
		/*
		 * If the passed in reqpage page is already a fake page,
		 * update it with the new physical address.
		 */
		page->phys_addr = paddr;
		page->valid = VM_PAGE_BITS_ALL;
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page
		 * and free up all the original pages.  Object lock must be
		 * held when manipulating devp_pglist and inserting the
		 * page.
		 */
		page = dev_pager_getfake(paddr, object->memattr);
		vm_object_hold(object);
		TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
				  page, pageq);
		vm_page_free(*mres);
		if (vm_page_insert(page, object, pidx) == FALSE) {
			panic("dev_pager_getpage: page (%p,%016jx) exists",
			      object, (uintmax_t)pidx);
		}
		vm_object_drop(object);
		*mres = page;		/* hand the replacement page back */
	}
	return (VM_PAGER_OK);
}