/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)device_pager.c	7.5 (Berkeley) 02/19/92
 */

/*
 * Page to/from special files.
 *
 * The device pager maps character special devices (via their cdevsw
 * d_mmap entry point) into VM objects.  All pages are created up front
 * in dev_pager_alloc as wired, fictitious pages whose physical addresses
 * come from the driver, so getpage/putpage never do real I/O.
 */

#include "devpager.h"
#if NDEVPAGER > 0

#include "param.h"
#include "conf.h"
#include "mman.h"
#include "malloc.h"

#include "vm.h"
#include "vm_page.h"
#include "vm_kern.h"
#include "device_pager.h"

queue_head_t	dev_pager_list;	/* list of managed devices */

#ifdef DEBUG
int	dpagerdebug = 0;
#define	DDB_FOLLOW	0x01	/* trace entry into each routine */
#define	DDB_INIT	0x02
#define	DDB_ALLOC	0x04	/* report alloc/dealloc details */
#define	DDB_FAIL	0x08
#endif

/*
 * Initialize the device pager: just set up the (empty) list of
 * managed devices.  Called once at VM startup.
 */
void
dev_pager_init()
{
#ifdef DEBUG
	if (dpagerdebug & DDB_FOLLOW)
		printf("dev_pager_init()\n");
#endif
	queue_init(&dev_pager_list);
}

/*
 * Return a pager for the device identified by handle (a dev_t cast to
 * caddr_t), creating one if it does not already exist.
 *
 * handle:	device number to map; NULL means a pageout-initiated
 *		allocation, which is impossible for wired device pages
 *		and therefore panics.
 * size:	length of the region to map, rounded up to whole pages.
 * prot:	VM protection wanted; converted to PROT_* bits for the
 *		driver's d_mmap validation probe.
 *
 * Returns the (new or existing) pager, or NULL if the device has no
 * usable d_mmap routine or rejects any page in [0, size).
 */
vm_pager_t
dev_pager_alloc(handle, size, prot)
	caddr_t handle;
	vm_size_t size;
	vm_prot_t prot;
{
	dev_t dev;
	vm_pager_t pager;
	int (*mapfunc)(), nprot;
	register vm_object_t object;
	register vm_page_t page;
	register dev_pager_t devp;
	register int npages, off;
	extern int nullop(), enodev();

#ifdef DEBUG
	if (dpagerdebug & DDB_FOLLOW)
		printf("dev_pager_alloc(%x, %x, %x)\n", handle, size, prot);
#endif
	/*
	 * Pageout to device, should never happen.
	 * (Device pages are wired; the pageout daemon passes a NULL
	 * handle, so reaching here indicates a VM bug.)
	 */
	if (handle == NULL)
		panic("dev_pager_alloc called");

	/*
	 * Look it up, creating as necessary
	 */
	pager = vm_pager_lookup(&dev_pager_list, handle);
	if (pager == NULL) {
		/*
		 * Validation.  Make sure this device can be mapped
		 * and that range to map is acceptable to the device:
		 * the driver must supply a real d_mmap (not the enodev
		 * or nullop stubs) and must accept every page offset
		 * we are going to map.
		 */
		dev = (dev_t)handle;
		mapfunc = cdevsw[major(dev)].d_mmap;
		if (!mapfunc || mapfunc == enodev || mapfunc == nullop)
			return(NULL);
		/* Translate VM_PROT_* to the PROT_* bits d_mmap expects. */
		nprot = 0;
		if (prot & VM_PROT_READ)
			nprot |= PROT_READ;
		if (prot & VM_PROT_WRITE)
			nprot |= PROT_WRITE;
		if (prot & VM_PROT_EXECUTE)
			nprot |= PROT_EXEC;
		/* Probe every page; any -1 from the driver aborts.
		 * Note npages is consumed here and recomputed below. */
		npages = atop(round_page(size));
		for (off = 0; npages--; off += PAGE_SIZE)
			if ((*mapfunc)(dev, off, nprot) == -1)
				return(NULL);
		/*
		 * Allocate and initialize pager structs
		 * (M_WAITOK allocations; the NULL checks are belt and
		 * suspenders).
		 */
		pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, M_WAITOK);
		if (pager == NULL)
			return(NULL);
		devp = (dev_pager_t)malloc(sizeof *devp, M_VMPGDATA, M_WAITOK);
		if (devp == NULL) {
			free((caddr_t)pager, M_VMPAGER);
			return(NULL);
		}
		devp->devp_dev = dev;
		devp->devp_npages = atop(round_page(size));
		pager->pg_handle = handle;
		pager->pg_ops = &devicepagerops;
		pager->pg_type = PG_DEVICE;
		pager->pg_data = (caddr_t)devp;
		/*
		 * Allocate object and vm_page structures to describe memory.
		 * The vm_page array lives in wired kernel memory (it is
		 * freed in dev_pager_dealloc).
		 */
		npages = devp->devp_npages;
		object = devp->devp_object = vm_object_allocate(ptoa(npages));
		vm_object_enter(object, pager);
		vm_object_setpager(object, pager, (vm_offset_t)0, FALSE);
		devp->devp_pages = (vm_page_t)
			kmem_alloc(kernel_map, npages*sizeof(struct vm_page));
		/*
		 * Enter every page now: each gets the physical address the
		 * driver reports for its offset, is wired so pageout never
		 * touches it, and is marked fictitious (not part of the
		 * managed page pool).
		 * NOTE(review): kmem_alloc's return is not checked here —
		 * presumably it cannot fail for this size, but verify.
		 */
		off = 0;
		for (page = devp->devp_pages;
		     page < &devp->devp_pages[npages]; page++) {
			vm_object_lock(object);
			VM_PAGE_INIT(page, object, off);
			page->phys_addr =
				pmap_phys_address((*mapfunc)(dev, off, nprot));
			page->wire_count = 1;
			page->fictitious = TRUE;
			PAGE_WAKEUP(page);
			vm_object_unlock(object);
			off += PAGE_SIZE;
		}
		/*
		 * Finally, put it on the managed list so others can find it.
		 */
		queue_enter(&dev_pager_list, devp, dev_pager_t, devp_list);
#ifdef DEBUG
		if (dpagerdebug & DDB_ALLOC) {
			printf("dev_pager_alloc: pages %d@%x\n",
			       devp->devp_npages, devp->devp_pages);
			printf("dev_pager_alloc: pager %x devp %x object %x\n",
			       pager, devp, object);
			vm_object_print(object, FALSE);
		}
#endif
	} else {
		/*
		 * vm_object_lookup() gains a reference and also
		 * removes the object from the cache.
		 */
		devp = (dev_pager_t)pager->pg_data;
		if (vm_object_lookup(pager) != devp->devp_object)
			panic("dev_pager_setup: bad object");
	}
	return(pager);
}

/*
 * Tear down a device pager: unlink it from the managed list, strip the
 * fictitious pages out of the object, free the vm_page array allocated
 * in dev_pager_alloc, and release the per-device data.  The pager
 * structure itself is not freed here — presumably the caller
 * (vm_pager_deallocate) owns it; verify against the VM layer.
 */
void
dev_pager_dealloc(pager)
	vm_pager_t pager;
{
	dev_pager_t devp = (dev_pager_t)pager->pg_data;
	register vm_object_t object;

#ifdef DEBUG
	if (dpagerdebug & DDB_FOLLOW)
		printf("dev_pager_dealloc(%x)\n", pager);
#endif
	queue_remove(&dev_pager_list, devp, dev_pager_t, devp_list);
	object = devp->devp_object;
#ifdef DEBUG
	if (dpagerdebug & DDB_ALLOC)
		printf("dev_pager_dealloc: devp %x object %x pages %d@%x\n",
		       devp, object, devp->devp_npages, devp->devp_pages);
#endif
	/* Detach every page from the object before freeing their storage. */
	while (!queue_empty(&object->memq))
		vm_page_remove((vm_page_t)queue_first(&object->memq));
	kmem_free(kernel_map, (vm_offset_t)devp->devp_pages,
		  devp->devp_npages * sizeof(struct vm_page));
	free((caddr_t)devp, M_VMPGDATA);
	pager->pg_data = 0;
}

/*
 * Page-in request.  All device pages were created and wired in
 * dev_pager_alloc, so a fault that reaches here has no page to get:
 * always report VM_PAGER_BAD.
 */
dev_pager_getpage(pager, m, sync)
	vm_pager_t pager;
	vm_page_t m;
	boolean_t sync;
{
#ifdef DEBUG
	if (dpagerdebug & DDB_FOLLOW)
		printf("dev_pager_getpage(%x, %x)\n", pager, m);
#endif
	return(VM_PAGER_BAD);
}

/*
 * Page-out request.  Device pages are wired and fictitious, so pageout
 * should never select them; a non-NULL pager here is a VM bug.
 * (A NULL pager is tolerated and ignored.)
 */
dev_pager_putpage(pager, m, sync)
	vm_pager_t pager;
	vm_page_t m;
	boolean_t sync;
{
#ifdef DEBUG
	if (dpagerdebug & DDB_FOLLOW)
		printf("dev_pager_putpage(%x, %x)\n", pager, m);
#endif
	if (pager == NULL)
		return;
	panic("dev_pager_putpage called");
}

/*
 * Every offset within the device mapping was validated against the
 * driver's d_mmap in dev_pager_alloc, so unconditionally claim the
 * page exists.
 */
boolean_t
dev_pager_haspage(pager, offset)
	vm_pager_t pager;
	vm_offset_t offset;
{
#ifdef DEBUG
	if (dpagerdebug & DDB_FOLLOW)
		printf("dev_pager_haspage(%x, %x)\n", pager, offset);
#endif
	return(TRUE);
}
#endif