/*
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/vm/phys_pager.c,v 1.3.2.3 2000/12/17 02:05:41 alfred Exp $
 * $DragonFly: src/sys/vm/phys_pager.c,v 1.5 2006/03/27 01:54:18 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker_set.h>
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/thread2.h>

/* list of device pager objects */
static struct pagerlst phys_pager_object_list;

static int phys_pager_alloc_lock, phys_pager_alloc_lock_want;

static void
phys_pager_init(void)
{

        TAILQ_INIT(&phys_pager_object_list);
}

static vm_object_t
phys_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t foff)
{
        vm_object_t object;

        /*
         * Offset should be page aligned.
         */
        if (foff & PAGE_MASK)
                return (NULL);

        size = round_page(size);

        if (handle != NULL) {
                /*
                 * Lock to prevent object creation race condition.
                 */
                while (phys_pager_alloc_lock) {
                        phys_pager_alloc_lock_want++;
                        tsleep(&phys_pager_alloc_lock, 0, "ppall", 0);
                        phys_pager_alloc_lock_want--;
                }
                phys_pager_alloc_lock = 1;

                /*
                 * Look up pager, creating as necessary.
                 */
                object = vm_pager_object_lookup(&phys_pager_object_list, handle);
                if (object == NULL) {
                        /*
                         * Allocate object and associate it with the pager.
                         */
                        object = vm_object_allocate(OBJT_PHYS,
                            OFF_TO_IDX(foff + size));
                        object->handle = handle;
                        TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
                            pager_object_list);
                } else {
                        /*
                         * Gain a reference to the object.
                         */
                        vm_object_reference(object);
                        if (OFF_TO_IDX(foff + size) > object->size)
                                object->size = OFF_TO_IDX(foff + size);
                }
                phys_pager_alloc_lock = 0;
                if (phys_pager_alloc_lock_want)
                        wakeup(&phys_pager_alloc_lock);
        } else {
                object = vm_object_allocate(OBJT_PHYS,
                    OFF_TO_IDX(foff + size));
        }

        return (object);
}

static void
phys_pager_dealloc(vm_object_t object)
{

        if (object->handle != NULL)
                TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
}

static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
        int i;

        crit_enter();
        /*
         * Fill as many pages as vm_fault has allocated for us.
         */
        for (i = 0; i < count; i++) {
                if ((m[i]->flags & PG_ZERO) == 0)
                        vm_page_zero_fill(m[i]);
                vm_page_flag_set(m[i], PG_ZERO);
                /* Switch off pv_entries */
                vm_page_unmanage(m[i]);
                m[i]->valid = VM_PAGE_BITS_ALL;
                m[i]->dirty = 0;
                /* The requested page must remain busy, the others not. */
                if (reqpage != i) {
                        vm_page_flag_clear(m[i], PG_BUSY);
                        m[i]->busy = 0;
                }
        }
        crit_exit();

        return (VM_PAGER_OK);
}

static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
                    int *rtvals)
{

        panic("phys_pager_putpage called");
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * XXX 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define PHYSCLUSTER 1024
#endif
static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
                   int *after)
{
        vm_pindex_t base, end;

        base = pindex & (~(PHYSCLUSTER - 1));
        end = base + (PHYSCLUSTER - 1);
        if (before != NULL)
                *before = pindex - base;
        if (after != NULL)
                *after = end - pindex;
        return (TRUE);
}

struct pagerops physpagerops = {
        phys_pager_init,
        phys_pager_alloc,
        phys_pager_dealloc,
        phys_pager_getpages,
        phys_pager_putpages,
        phys_pager_haspage,
        NULL
};
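/*
 * Illustrative sketch, not part of the original file: a worked example of
 * the PHYSCLUSTER window arithmetic used by phys_pager_haspage() above.
 * With PHYSCLUSTER = 1024 (one 4MB window of 4KB pages on x86), a fault
 * at page index 1500 yields:
 *
 *      base   = 1500 & ~(1024 - 1) = 1024
 *      end    = 1024 + (1024 - 1)  = 2047
 *      before = 1500 - 1024        = 476
 *      after  = 2047 - 1500        = 547
 *
 * so vm_fault is hinted that the 476 pages before and the 547 pages after
 * the faulting page lie in the same window and may be prefaulted together.
 */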