/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_user.c	8.2 (Berkeley) 01/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	User-exported virtual memory functions.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>

simple_lock_data_t	vm_alloc_lock;	/* XXX */

#ifdef MACHVMCOMPAT
/*
 * BSD style syscall interfaces to MACH calls
 * All return MACH return values.
 */
struct svm_allocate_args {
	vm_map_t map;
	vm_offset_t *addr;
	vm_size_t size;
	boolean_t anywhere;
};
/* ARGSUSED */
int
svm_allocate(p, uap, retval)
	struct proc *p;
	struct svm_allocate_args *uap;
	int *retval;
{
	vm_offset_t addr;
	int rv;

	uap->map = p->p_map;		/* XXX */

	if (copyin((caddr_t)uap->addr, (caddr_t)&addr, sizeof (addr)))
		rv = KERN_INVALID_ARGUMENT;
	else
		rv = vm_allocate(uap->map, &addr, uap->size, uap->anywhere);
	if (rv == KERN_SUCCESS) {
		if (copyout((caddr_t)&addr, (caddr_t)uap->addr, sizeof(addr)))
			rv = KERN_INVALID_ARGUMENT;
	}
	return((int)rv);
}

struct svm_deallocate_args {
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
};
/* ARGSUSED */
int
svm_deallocate(p, uap, retval)
	struct proc *p;
	struct svm_deallocate_args *uap;
	int *retval;
{
	int rv;

	uap->map = p->p_map;		/* XXX */
	rv = vm_deallocate(uap->map, uap->addr, uap->size);
	return((int)rv);
}

struct svm_inherit_args {
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
	vm_inherit_t inherit;
};
/* ARGSUSED */
int
svm_inherit(p, uap, retval)
	struct proc *p;
	struct svm_inherit_args *uap;
	int *retval;
{
	int rv;

	uap->map = p->p_map;		/* XXX */
	rv = vm_inherit(uap->map, uap->addr, uap->size, uap->inherit);
	return((int)rv);
}

struct svm_protect_args {
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
	boolean_t setmax;
	vm_prot_t prot;
};
/* ARGSUSED */
int
svm_protect(p, uap, retval)
	struct proc *p;
	struct svm_protect_args *uap;
	int *retval;
{
	int rv;

	uap->map = p->p_map;		/* XXX */
	rv = vm_protect(uap->map, uap->addr, uap->size, uap->setmax, uap->prot);
	return((int)rv);
}
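/*
 * Illustrative sketch, not part of the original sources: roughly what a
 * user-level caller of the svm_allocate() compat interface above might
 * look like, assuming the syscall were wired into syscalls.master under a
 * hypothetical number SYS_svm_allocate.  The handler ignores the map
 * argument (it substitutes the caller's own map), copyin()s the address
 * hint through the user pointer, and copyout()s the chosen address on
 * success; the MACH kern_return_t comes back through the normal BSD
 * syscall error path.
 *
 *	vm_offset_t addr = 0;
 *
 *	if (syscall(SYS_svm_allocate, 0L, &addr, 8192L, 1L) == -1)
 *		err(1, "svm_allocate");		errno carries the MACH code
 */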
/*
 *	vm_inherit sets the inheritance of the specified range in the
 *	specified map.
 */
int
vm_inherit(map, start, size, new_inheritance)
	register vm_map_t map;
	vm_offset_t start;
	vm_size_t size;
	vm_inherit_t new_inheritance;
{
	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_inherit(map, trunc_page(start), round_page(start+size), new_inheritance));
}

/*
 *	vm_protect sets the protection of the specified range in the
 *	specified map.
 */
int
vm_protect(map, start, size, set_maximum, new_protection)
	register vm_map_t map;
	vm_offset_t start;
	vm_size_t size;
	boolean_t set_maximum;
	vm_prot_t new_protection;
{
	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	return(vm_map_protect(map, trunc_page(start), round_page(start+size), new_protection, set_maximum));
}
#endif

/*
 *	vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
int
vm_allocate(map, addr, size, anywhere)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	boolean_t anywhere;
{
	int result;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	if (anywhere)
		*addr = vm_map_min(map);
	else
		*addr = trunc_page(*addr);
	size = round_page(size);

	result = vm_map_find(map, NULL, (vm_offset_t) 0, addr, size, anywhere);

	return(result);
}

/*
 *	vm_deallocate deallocates the specified range of addresses in the
 *	specified address map.
 */
int
vm_deallocate(map, start, size)
	register vm_map_t map;
	vm_offset_t start;
	vm_size_t size;
{
	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map, trunc_page(start), round_page(start+size)));
}
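/*
 * Illustrative sketch, not part of the original file: a typical in-kernel
 * use of vm_allocate()/vm_deallocate() above to obtain and later release a
 * page-aligned, zero-filled scratch region.  kernel_map is the standard
 * kernel submap declared in vm_kern.h; mapping the failure to ENOMEM is an
 * assumption about the caller, not something these routines do themselves.
 *
 *	vm_offset_t va = 0;
 *	vm_size_t len = round_page(nbytes);
 *
 *	if (vm_allocate(kernel_map, &va, len, TRUE) != KERN_SUCCESS)
 *		return (ENOMEM);
 *	... use the zero-filled pages at va ...
 *	(void) vm_deallocate(kernel_map, va, len);
 */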
/*
 * Similar to vm_allocate but assigns an explicit pager.
 */
int
vm_allocate_with_pager(map, addr, size, anywhere, pager, poffset, internal)
	register vm_map_t map;
	register vm_offset_t *addr;
	register vm_size_t size;
	boolean_t anywhere;
	vm_pager_t pager;
	vm_offset_t poffset;
	boolean_t internal;
{
	register vm_object_t object;
	register int result;

	if (map == NULL)
		return(KERN_INVALID_ARGUMENT);

	*addr = trunc_page(*addr);
	size = round_page(size);

	/*
	 *	Lookup the pager/paging-space in the object cache.
	 *	If it's not there, then create a new object and cache
	 *	it.
	 */
	object = vm_object_lookup(pager);
	cnt.v_lookups++;
	if (object == NULL) {
		object = vm_object_allocate(size);
		/*
		 * From Mike Hibler: "unnamed anonymous objects should never
		 * be on the hash list ... For now you can just change
		 * vm_allocate_with_pager to not do vm_object_enter if this
		 * is an internal object ..."
		 */
		if (!internal)
			vm_object_enter(object, pager);
	} else
		cnt.v_hits++;
	if (internal)
		object->flags |= OBJ_INTERNAL;
	else {
		object->flags &= ~OBJ_INTERNAL;
		cnt.v_nzfod -= atop(size);
	}

	result = vm_map_find(map, object, poffset, addr, size, anywhere);
	if (result != KERN_SUCCESS)
		vm_object_deallocate(object);
	else if (pager != NULL)
		vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
	return(result);
}
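/*
 * Illustrative sketch, not from this file: how a caller in the mmap path
 * might use vm_allocate_with_pager() above to map a pager-backed region
 * rather than zero fill.  The pager is obtained from the separate pager
 * layer (vm_pager.h); the PG_DEVICE type and the argument values shown
 * here are assumptions for illustration only.
 *
 *	vm_pager_t pager;
 *	int rv;
 *
 *	pager = vm_pager_allocate(PG_DEVICE, handle, size, prot, foff);
 *	if (pager == NULL)
 *		return (KERN_INVALID_ARGUMENT);
 *	rv = vm_allocate_with_pager(map, &uva, size, FALSE, pager, foff, FALSE);
 *	if (rv != KERN_SUCCESS)
 *		vm_pager_deallocate(pager);
 */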