/*	$OpenBSD: uvm_map.h,v 1.42 2009/08/28 00:40:03 ariane Exp $	*/
/*	$NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h    8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

#include <sys/rwlock.h>

#ifdef _KERNEL

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA); }

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
	if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA); }

/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x1	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x2	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x4	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x8	/* set prot to maxprot as we go */

#endif /* _KERNEL */

#include <uvm/uvm_anon.h>

/*
 * types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 */

/*
 * Objects which live in maps may be either VM objects, or another map
 * (called a "sharing map") which denotes read-write sharing with other maps.
 *
 * XXXCDC: private pager data goes here now
 */

union vm_map_object {
	struct uvm_object	*uvm_obj;	/* UVM OBJECT */
	struct vm_map		*sub_map;	/* belongs to another map */
};

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	RB_ENTRY(vm_map_entry)	rb_entry;	/* tree information */
	vaddr_t			ownspace;	/* free space after */
	vaddr_t			space;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	union vm_map_object	object;		/* object I point to */
	voff_t			offset;		/* offset into object */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define UVM_MAP_STATIC		0x01		/* static map entry */
#define UVM_MAP_KMEM		0x02		/* from kmem entry pool */
};

/*
 * Marks the map entry as a guard page, using vm_map_entry.etype.
 */
#define MAP_ET_KVAGUARD		0x10		/* guard entry */

#define VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)
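
/*
 * Illustrative sketch (not part of the original header): the clip macros
 * defined above are normally used when an operation applies to an address
 * range.  The caller looks up the entry containing the start of the range,
 * clips it, then walks the sorted entry list and clips the final entry at
 * the end of the range.  Here `map', `start' and `end' are hypothetical
 * caller-supplied values, and the map is assumed to be write-locked as the
 * macros require.
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock(map);
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		UVM_MAP_CLIP_START(map, entry, start);
 *		while (entry != &map->header && entry->start < end) {
 *			UVM_MAP_CLIP_END(map, entry, end);
 *			(operate on [entry->start, entry->end) here)
 *			entry = entry->next;
 *		}
 *	}
 *	vm_map_unlock(map);
 */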

/*
 * Maps are doubly-linked lists of map entries, kept sorted
 * by address.  A single hint is provided to start
 * searches again from the last successful search,
 * insertion, or removal.
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  However, it is sometimes required
 * to downgrade an exclusive lock to a shared lock, and upgrade to
 * an exclusive lock again (to perform error recovery).  However,
 * another thread *must not* queue itself to receive an exclusive
 * lock before we upgrade back to exclusive, otherwise the
 * error recovery becomes extremely difficult, if not impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check those flags.  All flags which are r/w must be set or
 * cleared while the `flags_lock' is asserted.  Additional locking
 * requirements are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked.  may be tested
 *				without asserting `flags_lock'.
 *
 *	VM_MAP_BUSY		r/w; may only be set when map is
 *				write-locked, may only be cleared by
 *				thread which set it, map read-locked
 *				or write-locked.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_WANTLOCK		r/w; may only be set when the map
 *				is busy, and thread is attempting
 *				to write-lock.  must be tested
 *				while `flags_lock' is asserted.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	struct rwlock		lock;		/* Lock for map data */
	RB_HEAD(uvm_tree, vm_map_entry) rbhead;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	simple_lock_data_t	ref_lock;	/* Lock for ref_count field */
	vm_map_entry_t		hint;		/* hint for quick lookups */
	simple_lock_data_t	hint_lock;	/* lock for hint storage */
	vm_map_entry_t		first_free;	/* First free space hint */
	int			flags;		/* flags */
	unsigned int		timestamp;	/* Version number */
#define min_offset		header.start
#define max_offset		header.end
};

/* vm_map flags */
#define VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
#define VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define VM_MAP_BUSY		0x08		/* rw: map is busy */
#define VM_MAP_WANTLOCK		0x10		/* rw: want to write-lock */

/* XXX: number of kernel maps and entries to statically allocate */

#if !defined(MAX_KMAPENT)
#define MAX_KMAPENT	1024	/* XXXCDC: no crash */
#endif	/* !defined MAX_KMAPENT */

#ifdef _KERNEL
#define vm_map_modflags(map, set, clear)				\
do {									\
	(map)->flags = ((map)->flags | (set)) & ~(clear);		\
} while (0)
#endif /* _KERNEL */
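
/*
 * Illustrative sketch (not taken from the original source): the busy-map
 * protocol described in the locking notes above.  A thread that must drop
 * to a shared lock and later regain the exclusive lock marks the map busy
 * first, so that no other thread can queue for an exclusive lock in the
 * meantime.  The vm_map_busy(), vm_map_unbusy(), vm_map_downgrade() and
 * vm_map_upgrade() operations are defined later in this file.
 *
 *	vm_map_lock(map);
 *	vm_map_busy(map);
 *	vm_map_downgrade(map);
 *	(the long-running operation runs with the map read-locked but busy)
 *	vm_map_upgrade(map);
 *	vm_map_unbusy(map);
 *	vm_map_unlock(map);
 */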

/*
 * Interrupt-safe maps must also be kept on a special list,
 * to assist uvm_fault() in avoiding locking problems.
 */
struct vm_map_intrsafe {
	struct vm_map	vmi_map;
	LIST_ENTRY(vm_map_intrsafe) vmi_list;
};

/*
 * globals:
 */

#ifdef _KERNEL

#ifdef PMAP_GROWKERNEL
extern vaddr_t	uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

void		uvm_map_deallocate(vm_map_t);

int		uvm_map_clean(vm_map_t, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(vm_map_t, vm_map_entry_t, vaddr_t);
void		uvm_map_clip_end(vm_map_t, vm_map_entry_t, vaddr_t);
vm_map_t	uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_extract(vm_map_t, vaddr_t, vsize_t,
		    vm_map_t, vaddr_t *, int);
vm_map_entry_t	uvm_map_findspace(vm_map_t, vaddr_t, vsize_t, vaddr_t *,
		    struct uvm_object *, voff_t, vsize_t, int);
vaddr_t		uvm_map_pie(vaddr_t);
vaddr_t		uvm_map_hint(struct proc *, vm_prot_t);
int		uvm_map_inherit(vm_map_t, vaddr_t, vaddr_t, vm_inherit_t);
int		uvm_map_advice(vm_map_t, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
boolean_t	uvm_map_lookup_entry(vm_map_t, vaddr_t, vm_map_entry_t *);
void		uvm_map_reference(vm_map_t);
int		uvm_map_replace(vm_map_t, vaddr_t, vaddr_t,
		    vm_map_entry_t, int);
int		uvm_map_reserve(vm_map_t, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *);
void		uvm_map_setup(vm_map_t, vaddr_t, vaddr_t, int);
int		uvm_map_submap(vm_map_t, vaddr_t, vaddr_t, vm_map_t);
#define		uvm_unmap(_m, _s, _e)	uvm_unmap_p(_m, _s, _e, 0)
void		uvm_unmap_p(vm_map_t, vaddr_t, vaddr_t, struct proc *);
void		uvm_unmap_detach(vm_map_entry_t, int);
void		uvm_unmap_remove(vm_map_t, vaddr_t, vaddr_t, vm_map_entry_t *,
		    struct proc *, boolean_t);

#endif /* _KERNEL */
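
/*
 * Illustrative sketch (not part of the original interface documentation):
 * the UVM_EXTRACT_* flags defined near the top of this file are passed to
 * uvm_map_extract() to move or share a range of mappings between maps.
 * This hypothetical fragment removes the range from `srcmap' and maps it
 * into `dstmap' with the maximum protection; `srcmap', `dstmap', `start'
 * and `len' are caller-supplied values, not names from this header.
 *
 *	vaddr_t dstaddr;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, start, len, dstmap, &dstaddr,
 *	    UVM_EXTRACT_REMOVE | UVM_EXTRACT_FIXPROT);
 *	if (error)
 *		return (error);
 */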

/*
 * VM map locking operations:
 *
 *	These operations perform locking on the data portion of the
 *	map.
 *
 *	vm_map_lock_try: try to lock a map, failing if it is already locked.
 *
 *	vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 *	vm_map_lock_read: acquire a shared (read) lock on a map.
 *
 *	vm_map_unlock: release an exclusive lock on a map.
 *
 *	vm_map_unlock_read: release a shared lock on a map.
 *
 *	vm_map_downgrade: downgrade an exclusive lock to a shared lock.
 *
 *	vm_map_upgrade: upgrade a shared lock to an exclusive lock.
 *
 *	vm_map_busy: mark a map as busy.
 *
 *	vm_map_unbusy: clear busy status on a map.
 *
 * Note that "intrsafe" maps use only exclusive, spin locks.  We simply
 * use the sleep lock's interlock for this.
 */

#ifdef _KERNEL
/* XXX: clean up later */
#include <sys/time.h>
#include <sys/proc.h>	/* for tsleep(), wakeup() */
#include <sys/systm.h>	/* for panic() */

static __inline boolean_t	vm_map_lock_try(vm_map_t);
static __inline void		vm_map_lock(vm_map_t);
extern const char vmmapbsy[];

static __inline boolean_t
vm_map_lock_try(struct vm_map *map)
{
	boolean_t rv;

	if (map->flags & VM_MAP_INTRSAFE) {
		rv = TRUE;
	} else {
		if (map->flags & VM_MAP_BUSY) {
			return (FALSE);
		}
		rv = (rw_enter(&map->lock, RW_WRITE|RW_NOSLEEP) == 0);
	}

	if (rv)
		map->timestamp++;

	return (rv);
}

static __inline void
vm_map_lock(struct vm_map *map)
{
	if (map->flags & VM_MAP_INTRSAFE)
		return;

	do {
		while (map->flags & VM_MAP_BUSY) {
			map->flags |= VM_MAP_WANTLOCK;
			tsleep(&map->flags, PVM, (char *)vmmapbsy, 0);
		}
	} while (rw_enter(&map->lock, RW_WRITE|RW_SLEEPFAIL) != 0);

	map->timestamp++;
}

#define vm_map_lock_read(map)	rw_enter_read(&(map)->lock)

#define vm_map_unlock(map)						\
do {									\
	if (((map)->flags & VM_MAP_INTRSAFE) == 0)			\
		rw_exit(&(map)->lock);					\
} while (0)

#define vm_map_unlock_read(map)	rw_exit_read(&(map)->lock)

#define vm_map_downgrade(map)	rw_enter(&(map)->lock, RW_DOWNGRADE)

#define vm_map_upgrade(map)						\
do {									\
	rw_exit_read(&(map)->lock);					\
	rw_enter_write(&(map)->lock);					\
} while (0)

#define vm_map_busy(map)						\
do {									\
	(map)->flags |= VM_MAP_BUSY;					\
} while (0)

#define vm_map_unbusy(map)						\
do {									\
	int oflags;							\
									\
	oflags = (map)->flags;						\
	(map)->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK);			\
	if (oflags & VM_MAP_WANTLOCK)					\
		wakeup(&(map)->flags);					\
} while (0)
#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */
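
/*
 * Illustrative sketch (not from the original source): code that must not
 * sleep can use the non-blocking vm_map_lock_try() defined above instead
 * of vm_map_lock(), and simply skip the map when it is busy or already
 * locked.  `map' is a hypothetical caller-supplied map.
 *
 *	if (vm_map_lock_try(map) == FALSE)
 *		return (FALSE);
 *	(modify the map here)
 *	vm_map_unlock(map);
 *	return (TRUE);
 */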