/*	$NetBSD: uvm_map.h,v 1.33 2002/11/02 07:40:49 perry Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h    8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA); }

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
	if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA); }

/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x1	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x2	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x4	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x8	/* set prot to maxprot as we go */

#endif /* _KERNEL */

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	struct vm_map_entry *prev;	/* previous entry */
	struct vm_map_entry *next;	/* next entry */
	vaddr_t start;			/* start address */
	vaddr_t end;			/* end address */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map *sub_map;		/* belongs to another map */
	} object;			/* object I point to */
	voff_t offset;			/* offset into object */
	int etype;			/* entry type */
	vm_prot_t protection;		/* protection code */
	vm_prot_t max_protection;	/* maximum protection */
	vm_inherit_t inheritance;	/* inheritance */
	int wired_count;		/* can be paged if == 0 */
	struct vm_aref aref;		/* anonymous overlay */
	int advice;			/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t flags;			/* flags */

#define UVM_MAP_STATIC		0x01	/* static map entry */
#define UVM_MAP_KMEM		0x02	/* from kmem entry pool */

};

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)
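
/*
 * Example (illustrative sketch only, not part of this interface):
 * a typical range operation clips the first and last entries so that
 * a change applies to exactly [start, end).  The map must be locked
 * by the caller; `new_prot' is a hypothetical value standing in for
 * whatever attribute the operation changes.
 *
 *	struct vm_map_entry *entry;
 *
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		UVM_MAP_CLIP_START(map, entry, start);
 *		while (entry != &map->header && entry->start < end) {
 *			UVM_MAP_CLIP_END(map, entry, end);
 *			entry->protection = new_prot;
 *			entry = entry->next;
 *		}
 *	}
 */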
/*
 * Maps are doubly-linked lists of map entries, kept sorted
 * by address.  A single hint is provided to start
 * searches again from the last successful search,
 * insertion, or removal.
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  It is sometimes necessary to
 * downgrade an exclusive lock to a shared lock, and later upgrade
 * back to an exclusive lock (to perform error recovery).  However,
 * another thread *must not* queue itself to receive an exclusive
 * lock while we hold the shared lock, before we upgrade back to
 * exclusive; otherwise the error recovery becomes extremely
 * difficult, if not impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check.  All flags which are r/w must be set or cleared while
 * the `flags_lock' is asserted.  Additional locking requirements
 * are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked; may be tested
 *				without asserting `flags_lock'
 *
 *	VM_MAP_BUSY		r/w; may only be set when map is
 *				write-locked; may only be cleared by
 *				the thread which set it, with the map
 *				read-locked or write-locked; must be
 *				tested while `flags_lock' is asserted
 *
 *	VM_MAP_WANTLOCK		r/w; may only be set when the map
 *				is busy, and the thread is attempting
 *				to write-lock; must be tested
 *				while `flags_lock' is asserted
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	struct lock		lock;		/* Lock for map data */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	struct simplelock	ref_lock;	/* Lock for ref_count field */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct simplelock	hint_lock;	/* lock for hint storage */
	struct vm_map_entry *	first_free;	/* First free space hint */
	int			flags;		/* flags */
	struct simplelock	flags_lock;	/* Lock for flags field */
	unsigned int		timestamp;	/* Version number */
#define	min_offset		header.start
#define	max_offset		header.end
};

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define	VM_MAP_BUSY		0x08		/* rw: map is busy */
#define	VM_MAP_WANTLOCK		0x10		/* rw: want to write-lock */
#define	VM_MAP_DYING		0x20		/* rw: map is being destroyed */

/* XXX: number of kernel maps and entries to statically allocate */

#if !defined(MAX_KMAPENT)
#if (50 + (2 * NPROC) > 1000)
#define MAX_KMAPENT	(50 + (2 * NPROC))
#else
#define	MAX_KMAPENT	1000	/* XXXCDC: no crash */
#endif
#endif	/* !defined MAX_KMAPENT */

#ifdef _KERNEL
#define	vm_map_modflags(map, set, clear)				\
do {									\
	simple_lock(&(map)->flags_lock);				\
	(map)->flags = ((map)->flags | (set)) & ~(clear);		\
	simple_unlock(&(map)->flags_lock);				\
} while (/*CONSTCOND*/ 0)
#endif /* _KERNEL */

/*
 * handle inline options
 */

#ifdef UVM_MAP_INLINE
#define MAP_INLINE static __inline
#else
#define MAP_INLINE /* nothing */
#endif /* UVM_MAP_INLINE */

/*
 * globals:
 */

#ifdef _KERNEL

#ifdef PMAP_GROWKERNEL
extern vaddr_t	uvm_maxkaddr;
#endif
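
/*
 * Example (illustrative sketch only): setting and clearing a r/w map
 * flag with vm_map_modflags().  Per the locking notes above,
 * VM_MAP_WIREFUTURE may only be changed while the map is write-locked;
 * vm_map_modflags() itself takes `flags_lock' around the update.
 *
 *	vm_map_lock(map);
 *	vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);	(set)
 *	...
 *	vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);	(clear)
 *	vm_map_unlock(map);
 */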
/*
 * protos: the following prototypes define the interface to vm_map
 */

MAP_INLINE
void		uvm_map_deallocate __P((struct vm_map *));

int		uvm_map_clean __P((struct vm_map *, vaddr_t, vaddr_t, int));
void		uvm_map_clip_start __P((struct vm_map *, struct vm_map_entry *,
		    vaddr_t));
void		uvm_map_clip_end __P((struct vm_map *, struct vm_map_entry *,
		    vaddr_t));
MAP_INLINE
struct vm_map	*uvm_map_create __P((pmap_t, vaddr_t, vaddr_t, int));
int		uvm_map_extract __P((struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int));
struct vm_map_entry *uvm_map_findspace __P((struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int));
int		uvm_map_inherit __P((struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t));
int		uvm_map_advice __P((struct vm_map *, vaddr_t, vaddr_t, int));
void		uvm_map_init __P((void));
boolean_t	uvm_map_lookup_entry __P((struct vm_map *, vaddr_t,
		    struct vm_map_entry **));
MAP_INLINE
void		uvm_map_reference __P((struct vm_map *));
int		uvm_map_replace __P((struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry *, int));
int		uvm_map_reserve __P((struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *));
void		uvm_map_setup __P((struct vm_map *, vaddr_t, vaddr_t, int));
int		uvm_map_submap __P((struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *));
MAP_INLINE
void		uvm_unmap __P((struct vm_map *, vaddr_t, vaddr_t));
void		uvm_unmap_detach __P((struct vm_map_entry *, int));
void		uvm_unmap_remove __P((struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **));

#endif /* _KERNEL */

/*
 * VM map locking operations:
 *
 * These operations perform locking on the data portion of the
 * map.
 *
 * vm_map_lock_try: try to lock a map, failing if it is already locked.
 *
 * vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 * vm_map_lock_read: acquire a shared (read) lock on a map.
 *
 * vm_map_unlock: release an exclusive lock on a map.
 *
 * vm_map_unlock_read: release a shared lock on a map.
 *
 * vm_map_downgrade: downgrade an exclusive lock to a shared lock.
 *
 * vm_map_upgrade: upgrade a shared lock to an exclusive lock.
 *
 * vm_map_busy: mark a map as busy.
 *
 * vm_map_unbusy: clear busy status on a map.
 *
 * Note that "intrsafe" maps use only exclusive, spin locks.  We simply
 * use the sleep lock's interlock for this.  A sketch of the
 * busy/unbusy protocol follows this comment.
 */
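
/*
 * Example (illustrative sketch only, not a definitive recipe): the
 * busy/unbusy protocol used for error recovery.  Marking the map busy
 * while write-locked and then downgrading lets readers proceed, while
 * would-be writers wait on VM_MAP_BUSY; the later upgrade therefore
 * cannot race with a queued exclusive locker.
 *
 *	vm_map_lock(map);		(exclusive)
 *	vm_map_busy(map);		(writers now wait for unbusy)
 *	vm_map_downgrade(map);		(shared; readers may proceed)
 *	... perform the operation that may need error recovery ...
 *	vm_map_upgrade(map);		(exclusive again)
 *	vm_map_unbusy(map);		(wakes VM_MAP_WANTLOCK waiters)
 *	vm_map_unlock(map);
 */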

#ifdef _KERNEL
/* XXX: clean up later */
#include <sys/time.h>
#include <sys/proc.h>	/* for tsleep(), wakeup() */
#include <sys/systm.h>	/* for panic() */

static __inline boolean_t	vm_map_lock_try __P((struct vm_map *));
static __inline void		vm_map_lock __P((struct vm_map *));
extern const char vmmapbsy[];

/*
 * vm_map_lock_try: attempt an exclusive lock without sleeping;
 * fails if the map is busy or already locked.
 */
static __inline boolean_t
vm_map_lock_try(map)
	struct vm_map *map;
{
	boolean_t rv;

	if (map->flags & VM_MAP_INTRSAFE)
		rv = simple_lock_try(&map->lock.lk_interlock);
	else {
		simple_lock(&map->flags_lock);
		if (map->flags & VM_MAP_BUSY) {
			simple_unlock(&map->flags_lock);
			return (FALSE);
		}
		rv = (lockmgr(&map->lock, LK_EXCLUSIVE|LK_NOWAIT|LK_INTERLOCK,
		    &map->flags_lock) == 0);
	}

	if (rv)
		map->timestamp++;

	return (rv);
}

/*
 * vm_map_lock: acquire an exclusive lock, sleeping while the map
 * is busy (setting VM_MAP_WANTLOCK so vm_map_unbusy() wakes us).
 */
static __inline void
vm_map_lock(map)
	struct vm_map *map;
{
	int error;

	if (map->flags & VM_MAP_INTRSAFE) {
		simple_lock(&map->lock.lk_interlock);
		return;
	}

 try_again:
	simple_lock(&map->flags_lock);
	while (map->flags & VM_MAP_BUSY) {
		map->flags |= VM_MAP_WANTLOCK;
		ltsleep(&map->flags, PVM, vmmapbsy, 0, &map->flags_lock);
	}

	error = lockmgr(&map->lock, LK_EXCLUSIVE|LK_SLEEPFAIL|LK_INTERLOCK,
	    &map->flags_lock);

	if (error) {
		KASSERT(error == ENOLCK);
		goto try_again;
	}

	map->timestamp++;
}

#ifdef DIAGNOSTIC
#define	vm_map_lock_read(map)						\
do {									\
	if ((map)->flags & VM_MAP_INTRSAFE)				\
		panic("vm_map_lock_read: intrsafe map");		\
	(void) lockmgr(&(map)->lock, LK_SHARED, NULL);			\
} while (/*CONSTCOND*/ 0)
#else
#define	vm_map_lock_read(map)						\
	(void) lockmgr(&(map)->lock, LK_SHARED, NULL)
#endif

#define	vm_map_unlock(map)						\
do {									\
	if ((map)->flags & VM_MAP_INTRSAFE)				\
		simple_unlock(&(map)->lock.lk_interlock);		\
	else								\
		(void) lockmgr(&(map)->lock, LK_RELEASE, NULL);		\
} while (/*CONSTCOND*/ 0)

#define	vm_map_unlock_read(map)						\
	(void) lockmgr(&(map)->lock, LK_RELEASE, NULL)

#define	vm_map_downgrade(map)						\
	(void) lockmgr(&(map)->lock, LK_DOWNGRADE, NULL)

#ifdef DIAGNOSTIC
#define	vm_map_upgrade(map)						\
do {									\
	if (lockmgr(&(map)->lock, LK_UPGRADE, NULL) != 0)		\
		panic("vm_map_upgrade: failed to upgrade lock");	\
} while (/*CONSTCOND*/ 0)
#else
#define	vm_map_upgrade(map)						\
	(void) lockmgr(&(map)->lock, LK_UPGRADE, NULL)
#endif

#define	vm_map_busy(map)						\
do {									\
	simple_lock(&(map)->flags_lock);				\
	(map)->flags |= VM_MAP_BUSY;					\
	simple_unlock(&(map)->flags_lock);				\
} while (/*CONSTCOND*/ 0)

#define	vm_map_unbusy(map)						\
do {									\
	int oflags;							\
									\
	simple_lock(&(map)->flags_lock);				\
	oflags = (map)->flags;						\
	(map)->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK);			\
	simple_unlock(&(map)->flags_lock);				\
	if (oflags & VM_MAP_WANTLOCK)					\
		wakeup(&(map)->flags);					\
} while (/*CONSTCOND*/ 0)
#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define	vm_map_min(map)		((map)->min_offset)
#define	vm_map_max(map)		((map)->max_offset)
#define	vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */