/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.9 (Berkeley) 5/17/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.h,v 1.54.2.5 2003/01/13 22:51:17 dillon Exp $
 * $DragonFly: src/sys/vm/vm_map.h,v 1.15 2005/01/20 18:00:38 dillon Exp $
 */

/*
 * Virtual memory map module definitions.
 */

#ifndef	_VM_MAP_
#define	_VM_MAP_

#include <sys/tree.h>
struct vm_map_rb_tree;
RB_PROTOTYPE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

/*
 * Types defined:
 *
 *	vm_map_t	the high-level address map data structure.
 *	vm_map_entry_t	an entry in an address map.
 */

typedef u_int vm_eflags_t;

/*
 * Objects which live in maps may be either VM objects, or
 * another map (called a "sharing map") which denotes read-write
 * sharing with other maps.
 */
union vm_map_object {
	struct vm_object *vm_object;	/* object object */
	struct vm_map *sub_map;		/* belongs to another map */
};

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	struct vm_map_entry *prev;	/* previous entry */
	struct vm_map_entry *next;	/* next entry */
	RB_ENTRY(vm_map_entry) rb_entry;
	vm_offset_t start;		/* start address */
	vm_offset_t end;		/* end address */
	vm_offset_t avail_ssize;	/* amt can grow if this is a stack */
	union vm_map_object object;	/* object I point to */
	vm_ooffset_t offset;		/* offset into object */
	vm_eflags_t eflags;		/* map entry flags */
	/* Only in task maps: */
	vm_prot_t protection;		/* protection code */
	vm_prot_t max_protection;	/* maximum protection */
	vm_inherit_t inheritance;	/* inheritance */
	int wired_count;		/* can be paged if = 0 */
	vm_pindex_t lastr;		/* last read */
};

#define MAP_ENTRY_NOSYNC		0x0001
#define MAP_ENTRY_IS_SUB_MAP		0x0002
#define MAP_ENTRY_COW			0x0004
#define MAP_ENTRY_NEEDS_COPY		0x0008
#define MAP_ENTRY_NOFAULT		0x0010
#define MAP_ENTRY_USER_WIRED		0x0020

#define MAP_ENTRY_BEHAV_NORMAL		0x0000	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL	0x0040	/* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM		0x0080	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED	0x00C0	/* future use */

#define MAP_ENTRY_BEHAV_MASK		0x00C0

#define MAP_ENTRY_IN_TRANSITION		0x0100	/* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP		0x0200	/* waiters in transition */
#define MAP_ENTRY_NOCOREDUMP		0x0400	/* don't include in a core */

/*
 * flags for vm_map_[un]clip_range()
 */
#define MAP_CLIP_NO_HOLES		0x0001

/*
 * This reserve count for vm_map_entry_reserve() should cover all nominal
 * single-insertion operations, including any necessary clipping.
 */
#define MAP_RESERVE_COUNT	4
#define MAP_RESERVE_SLOP	32

static __inline u_char
vm_map_entry_behavior(struct vm_map_entry *entry)
{
	return entry->eflags & MAP_ENTRY_BEHAV_MASK;
}

static __inline void
vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
		(behavior & MAP_ENTRY_BEHAV_MASK);
}
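
/*
 * Illustrative sketch (kept out of the build): how the access-pattern
 * hint stored in an entry's eflags is normally read and updated, e.g.
 * when servicing an madvise()-style request.  The helper name is
 * hypothetical; the entry is assumed to have been looked up while
 * holding the map lock.
 */
#if 0
static void
example_hint_sequential(struct vm_map_entry *entry)
{
	if (vm_map_entry_behavior(entry) != MAP_ENTRY_BEHAV_SEQUENTIAL)
		vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_SEQUENTIAL);
}
#endif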

/*
 * Maps are doubly-linked lists of map entries, kept sorted by address.
 * A single hint is provided to start searches again from the last
 * successful search, insertion, or removal.
 *
 * Note: the lock structure cannot be the first element of vm_map
 * because this can result in a lockup between two or more system
 * processes trying to kmem_alloc_wait().  kmem_alloc_wait() tsleeps on
 * 'map' and the corresponding free path wakes 'map' up, while the
 * underlying lockmgr lock also sleeps and wakes up on 'map'.  The
 * lockup occurs when the map fills up (the 'exec' map, for example).
 */
struct vm_map {
	struct vm_map_entry header;	/* List of entries */
	RB_HEAD(vm_map_rb_tree, vm_map_entry) rb_root;
	struct lock lock;		/* Lock for map data */
	int nentries;			/* Number of entries */
	vm_size_t size;			/* virtual size */
	u_char system_map;		/* Am I a system map? */
	u_char infork;			/* Am I in fork processing? */
	vm_map_entry_t hint;		/* hint for quick lookups */
	unsigned int timestamp;		/* Version number */
	vm_map_entry_t first_free;	/* First free space hint */
	struct pmap *pmap;		/* Physical map */
#define	min_offset	header.start
#define	max_offset	header.end
};
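
/*
 * Illustrative sketch (kept out of the build): walking the sorted entry
 * list.  The embedded 'header' entry is a sentinel, so traversal starts
 * at header.next and stops when the sentinel comes around again.  A
 * shared lock is enough for read-only traversal; the locking macros are
 * defined in the _KERNEL section below.  The function name is
 * hypothetical.
 */
#if 0
static vm_size_t
example_total_mapped(vm_map_t map)
{
	vm_map_entry_t entry;
	vm_size_t total = 0;

	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	     entry = entry->next)
		total += entry->end - entry->start;
	vm_map_unlock_read(map);
	return (total);
}
#endif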

/*
 * Registered upcall
 */
struct upcall;

struct vmupcall {
	struct vmupcall *vu_next;
	void	*vu_func;	/* user upcall function */
	void	*vu_data;	/* user data */
	void	*vu_ctx;	/* user context function */
	struct proc *vu_proc;	/* process that registered upcall */
	int	vu_id;		/* upcall identifier */
	int	vu_pending;	/* upcall request pending */
};

/*
 * Shareable process virtual address space.
 * May eventually be merged with vm_map.
 * Several fields are temporary (text, data stuff).
 */
struct vmspace {
	struct vm_map vm_map;	/* VM address map */
	struct pmap vm_pmap;	/* private physical map */
	int	vm_refcnt;	/* number of references */
	caddr_t	vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_ssize;	/* stack size (pages) */
	caddr_t vm_taddr;	/* user virtual address of text XXX */
	caddr_t vm_daddr;	/* user virtual address of data XXX */
	caddr_t vm_maxsaddr;	/* user VA at max stack growth */
	caddr_t vm_minsaddr;	/* user VA at top of stack */
#define vm_endcopy	vm_exitingcnt
	int	vm_exitingcnt;	/* several processes zombied in exit1 */
	int	vm_upccount;	/* number of registered upcalls */
	struct vmupcall *vm_upcalls;	/* registered upcalls */
};

/*
 * Resident executable holding structure.  A user program can take a
 * snapshot of just its VM address space (typically done just after
 * dynamic link libraries have completed loading) and register it as a
 * resident executable associated with the program binary's vnode, which
 * is also locked into memory.  Future execs of the vnode will start with
 * a copy of the resident vmspace instead of running the binary from
 * scratch, avoiding both the kernel ELF loader *AND* all shared library
 * mapping and relocation code, and will call a different entry point
 * (the stack pointer is reset to the top of the stack) supplied when the
 * vmspace was registered.
 */
struct vmresident {
	struct vnode	*vr_vnode;	/* associated vnode */
	TAILQ_ENTRY(vmresident) vr_link; /* linked list of res sts */
	struct vmspace	*vr_vmspace;	/* vmspace to fork */
	intptr_t	vr_entry_addr;	/* registered entry point */
	struct sysentvec *vr_sysent;	/* system call vects */
	int		vr_id;		/* registration id */
};

#ifdef _KERNEL
/*
 *	Macros:		vm_map_lock, etc.
 *	Function:
 *		Perform locking on the data portion of a map.  Note that
 *		these macros mimic procedure calls returning void.  The
 *		semicolon is supplied by the user of these macros, not
 *		by the macros themselves.  The macros can safely be used
 *		as unbraced elements in a higher level statement.
 */

#ifdef DIAGNOSTIC
/* #define MAP_LOCK_DIAGNOSTIC 1 */
#ifdef MAP_LOCK_DIAGNOSTIC
#define	vm_map_lock(map) \
	do { \
		printf("locking map LK_EXCLUSIVE: 0x%x\n", map); \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE, NULL, curthread) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#else
#define	vm_map_lock(map) \
	do { \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE, NULL, curthread) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#endif
#else
#define	vm_map_lock(map) \
	do { \
		lockmgr(&(map)->lock, LK_EXCLUSIVE, NULL, curthread); \
		(map)->timestamp++; \
	} while(0)
#endif /* DIAGNOSTIC */

#if defined(MAP_LOCK_DIAGNOSTIC)
#define	vm_map_unlock(map) \
	do { \
		printf("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread); \
	} while (0)
#define	vm_map_lock_read(map) \
	do { \
		printf("locking map LK_SHARED: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_SHARED, NULL, curthread); \
	} while (0)
#define	vm_map_unlock_read(map) \
	do { \
		printf("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread); \
	} while (0)
#else
#define	vm_map_unlock(map) \
	lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread)
#define	vm_map_lock_read(map) \
	lockmgr(&(map)->lock, LK_SHARED, NULL, curthread)
#define	vm_map_unlock_read(map) \
	lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread)
#endif

static __inline__ int
_vm_map_lock_upgrade(vm_map_t map, struct thread *td) {
	int error;
#if defined(MAP_LOCK_DIAGNOSTIC)
	printf("locking map LK_EXCLUPGRADE: 0x%x\n", map);
#endif
	error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, td);
	if (error == 0)
		map->timestamp++;
	return error;
}

#define	vm_map_lock_upgrade(map)	_vm_map_lock_upgrade(map, curthread)

#if defined(MAP_LOCK_DIAGNOSTIC)
#define	vm_map_lock_downgrade(map) \
	do { \
		printf("locking map LK_DOWNGRADE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_DOWNGRADE, NULL, curthread); \
	} while (0)
#else
#define	vm_map_lock_downgrade(map) \
	lockmgr(&(map)->lock, LK_DOWNGRADE, NULL, curthread)
#endif

#define	vm_map_set_recursive(map) \
	do { \
		lwkt_tokref ilock; \
		lwkt_gettoken(&ilock, &(map)->lock.lk_interlock); \
		(map)->lock.lk_flags |= LK_CANRECURSE; \
		lwkt_reltoken(&ilock); \
	} while(0)
#define	vm_map_clear_recursive(map) \
	do { \
		lwkt_tokref ilock; \
		lwkt_gettoken(&ilock, &(map)->lock.lk_interlock); \
		(map)->lock.lk_flags &= ~LK_CANRECURSE; \
		lwkt_reltoken(&ilock); \
	} while(0)
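
/*
 * Illustrative sketch (kept out of the build): the usual pattern for
 * starting with a shared lock and upgrading to an exclusive lock.  A
 * non-zero return from vm_map_lock_upgrade() means the upgrade failed
 * and the shared lock was lost, so the caller simply retries from the
 * top; the timestamp bumped on exclusive acquisition can also be used
 * to detect intervening changes.  The function name is hypothetical.
 */
#if 0
static void
example_inspect_then_modify(vm_map_t map)
{
retry:
	vm_map_lock_read(map);
	/* ... examine entries under the shared lock ... */
	if (vm_map_lock_upgrade(map)) {
		/* upgrade failed, shared lock already released */
		goto retry;
	}
	/* ... modify the map under the exclusive lock ... */
	vm_map_unlock(map);
}
#endif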

#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define	vm_map_min(map)		((map)->min_offset)
#define	vm_map_max(map)		((map)->max_offset)
#define	vm_map_pmap(map)	((map)->pmap)

static __inline struct pmap *
vmspace_pmap(struct vmspace *vmspace)
{
	return &vmspace->vm_pmap;
}

static __inline long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 * Number of kernel maps and entries to statically allocate, required
 * during boot to bootstrap the VM system.
 */
#define MAX_KMAP	10
#define	MAX_MAPENT	256

/*
 * Copy-on-write flags for vm_map operations
 */
#define MAP_UNUSED_01		0x0001
#define MAP_COPY_ON_WRITE	0x0002
#define MAP_NOFAULT		0x0004
#define MAP_PREFAULT		0x0008
#define MAP_PREFAULT_PARTIAL	0x0010
#define MAP_DISABLE_SYNCER	0x0020
#define MAP_DISABLE_COREDUMP	0x0100
#define MAP_PREFAULT_MADVISE	0x0200	/* from (user) madvise request */

/*
 * vm_fault option flags
 */
#define VM_FAULT_NORMAL		0	/* Nothing special */
#define VM_FAULT_CHANGE_WIRING	1	/* Change the wiring as appropriate */
#define VM_FAULT_USER_WIRE	2	/* Likewise, but for user purposes */
#define VM_FAULT_WIRE_MASK	(VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
#define VM_FAULT_HOLD		4	/* Hold the page */
#define VM_FAULT_DIRTY		8	/* Dirty the page */
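
/*
 * Illustrative sketch (kept out of the build): resolving an ordinary
 * read fault with the flags above.  vm_fault() itself is not declared
 * in this header; the call assumes the traditional
 * vm_fault(map, vaddr, fault_type, fault_flags) interface.  The
 * function name is hypothetical.
 */
#if 0
static int
example_resolve_read_fault(vm_map_t map, vm_offset_t va)
{
	return (vm_fault(map, trunc_page(va), VM_PROT_READ, VM_FAULT_NORMAL));
}
#endif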

#ifdef _KERNEL
boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t);
struct pmap;
struct globaldata;
void vm_map_entry_reserve_cpu_init(struct globaldata *gd);
int vm_map_entry_reserve(int);
int vm_map_entry_kreserve(int);
void vm_map_entry_release(int);
void vm_map_entry_krelease(int);
vm_map_t vm_map_create (struct pmap *, vm_offset_t, vm_offset_t);
int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t, int *);
int vm_map_find (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t, vm_prot_t, vm_prot_t, int);
int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_offset_t, vm_offset_t *);
int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t);
int vm_map_insert (vm_map_t, int *, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int);
int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
    vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t, int);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
int vm_map_wire (vm_map_t, vm_offset_t, vm_offset_t, int);
int vm_map_unwire (vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t, int *);
void vm_init2 (void);
int vm_uiomove (vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *);
void vm_freeze_copyopts (vm_object_t, vm_pindex_t, vm_pindex_t);
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int);
int vm_map_growstack (struct proc *p, vm_offset_t addr);
int vmspace_swap_count (struct vmspace *vmspace);
void vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, int *);

#endif
#endif /* _VM_MAP_ */