/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.9 (Berkeley) 5/17/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.h,v 1.54.2.5 2003/01/13 22:51:17 dillon Exp $
 * $DragonFly: src/sys/vm/vm_map.h,v 1.30 2007/04/29 18:25:41 dillon Exp $
 */

/*
 * Virtual memory map module definitions.
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _SYS_SYSREF_H_
#include <sys/sysref.h>
#endif
#ifndef _SYS_LOCK_H_
#include <sys/lock.h>
#endif
#ifndef _SYS_VKERNEL_H_
#include <sys/vkernel.h>
#endif
#ifndef _VM_VM_H_
#include <vm/vm.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_VM_OBJECT_H_
#include <vm/vm_object.h>
#endif
#ifndef _SYS_NULL_H_
#include <sys/_null.h>
#endif

struct vm_map_rb_tree;
RB_PROTOTYPE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

/*
 * Types defined:
 *
 *	vm_map_t	the high-level address map data structure.
 *	vm_map_entry_t	an entry in an address map.
 */

typedef u_int vm_flags_t;
typedef u_int vm_eflags_t;

/*
 * Objects which live in maps may be either VM objects, or
 * another map (called a "sharing map") which denotes read-write
 * sharing with other maps.
 */
union vm_map_object {
        struct vm_object *vm_object;    /* the backing VM object */
        struct vm_map *sub_map;         /* belongs to another map */
};

union vm_map_aux {
        vm_offset_t avail_ssize;        /* amt can grow if this is a stack */
        vpte_t master_pde;              /* virtual page table root */
};
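
/*
 * Illustrative sketch (not part of this header): how the object union is
 * typically interpreted, based on the owning entry's maptype (struct
 * vm_map_entry is defined below).  The helper name is hypothetical and
 * assumes the vm_maptype_t constants from <vm/vm.h>:
 *
 *	static __inline struct vm_object *
 *	example_entry_object(vm_map_entry_t entry)
 *	{
 *		if (entry->maptype == VM_MAPTYPE_SUBMAP)
 *			return (NULL);	// object.sub_map is live instead
 *		return (entry->object.vm_object); // may be NULL if not yet
 *						  // allocated
 *	}
 */
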
/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 *
 * When used with MAP_STACK, avail_ssize is used to determine the
 * limits of stack growth.
 *
 * When used with VM_MAPTYPE_VPAGETABLE, avail_ssize stores the
 * page directory index.
 */
struct vm_map_entry {
        struct vm_map_entry *prev;      /* previous entry */
        struct vm_map_entry *next;      /* next entry */
        RB_ENTRY(vm_map_entry) rb_entry;
        vm_offset_t start;              /* start address */
        vm_offset_t end;                /* end address */
        union vm_map_aux aux;           /* auxiliary data */
        union vm_map_object object;     /* object I point to */
        vm_ooffset_t offset;            /* offset into object */
        vm_eflags_t eflags;             /* map entry flags */
        vm_maptype_t maptype;           /* type of VM mapping */
        vm_prot_t protection;           /* protection code */
        vm_prot_t max_protection;       /* maximum protection */
        vm_inherit_t inheritance;       /* inheritance */
        int wired_count;                /* can be paged if = 0 */
};

#define MAP_ENTRY_NOSYNC		0x0001
#define MAP_ENTRY_STACK			0x0002
#define MAP_ENTRY_COW			0x0004
#define MAP_ENTRY_NEEDS_COPY		0x0008
#define MAP_ENTRY_NOFAULT		0x0010
#define MAP_ENTRY_USER_WIRED		0x0020

#define MAP_ENTRY_BEHAV_NORMAL		0x0000	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL	0x0040	/* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM		0x0080	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED	0x00C0	/* future use */

#define MAP_ENTRY_BEHAV_MASK		0x00C0

#define MAP_ENTRY_IN_TRANSITION		0x0100	/* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP		0x0200	/* waiters in transition */
#define MAP_ENTRY_NOCOREDUMP		0x0400	/* don't include in a core */
#define MAP_ENTRY_KSTACK		0x0800	/* guarded kernel stack */

/*
 * flags for vm_map_[un]clip_range()
 */
#define MAP_CLIP_NO_HOLES		0x0001

/*
 * This reserve count for vm_map_entry_reserve() should cover all nominal
 * single-insertion operations, including any necessary clipping.
 */
#define MAP_RESERVE_COUNT	4
#define MAP_RESERVE_SLOP	32

static __inline u_char
vm_map_entry_behavior(struct vm_map_entry *entry)
{
        return entry->eflags & MAP_ENTRY_BEHAV_MASK;
}

static __inline void
vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
{
        entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
                (behavior & MAP_ENTRY_BEHAV_MASK);
}
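
/*
 * Illustrative sketch (not part of this header): the accessors above are
 * how madvise(2)-style access hints end up stored in an entry's eflags.
 * Hypothetical helper, assuming the MADV_* constants from <sys/mman.h>:
 *
 *	static void
 *	example_apply_madvise(vm_map_entry_t entry, int advice)
 *	{
 *		switch (advice) {
 *		case MADV_SEQUENTIAL:
 *			vm_map_entry_set_behavior(entry,
 *			    MAP_ENTRY_BEHAV_SEQUENTIAL);
 *			break;
 *		case MADV_RANDOM:
 *			vm_map_entry_set_behavior(entry,
 *			    MAP_ENTRY_BEHAV_RANDOM);
 *			break;
 *		default:
 *			vm_map_entry_set_behavior(entry,
 *			    MAP_ENTRY_BEHAV_NORMAL);
 *			break;
 *		}
 *	}
 */
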
/*
 * Maps are doubly-linked lists of map entries, kept sorted by address.
 * A single hint is provided to start searches again from the last
 * successful search, insertion, or removal.
 *
 * Note: the lock structure cannot be the first element of vm_map because
 * doing so can produce a lockup between two or more system processes:
 * kmem_alloc_wait() tsleeps/wakes up on 'map' while the underlying lockmgr
 * lock also sleeps and wakes up on 'map'.  The lockup occurs when a map
 * fills up (the 'exec' map, for example).
 */
struct vm_map {
        struct vm_map_entry header;     /* List of entries */
        RB_HEAD(vm_map_rb_tree, vm_map_entry) rb_root;
        struct lock lock;               /* Lock for map data */
        int nentries;                   /* Number of entries */
        vm_size_t size;                 /* virtual size */
        u_char system_map;              /* Am I a system map? */
        u_char infork;                  /* Am I in fork processing? */
        vm_map_entry_t hint;            /* hint for quick lookups */
        unsigned int timestamp;         /* Version number */
        vm_map_entry_t first_free;      /* First free space hint */
        vm_flags_t flags;               /* flags for this vm_map */
        struct pmap *pmap;              /* Physical map */
#define min_offset	header.start
#define max_offset	header.end
};

/*
 * vm_flags_t values
 */
#define MAP_WIREFUTURE		0x01	/* wire all future pages */

/*
 * Registered upcall
 */
struct upcall;

struct vmupcall {
        struct vmupcall *vu_next;
        void    *vu_func;       /* user upcall function */
        void    *vu_data;       /* user data */
        void    *vu_ctx;        /* user context function */
        struct lwp *vu_lwp;     /* lwp that registered the upcall */
        int     vu_id;          /* upcall identifier */
        int     vu_pending;     /* upcall request pending */
};

/*
 * Shareable process virtual address space.
 *
 * Ref'd pointers from vmresident, proc
 */
struct vmspace {
        struct vm_map vm_map;   /* VM address map */
        struct pmap vm_pmap;    /* private physical map */
        int vm_unused01;
        caddr_t vm_shm;         /* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
        segsz_t vm_rssize;      /* current resident set size in pages */
        segsz_t vm_swrss;       /* resident set size before last swap */
        segsz_t vm_tsize;       /* text size (pages) XXX */
        segsz_t vm_dsize;       /* data size (pages) XXX */
        segsz_t vm_ssize;       /* stack size (pages) */
        caddr_t vm_taddr;       /* user virtual address of text XXX */
        caddr_t vm_daddr;       /* user virtual address of data XXX */
        caddr_t vm_maxsaddr;    /* user VA at max stack growth */
        caddr_t vm_minsaddr;    /* user VA at top of stack */
#define vm_endcopy	vm_exitingcnt
        int     vm_exitingcnt;  /* several processes zombied in exit1 */
        int     vm_upccount;    /* number of registered upcalls */
        int     vm_pagesupply;
        struct vmupcall *vm_upcalls;    /* registered upcalls */
        struct sysref vm_sysref;        /* sysref, refcnt, etc */
};

/*
 * Resident executable holding structure.  A user program can take a snapshot
 * of just its VM address space (typically done just after dynamic link
 * libraries have completed loading) and register it as a resident
 * executable associated with the program binary's vnode, which is also
 * locked into memory.  Future execs of the vnode will start with a copy
 * of the resident vmspace instead of running the binary from scratch,
 * avoiding both the kernel ELF loader *AND* all shared library mapping and
 * relocation code, and will call a different entry point (the stack pointer
 * is reset to the top of the stack) supplied when the vmspace was registered.
 */
struct vmresident {
        struct vnode    *vr_vnode;      /* associated vnode */
        TAILQ_ENTRY(vmresident) vr_link; /* linked list of resident structs */
        struct vmspace  *vr_vmspace;    /* vmspace to fork */
        intptr_t        vr_entry_addr;  /* registered entry point */
        struct sysentvec *vr_sysent;    /* system call vectors */
        int             vr_id;          /* registration id */
        int             vr_refs;        /* temporary refs */
};
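
/*
 * Illustrative sketch (not part of this header): the vm_startcopy /
 * vm_endcopy markers above bound the byte range duplicated when a new
 * vmspace is cloned from an old one on fork, roughly as follows
 * (hypothetical helper, assuming bcopy() from <sys/systm.h>):
 *
 *	static void
 *	example_vmspace_copyrange(struct vmspace *dst, struct vmspace *src)
 *	{
 *		bcopy(&src->vm_startcopy, &dst->vm_startcopy,
 *		    (caddr_t)&src->vm_endcopy - (caddr_t)&src->vm_startcopy);
 *	}
 */
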
#ifdef _KERNEL
/*
 * Macros:	vm_map_lock, etc.
 * Function:
 *	Perform locking on the data portion of a map.  Note that
 *	these macros mimic procedure calls returning void.  The
 *	semicolon is supplied by the user of these macros, not
 *	by the macros themselves.  The macros can safely be used
 *	as unbraced elements in a higher level statement.
 */

#define ASSERT_VM_MAP_LOCKED(map)	KKASSERT(lockowned(&(map)->lock))

#ifdef DIAGNOSTIC
/* #define MAP_LOCK_DIAGNOSTIC 1 */
#ifdef MAP_LOCK_DIAGNOSTIC
#define vm_map_lock(map) \
	do { \
		kprintf("locking map LK_EXCLUSIVE: %p\n", map); \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while (0)
#else
#define vm_map_lock(map) \
	do { \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while (0)
#endif
#else
#define vm_map_lock(map) \
	do { \
		lockmgr(&(map)->lock, LK_EXCLUSIVE); \
		(map)->timestamp++; \
	} while (0)
#endif /* DIAGNOSTIC */

#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_unlock(map) \
	do { \
		kprintf("locking map LK_RELEASE: %p\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#define vm_map_lock_read(map) \
	do { \
		kprintf("locking map LK_SHARED: %p\n", map); \
		lockmgr(&(map)->lock, LK_SHARED); \
	} while (0)
#define vm_map_unlock_read(map) \
	do { \
		kprintf("locking map LK_RELEASE: %p\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#else
#define vm_map_unlock(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#define vm_map_lock_read(map) \
	lockmgr(&(map)->lock, LK_SHARED)
#define vm_map_unlock_read(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#endif

static __inline__ int
vm_map_lock_upgrade(vm_map_t map) {
	int error;
#if defined(MAP_LOCK_DIAGNOSTIC)
	kprintf("locking map LK_EXCLUPGRADE: %p\n", map);
#endif
	error = lockmgr(&map->lock, LK_EXCLUPGRADE);
	if (error == 0)
		map->timestamp++;
	return error;
}

#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_lock_downgrade(map) \
	do { \
		kprintf("locking map LK_DOWNGRADE: %p\n", map); \
		lockmgr(&(map)->lock, LK_DOWNGRADE); \
	} while (0)
#else
#define vm_map_lock_downgrade(map) \
	lockmgr(&(map)->lock, LK_DOWNGRADE)
#endif

#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

/*
 * Must not block
 */
static __inline struct pmap *
vmspace_pmap(struct vmspace *vmspace)
{
	return &vmspace->vm_pmap;
}

static __inline long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}
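
/*
 * Illustrative sketch (not part of this header): a plausible retry pattern
 * around vm_map_lock_upgrade().  When the upgrade fails the shared lock is
 * lost, so the caller re-locks exclusively and compares the map timestamp
 * to detect intervening changes (vm_map_lock() itself adds one tick).
 * Hypothetical helper:
 *
 *	static int
 *	example_upgrade(vm_map_t map)
 *	{
 *		unsigned int ts = map->timestamp;
 *
 *		if (vm_map_lock_upgrade(map)) {
 *			vm_map_lock(map);	// shared lock was released
 *			if (map->timestamp != ts + 1)
 *				return (1);	// map changed; caller rescans
 *		}
 *		return (0);	// exclusive lock held, state unchanged
 *	}
 */
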
/*
 * Calculate the proportional RSS and return the accrued result.
 */
static __inline u_int
vmspace_president_count(struct vmspace *vmspace)
{
	vm_map_t map = &vmspace->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	u_int count = 0;

	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		switch (cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->object.vm_object) == NULL)
				break;
			if (object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP) {
				break;
			}
			/*
			 * Weight the object's resident pages by its
			 * aggregate pv entry count so objects shared by
			 * multiple address spaces accrue proportionally.
			 */
			if (object->agg_pv_list_count != 0) {
				count += object->resident_page_count /
					 object->agg_pv_list_count;
			}
			break;
		default:
			break;
		}
	}
	return (count);
}

/*
 * Number of kernel maps and entries to statically allocate, required
 * during boot to bootstrap the VM system.
 */
#define MAX_KMAP	10
#define MAX_MAPENT	2048	/* required to support up to 64 cpus */

/*
 * Copy-on-write flags for vm_map operations
 */
#define MAP_UNUSED_01		0x0001
#define MAP_COPY_ON_WRITE	0x0002
#define MAP_NOFAULT		0x0004
#define MAP_PREFAULT		0x0008
#define MAP_PREFAULT_PARTIAL	0x0010
#define MAP_DISABLE_SYNCER	0x0020
#define MAP_IS_STACK		0x0040
#define MAP_IS_KSTACK		0x0080
#define MAP_DISABLE_COREDUMP	0x0100
#define MAP_PREFAULT_MADVISE	0x0200	/* from (user) madvise request */

/*
 * vm_fault option flags
 */
#define VM_FAULT_NORMAL		0x00	/* Nothing special */
#define VM_FAULT_CHANGE_WIRING	0x01	/* Change the wiring as appropriate */
#define VM_FAULT_USER_WIRE	0x02	/* Likewise, but for user purposes */
#define VM_FAULT_BURST		0x04	/* Burst fault can be done */
#define VM_FAULT_DIRTY		0x08	/* Dirty the page */
#define VM_FAULT_UNSWAP		0x10	/* Remove backing store from the page */
#define VM_FAULT_WIRE_MASK	(VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
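
/*
 * Illustrative sketch (not part of this header): the usual call pattern
 * for the entry reserve API declared below.  Entries are reserved before
 * taking the map lock so an insertion, plus any clipping it requires, can
 * proceed without blocking in the allocator.  Hypothetical fragment:
 *
 *	int count;
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	// ... vm_map_insert(map, &count, ...) and/or clip operations ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */
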
#ifdef _KERNEL

extern struct sysref_class vmspace_sysref_class;

boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t,
		vm_prot_t, boolean_t);
struct pmap;
struct globaldata;
void vm_map_entry_allocate_object(vm_map_entry_t);
void vm_map_entry_reserve_cpu_init(struct globaldata *gd);
int vm_map_entry_reserve(int);
int vm_map_entry_kreserve(int);
void vm_map_entry_release(int);
void vm_map_entry_krelease(int);
vm_map_t vm_map_create (vm_map_t, struct pmap *, vm_offset_t, vm_offset_t);
int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t, int *);
int vm_map_find (vm_map_t, vm_object_t, vm_ooffset_t,
		 vm_offset_t *, vm_size_t, vm_size_t,
		 boolean_t, vm_maptype_t,
		 vm_prot_t, vm_prot_t,
		 int);
int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_size_t,
		      int, vm_offset_t *);
vm_offset_t vm_map_hint(struct proc *, vm_offset_t, vm_prot_t);
int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t, pmap_t);
int vm_map_insert (vm_map_t, int *, vm_object_t, vm_ooffset_t,
		   vm_offset_t, vm_offset_t,
		   vm_maptype_t,
		   vm_prot_t, vm_prot_t,
		   int);
int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *,
		vm_object_t *, vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t, int);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
int vm_map_wire (vm_map_t, vm_offset_t, vm_offset_t, int);
int vm_map_unwire (vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int, off_t);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t, int *);
void vm_init2 (void);
int vm_uiomove (vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *);
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, int,
		  vm_prot_t, vm_prot_t, int);
int vm_map_growstack (struct proc *p, vm_offset_t addr);
int vmspace_swap_count (struct vmspace *vmspace);
int vmspace_anonymous_count (struct vmspace *vmspace);
void vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size,
		int *);
void vm_map_transition_wait(vm_map_t map);

#endif
#endif /* _VM_VM_MAP_H_ */