/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2003-2017 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.9 (Berkeley) 5/17/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.h,v 1.54.2.5 2003/01/13 22:51:17 dillon Exp $
 */

/*
 * Virtual memory map module definitions.
 */
#ifndef	_VM_VM_MAP_H_
#define	_VM_VM_MAP_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifdef _KERNEL
#ifndef _SYS_KERNEL_H_
#include <sys/kernel.h>	/* ticks */
#endif
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _SYS_LOCK_H_
#include <sys/lock.h>
#endif
#ifndef _SYS_VKERNEL_H_
#include <sys/vkernel.h>
#endif
#ifndef _VM_VM_H_
#include <vm/vm.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_VM_OBJECT_H_
#include <vm/vm_object.h>
#endif
#ifndef _SYS_NULL_H_
#include <sys/_null.h>
#endif

struct vm_map_rb_tree;
RB_PROTOTYPE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

/*
 * Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 */
typedef u_int vm_flags_t;
typedef u_int vm_eflags_t;

/*
 * A vm_map_entry may reference a VM object, a submap, or a uksmap
 * callback (a direct user-kernel shared map).
 */
union vm_map_object {
	struct vm_object *vm_object;	/* the VM object */
	struct vm_map *sub_map;		/* belongs to another map */
	int (*uksmap)(struct cdev *dev, vm_page_t fake);
	void	*map_object;		/* generic */
};

union vm_map_aux {
	vm_offset_t avail_ssize;	/* amt can grow if this is a stack */
	vpte_t master_pde;		/* virtual page table root */
	struct cdev *dev;
	void	*map_aux;
};

/*
 * vm_map_entry identifiers, used as a debugging aid
 */
typedef enum {
	VM_SUBSYS_UNKNOWN,
	VM_SUBSYS_KMALLOC,
	VM_SUBSYS_STACK,
	VM_SUBSYS_IMGACT,
	VM_SUBSYS_EFI,
	VM_SUBSYS_RESERVED,
	VM_SUBSYS_INIT,
	VM_SUBSYS_PIPE,
	VM_SUBSYS_PROC,
	VM_SUBSYS_SHMEM,
	VM_SUBSYS_SYSMAP,
	VM_SUBSYS_MMAP,
	VM_SUBSYS_BRK,
	VM_SUBSYS_BOGUS,
	VM_SUBSYS_BUF,
	VM_SUBSYS_BUFDATA,
	VM_SUBSYS_GD,
	VM_SUBSYS_IPIQ,
	VM_SUBSYS_PVENTRY,
	VM_SUBSYS_PML4,
	VM_SUBSYS_MAPDEV,
	VM_SUBSYS_ZALLOC,

	VM_SUBSYS_DM,
	VM_SUBSYS_CONTIG,
	VM_SUBSYS_DRM,
	VM_SUBSYS_DRM_GEM,
	VM_SUBSYS_DRM_SCAT,
	VM_SUBSYS_DRM_VMAP,
	VM_SUBSYS_DRM_TTM,
	VM_SUBSYS_HAMMER,

	VM_SUBSYS_LIMIT		/* end of list */
} vm_subsys_t;
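/*
 * Example (illustrative sketch only, not part of this header's API):
 * which member of union vm_map_object is valid depends on the entry's
 * maptype (the VM_MAPTYPE_* constants come from <vm/vm.h>), roughly:
 *
 *	switch (entry->maptype) {
 *	case VM_MAPTYPE_NORMAL:
 *	case VM_MAPTYPE_VPAGETABLE:
 *		object = entry->object.vm_object;   (may be NULL)
 *		break;
 *	case VM_MAPTYPE_SUBMAP:
 *		submap = entry->object.sub_map;
 *		break;
 *	case VM_MAPTYPE_UKSMAP:
 *		callback = entry->object.uksmap;    (aux.dev holds the cdev)
 *		break;
 *	}
 */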
/*
 * Address map entries consist of start and end addresses, a VM object
 * (or sharing map) and offset into that object, and user-exported
 * inheritance and protection information.  Also included is control
 * information for virtual copy operations.
 *
 * When used with MAP_STACK, avail_ssize is used to determine the
 * limits of stack growth.
 *
 * When used with VM_MAPTYPE_VPAGETABLE, avail_ssize stores the
 * page directory index.
 */
struct vm_map_entry {
	struct vm_map_entry *prev;	/* previous entry */
	struct vm_map_entry *next;	/* next entry */
	RB_ENTRY(vm_map_entry) rb_entry;
	vm_offset_t start;		/* start address */
	vm_offset_t end;		/* end address */
	union vm_map_aux aux;		/* auxiliary data */
	union vm_map_object object;	/* object I point to */
	vm_ooffset_t offset;		/* offset into object */
	vm_eflags_t eflags;		/* map entry flags */
	vm_maptype_t maptype;		/* type of VM mapping */
	vm_prot_t protection;		/* protection code */
	vm_prot_t max_protection;	/* maximum protection */
	vm_inherit_t inheritance;	/* inheritance */
	int wired_count;		/* can be paged if = 0 */
	vm_subsys_t id;			/* subsystem id */
};

#define MAP_ENTRY_NOSYNC		0x0001
#define MAP_ENTRY_STACK			0x0002
#define MAP_ENTRY_COW			0x0004
#define MAP_ENTRY_NEEDS_COPY		0x0008
#define MAP_ENTRY_NOFAULT		0x0010
#define MAP_ENTRY_USER_WIRED		0x0020

#define MAP_ENTRY_BEHAV_NORMAL		0x0000	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL	0x0040	/* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM		0x0080	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED	0x00C0	/* future use */

#define MAP_ENTRY_BEHAV_MASK		0x00C0

#define MAP_ENTRY_IN_TRANSITION		0x0100	/* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP		0x0200	/* waiters in transition */
#define MAP_ENTRY_NOCOREDUMP		0x0400	/* don't include in a core */
#define MAP_ENTRY_KSTACK		0x0800	/* guarded kernel stack */

/*
 * flags for vm_map_[un]clip_range()
 */
#define MAP_CLIP_NO_HOLES		0x0001

/*
 * This reserve count for vm_map_entry_reserve() should cover all nominal
 * single-insertion operations, including any necessary clipping.
 */
#define MAP_RESERVE_COUNT	4
#define MAP_RESERVE_SLOP	64
#define MAP_RESERVE_HYST	(MAP_RESERVE_SLOP - MAP_RESERVE_SLOP / 8)

/*
 * vm_map_lookup wflags
 */
#define FW_WIRED	0x0001
#define FW_DIDCOW	0x0002

static __inline u_char
vm_map_entry_behavior(struct vm_map_entry *entry)
{
	return entry->eflags & MAP_ENTRY_BEHAV_MASK;
}

static __inline void
vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
			(behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 * VA interlock for map (VPAGETABLE / vkernel support)
 */
struct vm_map_ilock {
	struct vm_map_ilock *next;
	int	flags;
	vm_offset_t ran_beg;
	vm_offset_t ran_end;	/* non-inclusive */
};

#define ILOCK_WAITING		0x00000001
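/*
 * Example (sketch; vm_map_interlock()/vm_map_deinterlock() are declared
 * near the bottom of this header): a caller typically interlocks a VA
 * range with an on-stack ilock before operating on it:
 *
 *	struct vm_map_ilock ilock;
 *
 *	vm_map_interlock(map, &ilock, start, end);
 *	... operate on the range [start, end) ...
 *	vm_map_deinterlock(map, &ilock);
 */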
/*
 * Hinting mechanism used by vm_map_findspace() to figure out where to start
 * an iteration looking for a hole big enough for the requested allocation.
 * This can be important in situations where large amounts of kernel memory
 * are being managed.  For example, if the system is managing tens of
 * thousands of processes or threads.
 *
 * If a hint is present it guarantees that no compatible hole exists prior
 * to the (start) address.  The (start) address itself is not necessarily
 * a hole.
 */
#define VM_MAP_FFCOUNT	4
#define VM_MAP_FFMASK	(VM_MAP_FFCOUNT - 1)

struct vm_map_freehint {
	vm_offset_t	start;
	vm_offset_t	length;
	vm_offset_t	align;
	int		unused01;
};
typedef struct vm_map_freehint vm_map_freehint_t;

/*
 * Maps are doubly-linked lists of map entries, kept sorted by address.
 * A single hint is provided to start searches again from the last
 * successful search, insertion, or removal.
 *
 * NOTE: The lock structure cannot be the first element of vm_map
 *	 because that can result in a lockup between two or more system
 *	 processes trying to kmem_alloc_wait():  kmem_alloc_wait() and
 *	 the corresponding free path tsleep on and wake up 'map', while
 *	 the underlying lockmgr lock also sleeps and wakes up on 'map'.
 *	 The lockup occurs when the map fills up (the 'exec' map, for
 *	 example).
 *
 * NOTE: The vm_map structure can be hard-locked with the lockmgr lock
 *	 or soft-serialized with the token, or both.
 */
struct vm_map {
	struct vm_map_entry header;	/* List of entries */
	RB_HEAD(vm_map_rb_tree, vm_map_entry) rb_root;
	struct lock lock;		/* Lock for map data */
	int nentries;			/* Number of entries */
	unsigned int timestamp;		/* Version number */
	vm_size_t size;			/* virtual size */
	u_char system_map;		/* Am I a system map? */
	u_char freehint_newindex;
	u_char unused02;
	u_char unused03;
	vm_flags_t flags;		/* flags for this vm_map */
	vm_map_freehint_t freehint[VM_MAP_FFCOUNT];
	struct pmap *pmap;		/* Physical map */
	u_int president_cache;		/* Remember president count */
	u_int president_ticks;		/* Save ticks for cache */
	struct vm_map_ilock *ilock_base;/* interlocks */
	struct spinlock ilock_spin;	/* interlocks (spinlock for) */
	struct lwkt_token token;	/* Soft serializer */
	vm_offset_t pgout_offset;	/* for RLIMIT_RSS scans */
#define min_offset		header.start
#define max_offset		header.end
};

/*
 * vm_flags_t values
 */
#define MAP_WIREFUTURE		0x0001	/* wire all future pages */

/*
 * Shareable process virtual address space.
 *
 * Refd pointers from vmresident, proc
 */
struct vmspace {
	struct vm_map vm_map;	/* VM address map */
	struct pmap vm_pmap;	/* private physical map */
	int vm_flags;
	caddr_t vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_ssize;	/* stack size (pages) */
	caddr_t vm_taddr;	/* user virtual address of text XXX */
	caddr_t vm_daddr;	/* user virtual address of data XXX */
	caddr_t vm_maxsaddr;	/* user VA at max stack growth */
	caddr_t vm_minsaddr;	/* user VA at max stack growth */
#define vm_endcopy	vm_unused01
	int	vm_unused01;
	int	vm_unused02;
	int	vm_pagesupply;
	u_int	vm_holdcnt;	/* temporary hold count and exit sequencing */
	u_int	vm_refcnt;	/* normal ref count */
};

#define VM_REF_DELETED		0x80000000U

#define VMSPACE_EXIT1		0x0001	/* partial exit */
#define VMSPACE_EXIT2		0x0002	/* full exit */

#define VMSPACE_HOLDEXIT	0x80000000
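/*
 * Example (sketch, hypothetical caller): vm_refcnt counts normal
 * references while the high bit (VM_REF_DELETED) latches the deleted
 * state, so a ref-acquire can detect a dying vmspace in one atomic op:
 *
 *	refs = atomic_fetchadd_int(&vm->vm_refcnt, 1);
 *	if (refs & VM_REF_DELETED)
 *		... vmspace is being torn down, back off ...
 */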
/*
 * Resident executable holding structure.  A user program can take a snapshot
 * of just its VM address space (typically done just after dynamic link
 * libraries have completed loading) and register it as a resident
 * executable associated with the program binary's vnode, which is also
 * locked into memory.  Future execs of the vnode will start with a copy
 * of the resident vmspace instead of running the binary from scratch,
 * avoiding both the kernel ELF loader *AND* all shared library mapping and
 * relocation code, and will call a different entry point (the stack pointer
 * is reset to the top of the stack) supplied when the vmspace was registered.
 */
struct vmresident {
	struct vnode	*vr_vnode;		/* associated vnode */
	TAILQ_ENTRY(vmresident) vr_link;	/* linked list of res sts */
	struct vmspace	*vr_vmspace;		/* vmspace to fork */
	intptr_t	vr_entry_addr;		/* registered entry point */
	struct sysentvec *vr_sysent;		/* system call vects */
	int		vr_id;			/* registration id */
	int		vr_refs;		/* temporary refs */
};

#ifdef _KERNEL
/*
 *	Macros:		vm_map_lock, etc.
 *	Function:
 *		Perform locking on the data portion of a map.  Note that
 *		these macros mimic procedure calls returning void.  The
 *		semicolon is supplied by the user of these macros, not
 *		by the macros themselves.  The macros can safely be used
 *		as unbraced elements in a higher level statement.
 */

#define ASSERT_VM_MAP_LOCKED(map)	KKASSERT(lockowned(&(map)->lock))

#ifdef DIAGNOSTIC
/* #define MAP_LOCK_DIAGNOSTIC 1 */
#ifdef MAP_LOCK_DIAGNOSTIC
#define	vm_map_lock(map) \
	do { \
		kprintf("locking map LK_EXCLUSIVE: %p\n", map); \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#else
#define	vm_map_lock(map) \
	do { \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#endif
#else
#define	vm_map_lock(map) \
	do { \
		lockmgr(&(map)->lock, LK_EXCLUSIVE); \
		(map)->timestamp++; \
	} while(0)
#endif /* DIAGNOSTIC */

#if defined(MAP_LOCK_DIAGNOSTIC)
#define	vm_map_unlock(map) \
	do { \
		kprintf("locking map LK_RELEASE: %p\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#define	vm_map_lock_read(map) \
	do { \
		kprintf("locking map LK_SHARED: %p\n", map); \
		lockmgr(&(map)->lock, LK_SHARED); \
	} while (0)
#define	vm_map_unlock_read(map) \
	do { \
		kprintf("locking map LK_RELEASE: %p\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#else
#define	vm_map_unlock(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#define	vm_map_lock_read(map) \
	lockmgr(&(map)->lock, LK_SHARED)
#define	vm_map_unlock_read(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#endif

#define vm_map_lock_read_try(map) \
	lockmgr(&(map)->lock, LK_SHARED | LK_NOWAIT)

static __inline__ int
vm_map_lock_read_to(vm_map_t map)
{
	int error;

#if defined(MAP_LOCK_DIAGNOSTIC)
	kprintf("locking map LK_SHARED: %p\n", map);
#endif
	error = lockmgr(&(map)->lock, LK_SHARED | LK_TIMELOCK);
	return error;
}
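/*
 * Example (sketch): typical exclusive locking around a map update.
 * The macros supply no trailing semicolon, so they read like calls:
 *
 *	vm_map_lock(map);
 *	... insert/delete/clip entries; vm_map_lock() already bumped
 *	    map->timestamp for us ...
 *	vm_map_unlock(map);
 */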
static __inline__ int
vm_map_lock_upgrade(vm_map_t map)
{
	int error;

#if defined(MAP_LOCK_DIAGNOSTIC)
	kprintf("locking map LK_EXCLUPGRADE: %p\n", map);
#endif
	error = lockmgr(&map->lock, LK_EXCLUPGRADE);
	if (error == 0)
		map->timestamp++;
	return error;
}

#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_lock_downgrade(map) \
	do { \
		kprintf("locking map LK_DOWNGRADE: %p\n", map); \
		lockmgr(&(map)->lock, LK_DOWNGRADE); \
	} while (0)
#else
#define vm_map_lock_downgrade(map) \
	lockmgr(&(map)->lock, LK_DOWNGRADE)
#endif

#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define	vm_map_min(map)		((map)->min_offset)
#define	vm_map_max(map)		((map)->max_offset)
#define	vm_map_pmap(map)	((map)->pmap)

/*
 * Must not block
 */
static __inline struct pmap *
vmspace_pmap(struct vmspace *vmspace)
{
	return &vmspace->vm_pmap;
}

/*
 * Caller must hold the vmspace->vm_map.token
 */
static __inline long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 * Calculates the proportional RSS and returns the accrued result.
 * This is a loose value for statistics/display purposes only and
 * will only be updated if we can acquire a non-blocking map lock.
 *
 * (used by userland or the kernel)
 */
static __inline u_int
vmspace_president_count(struct vmspace *vmspace)
{
	vm_map_t map = &vmspace->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	u_int count = 0;

#ifdef _KERNEL
	if (map->president_ticks == ticks / hz || vm_map_lock_read_try(map))
		return(map->president_cache);
#endif

	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		switch(cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->object.vm_object) == NULL)
				break;
			if (object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP) {
				break;
			}

#if 0
			/*
			 * synchronize non-zero case, contents of field
			 * can change at any time due to pmap ops.
			 */
			if ((n = object->agg_pv_list_count) != 0) {
#ifdef _KERNEL
				cpu_ccfence();
#endif
				count += object->resident_page_count / n;
			}
#endif
			break;
		default:
			break;
		}
	}
#ifdef _KERNEL
	map->president_cache = count;
	map->president_ticks = ticks / hz;
	vm_map_unlock_read(map);
#endif

	return(count);
}
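/*
 * Example (sketch): map->timestamp lets a caller that had to drop the
 * lock detect intervening changes instead of trusting a stale lookup:
 *
 *	version = map->timestamp;
 *	vm_map_unlock(map);
 *	... block (fault in a page, allocate memory, etc.) ...
 *	vm_map_lock(map);
 *	if (map->timestamp != version)
 *		... the map changed while unlocked, redo the lookup ...
 */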
/*
 * Number of kernel maps and entries to statically allocate, required
 * during boot to bootstrap the VM system.
 */
#define MAX_KMAP	10
#define MAX_MAPENT	(SMP_MAXCPU * 32 + 1024)

/*
 * Copy-on-write flags for vm_map operations
 */
#define MAP_UNUSED_01		0x0001
#define MAP_COPY_ON_WRITE	0x0002
#define MAP_NOFAULT		0x0004
#define MAP_PREFAULT		0x0008
#define MAP_PREFAULT_PARTIAL	0x0010
#define MAP_DISABLE_SYNCER	0x0020
#define MAP_IS_STACK		0x0040
#define MAP_IS_KSTACK		0x0080
#define MAP_DISABLE_COREDUMP	0x0100
#define MAP_PREFAULT_MADVISE	0x0200	/* from (user) madvise request */
#define MAP_PREFAULT_RELOCK	0x0200

/*
 * vm_fault option flags
 */
#define VM_FAULT_NORMAL		0x00	/* Nothing special */
#define VM_FAULT_CHANGE_WIRING	0x01	/* Change the wiring as appropriate */
#define VM_FAULT_USER_WIRE	0x02	/* Likewise, but for user purposes */
#define VM_FAULT_BURST		0x04	/* Burst fault can be done */
#define VM_FAULT_DIRTY		0x08	/* Dirty the page */
#define VM_FAULT_UNSWAP		0x10	/* Remove backing store from the page */
#define VM_FAULT_BURST_QUICK	0x20	/* Special case shared vm_object */
#define VM_FAULT_WIRE_MASK	(VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
#define VM_FAULT_USERMODE	0x40
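/*
 * Example (sketch; vm_fault() itself is declared elsewhere and these
 * are its fault_flags): callers or the flags together, and
 * VM_FAULT_WIRE_MASK groups the two wiring variants:
 *
 *	if (fault_flags & VM_FAULT_WIRE_MASK)
 *		... wiring fault, page must be resolved before return ...
 *	rv = vm_fault(map, trunc_page(va), VM_PROT_READ, VM_FAULT_NORMAL);
 */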
#ifdef _KERNEL

boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t,
		vm_prot_t, boolean_t);
struct pmap;
struct globaldata;
void vm_map_entry_allocate_object(vm_map_entry_t);
void vm_map_entry_reserve_cpu_init(struct globaldata *gd);
int vm_map_entry_reserve(int);
int vm_map_entry_kreserve(int);
void vm_map_entry_release(int);
void vm_map_entry_krelease(int);
int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t, int *);
int vm_map_find (vm_map_t, void *, void *,
		 vm_ooffset_t, vm_offset_t *, vm_size_t,
		 vm_size_t, boolean_t,
		 vm_maptype_t, vm_subsys_t id,
		 vm_prot_t, vm_prot_t, int);
int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_size_t,
		      int, vm_offset_t *);
vm_offset_t vm_map_hint(struct proc *, vm_offset_t, vm_prot_t);
int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t, pmap_t);
int vm_map_insert (vm_map_t, int *, void *, void *,
		   vm_ooffset_t, vm_offset_t, vm_offset_t,
		   vm_maptype_t, vm_subsys_t id,
		   vm_prot_t, vm_prot_t, int);
int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t,
		   vm_map_entry_t *, vm_object_t *,
		   vm_pindex_t *, vm_prot_t *, int *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t, int);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
int vm_map_wire (vm_map_t, vm_offset_t, vm_offset_t, int);
int vm_map_unwire (vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int, off_t);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t, int *);
void vm_init2 (void);
int vm_uiomove (vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *);
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, int,
		  vm_prot_t, vm_prot_t, int);
int vm_map_growstack (vm_map_t map, vm_offset_t addr);
vm_offset_t vmspace_swap_count (struct vmspace *vmspace);
vm_offset_t vmspace_anonymous_count (struct vmspace *vmspace);
void vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size,
		  int *);
void vm_map_transition_wait(vm_map_t map, int relock);

void vm_map_interlock(vm_map_t map, struct vm_map_ilock *ilock,
		  vm_offset_t ran_beg, vm_offset_t ran_end);
void vm_map_deinterlock(vm_map_t map, struct vm_map_ilock *ilock);

#if defined(__x86_64__) && defined(_KERNEL_VIRTUAL)
int vkernel_module_memory_alloc(vm_offset_t *, size_t);
void vkernel_module_memory_free(vm_offset_t, size_t);
#endif

#endif /* _KERNEL */
#endif /* _VM_VM_MAP_H_ */