/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.9 (Berkeley) 5/17/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.h,v 1.54.2.5 2003/01/13 22:51:17 dillon Exp $
 */

/*
 * Virtual memory map module definitions.
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifdef _KERNEL
#ifndef _SYS_KERNEL_H_
#include <sys/kernel.h>		/* ticks */
#endif
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _SYS_SYSREF_H_
#include <sys/sysref.h>
#endif
#ifndef _SYS_LOCK_H_
#include <sys/lock.h>
#endif
#ifndef _SYS_VKERNEL_H_
#include <sys/vkernel.h>
#endif
#ifndef _VM_VM_H_
#include <vm/vm.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_VM_OBJECT_H_
#include <vm/vm_object.h>
#endif
#ifndef _SYS_NULL_H_
#include <sys/_null.h>
#endif

struct vm_map_rb_tree;
RB_PROTOTYPE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

/*
 * Types defined:
 *
 *	vm_map_t	the high-level address map data structure.
 *	vm_map_entry_t	an entry in an address map.
 */

typedef u_int vm_flags_t;
typedef u_int vm_eflags_t;

/*
 * A vm_map_entry may reference a VM object, a submap, or a uksmap
 * (a direct user-kernel shared map).
 */
union vm_map_object {
	struct vm_object *vm_object;	/* backing VM object */
	struct vm_map *sub_map;		/* belongs to another map */
	int	(*uksmap)(struct cdev *dev, vm_page_t fake);
	void	*map_object;		/* generic */
};

union vm_map_aux {
	vm_offset_t avail_ssize;	/* amount a stack may grow */
	vpte_t master_pde;		/* virtual page table root */
	struct cdev *dev;
	void	*map_aux;
};

/*
 * vm_map_entry identifiers, used as a debugging aid
 */
typedef enum {
	VM_SUBSYS_UNKNOWN,
	VM_SUBSYS_KMALLOC,
	VM_SUBSYS_STACK,
	VM_SUBSYS_IMGACT,
	VM_SUBSYS_EFI,
	VM_SUBSYS_RESERVED,
	VM_SUBSYS_INIT,
	VM_SUBSYS_PIPE,
	VM_SUBSYS_PROC,
	VM_SUBSYS_SHMEM,
	VM_SUBSYS_SYSMAP,
	VM_SUBSYS_MMAP,
	VM_SUBSYS_BRK,
	VM_SUBSYS_BOGUS,
	VM_SUBSYS_BUF,
	VM_SUBSYS_BUFDATA,
	VM_SUBSYS_GD,
	VM_SUBSYS_IPIQ,
	VM_SUBSYS_PVENTRY,
	VM_SUBSYS_PML4,
	VM_SUBSYS_MAPDEV,
	VM_SUBSYS_ZALLOC,

	VM_SUBSYS_DM,
	VM_SUBSYS_CONTIG,
	VM_SUBSYS_DRM,
	VM_SUBSYS_DRM_GEM,
	VM_SUBSYS_DRM_SCAT,
	VM_SUBSYS_DRM_VMAP,
	VM_SUBSYS_DRM_TTM,
	VM_SUBSYS_HAMMER,

	VM_SUBSYS_LIMIT		/* end of list */
} vm_subsys_t;

/*
 * Address map entries consist of start and end addresses, a VM object
 * (or sharing map) and offset into that object, and user-exported
 * inheritance and protection information.  Also included is control
 * information for virtual copy operations.
 *
 * When used with MAP_STACK, avail_ssize is used to determine the limits
 * of stack growth.
 *
 * When used with VM_MAPTYPE_VPAGETABLE, avail_ssize stores the page
 * directory index.
 */
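/*
 * Purely illustrative sketch (not part of this header's API): how the
 * fields of one entry might describe an anonymous, writable, copy-on-write
 * region.  The addresses and offset below are invented for the example;
 * the constants are the ones defined in this header and in <vm/vm.h>.
 *
 *	entry->start            = 0x0000000000400000;
 *	entry->end              = 0x0000000000500000;	(a 1MB region)
 *	entry->object.vm_object = <anonymous OBJT_DEFAULT object>;
 *	entry->offset           = 0;
 *	entry->maptype          = VM_MAPTYPE_NORMAL;
 *	entry->protection       = VM_PROT_READ | VM_PROT_WRITE;
 *	entry->max_protection   = VM_PROT_ALL;
 *	entry->inheritance      = VM_INHERIT_COPY;
 *	entry->eflags          |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
 */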
struct vm_map_entry {
	struct vm_map_entry *prev;	/* previous entry */
	struct vm_map_entry *next;	/* next entry */
	RB_ENTRY(vm_map_entry) rb_entry;
	vm_offset_t start;		/* start address */
	vm_offset_t end;		/* end address */
	union vm_map_aux aux;		/* auxiliary data */
	union vm_map_object object;	/* object I point to */
	vm_ooffset_t offset;		/* offset into object */
	vm_eflags_t eflags;		/* map entry flags */
	vm_maptype_t maptype;		/* type of VM mapping */
	vm_prot_t protection;		/* protection code */
	vm_prot_t max_protection;	/* maximum protection */
	vm_inherit_t inheritance;	/* inheritance */
	int wired_count;		/* can be paged if = 0 */
	vm_subsys_t id;			/* subsystem id */
};

#define MAP_ENTRY_NOSYNC		0x0001
#define MAP_ENTRY_STACK			0x0002
#define MAP_ENTRY_COW			0x0004
#define MAP_ENTRY_NEEDS_COPY		0x0008
#define MAP_ENTRY_NOFAULT		0x0010
#define MAP_ENTRY_USER_WIRED		0x0020

#define MAP_ENTRY_BEHAV_NORMAL		0x0000	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL	0x0040	/* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM		0x0080	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED	0x00C0	/* future use */

#define MAP_ENTRY_BEHAV_MASK		0x00C0

#define MAP_ENTRY_IN_TRANSITION		0x0100	/* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP		0x0200	/* waiters in transition */
#define MAP_ENTRY_NOCOREDUMP		0x0400	/* don't include in a core */
#define MAP_ENTRY_KSTACK		0x0800	/* guarded kernel stack */

/*
 * flags for vm_map_[un]clip_range()
 */
#define MAP_CLIP_NO_HOLES		0x0001

/*
 * This reserve count for vm_map_entry_reserve() should cover all nominal
 * single-insertion operations, including any necessary clipping.
 */
#define MAP_RESERVE_COUNT	4
#define MAP_RESERVE_SLOP	32

static __inline u_char
vm_map_entry_behavior(struct vm_map_entry *entry)
{
	return entry->eflags & MAP_ENTRY_BEHAV_MASK;
}

static __inline void
vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
			(behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 * VA interlock for map (VPAGETABLE / vkernel support)
 */
struct vm_map_ilock {
	struct vm_map_ilock *next;
	int	flags;
	vm_offset_t ran_beg;
	vm_offset_t ran_end;	/* non-inclusive */
};

#define ILOCK_WAITING		0x00000001
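/*
 * Minimal usage sketch (illustrative only, not a normative example): a
 * range interlock is typically stack-allocated, established over a VA
 * range with vm_map_interlock(), and torn down with vm_map_deinterlock()
 * once the work on that range is complete.  Both functions are declared
 * later in this header; error handling and the surrounding map locking
 * are elided here.
 *
 *	struct vm_map_ilock ilock;
 *
 *	vm_map_interlock(map, &ilock, start, end);
 *	... operate on the [start, end) range ...
 *	vm_map_deinterlock(map, &ilock);
 */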
/*
 * Maps are doubly-linked lists of map entries, kept sorted by address.
 * A single hint is provided to start searches again from the last
 * successful search, insertion, or removal.
 *
 * NOTE: The lock structure cannot be the first element of vm_map
 *	 because this can result in a running lockup between two or more
 *	 system processes trying to kmem_alloc_wait() due to kmem_alloc_wait()
 *	 and free tsleep/waking up 'map' and the underlying lockmgr also
 *	 sleeping and waking up on 'map'.  The lockup occurs when the map
 *	 fills up.  The 'exec' map, for example.
 *
 * NOTE: The vm_map structure can be hard-locked with the lockmgr lock
 *	 or soft-serialized with the token, or both.
 */
struct vm_map {
	struct vm_map_entry header;	/* List of entries */
	RB_HEAD(vm_map_rb_tree, vm_map_entry) rb_root;
	struct lock lock;		/* Lock for map data */
	int nentries;			/* Number of entries */
	vm_size_t size;			/* virtual size */
	u_char system_map;		/* Am I a system map? */
	vm_map_entry_t hint;		/* hint for quick lookups */
	unsigned int timestamp;		/* Version number */
	vm_map_entry_t first_free;	/* First free space hint */
	vm_flags_t flags;		/* flags for this vm_map */
	struct pmap *pmap;		/* Physical map */
	u_int president_cache;		/* Remember president count */
	u_int president_ticks;		/* Save ticks for cache */
	struct vm_map_ilock *ilock_base;/* interlocks */
	struct spinlock ilock_spin;	/* interlocks (spinlock for) */
	struct lwkt_token token;	/* Soft serializer */
	vm_offset_t pgout_offset;	/* for RLIMIT_RSS scans */
#define min_offset	header.start
#define max_offset	header.end
};

/*
 * vm_flags_t values
 */
#define MAP_WIREFUTURE		0x0001	/* wire all future pages */

/*
 * Shareable process virtual address space.
 *
 * Refd pointers from vmresident, proc
 */
struct vmspace {
	struct vm_map vm_map;	/* VM address map */
	struct pmap vm_pmap;	/* private physical map */
	int	vm_flags;
	caddr_t vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_ssize;	/* stack size (pages) */
	caddr_t vm_taddr;	/* user virtual address of text XXX */
	caddr_t vm_daddr;	/* user virtual address of data XXX */
	caddr_t vm_maxsaddr;	/* user VA at max stack growth */
	caddr_t vm_minsaddr;	/* user VA at top of stack */
#define vm_endcopy	vm_unused01
	int	vm_unused01;
	int	vm_unused02;
	int	vm_pagesupply;
	u_int	vm_holdcnt;	/* temporary hold count and exit sequencing */
	u_int	vm_refcnt;	/* normal ref count */
};

#define VM_REF_DELETED		0x80000000U

#define VMSPACE_EXIT1		0x0001	/* partial exit */
#define VMSPACE_EXIT2		0x0002	/* full exit */

#define VMSPACE_HOLDEXIT	0x80000000

/*
 * Resident executable holding structure.  A user program can take a snapshot
 * of just its VM address space (typically done just after dynamic link
 * libraries have completed loading) and register it as a resident
 * executable associated with the program binary's vnode, which is also
 * locked into memory.  Future execs of the vnode will start with a copy
 * of the resident vmspace instead of running the binary from scratch,
 * avoiding both the kernel ELF loader *AND* all shared library mapping and
 * relocation code, and will call a different entry point (the stack pointer
 * is reset to the top of the stack) supplied when the vmspace was registered.
 */
struct vmresident {
	struct vnode	*vr_vnode;		/* associated vnode */
	TAILQ_ENTRY(vmresident) vr_link;	/* linked list of res sts */
	struct vmspace	*vr_vmspace;		/* vmspace to fork */
	intptr_t	vr_entry_addr;		/* registered entry point */
	struct sysentvec *vr_sysent;		/* system call vects */
	int		vr_id;			/* registration id */
	int		vr_refs;		/* temporary refs */
};

#ifdef _KERNEL
/*
 * Macros:	vm_map_lock, etc.
 * Function:
 *	Perform locking on the data portion of a map.  Note that
 *	these macros mimic procedure calls returning void.  The
 *	semicolon is supplied by the user of these macros, not
 *	by the macros themselves.  The macros can safely be used
 *	as unbraced elements in a higher level statement.
 */

#define ASSERT_VM_MAP_LOCKED(map)	KKASSERT(lockowned(&(map)->lock))

#ifdef DIAGNOSTIC
/* #define MAP_LOCK_DIAGNOSTIC 1 */
#ifdef MAP_LOCK_DIAGNOSTIC
#define vm_map_lock(map) \
	do { \
		kprintf ("locking map LK_EXCLUSIVE: 0x%x\n", map); \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#else
#define vm_map_lock(map) \
	do { \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#endif
#else
#define vm_map_lock(map) \
	do { \
		lockmgr(&(map)->lock, LK_EXCLUSIVE); \
		(map)->timestamp++; \
	} while(0)
#endif /* DIAGNOSTIC */

#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_unlock(map) \
	do { \
		kprintf ("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#define vm_map_lock_read(map) \
	do { \
		kprintf ("locking map LK_SHARED: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_SHARED); \
	} while (0)
#define vm_map_unlock_read(map) \
	do { \
		kprintf ("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#else
#define vm_map_unlock(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#define vm_map_lock_read(map) \
	lockmgr(&(map)->lock, LK_SHARED)
#define vm_map_unlock_read(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#endif

#define vm_map_lock_read_try(map) \
	lockmgr(&(map)->lock, LK_SHARED | LK_NOWAIT)

static __inline__ int
vm_map_lock_read_to(vm_map_t map)
{
	int error;

#if defined(MAP_LOCK_DIAGNOSTIC)
	kprintf ("locking map LK_SHARED: 0x%x\n", map);
#endif
	error = lockmgr(&(map)->lock, LK_SHARED | LK_TIMELOCK);
	return error;
}

static __inline__ int
vm_map_lock_upgrade(vm_map_t map) {
	int error;

#if defined(MAP_LOCK_DIAGNOSTIC)
	kprintf("locking map LK_EXCLUPGRADE: 0x%x\n", map);
#endif
	error = lockmgr(&map->lock, LK_EXCLUPGRADE);
	if (error == 0)
		map->timestamp++;
	return error;
}

#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_lock_downgrade(map) \
	do { \
		kprintf ("locking map LK_DOWNGRADE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_DOWNGRADE); \
	} while (0)
#else
#define vm_map_lock_downgrade(map) \
	lockmgr(&(map)->lock, LK_DOWNGRADE)
#endif

#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

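/*
 * Minimal usage sketch (illustrative only): because the lock macros above
 * behave like void procedure calls, the caller supplies the semicolon and
 * may use them as ordinary statements.  vm_map_lookup_entry() is declared
 * further down in this header; error handling is elided.
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock(map);
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... modify *entry while holding the exclusive lock ...
 *	}
 *	vm_map_unlock(map);
 *
 * Read-only scans pair vm_map_lock_read()/vm_map_unlock_read() in the
 * same way.
 */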
/*
 * Must not block
 */
static __inline struct pmap *
vmspace_pmap(struct vmspace *vmspace)
{
	return &vmspace->vm_pmap;
}

/*
 * Caller must hold the vmspace->vm_map.token
 */
static __inline long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 * Calculates the proportional RSS and returns the accrued result.  This
 * is a loose value for statistics/display purposes only and will only be
 * updated if we can acquire a non-blocking map lock.
 *
 * (used by userland or the kernel)
 */
static __inline u_int
vmspace_president_count(struct vmspace *vmspace)
{
	vm_map_t map = &vmspace->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	u_int count = 0;

#ifdef _KERNEL
	if (map->president_ticks == ticks / hz || vm_map_lock_read_try(map))
		return(map->president_cache);
#endif

	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		switch(cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->object.vm_object) == NULL)
				break;
			if (object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP) {
				break;
			}

#if 0
			/*
			 * synchronize non-zero case, contents of field
			 * can change at any time due to pmap ops.
			 */
			if ((n = object->agg_pv_list_count) != 0) {
#ifdef _KERNEL
				cpu_ccfence();
#endif
				count += object->resident_page_count / n;
			}
#endif
			break;
		default:
			break;
		}
	}
#ifdef _KERNEL
	map->president_cache = count;
	map->president_ticks = ticks / hz;
	vm_map_unlock_read(map);
#endif

	return(count);
}
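/*
 * Minimal usage sketch (illustrative only, kernel context assumed): reading
 * a vmspace's resident page count while holding the map token, as required
 * by vmspace_resident_count() above.  lwkt_gettoken()/lwkt_reltoken() are
 * assumed here to be the usual DragonFly token interfaces.
 *
 *	long rss;
 *
 *	lwkt_gettoken(&vm->vm_map.token);
 *	rss = vmspace_resident_count(vm);
 *	lwkt_reltoken(&vm->vm_map.token);
 */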
/*
 * Number of kernel maps and entries to statically allocate, required
 * during boot to bootstrap the VM system.
 */
#define MAX_KMAP	10
#define MAX_MAPENT	(SMP_MAXCPU * 32 + 1024)

/*
 * Copy-on-write flags for vm_map operations
 */
#define MAP_UNUSED_01		0x0001
#define MAP_COPY_ON_WRITE	0x0002
#define MAP_NOFAULT		0x0004
#define MAP_PREFAULT		0x0008
#define MAP_PREFAULT_PARTIAL	0x0010
#define MAP_DISABLE_SYNCER	0x0020
#define MAP_IS_STACK		0x0040
#define MAP_IS_KSTACK		0x0080
#define MAP_DISABLE_COREDUMP	0x0100
#define MAP_PREFAULT_MADVISE	0x0200	/* from (user) madvise request */
#define MAP_PREFAULT_RELOCK	0x0200

/*
 * vm_fault option flags
 */
#define VM_FAULT_NORMAL		0x00	/* Nothing special */
#define VM_FAULT_CHANGE_WIRING	0x01	/* Change the wiring as appropriate */
#define VM_FAULT_USER_WIRE	0x02	/* Likewise, but for user purposes */
#define VM_FAULT_BURST		0x04	/* Burst fault can be done */
#define VM_FAULT_DIRTY		0x08	/* Dirty the page */
#define VM_FAULT_UNSWAP		0x10	/* Remove backing store from the page */
#define VM_FAULT_BURST_QUICK	0x20	/* Special case shared vm_object */
#define VM_FAULT_WIRE_MASK	(VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
#define VM_FAULT_USERMODE	0x40

#ifdef _KERNEL

boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t,
		vm_prot_t, boolean_t);
struct pmap;
struct globaldata;
void vm_map_entry_allocate_object(vm_map_entry_t);
void vm_map_entry_reserve_cpu_init(struct globaldata *gd);
int vm_map_entry_reserve(int);
int vm_map_entry_kreserve(int);
void vm_map_entry_release(int);
void vm_map_entry_krelease(int);
int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t, int *);
int vm_map_find (vm_map_t, void *, void *,
		 vm_ooffset_t, vm_offset_t *, vm_size_t,
		 vm_size_t, boolean_t,
		 vm_maptype_t, vm_subsys_t id,
		 vm_prot_t, vm_prot_t, int);
int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_size_t,
		      int, vm_offset_t *);
vm_offset_t vm_map_hint(struct proc *, vm_offset_t, vm_prot_t);
int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t, pmap_t);
int vm_map_insert (vm_map_t, int *, void *, void *,
		   vm_ooffset_t, vm_offset_t, vm_offset_t,
		   vm_maptype_t, vm_subsys_t id,
		   vm_prot_t, vm_prot_t, int);
int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t,
		   vm_map_entry_t *, vm_object_t *,
		   vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t, int);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
int vm_map_wire (vm_map_t, vm_offset_t, vm_offset_t, int);
int vm_map_unwire (vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int, off_t);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t, int *);
void vm_init2 (void);
int vm_uiomove (vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *);
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, int,
		  vm_prot_t, vm_prot_t, int);
int vm_map_growstack (vm_map_t map, vm_offset_t addr);
vm_offset_t vmspace_swap_count (struct vmspace *vmspace);
vm_offset_t vmspace_anonymous_count (struct vmspace *vmspace);
void vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, int *);
void vm_map_transition_wait(vm_map_t map);

void vm_map_interlock(vm_map_t map, struct vm_map_ilock *ilock,
			vm_offset_t ran_beg, vm_offset_t ran_end);
void vm_map_deinterlock(vm_map_t map, struct vm_map_ilock *ilock);


#if defined(__x86_64__) && defined(_KERNEL_VIRTUAL)
int vkernel_module_memory_alloc(vm_offset_t *, size_t);
void vkernel_module_memory_free(vm_offset_t, size_t);
#endif

#endif /* _KERNEL */
#endif /* _VM_VM_MAP_H_ */