/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/elf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a self-adjusting binary search tree of these
 *	entries is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
    vm_map_entry_t gap_entry);
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);

#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
    !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define PROC_VMSPACE_LOCK(p) do { } while (0)
#define PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm->vm_map.pmap = NULL;
	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
	PMAP_LOCK_INIT(vmspace_pmap(vm));
	return (0);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	memset(map, 0, sizeof(*map));
	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 *
 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);
	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
	if (!pinit(vmspace_pmap(vm))) {
		uma_zfree(vmspace_zone, vm);
		return (NULL);
	}
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	return (vm);
}

#ifdef RACCT
static void
vmspace_container_reset(struct proc *p)
{

	PROC_LOCK(p);
	racct_set(p, RACCT_DATA, 0);
	racct_set(p, RACCT_STACK, 0);
	racct_set(p, RACCT_RSS, 0);
	racct_set(p, RACCT_MEMLOCK, 0);
	racct_set(p, RACCT_VMEM, 0);
	PROC_UNLOCK(p);
}
#endif

static inline void
vmspace_dofree(struct vmspace *vm)
{

	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	(void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
	    vm_map_max(&vm->vm_map));

	pmap_release(vmspace_pmap(vm));
	vm->vm_map.pmap = NULL;
	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "vmspace_free() called");

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	PROC_VMSPACE_UNLOCK(p);
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
	int refcnt;
	struct vmspace *vm;
	struct proc *p;

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 *
	 * The last exiting process to reach this point releases as
	 * much of the environment as it can.  vmspace_dofree() is the
	 * slower fallback in case another process had a temporary
	 * reference to the vmspace.
	 */

	p = td->td_proc;
	vm = p->p_vmspace;
	atomic_add_int(&vmspace0.vm_refcnt, 1);
	refcnt = vm->vm_refcnt;
	do {
		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
			/* Switch now since other proc might free vmspace */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = &vmspace0;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
	} while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt - 1));
	if (refcnt == 1) {
		if (p->p_vmspace != vm) {
			/* vmspace not yet freed, switch back */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = vm;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		pmap_remove_pages(vmspace_pmap(vm));
		/* Switch now since this proc will free vmspace */
		PROC_VMSPACE_LOCK(p);
		p->p_vmspace = &vmspace0;
		PROC_VMSPACE_UNLOCK(p);
		pmap_activate(td);
		vmspace_dofree(vm);
	}
#ifdef RACCT
	if (racct_enable)
		vmspace_container_reset(p);
#endif
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
	struct vmspace *vm;
	int refcnt;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	if (vm == NULL) {
		PROC_VMSPACE_UNLOCK(p);
		return (NULL);
	}
	refcnt = vm->vm_refcnt;
	do {
		if (refcnt <= 0) {	/* Avoid 0->1 transition */
			PROC_VMSPACE_UNLOCK(p);
			return (NULL);
		}
	} while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt + 1));
	if (vm != p->p_vmspace) {
		PROC_VMSPACE_UNLOCK(p);
		vmspace_free(vm);
		return (NULL);
	}
	PROC_VMSPACE_UNLOCK(p);
	return (vm);
}

/*
 * Switch between vmspaces in an AIO kernel process.
 *
 * The new vmspace is either the vmspace of a user process obtained
 * from an active AIO request or the initial vmspace of the AIO kernel
 * process (when it is idling).
 * Because user processes will block to
 * drain any active AIO requests before proceeding in exit() or
 * execve(), the reference count for vmspaces from AIO requests can
 * never be 0.  Similarly, AIO kernel processes hold an extra
 * reference on their initial vmspace for the life of the process.  As
 * a result, the 'newvm' vmspace always has a non-zero reference
 * count.  This permits an additional reference on 'newvm' to be
 * acquired via a simple atomic increment rather than the loop in
 * vmspace_acquire_ref() above.
 */
void
vmspace_switch_aio(struct vmspace *newvm)
{
	struct vmspace *oldvm;

	/* XXX: Need some way to assert that this is an aio daemon. */

	KASSERT(newvm->vm_refcnt > 0,
	    ("vmspace_switch_aio: newvm unreferenced"));

	oldvm = curproc->p_vmspace;
	if (oldvm == newvm)
		return;

	/*
	 * Point to the new address space and refer to it.
	 */
	curproc->p_vmspace = newvm;
	atomic_add_int(&newvm->vm_refcnt, 1);

	/* Activate the new mapping. */
	pmap_activate(curthread);

	vmspace_free(oldvm);
}

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	map->timestamp++;
}

void
vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add)
{
	vm_object_t object, object1;
	struct vnode *vp;

	if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0)
		return;
	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
	    ("Submap with execs"));
	object = entry->object.vm_object;
	KASSERT(object != NULL, ("No object for text, entry %p", entry));
	VM_OBJECT_RLOCK(object);
	while ((object1 = object->backing_object) != NULL) {
		VM_OBJECT_RLOCK(object1);
		VM_OBJECT_RUNLOCK(object);
		object = object1;
	}

	vp = NULL;
	if (object->type == OBJT_DEAD) {
		/*
		 * For OBJT_DEAD objects, v_writecount was handled in
		 * vnode_pager_dealloc().
		 */
	} else if (object->type == OBJT_VNODE) {
		vp = object->handle;
	} else if (object->type == OBJT_SWAP) {
		KASSERT((object->flags & OBJ_TMPFS_NODE) != 0,
		    ("vm_map_entry_set_vnode_text: swap and !TMPFS "
		    "entry %p, object %p, add %d", entry, object, add));
		/*
		 * Tmpfs VREG node, which was reclaimed, has
		 * OBJ_TMPFS_NODE flag set, but not OBJ_TMPFS.  In
		 * this case there is no v_writecount to adjust.
		 */
		if ((object->flags & OBJ_TMPFS) != 0)
			vp = object->un_pager.swp.swp_tmpfs;
	} else {
		KASSERT(0,
		    ("vm_map_entry_set_vnode_text: wrong object type, "
		    "entry %p, object %p, add %d", entry, object, add));
	}
	if (vp != NULL) {
		if (add) {
			VOP_SET_TEXT_CHECKED(vp);
			VM_OBJECT_RUNLOCK(object);
		} else {
			vhold(vp);
			VM_OBJECT_RUNLOCK(object);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			VOP_UNSET_TEXT_CHECKED(vp);
			VOP_UNLOCK(vp, 0);
			vdrop(vp);
		}
	} else {
		VM_OBJECT_RUNLOCK(object);
	}
}

/*
 * Use a different name for this vm_map_entry field when its use
 * is not consistent with its use as part of an ordered search tree.
 */
#define defer_next right
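
/*
 * Deferred-release sketch (illustrative only): entries queued for
 * deferred teardown are chained through defer_next (an alias for the
 * right pointer) on curthread->td_map_def_user, and
 * vm_map_process_deferred() below drains that per-thread list after the
 * map lock has been dropped, releasing writecounts, text references,
 * and finally the entries themselves.
 */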

static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry, next;
	vm_object_t object;

	td = curthread;
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->defer_next;
		MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
		    MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_WRITECNT |
		    MAP_ENTRY_VN_EXEC));
		if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) {
			/*
			 * Decrement the object's writemappings and
			 * possibly the vnode's v_writecount.
			 */
			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
			    ("Submap with writecount"));
			object = entry->object.vm_object;
			KASSERT(object != NULL, ("No object for writecount"));
			vm_pager_release_writecount(object, entry->start,
			    entry->end);
		}
		vm_map_entry_set_vnode_text(entry, false);
		vm_map_entry_deallocate(entry, FALSE);
		entry = next;
	}
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	else
		sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define	VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)

enum { VMMAP_CHECK_NONE, VMMAP_CHECK_UNLOCK, VMMAP_CHECK_ALL };
#ifdef DIAGNOSTIC
static int enable_vmmap_check = VMMAP_CHECK_UNLOCK;
#else
static int enable_vmmap_check = VMMAP_CHECK_NONE;
#endif
SYSCTL_INT(_debug, OID_AUTO, vmmap_check, CTLFLAG_RWTUN,
    &enable_vmmap_check, 0, "Enable vm map consistency checking");

static void _vm_map_assert_consistent(vm_map_t map, int check);

#define VM_MAP_ASSERT_CONSISTENT(map) \
    _vm_map_assert_consistent(map, VMMAP_CHECK_ALL)
#ifdef DIAGNOSTIC
#define VM_MAP_UNLOCK_CONSISTENT(map) do {			\
	if (map->nupdates > map->nentries) {			\
		_vm_map_assert_consistent(map, VMMAP_CHECK_UNLOCK); \
		map->nupdates = 0;				\
	}							\
} while (0)
#else
#define VM_MAP_UNLOCK_CONSISTENT(map)
#endif
#else
#define	VM_MAP_ASSERT_LOCKED(map)
#define VM_MAP_ASSERT_CONSISTENT(map)
#define VM_MAP_UNLOCK_CONSISTENT(map)
#endif /* INVARIANTS */

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	VM_MAP_UNLOCK_CONSISTENT(map);
	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_xunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_sunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_xlock_(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_slock_(&map->lock, file, line);
	return (error == 0);
}
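
/*
 * Usage sketch (illustrative only, assuming the usual vm_map.h wrapper
 * macros): a caller that starts with a read lock and later needs to
 * modify the map may attempt an upgrade; on failure every lock has been
 * dropped, so the lookup must be redone under a fresh write lock:
 *
 *	vm_map_lock_read(map);
 *	...
 *	if (vm_map_lock_upgrade(map) != 0) {
 *		vm_map_lock(map);
 *		... redo the lookup; the map may have changed ...
 *	}
 *	... modify the map ...
 *	vm_map_unlock(map);
 */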

/*
 *	_vm_map_lock_upgrade:	[ internal use only ]
 *
 *	Tries to upgrade a read (shared) lock on the specified map to a write
 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *	returned without a read or write lock held.
 *
 *	Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	unsigned int last_timestamp;

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		if (!sx_try_upgrade_(&map->lock, file, line)) {
			last_timestamp = map->timestamp;
			sx_sunlock_(&map->lock, file, line);
			vm_map_process_deferred();
			/*
			 * If the map's timestamp does not change while the
			 * map is unlocked, then the upgrade succeeds.
			 */
			sx_xlock_(&map->lock, file, line);
			if (last_timestamp != map->timestamp) {
				sx_xunlock_(&map->lock, file, line);
				return (1);
			}
		}
	}
	map->timestamp++;
	return (0);
}

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		VM_MAP_UNLOCK_CONSISTENT(map);
		sx_downgrade_(&map->lock, file, line);
	}
}

/*
 *	vm_map_locked:
 *
 *	Returns a non-zero value if the caller holds a write (exclusive) lock
 *	on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

	if (map->system_map)
		return (mtx_owned(&map->system_mtx));
	else
		return (sx_xlocked(&map->lock));
}

/*
 *	_vm_map_unlock_and_wait:
 *
 *	Atomically releases the lock on the specified map and puts the calling
 *	thread to sleep.  The calling thread will remain asleep until either
 *	vm_map_wakeup() is performed on the map or the specified timeout is
 *	exceeded.
 *
 *	WARNING!  This function does not perform deferred deallocations of
 *	objects and map entries.  Therefore, the calling thread is expected to
 *	reacquire the map lock after reawakening and later perform an ordinary
 *	unlock operation, such as vm_map_unlock(), before completing its
 *	operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

	VM_MAP_UNLOCK_CONSISTENT(map);
	mtx_lock(&map_sleep_mtx);
	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xunlock_(&map->lock, file, line);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
	    timo));
}

/*
 *	vm_map_wakeup:
 *
 *	Awaken any threads that have slept on the map using
 *	vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the map unlock
	 * and the msleep() in _vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}
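
/*
 * Usage sketch (illustrative only, assuming the vm_map.h wrapper macros):
 * a thread that must wait for another thread to change the map sleeps via
 * vm_map_unlock_and_wait() and, per the warning above, reacquires the
 * lock before continuing; the thread that makes the change unlocks and
 * then calls vm_map_wakeup():
 *
 *	(void)vm_map_unlock_and_wait(map, 0);
 *	vm_map_lock(map);
 *	... re-validate state and continue ...
 *
 *	vm_map_unlock(map);
 *	vm_map_wakeup(map);
 */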

void
vm_map_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
		wakeup(&map->busy);
	}
}

void
vm_map_wait_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	while (map->busy) {
		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
		if (map->system_map)
			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
		else
			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
	}
	map->timestamp++;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = uma_zalloc(mapzone, M_WAITOK);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	_vm_map_init(result, pmap, min, max);
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	map->header.next = map->header.prev = &map->header;
	map->header.eflags = MAP_ENTRY_HEADER;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->pmap = pmap;
	map->header.end = min;
	map->header.start = max;
	map->flags = 0;
	map->root = NULL;
	map->timestamp = 0;
	map->busy = 0;
	map->anon_loc = 0;
#ifdef DIAGNOSTIC
	map->nupdates = 0;
#endif
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	_vm_map_init(map, pmap, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

	if (map->system_map)
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	else
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}

/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *	vm_map_entry_max_free_{left,right}:
 *
 *	Compute the size of the largest free gap between two entries,
 *	one the root of a tree and the other the ancestor of that root
 *	that is the least or greatest ancestor found on the search path.
 */
static inline vm_size_t
vm_map_entry_max_free_left(vm_map_entry_t root, vm_map_entry_t left_ancestor)
{

	return (root->left != NULL ?
	    root->left->max_free : root->start - left_ancestor->end);
}

static inline vm_size_t
vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor)
{

	return (root->right != NULL ?
	    root->right->max_free : right_ancestor->start - root->end);
}

#define SPLAY_LEFT_STEP(root, y, rlist, test) do {			\
	vm_size_t max_free;						\
									\
	/*								\
	 * Infer root->right->max_free == root->max_free when		\
	 * y->max_free < root->max_free || root->max_free == 0.	\
	 * Otherwise, look right to find it.				\
	 */								\
	y = root->left;							\
	max_free = root->max_free;					\
	KASSERT(max_free >= vm_map_entry_max_free_right(root, rlist),	\
	    ("%s: max_free invariant fails", __func__));		\
	if (y == NULL ? max_free > 0 : max_free - 1 < y->max_free)	\
		max_free = vm_map_entry_max_free_right(root, rlist);	\
	if (y != NULL && (test)) {					\
		/* Rotate right and make y root. */			\
		root->left = y->right;					\
		y->right = root;					\
		if (max_free < y->max_free)				\
			root->max_free = max_free = MAX(max_free,	\
			    vm_map_entry_max_free_left(root, y));	\
		root = y;						\
		y = root->left;						\
	}								\
	/* Copy right->max_free.  Put root on rlist. */			\
	root->max_free = max_free;					\
	KASSERT(max_free == vm_map_entry_max_free_right(root, rlist),	\
	    ("%s: max_free not copied from right", __func__));		\
	root->left = rlist;						\
	rlist = root;							\
	root = y;							\
} while (0)

#define SPLAY_RIGHT_STEP(root, y, llist, test) do {			\
	vm_size_t max_free;						\
									\
	/*								\
	 * Infer root->left->max_free == root->max_free when		\
	 * y->max_free < root->max_free || root->max_free == 0.	\
	 * Otherwise, look left to find it.				\
	 */								\
	y = root->right;						\
	max_free = root->max_free;					\
	KASSERT(max_free >= vm_map_entry_max_free_left(root, llist),	\
	    ("%s: max_free invariant fails", __func__));		\
	if (y == NULL ? max_free > 0 : max_free - 1 < y->max_free)	\
		max_free = vm_map_entry_max_free_left(root, llist);	\
	if (y != NULL && (test)) {					\
		/* Rotate left and make y root. */			\
		root->right = y->left;					\
		y->left = root;						\
		if (max_free < y->max_free)				\
			root->max_free = max_free = MAX(max_free,	\
			    vm_map_entry_max_free_right(root, y));	\
		root = y;						\
		y = root->right;					\
	}								\
	/* Copy left->max_free.  Put root on llist. */			\
	root->max_free = max_free;					\
	KASSERT(max_free == vm_map_entry_max_free_left(root, llist),	\
	    ("%s: max_free not copied from left", __func__));		\
	root->right = llist;						\
	llist = root;							\
	root = y;							\
} while (0)

/*
 * Walk down the tree until we find addr or a NULL pointer where addr would go,
 * breaking off left and right subtrees of nodes less than, or greater than
 * addr.  Treat pointers to nodes with max_free < length as NULL pointers.
 * llist and rlist are the two sides in reverse order (bottom-up), with llist
 * linked by the right pointer and rlist linked by the left pointer in the
 * vm_map_entry, and both lists terminated by &map->header.  This function, and
 * the subsequent call to vm_map_splay_merge, rely on the start and end address
 * values in &map->header.
 */
static vm_map_entry_t
vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length,
    vm_map_entry_t *out_llist, vm_map_entry_t *out_rlist)
{
	vm_map_entry_t llist, rlist, root, y;

	llist = rlist = &map->header;
	root = map->root;
	while (root != NULL && root->max_free >= length) {
		KASSERT(llist->end <= root->start && root->end <= rlist->start,
		    ("%s: root not within tree bounds", __func__));
		if (addr < root->start) {
			SPLAY_LEFT_STEP(root, y, rlist,
			    y->max_free >= length && addr < y->start);
		} else if (addr >= root->end) {
			SPLAY_RIGHT_STEP(root, y, llist,
			    y->max_free >= length && addr >= y->end);
		} else
			break;
	}
	*out_llist = llist;
	*out_rlist = rlist;
	return (root);
}

static void
vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *iolist)
{
	vm_map_entry_t rlist, y;

	root = root->right;
	rlist = *iolist;
	while (root != NULL)
		SPLAY_LEFT_STEP(root, y, rlist, true);
	*iolist = rlist;
}

static void
vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *iolist)
{
	vm_map_entry_t llist, y;

	root = root->left;
	llist = *iolist;
	while (root != NULL)
		SPLAY_RIGHT_STEP(root, y, llist, true);
	*iolist = llist;
}

static inline void
vm_map_entry_swap(vm_map_entry_t *a, vm_map_entry_t *b)
{
	vm_map_entry_t tmp;

	tmp = *b;
	*b = *a;
	*a = tmp;
}

/*
 * Walk back up the two spines, flip the pointers and set max_free.  The
 * subtrees of the root go at the bottom of llist and rlist.
 */
static void
vm_map_splay_merge(vm_map_t map, vm_map_entry_t root,
    vm_map_entry_t llist, vm_map_entry_t rlist)
{
	vm_map_entry_t prev;
	vm_size_t max_free_left, max_free_right;

	max_free_left = vm_map_entry_max_free_left(root, llist);
	if (llist != &map->header) {
		prev = root->left;
		do {
			/*
			 * The max_free values of the children of llist are in
			 * llist->max_free and max_free_left.  Update with the
			 * max value.
			 */
			llist->max_free = max_free_left =
			    MAX(llist->max_free, max_free_left);
			vm_map_entry_swap(&llist->right, &prev);
			vm_map_entry_swap(&prev, &llist);
		} while (llist != &map->header);
		root->left = prev;
	}
	max_free_right = vm_map_entry_max_free_right(root, rlist);
	if (rlist != &map->header) {
		prev = root->right;
		do {
			/*
			 * The max_free values of the children of rlist are in
			 * rlist->max_free and max_free_right.  Update with the
			 * max value.
			 */
			rlist->max_free = max_free_right =
			    MAX(rlist->max_free, max_free_right);
			vm_map_entry_swap(&rlist->left, &prev);
			vm_map_entry_swap(&prev, &rlist);
		} while (rlist != &map->header);
		root->right = prev;
	}
	root->max_free = MAX(max_free_left, max_free_right);
	map->root = root;
#ifdef DIAGNOSTIC
	++map->nupdates;
#endif
}

/*
 *	vm_map_splay:
 *
 *	The Sleator and Tarjan top-down splay algorithm with the
 *	following variation.  Max_free must be computed bottom-up, so
 *	on the downward pass, maintain the left and right spines in
 *	reverse order.  Then, make a second pass up each side to fix
 *	the pointers and compute max_free.  The time bound is O(log n)
 *	amortized.
 *
 *	The new root is the vm_map_entry containing "addr", or else an
 *	adjacent entry (lower if possible) if addr is not in the tree.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: the new root.
 */
static vm_map_entry_t
vm_map_splay(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t llist, rlist, root;

	root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
	if (root != NULL) {
		/* do nothing */
	} else if (llist != &map->header) {
		/*
		 * Recover the greatest node in the left
		 * subtree and make it the root.
		 */
		root = llist;
		llist = root->right;
		root->right = NULL;
	} else if (rlist != &map->header) {
		/*
		 * Recover the least node in the right
		 * subtree and make it the root.
		 */
		root = rlist;
		rlist = root->left;
		root->left = NULL;
	} else {
		/* There is no root. */
		return (NULL);
	}
	vm_map_splay_merge(map, root, llist, rlist);
	VM_MAP_ASSERT_CONSISTENT(map);
	return (root);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t llist, rlist, root;

	CTR3(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
	VM_MAP_ASSERT_LOCKED(map);
	map->nentries++;
	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
	KASSERT(root == NULL,
	    ("vm_map_entry_link: link object already mapped"));
	entry->prev = llist;
	entry->next = rlist;
	llist->next = rlist->prev = entry;
	entry->left = entry->right = NULL;
	vm_map_splay_merge(map, entry, llist, rlist);
	VM_MAP_ASSERT_CONSISTENT(map);
}

enum unlink_merge_type {
	UNLINK_MERGE_NONE,
	UNLINK_MERGE_NEXT
};

static void
vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
    enum unlink_merge_type op)
{
	vm_map_entry_t llist, rlist, root, y;

	VM_MAP_ASSERT_LOCKED(map);
	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
	KASSERT(root != NULL,
	    ("vm_map_entry_unlink: unlink object not mapped"));

	vm_map_splay_findnext(root, &rlist);
	switch (op) {
	case UNLINK_MERGE_NEXT:
		rlist->start = root->start;
		rlist->offset = root->offset;
		y = root->left;
		root = rlist;
		rlist = root->left;
		root->left = y;
		break;
	case UNLINK_MERGE_NONE:
		vm_map_splay_findprev(root, &llist);
		if (llist != &map->header) {
			root = llist;
			llist = root->right;
			root->right = NULL;
		} else if (rlist != &map->header) {
			root = rlist;
			rlist = root->left;
			root->left = NULL;
		} else
			root = NULL;
		break;
	}
	y = entry->next;
	y->prev = entry->prev;
	y->prev->next = y;
	if (root != NULL)
		vm_map_splay_merge(map, root, llist, rlist);
	else
		map->root = NULL;
	VM_MAP_ASSERT_CONSISTENT(map);
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_entry_resize:
 *
 *	Resize a vm_map_entry, recompute the amount of free space that
 *	follows it and propagate that value up the tree.
 *
 *	The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount)
{
	vm_map_entry_t llist, rlist, root;

	VM_MAP_ASSERT_LOCKED(map);
	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
	KASSERT(root != NULL,
	    ("%s: resize object not mapped", __func__));
	vm_map_splay_findnext(root, &rlist);
	root->right = NULL;
	entry->end += grow_amount;
	vm_map_splay_merge(map, root, llist, rlist);
	VM_MAP_ASSERT_CONSISTENT(map);
	CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
	    __func__, map, map->nentries, entry);
}

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur, lbound;
	boolean_t locked;

	/*
	 * If the map is empty, then the map entry immediately preceding
	 * "address" is the map's header.
	 */
	cur = map->root;
	if (cur == NULL) {
		*entry = &map->header;
		return (FALSE);
	}
	if (address >= cur->start && cur->end > address) {
		*entry = cur;
		return (TRUE);
	}
	if ((locked = vm_map_locked(map)) ||
	    sx_try_upgrade(&map->lock)) {
		/*
		 * Splay requires a write lock on the map.  However, it only
		 * restructures the binary search tree; it does not otherwise
		 * change the map.  Thus, the map's timestamp need not change
		 * on a temporary upgrade.
		 */
		cur = vm_map_splay(map, address);
		if (!locked) {
			VM_MAP_UNLOCK_CONSISTENT(map);
			sx_downgrade(&map->lock);
		}

		/*
		 * If "address" is contained within a map entry, the new root
		 * is that map entry.  Otherwise, the new root is a map entry
		 * immediately before or after "address".
		 */
		if (address < cur->start) {
			*entry = &map->header;
			return (FALSE);
		}
		*entry = cur;
		return (address < cur->end);
	}
	/*
	 * Since the map is only locked for read access, perform a
	 * standard binary search tree lookup for "address".
	 */
	lbound = &map->header;
	do {
		if (address < cur->start) {
			cur = cur->left;
		} else if (cur->end <= address) {
			lbound = cur;
			cur = cur->right;
		} else {
			*entry = cur;
			return (TRUE);
		}
	} while (cur != NULL);
	*entry = lbound;
	return (FALSE);
}

/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry, prev_entry;
	struct ucred *cred;
	vm_eflags_t protoeflags;
	vm_inherit_t inheritance;

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(object != kernel_object ||
	    (cow & MAP_COPY_ON_WRITE) == 0,
	    ("vm_map_insert: kernel object and COW"));
	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
	    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
	KASSERT((prot & ~max) == 0,
	    ("prot %#x is not subset of max_prot %#x", prot, max));

	/*
	 * Check that the start and end points are not bogus.
	 */
	if (start < vm_map_min(map) || end > vm_map_max(map) ||
	    start >= end)
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &prev_entry))
		return (KERN_NO_SPACE);

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if (vm_map_entry_succ(prev_entry)->start < end)
		return (KERN_NO_SPACE);

	if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
	    max != VM_PROT_NONE))
		return (KERN_INVALID_ARGUMENT);

	protoeflags = 0;
	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
	if (cow & MAP_NOFAULT)
		protoeflags |= MAP_ENTRY_NOFAULT;
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_STACK_GROWS_DOWN)
		protoeflags |= MAP_ENTRY_GROWS_DOWN;
	if (cow & MAP_STACK_GROWS_UP)
		protoeflags |= MAP_ENTRY_GROWS_UP;
	if (cow & MAP_WRITECOUNT)
		protoeflags |= MAP_ENTRY_WRITECNT;
	if (cow & MAP_VN_EXEC)
		protoeflags |= MAP_ENTRY_VN_EXEC;
	if ((cow & MAP_CREATE_GUARD) != 0)
		protoeflags |= MAP_ENTRY_GUARD;
	if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
		protoeflags |= MAP_ENTRY_STACK_GAP_DN;
	if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
		protoeflags |= MAP_ENTRY_STACK_GAP_UP;
	if (cow & MAP_INHERIT_SHARE)
		inheritance = VM_INHERIT_SHARE;
	else
		inheritance = VM_INHERIT_DEFAULT;

	cred = NULL;
	if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
		goto charged;
	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
			return (KERN_RESOURCE_SHORTAGE);
		KASSERT(object == NULL ||
		    (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
		    object->cred == NULL,
		    ("overcommit: vm_map_insert o %p", object));
		cred = curthread->td_ucred;
	}

charged:
	/* Expand the kernel pmap, if necessary. */
	if (map == kernel_map && end > kernel_vm_end)
		pmap_growkernel(end);
	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		if ((object->flags & OBJ_ANON) != 0) {
			VM_OBJECT_WLOCK(object);
			if (object->ref_count > 1 || object->shadow_count != 0)
				vm_object_clear_flag(object, OBJ_ONEMAPPING);
			VM_OBJECT_WUNLOCK(object);
		}
	} else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
	    protoeflags &&
	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP |
	    MAP_VN_EXEC)) == 0 &&
	    prev_entry->end == start && (prev_entry->cred == cred ||
	    (prev_entry->object.vm_object != NULL &&
	    prev_entry->object.vm_object->cred == cred)) &&
	    vm_object_coalesce(prev_entry->object.vm_object,
	    prev_entry->offset,
	    (vm_size_t)(prev_entry->end - prev_entry->start),
	    (vm_size_t)(end - prev_entry->end), cred != NULL &&
	    (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if (prev_entry->inheritance == inheritance &&
		    prev_entry->protection == prot &&
		    prev_entry->max_protection == max &&
		    prev_entry->wired_count == 0) {
			KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
			    0, ("prev_entry %p has incoherent wiring",
			    prev_entry));
			if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
				map->size += end - prev_entry->end;
			vm_map_entry_resize(map, prev_entry,
			    end - prev_entry->end);
			vm_map_try_merge_entries(map, prev_entry,
			    vm_map_entry_succ(prev_entry));
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
		    (prev_entry->end - prev_entry->start);
		vm_object_reference(object);
		if (cred != NULL && object != NULL && object->cred != NULL &&
		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
			/* Object already accounts for this uid. */
			cred = NULL;
		}
	}
	if (cred != NULL)
		crhold(cred);

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;
	new_entry->cred = NULL;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;

	new_entry->inheritance = inheritance;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;
	new_entry->wiring_thread = NULL;
	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
	new_entry->next_read = start;

	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
	    ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
	new_entry->cred = cred;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, new_entry);
	if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
		map->size += new_entry->end - new_entry->start;

	/*
	 * Try to coalesce the new entry with both the previous and next
	 * entries in the list.  Previously, we only attempted to coalesce
	 * with the previous entry when object is NULL.  Here, we handle the
	 * other cases, which are less common.
	 */
	vm_map_try_merge_entries(map, prev_entry, new_entry);
	vm_map_try_merge_entries(map, new_entry, vm_map_entry_succ(new_entry));

	if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
		vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
		    end - start, cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}

/*
 *	vm_map_findspace:
 *
 *	Find the first fit (lowest VM address) for "length" free bytes
 *	beginning at address >= start in the given map.
 *
 *	In a vm_map_entry, "max_free" is the maximum amount of
 *	contiguous free space between an entry in its subtree and a
 *	neighbor of that entry.  This allows finding a free region in
 *	one path down the tree, so O(log n) amortized with splay
 *	trees.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: starting address if sufficient space,
 *		 vm_map_max(map)-length+1 if insufficient space.
 */
vm_offset_t
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
{
	vm_map_entry_t llist, rlist, root, y;
	vm_size_t left_length;
	vm_offset_t gap_end;

	/*
	 * Request must fit within min/max VM address and must avoid
	 * address wrap.
	 */
	start = MAX(start, vm_map_min(map));
	if (start >= vm_map_max(map) || length > vm_map_max(map) - start)
		return (vm_map_max(map) - length + 1);

	/* Empty tree means wide open address space. */
	if (map->root == NULL)
		return (start);

	/*
	 * After splay_split, if start is within an entry, push it to the start
	 * of the following gap.  If rlist is at the end of the gap containing
	 * start, save the end of that gap in gap_end to see if the gap is big
	 * enough; otherwise set gap_end to start, to skip gap-checking and
	 * move directly to a search of the right subtree.
	 */
	root = vm_map_splay_split(map, start, length, &llist, &rlist);
	gap_end = rlist->start;
	if (root != NULL) {
		start = root->end;
		if (root->right != NULL)
			gap_end = start;
	} else if (rlist != &map->header) {
		root = rlist;
		rlist = root->left;
		root->left = NULL;
	} else {
		root = llist;
		llist = root->right;
		root->right = NULL;
	}
	vm_map_splay_merge(map, root, llist, rlist);
	VM_MAP_ASSERT_CONSISTENT(map);
	if (length <= gap_end - start)
		return (start);

	/* With max_free, can immediately tell if no solution. */
	if (root->right == NULL || length > root->right->max_free)
		return (vm_map_max(map) - length + 1);

	/*
	 * Splay for the least large-enough gap in the right subtree.
	 */
	llist = rlist = &map->header;
	for (left_length = 0;;
	    left_length = vm_map_entry_max_free_left(root, llist)) {
		if (length <= left_length)
			SPLAY_LEFT_STEP(root, y, rlist,
			    length <= vm_map_entry_max_free_left(y, llist));
		else
			SPLAY_RIGHT_STEP(root, y, llist,
			    length > vm_map_entry_max_free_left(y, root));
		if (root == NULL)
			break;
	}
	root = llist;
	llist = root->right;
	root->right = NULL;
	if (rlist != &map->header) {
		y = rlist;
		rlist = y->left;
		y->left = NULL;
		vm_map_splay_merge(map, y, &map->header, rlist);
		y->max_free = MAX(
		    vm_map_entry_max_free_left(y, root),
		    vm_map_entry_max_free_right(y, &map->header));
		root->right = y;
	}
	vm_map_splay_merge(map, root, llist, &map->header);
	VM_MAP_ASSERT_CONSISTENT(map);
	return (root->end);
}

int
vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_size_t length, vm_prot_t prot,
    vm_prot_t max, int cow)
{
	vm_offset_t end;
	int result;

	end = start + length;
	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_fixed: non-NULL backing object for stack"));
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if ((cow & MAP_CHECK_EXCL) == 0)
		vm_map_delete(map, start, end);
	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
		result = vm_map_stack_locked(map, start, length, sgrowsiz,
		    prot, max, cow);
	} else {
		result = vm_map_insert(map, object, offset, start, end,
		    prot, max, cow);
	}
	vm_map_unlock(map);
	return (result);
}

static const int aslr_pages_rnd_64[2] = {0x1000, 0x10};
static const int aslr_pages_rnd_32[2] = {0x100, 0x4};
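
/*
 * Note (illustrative, see the gap computation in vm_map_find() below):
 * the tables above give the number of page-sized slots from which a
 * random start offset is drawn for an ASLR mapping.  aslr_pages_rnd_64
 * is used when the map extends beyond MAP_32BIT_MAX_ADDR and
 * aslr_pages_rnd_32 otherwise; index 1 is selected when superpage
 * placement (VMFS_SUPER_SPACE or VMFS_OPTIMAL_SPACE) is requested and
 * superpages are configured, index 0 otherwise.
 */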

static int cluster_anon = 1;
SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW,
    &cluster_anon, 0,
    "Cluster anonymous mappings: 0 = no, 1 = yes if no hint, 2 = always");

static bool
clustering_anon_allowed(vm_offset_t addr)
{

	switch (cluster_anon) {
	case 0:
		return (false);
	case 1:
		return (addr == 0);
	case 2:
	default:
		return (true);
	}
}

static long aslr_restarts;
SYSCTL_LONG(_vm, OID_AUTO, aslr_restarts, CTLFLAG_RD,
    &aslr_restarts, 0,
    "Number of aslr failures");

#define	MAP_32BIT_MAX_ADDR	((vm_offset_t)1 << 31)

/*
 * Searches for the specified amount of free space in the given map with the
 * specified alignment.  Performs an address-ordered, first-fit search from
 * the given address "*addr", with an optional upper bound "max_addr".  If the
 * parameter "alignment" is zero, then the alignment is computed from the
 * given (object, offset) pair so as to enable the greatest possible use of
 * superpage mappings.  Returns KERN_SUCCESS and the address of the free space
 * in "*addr" if successful.  Otherwise, returns KERN_NO_SPACE.
 *
 * The map must be locked.  Initially, there must be at least "length" bytes
 * of free space at the given address.
 */
static int
vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
    vm_offset_t alignment)
{
	vm_offset_t aligned_addr, free_addr;

	VM_MAP_ASSERT_LOCKED(map);
	free_addr = *addr;
	KASSERT(free_addr == vm_map_findspace(map, free_addr, length),
	    ("caller failed to provide space %#jx at address %p",
	    (uintmax_t)length, (void *)free_addr));
	for (;;) {
		/*
		 * At the start of every iteration, the free space at address
		 * "*addr" is at least "length" bytes.
		 */
		if (alignment == 0)
			pmap_align_superpage(object, offset, addr, length);
		else if ((*addr & (alignment - 1)) != 0) {
			*addr &= ~(alignment - 1);
			*addr += alignment;
		}
		aligned_addr = *addr;
		if (aligned_addr == free_addr) {
			/*
			 * Alignment did not change "*addr", so "*addr" must
			 * still provide sufficient free space.
			 */
			return (KERN_SUCCESS);
		}

		/*
		 * Test for address wrap on "*addr".  A wrapped "*addr" could
		 * be a valid address, in which case vm_map_findspace() cannot
		 * be relied upon to fail.
		 */
		if (aligned_addr < free_addr)
			return (KERN_NO_SPACE);
		*addr = vm_map_findspace(map, aligned_addr, length);
		if (*addr + length > vm_map_max(map) ||
		    (max_addr != 0 && *addr + length > max_addr))
			return (KERN_NO_SPACE);
		free_addr = *addr;
		if (free_addr == aligned_addr) {
			/*
			 * If a successful call to vm_map_findspace() did not
			 * change "*addr", then "*addr" must still be aligned
			 * and provide sufficient free space.
			 */
			return (KERN_SUCCESS);
		}
	}
}

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	/* IN/OUT */
	    vm_size_t length, vm_offset_t max_addr, int find_space,
	    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_offset_t alignment, curr_min_addr, min_addr;
	int gap, pidx, rv, try;
	bool cluster, en_aslr, update_anon;

	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_find: non-NULL backing object for stack"));
	MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE &&
	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0));
	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
	    (object->flags & OBJ_COLORED) == 0))
		find_space = VMFS_ANY_SPACE;
	if (find_space >> 8 != 0) {
		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
		alignment = (vm_offset_t)1 << (find_space >> 8);
	} else
		alignment = 0;
	en_aslr = (map->flags & MAP_ASLR) != 0;
	update_anon = cluster = clustering_anon_allowed(*addr) &&
	    (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 &&
	    find_space != VMFS_NO_SPACE && object == NULL &&
	    (cow & (MAP_INHERIT_SHARE | MAP_STACK_GROWS_UP |
	    MAP_STACK_GROWS_DOWN)) == 0 && prot != PROT_NONE;
	curr_min_addr = min_addr = *addr;
	if (en_aslr && min_addr == 0 && !cluster &&
	    find_space != VMFS_NO_SPACE &&
	    (map->flags & MAP_ASLR_IGNSTART) != 0)
		curr_min_addr = min_addr = vm_map_min(map);
	try = 0;
	vm_map_lock(map);
	if (cluster) {
		curr_min_addr = map->anon_loc;
		if (curr_min_addr == 0)
			cluster = false;
	}
	if (find_space != VMFS_NO_SPACE) {
		KASSERT(find_space == VMFS_ANY_SPACE ||
		    find_space == VMFS_OPTIMAL_SPACE ||
		    find_space == VMFS_SUPER_SPACE ||
		    alignment != 0, ("unexpected VMFS flag"));
again:
		/*
		 * When creating an anonymous mapping, try clustering
		 * with an existing anonymous mapping first.
		 *
		 * We make up to two attempts to find address space
		 * for a given find_space value.  The first attempt may
		 * apply randomization or may cluster with an existing
		 * anonymous mapping.  If this first attempt fails,
		 * perform a first-fit search of the available address
		 * space.
		 *
		 * If all tries failed, and find_space is
		 * VMFS_OPTIMAL_SPACE, fallback to VMFS_ANY_SPACE.
		 * Again enable clustering and randomization.
		 */
		try++;
		MPASS(try <= 2);

		if (try == 2) {
			/*
			 * Second try: we failed either to find a
			 * suitable region for randomizing the
			 * allocation, or to cluster with an existing
			 * mapping.  Retry with free run.
			 */
			curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ?
			    vm_map_min(map) : min_addr;
			atomic_add_long(&aslr_restarts, 1);
		}

		if (try == 1 && en_aslr && !cluster) {
			/*
			 * Find space for allocation, including
			 * gap needed for later randomization.
			 */
			pidx = MAXPAGESIZES > 1 && pagesizes[1] != 0 &&
			    (find_space == VMFS_SUPER_SPACE || find_space ==
			    VMFS_OPTIMAL_SPACE) ? 1 : 0;
			gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
			    (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
			    aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx];
			*addr = vm_map_findspace(map, curr_min_addr,
			    length + gap * pagesizes[pidx]);
			if (*addr + length + gap * pagesizes[pidx] >
			    vm_map_max(map))
				goto again;
			/* And randomize the start address. */
*/ 1940 *addr += (arc4random() % gap) * pagesizes[pidx]; 1941 if (max_addr != 0 && *addr + length > max_addr) 1942 goto again; 1943 } else { 1944 *addr = vm_map_findspace(map, curr_min_addr, length); 1945 if (*addr + length > vm_map_max(map) || 1946 (max_addr != 0 && *addr + length > max_addr)) { 1947 if (cluster) { 1948 cluster = false; 1949 MPASS(try == 1); 1950 goto again; 1951 } 1952 rv = KERN_NO_SPACE; 1953 goto done; 1954 } 1955 } 1956 1957 if (find_space != VMFS_ANY_SPACE && 1958 (rv = vm_map_alignspace(map, object, offset, addr, length, 1959 max_addr, alignment)) != KERN_SUCCESS) { 1960 if (find_space == VMFS_OPTIMAL_SPACE) { 1961 find_space = VMFS_ANY_SPACE; 1962 curr_min_addr = min_addr; 1963 cluster = update_anon; 1964 try = 0; 1965 goto again; 1966 } 1967 goto done; 1968 } 1969 } else if ((cow & MAP_REMAP) != 0) { 1970 if (*addr < vm_map_min(map) || 1971 *addr + length > vm_map_max(map) || 1972 *addr + length <= length) { 1973 rv = KERN_INVALID_ADDRESS; 1974 goto done; 1975 } 1976 vm_map_delete(map, *addr, *addr + length); 1977 } 1978 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) { 1979 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot, 1980 max, cow); 1981 } else { 1982 rv = vm_map_insert(map, object, offset, *addr, *addr + length, 1983 prot, max, cow); 1984 } 1985 if (rv == KERN_SUCCESS && update_anon) 1986 map->anon_loc = *addr + length; 1987 done: 1988 vm_map_unlock(map); 1989 return (rv); 1990 } 1991 1992 /* 1993 * vm_map_find_min() is a variant of vm_map_find() that takes an 1994 * additional parameter (min_addr) and treats the given address 1995 * (*addr) differently. Specifically, it treats *addr as a hint 1996 * and not as the minimum address where the mapping is created. 1997 * 1998 * This function works in two phases. First, it tries to 1999 * allocate above the hint. If that fails and the hint is 2000 * greater than min_addr, it performs a second pass, replacing 2001 * the hint with min_addr as the minimum address for the 2002 * allocation. 2003 */ 2004 int 2005 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 2006 vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr, 2007 vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max, 2008 int cow) 2009 { 2010 vm_offset_t hint; 2011 int rv; 2012 2013 hint = *addr; 2014 for (;;) { 2015 rv = vm_map_find(map, object, offset, addr, length, max_addr, 2016 find_space, prot, max, cow); 2017 if (rv == KERN_SUCCESS || min_addr >= hint) 2018 return (rv); 2019 *addr = hint = min_addr; 2020 } 2021 } 2022 2023 /* 2024 * A map entry with any of the following flags set must not be merged with 2025 * another entry. 
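 * (Editorial example, not part of the original source: per the mask defined
 * below, stack entries (MAP_ENTRY_GROWS_DOWN/_UP), entries that are in
 * transition, submap entries, and MAP_ENTRY_VN_EXEC entries are never
 * coalesced with a neighbor, even when every other field matches.)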
2026 */ 2027 #define MAP_ENTRY_NOMERGE_MASK (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \ 2028 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC) 2029 2030 static bool 2031 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry) 2032 { 2033 2034 KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 || 2035 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0, 2036 ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable", 2037 prev, entry)); 2038 return (prev->end == entry->start && 2039 prev->object.vm_object == entry->object.vm_object && 2040 (prev->object.vm_object == NULL || 2041 prev->offset + (prev->end - prev->start) == entry->offset) && 2042 prev->eflags == entry->eflags && 2043 prev->protection == entry->protection && 2044 prev->max_protection == entry->max_protection && 2045 prev->inheritance == entry->inheritance && 2046 prev->wired_count == entry->wired_count && 2047 prev->cred == entry->cred); 2048 } 2049 2050 static void 2051 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry) 2052 { 2053 2054 /* 2055 * If the backing object is a vnode object, vm_object_deallocate() 2056 * calls vrele(). However, vrele() does not lock the vnode because 2057 * the vnode has additional references. Thus, the map lock can be 2058 * kept without causing a lock-order reversal with the vnode lock. 2059 * 2060 * Since we count the number of virtual page mappings in 2061 * object->un_pager.vnp.writemappings, the writemappings value 2062 * should not be adjusted when the entry is disposed of. 2063 */ 2064 if (entry->object.vm_object != NULL) 2065 vm_object_deallocate(entry->object.vm_object); 2066 if (entry->cred != NULL) 2067 crfree(entry->cred); 2068 vm_map_entry_dispose(map, entry); 2069 } 2070 2071 /* 2072 * vm_map_try_merge_entries: 2073 * 2074 * Compare the given map entry to its predecessor, and merge its predecessor 2075 * into it if possible. The entry remains valid, and may be extended. 2076 * The predecessor may be deleted. 2077 * 2078 * The map must be locked. 2079 */ 2080 void 2081 vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev, vm_map_entry_t entry) 2082 { 2083 2084 VM_MAP_ASSERT_LOCKED(map); 2085 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 && 2086 vm_map_mergeable_neighbors(prev, entry)) { 2087 vm_map_entry_unlink(map, prev, UNLINK_MERGE_NEXT); 2088 vm_map_merged_neighbor_dispose(map, prev); 2089 } 2090 } 2091 2092 /* 2093 * vm_map_entry_back: 2094 * 2095 * Allocate an object to back a map entry. 2096 */ 2097 static inline void 2098 vm_map_entry_back(vm_map_entry_t entry) 2099 { 2100 vm_object_t object; 2101 2102 KASSERT(entry->object.vm_object == NULL, 2103 ("map entry %p has backing object", entry)); 2104 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 2105 ("map entry %p is a submap", entry)); 2106 object = vm_object_allocate_anon(atop(entry->end - entry->start)); 2107 entry->object.vm_object = object; 2108 entry->offset = 0; 2109 if (entry->cred != NULL) { 2110 object->cred = entry->cred; 2111 object->charge = entry->end - entry->start; 2112 entry->cred = NULL; 2113 } 2114 } 2115 2116 /* 2117 * vm_map_entry_charge_object 2118 * 2119 * If there is no object backing this entry, create one. Otherwise, if 2120 * the entry has cred, give it to the backing object.
2121 */ 2122 static inline void 2123 vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry) 2124 { 2125 2126 VM_MAP_ASSERT_LOCKED(map); 2127 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, 2128 ("map entry %p is a submap", entry)); 2129 if (entry->object.vm_object == NULL && !map->system_map && 2130 (entry->eflags & MAP_ENTRY_GUARD) == 0) 2131 vm_map_entry_back(entry); 2132 else if (entry->object.vm_object != NULL && 2133 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && 2134 entry->cred != NULL) { 2135 VM_OBJECT_WLOCK(entry->object.vm_object); 2136 KASSERT(entry->object.vm_object->cred == NULL, 2137 ("OVERCOMMIT: %s: both cred e %p", __func__, entry)); 2138 entry->object.vm_object->cred = entry->cred; 2139 entry->object.vm_object->charge = entry->end - entry->start; 2140 VM_OBJECT_WUNLOCK(entry->object.vm_object); 2141 entry->cred = NULL; 2142 } 2143 } 2144 2145 /* 2146 * vm_map_clip_start: [ internal use only ] 2147 * 2148 * Asserts that the given entry begins at or after 2149 * the specified address; if necessary, 2150 * it splits the entry into two. 2151 */ 2152 #define vm_map_clip_start(map, entry, startaddr) \ 2153 { \ 2154 if (startaddr > entry->start) \ 2155 _vm_map_clip_start(map, entry, startaddr); \ 2156 } 2157 2158 /* 2159 * This routine is called only when it is known that 2160 * the entry must be split. 2161 */ 2162 static void 2163 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) 2164 { 2165 vm_map_entry_t new_entry; 2166 2167 VM_MAP_ASSERT_LOCKED(map); 2168 KASSERT(entry->end > start && entry->start < start, 2169 ("_vm_map_clip_start: invalid clip of entry %p", entry)); 2170 2171 /* 2172 * Create a backing object now, if none exists, so that more individual 2173 * objects won't be created after the map entry is split. 2174 */ 2175 vm_map_entry_charge_object(map, entry); 2176 2177 /* Clone the entry. */ 2178 new_entry = vm_map_entry_create(map); 2179 *new_entry = *entry; 2180 2181 /* 2182 * Split off the front portion. Insert the new entry BEFORE this one, 2183 * so that this entry has the specified starting address. 2184 */ 2185 new_entry->end = start; 2186 entry->offset += (start - entry->start); 2187 entry->start = start; 2188 if (new_entry->cred != NULL) 2189 crhold(entry->cred); 2190 2191 vm_map_entry_link(map, new_entry); 2192 2193 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 2194 vm_object_reference(new_entry->object.vm_object); 2195 vm_map_entry_set_vnode_text(new_entry, true); 2196 /* 2197 * The object->un_pager.vnp.writemappings for the 2198 * object of MAP_ENTRY_WRITECNT type entry shall be 2199 * kept as is here. The virtual pages are 2200 * re-distributed among the clipped entries, so the sum is 2201 * left the same. 2202 */ 2203 } 2204 } 2205 2206 /* 2207 * vm_map_clip_end: [ internal use only ] 2208 * 2209 * Asserts that the given entry ends at or before 2210 * the specified address; if necessary, 2211 * it splits the entry into two. 2212 */ 2213 #define vm_map_clip_end(map, entry, endaddr) \ 2214 { \ 2215 if ((endaddr) < (entry->end)) \ 2216 _vm_map_clip_end((map), (entry), (endaddr)); \ 2217 } 2218 2219 /* 2220 * This routine is called only when it is known that 2221 * the entry must be split. 
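 * (Editorial example, not part of the original source: clipping an entry
 * that covers [2M, 6M) at end = 4M shrinks this entry to [2M, 4M) and links
 * a new entry for [4M, 6M) immediately after it, with the new entry's
 * offset advanced by 2M into the shared backing object.)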
2222 */ 2223 static void 2224 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end) 2225 { 2226 vm_map_entry_t new_entry; 2227 2228 VM_MAP_ASSERT_LOCKED(map); 2229 KASSERT(entry->start < end && entry->end > end, 2230 ("_vm_map_clip_end: invalid clip of entry %p", entry)); 2231 2232 /* 2233 * Create a backing object now, if none exists, so that more individual 2234 * objects won't be created after the map entry is split. 2235 */ 2236 vm_map_entry_charge_object(map, entry); 2237 2238 /* Clone the entry. */ 2239 new_entry = vm_map_entry_create(map); 2240 *new_entry = *entry; 2241 2242 /* 2243 * Split off the back portion. Insert the new entry AFTER this one, 2244 * so that this entry has the specified ending address. 2245 */ 2246 new_entry->start = entry->end = end; 2247 new_entry->offset += (end - entry->start); 2248 if (new_entry->cred != NULL) 2249 crhold(entry->cred); 2250 2251 vm_map_entry_link(map, new_entry); 2252 2253 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 2254 vm_object_reference(new_entry->object.vm_object); 2255 vm_map_entry_set_vnode_text(new_entry, true); 2256 } 2257 } 2258 2259 /* 2260 * vm_map_submap: [ kernel use only ] 2261 * 2262 * Mark the given range as handled by a subordinate map. 2263 * 2264 * This range must have been created with vm_map_find, 2265 * and no other operations may have been performed on this 2266 * range prior to calling vm_map_submap. 2267 * 2268 * Only a limited number of operations can be performed 2269 * within this range after calling vm_map_submap: 2270 * vm_fault 2271 * [Don't try vm_map_copy!] 2272 * 2273 * To remove a submapping, one must first remove the 2274 * range from the superior map, and then destroy the 2275 * submap (if desired). [Better yet, don't try it.] 2276 */ 2277 int 2278 vm_map_submap( 2279 vm_map_t map, 2280 vm_offset_t start, 2281 vm_offset_t end, 2282 vm_map_t submap) 2283 { 2284 vm_map_entry_t entry; 2285 int result; 2286 2287 result = KERN_INVALID_ARGUMENT; 2288 2289 vm_map_lock(submap); 2290 submap->flags |= MAP_IS_SUB_MAP; 2291 vm_map_unlock(submap); 2292 2293 vm_map_lock(map); 2294 2295 VM_MAP_RANGE_CHECK(map, start, end); 2296 2297 if (vm_map_lookup_entry(map, start, &entry)) { 2298 vm_map_clip_start(map, entry, start); 2299 } else 2300 entry = vm_map_entry_succ(entry); 2301 2302 vm_map_clip_end(map, entry, end); 2303 2304 if ((entry->start == start) && (entry->end == end) && 2305 ((entry->eflags & MAP_ENTRY_COW) == 0) && 2306 (entry->object.vm_object == NULL)) { 2307 entry->object.sub_map = submap; 2308 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; 2309 result = KERN_SUCCESS; 2310 } 2311 vm_map_unlock(map); 2312 2313 if (result != KERN_SUCCESS) { 2314 vm_map_lock(submap); 2315 submap->flags &= ~MAP_IS_SUB_MAP; 2316 vm_map_unlock(submap); 2317 } 2318 return (result); 2319 } 2320 2321 /* 2322 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified 2323 */ 2324 #define MAX_INIT_PT 96 2325 2326 /* 2327 * vm_map_pmap_enter: 2328 * 2329 * Preload the specified map's pmap with mappings to the specified 2330 * object's memory-resident pages. No further physical pages are 2331 * allocated, and no further virtual pages are retrieved from secondary 2332 * storage. If the specified flags include MAP_PREFAULT_PARTIAL, then a 2333 * limited number of page mappings are created at the low-end of the 2334 * specified address range. (For this purpose, a superpage mapping 2335 * counts as one page mapping.) Otherwise, all resident pages within 2336 * the specified address range are mapped.
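 * (Editorial note, not part of the original source: with
 * MAP_PREFAULT_PARTIAL the limit is MAX_INIT_PT (96) mappings counted from
 * the low end of the range, and the loop below raises that threshold as it
 * skips over the constituent pages of a superpage, which counts as a
 * single mapping.)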
2337 */ 2338 static void 2339 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, 2340 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) 2341 { 2342 vm_offset_t start; 2343 vm_page_t p, p_start; 2344 vm_pindex_t mask, psize, threshold, tmpidx; 2345 2346 if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL) 2347 return; 2348 VM_OBJECT_RLOCK(object); 2349 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 2350 VM_OBJECT_RUNLOCK(object); 2351 VM_OBJECT_WLOCK(object); 2352 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { 2353 pmap_object_init_pt(map->pmap, addr, object, pindex, 2354 size); 2355 VM_OBJECT_WUNLOCK(object); 2356 return; 2357 } 2358 VM_OBJECT_LOCK_DOWNGRADE(object); 2359 } 2360 2361 psize = atop(size); 2362 if (psize + pindex > object->size) { 2363 if (object->size < pindex) { 2364 VM_OBJECT_RUNLOCK(object); 2365 return; 2366 } 2367 psize = object->size - pindex; 2368 } 2369 2370 start = 0; 2371 p_start = NULL; 2372 threshold = MAX_INIT_PT; 2373 2374 p = vm_page_find_least(object, pindex); 2375 /* 2376 * Assert: the variable p is either (1) the page with the 2377 * least pindex greater than or equal to the parameter pindex 2378 * or (2) NULL. 2379 */ 2380 for (; 2381 p != NULL && (tmpidx = p->pindex - pindex) < psize; 2382 p = TAILQ_NEXT(p, listq)) { 2383 /* 2384 * don't allow an madvise to blow away our really 2385 * free pages allocating pv entries. 2386 */ 2387 if (((flags & MAP_PREFAULT_MADVISE) != 0 && 2388 vm_page_count_severe()) || 2389 ((flags & MAP_PREFAULT_PARTIAL) != 0 && 2390 tmpidx >= threshold)) { 2391 psize = tmpidx; 2392 break; 2393 } 2394 if (vm_page_all_valid(p)) { 2395 if (p_start == NULL) { 2396 start = addr + ptoa(tmpidx); 2397 p_start = p; 2398 } 2399 /* Jump ahead if a superpage mapping is possible. */ 2400 if (p->psind > 0 && ((addr + ptoa(tmpidx)) & 2401 (pagesizes[p->psind] - 1)) == 0) { 2402 mask = atop(pagesizes[p->psind]) - 1; 2403 if (tmpidx + mask < psize && 2404 vm_page_ps_test(p, PS_ALL_VALID, NULL)) { 2405 p += mask; 2406 threshold += mask; 2407 } 2408 } 2409 } else if (p_start != NULL) { 2410 pmap_enter_object(map->pmap, start, addr + 2411 ptoa(tmpidx), p_start, prot); 2412 p_start = NULL; 2413 } 2414 } 2415 if (p_start != NULL) 2416 pmap_enter_object(map->pmap, start, addr + ptoa(psize), 2417 p_start, prot); 2418 VM_OBJECT_RUNLOCK(object); 2419 } 2420 2421 /* 2422 * vm_map_protect: 2423 * 2424 * Sets the protection of the specified address 2425 * region in the target map. If "set_max" is 2426 * specified, the maximum protection is to be set; 2427 * otherwise, only the current protection is affected. 2428 */ 2429 int 2430 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 2431 vm_prot_t new_prot, boolean_t set_max) 2432 { 2433 vm_map_entry_t current, entry, in_tran; 2434 vm_object_t obj; 2435 struct ucred *cred; 2436 vm_prot_t old_prot; 2437 int rv; 2438 2439 if (start == end) 2440 return (KERN_SUCCESS); 2441 2442 again: 2443 in_tran = NULL; 2444 vm_map_lock(map); 2445 2446 /* 2447 * Ensure that we are not concurrently wiring pages. vm_map_wire() may 2448 * need to fault pages into the map and will drop the map lock while 2449 * doing so, and the VM object may end up in an inconsistent state if we 2450 * update the protection on the map entry in between faults. 
2451 */ 2452 vm_map_wait_busy(map); 2453 2454 VM_MAP_RANGE_CHECK(map, start, end); 2455 2456 if (!vm_map_lookup_entry(map, start, &entry)) 2457 entry = vm_map_entry_succ(entry); 2458 2459 /* 2460 * Make a first pass to check for protection violations. 2461 */ 2462 for (current = entry; current->start < end; 2463 current = vm_map_entry_succ(current)) { 2464 if ((current->eflags & MAP_ENTRY_GUARD) != 0) 2465 continue; 2466 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 2467 vm_map_unlock(map); 2468 return (KERN_INVALID_ARGUMENT); 2469 } 2470 if ((new_prot & current->max_protection) != new_prot) { 2471 vm_map_unlock(map); 2472 return (KERN_PROTECTION_FAILURE); 2473 } 2474 if ((current->eflags & MAP_ENTRY_IN_TRANSITION) != 0) 2475 in_tran = current; 2476 } 2477 2478 /* 2479 * Postpone the operation until all in-transition map entries have 2480 * stabilized. An in-transition entry might already have its pages 2481 * wired and wired_count incremented, but not yet have its 2482 * MAP_ENTRY_USER_WIRED flag set. In which case, we would fail to call 2483 * vm_fault_copy_entry() in the final loop below. 2484 */ 2485 if (in_tran != NULL) { 2486 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2487 vm_map_unlock_and_wait(map, 0); 2488 goto again; 2489 } 2490 2491 /* 2492 * Before changing the protections, try to reserve swap space for any 2493 * private (i.e., copy-on-write) mappings that are transitioning from 2494 * read-only to read/write access. If a reservation fails, break out 2495 * of this loop early and let the next loop simplify the entries, since 2496 * some may now be mergeable. 2497 */ 2498 rv = KERN_SUCCESS; 2499 vm_map_clip_start(map, entry, start); 2500 for (current = entry; current->start < end; 2501 current = vm_map_entry_succ(current)) { 2502 2503 vm_map_clip_end(map, current, end); 2504 2505 if (set_max || 2506 ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 || 2507 ENTRY_CHARGED(current) || 2508 (current->eflags & MAP_ENTRY_GUARD) != 0) { 2509 continue; 2510 } 2511 2512 cred = curthread->td_ucred; 2513 obj = current->object.vm_object; 2514 2515 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) { 2516 if (!swap_reserve(current->end - current->start)) { 2517 rv = KERN_RESOURCE_SHORTAGE; 2518 end = current->end; 2519 break; 2520 } 2521 crhold(cred); 2522 current->cred = cred; 2523 continue; 2524 } 2525 2526 VM_OBJECT_WLOCK(obj); 2527 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) { 2528 VM_OBJECT_WUNLOCK(obj); 2529 continue; 2530 } 2531 2532 /* 2533 * Charge for the whole object allocation now, since 2534 * we cannot distinguish between non-charged and 2535 * charged clipped mapping of the same object later. 2536 */ 2537 KASSERT(obj->charge == 0, 2538 ("vm_map_protect: object %p overcharged (entry %p)", 2539 obj, current)); 2540 if (!swap_reserve(ptoa(obj->size))) { 2541 VM_OBJECT_WUNLOCK(obj); 2542 rv = KERN_RESOURCE_SHORTAGE; 2543 end = current->end; 2544 break; 2545 } 2546 2547 crhold(cred); 2548 obj->cred = cred; 2549 obj->charge = ptoa(obj->size); 2550 VM_OBJECT_WUNLOCK(obj); 2551 } 2552 2553 /* 2554 * If enough swap space was available, go back and fix up protections. 2555 * Otherwise, just simplify entries, since some may have been modified. 2556 * [Note that clipping is not necessary the second time.] 
2557 */ 2558 for (current = entry; current->start < end; 2559 vm_map_try_merge_entries(map, vm_map_entry_pred(current), current), 2560 current = vm_map_entry_succ(current)) { 2561 if (rv != KERN_SUCCESS || 2562 (current->eflags & MAP_ENTRY_GUARD) != 0) 2563 continue; 2564 2565 old_prot = current->protection; 2566 2567 if (set_max) 2568 current->protection = 2569 (current->max_protection = new_prot) & 2570 old_prot; 2571 else 2572 current->protection = new_prot; 2573 2574 /* 2575 * For user wired map entries, the normal lazy evaluation of 2576 * write access upgrades through soft page faults is 2577 * undesirable. Instead, immediately copy any pages that are 2578 * copy-on-write and enable write access in the physical map. 2579 */ 2580 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 && 2581 (current->protection & VM_PROT_WRITE) != 0 && 2582 (old_prot & VM_PROT_WRITE) == 0) 2583 vm_fault_copy_entry(map, map, current, current, NULL); 2584 2585 /* 2586 * When restricting access, update the physical map. Worry 2587 * about copy-on-write here. 2588 */ 2589 if ((old_prot & ~current->protection) != 0) { 2590 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 2591 VM_PROT_ALL) 2592 pmap_protect(map->pmap, current->start, 2593 current->end, 2594 current->protection & MASK(current)); 2595 #undef MASK 2596 } 2597 } 2598 vm_map_try_merge_entries(map, vm_map_entry_pred(current), current); 2599 vm_map_unlock(map); 2600 return (rv); 2601 } 2602 2603 /* 2604 * vm_map_madvise: 2605 * 2606 * This routine traverses a process's map, handling the madvise 2607 * system call. Advisories are classified as either those affecting 2608 * the vm_map_entry structure, or those affecting the underlying 2609 * objects. 2610 */ 2611 int 2612 vm_map_madvise( 2613 vm_map_t map, 2614 vm_offset_t start, 2615 vm_offset_t end, 2616 int behav) 2617 { 2618 vm_map_entry_t current, entry; 2619 bool modify_map; 2620 2621 /* 2622 * Some madvise calls directly modify the vm_map_entry, in which case 2623 * we need to use an exclusive lock on the map and we need to perform 2624 * various clipping operations. Otherwise we only need a read-lock 2625 * on the map. 2626 */ 2627 switch (behav) { 2628 case MADV_NORMAL: 2629 case MADV_SEQUENTIAL: 2630 case MADV_RANDOM: 2631 case MADV_NOSYNC: 2632 case MADV_AUTOSYNC: 2633 case MADV_NOCORE: 2634 case MADV_CORE: 2635 if (start == end) 2636 return (0); 2637 modify_map = true; 2638 vm_map_lock(map); 2639 break; 2640 case MADV_WILLNEED: 2641 case MADV_DONTNEED: 2642 case MADV_FREE: 2643 if (start == end) 2644 return (0); 2645 modify_map = false; 2646 vm_map_lock_read(map); 2647 break; 2648 default: 2649 return (EINVAL); 2650 } 2651 2652 /* 2653 * Locate starting entry and clip if necessary. 2654 */ 2655 VM_MAP_RANGE_CHECK(map, start, end); 2656 2657 if (vm_map_lookup_entry(map, start, &entry)) { 2658 if (modify_map) 2659 vm_map_clip_start(map, entry, start); 2660 } else { 2661 entry = vm_map_entry_succ(entry); 2662 } 2663 2664 if (modify_map) { 2665 /* 2666 * madvise behaviors that are implemented in the vm_map_entry. 2667 * 2668 * We clip the vm_map_entry so that behavioral changes are 2669 * limited to the specified address range.
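 * (Editorial note, not part of the original source: these cases only set or
 * clear flags on the clipped entries, e.g. MADV_NOSYNC sets
 * MAP_ENTRY_NOSYNC and MADV_NOCORE sets MAP_ENTRY_NOCOREDUMP, so nothing
 * outside [start, end) is affected.)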
2670 */ 2671 for (current = entry; current->start < end; 2672 current = vm_map_entry_succ(current)) { 2673 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2674 continue; 2675 2676 vm_map_clip_end(map, current, end); 2677 2678 switch (behav) { 2679 case MADV_NORMAL: 2680 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 2681 break; 2682 case MADV_SEQUENTIAL: 2683 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 2684 break; 2685 case MADV_RANDOM: 2686 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 2687 break; 2688 case MADV_NOSYNC: 2689 current->eflags |= MAP_ENTRY_NOSYNC; 2690 break; 2691 case MADV_AUTOSYNC: 2692 current->eflags &= ~MAP_ENTRY_NOSYNC; 2693 break; 2694 case MADV_NOCORE: 2695 current->eflags |= MAP_ENTRY_NOCOREDUMP; 2696 break; 2697 case MADV_CORE: 2698 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 2699 break; 2700 default: 2701 break; 2702 } 2703 vm_map_try_merge_entries(map, 2704 vm_map_entry_pred(current), current); 2705 } 2706 vm_map_try_merge_entries(map, vm_map_entry_pred(current), 2707 current); 2708 vm_map_unlock(map); 2709 } else { 2710 vm_pindex_t pstart, pend; 2711 2712 /* 2713 * madvise behaviors that are implemented in the underlying 2714 * vm_object. 2715 * 2716 * Since we don't clip the vm_map_entry, we have to clip 2717 * the vm_object pindex and count. 2718 */ 2719 for (current = entry; current->start < end; 2720 current = vm_map_entry_succ(current)) { 2721 vm_offset_t useEnd, useStart; 2722 2723 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) 2724 continue; 2725 2726 /* 2727 * MADV_FREE would otherwise rewind time to 2728 * the creation of the shadow object. Because 2729 * we hold the VM map read-locked, neither the 2730 * entry's object nor the presence of a 2731 * backing object can change. 2732 */ 2733 if (behav == MADV_FREE && 2734 current->object.vm_object != NULL && 2735 current->object.vm_object->backing_object != NULL) 2736 continue; 2737 2738 pstart = OFF_TO_IDX(current->offset); 2739 pend = pstart + atop(current->end - current->start); 2740 useStart = current->start; 2741 useEnd = current->end; 2742 2743 if (current->start < start) { 2744 pstart += atop(start - current->start); 2745 useStart = start; 2746 } 2747 if (current->end > end) { 2748 pend -= atop(current->end - end); 2749 useEnd = end; 2750 } 2751 2752 if (pstart >= pend) 2753 continue; 2754 2755 /* 2756 * Perform the pmap_advise() before clearing 2757 * PGA_REFERENCED in vm_page_advise(). Otherwise, a 2758 * concurrent pmap operation, such as pmap_remove(), 2759 * could clear a reference in the pmap and set 2760 * PGA_REFERENCED on the page before the pmap_advise() 2761 * had completed. Consequently, the page would appear 2762 * referenced based upon an old reference that 2763 * occurred before this pmap_advise() ran. 2764 */ 2765 if (behav == MADV_DONTNEED || behav == MADV_FREE) 2766 pmap_advise(map->pmap, useStart, useEnd, 2767 behav); 2768 2769 vm_object_madvise(current->object.vm_object, pstart, 2770 pend, behav); 2771 2772 /* 2773 * Pre-populate paging structures in the 2774 * WILLNEED case. For wired entries, the 2775 * paging structures are already populated. 
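 * (Editorial note, not part of the original source: MAP_PREFAULT_MADVISE
 * asks vm_map_pmap_enter() to back off when free memory is severely
 * depleted; see the vm_page_count_severe() check in that function.)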
2776 */ 2777 if (behav == MADV_WILLNEED && 2778 current->wired_count == 0) { 2779 vm_map_pmap_enter(map, 2780 useStart, 2781 current->protection, 2782 current->object.vm_object, 2783 pstart, 2784 ptoa(pend - pstart), 2785 MAP_PREFAULT_MADVISE 2786 ); 2787 } 2788 } 2789 vm_map_unlock_read(map); 2790 } 2791 return (0); 2792 } 2793 2794 2795 /* 2796 * vm_map_inherit: 2797 * 2798 * Sets the inheritance of the specified address 2799 * range in the target map. Inheritance 2800 * affects how the map will be shared with 2801 * child maps at the time of vmspace_fork. 2802 */ 2803 int 2804 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 2805 vm_inherit_t new_inheritance) 2806 { 2807 vm_map_entry_t entry; 2808 vm_map_entry_t temp_entry; 2809 2810 switch (new_inheritance) { 2811 case VM_INHERIT_NONE: 2812 case VM_INHERIT_COPY: 2813 case VM_INHERIT_SHARE: 2814 case VM_INHERIT_ZERO: 2815 break; 2816 default: 2817 return (KERN_INVALID_ARGUMENT); 2818 } 2819 if (start == end) 2820 return (KERN_SUCCESS); 2821 vm_map_lock(map); 2822 VM_MAP_RANGE_CHECK(map, start, end); 2823 if (vm_map_lookup_entry(map, start, &temp_entry)) { 2824 entry = temp_entry; 2825 vm_map_clip_start(map, entry, start); 2826 } else 2827 entry = vm_map_entry_succ(temp_entry); 2828 while (entry->start < end) { 2829 vm_map_clip_end(map, entry, end); 2830 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || 2831 new_inheritance != VM_INHERIT_ZERO) 2832 entry->inheritance = new_inheritance; 2833 vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry); 2834 entry = vm_map_entry_succ(entry); 2835 } 2836 vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry); 2837 vm_map_unlock(map); 2838 return (KERN_SUCCESS); 2839 } 2840 2841 /* 2842 * vm_map_entry_in_transition: 2843 * 2844 * Release the map lock, and sleep until the entry is no longer in 2845 * transition. Awake and acquire the map lock. If the map changed while 2846 * another held the lock, lookup a possibly-changed entry at or after the 2847 * 'start' position of the old entry. 2848 */ 2849 static vm_map_entry_t 2850 vm_map_entry_in_transition(vm_map_t map, vm_offset_t in_start, 2851 vm_offset_t *io_end, bool holes_ok, vm_map_entry_t in_entry) 2852 { 2853 vm_map_entry_t entry; 2854 vm_offset_t start; 2855 u_int last_timestamp; 2856 2857 VM_MAP_ASSERT_LOCKED(map); 2858 KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 2859 ("not in-tranition map entry %p", in_entry)); 2860 /* 2861 * We have not yet clipped the entry. 2862 */ 2863 start = MAX(in_start, in_entry->start); 2864 in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2865 last_timestamp = map->timestamp; 2866 if (vm_map_unlock_and_wait(map, 0)) { 2867 /* 2868 * Allow interruption of user wiring/unwiring? 2869 */ 2870 } 2871 vm_map_lock(map); 2872 if (last_timestamp + 1 == map->timestamp) 2873 return (in_entry); 2874 2875 /* 2876 * Look again for the entry because the map was modified while it was 2877 * unlocked. Specifically, the entry may have been clipped, merged, or 2878 * deleted. 2879 */ 2880 if (!vm_map_lookup_entry(map, start, &entry)) { 2881 if (!holes_ok) { 2882 *io_end = start; 2883 return (NULL); 2884 } 2885 entry = vm_map_entry_succ(entry); 2886 } 2887 return (entry); 2888 } 2889 2890 /* 2891 * vm_map_unwire: 2892 * 2893 * Implements both kernel and user unwiring. 
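 * (Editorial sketch, not part of the original source: a user request such
 * as munlock(2) typically reaches this function roughly as
 *
 *	vm_map_unwire(map, start, end,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *
 * kernel callers pass VM_MAP_WIRE_SYSTEM instead, and either kind may add
 * VM_MAP_WIRE_HOLESOK to tolerate unmapped gaps in the range.)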
2894 */ 2895 int 2896 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, 2897 int flags) 2898 { 2899 vm_map_entry_t entry, first_entry; 2900 int rv; 2901 bool first_iteration, holes_ok, need_wakeup, user_unwire; 2902 2903 if (start == end) 2904 return (KERN_SUCCESS); 2905 holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; 2906 user_unwire = (flags & VM_MAP_WIRE_USER) != 0; 2907 vm_map_lock(map); 2908 VM_MAP_RANGE_CHECK(map, start, end); 2909 if (!vm_map_lookup_entry(map, start, &first_entry)) { 2910 if (holes_ok) 2911 first_entry = vm_map_entry_succ(first_entry); 2912 else { 2913 vm_map_unlock(map); 2914 return (KERN_INVALID_ADDRESS); 2915 } 2916 } 2917 first_iteration = true; 2918 entry = first_entry; 2919 rv = KERN_SUCCESS; 2920 while (entry->start < end) { 2921 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2922 /* 2923 * We have not yet clipped the entry. 2924 */ 2925 entry = vm_map_entry_in_transition(map, start, &end, 2926 holes_ok, entry); 2927 if (entry == NULL) { 2928 if (first_iteration) { 2929 vm_map_unlock(map); 2930 return (KERN_INVALID_ADDRESS); 2931 } 2932 rv = KERN_INVALID_ADDRESS; 2933 break; 2934 } 2935 first_entry = first_iteration ? entry : NULL; 2936 continue; 2937 } 2938 first_iteration = false; 2939 vm_map_clip_start(map, entry, start); 2940 vm_map_clip_end(map, entry, end); 2941 /* 2942 * Mark the entry in case the map lock is released. (See 2943 * above.) 2944 */ 2945 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 2946 entry->wiring_thread == NULL, 2947 ("owned map entry %p", entry)); 2948 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 2949 entry->wiring_thread = curthread; 2950 /* 2951 * Check the map for holes in the specified region. 2952 * If holes_ok, skip this check. 2953 */ 2954 if (!holes_ok && 2955 (entry->end < end && 2956 vm_map_entry_succ(entry)->start > entry->end)) { 2957 end = entry->end; 2958 rv = KERN_INVALID_ADDRESS; 2959 break; 2960 } 2961 /* 2962 * If system unwiring, require that the entry is system wired. 2963 */ 2964 if (!user_unwire && 2965 vm_map_entry_system_wired_count(entry) == 0) { 2966 end = entry->end; 2967 rv = KERN_INVALID_ARGUMENT; 2968 break; 2969 } 2970 entry = vm_map_entry_succ(entry); 2971 } 2972 need_wakeup = false; 2973 if (first_entry == NULL && 2974 !vm_map_lookup_entry(map, start, &first_entry)) { 2975 KASSERT(holes_ok, ("vm_map_unwire: lookup failed")); 2976 first_entry = vm_map_entry_succ(first_entry); 2977 } 2978 for (entry = first_entry; entry->start < end; 2979 entry = vm_map_entry_succ(entry)) { 2980 /* 2981 * If holes_ok was specified, an empty 2982 * space in the unwired region could have been mapped 2983 * while the map lock was dropped for draining 2984 * MAP_ENTRY_IN_TRANSITION. Moreover, another thread 2985 * could be simultaneously wiring this new mapping 2986 * entry. Detect these cases and skip any entries 2987 * marked as in transition by us. 
2988 */ 2989 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 2990 entry->wiring_thread != curthread) { 2991 KASSERT(holes_ok, 2992 ("vm_map_unwire: !HOLESOK and new/changed entry")); 2993 continue; 2994 } 2995 2996 if (rv == KERN_SUCCESS && (!user_unwire || 2997 (entry->eflags & MAP_ENTRY_USER_WIRED))) { 2998 if (entry->wired_count == 1) 2999 vm_map_entry_unwire(map, entry); 3000 else 3001 entry->wired_count--; 3002 if (user_unwire) 3003 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3004 } 3005 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 3006 ("vm_map_unwire: in-transition flag missing %p", entry)); 3007 KASSERT(entry->wiring_thread == curthread, 3008 ("vm_map_unwire: alien wire %p", entry)); 3009 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 3010 entry->wiring_thread = NULL; 3011 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 3012 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 3013 need_wakeup = true; 3014 } 3015 vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry); 3016 } 3017 vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry); 3018 vm_map_unlock(map); 3019 if (need_wakeup) 3020 vm_map_wakeup(map); 3021 return (rv); 3022 } 3023 3024 static void 3025 vm_map_wire_user_count_sub(u_long npages) 3026 { 3027 3028 atomic_subtract_long(&vm_user_wire_count, npages); 3029 } 3030 3031 static bool 3032 vm_map_wire_user_count_add(u_long npages) 3033 { 3034 u_long wired; 3035 3036 wired = vm_user_wire_count; 3037 do { 3038 if (npages + wired > vm_page_max_user_wired) 3039 return (false); 3040 } while (!atomic_fcmpset_long(&vm_user_wire_count, &wired, 3041 npages + wired)); 3042 3043 return (true); 3044 } 3045 3046 /* 3047 * vm_map_wire_entry_failure: 3048 * 3049 * Handle a wiring failure on the given entry. 3050 * 3051 * The map should be locked. 3052 */ 3053 static void 3054 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, 3055 vm_offset_t failed_addr) 3056 { 3057 3058 VM_MAP_ASSERT_LOCKED(map); 3059 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 && 3060 entry->wired_count == 1, 3061 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry)); 3062 KASSERT(failed_addr < entry->end, 3063 ("vm_map_wire_entry_failure: entry %p was fully wired", entry)); 3064 3065 /* 3066 * If any pages at the start of this entry were successfully wired, 3067 * then unwire them. 3068 */ 3069 if (failed_addr > entry->start) { 3070 pmap_unwire(map->pmap, entry->start, failed_addr); 3071 vm_object_unwire(entry->object.vm_object, entry->offset, 3072 failed_addr - entry->start, PQ_ACTIVE); 3073 } 3074 3075 /* 3076 * Assign an out-of-range value to represent the failure to wire this 3077 * entry. 3078 */ 3079 entry->wired_count = -1; 3080 } 3081 3082 int 3083 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) 3084 { 3085 int rv; 3086 3087 vm_map_lock(map); 3088 rv = vm_map_wire_locked(map, start, end, flags); 3089 vm_map_unlock(map); 3090 return (rv); 3091 } 3092 3093 3094 /* 3095 * vm_map_wire_locked: 3096 * 3097 * Implements both kernel and user wiring. Returns with the map locked, 3098 * the map lock may be dropped. 
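 * (Editorial sketch, not part of the original source: mlock(2)-style user
 * wiring typically arrives via vm_map_wire(map, start, end,
 * VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); adding VM_MAP_WIRE_WRITE
 * additionally requires the wired range to be writable, as checked below.)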
3099 */ 3100 int 3101 vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) 3102 { 3103 vm_map_entry_t entry, first_entry, tmp_entry; 3104 vm_offset_t faddr, saved_end, saved_start; 3105 u_long npages; 3106 u_int last_timestamp; 3107 int rv; 3108 bool first_iteration, holes_ok, need_wakeup, user_wire; 3109 vm_prot_t prot; 3110 3111 VM_MAP_ASSERT_LOCKED(map); 3112 3113 if (start == end) 3114 return (KERN_SUCCESS); 3115 prot = 0; 3116 if (flags & VM_MAP_WIRE_WRITE) 3117 prot |= VM_PROT_WRITE; 3118 holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0; 3119 user_wire = (flags & VM_MAP_WIRE_USER) != 0; 3120 VM_MAP_RANGE_CHECK(map, start, end); 3121 if (!vm_map_lookup_entry(map, start, &first_entry)) { 3122 if (holes_ok) 3123 first_entry = vm_map_entry_succ(first_entry); 3124 else 3125 return (KERN_INVALID_ADDRESS); 3126 } 3127 first_iteration = true; 3128 entry = first_entry; 3129 while (entry->start < end) { 3130 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 3131 /* 3132 * We have not yet clipped the entry. 3133 */ 3134 entry = vm_map_entry_in_transition(map, start, &end, 3135 holes_ok, entry); 3136 if (entry == NULL) { 3137 if (first_iteration) 3138 return (KERN_INVALID_ADDRESS); 3139 rv = KERN_INVALID_ADDRESS; 3140 goto done; 3141 } 3142 first_entry = first_iteration ? entry : NULL; 3143 continue; 3144 } 3145 first_iteration = false; 3146 vm_map_clip_start(map, entry, start); 3147 vm_map_clip_end(map, entry, end); 3148 /* 3149 * Mark the entry in case the map lock is released. (See 3150 * above.) 3151 */ 3152 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && 3153 entry->wiring_thread == NULL, 3154 ("owned map entry %p", entry)); 3155 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 3156 entry->wiring_thread = curthread; 3157 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 3158 || (entry->protection & prot) != prot) { 3159 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; 3160 if (!holes_ok) { 3161 end = entry->end; 3162 rv = KERN_INVALID_ADDRESS; 3163 goto done; 3164 } 3165 } else if (entry->wired_count == 0) { 3166 entry->wired_count++; 3167 3168 npages = atop(entry->end - entry->start); 3169 if (user_wire && !vm_map_wire_user_count_add(npages)) { 3170 vm_map_wire_entry_failure(map, entry, 3171 entry->start); 3172 end = entry->end; 3173 rv = KERN_RESOURCE_SHORTAGE; 3174 goto done; 3175 } 3176 3177 /* 3178 * Release the map lock, relying on the in-transition 3179 * mark. Mark the map busy for fork. 3180 */ 3181 saved_start = entry->start; 3182 saved_end = entry->end; 3183 last_timestamp = map->timestamp; 3184 vm_map_busy(map); 3185 vm_map_unlock(map); 3186 3187 faddr = saved_start; 3188 do { 3189 /* 3190 * Simulate a fault to get the page and enter 3191 * it into the physical map. 3192 */ 3193 if ((rv = vm_fault(map, faddr, 3194 VM_PROT_NONE, VM_FAULT_WIRE, NULL)) != 3195 KERN_SUCCESS) 3196 break; 3197 } while ((faddr += PAGE_SIZE) < saved_end); 3198 vm_map_lock(map); 3199 vm_map_unbusy(map); 3200 if (last_timestamp + 1 != map->timestamp) { 3201 /* 3202 * Look again for the entry because the map was 3203 * modified while it was unlocked. The entry 3204 * may have been clipped, but NOT merged or 3205 * deleted. 
3206 */ 3207 if (!vm_map_lookup_entry(map, saved_start, 3208 &tmp_entry)) 3209 KASSERT(false, 3210 ("vm_map_wire: lookup failed")); 3211 if (entry == first_entry) 3212 first_entry = tmp_entry; 3213 else 3214 first_entry = NULL; 3215 entry = tmp_entry; 3216 while (entry->end < saved_end) { 3217 /* 3218 * In case of failure, handle entries 3219 * that were not fully wired here; 3220 * fully wired entries are handled 3221 * later. 3222 */ 3223 if (rv != KERN_SUCCESS && 3224 faddr < entry->end) 3225 vm_map_wire_entry_failure(map, 3226 entry, faddr); 3227 entry = vm_map_entry_succ(entry); 3228 } 3229 } 3230 if (rv != KERN_SUCCESS) { 3231 vm_map_wire_entry_failure(map, entry, faddr); 3232 if (user_wire) 3233 vm_map_wire_user_count_sub(npages); 3234 end = entry->end; 3235 goto done; 3236 } 3237 } else if (!user_wire || 3238 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 3239 entry->wired_count++; 3240 } 3241 /* 3242 * Check the map for holes in the specified region. 3243 * If holes_ok was specified, skip this check. 3244 */ 3245 if (!holes_ok && 3246 entry->end < end && 3247 vm_map_entry_succ(entry)->start > entry->end) { 3248 end = entry->end; 3249 rv = KERN_INVALID_ADDRESS; 3250 goto done; 3251 } 3252 entry = vm_map_entry_succ(entry); 3253 } 3254 rv = KERN_SUCCESS; 3255 done: 3256 need_wakeup = false; 3257 if (first_entry == NULL && 3258 !vm_map_lookup_entry(map, start, &first_entry)) { 3259 KASSERT(holes_ok, ("vm_map_wire: lookup failed")); 3260 first_entry = vm_map_entry_succ(first_entry); 3261 } 3262 for (entry = first_entry; entry->start < end; 3263 entry = vm_map_entry_succ(entry)) { 3264 /* 3265 * If holes_ok was specified, an empty 3266 * space in the unwired region could have been mapped 3267 * while the map lock was dropped for faulting in the 3268 * pages or draining MAP_ENTRY_IN_TRANSITION. 3269 * Moreover, another thread could be simultaneously 3270 * wiring this new mapping entry. Detect these cases 3271 * and skip any entries marked as in transition not by us. 3272 */ 3273 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || 3274 entry->wiring_thread != curthread) { 3275 KASSERT(holes_ok, 3276 ("vm_map_wire: !HOLESOK and new/changed entry")); 3277 continue; 3278 } 3279 3280 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { 3281 /* do nothing */ 3282 } else if (rv == KERN_SUCCESS) { 3283 if (user_wire) 3284 entry->eflags |= MAP_ENTRY_USER_WIRED; 3285 } else if (entry->wired_count == -1) { 3286 /* 3287 * Wiring failed on this entry. Thus, unwiring is 3288 * unnecessary. 3289 */ 3290 entry->wired_count = 0; 3291 } else if (!user_wire || 3292 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 3293 /* 3294 * Undo the wiring. Wiring succeeded on this entry 3295 * but failed on a later entry. 
3296 */ 3297 if (entry->wired_count == 1) { 3298 vm_map_entry_unwire(map, entry); 3299 if (user_wire) 3300 vm_map_wire_user_count_sub( 3301 atop(entry->end - entry->start)); 3302 } else 3303 entry->wired_count--; 3304 } 3305 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, 3306 ("vm_map_wire: in-transition flag missing %p", entry)); 3307 KASSERT(entry->wiring_thread == curthread, 3308 ("vm_map_wire: alien wire %p", entry)); 3309 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | 3310 MAP_ENTRY_WIRE_SKIPPED); 3311 entry->wiring_thread = NULL; 3312 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 3313 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 3314 need_wakeup = true; 3315 } 3316 vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry); 3317 } 3318 vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry); 3319 if (need_wakeup) 3320 vm_map_wakeup(map); 3321 return (rv); 3322 } 3323 3324 /* 3325 * vm_map_sync 3326 * 3327 * Push any dirty cached pages in the address range to their pager. 3328 * If syncio is TRUE, dirty pages are written synchronously. 3329 * If invalidate is TRUE, any cached pages are freed as well. 3330 * 3331 * If the size of the region from start to end is zero, we are 3332 * supposed to flush all modified pages within the region containing 3333 * start. Unfortunately, a region can be split or coalesced with 3334 * neighboring regions, making it difficult to determine what the 3335 * original region was. Therefore, we approximate this requirement by 3336 * flushing the current region containing start. 3337 * 3338 * Returns an error if any part of the specified range is not mapped. 3339 */ 3340 int 3341 vm_map_sync( 3342 vm_map_t map, 3343 vm_offset_t start, 3344 vm_offset_t end, 3345 boolean_t syncio, 3346 boolean_t invalidate) 3347 { 3348 vm_map_entry_t current; 3349 vm_map_entry_t entry; 3350 vm_size_t size; 3351 vm_object_t object; 3352 vm_ooffset_t offset; 3353 unsigned int last_timestamp; 3354 boolean_t failed; 3355 3356 vm_map_lock_read(map); 3357 VM_MAP_RANGE_CHECK(map, start, end); 3358 if (!vm_map_lookup_entry(map, start, &entry)) { 3359 vm_map_unlock_read(map); 3360 return (KERN_INVALID_ADDRESS); 3361 } else if (start == end) { 3362 start = entry->start; 3363 end = entry->end; 3364 } 3365 /* 3366 * Make a first pass to check for user-wired memory and holes. 3367 */ 3368 for (current = entry; current->start < end; 3369 current = vm_map_entry_succ(current)) { 3370 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { 3371 vm_map_unlock_read(map); 3372 return (KERN_INVALID_ARGUMENT); 3373 } 3374 if (end > current->end && 3375 current->end != vm_map_entry_succ(current)->start) { 3376 vm_map_unlock_read(map); 3377 return (KERN_INVALID_ADDRESS); 3378 } 3379 } 3380 3381 if (invalidate) 3382 pmap_remove(map->pmap, start, end); 3383 failed = FALSE; 3384 3385 /* 3386 * Make a second pass, cleaning/uncaching pages from the indicated 3387 * objects as we go. 3388 */ 3389 for (current = entry; current->start < end;) { 3390 offset = current->offset + (start - current->start); 3391 size = (end <= current->end ? 
end : current->end) - start; 3392 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { 3393 vm_map_t smap; 3394 vm_map_entry_t tentry; 3395 vm_size_t tsize; 3396 3397 smap = current->object.sub_map; 3398 vm_map_lock_read(smap); 3399 (void) vm_map_lookup_entry(smap, offset, &tentry); 3400 tsize = tentry->end - offset; 3401 if (tsize < size) 3402 size = tsize; 3403 object = tentry->object.vm_object; 3404 offset = tentry->offset + (offset - tentry->start); 3405 vm_map_unlock_read(smap); 3406 } else { 3407 object = current->object.vm_object; 3408 } 3409 vm_object_reference(object); 3410 last_timestamp = map->timestamp; 3411 vm_map_unlock_read(map); 3412 if (!vm_object_sync(object, offset, size, syncio, invalidate)) 3413 failed = TRUE; 3414 start += size; 3415 vm_object_deallocate(object); 3416 vm_map_lock_read(map); 3417 if (last_timestamp == map->timestamp || 3418 !vm_map_lookup_entry(map, start, &current)) 3419 current = vm_map_entry_succ(current); 3420 } 3421 3422 vm_map_unlock_read(map); 3423 return (failed ? KERN_FAILURE : KERN_SUCCESS); 3424 } 3425 3426 /* 3427 * vm_map_entry_unwire: [ internal use only ] 3428 * 3429 * Make the region specified by this entry pageable. 3430 * 3431 * The map in question should be locked. 3432 * [This is the reason for this routine's existence.] 3433 */ 3434 static void 3435 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 3436 { 3437 vm_size_t size; 3438 3439 VM_MAP_ASSERT_LOCKED(map); 3440 KASSERT(entry->wired_count > 0, 3441 ("vm_map_entry_unwire: entry %p isn't wired", entry)); 3442 3443 size = entry->end - entry->start; 3444 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) 3445 vm_map_wire_user_count_sub(atop(size)); 3446 pmap_unwire(map->pmap, entry->start, entry->end); 3447 vm_object_unwire(entry->object.vm_object, entry->offset, size, 3448 PQ_ACTIVE); 3449 entry->wired_count = 0; 3450 } 3451 3452 static void 3453 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) 3454 { 3455 3456 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) 3457 vm_object_deallocate(entry->object.vm_object); 3458 uma_zfree(system_map ? kmapentzone : mapentzone, entry); 3459 } 3460 3461 /* 3462 * vm_map_entry_delete: [ internal use only ] 3463 * 3464 * Deallocate the given entry from the target map.
3465 */ 3466 static void 3467 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) 3468 { 3469 vm_object_t object; 3470 vm_pindex_t offidxstart, offidxend, count, size1; 3471 vm_size_t size; 3472 3473 vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE); 3474 object = entry->object.vm_object; 3475 3476 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { 3477 MPASS(entry->cred == NULL); 3478 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); 3479 MPASS(object == NULL); 3480 vm_map_entry_deallocate(entry, map->system_map); 3481 return; 3482 } 3483 3484 size = entry->end - entry->start; 3485 map->size -= size; 3486 3487 if (entry->cred != NULL) { 3488 swap_release_by_cred(size, entry->cred); 3489 crfree(entry->cred); 3490 } 3491 3492 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) { 3493 entry->object.vm_object = NULL; 3494 } else if ((object->flags & OBJ_ANON) != 0 || 3495 object == kernel_object) { 3496 KASSERT(entry->cred == NULL || object->cred == NULL || 3497 (entry->eflags & MAP_ENTRY_NEEDS_COPY), 3498 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); 3499 count = atop(size); 3500 offidxstart = OFF_TO_IDX(entry->offset); 3501 offidxend = offidxstart + count; 3502 VM_OBJECT_WLOCK(object); 3503 if (object->ref_count != 1 && 3504 ((object->flags & OBJ_ONEMAPPING) != 0 || 3505 object == kernel_object)) { 3506 vm_object_collapse(object); 3507 3508 /* 3509 * The option OBJPR_NOTMAPPED can be passed here 3510 * because vm_map_delete() already performed 3511 * pmap_remove() on the only mapping to this range 3512 * of pages. 3513 */ 3514 vm_object_page_remove(object, offidxstart, offidxend, 3515 OBJPR_NOTMAPPED); 3516 if (object->type == OBJT_SWAP) 3517 swap_pager_freespace(object, offidxstart, 3518 count); 3519 if (offidxend >= object->size && 3520 offidxstart < object->size) { 3521 size1 = object->size; 3522 object->size = offidxstart; 3523 if (object->cred != NULL) { 3524 size1 -= object->size; 3525 KASSERT(object->charge >= ptoa(size1), 3526 ("object %p charge < 0", object)); 3527 swap_release_by_cred(ptoa(size1), 3528 object->cred); 3529 object->charge -= ptoa(size1); 3530 } 3531 } 3532 } 3533 VM_OBJECT_WUNLOCK(object); 3534 } 3535 if (map->system_map) 3536 vm_map_entry_deallocate(entry, TRUE); 3537 else { 3538 entry->defer_next = curthread->td_map_def_user; 3539 curthread->td_map_def_user = entry; 3540 } 3541 } 3542 3543 /* 3544 * vm_map_delete: [ internal use only ] 3545 * 3546 * Deallocates the given address range from the target 3547 * map. 3548 */ 3549 int 3550 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) 3551 { 3552 vm_map_entry_t entry; 3553 vm_map_entry_t first_entry; 3554 3555 VM_MAP_ASSERT_LOCKED(map); 3556 if (start == end) 3557 return (KERN_SUCCESS); 3558 3559 /* 3560 * Find the start of the region, and clip it 3561 */ 3562 if (!vm_map_lookup_entry(map, start, &first_entry)) 3563 entry = vm_map_entry_succ(first_entry); 3564 else { 3565 entry = first_entry; 3566 vm_map_clip_start(map, entry, start); 3567 } 3568 3569 /* 3570 * Step through all entries in this region 3571 */ 3572 while (entry->start < end) { 3573 vm_map_entry_t next; 3574 3575 /* 3576 * Wait for wiring or unwiring of an entry to complete. 3577 * Also wait for any system wirings to disappear on 3578 * user maps. 
3579 */ 3580 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || 3581 (vm_map_pmap(map) != kernel_pmap && 3582 vm_map_entry_system_wired_count(entry) != 0)) { 3583 unsigned int last_timestamp; 3584 vm_offset_t saved_start; 3585 vm_map_entry_t tmp_entry; 3586 3587 saved_start = entry->start; 3588 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 3589 last_timestamp = map->timestamp; 3590 (void) vm_map_unlock_and_wait(map, 0); 3591 vm_map_lock(map); 3592 if (last_timestamp + 1 != map->timestamp) { 3593 /* 3594 * Look again for the entry because the map was 3595 * modified while it was unlocked. 3596 * Specifically, the entry may have been 3597 * clipped, merged, or deleted. 3598 */ 3599 if (!vm_map_lookup_entry(map, saved_start, 3600 &tmp_entry)) 3601 entry = vm_map_entry_succ(tmp_entry); 3602 else { 3603 entry = tmp_entry; 3604 vm_map_clip_start(map, entry, 3605 saved_start); 3606 } 3607 } 3608 continue; 3609 } 3610 vm_map_clip_end(map, entry, end); 3611 3612 next = vm_map_entry_succ(entry); 3613 3614 /* 3615 * Unwire before removing addresses from the pmap; otherwise, 3616 * unwiring will put the entries back in the pmap. 3617 */ 3618 if (entry->wired_count != 0) 3619 vm_map_entry_unwire(map, entry); 3620 3621 /* 3622 * Remove mappings for the pages, but only if the 3623 * mappings could exist. For instance, it does not 3624 * make sense to call pmap_remove() for guard entries. 3625 */ 3626 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || 3627 entry->object.vm_object != NULL) 3628 pmap_remove(map->pmap, entry->start, entry->end); 3629 3630 if (entry->end == map->anon_loc) 3631 map->anon_loc = entry->start; 3632 3633 /* 3634 * Delete the entry only after removing all pmap 3635 * entries pointing to its pages. (Otherwise, its 3636 * page frames may be reallocated, and any modify bits 3637 * will be set in the wrong object!) 3638 */ 3639 vm_map_entry_delete(map, entry); 3640 entry = next; 3641 } 3642 return (KERN_SUCCESS); 3643 } 3644 3645 /* 3646 * vm_map_remove: 3647 * 3648 * Remove the given address range from the target map. 3649 * This is the exported form of vm_map_delete. 3650 */ 3651 int 3652 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 3653 { 3654 int result; 3655 3656 vm_map_lock(map); 3657 VM_MAP_RANGE_CHECK(map, start, end); 3658 result = vm_map_delete(map, start, end); 3659 vm_map_unlock(map); 3660 return (result); 3661 } 3662 3663 /* 3664 * vm_map_check_protection: 3665 * 3666 * Assert that the target map allows the specified privilege on the 3667 * entire address region given. The entire region must be allocated. 3668 * 3669 * WARNING! This code does not and should not check whether the 3670 * contents of the region is accessible. For example a smaller file 3671 * might be mapped into a larger address space. 3672 * 3673 * NOTE! This code is also called by munmap(). 3674 * 3675 * The map must be locked. A read lock is sufficient. 3676 */ 3677 boolean_t 3678 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 3679 vm_prot_t protection) 3680 { 3681 vm_map_entry_t entry; 3682 vm_map_entry_t tmp_entry; 3683 3684 if (!vm_map_lookup_entry(map, start, &tmp_entry)) 3685 return (FALSE); 3686 entry = tmp_entry; 3687 3688 while (start < end) { 3689 /* 3690 * No holes allowed! 3691 */ 3692 if (start < entry->start) 3693 return (FALSE); 3694 /* 3695 * Check protection associated with entry. 
3696 */ 3697 if ((entry->protection & protection) != protection) 3698 return (FALSE); 3699 /* go to next entry */ 3700 start = entry->end; 3701 entry = vm_map_entry_succ(entry); 3702 } 3703 return (TRUE); 3704 } 3705 3706 /* 3707 * vm_map_copy_entry: 3708 * 3709 * Copies the contents of the source entry to the destination 3710 * entry. The entries *must* be aligned properly. 3711 */ 3712 static void 3713 vm_map_copy_entry( 3714 vm_map_t src_map, 3715 vm_map_t dst_map, 3716 vm_map_entry_t src_entry, 3717 vm_map_entry_t dst_entry, 3718 vm_ooffset_t *fork_charge) 3719 { 3720 vm_object_t src_object; 3721 vm_map_entry_t fake_entry; 3722 vm_offset_t size; 3723 struct ucred *cred; 3724 int charged; 3725 3726 VM_MAP_ASSERT_LOCKED(dst_map); 3727 3728 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) 3729 return; 3730 3731 if (src_entry->wired_count == 0 || 3732 (src_entry->protection & VM_PROT_WRITE) == 0) { 3733 /* 3734 * If the source entry is marked needs_copy, it is already 3735 * write-protected. 3736 */ 3737 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 && 3738 (src_entry->protection & VM_PROT_WRITE) != 0) { 3739 pmap_protect(src_map->pmap, 3740 src_entry->start, 3741 src_entry->end, 3742 src_entry->protection & ~VM_PROT_WRITE); 3743 } 3744 3745 /* 3746 * Make a copy of the object. 3747 */ 3748 size = src_entry->end - src_entry->start; 3749 if ((src_object = src_entry->object.vm_object) != NULL) { 3750 VM_OBJECT_WLOCK(src_object); 3751 charged = ENTRY_CHARGED(src_entry); 3752 if (src_object->handle == NULL && 3753 (src_object->flags & OBJ_ANON) != 0) { 3754 vm_object_collapse(src_object); 3755 if ((src_object->flags & OBJ_ONEMAPPING) != 0) { 3756 vm_object_split(src_entry); 3757 src_object = 3758 src_entry->object.vm_object; 3759 } 3760 } 3761 vm_object_reference_locked(src_object); 3762 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 3763 if (src_entry->cred != NULL && 3764 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { 3765 KASSERT(src_object->cred == NULL, 3766 ("OVERCOMMIT: vm_map_copy_entry: cred %p", 3767 src_object)); 3768 src_object->cred = src_entry->cred; 3769 src_object->charge = size; 3770 } 3771 VM_OBJECT_WUNLOCK(src_object); 3772 dst_entry->object.vm_object = src_object; 3773 if (charged) { 3774 cred = curthread->td_ucred; 3775 crhold(cred); 3776 dst_entry->cred = cred; 3777 *fork_charge += size; 3778 if (!(src_entry->eflags & 3779 MAP_ENTRY_NEEDS_COPY)) { 3780 crhold(cred); 3781 src_entry->cred = cred; 3782 *fork_charge += size; 3783 } 3784 } 3785 src_entry->eflags |= MAP_ENTRY_COW | 3786 MAP_ENTRY_NEEDS_COPY; 3787 dst_entry->eflags |= MAP_ENTRY_COW | 3788 MAP_ENTRY_NEEDS_COPY; 3789 dst_entry->offset = src_entry->offset; 3790 if (src_entry->eflags & MAP_ENTRY_WRITECNT) { 3791 /* 3792 * MAP_ENTRY_WRITECNT cannot 3793 * indicate write reference from 3794 * src_entry, since the entry is 3795 * marked as needs copy. Allocate a 3796 * fake entry that is used to 3797 * decrement object->un_pager writecount 3798 * at the appropriate time. Attach 3799 * fake_entry to the deferred list. 
3800 */ 3801 fake_entry = vm_map_entry_create(dst_map); 3802 fake_entry->eflags = MAP_ENTRY_WRITECNT; 3803 src_entry->eflags &= ~MAP_ENTRY_WRITECNT; 3804 vm_object_reference(src_object); 3805 fake_entry->object.vm_object = src_object; 3806 fake_entry->start = src_entry->start; 3807 fake_entry->end = src_entry->end; 3808 fake_entry->defer_next = 3809 curthread->td_map_def_user; 3810 curthread->td_map_def_user = fake_entry; 3811 } 3812 3813 pmap_copy(dst_map->pmap, src_map->pmap, 3814 dst_entry->start, dst_entry->end - dst_entry->start, 3815 src_entry->start); 3816 } else { 3817 dst_entry->object.vm_object = NULL; 3818 dst_entry->offset = 0; 3819 if (src_entry->cred != NULL) { 3820 dst_entry->cred = curthread->td_ucred; 3821 crhold(dst_entry->cred); 3822 *fork_charge += size; 3823 } 3824 } 3825 } else { 3826 /* 3827 * We don't want to make writeable wired pages copy-on-write. 3828 * Immediately copy these pages into the new map by simulating 3829 * page faults. The new pages are pageable. 3830 */ 3831 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, 3832 fork_charge); 3833 } 3834 } 3835 3836 /* 3837 * vmspace_map_entry_forked: 3838 * Update the newly-forked vmspace each time a map entry is inherited 3839 * or copied. The values for vm_dsize and vm_tsize are approximate 3840 * (and mostly-obsolete ideas in the face of mmap(2) et al.) 3841 */ 3842 static void 3843 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, 3844 vm_map_entry_t entry) 3845 { 3846 vm_size_t entrysize; 3847 vm_offset_t newend; 3848 3849 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) 3850 return; 3851 entrysize = entry->end - entry->start; 3852 vm2->vm_map.size += entrysize; 3853 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { 3854 vm2->vm_ssize += btoc(entrysize); 3855 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && 3856 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { 3857 newend = MIN(entry->end, 3858 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); 3859 vm2->vm_dsize += btoc(newend - entry->start); 3860 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && 3861 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { 3862 newend = MIN(entry->end, 3863 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); 3864 vm2->vm_tsize += btoc(newend - entry->start); 3865 } 3866 } 3867 3868 /* 3869 * vmspace_fork: 3870 * Create a new process vmspace structure and vm_map 3871 * based on those of an existing process. The new map 3872 * is based on the old map, according to the inheritance 3873 * values on the regions in that map. 3874 * 3875 * XXX It might be worth coalescing the entries added to the new vmspace. 3876 * 3877 * The source map must not be locked. 3878 */ 3879 struct vmspace * 3880 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) 3881 { 3882 struct vmspace *vm2; 3883 vm_map_t new_map, old_map; 3884 vm_map_entry_t new_entry, old_entry; 3885 vm_object_t object; 3886 int error, locked; 3887 vm_inherit_t inh; 3888 3889 old_map = &vm1->vm_map; 3890 /* Copy immutable fields of vm1 to vm2. 
 */
        vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
            pmap_pinit);
        if (vm2 == NULL)
                return (NULL);

        vm2->vm_taddr = vm1->vm_taddr;
        vm2->vm_daddr = vm1->vm_daddr;
        vm2->vm_maxsaddr = vm1->vm_maxsaddr;
        vm_map_lock(old_map);
        if (old_map->busy)
                vm_map_wait_busy(old_map);
        new_map = &vm2->vm_map;
        locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
        KASSERT(locked, ("vmspace_fork: lock failed"));

        error = pmap_vmspace_copy(new_map->pmap, old_map->pmap);
        if (error != 0) {
                sx_xunlock(&old_map->lock);
                sx_xunlock(&new_map->lock);
                vm_map_process_deferred();
                vmspace_free(vm2);
                return (NULL);
        }

        new_map->anon_loc = old_map->anon_loc;

        old_entry = old_map->header.next;

        while (old_entry != &old_map->header) {
                if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
                        panic("vm_map_fork: encountered a submap");

                inh = old_entry->inheritance;
                if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
                    inh != VM_INHERIT_NONE)
                        inh = VM_INHERIT_COPY;

                switch (inh) {
                case VM_INHERIT_NONE:
                        break;

                case VM_INHERIT_SHARE:
                        /*
                         * Clone the entry, creating the shared object if necessary.
                         */
                        object = old_entry->object.vm_object;
                        if (object == NULL) {
                                vm_map_entry_back(old_entry);
                                object = old_entry->object.vm_object;
                        }

                        /*
                         * Add the reference before calling vm_object_shadow
                         * to ensure that a shadow object is created.
                         */
                        vm_object_reference(object);
                        if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
                                vm_object_shadow(&old_entry->object.vm_object,
                                    &old_entry->offset,
                                    old_entry->end - old_entry->start);
                                old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
                                /* Transfer the second reference too. */
                                vm_object_reference(
                                    old_entry->object.vm_object);

                                /*
                                 * As in vm_map_merged_neighbor_dispose(),
                                 * the vnode lock will not be acquired in
                                 * this call to vm_object_deallocate().
                                 */
                                vm_object_deallocate(object);
                                object = old_entry->object.vm_object;
                        }
                        VM_OBJECT_WLOCK(object);
                        vm_object_clear_flag(object, OBJ_ONEMAPPING);
                        if (old_entry->cred != NULL) {
                                KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
                                object->cred = old_entry->cred;
                                object->charge = old_entry->end - old_entry->start;
                                old_entry->cred = NULL;
                        }

                        /*
                         * Assert the correct state of the vnode
                         * v_writecount while the object is locked, so
                         * that it need not be relocked later merely
                         * for the assertion.
                         */
                        if (old_entry->eflags & MAP_ENTRY_WRITECNT &&
                            object->type == OBJT_VNODE) {
                                KASSERT(((struct vnode *)object->handle)->
                                    v_writecount > 0,
                                    ("vmspace_fork: v_writecount %p", object));
                                KASSERT(object->un_pager.vnp.writemappings > 0,
                                    ("vmspace_fork: vnp.writecount %p",
                                    object));
                        }
                        VM_OBJECT_WUNLOCK(object);

                        /*
                         * Clone the entry, referencing the shared object.
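                         *
                         * (For reference, userland selects this inheritance
                         * mode with minherit(2); an illustrative call, with
                         * addr/len naming an already mapped range, is
                         *
                         *      minherit(addr, len, INHERIT_SHARE);
                         *
                         * which marks the range VM_INHERIT_SHARE.)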
3992 */ 3993 new_entry = vm_map_entry_create(new_map); 3994 *new_entry = *old_entry; 3995 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 3996 MAP_ENTRY_IN_TRANSITION); 3997 new_entry->wiring_thread = NULL; 3998 new_entry->wired_count = 0; 3999 if (new_entry->eflags & MAP_ENTRY_WRITECNT) { 4000 vm_pager_update_writecount(object, 4001 new_entry->start, new_entry->end); 4002 } 4003 vm_map_entry_set_vnode_text(new_entry, true); 4004 4005 /* 4006 * Insert the entry into the new map -- we know we're 4007 * inserting at the end of the new map. 4008 */ 4009 vm_map_entry_link(new_map, new_entry); 4010 vmspace_map_entry_forked(vm1, vm2, new_entry); 4011 4012 /* 4013 * Update the physical map 4014 */ 4015 pmap_copy(new_map->pmap, old_map->pmap, 4016 new_entry->start, 4017 (old_entry->end - old_entry->start), 4018 old_entry->start); 4019 break; 4020 4021 case VM_INHERIT_COPY: 4022 /* 4023 * Clone the entry and link into the map. 4024 */ 4025 new_entry = vm_map_entry_create(new_map); 4026 *new_entry = *old_entry; 4027 /* 4028 * Copied entry is COW over the old object. 4029 */ 4030 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | 4031 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_WRITECNT); 4032 new_entry->wiring_thread = NULL; 4033 new_entry->wired_count = 0; 4034 new_entry->object.vm_object = NULL; 4035 new_entry->cred = NULL; 4036 vm_map_entry_link(new_map, new_entry); 4037 vmspace_map_entry_forked(vm1, vm2, new_entry); 4038 vm_map_copy_entry(old_map, new_map, old_entry, 4039 new_entry, fork_charge); 4040 vm_map_entry_set_vnode_text(new_entry, true); 4041 break; 4042 4043 case VM_INHERIT_ZERO: 4044 /* 4045 * Create a new anonymous mapping entry modelled from 4046 * the old one. 4047 */ 4048 new_entry = vm_map_entry_create(new_map); 4049 memset(new_entry, 0, sizeof(*new_entry)); 4050 4051 new_entry->start = old_entry->start; 4052 new_entry->end = old_entry->end; 4053 new_entry->eflags = old_entry->eflags & 4054 ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION | 4055 MAP_ENTRY_WRITECNT | MAP_ENTRY_VN_EXEC); 4056 new_entry->protection = old_entry->protection; 4057 new_entry->max_protection = old_entry->max_protection; 4058 new_entry->inheritance = VM_INHERIT_ZERO; 4059 4060 vm_map_entry_link(new_map, new_entry); 4061 vmspace_map_entry_forked(vm1, vm2, new_entry); 4062 4063 new_entry->cred = curthread->td_ucred; 4064 crhold(new_entry->cred); 4065 *fork_charge += (new_entry->end - new_entry->start); 4066 4067 break; 4068 } 4069 old_entry = vm_map_entry_succ(old_entry); 4070 } 4071 /* 4072 * Use inlined vm_map_unlock() to postpone handling the deferred 4073 * map entries, which cannot be done until both old_map and 4074 * new_map locks are released. 4075 */ 4076 sx_xunlock(&old_map->lock); 4077 sx_xunlock(&new_map->lock); 4078 vm_map_process_deferred(); 4079 4080 return (vm2); 4081 } 4082 4083 /* 4084 * Create a process's stack for exec_new_vmspace(). This function is never 4085 * asked to wire the newly created stack. 4086 */ 4087 int 4088 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 4089 vm_prot_t prot, vm_prot_t max, int cow) 4090 { 4091 vm_size_t growsize, init_ssize; 4092 rlim_t vmemlim; 4093 int rv; 4094 4095 MPASS((map->flags & MAP_WIREFUTURE) == 0); 4096 growsize = sgrowsiz; 4097 init_ssize = (max_ssize < growsize) ? 
max_ssize : growsize; 4098 vm_map_lock(map); 4099 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 4100 /* If we would blow our VMEM resource limit, no go */ 4101 if (map->size + init_ssize > vmemlim) { 4102 rv = KERN_NO_SPACE; 4103 goto out; 4104 } 4105 rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot, 4106 max, cow); 4107 out: 4108 vm_map_unlock(map); 4109 return (rv); 4110 } 4111 4112 static int stack_guard_page = 1; 4113 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN, 4114 &stack_guard_page, 0, 4115 "Specifies the number of guard pages for a stack that grows"); 4116 4117 static int 4118 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 4119 vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow) 4120 { 4121 vm_map_entry_t new_entry, prev_entry; 4122 vm_offset_t bot, gap_bot, gap_top, top; 4123 vm_size_t init_ssize, sgp; 4124 int orient, rv; 4125 4126 /* 4127 * The stack orientation is piggybacked with the cow argument. 4128 * Extract it into orient and mask the cow argument so that we 4129 * don't pass it around further. 4130 */ 4131 orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP); 4132 KASSERT(orient != 0, ("No stack grow direction")); 4133 KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP), 4134 ("bi-dir stack")); 4135 4136 if (addrbos < vm_map_min(map) || 4137 addrbos + max_ssize > vm_map_max(map) || 4138 addrbos + max_ssize <= addrbos) 4139 return (KERN_INVALID_ADDRESS); 4140 sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 || 4141 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 : 4142 (vm_size_t)stack_guard_page * PAGE_SIZE; 4143 if (sgp >= max_ssize) 4144 return (KERN_INVALID_ARGUMENT); 4145 4146 init_ssize = growsize; 4147 if (max_ssize < init_ssize + sgp) 4148 init_ssize = max_ssize - sgp; 4149 4150 /* If addr is already mapped, no go */ 4151 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) 4152 return (KERN_NO_SPACE); 4153 4154 /* 4155 * If we can't accommodate max_ssize in the current mapping, no go. 4156 */ 4157 if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize) 4158 return (KERN_NO_SPACE); 4159 4160 /* 4161 * We initially map a stack of only init_ssize. We will grow as 4162 * needed later. Depending on the orientation of the stack (i.e. 4163 * the grow direction) we either map at the top of the range, the 4164 * bottom of the range or in the middle. 4165 * 4166 * Note: we would normally expect prot and max to be VM_PROT_ALL, 4167 * and cow to be 0. Possibly we should eliminate these as input 4168 * parameters, and just pass these values here in the insert call. 
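         *
         * For a MAP_STACK_GROWS_DOWN request the initial layout is, as a
         * sketch (addresses increase to the right):
         *
         *      addrbos            bot                 top = addrbos + max_ssize
         *      |---- guard gap ----|----- init_ssize ----|
         *
         * and the two ranges swap roles for MAP_STACK_GROWS_UP.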
4169 */ 4170 if (orient == MAP_STACK_GROWS_DOWN) { 4171 bot = addrbos + max_ssize - init_ssize; 4172 top = bot + init_ssize; 4173 gap_bot = addrbos; 4174 gap_top = bot; 4175 } else /* if (orient == MAP_STACK_GROWS_UP) */ { 4176 bot = addrbos; 4177 top = bot + init_ssize; 4178 gap_bot = top; 4179 gap_top = addrbos + max_ssize; 4180 } 4181 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); 4182 if (rv != KERN_SUCCESS) 4183 return (rv); 4184 new_entry = vm_map_entry_succ(prev_entry); 4185 KASSERT(new_entry->end == top || new_entry->start == bot, 4186 ("Bad entry start/end for new stack entry")); 4187 KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 || 4188 (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0, 4189 ("new entry lacks MAP_ENTRY_GROWS_DOWN")); 4190 KASSERT((orient & MAP_STACK_GROWS_UP) == 0 || 4191 (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0, 4192 ("new entry lacks MAP_ENTRY_GROWS_UP")); 4193 if (gap_bot == gap_top) 4194 return (KERN_SUCCESS); 4195 rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE, 4196 VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ? 4197 MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP)); 4198 if (rv == KERN_SUCCESS) { 4199 /* 4200 * Gap can never successfully handle a fault, so 4201 * read-ahead logic is never used for it. Re-use 4202 * next_read of the gap entry to store 4203 * stack_guard_page for vm_map_growstack(). 4204 */ 4205 if (orient == MAP_STACK_GROWS_DOWN) 4206 vm_map_entry_pred(new_entry)->next_read = sgp; 4207 else 4208 vm_map_entry_succ(new_entry)->next_read = sgp; 4209 } else { 4210 (void)vm_map_delete(map, bot, top); 4211 } 4212 return (rv); 4213 } 4214 4215 /* 4216 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we 4217 * successfully grow the stack. 4218 */ 4219 static int 4220 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry) 4221 { 4222 vm_map_entry_t stack_entry; 4223 struct proc *p; 4224 struct vmspace *vm; 4225 struct ucred *cred; 4226 vm_offset_t gap_end, gap_start, grow_start; 4227 vm_size_t grow_amount, guard, max_grow; 4228 rlim_t lmemlim, stacklim, vmemlim; 4229 int rv, rv1; 4230 bool gap_deleted, grow_down, is_procstack; 4231 #ifdef notyet 4232 uint64_t limit; 4233 #endif 4234 #ifdef RACCT 4235 int error; 4236 #endif 4237 4238 p = curproc; 4239 vm = p->p_vmspace; 4240 4241 /* 4242 * Disallow stack growth when the access is performed by a 4243 * debugger or AIO daemon. The reason is that the wrong 4244 * resource limits are applied. 4245 */ 4246 if (p != initproc && (map != &p->p_vmspace->vm_map || 4247 p->p_textvp == NULL)) 4248 return (KERN_FAILURE); 4249 4250 MPASS(!map->system_map); 4251 4252 lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK); 4253 stacklim = lim_cur(curthread, RLIMIT_STACK); 4254 vmemlim = lim_cur(curthread, RLIMIT_VMEM); 4255 retry: 4256 /* If addr is not in a hole for a stack grow area, no need to grow. 
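         * If it is such a gap, the code below finds the adjoining stack
         * entry and, for the common downward-growing case, computes in
         * effect
         *
         *      grow_amount = round_page(stack_entry->start - addr);
         *
         * i.e. just enough whole pages to bring the faulting address into
         * the stack entry.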
*/ 4257 if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry)) 4258 return (KERN_FAILURE); 4259 if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0) 4260 return (KERN_SUCCESS); 4261 if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) { 4262 stack_entry = vm_map_entry_succ(gap_entry); 4263 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 || 4264 stack_entry->start != gap_entry->end) 4265 return (KERN_FAILURE); 4266 grow_amount = round_page(stack_entry->start - addr); 4267 grow_down = true; 4268 } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) { 4269 stack_entry = vm_map_entry_pred(gap_entry); 4270 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 || 4271 stack_entry->end != gap_entry->start) 4272 return (KERN_FAILURE); 4273 grow_amount = round_page(addr + 1 - stack_entry->end); 4274 grow_down = false; 4275 } else { 4276 return (KERN_FAILURE); 4277 } 4278 guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 || 4279 (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 : 4280 gap_entry->next_read; 4281 max_grow = gap_entry->end - gap_entry->start; 4282 if (guard > max_grow) 4283 return (KERN_NO_SPACE); 4284 max_grow -= guard; 4285 if (grow_amount > max_grow) 4286 return (KERN_NO_SPACE); 4287 4288 /* 4289 * If this is the main process stack, see if we're over the stack 4290 * limit. 4291 */ 4292 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr && 4293 addr < (vm_offset_t)p->p_sysent->sv_usrstack; 4294 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) 4295 return (KERN_NO_SPACE); 4296 4297 #ifdef RACCT 4298 if (racct_enable) { 4299 PROC_LOCK(p); 4300 if (is_procstack && racct_set(p, RACCT_STACK, 4301 ctob(vm->vm_ssize) + grow_amount)) { 4302 PROC_UNLOCK(p); 4303 return (KERN_NO_SPACE); 4304 } 4305 PROC_UNLOCK(p); 4306 } 4307 #endif 4308 4309 grow_amount = roundup(grow_amount, sgrowsiz); 4310 if (grow_amount > max_grow) 4311 grow_amount = max_grow; 4312 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { 4313 grow_amount = trunc_page((vm_size_t)stacklim) - 4314 ctob(vm->vm_ssize); 4315 } 4316 4317 #ifdef notyet 4318 PROC_LOCK(p); 4319 limit = racct_get_available(p, RACCT_STACK); 4320 PROC_UNLOCK(p); 4321 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit)) 4322 grow_amount = limit - ctob(vm->vm_ssize); 4323 #endif 4324 4325 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) { 4326 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) { 4327 rv = KERN_NO_SPACE; 4328 goto out; 4329 } 4330 #ifdef RACCT 4331 if (racct_enable) { 4332 PROC_LOCK(p); 4333 if (racct_set(p, RACCT_MEMLOCK, 4334 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) { 4335 PROC_UNLOCK(p); 4336 rv = KERN_NO_SPACE; 4337 goto out; 4338 } 4339 PROC_UNLOCK(p); 4340 } 4341 #endif 4342 } 4343 4344 /* If we would blow our VMEM resource limit, no go */ 4345 if (map->size + grow_amount > vmemlim) { 4346 rv = KERN_NO_SPACE; 4347 goto out; 4348 } 4349 #ifdef RACCT 4350 if (racct_enable) { 4351 PROC_LOCK(p); 4352 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) { 4353 PROC_UNLOCK(p); 4354 rv = KERN_NO_SPACE; 4355 goto out; 4356 } 4357 PROC_UNLOCK(p); 4358 } 4359 #endif 4360 4361 if (vm_map_lock_upgrade(map)) { 4362 gap_entry = NULL; 4363 vm_map_lock_read(map); 4364 goto retry; 4365 } 4366 4367 if (grow_down) { 4368 grow_start = gap_entry->end - grow_amount; 4369 if (gap_entry->start + grow_amount == gap_entry->end) { 4370 gap_start = gap_entry->start; 4371 gap_end = gap_entry->end; 4372 vm_map_entry_delete(map, gap_entry); 4373 
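                        /*
                         * The growth consumes the whole gap, so the gap
                         * entry is deleted outright; gap_start and gap_end
                         * were saved above so that the gap can be recreated
                         * if inserting the new stack piece below fails.
                         */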
                        gap_deleted = true;
                } else {
                        MPASS(gap_entry->start < gap_entry->end - grow_amount);
                        vm_map_entry_resize(map, gap_entry, -grow_amount);
                        gap_deleted = false;
                }
                rv = vm_map_insert(map, NULL, 0, grow_start,
                    grow_start + grow_amount,
                    stack_entry->protection, stack_entry->max_protection,
                    MAP_STACK_GROWS_DOWN);
                if (rv != KERN_SUCCESS) {
                        if (gap_deleted) {
                                rv1 = vm_map_insert(map, NULL, 0, gap_start,
                                    gap_end, VM_PROT_NONE, VM_PROT_NONE,
                                    MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
                                MPASS(rv1 == KERN_SUCCESS);
                        } else
                                vm_map_entry_resize(map, gap_entry,
                                    grow_amount);
                }
        } else {
                grow_start = stack_entry->end;
                cred = stack_entry->cred;
                if (cred == NULL && stack_entry->object.vm_object != NULL)
                        cred = stack_entry->object.vm_object->cred;
                if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
                        rv = KERN_NO_SPACE;
                /* Grow the underlying object if applicable. */
                else if (stack_entry->object.vm_object == NULL ||
                    vm_object_coalesce(stack_entry->object.vm_object,
                    stack_entry->offset,
                    (vm_size_t)(stack_entry->end - stack_entry->start),
                    grow_amount, cred != NULL)) {
                        if (gap_entry->start + grow_amount == gap_entry->end) {
                                vm_map_entry_delete(map, gap_entry);
                                vm_map_entry_resize(map, stack_entry,
                                    grow_amount);
                        } else {
                                gap_entry->start += grow_amount;
                                stack_entry->end += grow_amount;
                        }
                        map->size += grow_amount;
                        rv = KERN_SUCCESS;
                } else
                        rv = KERN_FAILURE;
        }
        if (rv == KERN_SUCCESS && is_procstack)
                vm->vm_ssize += btoc(grow_amount);

        /*
         * Heed the MAP_WIREFUTURE flag if it was set for this process.
         */
        if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
                rv = vm_map_wire_locked(map, grow_start,
                    grow_start + grow_amount,
                    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
        }
        vm_map_lock_downgrade(map);

out:
#ifdef RACCT
        if (racct_enable && rv != KERN_SUCCESS) {
                PROC_LOCK(p);
                error = racct_set(p, RACCT_VMEM, map->size);
                KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
                if (!old_mlock) {
                        error = racct_set(p, RACCT_MEMLOCK,
                            ptoa(pmap_wired_count(map->pmap)));
                        KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
                }
                error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
                KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
                PROC_UNLOCK(p);
        }
#endif

        return (rv);
}

/*
 * Unshare the specified VM space for exec.  A new vmspace is created
 * even if the old one was not shared; the new vmspace starts out
 * empty (it contains no mappings).
 */
int
vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
{
        struct vmspace *oldvmspace = p->p_vmspace;
        struct vmspace *newvmspace;

        KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
            ("vmspace_exec recursed"));
        newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit);
        if (newvmspace == NULL)
                return (ENOMEM);
        newvmspace->vm_swrss = oldvmspace->vm_swrss;
        /*
         * This code is written like this for prototype purposes.  The
         * goal is to avoid running down the vmspace here, but let the
         * other processes that are still using the vmspace finally
         * run it down.  Even though there is little or no chance of blocking
         * here, it is a good idea to keep this form for future mods.
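         *
         * A hedged sketch of the caller's side of the TDP_EXECVMSPC
         * protocol (the real code lives in the exec path and may differ in
         * detail): the caller keeps its own reference to the old vmspace
         * and drops it only once it sees the flag set, e.g.
         *
         *      error = vmspace_exec(p, minuser, maxuser);
         *      ...
         *      if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
         *              td->td_pflags &= ~TDP_EXECVMSPC;
         *              vmspace_free(oldvmspace);
         *      }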
4474 */ 4475 PROC_VMSPACE_LOCK(p); 4476 p->p_vmspace = newvmspace; 4477 PROC_VMSPACE_UNLOCK(p); 4478 if (p == curthread->td_proc) 4479 pmap_activate(curthread); 4480 curthread->td_pflags |= TDP_EXECVMSPC; 4481 return (0); 4482 } 4483 4484 /* 4485 * Unshare the specified VM space for forcing COW. This 4486 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 4487 */ 4488 int 4489 vmspace_unshare(struct proc *p) 4490 { 4491 struct vmspace *oldvmspace = p->p_vmspace; 4492 struct vmspace *newvmspace; 4493 vm_ooffset_t fork_charge; 4494 4495 if (oldvmspace->vm_refcnt == 1) 4496 return (0); 4497 fork_charge = 0; 4498 newvmspace = vmspace_fork(oldvmspace, &fork_charge); 4499 if (newvmspace == NULL) 4500 return (ENOMEM); 4501 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { 4502 vmspace_free(newvmspace); 4503 return (ENOMEM); 4504 } 4505 PROC_VMSPACE_LOCK(p); 4506 p->p_vmspace = newvmspace; 4507 PROC_VMSPACE_UNLOCK(p); 4508 if (p == curthread->td_proc) 4509 pmap_activate(curthread); 4510 vmspace_free(oldvmspace); 4511 return (0); 4512 } 4513 4514 /* 4515 * vm_map_lookup: 4516 * 4517 * Finds the VM object, offset, and 4518 * protection for a given virtual address in the 4519 * specified map, assuming a page fault of the 4520 * type specified. 4521 * 4522 * Leaves the map in question locked for read; return 4523 * values are guaranteed until a vm_map_lookup_done 4524 * call is performed. Note that the map argument 4525 * is in/out; the returned map must be used in 4526 * the call to vm_map_lookup_done. 4527 * 4528 * A handle (out_entry) is returned for use in 4529 * vm_map_lookup_done, to make that fast. 4530 * 4531 * If a lookup is requested with "write protection" 4532 * specified, the map may be changed to perform virtual 4533 * copying operations, although the data referenced will 4534 * remain the same. 4535 */ 4536 int 4537 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 4538 vm_offset_t vaddr, 4539 vm_prot_t fault_typea, 4540 vm_map_entry_t *out_entry, /* OUT */ 4541 vm_object_t *object, /* OUT */ 4542 vm_pindex_t *pindex, /* OUT */ 4543 vm_prot_t *out_prot, /* OUT */ 4544 boolean_t *wired) /* OUT */ 4545 { 4546 vm_map_entry_t entry; 4547 vm_map_t map = *var_map; 4548 vm_prot_t prot; 4549 vm_prot_t fault_type = fault_typea; 4550 vm_object_t eobject; 4551 vm_size_t size; 4552 struct ucred *cred; 4553 4554 RetryLookup: 4555 4556 vm_map_lock_read(map); 4557 4558 RetryLookupLocked: 4559 /* 4560 * Lookup the faulting address. 4561 */ 4562 if (!vm_map_lookup_entry(map, vaddr, out_entry)) { 4563 vm_map_unlock_read(map); 4564 return (KERN_INVALID_ADDRESS); 4565 } 4566 4567 entry = *out_entry; 4568 4569 /* 4570 * Handle submaps. 4571 */ 4572 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 4573 vm_map_t old_map = map; 4574 4575 *var_map = map = entry->object.sub_map; 4576 vm_map_unlock_read(old_map); 4577 goto RetryLookup; 4578 } 4579 4580 /* 4581 * Check whether this task is allowed to have this page. 
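         *
         * (If the lookup was flagged with VM_PROT_FAULT_LOOKUP and the
         * address fell into a stack guard gap, the code below first tries
         * vm_map_growstack() and, on success, retries the lookup with the
         * map still read-locked.)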
4582 */ 4583 prot = entry->protection; 4584 if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) { 4585 fault_typea &= ~VM_PROT_FAULT_LOOKUP; 4586 if (prot == VM_PROT_NONE && map != kernel_map && 4587 (entry->eflags & MAP_ENTRY_GUARD) != 0 && 4588 (entry->eflags & (MAP_ENTRY_STACK_GAP_DN | 4589 MAP_ENTRY_STACK_GAP_UP)) != 0 && 4590 vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS) 4591 goto RetryLookupLocked; 4592 } 4593 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 4594 if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) { 4595 vm_map_unlock_read(map); 4596 return (KERN_PROTECTION_FAILURE); 4597 } 4598 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags & 4599 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) != 4600 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY), 4601 ("entry %p flags %x", entry, entry->eflags)); 4602 if ((fault_typea & VM_PROT_COPY) != 0 && 4603 (entry->max_protection & VM_PROT_WRITE) == 0 && 4604 (entry->eflags & MAP_ENTRY_COW) == 0) { 4605 vm_map_unlock_read(map); 4606 return (KERN_PROTECTION_FAILURE); 4607 } 4608 4609 /* 4610 * If this page is not pageable, we have to get it for all possible 4611 * accesses. 4612 */ 4613 *wired = (entry->wired_count != 0); 4614 if (*wired) 4615 fault_type = entry->protection; 4616 size = entry->end - entry->start; 4617 /* 4618 * If the entry was copy-on-write, we either ... 4619 */ 4620 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4621 /* 4622 * If we want to write the page, we may as well handle that 4623 * now since we've got the map locked. 4624 * 4625 * If we don't need to write the page, we just demote the 4626 * permissions allowed. 4627 */ 4628 if ((fault_type & VM_PROT_WRITE) != 0 || 4629 (fault_typea & VM_PROT_COPY) != 0) { 4630 /* 4631 * Make a new object, and place it in the object 4632 * chain. Note that no new references have appeared 4633 * -- one just moved from the map to the new 4634 * object. 4635 */ 4636 if (vm_map_lock_upgrade(map)) 4637 goto RetryLookup; 4638 4639 if (entry->cred == NULL) { 4640 /* 4641 * The debugger owner is charged for 4642 * the memory. 4643 */ 4644 cred = curthread->td_ucred; 4645 crhold(cred); 4646 if (!swap_reserve_by_cred(size, cred)) { 4647 crfree(cred); 4648 vm_map_unlock(map); 4649 return (KERN_RESOURCE_SHORTAGE); 4650 } 4651 entry->cred = cred; 4652 } 4653 vm_object_shadow(&entry->object.vm_object, 4654 &entry->offset, size); 4655 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 4656 eobject = entry->object.vm_object; 4657 if (eobject->cred != NULL) { 4658 /* 4659 * The object was not shadowed. 4660 */ 4661 swap_release_by_cred(size, entry->cred); 4662 crfree(entry->cred); 4663 entry->cred = NULL; 4664 } else if (entry->cred != NULL) { 4665 VM_OBJECT_WLOCK(eobject); 4666 eobject->cred = entry->cred; 4667 eobject->charge = size; 4668 VM_OBJECT_WUNLOCK(eobject); 4669 entry->cred = NULL; 4670 } 4671 4672 vm_map_lock_downgrade(map); 4673 } else { 4674 /* 4675 * We're attempting to read a copy-on-write page -- 4676 * don't allow writes. 4677 */ 4678 prot &= ~VM_PROT_WRITE; 4679 } 4680 } 4681 4682 /* 4683 * Create an object if necessary. 
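         *
         * (Entries are commonly created without a backing object; the
         * first fault allocates an anonymous object here and, if the
         * entry carries a cred reference, moves the swap accounting
         * charge over to that object.)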
4684 */ 4685 if (entry->object.vm_object == NULL && 4686 !map->system_map) { 4687 if (vm_map_lock_upgrade(map)) 4688 goto RetryLookup; 4689 entry->object.vm_object = vm_object_allocate_anon(atop(size)); 4690 entry->offset = 0; 4691 if (entry->cred != NULL) { 4692 VM_OBJECT_WLOCK(entry->object.vm_object); 4693 entry->object.vm_object->cred = entry->cred; 4694 entry->object.vm_object->charge = size; 4695 VM_OBJECT_WUNLOCK(entry->object.vm_object); 4696 entry->cred = NULL; 4697 } 4698 vm_map_lock_downgrade(map); 4699 } 4700 4701 /* 4702 * Return the object/offset from this entry. If the entry was 4703 * copy-on-write or empty, it has been fixed up. 4704 */ 4705 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 4706 *object = entry->object.vm_object; 4707 4708 *out_prot = prot; 4709 return (KERN_SUCCESS); 4710 } 4711 4712 /* 4713 * vm_map_lookup_locked: 4714 * 4715 * Lookup the faulting address. A version of vm_map_lookup that returns 4716 * KERN_FAILURE instead of blocking on map lock or memory allocation. 4717 */ 4718 int 4719 vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */ 4720 vm_offset_t vaddr, 4721 vm_prot_t fault_typea, 4722 vm_map_entry_t *out_entry, /* OUT */ 4723 vm_object_t *object, /* OUT */ 4724 vm_pindex_t *pindex, /* OUT */ 4725 vm_prot_t *out_prot, /* OUT */ 4726 boolean_t *wired) /* OUT */ 4727 { 4728 vm_map_entry_t entry; 4729 vm_map_t map = *var_map; 4730 vm_prot_t prot; 4731 vm_prot_t fault_type = fault_typea; 4732 4733 /* 4734 * Lookup the faulting address. 4735 */ 4736 if (!vm_map_lookup_entry(map, vaddr, out_entry)) 4737 return (KERN_INVALID_ADDRESS); 4738 4739 entry = *out_entry; 4740 4741 /* 4742 * Fail if the entry refers to a submap. 4743 */ 4744 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) 4745 return (KERN_FAILURE); 4746 4747 /* 4748 * Check whether this task is allowed to have this page. 4749 */ 4750 prot = entry->protection; 4751 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; 4752 if ((fault_type & prot) != fault_type) 4753 return (KERN_PROTECTION_FAILURE); 4754 4755 /* 4756 * If this page is not pageable, we have to get it for all possible 4757 * accesses. 4758 */ 4759 *wired = (entry->wired_count != 0); 4760 if (*wired) 4761 fault_type = entry->protection; 4762 4763 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 4764 /* 4765 * Fail if the entry was copy-on-write for a write fault. 4766 */ 4767 if (fault_type & VM_PROT_WRITE) 4768 return (KERN_FAILURE); 4769 /* 4770 * We're attempting to read a copy-on-write page -- 4771 * don't allow writes. 4772 */ 4773 prot &= ~VM_PROT_WRITE; 4774 } 4775 4776 /* 4777 * Fail if an object should be created. 4778 */ 4779 if (entry->object.vm_object == NULL && !map->system_map) 4780 return (KERN_FAILURE); 4781 4782 /* 4783 * Return the object/offset from this entry. If the entry was 4784 * copy-on-write or empty, it has been fixed up. 4785 */ 4786 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 4787 *object = entry->object.vm_object; 4788 4789 *out_prot = prot; 4790 return (KERN_SUCCESS); 4791 } 4792 4793 /* 4794 * vm_map_lookup_done: 4795 * 4796 * Releases locks acquired by a vm_map_lookup 4797 * (according to the handle returned by that lookup). 
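 *
 *      Every successful vm_map_lookup() must be paired with a call to
 *      this function on the map it returned; an illustrative pairing:
 *
 *              if (vm_map_lookup(&map, vaddr, fault_type, &entry,
 *                  &object, &pindex, &prot, &wired) == KERN_SUCCESS) {
 *                      ...
 *                      vm_map_lookup_done(map, entry);
 *              }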
4798 */ 4799 void 4800 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) 4801 { 4802 /* 4803 * Unlock the main-level map 4804 */ 4805 vm_map_unlock_read(map); 4806 } 4807 4808 vm_offset_t 4809 vm_map_max_KBI(const struct vm_map *map) 4810 { 4811 4812 return (vm_map_max(map)); 4813 } 4814 4815 vm_offset_t 4816 vm_map_min_KBI(const struct vm_map *map) 4817 { 4818 4819 return (vm_map_min(map)); 4820 } 4821 4822 pmap_t 4823 vm_map_pmap_KBI(vm_map_t map) 4824 { 4825 4826 return (map->pmap); 4827 } 4828 4829 #ifdef INVARIANTS 4830 static void 4831 _vm_map_assert_consistent(vm_map_t map, int check) 4832 { 4833 vm_map_entry_t entry, prev; 4834 vm_size_t max_left, max_right; 4835 4836 if (enable_vmmap_check != check) 4837 return; 4838 4839 prev = &map->header; 4840 VM_MAP_ENTRY_FOREACH(entry, map) { 4841 KASSERT(prev->end <= entry->start, 4842 ("map %p prev->end = %jx, start = %jx", map, 4843 (uintmax_t)prev->end, (uintmax_t)entry->start)); 4844 KASSERT(entry->start < entry->end, 4845 ("map %p start = %jx, end = %jx", map, 4846 (uintmax_t)entry->start, (uintmax_t)entry->end)); 4847 KASSERT(entry->end <= vm_map_entry_succ(entry)->start, 4848 ("map %p end = %jx, next->start = %jx", map, 4849 (uintmax_t)entry->end, 4850 (uintmax_t)vm_map_entry_succ(entry)->start)); 4851 KASSERT(entry->left == NULL || 4852 entry->left->start < entry->start, 4853 ("map %p left->start = %jx, start = %jx", map, 4854 (uintmax_t)entry->left->start, (uintmax_t)entry->start)); 4855 KASSERT(entry->right == NULL || 4856 entry->start < entry->right->start, 4857 ("map %p start = %jx, right->start = %jx", map, 4858 (uintmax_t)entry->start, (uintmax_t)entry->right->start)); 4859 max_left = vm_map_entry_max_free_left(entry, 4860 vm_map_entry_pred(entry)); 4861 max_right = vm_map_entry_max_free_right(entry, 4862 vm_map_entry_succ(entry)); 4863 KASSERT(entry->max_free == MAX(max_left, max_right), 4864 ("map %p max = %jx, max_left = %jx, max_right = %jx", map, 4865 (uintmax_t)entry->max_free, 4866 (uintmax_t)max_left, (uintmax_t)max_right)); 4867 prev = entry; 4868 } 4869 KASSERT(prev->end <= entry->start, 4870 ("map %p prev->end = %jx, start = %jx", map, 4871 (uintmax_t)prev->end, (uintmax_t)entry->start)); 4872 } 4873 #endif 4874 4875 #include "opt_ddb.h" 4876 #ifdef DDB 4877 #include <sys/kernel.h> 4878 4879 #include <ddb/ddb.h> 4880 4881 static void 4882 vm_map_print(vm_map_t map) 4883 { 4884 vm_map_entry_t entry, prev; 4885 4886 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 4887 (void *)map, 4888 (void *)map->pmap, map->nentries, map->timestamp); 4889 4890 db_indent += 2; 4891 prev = &map->header; 4892 VM_MAP_ENTRY_FOREACH(entry, map) { 4893 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n", 4894 (void *)entry, (void *)entry->start, (void *)entry->end, 4895 entry->eflags); 4896 { 4897 static char *inheritance_name[4] = 4898 {"share", "copy", "none", "donate_copy"}; 4899 4900 db_iprintf(" prot=%x/%x/%s", 4901 entry->protection, 4902 entry->max_protection, 4903 inheritance_name[(int)(unsigned char) 4904 entry->inheritance]); 4905 if (entry->wired_count != 0) 4906 db_printf(", wired"); 4907 } 4908 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 4909 db_printf(", share=%p, offset=0x%jx\n", 4910 (void *)entry->object.sub_map, 4911 (uintmax_t)entry->offset); 4912 if (prev == &map->header || 4913 prev->object.sub_map != 4914 entry->object.sub_map) { 4915 db_indent += 2; 4916 vm_map_print((vm_map_t)entry->object.sub_map); 4917 db_indent -= 2; 4918 } 4919 } else { 4920 if (entry->cred != NULL) 4921 db_printf(", 
ruid %d", entry->cred->cr_ruid); 4922 db_printf(", object=%p, offset=0x%jx", 4923 (void *)entry->object.vm_object, 4924 (uintmax_t)entry->offset); 4925 if (entry->object.vm_object && entry->object.vm_object->cred) 4926 db_printf(", obj ruid %d charge %jx", 4927 entry->object.vm_object->cred->cr_ruid, 4928 (uintmax_t)entry->object.vm_object->charge); 4929 if (entry->eflags & MAP_ENTRY_COW) 4930 db_printf(", copy (%s)", 4931 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 4932 db_printf("\n"); 4933 4934 if (prev == &map->header || 4935 prev->object.vm_object != 4936 entry->object.vm_object) { 4937 db_indent += 2; 4938 vm_object_print((db_expr_t)(intptr_t) 4939 entry->object.vm_object, 4940 0, 0, (char *)0); 4941 db_indent -= 2; 4942 } 4943 } 4944 prev = entry; 4945 } 4946 db_indent -= 2; 4947 } 4948 4949 DB_SHOW_COMMAND(map, map) 4950 { 4951 4952 if (!have_addr) { 4953 db_printf("usage: show map <addr>\n"); 4954 return; 4955 } 4956 vm_map_print((vm_map_t)addr); 4957 } 4958 4959 DB_SHOW_COMMAND(procvm, procvm) 4960 { 4961 struct proc *p; 4962 4963 if (have_addr) { 4964 p = db_lookup_proc(addr); 4965 } else { 4966 p = curproc; 4967 } 4968 4969 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 4970 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 4971 (void *)vmspace_pmap(p->p_vmspace)); 4972 4973 vm_map_print((vm_map_t)&p->p_vmspace->vm_map); 4974 } 4975 4976 #endif /* DDB */ 4977