1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 1991, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * The Mach Operating System project at Carnegie-Mellon University. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * from: @(#)vm_map.c 8.3 (Berkeley) 1/12/94 39 * 40 * 41 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 42 * All rights reserved. 43 * 44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 45 * 46 * Permission to use, copy, modify and distribute this software and 47 * its documentation is hereby granted, provided that both the copyright 48 * notice and this permission notice appear in all copies of the 49 * software, derivative works or modified versions, and any portions 50 * thereof, and that both notices appear in supporting documentation. 51 * 52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 53 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 55 * 56 * Carnegie Mellon requests users of this software to return to 57 * 58 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 59 * School of Computer Science 60 * Carnegie Mellon University 61 * Pittsburgh PA 15213-3890 62 * 63 * any improvements or extensions that they make and grant Carnegie the 64 * rights to redistribute these changes. 65 * 66 * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $ 67 * $DragonFly: src/sys/vm/vm_map.c,v 1.56 2007/04/29 18:25:41 dillon Exp $ 68 */ 69 70 /* 71 * Virtual memory mapping module. 72 */ 73 74 #include <sys/param.h> 75 #include <sys/systm.h> 76 #include <sys/kernel.h> 77 #include <sys/proc.h> 78 #include <sys/serialize.h> 79 #include <sys/lock.h> 80 #include <sys/vmmeter.h> 81 #include <sys/mman.h> 82 #include <sys/vnode.h> 83 #include <sys/resourcevar.h> 84 #include <sys/shm.h> 85 #include <sys/tree.h> 86 #include <sys/malloc.h> 87 88 #include <vm/vm.h> 89 #include <vm/vm_param.h> 90 #include <vm/pmap.h> 91 #include <vm/vm_map.h> 92 #include <vm/vm_page.h> 93 #include <vm/vm_object.h> 94 #include <vm/vm_pager.h> 95 #include <vm/vm_kern.h> 96 #include <vm/vm_extern.h> 97 #include <vm/swap_pager.h> 98 #include <vm/vm_zone.h> 99 100 #include <sys/thread2.h> 101 #include <sys/sysref2.h> 102 103 /* 104 * Virtual memory maps provide for the mapping, protection, and sharing 105 * of virtual memory objects. In addition, this module provides for an 106 * efficient virtual copy of memory from one map to another. 107 * 108 * Synchronization is required prior to most operations. 109 * 110 * Maps consist of an ordered doubly-linked list of simple entries. 111 * A hint and a RB tree is used to speed-up lookups. 112 * 113 * Callers looking to modify maps specify start/end addresses which cause 114 * the related map entry to be clipped if necessary, and then later 115 * recombined if the pieces remained compatible. 116 * 117 * Virtual copy operations are performed by copying VM object references 118 * from one map to another, and then marking both regions as copy-on-write. 119 */ 120 static void vmspace_terminate(struct vmspace *vm); 121 static void vmspace_lock(struct vmspace *vm); 122 static void vmspace_unlock(struct vmspace *vm); 123 static void vmspace_dtor(void *obj, void *private); 124 125 MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore"); 126 127 struct sysref_class vmspace_sysref_class = { 128 .name = "vmspace", 129 .mtype = M_VMSPACE, 130 .proto = SYSREF_PROTO_VMSPACE, 131 .offset = offsetof(struct vmspace, vm_sysref), 132 .objsize = sizeof(struct vmspace), 133 .mag_capacity = 32, 134 .flags = SRC_MANAGEDINIT, 135 .dtor = vmspace_dtor, 136 .ops = { 137 .terminate = (sysref_terminate_func_t)vmspace_terminate, 138 .lock = (sysref_lock_func_t)vmspace_lock, 139 .unlock = (sysref_lock_func_t)vmspace_unlock 140 } 141 }; 142 143 #define VMEPERCPU 2 144 145 static struct vm_zone mapentzone_store, mapzone_store; 146 static vm_zone_t mapentzone, mapzone; 147 static struct vm_object mapentobj, mapobj; 148 149 static struct vm_map_entry map_entry_init[MAX_MAPENT]; 150 static struct vm_map_entry cpu_map_entry_init[MAXCPU][VMEPERCPU]; 151 static struct vm_map map_init[MAX_KMAP]; 152 153 static void vm_map_entry_shadow(vm_map_entry_t entry); 154 static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *); 155 static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *); 156 static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *); 157 static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *); 158 static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *); 159 static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t); 160 static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t, 161 vm_map_entry_t); 162 static void vm_map_split (vm_map_entry_t); 163 static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry, vm_offset_t start, vm_offset_t end, int *count, int flags); 164 165 /* 166 * Initialize the vm_map module. Must be called before any other vm_map 167 * routines. 168 * 169 * Map and entry structures are allocated from the general purpose 170 * memory pool with some exceptions: 171 * 172 * - The kernel map is allocated statically. 173 * - Initial kernel map entries are allocated out of a static pool. 174 * 175 * These restrictions are necessary since malloc() uses the 176 * maps and requires map entries. 177 * 178 * Called from the low level boot code only. 179 */ 180 void 181 vm_map_startup(void) 182 { 183 mapzone = &mapzone_store; 184 zbootinit(mapzone, "MAP", sizeof (struct vm_map), 185 map_init, MAX_KMAP); 186 mapentzone = &mapentzone_store; 187 zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry), 188 map_entry_init, MAX_MAPENT); 189 } 190 191 /* 192 * Called prior to any vmspace allocations. 193 * 194 * Called from the low level boot code only. 195 */ 196 void 197 vm_init2(void) 198 { 199 zinitna(mapentzone, &mapentobj, NULL, 0, 0, 200 ZONE_USE_RESERVE | ZONE_SPECIAL, 1); 201 zinitna(mapzone, &mapobj, NULL, 0, 0, 0, 1); 202 pmap_init2(); 203 vm_object_init2(); 204 } 205 206 207 /* 208 * Red black tree functions 209 * 210 * The caller must hold the related map lock. 211 */ 212 static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b); 213 RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare); 214 215 /* a->start is address, and the only field has to be initialized */ 216 static int 217 rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b) 218 { 219 if (a->start < b->start) 220 return(-1); 221 else if (a->start > b->start) 222 return(1); 223 return(0); 224 } 225 226 /* 227 * Allocate a vmspace structure, including a vm_map and pmap. 228 * Initialize numerous fields. While the initial allocation is zerod, 229 * subsequence reuse from the objcache leaves elements of the structure 230 * intact (particularly the pmap), so portions must be zerod. 231 * 232 * The structure is not considered activated until we call sysref_activate(). 233 * 234 * No requirements. 235 */ 236 struct vmspace * 237 vmspace_alloc(vm_offset_t min, vm_offset_t max) 238 { 239 struct vmspace *vm; 240 241 lwkt_gettoken(&vmspace_token); 242 vm = sysref_alloc(&vmspace_sysref_class); 243 bzero(&vm->vm_startcopy, 244 (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy); 245 vm_map_init(&vm->vm_map, min, max, NULL); 246 pmap_pinit(vmspace_pmap(vm)); /* (some fields reused) */ 247 vm->vm_map.pmap = vmspace_pmap(vm); /* XXX */ 248 vm->vm_shm = NULL; 249 vm->vm_exitingcnt = 0; 250 cpu_vmspace_alloc(vm); 251 sysref_activate(&vm->vm_sysref); 252 lwkt_reltoken(&vmspace_token); 253 254 return (vm); 255 } 256 257 /* 258 * dtor function - Some elements of the pmap are retained in the 259 * free-cached vmspaces to improve performance. We have to clean them up 260 * here before returning the vmspace to the memory pool. 261 * 262 * No requirements. 263 */ 264 static void 265 vmspace_dtor(void *obj, void *private) 266 { 267 struct vmspace *vm = obj; 268 269 pmap_puninit(vmspace_pmap(vm)); 270 } 271 272 /* 273 * Called in two cases: 274 * 275 * (1) When the last sysref is dropped, but exitingcnt might still be 276 * non-zero. 277 * 278 * (2) When there are no sysrefs (i.e. refcnt is negative) left and the 279 * exitingcnt becomes zero 280 * 281 * sysref will not scrap the object until we call sysref_put() once more 282 * after the last ref has been dropped. 283 * 284 * Interlocked by the sysref API. 285 */ 286 static void 287 vmspace_terminate(struct vmspace *vm) 288 { 289 int count; 290 291 /* 292 * If exitingcnt is non-zero we can't get rid of the entire vmspace 293 * yet, but we can scrap user memory. 294 */ 295 lwkt_gettoken(&vmspace_token); 296 if (vm->vm_exitingcnt) { 297 shmexit(vm); 298 pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS, 299 VM_MAX_USER_ADDRESS); 300 vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS, 301 VM_MAX_USER_ADDRESS); 302 lwkt_reltoken(&vmspace_token); 303 return; 304 } 305 cpu_vmspace_free(vm); 306 307 /* 308 * Make sure any SysV shm is freed, it might not have in 309 * exit1() 310 */ 311 shmexit(vm); 312 313 KKASSERT(vm->vm_upcalls == NULL); 314 315 /* 316 * Lock the map, to wait out all other references to it. 317 * Delete all of the mappings and pages they hold, then call 318 * the pmap module to reclaim anything left. 319 */ 320 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 321 vm_map_lock(&vm->vm_map); 322 vm_map_delete(&vm->vm_map, vm->vm_map.min_offset, 323 vm->vm_map.max_offset, &count); 324 vm_map_unlock(&vm->vm_map); 325 vm_map_entry_release(count); 326 327 pmap_release(vmspace_pmap(vm)); 328 sysref_put(&vm->vm_sysref); 329 lwkt_reltoken(&vmspace_token); 330 } 331 332 /* 333 * vmspaces are not currently locked. 334 */ 335 static void 336 vmspace_lock(struct vmspace *vm __unused) 337 { 338 } 339 340 static void 341 vmspace_unlock(struct vmspace *vm __unused) 342 { 343 } 344 345 /* 346 * This is called during exit indicating that the vmspace is no 347 * longer in used by an exiting process, but the process has not yet 348 * been cleaned up. 349 * 350 * No requirements. 351 */ 352 void 353 vmspace_exitbump(struct vmspace *vm) 354 { 355 lwkt_gettoken(&vmspace_token); 356 ++vm->vm_exitingcnt; 357 lwkt_reltoken(&vmspace_token); 358 } 359 360 /* 361 * This is called in the wait*() handling code. The vmspace can be terminated 362 * after the last wait is finished using it. 363 * 364 * No requirements. 365 */ 366 void 367 vmspace_exitfree(struct proc *p) 368 { 369 struct vmspace *vm; 370 371 lwkt_gettoken(&vmspace_token); 372 vm = p->p_vmspace; 373 p->p_vmspace = NULL; 374 375 if (--vm->vm_exitingcnt == 0 && sysref_isinactive(&vm->vm_sysref)) 376 vmspace_terminate(vm); 377 lwkt_reltoken(&vmspace_token); 378 } 379 380 /* 381 * Swap useage is determined by taking the proportional swap used by 382 * VM objects backing the VM map. To make up for fractional losses, 383 * if the VM object has any swap use at all the associated map entries 384 * count for at least 1 swap page. 385 * 386 * No requirements. 387 */ 388 int 389 vmspace_swap_count(struct vmspace *vmspace) 390 { 391 vm_map_t map = &vmspace->vm_map; 392 vm_map_entry_t cur; 393 vm_object_t object; 394 int count = 0; 395 int n; 396 397 lwkt_gettoken(&vmspace_token); 398 for (cur = map->header.next; cur != &map->header; cur = cur->next) { 399 switch(cur->maptype) { 400 case VM_MAPTYPE_NORMAL: 401 case VM_MAPTYPE_VPAGETABLE: 402 if ((object = cur->object.vm_object) == NULL) 403 break; 404 if (object->swblock_count) { 405 n = (cur->end - cur->start) / PAGE_SIZE; 406 count += object->swblock_count * 407 SWAP_META_PAGES * n / object->size + 1; 408 } 409 break; 410 default: 411 break; 412 } 413 } 414 lwkt_reltoken(&vmspace_token); 415 return(count); 416 } 417 418 /* 419 * Calculate the approximate number of anonymous pages in use by 420 * this vmspace. To make up for fractional losses, we count each 421 * VM object as having at least 1 anonymous page. 422 * 423 * No requirements. 424 */ 425 int 426 vmspace_anonymous_count(struct vmspace *vmspace) 427 { 428 vm_map_t map = &vmspace->vm_map; 429 vm_map_entry_t cur; 430 vm_object_t object; 431 int count = 0; 432 433 lwkt_gettoken(&vmspace_token); 434 for (cur = map->header.next; cur != &map->header; cur = cur->next) { 435 switch(cur->maptype) { 436 case VM_MAPTYPE_NORMAL: 437 case VM_MAPTYPE_VPAGETABLE: 438 if ((object = cur->object.vm_object) == NULL) 439 break; 440 if (object->type != OBJT_DEFAULT && 441 object->type != OBJT_SWAP) { 442 break; 443 } 444 count += object->resident_page_count; 445 break; 446 default: 447 break; 448 } 449 } 450 lwkt_reltoken(&vmspace_token); 451 return(count); 452 } 453 454 /* 455 * Creates and returns a new empty VM map with the given physical map 456 * structure, and having the given lower and upper address bounds. 457 * 458 * No requirements. 459 */ 460 vm_map_t 461 vm_map_create(vm_map_t result, pmap_t pmap, vm_offset_t min, vm_offset_t max) 462 { 463 if (result == NULL) 464 result = zalloc(mapzone); 465 vm_map_init(result, min, max, pmap); 466 return (result); 467 } 468 469 /* 470 * Initialize an existing vm_map structure such as that in the vmspace 471 * structure. The pmap is initialized elsewhere. 472 * 473 * No requirements. 474 */ 475 void 476 vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max, pmap_t pmap) 477 { 478 map->header.next = map->header.prev = &map->header; 479 RB_INIT(&map->rb_root); 480 map->nentries = 0; 481 map->size = 0; 482 map->system_map = 0; 483 map->infork = 0; 484 map->min_offset = min; 485 map->max_offset = max; 486 map->pmap = pmap; 487 map->first_free = &map->header; 488 map->hint = &map->header; 489 map->timestamp = 0; 490 map->flags = 0; 491 lockinit(&map->lock, "thrd_sleep", 0, 0); 492 } 493 494 /* 495 * Shadow the vm_map_entry's object. This typically needs to be done when 496 * a write fault is taken on an entry which had previously been cloned by 497 * fork(). The shared object (which might be NULL) must become private so 498 * we add a shadow layer above it. 499 * 500 * Object allocation for anonymous mappings is defered as long as possible. 501 * When creating a shadow, however, the underlying object must be instantiated 502 * so it can be shared. 503 * 504 * If the map segment is governed by a virtual page table then it is 505 * possible to address offsets beyond the mapped area. Just allocate 506 * a maximally sized object for this case. 507 * 508 * The vm_map must be exclusively locked. 509 * No other requirements. 510 */ 511 static 512 void 513 vm_map_entry_shadow(vm_map_entry_t entry) 514 { 515 if (entry->maptype == VM_MAPTYPE_VPAGETABLE) { 516 vm_object_shadow(&entry->object.vm_object, &entry->offset, 517 0x7FFFFFFF); /* XXX */ 518 } else { 519 vm_object_shadow(&entry->object.vm_object, &entry->offset, 520 atop(entry->end - entry->start)); 521 } 522 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 523 } 524 525 /* 526 * Allocate an object for a vm_map_entry. 527 * 528 * Object allocation for anonymous mappings is defered as long as possible. 529 * This function is called when we can defer no longer, generally when a map 530 * entry might be split or forked or takes a page fault. 531 * 532 * If the map segment is governed by a virtual page table then it is 533 * possible to address offsets beyond the mapped area. Just allocate 534 * a maximally sized object for this case. 535 * 536 * The vm_map must be exclusively locked. 537 * No other requirements. 538 */ 539 void 540 vm_map_entry_allocate_object(vm_map_entry_t entry) 541 { 542 vm_object_t obj; 543 544 if (entry->maptype == VM_MAPTYPE_VPAGETABLE) { 545 obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF); /* XXX */ 546 } else { 547 obj = vm_object_allocate(OBJT_DEFAULT, 548 atop(entry->end - entry->start)); 549 } 550 entry->object.vm_object = obj; 551 entry->offset = 0; 552 } 553 554 /* 555 * Set an initial negative count so the first attempt to reserve 556 * space preloads a bunch of vm_map_entry's for this cpu. Also 557 * pre-allocate 2 vm_map_entries which will be needed by zalloc() to 558 * map a new page for vm_map_entry structures. SMP systems are 559 * particularly sensitive. 560 * 561 * This routine is called in early boot so we cannot just call 562 * vm_map_entry_reserve(). 563 * 564 * Called from the low level boot code only (for each cpu) 565 */ 566 void 567 vm_map_entry_reserve_cpu_init(globaldata_t gd) 568 { 569 vm_map_entry_t entry; 570 int i; 571 572 gd->gd_vme_avail -= MAP_RESERVE_COUNT * 2; 573 entry = &cpu_map_entry_init[gd->gd_cpuid][0]; 574 for (i = 0; i < VMEPERCPU; ++i, ++entry) { 575 entry->next = gd->gd_vme_base; 576 gd->gd_vme_base = entry; 577 } 578 } 579 580 /* 581 * Reserves vm_map_entry structures so code later on can manipulate 582 * map_entry structures within a locked map without blocking trying 583 * to allocate a new vm_map_entry. 584 * 585 * No requirements. 586 */ 587 int 588 vm_map_entry_reserve(int count) 589 { 590 struct globaldata *gd = mycpu; 591 vm_map_entry_t entry; 592 593 /* 594 * Make sure we have enough structures in gd_vme_base to handle 595 * the reservation request. 596 */ 597 crit_enter(); 598 while (gd->gd_vme_avail < count) { 599 entry = zalloc(mapentzone); 600 entry->next = gd->gd_vme_base; 601 gd->gd_vme_base = entry; 602 ++gd->gd_vme_avail; 603 } 604 gd->gd_vme_avail -= count; 605 crit_exit(); 606 607 return(count); 608 } 609 610 /* 611 * Releases previously reserved vm_map_entry structures that were not 612 * used. If we have too much junk in our per-cpu cache clean some of 613 * it out. 614 * 615 * No requirements. 616 */ 617 void 618 vm_map_entry_release(int count) 619 { 620 struct globaldata *gd = mycpu; 621 vm_map_entry_t entry; 622 623 crit_enter(); 624 gd->gd_vme_avail += count; 625 while (gd->gd_vme_avail > MAP_RESERVE_SLOP) { 626 entry = gd->gd_vme_base; 627 KKASSERT(entry != NULL); 628 gd->gd_vme_base = entry->next; 629 --gd->gd_vme_avail; 630 crit_exit(); 631 zfree(mapentzone, entry); 632 crit_enter(); 633 } 634 crit_exit(); 635 } 636 637 /* 638 * Reserve map entry structures for use in kernel_map itself. These 639 * entries have *ALREADY* been reserved on a per-cpu basis when the map 640 * was inited. This function is used by zalloc() to avoid a recursion 641 * when zalloc() itself needs to allocate additional kernel memory. 642 * 643 * This function works like the normal reserve but does not load the 644 * vm_map_entry cache (because that would result in an infinite 645 * recursion). Note that gd_vme_avail may go negative. This is expected. 646 * 647 * Any caller of this function must be sure to renormalize after 648 * potentially eating entries to ensure that the reserve supply 649 * remains intact. 650 * 651 * No requirements. 652 */ 653 int 654 vm_map_entry_kreserve(int count) 655 { 656 struct globaldata *gd = mycpu; 657 658 crit_enter(); 659 gd->gd_vme_avail -= count; 660 crit_exit(); 661 KASSERT(gd->gd_vme_base != NULL, 662 ("no reserved entries left, gd_vme_avail = %d\n", 663 gd->gd_vme_avail)); 664 return(count); 665 } 666 667 /* 668 * Release previously reserved map entries for kernel_map. We do not 669 * attempt to clean up like the normal release function as this would 670 * cause an unnecessary (but probably not fatal) deep procedure call. 671 * 672 * No requirements. 673 */ 674 void 675 vm_map_entry_krelease(int count) 676 { 677 struct globaldata *gd = mycpu; 678 679 crit_enter(); 680 gd->gd_vme_avail += count; 681 crit_exit(); 682 } 683 684 /* 685 * Allocates a VM map entry for insertion. No entry fields are filled in. 686 * 687 * The entries should have previously been reserved. The reservation count 688 * is tracked in (*countp). 689 * 690 * No requirements. 691 */ 692 static vm_map_entry_t 693 vm_map_entry_create(vm_map_t map, int *countp) 694 { 695 struct globaldata *gd = mycpu; 696 vm_map_entry_t entry; 697 698 KKASSERT(*countp > 0); 699 --*countp; 700 crit_enter(); 701 entry = gd->gd_vme_base; 702 KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp)); 703 gd->gd_vme_base = entry->next; 704 crit_exit(); 705 706 return(entry); 707 } 708 709 /* 710 * Dispose of a vm_map_entry that is no longer being referenced. 711 * 712 * No requirements. 713 */ 714 static void 715 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp) 716 { 717 struct globaldata *gd = mycpu; 718 719 KKASSERT(map->hint != entry); 720 KKASSERT(map->first_free != entry); 721 722 ++*countp; 723 crit_enter(); 724 entry->next = gd->gd_vme_base; 725 gd->gd_vme_base = entry; 726 crit_exit(); 727 } 728 729 730 /* 731 * Insert/remove entries from maps. 732 * 733 * The related map must be exclusively locked. 734 * No other requirements. 735 * 736 * NOTE! We currently acquire the vmspace_token only to avoid races 737 * against the pageout daemon's calls to vmspace_*_count(), which 738 * are unable to safely lock the vm_map without potentially 739 * deadlocking. 740 */ 741 static __inline void 742 vm_map_entry_link(vm_map_t map, 743 vm_map_entry_t after_where, 744 vm_map_entry_t entry) 745 { 746 ASSERT_VM_MAP_LOCKED(map); 747 748 lwkt_gettoken(&vmspace_token); 749 map->nentries++; 750 entry->prev = after_where; 751 entry->next = after_where->next; 752 entry->next->prev = entry; 753 after_where->next = entry; 754 if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry)) 755 panic("vm_map_entry_link: dup addr map %p ent %p", map, entry); 756 lwkt_reltoken(&vmspace_token); 757 } 758 759 static __inline void 760 vm_map_entry_unlink(vm_map_t map, 761 vm_map_entry_t entry) 762 { 763 vm_map_entry_t prev; 764 vm_map_entry_t next; 765 766 ASSERT_VM_MAP_LOCKED(map); 767 768 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 769 panic("vm_map_entry_unlink: attempt to mess with " 770 "locked entry! %p", entry); 771 } 772 lwkt_gettoken(&vmspace_token); 773 prev = entry->prev; 774 next = entry->next; 775 next->prev = prev; 776 prev->next = next; 777 vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry); 778 map->nentries--; 779 lwkt_reltoken(&vmspace_token); 780 } 781 782 /* 783 * Finds the map entry containing (or immediately preceding) the specified 784 * address in the given map. The entry is returned in (*entry). 785 * 786 * The boolean result indicates whether the address is actually contained 787 * in the map. 788 * 789 * The related map must be locked. 790 * No other requirements. 791 */ 792 boolean_t 793 vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry) 794 { 795 vm_map_entry_t tmp; 796 vm_map_entry_t last; 797 798 ASSERT_VM_MAP_LOCKED(map); 799 #if 0 800 /* 801 * XXX TEMPORARILY DISABLED. For some reason our attempt to revive 802 * the hint code with the red-black lookup meets with system crashes 803 * and lockups. We do not yet know why. 804 * 805 * It is possible that the problem is related to the setting 806 * of the hint during map_entry deletion, in the code specified 807 * at the GGG comment later on in this file. 808 */ 809 /* 810 * Quickly check the cached hint, there's a good chance of a match. 811 */ 812 if (map->hint != &map->header) { 813 tmp = map->hint; 814 if (address >= tmp->start && address < tmp->end) { 815 *entry = tmp; 816 return(TRUE); 817 } 818 } 819 #endif 820 821 /* 822 * Locate the record from the top of the tree. 'last' tracks the 823 * closest prior record and is returned if no match is found, which 824 * in binary tree terms means tracking the most recent right-branch 825 * taken. If there is no prior record, &map->header is returned. 826 */ 827 last = &map->header; 828 tmp = RB_ROOT(&map->rb_root); 829 830 while (tmp) { 831 if (address >= tmp->start) { 832 if (address < tmp->end) { 833 *entry = tmp; 834 map->hint = tmp; 835 return(TRUE); 836 } 837 last = tmp; 838 tmp = RB_RIGHT(tmp, rb_entry); 839 } else { 840 tmp = RB_LEFT(tmp, rb_entry); 841 } 842 } 843 *entry = last; 844 return (FALSE); 845 } 846 847 /* 848 * Inserts the given whole VM object into the target map at the specified 849 * address range. The object's size should match that of the address range. 850 * 851 * The map must be exclusively locked. 852 * The caller must have reserved sufficient vm_map_entry structures. 853 * 854 * If object is non-NULL, ref count must be bumped by caller 855 * prior to making call to account for the new entry. 856 */ 857 int 858 vm_map_insert(vm_map_t map, int *countp, 859 vm_object_t object, vm_ooffset_t offset, 860 vm_offset_t start, vm_offset_t end, 861 vm_maptype_t maptype, 862 vm_prot_t prot, vm_prot_t max, 863 int cow) 864 { 865 vm_map_entry_t new_entry; 866 vm_map_entry_t prev_entry; 867 vm_map_entry_t temp_entry; 868 vm_eflags_t protoeflags; 869 870 ASSERT_VM_MAP_LOCKED(map); 871 872 /* 873 * Check that the start and end points are not bogus. 874 */ 875 if ((start < map->min_offset) || (end > map->max_offset) || 876 (start >= end)) 877 return (KERN_INVALID_ADDRESS); 878 879 /* 880 * Find the entry prior to the proposed starting address; if it's part 881 * of an existing entry, this range is bogus. 882 */ 883 if (vm_map_lookup_entry(map, start, &temp_entry)) 884 return (KERN_NO_SPACE); 885 886 prev_entry = temp_entry; 887 888 /* 889 * Assert that the next entry doesn't overlap the end point. 890 */ 891 892 if ((prev_entry->next != &map->header) && 893 (prev_entry->next->start < end)) 894 return (KERN_NO_SPACE); 895 896 protoeflags = 0; 897 898 if (cow & MAP_COPY_ON_WRITE) 899 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY; 900 901 if (cow & MAP_NOFAULT) { 902 protoeflags |= MAP_ENTRY_NOFAULT; 903 904 KASSERT(object == NULL, 905 ("vm_map_insert: paradoxical MAP_NOFAULT request")); 906 } 907 if (cow & MAP_DISABLE_SYNCER) 908 protoeflags |= MAP_ENTRY_NOSYNC; 909 if (cow & MAP_DISABLE_COREDUMP) 910 protoeflags |= MAP_ENTRY_NOCOREDUMP; 911 if (cow & MAP_IS_STACK) 912 protoeflags |= MAP_ENTRY_STACK; 913 if (cow & MAP_IS_KSTACK) 914 protoeflags |= MAP_ENTRY_KSTACK; 915 916 lwkt_gettoken(&vm_token); 917 lwkt_gettoken(&vmobj_token); 918 919 if (object) { 920 /* 921 * When object is non-NULL, it could be shared with another 922 * process. We have to set or clear OBJ_ONEMAPPING 923 * appropriately. 924 */ 925 if ((object->ref_count > 1) || (object->shadow_count != 0)) { 926 vm_object_clear_flag(object, OBJ_ONEMAPPING); 927 } 928 } 929 else if ((prev_entry != &map->header) && 930 (prev_entry->eflags == protoeflags) && 931 (prev_entry->end == start) && 932 (prev_entry->wired_count == 0) && 933 prev_entry->maptype == maptype && 934 ((prev_entry->object.vm_object == NULL) || 935 vm_object_coalesce(prev_entry->object.vm_object, 936 OFF_TO_IDX(prev_entry->offset), 937 (vm_size_t)(prev_entry->end - prev_entry->start), 938 (vm_size_t)(end - prev_entry->end)))) { 939 /* 940 * We were able to extend the object. Determine if we 941 * can extend the previous map entry to include the 942 * new range as well. 943 */ 944 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) && 945 (prev_entry->protection == prot) && 946 (prev_entry->max_protection == max)) { 947 lwkt_reltoken(&vmobj_token); 948 lwkt_reltoken(&vm_token); 949 map->size += (end - prev_entry->end); 950 prev_entry->end = end; 951 vm_map_simplify_entry(map, prev_entry, countp); 952 return (KERN_SUCCESS); 953 } 954 955 /* 956 * If we can extend the object but cannot extend the 957 * map entry, we have to create a new map entry. We 958 * must bump the ref count on the extended object to 959 * account for it. object may be NULL. 960 */ 961 object = prev_entry->object.vm_object; 962 offset = prev_entry->offset + 963 (prev_entry->end - prev_entry->start); 964 vm_object_reference_locked(object); 965 } 966 967 lwkt_reltoken(&vmobj_token); 968 lwkt_reltoken(&vm_token); 969 970 /* 971 * NOTE: if conditionals fail, object can be NULL here. This occurs 972 * in things like the buffer map where we manage kva but do not manage 973 * backing objects. 974 */ 975 976 /* 977 * Create a new entry 978 */ 979 980 new_entry = vm_map_entry_create(map, countp); 981 new_entry->start = start; 982 new_entry->end = end; 983 984 new_entry->maptype = maptype; 985 new_entry->eflags = protoeflags; 986 new_entry->object.vm_object = object; 987 new_entry->offset = offset; 988 new_entry->aux.master_pde = 0; 989 990 new_entry->inheritance = VM_INHERIT_DEFAULT; 991 new_entry->protection = prot; 992 new_entry->max_protection = max; 993 new_entry->wired_count = 0; 994 995 /* 996 * Insert the new entry into the list 997 */ 998 999 vm_map_entry_link(map, prev_entry, new_entry); 1000 map->size += new_entry->end - new_entry->start; 1001 1002 /* 1003 * Update the free space hint. Entries cannot overlap. 1004 * An exact comparison is needed to avoid matching 1005 * against the map->header. 1006 */ 1007 if ((map->first_free == prev_entry) && 1008 (prev_entry->end == new_entry->start)) { 1009 map->first_free = new_entry; 1010 } 1011 1012 #if 0 1013 /* 1014 * Temporarily removed to avoid MAP_STACK panic, due to 1015 * MAP_STACK being a huge hack. Will be added back in 1016 * when MAP_STACK (and the user stack mapping) is fixed. 1017 */ 1018 /* 1019 * It may be possible to simplify the entry 1020 */ 1021 vm_map_simplify_entry(map, new_entry, countp); 1022 #endif 1023 1024 /* 1025 * Try to pre-populate the page table. Mappings governed by virtual 1026 * page tables cannot be prepopulated without a lot of work, so 1027 * don't try. 1028 */ 1029 if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) && 1030 maptype != VM_MAPTYPE_VPAGETABLE) { 1031 pmap_object_init_pt(map->pmap, start, prot, 1032 object, OFF_TO_IDX(offset), end - start, 1033 cow & MAP_PREFAULT_PARTIAL); 1034 } 1035 1036 return (KERN_SUCCESS); 1037 } 1038 1039 /* 1040 * Find sufficient space for `length' bytes in the given map, starting at 1041 * `start'. Returns 0 on success, 1 on no space. 1042 * 1043 * This function will returned an arbitrarily aligned pointer. If no 1044 * particular alignment is required you should pass align as 1. Note that 1045 * the map may return PAGE_SIZE aligned pointers if all the lengths used in 1046 * the map are a multiple of PAGE_SIZE, even if you pass a smaller align 1047 * argument. 1048 * 1049 * 'align' should be a power of 2 but is not required to be. 1050 * 1051 * The map must be exclusively locked. 1052 * No other requirements. 1053 */ 1054 int 1055 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length, 1056 vm_size_t align, int flags, vm_offset_t *addr) 1057 { 1058 vm_map_entry_t entry, next; 1059 vm_offset_t end; 1060 vm_offset_t align_mask; 1061 1062 if (start < map->min_offset) 1063 start = map->min_offset; 1064 if (start > map->max_offset) 1065 return (1); 1066 1067 /* 1068 * If the alignment is not a power of 2 we will have to use 1069 * a mod/division, set align_mask to a special value. 1070 */ 1071 if ((align | (align - 1)) + 1 != (align << 1)) 1072 align_mask = (vm_offset_t)-1; 1073 else 1074 align_mask = align - 1; 1075 1076 /* 1077 * Look for the first possible address; if there's already something 1078 * at this address, we have to start after it. 1079 */ 1080 if (start == map->min_offset) { 1081 if ((entry = map->first_free) != &map->header) 1082 start = entry->end; 1083 } else { 1084 vm_map_entry_t tmp; 1085 1086 if (vm_map_lookup_entry(map, start, &tmp)) 1087 start = tmp->end; 1088 entry = tmp; 1089 } 1090 1091 /* 1092 * Look through the rest of the map, trying to fit a new region in the 1093 * gap between existing regions, or after the very last region. 1094 */ 1095 for (;; start = (entry = next)->end) { 1096 /* 1097 * Adjust the proposed start by the requested alignment, 1098 * be sure that we didn't wrap the address. 1099 */ 1100 if (align_mask == (vm_offset_t)-1) 1101 end = ((start + align - 1) / align) * align; 1102 else 1103 end = (start + align_mask) & ~align_mask; 1104 if (end < start) 1105 return (1); 1106 start = end; 1107 /* 1108 * Find the end of the proposed new region. Be sure we didn't 1109 * go beyond the end of the map, or wrap around the address. 1110 * Then check to see if this is the last entry or if the 1111 * proposed end fits in the gap between this and the next 1112 * entry. 1113 */ 1114 end = start + length; 1115 if (end > map->max_offset || end < start) 1116 return (1); 1117 next = entry->next; 1118 1119 /* 1120 * If the next entry's start address is beyond the desired 1121 * end address we may have found a good entry. 1122 * 1123 * If the next entry is a stack mapping we do not map into 1124 * the stack's reserved space. 1125 * 1126 * XXX continue to allow mapping into the stack's reserved 1127 * space if doing a MAP_STACK mapping inside a MAP_STACK 1128 * mapping, for backwards compatibility. But the caller 1129 * really should use MAP_STACK | MAP_TRYFIXED if they 1130 * want to do that. 1131 */ 1132 if (next == &map->header) 1133 break; 1134 if (next->start >= end) { 1135 if ((next->eflags & MAP_ENTRY_STACK) == 0) 1136 break; 1137 if (flags & MAP_STACK) 1138 break; 1139 if (next->start - next->aux.avail_ssize >= end) 1140 break; 1141 } 1142 } 1143 map->hint = entry; 1144 1145 /* 1146 * Grow the kernel_map if necessary. pmap_growkernel() will panic 1147 * if it fails. The kernel_map is locked and nothing can steal 1148 * our address space if pmap_growkernel() blocks. 1149 * 1150 * NOTE: This may be unconditionally called for kldload areas on 1151 * x86_64 because these do not bump kernel_vm_end (which would 1152 * fill 128G worth of page tables!). Therefore we must not 1153 * retry. 1154 */ 1155 if (map == &kernel_map) { 1156 vm_offset_t kstop; 1157 1158 kstop = round_page(start + length); 1159 if (kstop > kernel_vm_end) 1160 pmap_growkernel(start, kstop); 1161 } 1162 *addr = start; 1163 return (0); 1164 } 1165 1166 /* 1167 * vm_map_find finds an unallocated region in the target address map with 1168 * the given length. The search is defined to be first-fit from the 1169 * specified address; the region found is returned in the same parameter. 1170 * 1171 * If object is non-NULL, ref count must be bumped by caller 1172 * prior to making call to account for the new entry. 1173 * 1174 * No requirements. This function will lock the map temporarily. 1175 */ 1176 int 1177 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, 1178 vm_offset_t *addr, vm_size_t length, vm_size_t align, 1179 boolean_t fitit, 1180 vm_maptype_t maptype, 1181 vm_prot_t prot, vm_prot_t max, 1182 int cow) 1183 { 1184 vm_offset_t start; 1185 int result; 1186 int count; 1187 1188 start = *addr; 1189 1190 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1191 vm_map_lock(map); 1192 if (fitit) { 1193 if (vm_map_findspace(map, start, length, align, 0, addr)) { 1194 vm_map_unlock(map); 1195 vm_map_entry_release(count); 1196 return (KERN_NO_SPACE); 1197 } 1198 start = *addr; 1199 } 1200 result = vm_map_insert(map, &count, object, offset, 1201 start, start + length, 1202 maptype, 1203 prot, max, 1204 cow); 1205 vm_map_unlock(map); 1206 vm_map_entry_release(count); 1207 1208 return (result); 1209 } 1210 1211 /* 1212 * Simplify the given map entry by merging with either neighbor. This 1213 * routine also has the ability to merge with both neighbors. 1214 * 1215 * This routine guarentees that the passed entry remains valid (though 1216 * possibly extended). When merging, this routine may delete one or 1217 * both neighbors. No action is taken on entries which have their 1218 * in-transition flag set. 1219 * 1220 * The map must be exclusively locked. 1221 */ 1222 void 1223 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp) 1224 { 1225 vm_map_entry_t next, prev; 1226 vm_size_t prevsize, esize; 1227 1228 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1229 ++mycpu->gd_cnt.v_intrans_coll; 1230 return; 1231 } 1232 1233 if (entry->maptype == VM_MAPTYPE_SUBMAP) 1234 return; 1235 1236 prev = entry->prev; 1237 if (prev != &map->header) { 1238 prevsize = prev->end - prev->start; 1239 if ( (prev->end == entry->start) && 1240 (prev->maptype == entry->maptype) && 1241 (prev->object.vm_object == entry->object.vm_object) && 1242 (!prev->object.vm_object || 1243 (prev->offset + prevsize == entry->offset)) && 1244 (prev->eflags == entry->eflags) && 1245 (prev->protection == entry->protection) && 1246 (prev->max_protection == entry->max_protection) && 1247 (prev->inheritance == entry->inheritance) && 1248 (prev->wired_count == entry->wired_count)) { 1249 if (map->first_free == prev) 1250 map->first_free = entry; 1251 if (map->hint == prev) 1252 map->hint = entry; 1253 vm_map_entry_unlink(map, prev); 1254 entry->start = prev->start; 1255 entry->offset = prev->offset; 1256 if (prev->object.vm_object) 1257 vm_object_deallocate(prev->object.vm_object); 1258 vm_map_entry_dispose(map, prev, countp); 1259 } 1260 } 1261 1262 next = entry->next; 1263 if (next != &map->header) { 1264 esize = entry->end - entry->start; 1265 if ((entry->end == next->start) && 1266 (next->maptype == entry->maptype) && 1267 (next->object.vm_object == entry->object.vm_object) && 1268 (!entry->object.vm_object || 1269 (entry->offset + esize == next->offset)) && 1270 (next->eflags == entry->eflags) && 1271 (next->protection == entry->protection) && 1272 (next->max_protection == entry->max_protection) && 1273 (next->inheritance == entry->inheritance) && 1274 (next->wired_count == entry->wired_count)) { 1275 if (map->first_free == next) 1276 map->first_free = entry; 1277 if (map->hint == next) 1278 map->hint = entry; 1279 vm_map_entry_unlink(map, next); 1280 entry->end = next->end; 1281 if (next->object.vm_object) 1282 vm_object_deallocate(next->object.vm_object); 1283 vm_map_entry_dispose(map, next, countp); 1284 } 1285 } 1286 } 1287 1288 /* 1289 * Asserts that the given entry begins at or after the specified address. 1290 * If necessary, it splits the entry into two. 1291 */ 1292 #define vm_map_clip_start(map, entry, startaddr, countp) \ 1293 { \ 1294 if (startaddr > entry->start) \ 1295 _vm_map_clip_start(map, entry, startaddr, countp); \ 1296 } 1297 1298 /* 1299 * This routine is called only when it is known that the entry must be split. 1300 * 1301 * The map must be exclusively locked. 1302 */ 1303 static void 1304 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start, 1305 int *countp) 1306 { 1307 vm_map_entry_t new_entry; 1308 1309 /* 1310 * Split off the front portion -- note that we must insert the new 1311 * entry BEFORE this one, so that this entry has the specified 1312 * starting address. 1313 */ 1314 1315 vm_map_simplify_entry(map, entry, countp); 1316 1317 /* 1318 * If there is no object backing this entry, we might as well create 1319 * one now. If we defer it, an object can get created after the map 1320 * is clipped, and individual objects will be created for the split-up 1321 * map. This is a bit of a hack, but is also about the best place to 1322 * put this improvement. 1323 */ 1324 if (entry->object.vm_object == NULL && !map->system_map) { 1325 vm_map_entry_allocate_object(entry); 1326 } 1327 1328 new_entry = vm_map_entry_create(map, countp); 1329 *new_entry = *entry; 1330 1331 new_entry->end = start; 1332 entry->offset += (start - entry->start); 1333 entry->start = start; 1334 1335 vm_map_entry_link(map, entry->prev, new_entry); 1336 1337 switch(entry->maptype) { 1338 case VM_MAPTYPE_NORMAL: 1339 case VM_MAPTYPE_VPAGETABLE: 1340 vm_object_reference(new_entry->object.vm_object); 1341 break; 1342 default: 1343 break; 1344 } 1345 } 1346 1347 /* 1348 * Asserts that the given entry ends at or before the specified address. 1349 * If necessary, it splits the entry into two. 1350 * 1351 * The map must be exclusively locked. 1352 */ 1353 #define vm_map_clip_end(map, entry, endaddr, countp) \ 1354 { \ 1355 if (endaddr < entry->end) \ 1356 _vm_map_clip_end(map, entry, endaddr, countp); \ 1357 } 1358 1359 /* 1360 * This routine is called only when it is known that the entry must be split. 1361 * 1362 * The map must be exclusively locked. 1363 */ 1364 static void 1365 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end, 1366 int *countp) 1367 { 1368 vm_map_entry_t new_entry; 1369 1370 /* 1371 * If there is no object backing this entry, we might as well create 1372 * one now. If we defer it, an object can get created after the map 1373 * is clipped, and individual objects will be created for the split-up 1374 * map. This is a bit of a hack, but is also about the best place to 1375 * put this improvement. 1376 */ 1377 1378 if (entry->object.vm_object == NULL && !map->system_map) { 1379 vm_map_entry_allocate_object(entry); 1380 } 1381 1382 /* 1383 * Create a new entry and insert it AFTER the specified entry 1384 */ 1385 1386 new_entry = vm_map_entry_create(map, countp); 1387 *new_entry = *entry; 1388 1389 new_entry->start = entry->end = end; 1390 new_entry->offset += (end - entry->start); 1391 1392 vm_map_entry_link(map, entry, new_entry); 1393 1394 switch(entry->maptype) { 1395 case VM_MAPTYPE_NORMAL: 1396 case VM_MAPTYPE_VPAGETABLE: 1397 vm_object_reference(new_entry->object.vm_object); 1398 break; 1399 default: 1400 break; 1401 } 1402 } 1403 1404 /* 1405 * Asserts that the starting and ending region addresses fall within the 1406 * valid range for the map. 1407 */ 1408 #define VM_MAP_RANGE_CHECK(map, start, end) \ 1409 { \ 1410 if (start < vm_map_min(map)) \ 1411 start = vm_map_min(map); \ 1412 if (end > vm_map_max(map)) \ 1413 end = vm_map_max(map); \ 1414 if (start > end) \ 1415 start = end; \ 1416 } 1417 1418 /* 1419 * Used to block when an in-transition collison occurs. The map 1420 * is unlocked for the sleep and relocked before the return. 1421 */ 1422 void 1423 vm_map_transition_wait(vm_map_t map) 1424 { 1425 tsleep_interlock(map, 0); 1426 vm_map_unlock(map); 1427 tsleep(map, PINTERLOCKED, "vment", 0); 1428 vm_map_lock(map); 1429 } 1430 1431 /* 1432 * When we do blocking operations with the map lock held it is 1433 * possible that a clip might have occured on our in-transit entry, 1434 * requiring an adjustment to the entry in our loop. These macros 1435 * help the pageable and clip_range code deal with the case. The 1436 * conditional costs virtually nothing if no clipping has occured. 1437 */ 1438 1439 #define CLIP_CHECK_BACK(entry, save_start) \ 1440 do { \ 1441 while (entry->start != save_start) { \ 1442 entry = entry->prev; \ 1443 KASSERT(entry != &map->header, ("bad entry clip")); \ 1444 } \ 1445 } while(0) 1446 1447 #define CLIP_CHECK_FWD(entry, save_end) \ 1448 do { \ 1449 while (entry->end != save_end) { \ 1450 entry = entry->next; \ 1451 KASSERT(entry != &map->header, ("bad entry clip")); \ 1452 } \ 1453 } while(0) 1454 1455 1456 /* 1457 * Clip the specified range and return the base entry. The 1458 * range may cover several entries starting at the returned base 1459 * and the first and last entry in the covering sequence will be 1460 * properly clipped to the requested start and end address. 1461 * 1462 * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES 1463 * flag. 1464 * 1465 * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries 1466 * covered by the requested range. 1467 * 1468 * The map must be exclusively locked on entry and will remain locked 1469 * on return. If no range exists or the range contains holes and you 1470 * specified that no holes were allowed, NULL will be returned. This 1471 * routine may temporarily unlock the map in order avoid a deadlock when 1472 * sleeping. 1473 */ 1474 static 1475 vm_map_entry_t 1476 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end, 1477 int *countp, int flags) 1478 { 1479 vm_map_entry_t start_entry; 1480 vm_map_entry_t entry; 1481 1482 /* 1483 * Locate the entry and effect initial clipping. The in-transition 1484 * case does not occur very often so do not try to optimize it. 1485 */ 1486 again: 1487 if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) 1488 return (NULL); 1489 entry = start_entry; 1490 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 1491 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1492 ++mycpu->gd_cnt.v_intrans_coll; 1493 ++mycpu->gd_cnt.v_intrans_wait; 1494 vm_map_transition_wait(map); 1495 /* 1496 * entry and/or start_entry may have been clipped while 1497 * we slept, or may have gone away entirely. We have 1498 * to restart from the lookup. 1499 */ 1500 goto again; 1501 } 1502 1503 /* 1504 * Since we hold an exclusive map lock we do not have to restart 1505 * after clipping, even though clipping may block in zalloc. 1506 */ 1507 vm_map_clip_start(map, entry, start, countp); 1508 vm_map_clip_end(map, entry, end, countp); 1509 entry->eflags |= MAP_ENTRY_IN_TRANSITION; 1510 1511 /* 1512 * Scan entries covered by the range. When working on the next 1513 * entry a restart need only re-loop on the current entry which 1514 * we have already locked, since 'next' may have changed. Also, 1515 * even though entry is safe, it may have been clipped so we 1516 * have to iterate forwards through the clip after sleeping. 1517 */ 1518 while (entry->next != &map->header && entry->next->start < end) { 1519 vm_map_entry_t next = entry->next; 1520 1521 if (flags & MAP_CLIP_NO_HOLES) { 1522 if (next->start > entry->end) { 1523 vm_map_unclip_range(map, start_entry, 1524 start, entry->end, countp, flags); 1525 return(NULL); 1526 } 1527 } 1528 1529 if (next->eflags & MAP_ENTRY_IN_TRANSITION) { 1530 vm_offset_t save_end = entry->end; 1531 next->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 1532 ++mycpu->gd_cnt.v_intrans_coll; 1533 ++mycpu->gd_cnt.v_intrans_wait; 1534 vm_map_transition_wait(map); 1535 1536 /* 1537 * clips might have occured while we blocked. 1538 */ 1539 CLIP_CHECK_FWD(entry, save_end); 1540 CLIP_CHECK_BACK(start_entry, start); 1541 continue; 1542 } 1543 /* 1544 * No restart necessary even though clip_end may block, we 1545 * are holding the map lock. 1546 */ 1547 vm_map_clip_end(map, next, end, countp); 1548 next->eflags |= MAP_ENTRY_IN_TRANSITION; 1549 entry = next; 1550 } 1551 if (flags & MAP_CLIP_NO_HOLES) { 1552 if (entry->end != end) { 1553 vm_map_unclip_range(map, start_entry, 1554 start, entry->end, countp, flags); 1555 return(NULL); 1556 } 1557 } 1558 return(start_entry); 1559 } 1560 1561 /* 1562 * Undo the effect of vm_map_clip_range(). You should pass the same 1563 * flags and the same range that you passed to vm_map_clip_range(). 1564 * This code will clear the in-transition flag on the entries and 1565 * wake up anyone waiting. This code will also simplify the sequence 1566 * and attempt to merge it with entries before and after the sequence. 1567 * 1568 * The map must be locked on entry and will remain locked on return. 1569 * 1570 * Note that you should also pass the start_entry returned by 1571 * vm_map_clip_range(). However, if you block between the two calls 1572 * with the map unlocked please be aware that the start_entry may 1573 * have been clipped and you may need to scan it backwards to find 1574 * the entry corresponding with the original start address. You are 1575 * responsible for this, vm_map_unclip_range() expects the correct 1576 * start_entry to be passed to it and will KASSERT otherwise. 1577 */ 1578 static 1579 void 1580 vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry, 1581 vm_offset_t start, vm_offset_t end, 1582 int *countp, int flags) 1583 { 1584 vm_map_entry_t entry; 1585 1586 entry = start_entry; 1587 1588 KASSERT(entry->start == start, ("unclip_range: illegal base entry")); 1589 while (entry != &map->header && entry->start < end) { 1590 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, 1591 ("in-transition flag not set during unclip on: %p", 1592 entry)); 1593 KASSERT(entry->end <= end, 1594 ("unclip_range: tail wasn't clipped")); 1595 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; 1596 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { 1597 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; 1598 wakeup(map); 1599 } 1600 entry = entry->next; 1601 } 1602 1603 /* 1604 * Simplification does not block so there is no restart case. 1605 */ 1606 entry = start_entry; 1607 while (entry != &map->header && entry->start < end) { 1608 vm_map_simplify_entry(map, entry, countp); 1609 entry = entry->next; 1610 } 1611 } 1612 1613 /* 1614 * Mark the given range as handled by a subordinate map. 1615 * 1616 * This range must have been created with vm_map_find(), and no other 1617 * operations may have been performed on this range prior to calling 1618 * vm_map_submap(). 1619 * 1620 * Submappings cannot be removed. 1621 * 1622 * No requirements. 1623 */ 1624 int 1625 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap) 1626 { 1627 vm_map_entry_t entry; 1628 int result = KERN_INVALID_ARGUMENT; 1629 int count; 1630 1631 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1632 vm_map_lock(map); 1633 1634 VM_MAP_RANGE_CHECK(map, start, end); 1635 1636 if (vm_map_lookup_entry(map, start, &entry)) { 1637 vm_map_clip_start(map, entry, start, &count); 1638 } else { 1639 entry = entry->next; 1640 } 1641 1642 vm_map_clip_end(map, entry, end, &count); 1643 1644 if ((entry->start == start) && (entry->end == end) && 1645 ((entry->eflags & MAP_ENTRY_COW) == 0) && 1646 (entry->object.vm_object == NULL)) { 1647 entry->object.sub_map = submap; 1648 entry->maptype = VM_MAPTYPE_SUBMAP; 1649 result = KERN_SUCCESS; 1650 } 1651 vm_map_unlock(map); 1652 vm_map_entry_release(count); 1653 1654 return (result); 1655 } 1656 1657 /* 1658 * Sets the protection of the specified address region in the target map. 1659 * If "set_max" is specified, the maximum protection is to be set; 1660 * otherwise, only the current protection is affected. 1661 * 1662 * The protection is not applicable to submaps, but is applicable to normal 1663 * maps and maps governed by virtual page tables. For example, when operating 1664 * on a virtual page table our protection basically controls how COW occurs 1665 * on the backing object, whereas the virtual page table abstraction itself 1666 * is an abstraction for userland. 1667 * 1668 * No requirements. 1669 */ 1670 int 1671 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, 1672 vm_prot_t new_prot, boolean_t set_max) 1673 { 1674 vm_map_entry_t current; 1675 vm_map_entry_t entry; 1676 int count; 1677 1678 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1679 vm_map_lock(map); 1680 1681 VM_MAP_RANGE_CHECK(map, start, end); 1682 1683 if (vm_map_lookup_entry(map, start, &entry)) { 1684 vm_map_clip_start(map, entry, start, &count); 1685 } else { 1686 entry = entry->next; 1687 } 1688 1689 /* 1690 * Make a first pass to check for protection violations. 1691 */ 1692 current = entry; 1693 while ((current != &map->header) && (current->start < end)) { 1694 if (current->maptype == VM_MAPTYPE_SUBMAP) { 1695 vm_map_unlock(map); 1696 vm_map_entry_release(count); 1697 return (KERN_INVALID_ARGUMENT); 1698 } 1699 if ((new_prot & current->max_protection) != new_prot) { 1700 vm_map_unlock(map); 1701 vm_map_entry_release(count); 1702 return (KERN_PROTECTION_FAILURE); 1703 } 1704 current = current->next; 1705 } 1706 1707 /* 1708 * Go back and fix up protections. [Note that clipping is not 1709 * necessary the second time.] 1710 */ 1711 current = entry; 1712 1713 while ((current != &map->header) && (current->start < end)) { 1714 vm_prot_t old_prot; 1715 1716 vm_map_clip_end(map, current, end, &count); 1717 1718 old_prot = current->protection; 1719 if (set_max) { 1720 current->protection = 1721 (current->max_protection = new_prot) & 1722 old_prot; 1723 } else { 1724 current->protection = new_prot; 1725 } 1726 1727 /* 1728 * Update physical map if necessary. Worry about copy-on-write 1729 * here -- CHECK THIS XXX 1730 */ 1731 1732 if (current->protection != old_prot) { 1733 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ 1734 VM_PROT_ALL) 1735 1736 pmap_protect(map->pmap, current->start, 1737 current->end, 1738 current->protection & MASK(current)); 1739 #undef MASK 1740 } 1741 1742 vm_map_simplify_entry(map, current, &count); 1743 1744 current = current->next; 1745 } 1746 1747 vm_map_unlock(map); 1748 vm_map_entry_release(count); 1749 return (KERN_SUCCESS); 1750 } 1751 1752 /* 1753 * This routine traverses a processes map handling the madvise 1754 * system call. Advisories are classified as either those effecting 1755 * the vm_map_entry structure, or those effecting the underlying 1756 * objects. 1757 * 1758 * The <value> argument is used for extended madvise calls. 1759 * 1760 * No requirements. 1761 */ 1762 int 1763 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end, 1764 int behav, off_t value) 1765 { 1766 vm_map_entry_t current, entry; 1767 int modify_map = 0; 1768 int error = 0; 1769 int count; 1770 1771 /* 1772 * Some madvise calls directly modify the vm_map_entry, in which case 1773 * we need to use an exclusive lock on the map and we need to perform 1774 * various clipping operations. Otherwise we only need a read-lock 1775 * on the map. 1776 */ 1777 1778 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1779 1780 switch(behav) { 1781 case MADV_NORMAL: 1782 case MADV_SEQUENTIAL: 1783 case MADV_RANDOM: 1784 case MADV_NOSYNC: 1785 case MADV_AUTOSYNC: 1786 case MADV_NOCORE: 1787 case MADV_CORE: 1788 case MADV_SETMAP: 1789 case MADV_INVAL: 1790 modify_map = 1; 1791 vm_map_lock(map); 1792 break; 1793 case MADV_WILLNEED: 1794 case MADV_DONTNEED: 1795 case MADV_FREE: 1796 vm_map_lock_read(map); 1797 break; 1798 default: 1799 vm_map_entry_release(count); 1800 return (EINVAL); 1801 } 1802 1803 /* 1804 * Locate starting entry and clip if necessary. 1805 */ 1806 1807 VM_MAP_RANGE_CHECK(map, start, end); 1808 1809 if (vm_map_lookup_entry(map, start, &entry)) { 1810 if (modify_map) 1811 vm_map_clip_start(map, entry, start, &count); 1812 } else { 1813 entry = entry->next; 1814 } 1815 1816 if (modify_map) { 1817 /* 1818 * madvise behaviors that are implemented in the vm_map_entry. 1819 * 1820 * We clip the vm_map_entry so that behavioral changes are 1821 * limited to the specified address range. 1822 */ 1823 for (current = entry; 1824 (current != &map->header) && (current->start < end); 1825 current = current->next 1826 ) { 1827 if (current->maptype == VM_MAPTYPE_SUBMAP) 1828 continue; 1829 1830 vm_map_clip_end(map, current, end, &count); 1831 1832 switch (behav) { 1833 case MADV_NORMAL: 1834 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); 1835 break; 1836 case MADV_SEQUENTIAL: 1837 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); 1838 break; 1839 case MADV_RANDOM: 1840 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); 1841 break; 1842 case MADV_NOSYNC: 1843 current->eflags |= MAP_ENTRY_NOSYNC; 1844 break; 1845 case MADV_AUTOSYNC: 1846 current->eflags &= ~MAP_ENTRY_NOSYNC; 1847 break; 1848 case MADV_NOCORE: 1849 current->eflags |= MAP_ENTRY_NOCOREDUMP; 1850 break; 1851 case MADV_CORE: 1852 current->eflags &= ~MAP_ENTRY_NOCOREDUMP; 1853 break; 1854 case MADV_INVAL: 1855 /* 1856 * Invalidate the related pmap entries, used 1857 * to flush portions of the real kernel's 1858 * pmap when the caller has removed or 1859 * modified existing mappings in a virtual 1860 * page table. 1861 */ 1862 pmap_remove(map->pmap, 1863 current->start, current->end); 1864 break; 1865 case MADV_SETMAP: 1866 /* 1867 * Set the page directory page for a map 1868 * governed by a virtual page table. Mark 1869 * the entry as being governed by a virtual 1870 * page table if it is not. 1871 * 1872 * XXX the page directory page is stored 1873 * in the avail_ssize field if the map_entry. 1874 * 1875 * XXX the map simplification code does not 1876 * compare this field so weird things may 1877 * happen if you do not apply this function 1878 * to the entire mapping governed by the 1879 * virtual page table. 1880 */ 1881 if (current->maptype != VM_MAPTYPE_VPAGETABLE) { 1882 error = EINVAL; 1883 break; 1884 } 1885 current->aux.master_pde = value; 1886 pmap_remove(map->pmap, 1887 current->start, current->end); 1888 break; 1889 default: 1890 error = EINVAL; 1891 break; 1892 } 1893 vm_map_simplify_entry(map, current, &count); 1894 } 1895 vm_map_unlock(map); 1896 } else { 1897 vm_pindex_t pindex; 1898 int count; 1899 1900 /* 1901 * madvise behaviors that are implemented in the underlying 1902 * vm_object. 1903 * 1904 * Since we don't clip the vm_map_entry, we have to clip 1905 * the vm_object pindex and count. 1906 * 1907 * NOTE! We currently do not support these functions on 1908 * virtual page tables. 1909 */ 1910 for (current = entry; 1911 (current != &map->header) && (current->start < end); 1912 current = current->next 1913 ) { 1914 vm_offset_t useStart; 1915 1916 if (current->maptype != VM_MAPTYPE_NORMAL) 1917 continue; 1918 1919 pindex = OFF_TO_IDX(current->offset); 1920 count = atop(current->end - current->start); 1921 useStart = current->start; 1922 1923 if (current->start < start) { 1924 pindex += atop(start - current->start); 1925 count -= atop(start - current->start); 1926 useStart = start; 1927 } 1928 if (current->end > end) 1929 count -= atop(current->end - end); 1930 1931 if (count <= 0) 1932 continue; 1933 1934 vm_object_madvise(current->object.vm_object, 1935 pindex, count, behav); 1936 1937 /* 1938 * Try to populate the page table. Mappings governed 1939 * by virtual page tables cannot be pre-populated 1940 * without a lot of work so don't try. 1941 */ 1942 if (behav == MADV_WILLNEED && 1943 current->maptype != VM_MAPTYPE_VPAGETABLE) { 1944 pmap_object_init_pt( 1945 map->pmap, 1946 useStart, 1947 current->protection, 1948 current->object.vm_object, 1949 pindex, 1950 (count << PAGE_SHIFT), 1951 MAP_PREFAULT_MADVISE 1952 ); 1953 } 1954 } 1955 vm_map_unlock_read(map); 1956 } 1957 vm_map_entry_release(count); 1958 return(error); 1959 } 1960 1961 1962 /* 1963 * Sets the inheritance of the specified address range in the target map. 1964 * Inheritance affects how the map will be shared with child maps at the 1965 * time of vm_map_fork. 1966 */ 1967 int 1968 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, 1969 vm_inherit_t new_inheritance) 1970 { 1971 vm_map_entry_t entry; 1972 vm_map_entry_t temp_entry; 1973 int count; 1974 1975 switch (new_inheritance) { 1976 case VM_INHERIT_NONE: 1977 case VM_INHERIT_COPY: 1978 case VM_INHERIT_SHARE: 1979 break; 1980 default: 1981 return (KERN_INVALID_ARGUMENT); 1982 } 1983 1984 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1985 vm_map_lock(map); 1986 1987 VM_MAP_RANGE_CHECK(map, start, end); 1988 1989 if (vm_map_lookup_entry(map, start, &temp_entry)) { 1990 entry = temp_entry; 1991 vm_map_clip_start(map, entry, start, &count); 1992 } else 1993 entry = temp_entry->next; 1994 1995 while ((entry != &map->header) && (entry->start < end)) { 1996 vm_map_clip_end(map, entry, end, &count); 1997 1998 entry->inheritance = new_inheritance; 1999 2000 vm_map_simplify_entry(map, entry, &count); 2001 2002 entry = entry->next; 2003 } 2004 vm_map_unlock(map); 2005 vm_map_entry_release(count); 2006 return (KERN_SUCCESS); 2007 } 2008 2009 /* 2010 * Implement the semantics of mlock 2011 */ 2012 int 2013 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, 2014 boolean_t new_pageable) 2015 { 2016 vm_map_entry_t entry; 2017 vm_map_entry_t start_entry; 2018 vm_offset_t end; 2019 int rv = KERN_SUCCESS; 2020 int count; 2021 2022 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2023 vm_map_lock(map); 2024 VM_MAP_RANGE_CHECK(map, start, real_end); 2025 end = real_end; 2026 2027 start_entry = vm_map_clip_range(map, start, end, &count, 2028 MAP_CLIP_NO_HOLES); 2029 if (start_entry == NULL) { 2030 vm_map_unlock(map); 2031 vm_map_entry_release(count); 2032 return (KERN_INVALID_ADDRESS); 2033 } 2034 2035 if (new_pageable == 0) { 2036 entry = start_entry; 2037 while ((entry != &map->header) && (entry->start < end)) { 2038 vm_offset_t save_start; 2039 vm_offset_t save_end; 2040 2041 /* 2042 * Already user wired or hard wired (trivial cases) 2043 */ 2044 if (entry->eflags & MAP_ENTRY_USER_WIRED) { 2045 entry = entry->next; 2046 continue; 2047 } 2048 if (entry->wired_count != 0) { 2049 entry->wired_count++; 2050 entry->eflags |= MAP_ENTRY_USER_WIRED; 2051 entry = entry->next; 2052 continue; 2053 } 2054 2055 /* 2056 * A new wiring requires instantiation of appropriate 2057 * management structures and the faulting in of the 2058 * page. 2059 */ 2060 if (entry->maptype != VM_MAPTYPE_SUBMAP) { 2061 int copyflag = entry->eflags & 2062 MAP_ENTRY_NEEDS_COPY; 2063 if (copyflag && ((entry->protection & 2064 VM_PROT_WRITE) != 0)) { 2065 vm_map_entry_shadow(entry); 2066 } else if (entry->object.vm_object == NULL && 2067 !map->system_map) { 2068 vm_map_entry_allocate_object(entry); 2069 } 2070 } 2071 entry->wired_count++; 2072 entry->eflags |= MAP_ENTRY_USER_WIRED; 2073 2074 /* 2075 * Now fault in the area. Note that vm_fault_wire() 2076 * may release the map lock temporarily, it will be 2077 * relocked on return. The in-transition 2078 * flag protects the entries. 2079 */ 2080 save_start = entry->start; 2081 save_end = entry->end; 2082 rv = vm_fault_wire(map, entry, TRUE); 2083 if (rv) { 2084 CLIP_CHECK_BACK(entry, save_start); 2085 for (;;) { 2086 KASSERT(entry->wired_count == 1, ("bad wired_count on entry")); 2087 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2088 entry->wired_count = 0; 2089 if (entry->end == save_end) 2090 break; 2091 entry = entry->next; 2092 KASSERT(entry != &map->header, ("bad entry clip during backout")); 2093 } 2094 end = save_start; /* unwire the rest */ 2095 break; 2096 } 2097 /* 2098 * note that even though the entry might have been 2099 * clipped, the USER_WIRED flag we set prevents 2100 * duplication so we do not have to do a 2101 * clip check. 2102 */ 2103 entry = entry->next; 2104 } 2105 2106 /* 2107 * If we failed fall through to the unwiring section to 2108 * unwire what we had wired so far. 'end' has already 2109 * been adjusted. 2110 */ 2111 if (rv) 2112 new_pageable = 1; 2113 2114 /* 2115 * start_entry might have been clipped if we unlocked the 2116 * map and blocked. No matter how clipped it has gotten 2117 * there should be a fragment that is on our start boundary. 2118 */ 2119 CLIP_CHECK_BACK(start_entry, start); 2120 } 2121 2122 /* 2123 * Deal with the unwiring case. 2124 */ 2125 if (new_pageable) { 2126 /* 2127 * This is the unwiring case. We must first ensure that the 2128 * range to be unwired is really wired down. We know there 2129 * are no holes. 2130 */ 2131 entry = start_entry; 2132 while ((entry != &map->header) && (entry->start < end)) { 2133 if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { 2134 rv = KERN_INVALID_ARGUMENT; 2135 goto done; 2136 } 2137 KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry)); 2138 entry = entry->next; 2139 } 2140 2141 /* 2142 * Now decrement the wiring count for each region. If a region 2143 * becomes completely unwired, unwire its physical pages and 2144 * mappings. 2145 */ 2146 /* 2147 * The map entries are processed in a loop, checking to 2148 * make sure the entry is wired and asserting it has a wired 2149 * count. However, another loop was inserted more-or-less in 2150 * the middle of the unwiring path. This loop picks up the 2151 * "entry" loop variable from the first loop without first 2152 * setting it to start_entry. Naturally, the secound loop 2153 * is never entered and the pages backing the entries are 2154 * never unwired. This can lead to a leak of wired pages. 2155 */ 2156 entry = start_entry; 2157 while ((entry != &map->header) && (entry->start < end)) { 2158 KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED, 2159 ("expected USER_WIRED on entry %p", entry)); 2160 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2161 entry->wired_count--; 2162 if (entry->wired_count == 0) 2163 vm_fault_unwire(map, entry); 2164 entry = entry->next; 2165 } 2166 } 2167 done: 2168 vm_map_unclip_range(map, start_entry, start, real_end, &count, 2169 MAP_CLIP_NO_HOLES); 2170 map->timestamp++; 2171 vm_map_unlock(map); 2172 vm_map_entry_release(count); 2173 return (rv); 2174 } 2175 2176 /* 2177 * Sets the pageability of the specified address range in the target map. 2178 * Regions specified as not pageable require locked-down physical 2179 * memory and physical page maps. 2180 * 2181 * The map must not be locked, but a reference must remain to the map 2182 * throughout the call. 2183 * 2184 * This function may be called via the zalloc path and must properly 2185 * reserve map entries for kernel_map. 2186 * 2187 * No requirements. 2188 */ 2189 int 2190 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags) 2191 { 2192 vm_map_entry_t entry; 2193 vm_map_entry_t start_entry; 2194 vm_offset_t end; 2195 int rv = KERN_SUCCESS; 2196 int count; 2197 2198 if (kmflags & KM_KRESERVE) 2199 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT); 2200 else 2201 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2202 vm_map_lock(map); 2203 VM_MAP_RANGE_CHECK(map, start, real_end); 2204 end = real_end; 2205 2206 start_entry = vm_map_clip_range(map, start, end, &count, 2207 MAP_CLIP_NO_HOLES); 2208 if (start_entry == NULL) { 2209 vm_map_unlock(map); 2210 rv = KERN_INVALID_ADDRESS; 2211 goto failure; 2212 } 2213 if ((kmflags & KM_PAGEABLE) == 0) { 2214 /* 2215 * Wiring. 2216 * 2217 * 1. Holding the write lock, we create any shadow or zero-fill 2218 * objects that need to be created. Then we clip each map 2219 * entry to the region to be wired and increment its wiring 2220 * count. We create objects before clipping the map entries 2221 * to avoid object proliferation. 2222 * 2223 * 2. We downgrade to a read lock, and call vm_fault_wire to 2224 * fault in the pages for any newly wired area (wired_count is 2225 * 1). 2226 * 2227 * Downgrading to a read lock for vm_fault_wire avoids a 2228 * possible deadlock with another process that may have faulted 2229 * on one of the pages to be wired (it would mark the page busy, 2230 * blocking us, then in turn block on the map lock that we 2231 * hold). Because of problems in the recursive lock package, 2232 * we cannot upgrade to a write lock in vm_map_lookup. Thus, 2233 * any actions that require the write lock must be done 2234 * beforehand. Because we keep the read lock on the map, the 2235 * copy-on-write status of the entries we modify here cannot 2236 * change. 2237 */ 2238 entry = start_entry; 2239 while ((entry != &map->header) && (entry->start < end)) { 2240 /* 2241 * Trivial case if the entry is already wired 2242 */ 2243 if (entry->wired_count) { 2244 entry->wired_count++; 2245 entry = entry->next; 2246 continue; 2247 } 2248 2249 /* 2250 * The entry is being newly wired, we have to setup 2251 * appropriate management structures. A shadow 2252 * object is required for a copy-on-write region, 2253 * or a normal object for a zero-fill region. We 2254 * do not have to do this for entries that point to sub 2255 * maps because we won't hold the lock on the sub map. 2256 */ 2257 if (entry->maptype != VM_MAPTYPE_SUBMAP) { 2258 int copyflag = entry->eflags & 2259 MAP_ENTRY_NEEDS_COPY; 2260 if (copyflag && ((entry->protection & 2261 VM_PROT_WRITE) != 0)) { 2262 vm_map_entry_shadow(entry); 2263 } else if (entry->object.vm_object == NULL && 2264 !map->system_map) { 2265 vm_map_entry_allocate_object(entry); 2266 } 2267 } 2268 2269 entry->wired_count++; 2270 entry = entry->next; 2271 } 2272 2273 /* 2274 * Pass 2. 2275 */ 2276 2277 /* 2278 * HACK HACK HACK HACK 2279 * 2280 * vm_fault_wire() temporarily unlocks the map to avoid 2281 * deadlocks. The in-transition flag from vm_map_clip_range 2282 * call should protect us from changes while the map is 2283 * unlocked. T 2284 * 2285 * NOTE: Previously this comment stated that clipping might 2286 * still occur while the entry is unlocked, but from 2287 * what I can tell it actually cannot. 2288 * 2289 * It is unclear whether the CLIP_CHECK_*() calls 2290 * are still needed but we keep them in anyway. 2291 * 2292 * HACK HACK HACK HACK 2293 */ 2294 2295 entry = start_entry; 2296 while (entry != &map->header && entry->start < end) { 2297 /* 2298 * If vm_fault_wire fails for any page we need to undo 2299 * what has been done. We decrement the wiring count 2300 * for those pages which have not yet been wired (now) 2301 * and unwire those that have (later). 2302 */ 2303 vm_offset_t save_start = entry->start; 2304 vm_offset_t save_end = entry->end; 2305 2306 if (entry->wired_count == 1) 2307 rv = vm_fault_wire(map, entry, FALSE); 2308 if (rv) { 2309 CLIP_CHECK_BACK(entry, save_start); 2310 for (;;) { 2311 KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly")); 2312 entry->wired_count = 0; 2313 if (entry->end == save_end) 2314 break; 2315 entry = entry->next; 2316 KASSERT(entry != &map->header, ("bad entry clip during backout")); 2317 } 2318 end = save_start; 2319 break; 2320 } 2321 CLIP_CHECK_FWD(entry, save_end); 2322 entry = entry->next; 2323 } 2324 2325 /* 2326 * If a failure occured undo everything by falling through 2327 * to the unwiring code. 'end' has already been adjusted 2328 * appropriately. 2329 */ 2330 if (rv) 2331 kmflags |= KM_PAGEABLE; 2332 2333 /* 2334 * start_entry is still IN_TRANSITION but may have been 2335 * clipped since vm_fault_wire() unlocks and relocks the 2336 * map. No matter how clipped it has gotten there should 2337 * be a fragment that is on our start boundary. 2338 */ 2339 CLIP_CHECK_BACK(start_entry, start); 2340 } 2341 2342 if (kmflags & KM_PAGEABLE) { 2343 /* 2344 * This is the unwiring case. We must first ensure that the 2345 * range to be unwired is really wired down. We know there 2346 * are no holes. 2347 */ 2348 entry = start_entry; 2349 while ((entry != &map->header) && (entry->start < end)) { 2350 if (entry->wired_count == 0) { 2351 rv = KERN_INVALID_ARGUMENT; 2352 goto done; 2353 } 2354 entry = entry->next; 2355 } 2356 2357 /* 2358 * Now decrement the wiring count for each region. If a region 2359 * becomes completely unwired, unwire its physical pages and 2360 * mappings. 2361 */ 2362 entry = start_entry; 2363 while ((entry != &map->header) && (entry->start < end)) { 2364 entry->wired_count--; 2365 if (entry->wired_count == 0) 2366 vm_fault_unwire(map, entry); 2367 entry = entry->next; 2368 } 2369 } 2370 done: 2371 vm_map_unclip_range(map, start_entry, start, real_end, 2372 &count, MAP_CLIP_NO_HOLES); 2373 map->timestamp++; 2374 vm_map_unlock(map); 2375 failure: 2376 if (kmflags & KM_KRESERVE) 2377 vm_map_entry_krelease(count); 2378 else 2379 vm_map_entry_release(count); 2380 return (rv); 2381 } 2382 2383 /* 2384 * Mark a newly allocated address range as wired but do not fault in 2385 * the pages. The caller is expected to load the pages into the object. 2386 * 2387 * The map must be locked on entry and will remain locked on return. 2388 * No other requirements. 2389 */ 2390 void 2391 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, 2392 int *countp) 2393 { 2394 vm_map_entry_t scan; 2395 vm_map_entry_t entry; 2396 2397 entry = vm_map_clip_range(map, addr, addr + size, 2398 countp, MAP_CLIP_NO_HOLES); 2399 for (scan = entry; 2400 scan != &map->header && scan->start < addr + size; 2401 scan = scan->next) { 2402 KKASSERT(entry->wired_count == 0); 2403 entry->wired_count = 1; 2404 } 2405 vm_map_unclip_range(map, entry, addr, addr + size, 2406 countp, MAP_CLIP_NO_HOLES); 2407 } 2408 2409 /* 2410 * Push any dirty cached pages in the address range to their pager. 2411 * If syncio is TRUE, dirty pages are written synchronously. 2412 * If invalidate is TRUE, any cached pages are freed as well. 2413 * 2414 * This routine is called by sys_msync() 2415 * 2416 * Returns an error if any part of the specified range is not mapped. 2417 * 2418 * No requirements. 2419 */ 2420 int 2421 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end, 2422 boolean_t syncio, boolean_t invalidate) 2423 { 2424 vm_map_entry_t current; 2425 vm_map_entry_t entry; 2426 vm_size_t size; 2427 vm_object_t object; 2428 vm_ooffset_t offset; 2429 2430 vm_map_lock_read(map); 2431 VM_MAP_RANGE_CHECK(map, start, end); 2432 if (!vm_map_lookup_entry(map, start, &entry)) { 2433 vm_map_unlock_read(map); 2434 return (KERN_INVALID_ADDRESS); 2435 } 2436 /* 2437 * Make a first pass to check for holes. 2438 */ 2439 for (current = entry; current->start < end; current = current->next) { 2440 if (current->maptype == VM_MAPTYPE_SUBMAP) { 2441 vm_map_unlock_read(map); 2442 return (KERN_INVALID_ARGUMENT); 2443 } 2444 if (end > current->end && 2445 (current->next == &map->header || 2446 current->end != current->next->start)) { 2447 vm_map_unlock_read(map); 2448 return (KERN_INVALID_ADDRESS); 2449 } 2450 } 2451 2452 if (invalidate) 2453 pmap_remove(vm_map_pmap(map), start, end); 2454 2455 /* 2456 * Make a second pass, cleaning/uncaching pages from the indicated 2457 * objects as we go. 2458 * 2459 * Hold vm_token to avoid blocking in vm_object_reference() 2460 */ 2461 lwkt_gettoken(&vm_token); 2462 lwkt_gettoken(&vmobj_token); 2463 2464 for (current = entry; current->start < end; current = current->next) { 2465 offset = current->offset + (start - current->start); 2466 size = (end <= current->end ? end : current->end) - start; 2467 if (current->maptype == VM_MAPTYPE_SUBMAP) { 2468 vm_map_t smap; 2469 vm_map_entry_t tentry; 2470 vm_size_t tsize; 2471 2472 smap = current->object.sub_map; 2473 vm_map_lock_read(smap); 2474 vm_map_lookup_entry(smap, offset, &tentry); 2475 tsize = tentry->end - offset; 2476 if (tsize < size) 2477 size = tsize; 2478 object = tentry->object.vm_object; 2479 offset = tentry->offset + (offset - tentry->start); 2480 vm_map_unlock_read(smap); 2481 } else { 2482 object = current->object.vm_object; 2483 } 2484 /* 2485 * Note that there is absolutely no sense in writing out 2486 * anonymous objects, so we track down the vnode object 2487 * to write out. 2488 * We invalidate (remove) all pages from the address space 2489 * anyway, for semantic correctness. 2490 * 2491 * note: certain anonymous maps, such as MAP_NOSYNC maps, 2492 * may start out with a NULL object. 2493 */ 2494 while (object && object->backing_object) { 2495 offset += object->backing_object_offset; 2496 object = object->backing_object; 2497 if (object->size < OFF_TO_IDX( offset + size)) 2498 size = IDX_TO_OFF(object->size) - offset; 2499 } 2500 if (object && (object->type == OBJT_VNODE) && 2501 (current->protection & VM_PROT_WRITE) && 2502 (object->flags & OBJ_NOMSYNC) == 0) { 2503 /* 2504 * Flush pages if writing is allowed, invalidate them 2505 * if invalidation requested. Pages undergoing I/O 2506 * will be ignored by vm_object_page_remove(). 2507 * 2508 * We cannot lock the vnode and then wait for paging 2509 * to complete without deadlocking against vm_fault. 2510 * Instead we simply call vm_object_page_remove() and 2511 * allow it to block internally on a page-by-page 2512 * basis when it encounters pages undergoing async 2513 * I/O. 2514 */ 2515 int flags; 2516 2517 vm_object_reference_locked(object); 2518 vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY); 2519 flags = (syncio || invalidate) ? OBJPC_SYNC : 0; 2520 flags |= invalidate ? OBJPC_INVAL : 0; 2521 2522 /* 2523 * When operating on a virtual page table just 2524 * flush the whole object. XXX we probably ought 2525 * to 2526 */ 2527 switch(current->maptype) { 2528 case VM_MAPTYPE_NORMAL: 2529 vm_object_page_clean(object, 2530 OFF_TO_IDX(offset), 2531 OFF_TO_IDX(offset + size + PAGE_MASK), 2532 flags); 2533 break; 2534 case VM_MAPTYPE_VPAGETABLE: 2535 vm_object_page_clean(object, 0, 0, flags); 2536 break; 2537 } 2538 vn_unlock(((struct vnode *)object->handle)); 2539 vm_object_deallocate_locked(object); 2540 } 2541 if (object && invalidate && 2542 ((object->type == OBJT_VNODE) || 2543 (object->type == OBJT_DEVICE))) { 2544 int clean_only = 2545 (object->type == OBJT_DEVICE) ? FALSE : TRUE; 2546 vm_object_reference_locked(object); 2547 switch(current->maptype) { 2548 case VM_MAPTYPE_NORMAL: 2549 vm_object_page_remove(object, 2550 OFF_TO_IDX(offset), 2551 OFF_TO_IDX(offset + size + PAGE_MASK), 2552 clean_only); 2553 break; 2554 case VM_MAPTYPE_VPAGETABLE: 2555 vm_object_page_remove(object, 0, 0, clean_only); 2556 break; 2557 } 2558 vm_object_deallocate_locked(object); 2559 } 2560 start += size; 2561 } 2562 2563 lwkt_reltoken(&vmobj_token); 2564 lwkt_reltoken(&vm_token); 2565 vm_map_unlock_read(map); 2566 2567 return (KERN_SUCCESS); 2568 } 2569 2570 /* 2571 * Make the region specified by this entry pageable. 2572 * 2573 * The vm_map must be exclusively locked. 2574 */ 2575 static void 2576 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) 2577 { 2578 entry->eflags &= ~MAP_ENTRY_USER_WIRED; 2579 entry->wired_count = 0; 2580 vm_fault_unwire(map, entry); 2581 } 2582 2583 /* 2584 * Deallocate the given entry from the target map. 2585 * 2586 * The vm_map must be exclusively locked. 2587 */ 2588 static void 2589 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp) 2590 { 2591 vm_map_entry_unlink(map, entry); 2592 map->size -= entry->end - entry->start; 2593 2594 switch(entry->maptype) { 2595 case VM_MAPTYPE_NORMAL: 2596 case VM_MAPTYPE_VPAGETABLE: 2597 vm_object_deallocate(entry->object.vm_object); 2598 break; 2599 default: 2600 break; 2601 } 2602 2603 vm_map_entry_dispose(map, entry, countp); 2604 } 2605 2606 /* 2607 * Deallocates the given address range from the target map. 2608 * 2609 * The vm_map must be exclusively locked. 2610 */ 2611 int 2612 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp) 2613 { 2614 vm_object_t object; 2615 vm_map_entry_t entry; 2616 vm_map_entry_t first_entry; 2617 2618 ASSERT_VM_MAP_LOCKED(map); 2619 again: 2620 /* 2621 * Find the start of the region, and clip it. Set entry to point 2622 * at the first record containing the requested address or, if no 2623 * such record exists, the next record with a greater address. The 2624 * loop will run from this point until a record beyond the termination 2625 * address is encountered. 2626 * 2627 * map->hint must be adjusted to not point to anything we delete, 2628 * so set it to the entry prior to the one being deleted. 2629 * 2630 * GGG see other GGG comment. 2631 */ 2632 if (vm_map_lookup_entry(map, start, &first_entry)) { 2633 entry = first_entry; 2634 vm_map_clip_start(map, entry, start, countp); 2635 map->hint = entry->prev; /* possible problem XXX */ 2636 } else { 2637 map->hint = first_entry; /* possible problem XXX */ 2638 entry = first_entry->next; 2639 } 2640 2641 /* 2642 * If a hole opens up prior to the current first_free then 2643 * adjust first_free. As with map->hint, map->first_free 2644 * cannot be left set to anything we might delete. 2645 */ 2646 if (entry == &map->header) { 2647 map->first_free = &map->header; 2648 } else if (map->first_free->start >= start) { 2649 map->first_free = entry->prev; 2650 } 2651 2652 /* 2653 * Step through all entries in this region 2654 */ 2655 while ((entry != &map->header) && (entry->start < end)) { 2656 vm_map_entry_t next; 2657 vm_offset_t s, e; 2658 vm_pindex_t offidxstart, offidxend, count; 2659 2660 /* 2661 * If we hit an in-transition entry we have to sleep and 2662 * retry. It's easier (and not really slower) to just retry 2663 * since this case occurs so rarely and the hint is already 2664 * pointing at the right place. We have to reset the 2665 * start offset so as not to accidently delete an entry 2666 * another process just created in vacated space. 2667 */ 2668 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { 2669 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; 2670 start = entry->start; 2671 ++mycpu->gd_cnt.v_intrans_coll; 2672 ++mycpu->gd_cnt.v_intrans_wait; 2673 vm_map_transition_wait(map); 2674 goto again; 2675 } 2676 vm_map_clip_end(map, entry, end, countp); 2677 2678 s = entry->start; 2679 e = entry->end; 2680 next = entry->next; 2681 2682 offidxstart = OFF_TO_IDX(entry->offset); 2683 count = OFF_TO_IDX(e - s); 2684 object = entry->object.vm_object; 2685 2686 /* 2687 * Unwire before removing addresses from the pmap; otherwise, 2688 * unwiring will put the entries back in the pmap. 2689 */ 2690 if (entry->wired_count != 0) 2691 vm_map_entry_unwire(map, entry); 2692 2693 offidxend = offidxstart + count; 2694 2695 /* 2696 * Hold vm_token when manipulating vm_objects, 2697 * 2698 * Hold vmobj_token when potentially adding or removing 2699 * objects (collapse requires both). 2700 */ 2701 lwkt_gettoken(&vm_token); 2702 lwkt_gettoken(&vmobj_token); 2703 2704 if (object == &kernel_object) { 2705 vm_object_page_remove(object, offidxstart, 2706 offidxend, FALSE); 2707 } else { 2708 pmap_remove(map->pmap, s, e); 2709 2710 if (object != NULL && 2711 object->ref_count != 1 && 2712 (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == 2713 OBJ_ONEMAPPING && 2714 (object->type == OBJT_DEFAULT || 2715 object->type == OBJT_SWAP)) { 2716 vm_object_collapse(object); 2717 vm_object_page_remove(object, offidxstart, 2718 offidxend, FALSE); 2719 if (object->type == OBJT_SWAP) { 2720 swap_pager_freespace(object, 2721 offidxstart, 2722 count); 2723 } 2724 if (offidxend >= object->size && 2725 offidxstart < object->size) { 2726 object->size = offidxstart; 2727 } 2728 } 2729 } 2730 lwkt_reltoken(&vmobj_token); 2731 lwkt_reltoken(&vm_token); 2732 2733 /* 2734 * Delete the entry (which may delete the object) only after 2735 * removing all pmap entries pointing to its pages. 2736 * (Otherwise, its page frames may be reallocated, and any 2737 * modify bits will be set in the wrong object!) 2738 */ 2739 vm_map_entry_delete(map, entry, countp); 2740 entry = next; 2741 } 2742 return (KERN_SUCCESS); 2743 } 2744 2745 /* 2746 * Remove the given address range from the target map. 2747 * This is the exported form of vm_map_delete. 2748 * 2749 * No requirements. 2750 */ 2751 int 2752 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) 2753 { 2754 int result; 2755 int count; 2756 2757 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2758 vm_map_lock(map); 2759 VM_MAP_RANGE_CHECK(map, start, end); 2760 result = vm_map_delete(map, start, end, &count); 2761 vm_map_unlock(map); 2762 vm_map_entry_release(count); 2763 2764 return (result); 2765 } 2766 2767 /* 2768 * Assert that the target map allows the specified privilege on the 2769 * entire address region given. The entire region must be allocated. 2770 * 2771 * The caller must specify whether the vm_map is already locked or not. 2772 */ 2773 boolean_t 2774 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, 2775 vm_prot_t protection, boolean_t have_lock) 2776 { 2777 vm_map_entry_t entry; 2778 vm_map_entry_t tmp_entry; 2779 boolean_t result; 2780 2781 if (have_lock == FALSE) 2782 vm_map_lock_read(map); 2783 2784 if (!vm_map_lookup_entry(map, start, &tmp_entry)) { 2785 if (have_lock == FALSE) 2786 vm_map_unlock_read(map); 2787 return (FALSE); 2788 } 2789 entry = tmp_entry; 2790 2791 result = TRUE; 2792 while (start < end) { 2793 if (entry == &map->header) { 2794 result = FALSE; 2795 break; 2796 } 2797 /* 2798 * No holes allowed! 2799 */ 2800 2801 if (start < entry->start) { 2802 result = FALSE; 2803 break; 2804 } 2805 /* 2806 * Check protection associated with entry. 2807 */ 2808 2809 if ((entry->protection & protection) != protection) { 2810 result = FALSE; 2811 break; 2812 } 2813 /* go to next entry */ 2814 2815 start = entry->end; 2816 entry = entry->next; 2817 } 2818 if (have_lock == FALSE) 2819 vm_map_unlock_read(map); 2820 return (result); 2821 } 2822 2823 /* 2824 * Split the pages in a map entry into a new object. This affords 2825 * easier removal of unused pages, and keeps object inheritance from 2826 * being a negative impact on memory usage. 2827 * 2828 * The vm_map must be exclusively locked. 2829 */ 2830 static void 2831 vm_map_split(vm_map_entry_t entry) 2832 { 2833 vm_page_t m; 2834 vm_object_t orig_object, new_object, source; 2835 vm_offset_t s, e; 2836 vm_pindex_t offidxstart, offidxend, idx; 2837 vm_size_t size; 2838 vm_ooffset_t offset; 2839 2840 orig_object = entry->object.vm_object; 2841 if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP) 2842 return; 2843 if (orig_object->ref_count <= 1) 2844 return; 2845 2846 offset = entry->offset; 2847 s = entry->start; 2848 e = entry->end; 2849 2850 offidxstart = OFF_TO_IDX(offset); 2851 offidxend = offidxstart + OFF_TO_IDX(e - s); 2852 size = offidxend - offidxstart; 2853 2854 switch(orig_object->type) { 2855 case OBJT_DEFAULT: 2856 new_object = default_pager_alloc(NULL, IDX_TO_OFF(size), 2857 VM_PROT_ALL, 0); 2858 break; 2859 case OBJT_SWAP: 2860 new_object = swap_pager_alloc(NULL, IDX_TO_OFF(size), 2861 VM_PROT_ALL, 0); 2862 break; 2863 default: 2864 /* not reached */ 2865 new_object = NULL; 2866 KKASSERT(0); 2867 } 2868 if (new_object == NULL) 2869 return; 2870 2871 /* 2872 * vm_token required when manipulating vm_objects. 2873 */ 2874 lwkt_gettoken(&vm_token); 2875 lwkt_gettoken(&vmobj_token); 2876 2877 source = orig_object->backing_object; 2878 if (source != NULL) { 2879 /* Referenced by new_object */ 2880 vm_object_reference_locked(source); 2881 LIST_INSERT_HEAD(&source->shadow_head, 2882 new_object, shadow_list); 2883 vm_object_clear_flag(source, OBJ_ONEMAPPING); 2884 new_object->backing_object_offset = 2885 orig_object->backing_object_offset + 2886 IDX_TO_OFF(offidxstart); 2887 new_object->backing_object = source; 2888 source->shadow_count++; 2889 source->generation++; 2890 } 2891 2892 for (idx = 0; idx < size; idx++) { 2893 vm_page_t m; 2894 2895 retry: 2896 m = vm_page_lookup(orig_object, offidxstart + idx); 2897 if (m == NULL) 2898 continue; 2899 2900 /* 2901 * We must wait for pending I/O to complete before we can 2902 * rename the page. 2903 * 2904 * We do not have to VM_PROT_NONE the page as mappings should 2905 * not be changed by this operation. 2906 */ 2907 if (vm_page_sleep_busy(m, TRUE, "spltwt")) 2908 goto retry; 2909 vm_page_busy(m); 2910 vm_page_rename(m, new_object, idx); 2911 /* page automatically made dirty by rename and cache handled */ 2912 vm_page_busy(m); 2913 } 2914 2915 if (orig_object->type == OBJT_SWAP) { 2916 vm_object_pip_add(orig_object, 1); 2917 /* 2918 * copy orig_object pages into new_object 2919 * and destroy unneeded pages in 2920 * shadow object. 2921 */ 2922 swap_pager_copy(orig_object, new_object, offidxstart, 0); 2923 vm_object_pip_wakeup(orig_object); 2924 } 2925 2926 /* 2927 * Wakeup the pages we played with. No spl protection is needed 2928 * for a simple wakeup. 2929 */ 2930 for (idx = 0; idx < size; idx++) { 2931 m = vm_page_lookup(new_object, idx); 2932 if (m) 2933 vm_page_wakeup(m); 2934 } 2935 2936 entry->object.vm_object = new_object; 2937 entry->offset = 0LL; 2938 vm_object_deallocate_locked(orig_object); 2939 lwkt_reltoken(&vmobj_token); 2940 lwkt_reltoken(&vm_token); 2941 } 2942 2943 /* 2944 * Copies the contents of the source entry to the destination 2945 * entry. The entries *must* be aligned properly. 2946 * 2947 * The vm_map must be exclusively locked. 2948 * vm_token must be held 2949 */ 2950 static void 2951 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map, 2952 vm_map_entry_t src_entry, vm_map_entry_t dst_entry) 2953 { 2954 vm_object_t src_object; 2955 2956 if (dst_entry->maptype == VM_MAPTYPE_SUBMAP) 2957 return; 2958 if (src_entry->maptype == VM_MAPTYPE_SUBMAP) 2959 return; 2960 2961 ASSERT_LWKT_TOKEN_HELD(&vm_token); 2962 lwkt_gettoken(&vmobj_token); /* required for collapse */ 2963 2964 if (src_entry->wired_count == 0) { 2965 /* 2966 * If the source entry is marked needs_copy, it is already 2967 * write-protected. 2968 */ 2969 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 2970 pmap_protect(src_map->pmap, 2971 src_entry->start, 2972 src_entry->end, 2973 src_entry->protection & ~VM_PROT_WRITE); 2974 } 2975 2976 /* 2977 * Make a copy of the object. 2978 */ 2979 if ((src_object = src_entry->object.vm_object) != NULL) { 2980 if ((src_object->handle == NULL) && 2981 (src_object->type == OBJT_DEFAULT || 2982 src_object->type == OBJT_SWAP)) { 2983 vm_object_collapse(src_object); 2984 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { 2985 vm_map_split(src_entry); 2986 src_object = src_entry->object.vm_object; 2987 } 2988 } 2989 2990 vm_object_reference_locked(src_object); 2991 vm_object_clear_flag(src_object, OBJ_ONEMAPPING); 2992 dst_entry->object.vm_object = src_object; 2993 src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2994 dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); 2995 dst_entry->offset = src_entry->offset; 2996 } else { 2997 dst_entry->object.vm_object = NULL; 2998 dst_entry->offset = 0; 2999 } 3000 3001 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, 3002 dst_entry->end - dst_entry->start, src_entry->start); 3003 } else { 3004 /* 3005 * Of course, wired down pages can't be set copy-on-write. 3006 * Cause wired pages to be copied into the new map by 3007 * simulating faults (the new pages are pageable) 3008 */ 3009 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); 3010 } 3011 lwkt_reltoken(&vmobj_token); 3012 } 3013 3014 /* 3015 * vmspace_fork: 3016 * Create a new process vmspace structure and vm_map 3017 * based on those of an existing process. The new map 3018 * is based on the old map, according to the inheritance 3019 * values on the regions in that map. 3020 * 3021 * The source map must not be locked. 3022 * No requirements. 3023 */ 3024 struct vmspace * 3025 vmspace_fork(struct vmspace *vm1) 3026 { 3027 struct vmspace *vm2; 3028 vm_map_t old_map = &vm1->vm_map; 3029 vm_map_t new_map; 3030 vm_map_entry_t old_entry; 3031 vm_map_entry_t new_entry; 3032 vm_object_t object; 3033 int count; 3034 3035 lwkt_gettoken(&vm_token); 3036 lwkt_gettoken(&vmspace_token); 3037 lwkt_gettoken(&vmobj_token); 3038 vm_map_lock(old_map); 3039 old_map->infork = 1; 3040 3041 /* 3042 * XXX Note: upcalls are not copied. 3043 */ 3044 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset); 3045 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, 3046 (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy); 3047 new_map = &vm2->vm_map; /* XXX */ 3048 new_map->timestamp = 1; 3049 3050 vm_map_lock(new_map); 3051 3052 count = 0; 3053 old_entry = old_map->header.next; 3054 while (old_entry != &old_map->header) { 3055 ++count; 3056 old_entry = old_entry->next; 3057 } 3058 3059 count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT); 3060 3061 old_entry = old_map->header.next; 3062 while (old_entry != &old_map->header) { 3063 if (old_entry->maptype == VM_MAPTYPE_SUBMAP) 3064 panic("vm_map_fork: encountered a submap"); 3065 3066 switch (old_entry->inheritance) { 3067 case VM_INHERIT_NONE: 3068 break; 3069 case VM_INHERIT_SHARE: 3070 /* 3071 * Clone the entry, creating the shared object if 3072 * necessary. 3073 */ 3074 object = old_entry->object.vm_object; 3075 if (object == NULL) { 3076 vm_map_entry_allocate_object(old_entry); 3077 object = old_entry->object.vm_object; 3078 } 3079 3080 /* 3081 * Add the reference before calling vm_map_entry_shadow 3082 * to insure that a shadow object is created. 3083 */ 3084 vm_object_reference_locked(object); 3085 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3086 vm_map_entry_shadow(old_entry); 3087 /* Transfer the second reference too. */ 3088 vm_object_reference_locked( 3089 old_entry->object.vm_object); 3090 vm_object_deallocate_locked(object); 3091 object = old_entry->object.vm_object; 3092 } 3093 vm_object_clear_flag(object, OBJ_ONEMAPPING); 3094 3095 /* 3096 * Clone the entry, referencing the shared object. 3097 */ 3098 new_entry = vm_map_entry_create(new_map, &count); 3099 *new_entry = *old_entry; 3100 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3101 new_entry->wired_count = 0; 3102 3103 /* 3104 * Insert the entry into the new map -- we know we're 3105 * inserting at the end of the new map. 3106 */ 3107 3108 vm_map_entry_link(new_map, new_map->header.prev, 3109 new_entry); 3110 3111 /* 3112 * Update the physical map 3113 */ 3114 pmap_copy(new_map->pmap, old_map->pmap, 3115 new_entry->start, 3116 (old_entry->end - old_entry->start), 3117 old_entry->start); 3118 break; 3119 case VM_INHERIT_COPY: 3120 /* 3121 * Clone the entry and link into the map. 3122 */ 3123 new_entry = vm_map_entry_create(new_map, &count); 3124 *new_entry = *old_entry; 3125 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED; 3126 new_entry->wired_count = 0; 3127 new_entry->object.vm_object = NULL; 3128 vm_map_entry_link(new_map, new_map->header.prev, 3129 new_entry); 3130 vm_map_copy_entry(old_map, new_map, old_entry, 3131 new_entry); 3132 break; 3133 } 3134 old_entry = old_entry->next; 3135 } 3136 3137 new_map->size = old_map->size; 3138 old_map->infork = 0; 3139 vm_map_unlock(old_map); 3140 vm_map_unlock(new_map); 3141 vm_map_entry_release(count); 3142 3143 lwkt_reltoken(&vmobj_token); 3144 lwkt_reltoken(&vmspace_token); 3145 lwkt_reltoken(&vm_token); 3146 3147 return (vm2); 3148 } 3149 3150 /* 3151 * Create an auto-grow stack entry 3152 * 3153 * No requirements. 3154 */ 3155 int 3156 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, 3157 int flags, vm_prot_t prot, vm_prot_t max, int cow) 3158 { 3159 vm_map_entry_t prev_entry; 3160 vm_map_entry_t new_stack_entry; 3161 vm_size_t init_ssize; 3162 int rv; 3163 int count; 3164 vm_offset_t tmpaddr; 3165 3166 cow |= MAP_IS_STACK; 3167 3168 if (max_ssize < sgrowsiz) 3169 init_ssize = max_ssize; 3170 else 3171 init_ssize = sgrowsiz; 3172 3173 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 3174 vm_map_lock(map); 3175 3176 /* 3177 * Find space for the mapping 3178 */ 3179 if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) { 3180 if (vm_map_findspace(map, addrbos, max_ssize, 1, 3181 flags, &tmpaddr)) { 3182 vm_map_unlock(map); 3183 vm_map_entry_release(count); 3184 return (KERN_NO_SPACE); 3185 } 3186 addrbos = tmpaddr; 3187 } 3188 3189 /* If addr is already mapped, no go */ 3190 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) { 3191 vm_map_unlock(map); 3192 vm_map_entry_release(count); 3193 return (KERN_NO_SPACE); 3194 } 3195 3196 #if 0 3197 /* XXX already handled by kern_mmap() */ 3198 /* If we would blow our VMEM resource limit, no go */ 3199 if (map->size + init_ssize > 3200 curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) { 3201 vm_map_unlock(map); 3202 vm_map_entry_release(count); 3203 return (KERN_NO_SPACE); 3204 } 3205 #endif 3206 3207 /* 3208 * If we can't accomodate max_ssize in the current mapping, 3209 * no go. However, we need to be aware that subsequent user 3210 * mappings might map into the space we have reserved for 3211 * stack, and currently this space is not protected. 3212 * 3213 * Hopefully we will at least detect this condition 3214 * when we try to grow the stack. 3215 */ 3216 if ((prev_entry->next != &map->header) && 3217 (prev_entry->next->start < addrbos + max_ssize)) { 3218 vm_map_unlock(map); 3219 vm_map_entry_release(count); 3220 return (KERN_NO_SPACE); 3221 } 3222 3223 /* 3224 * We initially map a stack of only init_ssize. We will 3225 * grow as needed later. Since this is to be a grow 3226 * down stack, we map at the top of the range. 3227 * 3228 * Note: we would normally expect prot and max to be 3229 * VM_PROT_ALL, and cow to be 0. Possibly we should 3230 * eliminate these as input parameters, and just 3231 * pass these values here in the insert call. 3232 */ 3233 rv = vm_map_insert(map, &count, 3234 NULL, 0, addrbos + max_ssize - init_ssize, 3235 addrbos + max_ssize, 3236 VM_MAPTYPE_NORMAL, 3237 prot, max, 3238 cow); 3239 3240 /* Now set the avail_ssize amount */ 3241 if (rv == KERN_SUCCESS) { 3242 if (prev_entry != &map->header) 3243 vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize, &count); 3244 new_stack_entry = prev_entry->next; 3245 if (new_stack_entry->end != addrbos + max_ssize || 3246 new_stack_entry->start != addrbos + max_ssize - init_ssize) 3247 panic ("Bad entry start/end for new stack entry"); 3248 else 3249 new_stack_entry->aux.avail_ssize = max_ssize - init_ssize; 3250 } 3251 3252 vm_map_unlock(map); 3253 vm_map_entry_release(count); 3254 return (rv); 3255 } 3256 3257 /* 3258 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the 3259 * desired address is already mapped, or if we successfully grow 3260 * the stack. Also returns KERN_SUCCESS if addr is outside the 3261 * stack range (this is strange, but preserves compatibility with 3262 * the grow function in vm_machdep.c). 3263 * 3264 * No requirements. 3265 */ 3266 int 3267 vm_map_growstack (struct proc *p, vm_offset_t addr) 3268 { 3269 vm_map_entry_t prev_entry; 3270 vm_map_entry_t stack_entry; 3271 vm_map_entry_t new_stack_entry; 3272 struct vmspace *vm = p->p_vmspace; 3273 vm_map_t map = &vm->vm_map; 3274 vm_offset_t end; 3275 int grow_amount; 3276 int rv = KERN_SUCCESS; 3277 int is_procstack; 3278 int use_read_lock = 1; 3279 int count; 3280 3281 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 3282 Retry: 3283 if (use_read_lock) 3284 vm_map_lock_read(map); 3285 else 3286 vm_map_lock(map); 3287 3288 /* If addr is already in the entry range, no need to grow.*/ 3289 if (vm_map_lookup_entry(map, addr, &prev_entry)) 3290 goto done; 3291 3292 if ((stack_entry = prev_entry->next) == &map->header) 3293 goto done; 3294 if (prev_entry == &map->header) 3295 end = stack_entry->start - stack_entry->aux.avail_ssize; 3296 else 3297 end = prev_entry->end; 3298 3299 /* 3300 * This next test mimics the old grow function in vm_machdep.c. 3301 * It really doesn't quite make sense, but we do it anyway 3302 * for compatibility. 3303 * 3304 * If not growable stack, return success. This signals the 3305 * caller to proceed as he would normally with normal vm. 3306 */ 3307 if (stack_entry->aux.avail_ssize < 1 || 3308 addr >= stack_entry->start || 3309 addr < stack_entry->start - stack_entry->aux.avail_ssize) { 3310 goto done; 3311 } 3312 3313 /* Find the minimum grow amount */ 3314 grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE); 3315 if (grow_amount > stack_entry->aux.avail_ssize) { 3316 rv = KERN_NO_SPACE; 3317 goto done; 3318 } 3319 3320 /* 3321 * If there is no longer enough space between the entries 3322 * nogo, and adjust the available space. Note: this 3323 * should only happen if the user has mapped into the 3324 * stack area after the stack was created, and is 3325 * probably an error. 3326 * 3327 * This also effectively destroys any guard page the user 3328 * might have intended by limiting the stack size. 3329 */ 3330 if (grow_amount > stack_entry->start - end) { 3331 if (use_read_lock && vm_map_lock_upgrade(map)) { 3332 use_read_lock = 0; 3333 goto Retry; 3334 } 3335 use_read_lock = 0; 3336 stack_entry->aux.avail_ssize = stack_entry->start - end; 3337 rv = KERN_NO_SPACE; 3338 goto done; 3339 } 3340 3341 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr; 3342 3343 /* If this is the main process stack, see if we're over the 3344 * stack limit. 3345 */ 3346 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > 3347 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 3348 rv = KERN_NO_SPACE; 3349 goto done; 3350 } 3351 3352 /* Round up the grow amount modulo SGROWSIZ */ 3353 grow_amount = roundup (grow_amount, sgrowsiz); 3354 if (grow_amount > stack_entry->aux.avail_ssize) { 3355 grow_amount = stack_entry->aux.avail_ssize; 3356 } 3357 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > 3358 p->p_rlimit[RLIMIT_STACK].rlim_cur)) { 3359 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur - 3360 ctob(vm->vm_ssize); 3361 } 3362 3363 /* If we would blow our VMEM resource limit, no go */ 3364 if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) { 3365 rv = KERN_NO_SPACE; 3366 goto done; 3367 } 3368 3369 if (use_read_lock && vm_map_lock_upgrade(map)) { 3370 use_read_lock = 0; 3371 goto Retry; 3372 } 3373 use_read_lock = 0; 3374 3375 /* Get the preliminary new entry start value */ 3376 addr = stack_entry->start - grow_amount; 3377 3378 /* If this puts us into the previous entry, cut back our growth 3379 * to the available space. Also, see the note above. 3380 */ 3381 if (addr < end) { 3382 stack_entry->aux.avail_ssize = stack_entry->start - end; 3383 addr = end; 3384 } 3385 3386 rv = vm_map_insert(map, &count, 3387 NULL, 0, addr, stack_entry->start, 3388 VM_MAPTYPE_NORMAL, 3389 VM_PROT_ALL, VM_PROT_ALL, 3390 0); 3391 3392 /* Adjust the available stack space by the amount we grew. */ 3393 if (rv == KERN_SUCCESS) { 3394 if (prev_entry != &map->header) 3395 vm_map_clip_end(map, prev_entry, addr, &count); 3396 new_stack_entry = prev_entry->next; 3397 if (new_stack_entry->end != stack_entry->start || 3398 new_stack_entry->start != addr) 3399 panic ("Bad stack grow start/end in new stack entry"); 3400 else { 3401 new_stack_entry->aux.avail_ssize = 3402 stack_entry->aux.avail_ssize - 3403 (new_stack_entry->end - new_stack_entry->start); 3404 if (is_procstack) 3405 vm->vm_ssize += btoc(new_stack_entry->end - 3406 new_stack_entry->start); 3407 } 3408 3409 if (map->flags & MAP_WIREFUTURE) 3410 vm_map_unwire(map, new_stack_entry->start, 3411 new_stack_entry->end, FALSE); 3412 } 3413 3414 done: 3415 if (use_read_lock) 3416 vm_map_unlock_read(map); 3417 else 3418 vm_map_unlock(map); 3419 vm_map_entry_release(count); 3420 return (rv); 3421 } 3422 3423 /* 3424 * Unshare the specified VM space for exec. If other processes are 3425 * mapped to it, then create a new one. The new vmspace is null. 3426 * 3427 * No requirements. 3428 */ 3429 void 3430 vmspace_exec(struct proc *p, struct vmspace *vmcopy) 3431 { 3432 struct vmspace *oldvmspace = p->p_vmspace; 3433 struct vmspace *newvmspace; 3434 vm_map_t map = &p->p_vmspace->vm_map; 3435 3436 /* 3437 * If we are execing a resident vmspace we fork it, otherwise 3438 * we create a new vmspace. Note that exitingcnt and upcalls 3439 * are not copied to the new vmspace. 3440 */ 3441 lwkt_gettoken(&vmspace_token); 3442 if (vmcopy) { 3443 newvmspace = vmspace_fork(vmcopy); 3444 } else { 3445 newvmspace = vmspace_alloc(map->min_offset, map->max_offset); 3446 bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy, 3447 (caddr_t)&oldvmspace->vm_endcopy - 3448 (caddr_t)&oldvmspace->vm_startcopy); 3449 } 3450 3451 /* 3452 * Finish initializing the vmspace before assigning it 3453 * to the process. The vmspace will become the current vmspace 3454 * if p == curproc. 3455 */ 3456 pmap_pinit2(vmspace_pmap(newvmspace)); 3457 pmap_replacevm(p, newvmspace, 0); 3458 sysref_put(&oldvmspace->vm_sysref); 3459 lwkt_reltoken(&vmspace_token); 3460 } 3461 3462 /* 3463 * Unshare the specified VM space for forcing COW. This 3464 * is called by rfork, for the (RFMEM|RFPROC) == 0 case. 3465 * 3466 * The exitingcnt test is not strictly necessary but has been 3467 * included for code sanity (to make the code a bit more deterministic). 3468 */ 3469 void 3470 vmspace_unshare(struct proc *p) 3471 { 3472 struct vmspace *oldvmspace = p->p_vmspace; 3473 struct vmspace *newvmspace; 3474 3475 lwkt_gettoken(&vmspace_token); 3476 if (oldvmspace->vm_sysref.refcnt == 1 && oldvmspace->vm_exitingcnt == 0) 3477 return; 3478 newvmspace = vmspace_fork(oldvmspace); 3479 pmap_pinit2(vmspace_pmap(newvmspace)); 3480 pmap_replacevm(p, newvmspace, 0); 3481 sysref_put(&oldvmspace->vm_sysref); 3482 lwkt_reltoken(&vmspace_token); 3483 } 3484 3485 /* 3486 * Finds the VM object, offset, and protection for a given virtual address 3487 * in the specified map, assuming a page fault of the type specified. 3488 * 3489 * Leaves the map in question locked for read; return values are guaranteed 3490 * until a vm_map_lookup_done call is performed. Note that the map argument 3491 * is in/out; the returned map must be used in the call to vm_map_lookup_done. 3492 * 3493 * A handle (out_entry) is returned for use in vm_map_lookup_done, to make 3494 * that fast. 3495 * 3496 * If a lookup is requested with "write protection" specified, the map may 3497 * be changed to perform virtual copying operations, although the data 3498 * referenced will remain the same. 3499 * 3500 * No requirements. 3501 */ 3502 int 3503 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ 3504 vm_offset_t vaddr, 3505 vm_prot_t fault_typea, 3506 vm_map_entry_t *out_entry, /* OUT */ 3507 vm_object_t *object, /* OUT */ 3508 vm_pindex_t *pindex, /* OUT */ 3509 vm_prot_t *out_prot, /* OUT */ 3510 boolean_t *wired) /* OUT */ 3511 { 3512 vm_map_entry_t entry; 3513 vm_map_t map = *var_map; 3514 vm_prot_t prot; 3515 vm_prot_t fault_type = fault_typea; 3516 int use_read_lock = 1; 3517 int rv = KERN_SUCCESS; 3518 3519 RetryLookup: 3520 if (use_read_lock) 3521 vm_map_lock_read(map); 3522 else 3523 vm_map_lock(map); 3524 3525 /* 3526 * If the map has an interesting hint, try it before calling full 3527 * blown lookup routine. 3528 */ 3529 entry = map->hint; 3530 *out_entry = entry; 3531 3532 if ((entry == &map->header) || 3533 (vaddr < entry->start) || (vaddr >= entry->end)) { 3534 vm_map_entry_t tmp_entry; 3535 3536 /* 3537 * Entry was either not a valid hint, or the vaddr was not 3538 * contained in the entry, so do a full lookup. 3539 */ 3540 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) { 3541 rv = KERN_INVALID_ADDRESS; 3542 goto done; 3543 } 3544 3545 entry = tmp_entry; 3546 *out_entry = entry; 3547 } 3548 3549 /* 3550 * Handle submaps. 3551 */ 3552 if (entry->maptype == VM_MAPTYPE_SUBMAP) { 3553 vm_map_t old_map = map; 3554 3555 *var_map = map = entry->object.sub_map; 3556 if (use_read_lock) 3557 vm_map_unlock_read(old_map); 3558 else 3559 vm_map_unlock(old_map); 3560 use_read_lock = 1; 3561 goto RetryLookup; 3562 } 3563 3564 /* 3565 * Check whether this task is allowed to have this page. 3566 * Note the special case for MAP_ENTRY_COW 3567 * pages with an override. This is to implement a forced 3568 * COW for debuggers. 3569 */ 3570 3571 if (fault_type & VM_PROT_OVERRIDE_WRITE) 3572 prot = entry->max_protection; 3573 else 3574 prot = entry->protection; 3575 3576 fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); 3577 if ((fault_type & prot) != fault_type) { 3578 rv = KERN_PROTECTION_FAILURE; 3579 goto done; 3580 } 3581 3582 if ((entry->eflags & MAP_ENTRY_USER_WIRED) && 3583 (entry->eflags & MAP_ENTRY_COW) && 3584 (fault_type & VM_PROT_WRITE) && 3585 (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) { 3586 rv = KERN_PROTECTION_FAILURE; 3587 goto done; 3588 } 3589 3590 /* 3591 * If this page is not pageable, we have to get it for all possible 3592 * accesses. 3593 */ 3594 *wired = (entry->wired_count != 0); 3595 if (*wired) 3596 prot = fault_type = entry->protection; 3597 3598 /* 3599 * Virtual page tables may need to update the accessed (A) bit 3600 * in a page table entry. Upgrade the fault to a write fault for 3601 * that case if the map will support it. If the map does not support 3602 * it the page table entry simply will not be updated. 3603 */ 3604 if (entry->maptype == VM_MAPTYPE_VPAGETABLE) { 3605 if (prot & VM_PROT_WRITE) 3606 fault_type |= VM_PROT_WRITE; 3607 } 3608 3609 /* 3610 * If the entry was copy-on-write, we either ... 3611 */ 3612 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { 3613 /* 3614 * If we want to write the page, we may as well handle that 3615 * now since we've got the map locked. 3616 * 3617 * If we don't need to write the page, we just demote the 3618 * permissions allowed. 3619 */ 3620 3621 if (fault_type & VM_PROT_WRITE) { 3622 /* 3623 * Make a new object, and place it in the object 3624 * chain. Note that no new references have appeared 3625 * -- one just moved from the map to the new 3626 * object. 3627 */ 3628 3629 if (use_read_lock && vm_map_lock_upgrade(map)) { 3630 use_read_lock = 0; 3631 goto RetryLookup; 3632 } 3633 use_read_lock = 0; 3634 3635 vm_map_entry_shadow(entry); 3636 } else { 3637 /* 3638 * We're attempting to read a copy-on-write page -- 3639 * don't allow writes. 3640 */ 3641 3642 prot &= ~VM_PROT_WRITE; 3643 } 3644 } 3645 3646 /* 3647 * Create an object if necessary. 3648 */ 3649 if (entry->object.vm_object == NULL && 3650 !map->system_map) { 3651 if (use_read_lock && vm_map_lock_upgrade(map)) { 3652 use_read_lock = 0; 3653 goto RetryLookup; 3654 } 3655 use_read_lock = 0; 3656 vm_map_entry_allocate_object(entry); 3657 } 3658 3659 /* 3660 * Return the object/offset from this entry. If the entry was 3661 * copy-on-write or empty, it has been fixed up. 3662 */ 3663 3664 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); 3665 *object = entry->object.vm_object; 3666 3667 /* 3668 * Return whether this is the only map sharing this data. On 3669 * success we return with a read lock held on the map. On failure 3670 * we return with the map unlocked. 3671 */ 3672 *out_prot = prot; 3673 done: 3674 if (rv == KERN_SUCCESS) { 3675 if (use_read_lock == 0) 3676 vm_map_lock_downgrade(map); 3677 } else if (use_read_lock) { 3678 vm_map_unlock_read(map); 3679 } else { 3680 vm_map_unlock(map); 3681 } 3682 return (rv); 3683 } 3684 3685 /* 3686 * Releases locks acquired by a vm_map_lookup() 3687 * (according to the handle returned by that lookup). 3688 * 3689 * No other requirements. 3690 */ 3691 void 3692 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count) 3693 { 3694 /* 3695 * Unlock the main-level map 3696 */ 3697 vm_map_unlock_read(map); 3698 if (count) 3699 vm_map_entry_release(count); 3700 } 3701 3702 #include "opt_ddb.h" 3703 #ifdef DDB 3704 #include <sys/kernel.h> 3705 3706 #include <ddb/ddb.h> 3707 3708 /* 3709 * Debugging only 3710 */ 3711 DB_SHOW_COMMAND(map, vm_map_print) 3712 { 3713 static int nlines; 3714 /* XXX convert args. */ 3715 vm_map_t map = (vm_map_t)addr; 3716 boolean_t full = have_addr; 3717 3718 vm_map_entry_t entry; 3719 3720 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", 3721 (void *)map, 3722 (void *)map->pmap, map->nentries, map->timestamp); 3723 nlines++; 3724 3725 if (!full && db_indent) 3726 return; 3727 3728 db_indent += 2; 3729 for (entry = map->header.next; entry != &map->header; 3730 entry = entry->next) { 3731 db_iprintf("map entry %p: start=%p, end=%p\n", 3732 (void *)entry, (void *)entry->start, (void *)entry->end); 3733 nlines++; 3734 { 3735 static char *inheritance_name[4] = 3736 {"share", "copy", "none", "donate_copy"}; 3737 3738 db_iprintf(" prot=%x/%x/%s", 3739 entry->protection, 3740 entry->max_protection, 3741 inheritance_name[(int)(unsigned char)entry->inheritance]); 3742 if (entry->wired_count != 0) 3743 db_printf(", wired"); 3744 } 3745 if (entry->maptype == VM_MAPTYPE_SUBMAP) { 3746 /* XXX no %qd in kernel. Truncate entry->offset. */ 3747 db_printf(", share=%p, offset=0x%lx\n", 3748 (void *)entry->object.sub_map, 3749 (long)entry->offset); 3750 nlines++; 3751 if ((entry->prev == &map->header) || 3752 (entry->prev->object.sub_map != 3753 entry->object.sub_map)) { 3754 db_indent += 2; 3755 vm_map_print((db_expr_t)(intptr_t) 3756 entry->object.sub_map, 3757 full, 0, NULL); 3758 db_indent -= 2; 3759 } 3760 } else { 3761 /* XXX no %qd in kernel. Truncate entry->offset. */ 3762 db_printf(", object=%p, offset=0x%lx", 3763 (void *)entry->object.vm_object, 3764 (long)entry->offset); 3765 if (entry->eflags & MAP_ENTRY_COW) 3766 db_printf(", copy (%s)", 3767 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); 3768 db_printf("\n"); 3769 nlines++; 3770 3771 if ((entry->prev == &map->header) || 3772 (entry->prev->object.vm_object != 3773 entry->object.vm_object)) { 3774 db_indent += 2; 3775 vm_object_print((db_expr_t)(intptr_t) 3776 entry->object.vm_object, 3777 full, 0, NULL); 3778 nlines += 4; 3779 db_indent -= 2; 3780 } 3781 } 3782 } 3783 db_indent -= 2; 3784 if (db_indent == 0) 3785 nlines = 0; 3786 } 3787 3788 /* 3789 * Debugging only 3790 */ 3791 DB_SHOW_COMMAND(procvm, procvm) 3792 { 3793 struct proc *p; 3794 3795 if (have_addr) { 3796 p = (struct proc *) addr; 3797 } else { 3798 p = curproc; 3799 } 3800 3801 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", 3802 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, 3803 (void *)vmspace_pmap(p->p_vmspace)); 3804 3805 vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL); 3806 } 3807 3808 #endif /* DDB */ 3809