/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
 * $DragonFly: src/sys/vm/vm_map.c,v 1.12 2003/09/26 19:23:34 dillon Exp $
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>

#include <sys/thread2.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */
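/*
 * Illustrative sketch of the list layout described above (addresses
 * invented for the example): each vm_map embeds a "header" sentinel
 * entry, real entries are kept sorted by address in a circular
 * doubly-linked list hanging off of it, and "hint" caches the entry
 * found by the last lookup:
 *
 *	header <-> [0x1000,0x3000) <-> [0x8000,0xa000) <-> [0xc000,0xd000) <-> header
 *	                                      ^
 *	                                     hint
 *
 * A lookup that hits the hinted entry returns without walking the list
 * at all.
 */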
/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

static struct vm_zone mapentzone_store, mapzone_store;
static vm_zone_t mapentzone, mapzone, vmspace_zone;
static struct vm_object mapentobj, mapobj;

static struct vm_map_entry map_entry_init[MAX_MAPENT];
static struct vm_map map_init[MAX_KMAP];

static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *);
static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
		vm_map_entry_t);
static void vm_map_split (vm_map_entry_t);
static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry,
		vm_offset_t start, vm_offset_t end, int *count, int flags);

void
vm_map_startup(void)
{
	mapzone = &mapzone_store;
	zbootinit(mapzone, "MAP", sizeof(struct vm_map),
		map_init, MAX_KMAP);
	mapentzone = &mapentzone_store;
	zbootinit(mapentzone, "MAP ENTRY", sizeof(struct vm_map_entry),
		map_entry_init, MAX_MAPENT);
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
	struct vmspace *vm;

	vm = zalloc(vmspace_zone);
	vm_map_init(&vm->vm_map, min, max);
	pmap_pinit(vmspace_pmap(vm));
	vm->vm_map.pmap = vmspace_pmap(vm);	/* XXX */
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_exitingcnt = 0;
	return (vm);
}

void
vm_init2(void)
{
	zinitna(mapentzone, &mapentobj, NULL, 0, 0, ZONE_USE_RESERVE, 1);
	zinitna(mapzone, &mapobj, NULL, 0, 0, 0, 1);
	vmspace_zone = zinit("VMSPACE", sizeof(struct vmspace), 0, 0, 3);
	pmap_init2();
	vm_object_init2();
}

static __inline void
vmspace_dofree(struct vmspace *vm)
{
	int count;

	/*
	 * Make sure any SysV shm is freed; it might not have been freed
	 * in exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(&vm->vm_map);
	vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
		vm->vm_map.max_offset, &count);
	vm_map_unlock(&vm->vm_map);
	vm_map_entry_release(count);

	pmap_release(vmspace_pmap(vm));
	zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{
	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (--vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	vm = p->p_vmspace;
	p->p_vmspace = NULL;

	/*
	 * cleanup by parent process wait()ing on exiting child.  vm_refcnt
	 * may not be 0 (e.g. fork() and child exits without exec()ing).
	 * exitingcnt may increment above 0 and drop back down to zero
	 * several times while vm_refcnt is held non-zero.  vm_refcnt
	 * may also increment above 0 and drop back down to zero several
	 * times while vm_exitingcnt is held non-zero.
	 *
	 * The last wait on the exiting child's vmspace will clean up
	 * the remainder of the vmspace.
	 */
	if (--vm->vm_exitingcnt == 0 && vm->vm_refcnt == 0)
		vmspace_dofree(vm);
}
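/*
 * Illustrative sketch of how vm_refcnt and vm_exitingcnt are expected
 * to interact over a fork()/exit()/wait() sequence.  The call sites
 * named here live in kern_fork.c/kern_exit.c and are cited from
 * memory, so treat the exact placement as an assumption:
 *
 *	fork:	child receives a vmspace with vm_refcnt 1
 *	exit:	exit1() bumps vm_exitingcnt and drops its vm_refcnt
 *		reference; the vmspace survives while vm_exitingcnt != 0
 *	wait:	vmspace_exitfree() drops vm_exitingcnt; once both
 *		counts are zero vmspace_dofree() tears everything down
 */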
/*
 * vmspace_swap_count() - count the approximate swap usage in pages for a
 *			  vmspace.
 *
 *	Swap usage is determined by taking the proportional swap used by
 *	VM objects backing the VM map.  To make up for fractional losses,
 *	if the VM object has any swap use at all the associated map entries
 *	count for at least 1 swap page.
 */
int
vmspace_swap_count(struct vmspace *vmspace)
{
	vm_map_t map = &vmspace->vm_map;
	vm_map_entry_t cur;
	int count = 0;

	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		vm_object_t object;

		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
		    (object = cur->object.vm_object) != NULL &&
		    object->type == OBJT_SWAP
		) {
			int n = (cur->end - cur->start) / PAGE_SIZE;

			if (object->un_pager.swp.swp_bcount) {
				count += object->un_pager.swp.swp_bcount *
				    SWAP_META_PAGES * n / object->size + 1;
			}
		}
	}
	return(count);
}


/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = zalloc(mapzone);
	vm_map_init(result, min, max);
	result->pmap = pmap;
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
void
vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max)
{
	map->header.next = map->header.prev = &map->header;
	map->nentries = 0;
	map->size = 0;
	map->system_map = 0;
	map->infork = 0;
	map->min_offset = min;
	map->max_offset = max;
	map->first_free = &map->header;
	map->hint = &map->header;
	map->timestamp = 0;
	lockinit(&map->lock, 0, "thrd_sleep", 0, LK_NOPAUSE);
}

/*
 *	vm_map_entry_reserve:
 *
 *	Reserves vm_map_entry structures outside of the critical path.
 */
int
vm_map_entry_reserve(int count)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;

	crit_enter();
	gd->gd_vme_avail -= count;

	/*
	 * Make sure we have enough structures in gd_vme_base to handle
	 * the reservation request.
	 */
	while (gd->gd_vme_avail < 0) {
		entry = zalloc(mapentzone);
		entry->next = gd->gd_vme_base;
		gd->gd_vme_base = entry;
		++gd->gd_vme_avail;
	}
	crit_exit();
	return(count);
}

/*
 *	vm_map_entry_release:
 *
 *	Releases previously reserved vm_map_entry structures that were not
 *	used.  If we have too much junk in our per-cpu cache clean some of
 *	it out.
 */
void
vm_map_entry_release(int count)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;

	crit_enter();
	gd->gd_vme_avail += count;
	while (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
		entry = gd->gd_vme_base;
		KKASSERT(entry != NULL);
		gd->gd_vme_base = entry->next;
		--gd->gd_vme_avail;
		crit_exit();
		zfree(mapentzone, entry);
		crit_enter();
	}
	crit_exit();
}
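/*
 * Illustrative sketch (this mirrors the pattern vmspace_dofree() above
 * actually uses): callers bracket map operations with a reservation so
 * the operation itself never has to call zalloc() with the map locked:
 *
 *	int count;
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	vm_map_delete(map, start, end, &count);	/- or clip/insert, etc. -/
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 *
 * The &count argument lets the operation consume and return entries
 * against the reservation.
 */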
/*
 *	vm_map_entry_kreserve:
 *
 *	Reserve map entry structures for use in kernel_map or (if it exists)
 *	kmem_map.  These entries have *ALREADY* been reserved on a per-cpu
 *	basis.
 *
 *	XXX if multiple kernel map entries are used without any intervening
 *	use by another map the KKASSERT() may assert.
 */
int
vm_map_entry_kreserve(int count)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	gd->gd_vme_kdeficit += count;
	crit_exit();
	KKASSERT(gd->gd_vme_base != NULL);
	return(count);
}

/*
 *	vm_map_entry_krelease:
 *
 *	Release previously reserved map entries for kernel_map or kmem_map
 *	use.  This routine determines how many entries were actually used and
 *	replenishes the kernel reserve supply from vme_avail.
 *
 *	If there is insufficient supply vme_avail will go negative, which is
 *	ok.  We cannot safely call zalloc in this function without getting
 *	into a recursion deadlock.  zalloc() will call vm_map_entry_reserve()
 *	to regenerate the lost entries.
 */
void
vm_map_entry_krelease(int count)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	gd->gd_vme_kdeficit -= count;
	gd->gd_vme_avail -= gd->gd_vme_kdeficit;	/* can go negative */
	gd->gd_vme_kdeficit = 0;
	crit_exit();
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.  No entry fields are filled
 *	in.
 *
 *	This routine may be called from an interrupt thread but not a FAST
 *	interrupt.  This routine may recurse the map lock.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map, int *countp)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;

	KKASSERT(*countp > 0);
	--*countp;
	crit_enter();
	entry = gd->gd_vme_base;
	KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
	gd->gd_vme_base = entry->next;
	crit_exit();
	return(entry);
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Dispose of a vm_map_entry that is no longer being referenced.  This
 *	function may be called from an interrupt.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
{
	struct globaldata *gd = mycpu;

	++*countp;
	crit_enter();
	entry->next = gd->gd_vme_base;
	gd->gd_vme_base = entry;
	crit_exit();
}


/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static __inline void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{
	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;
}

static __inline void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t prev;
	vm_map_entry_t next;

	if (entry->eflags & MAP_ENTRY_IN_TRANSITION)
		panic("vm_map_entry_unlink: attempt to mess with locked entry! %p", entry);
	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	map->nentries--;
}

/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.
 */
#define	SAVE_HINT(map,value) \
		(map)->hint = (value);
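/*
 * Illustrative sketch (the same bracket vm_map_pageable() uses further
 * down in this file): operations against kernel_map draw on the
 * pre-reserved per-cpu entries instead of calling into zalloc,
 * avoiding the recursion described above:
 *
 *	if (map == kernel_map)
 *		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
 *	else
 *		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	... locked map operation, passing &count ...
 *	if (map == kernel_map)
 *		vm_map_entry_krelease(count);
 *	else
 *		vm_map_entry_release(count);
 */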
/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(vm_map_t map, vm_offset_t address,
		    vm_map_entry_t *entry /* OUT */)
{
	vm_map_entry_t cur;
	vm_map_entry_t last;

	/*
	 * Start looking either from the head of the list, or from the hint.
	 */
	cur = map->hint;

	if (cur == &map->header)
		cur = cur->next;

	if (address >= cur->start) {
		/*
		 * Go from hint to end of list.
		 *
		 * But first, make a quick check to see if we are already
		 * looking at the entry we want (which is usually the case).
		 * Note also that we don't need to save the hint here...
		 * it is the same hint (unless we are at the header, in
		 * which case the hint didn't buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			*entry = cur;
			return (TRUE);
		}
	} else {
		/*
		 * Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 * Search linearly
	 */
	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 * Save this lookup for future hints, and
				 * return
				 */
				*entry = cur;
				SAVE_HINT(map, cur);
				return (TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	return (FALSE);
}
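/*
 * Illustrative sketch (the idiom used throughout this file, e.g. in
 * vm_map_submap() and vm_map_protect() below): a failed lookup still
 * returns the predecessor entry, so callers wanting the first entry
 * at-or-after an address step forward on FALSE:
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, start, &entry)) {
 *		... start falls inside entry, clip if needed ...
 *	} else {
 *		entry = entry->next;	/- first entry past the hole -/
 *	}
 */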
/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.  Requires that
 *	sufficient vm_map_entry structures have been reserved and tracks
 *	the use via countp.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, int *countp,
	      vm_object_t object, vm_ooffset_t offset,
	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
	      int cow)
{
	vm_map_entry_t new_entry;
	vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
	vm_eflags_t protoeflags;

	/*
	 * Check that the start and end points are not bogus.
	 */
	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_NOFAULT) {
		protoeflags |= MAP_ENTRY_NOFAULT;

		KASSERT(object == NULL,
			("vm_map_insert: paradoxical MAP_NOFAULT request"));
	}
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;

	if (object) {
		/*
		 * When object is non-NULL, it could be shared with another
		 * process.  We have to set or clear OBJ_ONEMAPPING
		 * appropriately.
		 */
		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		}
	}
	else if ((prev_entry != &map->header) &&
		 (prev_entry->eflags == protoeflags) &&
		 (prev_entry->end == start) &&
		 (prev_entry->wired_count == 0) &&
		 ((prev_entry->object.vm_object == NULL) ||
		  vm_object_coalesce(prev_entry->object.vm_object,
				     OFF_TO_IDX(prev_entry->offset),
				     (vm_size_t)(prev_entry->end - prev_entry->start),
				     (vm_size_t)(end - prev_entry->end)))) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->end);
			prev_entry->end = end;
			vm_map_simplify_entry(map, prev_entry, countp);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
			(prev_entry->end - prev_entry->start);
		vm_object_reference(object);
	}

	/*
	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
	 * in things like the buffer map where we manage kva but do not manage
	 * backing objects.
	 */

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map, countp);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->avail_ssize = 0;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Update the free space hint
	 */
	if ((map->first_free == prev_entry) &&
	    (prev_entry->end >= new_entry->start)) {
		map->first_free = new_entry;
	}

#if 0
	/*
	 * Temporarily removed to avoid MAP_STACK panic, due to
	 * MAP_STACK being a huge hack.  Will be added back in
	 * when MAP_STACK (and the user stack mapping) is fixed.
	 */
	/*
	 * It may be possible to simplify the entry
	 */
	vm_map_simplify_entry(map, new_entry, countp);
#endif

	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
		pmap_object_init_pt(map->pmap, start,
				    object, OFF_TO_IDX(offset), end - start,
				    cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}
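/*
 * Illustrative sketch of the rule stated in the header comment above
 * (the caller shown is hypothetical): the object reference backing the
 * new entry is taken by the caller, not by vm_map_insert():
 *
 *	vm_object_reference(object);
 *	rv = vm_map_insert(map, &count, object, offset,
 *			   start, start + size,
 *			   VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);	/- undo on failure -/
 */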
/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 *
 * This function returns an arbitrarily aligned pointer.  If no
 * particular alignment is required you should pass align as 1.  Note that
 * the map may return PAGE_SIZE aligned pointers if all the lengths used in
 * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
 * argument.
 *
 * 'align' should be a power of 2 but is not required to be.
 */
int
vm_map_findspace(
	vm_map_t map,
	vm_offset_t start,
	vm_size_t length,
	vm_offset_t align,
	vm_offset_t *addr)
{
	vm_map_entry_t entry, next;
	vm_offset_t end;
	vm_offset_t align_mask;

	if (start < map->min_offset)
		start = map->min_offset;
	if (start > map->max_offset)
		return (1);

	/*
	 * If the alignment is not a power of 2 we will have to use
	 * a mod/division, set align_mask to a special value.
	 */
	if ((align | (align - 1)) + 1 != (align << 1))
		align_mask = (vm_offset_t)-1;
	else
		align_mask = align - 1;

retry:
	/*
	 * Look for the first possible address; if there's already something
	 * at this address, we have to start after it.
	 */
	if (start == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			start = entry->end;
	} else {
		vm_map_entry_t tmp;

		if (vm_map_lookup_entry(map, start, &tmp))
			start = tmp->end;
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in the
	 * gap between existing regions, or after the very last region.
	 */
	for (;; start = (entry = next)->end) {
		/*
		 * Adjust the proposed start by the requested alignment,
		 * be sure that we didn't wrap the address.
		 */
		if (align_mask == (vm_offset_t)-1)
			end = ((start + align - 1) / align) * align;
		else
			end = (start + align_mask) & ~align_mask;
		if (end < start)
			return (1);
		start = end;
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address.
		 * Then check to see if this is the last entry or if the
		 * proposed end fits in the gap between this and the next
		 * entry.
		 */
		end = start + length;
		if (end > map->max_offset || end < start)
			return (1);
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
	}
	SAVE_HINT(map, entry);
	if (map == kernel_map) {
		vm_offset_t ksize;
		if ((ksize = round_page(start + length)) > kernel_vm_end) {
			pmap_growkernel(ksize);
			goto retry;
		}
	}
	*addr = start;
	return (0);
}
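/*
 * Illustrative arithmetic for the power-of-2 test above (values
 * invented for the example): for align == 16,
 * (16 | 15) + 1 == 32 == 16 << 1, so the cheap mask path is taken:
 *
 *	end = (start + 0xf) & ~0xf;	/- start 0x1234 -> 0x1240 -/
 *
 * For a non-power-of-2 align such as 24, (24 | 23) + 1 == 32 != 48,
 * so align_mask becomes -1 and the divide path rounds up instead:
 *
 *	end = ((start + 23) / 24) * 24;	/- start 4660 -> 4680 -/
 */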
/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	/* IN/OUT */
	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
	    vm_prot_t max, int cow)
{
	vm_offset_t start;
	int result;
	int count;
#if defined(USE_KMEM_MAP)
	int s = 0;
#endif

	start = *addr;

#if defined(USE_KMEM_MAP)
	if (map == kmem_map || map == mb_map)
		s = splvm();
#endif

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	if (find_space) {
		if (vm_map_findspace(map, start, length, 1, addr)) {
			vm_map_unlock(map);
			vm_map_entry_release(count);
#if defined(USE_KMEM_MAP)
			if (map == kmem_map || map == mb_map)
				splx(s);
#endif
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, &count, object, offset,
			       start, start + length, prot, max, cow);
	vm_map_unlock(map);
	vm_map_entry_release(count);

#if defined(USE_KMEM_MAP)
	if (map == kmem_map || map == mb_map)
		splx(s);
#endif

	return (result);
}

/*
 *	vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.  This
 *	routine also has the ability to merge with both neighbors.
 *
 *	The map must be locked.
 *
 *	This routine guarantees that the passed entry remains valid (though
 *	possibly extended).  When merging, this routine may delete one or
 *	both neighbors.  No action is taken on entries which have their
 *	in-transition flag set.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) {
		++mycpu->gd_cnt.v_intrans_coll;
		return;
	}

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ( (prev->end == entry->start) &&
		     (prev->object.vm_object == entry->object.vm_object) &&
		     (!prev->object.vm_object ||
			(prev->offset + prevsize == entry->offset)) &&
		     (prev->eflags == entry->eflags) &&
		     (prev->protection == entry->protection) &&
		     (prev->max_protection == entry->max_protection) &&
		     (prev->inheritance == entry->inheritance) &&
		     (prev->wired_count == entry->wired_count)) {
			if (map->first_free == prev)
				map->first_free = entry;
			if (map->hint == prev)
				map->hint = entry;
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			vm_map_entry_dispose(map, prev, countp);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		     (!entry->object.vm_object ||
			(entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count)) {
			if (map->first_free == next)
				map->first_free = entry;
			if (map->hint == next)
				map->hint = entry;
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			vm_map_entry_dispose(map, next, countp);
		}
	}
}
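/*
 * Illustrative sketch (addresses invented): two entries that are
 * adjacent, back the same object at consecutive offsets, and agree on
 * all attributes collapse into one, undoing an earlier clip:
 *
 *	before:	[0x1000,0x3000) obj A off 0 | [0x3000,0x5000) obj A off 0x2000
 *	after:	[0x1000,0x5000) obj A off 0
 *
 * The extra reference the clip took on object A is dropped via
 * vm_object_deallocate() when the absorbed neighbor is disposed.
 */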
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr, countp) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr, countp); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start, int *countp)
{
	vm_map_entry_t new_entry;

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	vm_map_simplify_entry(map, entry, countp);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
					    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	new_entry = vm_map_entry_create(map, countp);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_end(map, entry, endaddr, countp) \
{ \
	if (endaddr < entry->end) \
		_vm_map_clip_end(map, entry, endaddr, countp); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end, int *countp)
{
	vm_map_entry_t new_entry;

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
					    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */
	new_entry = vm_map_entry_create(map, countp);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}
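/*
 * Illustrative sketch (addresses invented): clipping splits an entry
 * at a requested boundary so a range operation can treat the range
 * exactly:
 *
 *	entry:			[0x1000,0x5000) off 0
 *	clip_start at 0x2000:	[0x1000,0x2000) off 0 | [0x2000,0x5000) off 0x1000
 *	clip_end at 0x4000:	... | [0x2000,0x4000) off 0x1000 | [0x4000,0x5000) off 0x3000
 *
 * Both halves reference the same backing object (hence the
 * vm_object_reference() calls above); vm_map_simplify_entry() can
 * merge them back together later.
 */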
/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end) \
{ \
	if (start < vm_map_min(map)) \
		start = vm_map_min(map); \
	if (end > vm_map_max(map)) \
		end = vm_map_max(map); \
	if (start > end) \
		start = end; \
}

/*
 *	vm_map_transition_wait:	[ kernel use only ]
 *
 *	Used to block when an in-transition collision occurs.  The map
 *	is unlocked for the sleep and relocked before the return.
 */
static
void
vm_map_transition_wait(vm_map_t map)
{
	vm_map_unlock(map);
	tsleep(map, 0, "vment", 0);
	vm_map_lock(map);
}

/*
 * CLIP_CHECK_BACK
 * CLIP_CHECK_FWD
 *
 *	When we do blocking operations with the map lock held it is
 *	possible that a clip might have occurred on our in-transit entry,
 *	requiring an adjustment to the entry in our loop.  These macros
 *	help the pageable and clip_range code deal with the case.  The
 *	conditional costs virtually nothing if no clipping has occurred.
 */

#define CLIP_CHECK_BACK(entry, save_start)		\
    do {						\
	    while (entry->start != save_start) {	\
		    entry = entry->prev;		\
		    KASSERT(entry != &map->header, ("bad entry clip")); \
	    }						\
    } while(0)

#define CLIP_CHECK_FWD(entry, save_end)			\
    do {						\
	    while (entry->end != save_end) {		\
		    entry = entry->next;		\
		    KASSERT(entry != &map->header, ("bad entry clip")); \
	    }						\
    } while(0)


/*
 *	vm_map_clip_range:	[ kernel use only ]
 *
 *	Clip the specified range and return the base entry.  The
 *	range may cover several entries starting at the returned base
 *	and the first and last entry in the covering sequence will be
 *	properly clipped to the requested start and end address.
 *
 *	If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
 *	flag.
 *
 *	The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
 *	covered by the requested range.
 *
 *	The map must be exclusively locked on entry and will remain locked
 *	on return.  If no range exists or the range contains holes and you
 *	specified that no holes were allowed, NULL will be returned.  This
 *	routine may temporarily unlock the map in order to avoid a deadlock
 *	when sleeping.
 */
static
vm_map_entry_t
vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end,
	int *countp, int flags)
{
	vm_map_entry_t start_entry;
	vm_map_entry_t entry;

	/*
	 * Locate the entry and effect initial clipping.  The in-transition
	 * case does not occur very often so do not try to optimize it.
	 */
again:
	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
		return (NULL);
	entry = start_entry;
	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
		++mycpu->gd_cnt.v_intrans_coll;
		++mycpu->gd_cnt.v_intrans_wait;
		vm_map_transition_wait(map);
		/*
		 * entry and/or start_entry may have been clipped while
		 * we slept, or may have gone away entirely.  We have
		 * to restart from the lookup.
		 */
		goto again;
	}
	/*
	 * Since we hold an exclusive map lock we do not have to restart
	 * after clipping, even though clipping may block in zalloc.
	 */
	vm_map_clip_start(map, entry, start, countp);
	vm_map_clip_end(map, entry, end, countp);
	entry->eflags |= MAP_ENTRY_IN_TRANSITION;

	/*
	 * Scan entries covered by the range.  When working on the next
	 * entry a restart need only re-loop on the current entry which
	 * we have already locked, since 'next' may have changed.  Also,
	 * even though entry is safe, it may have been clipped so we
	 * have to iterate forwards through the clip after sleeping.
	 */
	while (entry->next != &map->header && entry->next->start < end) {
		vm_map_entry_t next = entry->next;

		if (flags & MAP_CLIP_NO_HOLES) {
			if (next->start > entry->end) {
				vm_map_unclip_range(map, start_entry,
					start, entry->end, countp, flags);
				return(NULL);
			}
		}

		if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
			vm_offset_t save_end = entry->end;
			next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			++mycpu->gd_cnt.v_intrans_coll;
			++mycpu->gd_cnt.v_intrans_wait;
			vm_map_transition_wait(map);

			/*
			 * clips might have occurred while we blocked.
			 */
			CLIP_CHECK_FWD(entry, save_end);
			CLIP_CHECK_BACK(start_entry, start);
			continue;
		}
		/*
		 * No restart necessary even though clip_end may block, we
		 * are holding the map lock.
		 */
		vm_map_clip_end(map, next, end, countp);
		next->eflags |= MAP_ENTRY_IN_TRANSITION;
		entry = next;
	}
	if (flags & MAP_CLIP_NO_HOLES) {
		if (entry->end != end) {
			vm_map_unclip_range(map, start_entry,
				start, entry->end, countp, flags);
			return(NULL);
		}
	}
	return(start_entry);
}
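/*
 * Illustrative sketch (the bracket vm_map_user_pageable() uses below):
 * a blocking range operation pins the covered entries with
 * vm_map_clip_range(), works on them, then releases them:
 *
 *	start_entry = vm_map_clip_range(map, start, end, &count,
 *					MAP_CLIP_NO_HOLES);
 *	if (start_entry == NULL)
 *		return (KERN_INVALID_ADDRESS);
 *	... iterate from start_entry while entry->start < end, possibly
 *	    blocking; the IN_TRANSITION flags keep other threads from
 *	    unlinking the entries out from under us ...
 *	vm_map_unclip_range(map, start_entry, start, end, &count,
 *			    MAP_CLIP_NO_HOLES);
 */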
/*
 *	vm_map_unclip_range:	[ kernel use only ]
 *
 *	Undo the effect of vm_map_clip_range().  You should pass the same
 *	flags and the same range that you passed to vm_map_clip_range().
 *	This code will clear the in-transition flag on the entries and
 *	wake up anyone waiting.  This code will also simplify the sequence
 *	and attempt to merge it with entries before and after the sequence.
 *
 *	The map must be locked on entry and will remain locked on return.
 *
 *	Note that you should also pass the start_entry returned by
 *	vm_map_clip_range().  However, if you block between the two calls
 *	with the map unlocked please be aware that the start_entry may
 *	have been clipped and you may need to scan it backwards to find
 *	the entry corresponding with the original start address.  You are
 *	responsible for this, vm_map_unclip_range() expects the correct
 *	start_entry to be passed to it and will KASSERT otherwise.
 */
static
void
vm_map_unclip_range(
	vm_map_t map,
	vm_map_entry_t start_entry,
	vm_offset_t start,
	vm_offset_t end,
	int *countp,
	int flags)
{
	vm_map_entry_t entry;

	entry = start_entry;

	KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
	while (entry != &map->header && entry->start < end) {
		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
			("in-transition flag not set during unclip on: %p",
			entry));
		KASSERT(entry->end <= end,
			("unclip_range: tail wasn't clipped"));
		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			wakeup(map);
		}
		entry = entry->next;
	}

	/*
	 * Simplification does not block so there is no restart case.
	 */
	entry = start_entry;
	while (entry != &map->header && entry->start < end) {
		vm_map_simplify_entry(map, entry, countp);
		entry = entry->next;
	}
}

/*
 *	vm_map_submap:	[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start, &count);
	} else {
		entry = entry->next;
	}

	vm_map_clip_end(map, entry, end, &count);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->object.sub_map = submap;
		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (result);
}
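/*
 * Illustrative sketch (roughly the sequence kmem_suballoc() in
 * vm_kern.c follows; the details are cited from memory, so treat them
 * as an assumption): a submap is carved out of a parent map and then
 * installed over the same range:
 *
 *	vm_map_find(parent, NULL, 0, &min, size, TRUE,
 *		    VM_PROT_ALL, VM_PROT_ALL, 0);
 *	result = vm_map_create(vm_map_pmap(parent), min, min + size);
 *	vm_map_submap(parent, min, min + size, result);
 *
 * Afterward allocations within [min, min + size) are managed by
 * "result" rather than by the parent map.
 */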
/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_prot_t new_prot, boolean_t set_max)
{
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start, &count);
	} else {
		entry = entry->next;
	}

	/*
	 * Make a first pass to check for protection violations.
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock(map);
			vm_map_entry_release(count);
			return (KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			vm_map_entry_release(count);
			return (KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 * Go back and fix up protections. [Note that clipping is not
	 * necessary the second time.]
	 */
	current = entry;

	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t old_prot;

		vm_map_clip_end(map, current, end, &count);

		old_prot = current->protection;
		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) &
			    old_prot;
		else
			current->protection = new_prot;

		/*
		 * Update physical map if necessary. Worry about copy-on-write
		 * here -- CHECK THIS XXX
		 */
		if (current->protection != old_prot) {
#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
							VM_PROT_ALL)

			pmap_protect(map->pmap, current->start,
			    current->end,
			    current->protection & MASK(current));
#undef	MASK
		}

		vm_map_simplify_entry(map, current, &count);

		current = current->next;
	}

	vm_map_unlock(map);
	vm_map_entry_release(count);
	return (KERN_SUCCESS);
}
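/*
 * Illustrative walk-through of the MASK() logic above (entry state
 * invented): for a MAP_ENTRY_COW entry being set to
 * VM_PROT_READ|VM_PROT_WRITE, the pmap is updated with
 *
 *	prot & ~VM_PROT_WRITE == VM_PROT_READ
 *
 * i.e. write permission is withheld from the hardware map so the first
 * write still faults and vm_fault() can perform the copy-on-write.
 * Non-COW entries use MASK() == VM_PROT_ALL and get the new protection
 * verbatim.
 */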
/*
 *	vm_map_madvise:
 *
 * 	This routine traverses a process's map handling the madvise
 *	system call.  Advisories are classified as either those affecting
 *	the vm_map_entry structure, or those affecting the underlying
 *	objects.
 */
int
vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end, int behav)
{
	vm_map_entry_t current, entry;
	int modify_map = 0;
	int count;

	/*
	 * Some madvise calls directly modify the vm_map_entry, in which case
	 * we need to use an exclusive lock on the map and we need to perform
	 * various clipping operations.  Otherwise we only need a read-lock
	 * on the map.
	 */
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	switch(behav) {
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_NOSYNC:
	case MADV_AUTOSYNC:
	case MADV_NOCORE:
	case MADV_CORE:
		modify_map = 1;
		vm_map_lock(map);
		break;
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		vm_map_lock_read(map);
		break;
	default:
		vm_map_entry_release(count);
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * Locate starting entry and clip if necessary.
	 */
	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		if (modify_map)
			vm_map_clip_start(map, entry, start, &count);
	} else {
		entry = entry->next;
	}

	if (modify_map) {
		/*
		 * madvise behaviors that are implemented in the vm_map_entry.
		 *
		 * We clip the vm_map_entry so that behavioral changes are
		 * limited to the specified address range.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			vm_map_clip_end(map, current, end, &count);

			switch (behav) {
			case MADV_NORMAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
				break;
			case MADV_SEQUENTIAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
				break;
			case MADV_RANDOM:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
				break;
			case MADV_NOSYNC:
				current->eflags |= MAP_ENTRY_NOSYNC;
				break;
			case MADV_AUTOSYNC:
				current->eflags &= ~MAP_ENTRY_NOSYNC;
				break;
			case MADV_NOCORE:
				current->eflags |= MAP_ENTRY_NOCOREDUMP;
				break;
			case MADV_CORE:
				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
				break;
			default:
				break;
			}
			vm_map_simplify_entry(map, current, &count);
		}
		vm_map_unlock(map);
	} else {
		vm_pindex_t pindex;
		int count;

		/*
		 * madvise behaviors that are implemented in the underlying
		 * vm_object.
		 *
		 * Since we don't clip the vm_map_entry, we have to clip
		 * the vm_object pindex and count.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			vm_offset_t useStart;

			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			pindex = OFF_TO_IDX(current->offset);
			count = atop(current->end - current->start);
			useStart = current->start;

			if (current->start < start) {
				pindex += atop(start - current->start);
				count -= atop(start - current->start);
				useStart = start;
			}
			if (current->end > end)
				count -= atop(current->end - end);

			if (count <= 0)
				continue;

			vm_object_madvise(current->object.vm_object,
					  pindex, count, behav);
			if (behav == MADV_WILLNEED) {
				pmap_object_init_pt(
				    map->pmap,
				    useStart,
				    current->object.vm_object,
				    pindex,
				    (count << PAGE_SHIFT),
				    MAP_PREFAULT_MADVISE
				);
			}
		}
		vm_map_unlock_read(map);
	}
	vm_map_entry_release(count);
	return(0);
}
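/*
 * Illustrative arithmetic for the read-locked path above (numbers
 * invented, 4K pages): entry [0x10000,0x20000) with offset 0, madvise
 * range [0x14000,0x18000):
 *
 *	pindex = 0 + atop(0x14000 - 0x10000)		= 4
 *	count  = atop(0x10000) - atop(0x4000) - atop(0x8000)
 *	       = 16 - 4 - 8				= 4
 *
 * so only object pages [4,8) receive the advice, mirroring what a
 * vm_map_entry clip would have produced without modifying the map.
 */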
/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
int
vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_inherit_t new_inheritance)
{
	vm_map_entry_t entry;
	vm_map_entry_t temp_entry;
	int count;

	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start, &count);
	} else
		entry = temp_entry->next;

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_clip_end(map, entry, end, &count);

		entry->inheritance = new_inheritance;

		vm_map_simplify_entry(map, entry, &count);

		entry = entry->next;
	}
	vm_map_unlock(map);
	vm_map_entry_release(count);
	return (KERN_SUCCESS);
}
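/*
 * Illustrative sketch (this is how the minherit(2) path in vm_mmap.c
 * reaches this routine; cited from memory, so treat the exact call as
 * an assumption): a process marking a region as not-inherited ends up
 * here as
 *
 *	vm_map_inherit(&p->p_vmspace->vm_map,
 *		       addr, addr + size, VM_INHERIT_NONE);
 *
 * and a subsequent fork simply skips those entries when building the
 * child map.
 */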
/*
 * Implement the semantics of mlock
 */
int
vm_map_user_pageable(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
		     boolean_t new_pageable)
{
	vm_map_entry_t entry;
	vm_map_entry_t start_entry;
	vm_offset_t end;
	int rv = KERN_SUCCESS;
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, real_end);
	end = real_end;

	start_entry = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES);
	if (start_entry == NULL) {
		vm_map_unlock(map);
		vm_map_entry_release(count);
		return (KERN_INVALID_ADDRESS);
	}

	if (new_pageable == 0) {
		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			vm_offset_t save_start;
			vm_offset_t save_end;

			/*
			 * Already user wired or hard wired (trivial cases)
			 */
			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
				entry = entry->next;
				continue;
			}
			if (entry->wired_count != 0) {
				entry->wired_count++;
				entry->eflags |= MAP_ENTRY_USER_WIRED;
				entry = entry->next;
				continue;
			}

			/*
			 * A new wiring requires instantiation of appropriate
			 * management structures and the faulting in of the
			 * page.
			 */
			if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {

					vm_object_shadow(&entry->object.vm_object,
					    &entry->offset,
					    atop(entry->end - entry->start));
					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;

				} else if (entry->object.vm_object == NULL &&
					   !map->system_map) {

					entry->object.vm_object =
					    vm_object_allocate(OBJT_DEFAULT,
						atop(entry->end - entry->start));
					entry->offset = (vm_offset_t) 0;

				}
			}
			entry->wired_count++;
			entry->eflags |= MAP_ENTRY_USER_WIRED;

			/*
			 * Now fault in the area.  The map lock needs to be
			 * manipulated to avoid deadlocks.  The in-transition
			 * flag protects the entries.
			 */
			save_start = entry->start;
			save_end = entry->end;
			vm_map_unlock(map);
			map->timestamp++;
			rv = vm_fault_user_wire(map, save_start, save_end);
			vm_map_lock(map);
			if (rv) {
				CLIP_CHECK_BACK(entry, save_start);
				for (;;) {
					KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
					entry->eflags &= ~MAP_ENTRY_USER_WIRED;
					entry->wired_count = 0;
					if (entry->end == save_end)
						break;
					entry = entry->next;
					KASSERT(entry != &map->header, ("bad entry clip during backout"));
				}
				end = save_start;	/* unwire the rest */
				break;
			}
			/*
			 * note that even though the entry might have been
			 * clipped, the USER_WIRED flag we set prevents
			 * duplication so we do not have to do a
			 * clip check.
			 */
			entry = entry->next;
		}

		/*
		 * If we failed fall through to the unwiring section to
		 * unwire what we had wired so far.  'end' has already
		 * been adjusted.
		 */
		if (rv)
			new_pageable = 1;

		/*
		 * start_entry might have been clipped if we unlocked the
		 * map and blocked.  No matter how clipped it has gotten
		 * there should be a fragment that is on our start boundary.
		 */
		CLIP_CHECK_BACK(start_entry, start);
	}

	/*
	 * Deal with the unwiring case.
	 */
	if (new_pageable) {
		/*
		 * This is the unwiring case.  We must first ensure that the
		 * range to be unwired is really wired down.  We know there
		 * are no holes.
		 */
		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
				rv = KERN_INVALID_ARGUMENT;
				goto done;
			}
			KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
			entry = entry->next;
		}

		/*
		 * Now decrement the wiring count for each region.  If a region
		 * becomes completely unwired, unwire its physical pages and
		 * mappings.  The loop variable must be reset to start_entry
		 * first; the validation loop above left it past the range.
		 */
		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED, ("expected USER_WIRED on entry %p", entry));
			entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			entry->wired_count--;
			if (entry->wired_count == 0)
				vm_fault_unwire(map, entry->start, entry->end);
			entry = entry->next;
		}
	}
done:
	vm_map_unclip_range(map, start_entry, start, real_end, &count,
		MAP_CLIP_NO_HOLES);
	map->timestamp++;
	vm_map_unlock(map);
	vm_map_entry_release(count);
	return (rv);
}
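/*
 * Illustrative sketch (the mlock(2)/munlock(2) paths in vm_mmap.c
 * reach this routine roughly as follows; cited from memory):
 *
 *	mlock:	 vm_map_user_pageable(&p->p_vmspace->vm_map,
 *				      addr, addr + size, FALSE);
 *	munlock: vm_map_user_pageable(&p->p_vmspace->vm_map,
 *				      addr, addr + size, TRUE);
 *
 * i.e. new_pageable == FALSE wires the range, TRUE unwires it.
 */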
1855 */ 1856 int 1857 vm_map_pageable(vm_map_t map, vm_offset_t start, 1858 vm_offset_t real_end, boolean_t new_pageable) 1859 { 1860 vm_map_entry_t entry; 1861 vm_map_entry_t start_entry; 1862 vm_offset_t end; 1863 int rv = KERN_SUCCESS; 1864 int count; 1865 int s; 1866 1867 if (map == kernel_map) 1868 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT); 1869 #if defined(USE_KMEM_MAP) 1870 else if (map == kmem_map) 1871 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT); 1872 #endif 1873 else 1874 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1875 vm_map_lock(map); 1876 VM_MAP_RANGE_CHECK(map, start, real_end); 1877 end = real_end; 1878 1879 start_entry = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES); 1880 if (start_entry == NULL) { 1881 vm_map_unlock(map); 1882 rv = KERN_INVALID_ADDRESS; 1883 goto failure; 1884 } 1885 if (new_pageable == 0) { 1886 /* 1887 * Wiring. 1888 * 1889 * 1. Holding the write lock, we create any shadow or zero-fill 1890 * objects that need to be created. Then we clip each map 1891 * entry to the region to be wired and increment its wiring 1892 * count. We create objects before clipping the map entries 1893 * to avoid object proliferation. 1894 * 1895 * 2. We downgrade to a read lock, and call vm_fault_wire to 1896 * fault in the pages for any newly wired area (wired_count is 1897 * 1). 1898 * 1899 * Downgrading to a read lock for vm_fault_wire avoids a 1900 * possible deadlock with another process that may have faulted 1901 * on one of the pages to be wired (it would mark the page busy, 1902 * blocking us, then in turn block on the map lock that we 1903 * hold). Because of problems in the recursive lock package, 1904 * we cannot upgrade to a write lock in vm_map_lookup. Thus, 1905 * any actions that require the write lock must be done 1906 * beforehand. Because we keep the read lock on the map, the 1907 * copy-on-write status of the entries we modify here cannot 1908 * change. 1909 */ 1910 1911 entry = start_entry; 1912 while ((entry != &map->header) && (entry->start < end)) { 1913 /* 1914 * Trivial case if the entry is already wired 1915 */ 1916 if (entry->wired_count) { 1917 entry->wired_count++; 1918 entry = entry->next; 1919 continue; 1920 } 1921 1922 /* 1923 * The entry is being newly wired, we have to setup 1924 * appropriate management structures. A shadow 1925 * object is required for a copy-on-write region, 1926 * or a normal object for a zero-fill region. We 1927 * do not have to do this for entries that point to sub 1928 * maps because we won't hold the lock on the sub map. 1929 */ 1930 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 1931 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY; 1932 if (copyflag && 1933 ((entry->protection & VM_PROT_WRITE) != 0)) { 1934 1935 vm_object_shadow(&entry->object.vm_object, 1936 &entry->offset, 1937 atop(entry->end - entry->start)); 1938 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; 1939 } else if (entry->object.vm_object == NULL && 1940 !map->system_map) { 1941 entry->object.vm_object = 1942 vm_object_allocate(OBJT_DEFAULT, 1943 atop(entry->end - entry->start)); 1944 entry->offset = (vm_offset_t) 0; 1945 } 1946 } 1947 1948 entry->wired_count++; 1949 entry = entry->next; 1950 } 1951 1952 /* 1953 * Pass 2. 1954 */ 1955 1956 /* 1957 * HACK HACK HACK HACK 1958 * 1959 * Unlock the map to avoid deadlocks. The in-transit flag 1960 * protects us from most changes but note that 1961 * clipping may still occur. 
		 * To prevent clipping from occurring after the unlock,
		 * except for when we are blocking in vm_fault_wire, we
		 * must run at splvm().  Otherwise our accesses to
		 * entry->start and entry->end could be corrupted.  We have
		 * to set splvm() prior to unlocking so start_entry does
		 * not change out from under us at the very beginning of
		 * the loop.
		 *
		 * HACK HACK HACK HACK
		 */

		s = splvm();
		vm_map_unlock(map);

		entry = start_entry;
		while (entry != &map->header && entry->start < end) {
			/*
			 * If vm_fault_wire fails for any page we need to undo
			 * what has been done.  We decrement the wiring count
			 * for those pages which have not yet been wired (now)
			 * and unwire those that have (later).
			 */
			vm_offset_t save_start = entry->start;
			vm_offset_t save_end = entry->end;

			if (entry->wired_count == 1)
				rv = vm_fault_wire(map, entry->start, entry->end);
			if (rv) {
				CLIP_CHECK_BACK(entry, save_start);
				for (;;) {
					KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
					entry->wired_count = 0;
					if (entry->end == save_end)
						break;
					entry = entry->next;
					KASSERT(entry != &map->header, ("bad entry clip during backout"));
				}
				end = save_start;
				break;
			}
			CLIP_CHECK_FWD(entry, save_end);
			entry = entry->next;
		}
		splx(s);

		/*
		 * Relock.  start_entry is still IN_TRANSITION and must
		 * still exist, but may have been clipped (handled just
		 * below).
		 */
		vm_map_lock(map);

		/*
		 * If a failure occurred undo everything by falling through
		 * to the unwiring code.  'end' has already been adjusted
		 * appropriately.
		 */
		if (rv)
			new_pageable = 1;

		/*
		 * start_entry might have been clipped if we unlocked the
		 * map and blocked.  No matter how clipped it has gotten
		 * there should be a fragment that is on our start boundary.
		 */
		CLIP_CHECK_BACK(start_entry, start);
	}

	if (new_pageable) {
		/*
		 * This is the unwiring case.  We must first ensure that the
		 * range to be unwired is really wired down.  We know there
		 * are no holes.
		 */
		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			if (entry->wired_count == 0) {
				rv = KERN_INVALID_ARGUMENT;
				goto done;
			}
			entry = entry->next;
		}

		/*
		 * Now decrement the wiring count for each region.  If a region
		 * becomes completely unwired, unwire its physical pages and
		 * mappings.
		 */
		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
			entry->wired_count--;
			if (entry->wired_count == 0)
				vm_fault_unwire(map, entry->start, entry->end);
			entry = entry->next;
		}
	}
done:
	vm_map_unclip_range(map, start_entry, start, real_end, &count,
	    MAP_CLIP_NO_HOLES);
	map->timestamp++;
	vm_map_unlock(map);
failure:
	if (map == kernel_map)
		vm_map_entry_krelease(count);
#if defined(USE_KMEM_MAP)
	else if (map == kmem_map)
		vm_map_entry_krelease(count);
#endif
	else
		vm_map_entry_release(count);
	return (rv);
}

/*
 * vm_map_set_wired_quick()
 *
 * Mark a newly allocated address range as wired but do not fault in
 * the pages.  The caller is expected to load the pages into the object.
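 * E.g. (sketch only, hypothetical caller): after inserting a size-byte
 * range at addr, mark it wired without faulting:
 *
 *	vm_map_set_wired_quick(map, addr, size, &count);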
 *
 * The map must be locked on entry and will remain locked on return.
 */
void
vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, int *countp)
{
	vm_map_entry_t scan;
	vm_map_entry_t entry;

	entry = vm_map_clip_range(map, addr, addr + size, countp, MAP_CLIP_NO_HOLES);
	for (scan = entry; scan != &map->header && scan->start < addr + size; scan = scan->next) {
		KKASSERT(scan->wired_count == 0);
		scan->wired_count = 1;
	}
	vm_map_unclip_range(map, entry, addr, addr + size, countp, MAP_CLIP_NO_HOLES);
}

/*
 * vm_map_clean
 *
 * Push any dirty cached pages in the address range to their pager.
 * If syncio is TRUE, dirty pages are written synchronously.
 * If invalidate is TRUE, any cached pages are freed as well.
 *
 * Returns an error if any part of the specified range is not mapped.
 */
int
vm_map_clean(map, start, end, syncio, invalidate)
	vm_map_t map;
	vm_offset_t start;
	vm_offset_t end;
	boolean_t syncio;
	boolean_t invalidate;
{
	vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_size_t size;
	vm_object_t object;
	vm_ooffset_t offset;

	vm_map_lock_read(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	}
	/*
	 * Make a first pass to check for holes.
	 */
	for (current = entry; current->start < end; current = current->next) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if (end > current->end &&
		    (current->next == &map->header ||
		     current->end != current->next->start)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ADDRESS);
		}
	}

	if (invalidate)
		pmap_remove(vm_map_pmap(map), start, end);
	/*
	 * Make a second pass, cleaning/uncaching pages from the indicated
	 * objects as we go.
	 */
	for (current = entry; current->start < end; current = current->next) {
		offset = current->offset + (start - current->start);
		size = (end <= current->end ? end : current->end) - start;
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_t smap;
			vm_map_entry_t tentry;
			vm_size_t tsize;

			smap = current->object.sub_map;
			vm_map_lock_read(smap);
			(void) vm_map_lookup_entry(smap, offset, &tentry);
			tsize = tentry->end - offset;
			if (tsize < size)
				size = tsize;
			object = tentry->object.vm_object;
			offset = tentry->offset + (offset - tentry->start);
			vm_map_unlock_read(smap);
		} else {
			object = current->object.vm_object;
		}
		/*
		 * Note that there is absolutely no sense in writing out
		 * anonymous objects, so we track down the vnode object
		 * to write out.
		 * We invalidate (remove) all pages from the address space
		 * anyway, for semantic correctness.
		 *
		 * note: certain anonymous maps, such as MAP_NOSYNC maps,
		 * may start out with a NULL object.
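		 *
		 * While walking down the shadow chain below, the map offset
		 * is translated into each backing object's space:
		 * offset' = offset + object->backing_object_offset, applied
		 * before stepping down to object->backing_object.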
		 */
		while (object && object->backing_object) {
			offset += object->backing_object_offset;
			object = object->backing_object;
			if (object->size < OFF_TO_IDX(offset + size))
				size = IDX_TO_OFF(object->size) - offset;
		}
		if (object && (object->type == OBJT_VNODE) &&
		    (current->protection & VM_PROT_WRITE)) {
			/*
			 * Flush pages if writing is allowed, invalidate them
			 * if invalidation requested.  Pages undergoing I/O
			 * will be ignored by vm_object_page_remove().
			 *
			 * We cannot lock the vnode and then wait for paging
			 * to complete without deadlocking against vm_fault.
			 * Instead we simply call vm_object_page_remove() and
			 * allow it to block internally on a page-by-page
			 * basis when it encounters pages undergoing async
			 * I/O.
			 */
			int flags;

			vm_object_reference(object);
			vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
			flags |= invalidate ? OBJPC_INVAL : 0;
			vm_object_page_clean(object,
			    OFF_TO_IDX(offset),
			    OFF_TO_IDX(offset + size + PAGE_MASK),
			    flags);
			VOP_UNLOCK(object->handle, 0, curthread);
			vm_object_deallocate(object);
		}
		if (object && invalidate &&
		    ((object->type == OBJT_VNODE) ||
		     (object->type == OBJT_DEVICE))) {
			vm_object_reference(object);
			vm_object_page_remove(object,
			    OFF_TO_IDX(offset),
			    OFF_TO_IDX(offset + size + PAGE_MASK),
			    FALSE);
			vm_object_deallocate(object);
		}
		start += size;
	}

	vm_map_unlock_read(map);
	return (KERN_SUCCESS);
}

/*
 * vm_map_entry_unwire:	[ internal use only ]
 *
 * Make the region specified by this entry pageable.
 *
 * The map in question should be locked.
 * [This is the reason for this routine's existence.]
 */
static void
vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
{
	vm_fault_unwire(map, entry->start, entry->end);
	entry->wired_count = 0;
}

/*
 * vm_map_entry_delete:	[ internal use only ]
 *
 * Deallocate the given entry from the target map.
 */
static void
vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
{
	vm_map_entry_unlink(map, entry);
	map->size -= entry->end - entry->start;

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_deallocate(entry->object.vm_object);
	}

	vm_map_entry_dispose(map, entry, countp);
}

/*
 * vm_map_delete:	[ internal use only ]
 *
 * Deallocates the given address range from the target
 * map.
 */
int
vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
{
	vm_object_t object;
	vm_map_entry_t entry;
	vm_map_entry_t first_entry;

	/*
	 * Find the start of the region, and clip it
	 */

again:
	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start, countp);
		/*
		 * Fix the lookup hint now, rather than each time through the
		 * loop.
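		 * (SAVE_HINT() just records entry->prev as the map's lookup
		 * hint so the next vm_map_lookup_entry() can try it before
		 * falling back to a full list walk.)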
		 */
		SAVE_HINT(map, entry->prev);
	}

	/*
	 * Save the free space hint
	 */

	if (entry == &map->header) {
		map->first_free = &map->header;
	} else if (map->first_free->start >= start) {
		map->first_free = entry->prev;
	}

	/*
	 * Step through all entries in this region
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t next;
		vm_offset_t s, e;
		vm_pindex_t offidxstart, offidxend, count;

		/*
		 * If we hit an in-transition entry we have to sleep and
		 * retry.  It's easier (and not really slower) to just retry
		 * since this case occurs so rarely and the hint is already
		 * pointing at the right place.  We have to reset the
		 * start offset so as not to accidentally delete an entry
		 * another process just created in vacated space.
		 */
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			start = entry->start;
			++mycpu->gd_cnt.v_intrans_coll;
			++mycpu->gd_cnt.v_intrans_wait;
			vm_map_transition_wait(map);
			goto again;
		}
		vm_map_clip_end(map, entry, end, countp);

		s = entry->start;
		e = entry->end;
		next = entry->next;

		offidxstart = OFF_TO_IDX(entry->offset);
		count = OFF_TO_IDX(e - s);
		object = entry->object.vm_object;

		/*
		 * Unwire before removing addresses from the pmap; otherwise,
		 * unwiring will put the entries back in the pmap.
		 */
		if (entry->wired_count != 0) {
			vm_map_entry_unwire(map, entry);
		}

		offidxend = offidxstart + count;

		if ((object == kernel_object) || (object == kmem_object)) {
			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
		} else {
			pmap_remove(map->pmap, s, e);
			if (object != NULL &&
			    object->ref_count != 1 &&
			    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
			    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
				vm_object_collapse(object);
				vm_object_page_remove(object, offidxstart, offidxend, FALSE);
				if (object->type == OBJT_SWAP) {
					swap_pager_freespace(object, offidxstart, count);
				}
				if (offidxend >= object->size &&
				    offidxstart < object->size) {
					object->size = offidxstart;
				}
			}
		}

		/*
		 * Delete the entry (which may delete the object) only after
		 * removing all pmap entries pointing to its pages.
		 * (Otherwise, its page frames may be reallocated, and any
		 * modify bits will be set in the wrong object!)
		 */
		vm_map_entry_delete(map, entry, countp);
		entry = next;
	}
	return (KERN_SUCCESS);
}

/*
 * vm_map_remove:
 *
 * Remove the given address range from the target map.
 * This is the exported form of vm_map_delete.
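 *
 * A typical (hypothetical) caller tearing down a kernel allocation:
 *
 *	vm_map_remove(kernel_map, addr, addr + round_page(size));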
 */
int
vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int result;
	int count;
#if defined(USE_KMEM_MAP)
	int s = 0;
#endif

#if defined(USE_KMEM_MAP)
	if (map == kmem_map || map == mb_map)
		s = splvm();
#endif
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end, &count);
	vm_map_unlock(map);
	vm_map_entry_release(count);

#if defined(USE_KMEM_MAP)
	if (map == kmem_map || map == mb_map)
		splx(s);
#endif

	return (result);
}

/*
 * vm_map_check_protection:
 *
 * Assert that the target map allows the specified
 * privilege on the entire address region given.
 * The entire region must be allocated.
 */
boolean_t
vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
    vm_prot_t protection)
{
	vm_map_entry_t entry;
	vm_map_entry_t tmp_entry;

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		return (FALSE);
	}
	entry = tmp_entry;

	while (start < end) {
		if (entry == &map->header) {
			return (FALSE);
		}
		/*
		 * No holes allowed!
		 */

		if (start < entry->start) {
			return (FALSE);
		}
		/*
		 * Check protection associated with entry.
		 */

		if ((entry->protection & protection) != protection) {
			return (FALSE);
		}
		/* go to next entry */

		start = entry->end;
		entry = entry->next;
	}
	return (TRUE);
}

/*
 * Split the pages in a map entry into a new object.  This affords
 * easier removal of unused pages, and keeps object inheritance from
 * being a negative impact on memory usage.
 */
static void
vm_map_split(vm_map_entry_t entry)
{
	vm_page_t m;
	vm_object_t orig_object, new_object, source;
	vm_offset_t s, e;
	vm_pindex_t offidxstart, offidxend, idx;
	vm_size_t size;
	vm_ooffset_t offset;

	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
		return;
	if (orig_object->ref_count <= 1)
		return;

	offset = entry->offset;
	s = entry->start;
	e = entry->end;

	offidxstart = OFF_TO_IDX(offset);
	offidxend = offidxstart + OFF_TO_IDX(e - s);
	size = offidxend - offidxstart;

	new_object = vm_pager_allocate(orig_object->type,
	    NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
	if (new_object == NULL)
		return;

	source = orig_object->backing_object;
	if (source != NULL) {
		vm_object_reference(source);	/* Referenced by new_object */
		LIST_INSERT_HEAD(&source->shadow_head,
		    new_object, shadow_list);
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		new_object->backing_object_offset =
		    orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
		new_object->backing_object = source;
		source->shadow_count++;
		source->generation++;
	}

	for (idx = 0; idx < size; idx++) {
		vm_page_t m;

	retry:
		m = vm_page_lookup(orig_object, offidxstart + idx);
		if (m == NULL)
			continue;

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
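		 *
		 * (vm_page_sleep_busy() returns TRUE if it had to block, in
		 * which case the page may have changed identity and the
		 * lookup must be redone -- hence the goto retry below.)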
		 */
		if (vm_page_sleep_busy(m, TRUE, "spltwt"))
			goto retry;

		vm_page_busy(m);
		vm_page_rename(m, new_object, idx);
		/* page automatically made dirty by rename and cache handled */
		vm_page_busy(m);
	}

	if (orig_object->type == OBJT_SWAP) {
		vm_object_pip_add(orig_object, 1);
		/*
		 * copy orig_object pages into new_object
		 * and destroy unneeded pages in
		 * shadow object.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);
		vm_object_pip_wakeup(orig_object);
	}

	for (idx = 0; idx < size; idx++) {
		m = vm_page_lookup(new_object, idx);
		if (m) {
			vm_page_wakeup(m);
		}
	}

	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
}

/*
 * vm_map_copy_entry:
 *
 * Copies the contents of the source entry to the destination
 * entry.  The entries *must* be aligned properly.
 */
static void
vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
    vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
{
	vm_object_t src_object;

	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
		return;

	if (src_entry->wired_count == 0) {

		/*
		 * If the source entry is marked needs_copy, it is already
		 * write-protected.
		 */
		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
			pmap_protect(src_map->pmap,
			    src_entry->start,
			    src_entry->end,
			    src_entry->protection & ~VM_PROT_WRITE);
		}

		/*
		 * Make a copy of the object.
		 */
		if ((src_object = src_entry->object.vm_object) != NULL) {

			if ((src_object->handle == NULL) &&
			    (src_object->type == OBJT_DEFAULT ||
			     src_object->type == OBJT_SWAP)) {
				vm_object_collapse(src_object);
				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
					vm_map_split(src_entry);
					src_object = src_entry->object.vm_object;
				}
			}

			vm_object_reference(src_object);
			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
			dst_entry->object.vm_object = src_object;
			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->offset = src_entry->offset;
		} else {
			dst_entry->object.vm_object = NULL;
			dst_entry->offset = 0;
		}

		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
		    dst_entry->end - dst_entry->start, src_entry->start);
	} else {
		/*
		 * Of course, wired down pages can't be set copy-on-write.
		 * Cause wired pages to be copied into the new map by
		 * simulating faults (the new pages are pageable)
		 */
		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
	}
}

/*
 * vmspace_fork:
 * Create a new process vmspace structure and vm_map
 * based on those of an existing process.  The new map
 * is based on the old map, according to the inheritance
 * values on the regions in that map.
 *
 * The source map must not be locked.
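 *
 * E.g. the fork path does, in rough sketch (surrounding bookkeeping
 * omitted):
 *
 *	p2->p_vmspace = vmspace_fork(p1->p_vmspace);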
 */
struct vmspace *
vmspace_fork(struct vmspace *vm1)
{
	struct vmspace *vm2;
	vm_map_t old_map = &vm1->vm_map;
	vm_map_t new_map;
	vm_map_entry_t old_entry;
	vm_map_entry_t new_entry;
	vm_object_t object;
	int count;

	vm_map_lock(old_map);
	old_map->infork = 1;

	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
	    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
	new_map = &vm2->vm_map;	/* XXX */
	new_map->timestamp = 1;

	count = 0;
	old_entry = old_map->header.next;
	while (old_entry != &old_map->header) {
		++count;
		old_entry = old_entry->next;
	}

	count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);

	old_entry = old_map->header.next;
	while (old_entry != &old_map->header) {
		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			panic("vm_map_fork: encountered a submap");

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			break;

		case VM_INHERIT_SHARE:
			/*
			 * Clone the entry, creating the shared object if necessary.
			 */
			object = old_entry->object.vm_object;
			if (object == NULL) {
				object = vm_object_allocate(OBJT_DEFAULT,
				    atop(old_entry->end - old_entry->start));
				old_entry->object.vm_object = object;
				old_entry->offset = (vm_offset_t) 0;
			}

			/*
			 * Add the reference before calling vm_object_shadow
			 * to ensure that a shadow object is created.
			 */
			vm_object_reference(object);
			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
				vm_object_shadow(&old_entry->object.vm_object,
				    &old_entry->offset,
				    atop(old_entry->end - old_entry->start));
				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
				/* Transfer the second reference too. */
				vm_object_reference(
				    old_entry->object.vm_object);
				vm_object_deallocate(object);
				object = old_entry->object.vm_object;
			}
			vm_object_clear_flag(object, OBJ_ONEMAPPING);

			/*
			 * Clone the entry, referencing the shared object.
			 */
			new_entry = vm_map_entry_create(new_map, &count);
			*new_entry = *old_entry;
			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			new_entry->wired_count = 0;

			/*
			 * Insert the entry into the new map -- we know we're
			 * inserting at the end of the new map.
			 */

			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);

			/*
			 * Update the physical map
			 */

			pmap_copy(new_map->pmap, old_map->pmap,
			    new_entry->start,
			    (old_entry->end - old_entry->start),
			    old_entry->start);
			break;

		case VM_INHERIT_COPY:
			/*
			 * Clone the entry and link into the map.
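			 * The new entry starts with no VM object of its
			 * own; vm_map_copy_entry() below supplies one and
			 * marks both entries copy-on-write as needed.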
			 */
			new_entry = vm_map_entry_create(new_map, &count);
			*new_entry = *old_entry;
			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			new_entry->wired_count = 0;
			new_entry->object.vm_object = NULL;
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);
			vm_map_copy_entry(old_map, new_map, old_entry,
			    new_entry);
			break;
		}
		old_entry = old_entry->next;
	}

	new_map->size = old_map->size;
	old_map->infork = 0;
	vm_map_unlock(old_map);
	vm_map_entry_release(count);

	return (vm2);
}

int
vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
	      vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t prev_entry;
	vm_map_entry_t new_stack_entry;
	vm_size_t init_ssize;
	int rv;
	int count;

	if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
		return (KERN_NO_SPACE);

	if (max_ssize < sgrowsiz)
		init_ssize = max_ssize;
	else
		init_ssize = sgrowsiz;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);

	/* If addr is already mapped, no go */
	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
		vm_map_unlock(map);
		vm_map_entry_release(count);
		return (KERN_NO_SPACE);
	}

	/* If we would blow our VMEM resource limit, no go */
	if (map->size + init_ssize >
	    curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
		vm_map_unlock(map);
		vm_map_entry_release(count);
		return (KERN_NO_SPACE);
	}

	/* If we can't accommodate max_ssize in the current mapping,
	 * no go.  However, we need to be aware that subsequent user
	 * mappings might map into the space we have reserved for
	 * stack, and currently this space is not protected.
	 *
	 * Hopefully we will at least detect this condition
	 * when we try to grow the stack.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < addrbos + max_ssize)) {
		vm_map_unlock(map);
		vm_map_entry_release(count);
		return (KERN_NO_SPACE);
	}

	/* We initially map a stack of only init_ssize.  We will
	 * grow as needed later.  Since this is to be a grow
	 * down stack, we map at the top of the range.
	 *
	 * Note: we would normally expect prot and max to be
	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
	 * eliminate these as input parameters, and just
	 * pass these values here in the insert call.
	 */
	rv = vm_map_insert(map, &count,
	    NULL, 0, addrbos + max_ssize - init_ssize,
	    addrbos + max_ssize, prot, max, cow);

	/* Now set the avail_ssize amount */
	if (rv == KERN_SUCCESS) {
		if (prev_entry != &map->header)
			vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize, &count);
		new_stack_entry = prev_entry->next;
		if (new_stack_entry->end != addrbos + max_ssize ||
		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
			panic ("Bad entry start/end for new stack entry");
		else
			new_stack_entry->avail_ssize = max_ssize - init_ssize;
	}

	vm_map_unlock(map);
	vm_map_entry_release(count);
	return (rv);
}

/* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
 * desired address is already mapped, or if we successfully grow
 * the stack.
 * Also returns KERN_SUCCESS if addr is outside the
 * stack range (this is strange, but preserves compatibility with
 * the grow function in vm_machdep.c).
 */
int
vm_map_growstack (struct proc *p, vm_offset_t addr)
{
	vm_map_entry_t prev_entry;
	vm_map_entry_t stack_entry;
	vm_map_entry_t new_stack_entry;
	struct vmspace *vm = p->p_vmspace;
	vm_map_t map = &vm->vm_map;
	vm_offset_t end;
	int grow_amount;
	int rv = KERN_SUCCESS;
	int is_procstack;
	int use_read_lock = 1;
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
Retry:
	if (use_read_lock)
		vm_map_lock_read(map);
	else
		vm_map_lock(map);

	/* If addr is already in the entry range, no need to grow. */
	if (vm_map_lookup_entry(map, addr, &prev_entry))
		goto done;

	if ((stack_entry = prev_entry->next) == &map->header)
		goto done;
	if (prev_entry == &map->header)
		end = stack_entry->start - stack_entry->avail_ssize;
	else
		end = prev_entry->end;

	/* This next test mimics the old grow function in vm_machdep.c.
	 * It really doesn't quite make sense, but we do it anyway
	 * for compatibility.
	 *
	 * If not a growable stack, return success.  This signals the
	 * caller to proceed as it normally would with normal vm.
	 */
	if (stack_entry->avail_ssize < 1 ||
	    addr >= stack_entry->start ||
	    addr < stack_entry->start - stack_entry->avail_ssize) {
		goto done;
	}

	/* Find the minimum grow amount */
	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
	if (grow_amount > stack_entry->avail_ssize) {
		rv = KERN_NO_SPACE;
		goto done;
	}

	/* If there is no longer enough space between the entries, fail
	 * and adjust the available space.  Note: this should only happen
	 * if the user has mapped into the stack area after the stack
	 * was created, and is probably an error.
	 *
	 * This also effectively destroys any guard page the user
	 * might have intended by limiting the stack size.
	 */
	if (grow_amount > stack_entry->start - end) {
		if (use_read_lock && vm_map_lock_upgrade(map)) {
			use_read_lock = 0;
			goto Retry;
		}
		use_read_lock = 0;
		stack_entry->avail_ssize = stack_entry->start - end;
		rv = KERN_NO_SPACE;
		goto done;
	}

	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;

	/* If this is the main process stack, see if we're over the
	 * stack limit.
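	 * ctob() converts the page count in vm->vm_ssize to bytes so
	 * that it can be compared against the RLIMIT_STACK byte limit.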
	 */
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
	    p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
		rv = KERN_NO_SPACE;
		goto done;
	}

	/* Round up the grow amount modulo SGROWSIZ */
	grow_amount = roundup (grow_amount, sgrowsiz);
	if (grow_amount > stack_entry->avail_ssize) {
		grow_amount = stack_entry->avail_ssize;
	}
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
	    p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
		    ctob(vm->vm_ssize);
	}

	/* If we would blow our VMEM resource limit, no go */
	if (map->size + grow_amount >
	    curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
		rv = KERN_NO_SPACE;
		goto done;
	}

	if (use_read_lock && vm_map_lock_upgrade(map)) {
		use_read_lock = 0;
		goto Retry;
	}
	use_read_lock = 0;

	/* Get the preliminary new entry start value */
	addr = stack_entry->start - grow_amount;

	/* If this puts us into the previous entry, cut back our growth
	 * to the available space.  Also, see the note above.
	 */
	if (addr < end) {
		stack_entry->avail_ssize = stack_entry->start - end;
		addr = end;
	}

	rv = vm_map_insert(map, &count,
	    NULL, 0, addr, stack_entry->start,
	    VM_PROT_ALL,
	    VM_PROT_ALL,
	    0);

	/* Adjust the available stack space by the amount we grew. */
	if (rv == KERN_SUCCESS) {
		if (prev_entry != &map->header)
			vm_map_clip_end(map, prev_entry, addr, &count);
		new_stack_entry = prev_entry->next;
		if (new_stack_entry->end != stack_entry->start ||
		    new_stack_entry->start != addr)
			panic ("Bad stack grow start/end in new stack entry");
		else {
			new_stack_entry->avail_ssize = stack_entry->avail_ssize -
			    (new_stack_entry->end - new_stack_entry->start);
			if (is_procstack)
				vm->vm_ssize += btoc(new_stack_entry->end -
				    new_stack_entry->start);
		}
	}

done:
	if (use_read_lock)
		vm_map_unlock_read(map);
	else
		vm_map_unlock(map);
	vm_map_entry_release(count);
	return (rv);
}

/*
 * Unshare the specified VM space for exec.  If other processes are
 * mapped to it, then create a new one.  The new vmspace is null.
 */

void
vmspace_exec(struct proc *p)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;
	vm_map_t map = &p->p_vmspace->vm_map;

	newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
	bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
	    (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
	/*
	 * This code is written like this for prototype purposes.  The
	 * goal is to avoid running down the vmspace here, but let the
	 * other processes that are still using the vmspace finally
	 * run it down.  Even though there is little or no chance of blocking
	 * here, it is a good idea to keep this form for future mods.
	 */
	vmspace_free(oldvmspace);
	p->p_vmspace = newvmspace;
	pmap_pinit2(vmspace_pmap(newvmspace));
	if (p == curproc)
		pmap_activate(p);
}

/*
 * Unshare the specified VM space for forcing COW.  This
 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
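 *
 * If the vmspace is not shared (vm_refcnt == 1) this is a no-op;
 * otherwise the address space is duplicated with vmspace_fork() and
 * the process is switched to the private copy.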
 */

void
vmspace_unshare(struct proc *p)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;

	if (oldvmspace->vm_refcnt == 1)
		return;
	newvmspace = vmspace_fork(oldvmspace);
	vmspace_free(oldvmspace);
	p->p_vmspace = newvmspace;
	pmap_pinit2(vmspace_pmap(newvmspace));
	if (p == curproc)
		pmap_activate(p);
}

/*
 * vm_map_lookup:
 *
 * Finds the VM object, offset, and
 * protection for a given virtual address in the
 * specified map, assuming a page fault of the
 * type specified.
 *
 * Leaves the map in question locked for read; return
 * values are guaranteed until a vm_map_lookup_done
 * call is performed.  Note that the map argument
 * is in/out; the returned map must be used in
 * the call to vm_map_lookup_done.
 *
 * A handle (out_entry) is returned for use in
 * vm_map_lookup_done, to make that fast.
 *
 * If a lookup is requested with "write protection"
 * specified, the map may be changed to perform virtual
 * copying operations, although the data referenced will
 * remain the same.
 */
int
vm_map_lookup(vm_map_t *var_map,	/* IN/OUT */
	      vm_offset_t vaddr,
	      vm_prot_t fault_typea,
	      vm_map_entry_t *out_entry,	/* OUT */
	      vm_object_t *object,		/* OUT */
	      vm_pindex_t *pindex,		/* OUT */
	      vm_prot_t *out_prot,		/* OUT */
	      boolean_t *wired)			/* OUT */
{
	vm_map_entry_t entry;
	vm_map_t map = *var_map;
	vm_prot_t prot;
	vm_prot_t fault_type = fault_typea;
	int use_read_lock = 1;
	int rv = KERN_SUCCESS;

RetryLookup:
	if (use_read_lock)
		vm_map_lock_read(map);
	else
		vm_map_lock(map);

	/*
	 * If the map has an interesting hint, try it before calling full
	 * blown lookup routine.
	 */
	entry = map->hint;
	*out_entry = entry;

	if ((entry == &map->header) ||
	    (vaddr < entry->start) || (vaddr >= entry->end)) {
		vm_map_entry_t tmp_entry;

		/*
		 * Entry was either not a valid hint, or the vaddr was not
		 * contained in the entry, so do a full lookup.
		 */
		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}

		entry = tmp_entry;
		*out_entry = entry;
	}

	/*
	 * Handle submaps.
	 */

	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		vm_map_t old_map = map;

		*var_map = map = entry->object.sub_map;
		if (use_read_lock)
			vm_map_unlock_read(old_map);
		else
			vm_map_unlock(old_map);
		use_read_lock = 1;
		goto RetryLookup;
	}

	/*
	 * Check whether this task is allowed to have this page.
	 * Note the special case for MAP_ENTRY_COW
	 * pages with an override.  This is to implement a forced
	 * COW for debuggers.
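	 * (A debugger can pass VM_PROT_OVERRIDE_WRITE in fault_typea to
	 * write to a nominally read-only mapping; the access is then
	 * checked against max_protection and forced through the COW path
	 * instead of being rejected outright.)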
	 */

	if (fault_type & VM_PROT_OVERRIDE_WRITE)
		prot = entry->max_protection;
	else
		prot = entry->protection;

	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
	if ((fault_type & prot) != fault_type) {
		rv = KERN_PROTECTION_FAILURE;
		goto done;
	}

	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
	    (entry->eflags & MAP_ENTRY_COW) &&
	    (fault_type & VM_PROT_WRITE) &&
	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
		rv = KERN_PROTECTION_FAILURE;
		goto done;
	}

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */

	*wired = (entry->wired_count != 0);
	if (*wired)
		prot = fault_type = entry->protection;

	/*
	 * If the entry was copy-on-write, we either ...
	 */

	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * If we want to write the page, we may as well handle that
		 * now since we've got the map locked.
		 *
		 * If we don't need to write the page, we just demote the
		 * permissions allowed.
		 */

		if (fault_type & VM_PROT_WRITE) {
			/*
			 * Make a new object, and place it in the object
			 * chain.  Note that no new references have appeared
			 * -- one just moved from the map to the new
			 * object.
			 */

			if (use_read_lock && vm_map_lock_upgrade(map)) {
				use_read_lock = 0;
				goto RetryLookup;
			}
			use_read_lock = 0;

			vm_object_shadow(
			    &entry->object.vm_object,
			    &entry->offset,
			    atop(entry->end - entry->start));

			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
		} else {
			/*
			 * We're attempting to read a copy-on-write page --
			 * don't allow writes.
			 */

			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * Create an object if necessary.
	 */
	if (entry->object.vm_object == NULL &&
	    !map->system_map) {
		if (use_read_lock && vm_map_lock_upgrade(map)) {
			use_read_lock = 0;
			goto RetryLookup;
		}
		use_read_lock = 0;
		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->offset = 0;
	}

	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */

	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
	*object = entry->object.vm_object;

	/*
	 * Return whether this is the only map sharing this data.  On
	 * success we return with a read lock held on the map.  On failure
	 * we return with the map unlocked.
	 */
	*out_prot = prot;
done:
	if (rv == KERN_SUCCESS) {
		if (use_read_lock == 0)
			vm_map_lock_downgrade(map);
	} else if (use_read_lock) {
		vm_map_unlock_read(map);
	} else {
		vm_map_unlock(map);
	}
	return (rv);
}

/*
 * vm_map_lookup_done:
 *
 * Releases locks acquired by a vm_map_lookup
 * (according to the handle returned by that lookup).
 */

void
vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
{
	/*
	 * Unlock the main-level map
	 */
	vm_map_unlock_read(map);
	if (count)
		vm_map_entry_release(count);
}

/*
 * Implement uiomove with VM operations.  This (and its collateral
 * changes) supports every combination of source object modification
 * and COW type operations.
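 *
 * A (hypothetical) caller moving cnt bytes of srcobject, starting at
 * object offset cp, into the user window at uaddr might do:
 *
 *	error = vm_uiomove(&p->p_vmspace->vm_map, srcobject, cp, cnt,
 *	    uaddr, &npages);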
 */
int
vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
	vm_map_t mapa;
	vm_object_t srcobject;
	off_t cp;
	int cnta;
	vm_offset_t uaddra;
	int *npages;
{
	vm_map_t map;
	vm_object_t first_object, oldobject, object;
	vm_map_entry_t entry;
	vm_prot_t prot;
	boolean_t wired;
	int tcnt, rv;
	vm_offset_t uaddr, start, end, tend;
	vm_pindex_t first_pindex, osize, oindex;
	off_t ooffset;
	int cnt;
	int count;

	if (npages)
		*npages = 0;

	cnt = cnta;
	uaddr = uaddra;

	while (cnt > 0) {
		map = mapa;

		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

		if ((vm_map_lookup(&map, uaddr,
		    VM_PROT_READ, &entry, &first_object,
		    &first_pindex, &prot, &wired)) != KERN_SUCCESS) {
			vm_map_entry_release(count);
			return EFAULT;
		}

		vm_map_clip_start(map, entry, uaddr, &count);

		tcnt = cnt;
		tend = uaddr + tcnt;
		if (tend > entry->end) {
			tcnt = entry->end - uaddr;
			tend = entry->end;
		}

		vm_map_clip_end(map, entry, tend, &count);

		start = entry->start;
		end = entry->end;

		osize = atop(tcnt);

		oindex = OFF_TO_IDX(cp);
		if (npages) {
			vm_pindex_t idx;
			for (idx = 0; idx < osize; idx++) {
				vm_page_t m;
				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
					vm_map_lookup_done(map, entry, count);
					return 0;
				}
				/*
				 * disallow busy or invalid pages, but allow
				 * m->busy pages if they are entirely valid.
				 */
				if ((m->flags & PG_BUSY) ||
				    ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
					vm_map_lookup_done(map, entry, count);
					return 0;
				}
			}
		}

		/*
		 * If we are changing an existing map entry, just redirect
		 * the object, and change mappings.
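		 * (Fast path: the entry already maps the vnode object
		 * directly, so only the object/offset references and the
		 * pmap need to be brought up to date.)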
		 */
		if ((first_object->type == OBJT_VNODE) &&
		    ((oldobject = entry->object.vm_object) == first_object)) {

			if ((entry->offset != cp) || (oldobject != srcobject)) {
				/*
				 * Remove old window into the file
				 */
				pmap_remove (map->pmap, uaddr, tend);

				/*
				 * Force copy on write for mmaped regions
				 */
				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);

				/*
				 * Point the object appropriately
				 */
				if (oldobject != srcobject) {

					/*
					 * Set the object optimization hint flag
					 */
					vm_object_set_flag(srcobject, OBJ_OPT);
					vm_object_reference(srcobject);
					entry->object.vm_object = srcobject;

					if (oldobject) {
						vm_object_deallocate(oldobject);
					}
				}

				entry->offset = cp;
				map->timestamp++;
			} else {
				pmap_remove (map->pmap, uaddr, tend);
			}

		} else if ((first_object->ref_count == 1) &&
		    (first_object->size == osize) &&
		    ((first_object->type == OBJT_DEFAULT) ||
		     (first_object->type == OBJT_SWAP)) ) {

			oldobject = first_object->backing_object;

			if ((first_object->backing_object_offset != cp) ||
			    (oldobject != srcobject)) {
				/*
				 * Remove old window into the file
				 */
				pmap_remove (map->pmap, uaddr, tend);

				/*
				 * Remove unneeded old pages
				 */
				vm_object_page_remove(first_object, 0, 0, 0);

				/*
				 * Invalidate swap space
				 */
				if (first_object->type == OBJT_SWAP) {
					swap_pager_freespace(first_object,
					    0,
					    first_object->size);
				}

				/*
				 * Force copy on write for mmaped regions
				 */
				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);

				/*
				 * Point the object appropriately
				 */
				if (oldobject != srcobject) {

					/*
					 * Set the object optimization hint flag
					 */
					vm_object_set_flag(srcobject, OBJ_OPT);
					vm_object_reference(srcobject);

					if (oldobject) {
						LIST_REMOVE(
						    first_object, shadow_list);
						oldobject->shadow_count--;
						/* XXX bump generation? */
						vm_object_deallocate(oldobject);
					}

					LIST_INSERT_HEAD(&srcobject->shadow_head,
					    first_object, shadow_list);
					srcobject->shadow_count++;
					/* XXX bump generation? */

					first_object->backing_object = srcobject;
				}
				first_object->backing_object_offset = cp;
				map->timestamp++;
			} else {
				pmap_remove (map->pmap, uaddr, tend);
			}
		/*
		 * Otherwise, we have to do a logical mmap.
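		 * The existing entry is deleted and re-inserted pointing
		 * at srcobject with MAP_COPY_ON_WRITE, much as if the user
		 * had mmap()ed the file range at this address.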
		 */
		} else {

			vm_object_set_flag(srcobject, OBJ_OPT);
			vm_object_reference(srcobject);

			pmap_remove (map->pmap, uaddr, tend);

			vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
			vm_map_lock_upgrade(map);

			if (entry == &map->header) {
				map->first_free = &map->header;
			} else if (map->first_free->start >= start) {
				map->first_free = entry->prev;
			}

			SAVE_HINT(map, entry->prev);
			vm_map_entry_delete(map, entry, &count);

			object = srcobject;
			ooffset = cp;

			rv = vm_map_insert(map, &count,
			    object, ooffset, start, tend,
			    VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);

			if (rv != KERN_SUCCESS)
				panic("vm_uiomove: could not insert new entry: %d", rv);
		}

		/*
		 * Map the window directly, if it is already in memory
		 */
		pmap_object_init_pt(map->pmap, uaddr,
		    srcobject, oindex, tcnt, 0);

		map->timestamp++;
		vm_map_unlock(map);
		vm_map_entry_release(count);

		cnt -= tcnt;
		uaddr += tcnt;
		cp += tcnt;
		if (npages)
			*npages += osize;
	}
	return 0;
}

/*
 * Performs the copy_on_write operations necessary to allow the virtual copies
 * into user space to work.  This has to be called for write(2) system calls
 * from other processes, file unlinking, and file size shrinkage.
 */
void
vm_freeze_copyopts(object, froma, toa)
	vm_object_t object;
	vm_pindex_t froma, toa;
{
	int rv;
	vm_object_t robject;
	vm_pindex_t idx;

	if ((object == NULL) ||
	    ((object->flags & OBJ_OPT) == 0))
		return;

	if (object->shadow_count > object->ref_count)
		panic("vm_freeze_copyopts: sc > rc");

	while ((robject = LIST_FIRST(&object->shadow_head)) != NULL) {
		vm_pindex_t bo_pindex;
		vm_page_t m_in, m_out;

		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);

		vm_object_reference(robject);

		vm_object_pip_wait(robject, "objfrz");

		if (robject->ref_count == 1) {
			vm_object_deallocate(robject);
			continue;
		}

		vm_object_pip_add(robject, 1);

		for (idx = 0; idx < robject->size; idx++) {

			m_out = vm_page_grab(robject, idx,
			    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

			if (m_out->valid == 0) {
				m_in = vm_page_grab(object, bo_pindex + idx,
				    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
				if (m_in->valid == 0) {
					rv = vm_pager_get_pages(object, &m_in, 1, 0);
					if (rv != VM_PAGER_OK) {
						printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex);
						continue;
					}
					vm_page_deactivate(m_in);
				}

				vm_page_protect(m_in, VM_PROT_NONE);
				pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
				m_out->valid = m_in->valid;
				vm_page_dirty(m_out);
				vm_page_activate(m_out);
				vm_page_wakeup(m_in);
			}
			vm_page_wakeup(m_out);
		}

		object->shadow_count--;
		object->ref_count--;
		LIST_REMOVE(robject, shadow_list);
		robject->backing_object = NULL;
		robject->backing_object_offset = 0;

		vm_object_pip_wakeup(robject);
		vm_object_deallocate(robject);
	}

	vm_object_clear_flag(object, OBJ_OPT);
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

/*
 * vm_map_print:	[ debug ]
 */
DB_SHOW_COMMAND(map, vm_map_print)
{
	static int nlines;
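	/*
	 * DDB entry point: "show map <addr>".
	 */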
	/* XXX convert args. */
	vm_map_t map = (vm_map_t)addr;
	boolean_t full = have_addr;

	vm_map_entry_t entry;

	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
	    (void *)map,
	    (void *)map->pmap, map->nentries, map->timestamp);
	nlines++;

	if (!full && db_indent)
		return;

	db_indent += 2;
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		db_iprintf("map entry %p: start=%p, end=%p\n",
		    (void *)entry, (void *)entry->start, (void *)entry->end);
		nlines++;
		{
			static char *inheritance_name[4] =
			    {"share", "copy", "none", "donate_copy"};

			db_iprintf(" prot=%x/%x/%s",
			    entry->protection,
			    entry->max_protection,
			    inheritance_name[(int)(unsigned char)entry->inheritance]);
			if (entry->wired_count != 0)
				db_printf(", wired");
		}
		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
			/* XXX no %qd in kernel.  Truncate entry->offset. */
			db_printf(", share=%p, offset=0x%lx\n",
			    (void *)entry->object.sub_map,
			    (long)entry->offset);
			nlines++;
			if ((entry->prev == &map->header) ||
			    (entry->prev->object.sub_map !=
				entry->object.sub_map)) {
				db_indent += 2;
				vm_map_print((db_expr_t)(intptr_t)
				    entry->object.sub_map,
				    full, 0, (char *)0);
				db_indent -= 2;
			}
		} else {
			/* XXX no %qd in kernel.  Truncate entry->offset. */
			db_printf(", object=%p, offset=0x%lx",
			    (void *)entry->object.vm_object,
			    (long)entry->offset);
			if (entry->eflags & MAP_ENTRY_COW)
				db_printf(", copy (%s)",
				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
			db_printf("\n");
			nlines++;

			if ((entry->prev == &map->header) ||
			    (entry->prev->object.vm_object !=
				entry->object.vm_object)) {
				db_indent += 2;
				vm_object_print((db_expr_t)(intptr_t)
				    entry->object.vm_object,
				    full, 0, (char *)0);
				nlines += 4;
				db_indent -= 2;
			}
		}
	}
	db_indent -= 2;
	if (db_indent == 0)
		nlines = 0;
}


DB_SHOW_COMMAND(procvm, procvm)
{
	struct proc *p;

	if (have_addr) {
		p = (struct proc *) addr;
	} else {
		p = curproc;
	}

	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
	    (void *)vmspace_pmap(p->p_vmspace));

	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
}

#endif /* DDB */