/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_fault.c	7.11 (Berkeley) 05/04/92
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault(map, vaddr, fault_type, change_wiring)
	vm_map_t	map;
	vm_offset_t	vaddr;
	vm_prot_t	fault_type;
	boolean_t	change_wiring;
{
	vm_object_t		first_object;
	vm_offset_t		first_offset;
	vm_map_entry_t		entry;
	register vm_object_t	object;
	register vm_offset_t	offset;
	register vm_page_t	m;
	vm_page_t		first_m;
	vm_prot_t		prot;
	int			result;
	boolean_t		wired;
	boolean_t		su;
	boolean_t		lookup_still_valid;
	boolean_t		page_exists;
	vm_page_t		old_m;
	vm_object_t		next_object;

	cnt.v_vm_faults++;		/* needs lock XXX */
/*
 *	Recovery actions
 */
#define	FREE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_free(m);				\
	vm_page_unlock_queues();			\
}

#define	RELEASE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_activate(m);				\
	vm_page_unlock_queues();			\
}

#define	UNLOCK_MAP	{				\
	if (lookup_still_valid) {			\
		vm_map_lookup_done(map, entry);		\
		lookup_still_valid = FALSE;		\
	}						\
}

#define	UNLOCK_THINGS	{				\
	object->paging_in_progress--;			\
	vm_object_unlock(object);			\
	if (object != first_object) {			\
		vm_object_lock(first_object);		\
		FREE_PAGE(first_m);			\
		first_object->paging_in_progress--;	\
		vm_object_unlock(first_object);		\
	}						\
	UNLOCK_MAP;					\
}

#define	UNLOCK_AND_DEALLOCATE	{			\
	UNLOCK_THINGS;					\
	vm_object_deallocate(first_object);		\
}

	RetryFault: ;

	/*
	 *	Find the backing store object and offset into
	 *	it to begin the search.
	 */

	if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
			&first_object, &first_offset,
			&prot, &wired, &su)) != KERN_SUCCESS) {
		return(result);
	}
	lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot;

	first_m = NULL;

	/*
	 *	Make a reference to this object to
	 *	prevent its disposal while we are messing with
	 *	it.  Once we have the reference, the map is free
	 *	to be diddled.  Since objects reference their
	 *	shadows (and copies), they will stay around as well.
	 */

	vm_object_lock(first_object);

	first_object->ref_count++;
	first_object->paging_in_progress++;

	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *		Note that we cannot hold any locks during the
	 *		pager access or when waiting for memory, so
	 *		we use a busy page then.
	 *
	 *		Note also that we aren't as concerned about
	 *		more than one thread attempting to pager_data_unlock
	 *		the same page at once, so we don't hold the page
	 *		as busy then, but do record the highest unlock
	 *		value so far.  [Unlock requests may also be delivered
	 *		out of order.]
	 *
	 *	2)	Once we have a busy page, we must remove it from
	 *		the pageout queues, so that the pageout daemon
	 *		will not grab it away.
	 *
	 *	3)	To prevent another thread from racing us down the
	 *		shadow chain and entering a new page in the top
	 *		object before we do, we must keep a busy page in
	 *		the top object while following the shadow chain.
	 *
	 *	4)	We must increment paging_in_progress on any object
	 *		for which we have a busy page, to prevent
	 *		vm_object_collapse from removing the busy page
	 *		without our noticing.
 */

	/*
	 *	Search for the page at object/offset.
	 */

	object = first_object;
	offset = first_offset;

	/*
	 *	See whether this page is resident
	 */

	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != NULL) {
			/*
			 *	If the page is being brought in,
			 *	wait for it and then retry.
			 */
			if (m->busy) {
#ifdef DOTHREADS
				int	wait_result;

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			if (m->absent)
				panic("vm_fault: absent");

			/*
			 *	If the desired access to this page has
			 *	been locked out, request that it be unlocked.
			 */

			if (fault_type & m->page_lock) {
#ifdef DOTHREADS
				int	wait_result;

				if ((fault_type & m->unlock_request) != fault_type)
					panic("vm_fault: pager_data_unlock");

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				if ((fault_type & m->unlock_request) != fault_type)
					panic("vm_fault: pager_data_unlock");

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			/*
			 *	Remove the page from the pageout daemon's
			 *	reach while we play with it.
			 */

			vm_page_lock_queues();
			if (m->inactive) {
				queue_remove(&vm_page_queue_inactive, m,
						vm_page_t, pageq);
				m->inactive = FALSE;
				cnt.v_inactive_count--;
				cnt.v_reactivated++;
			}

			if (m->active) {
				queue_remove(&vm_page_queue_active, m,
						vm_page_t, pageq);
				m->active = FALSE;
				cnt.v_active_count--;
			}
			vm_page_unlock_queues();

			/*
			 *	Mark page busy for other threads.
			 */
			m->busy = TRUE;
			m->absent = FALSE;
			break;
		}

		if (((object->pager != NULL) &&
				(!change_wiring || wired))
		    || (object == first_object)) {

			/*
			 *	Allocate a new page for this object/offset
			 *	pair.
			 */

			m = vm_page_alloc(object, offset);

			if (m == NULL) {
				UNLOCK_AND_DEALLOCATE;
				VM_WAIT;
				goto RetryFault;
			}
		}

		if ((object->pager != NULL) &&
				(!change_wiring || wired)) {
			int rv;

			/*
			 *	Now that we have a busy page, we can
			 *	release the object lock.
			 */
			vm_object_unlock(object);

			/*
			 *	Call the pager to retrieve the data, if any,
			 *	after releasing the lock on the map.
			 */
			UNLOCK_MAP;

			rv = vm_pager_get(object->pager, m, TRUE);
			if (rv == VM_PAGER_OK) {
				/*
				 *	Found the page.
				 *	Leave it busy while we play with it.
				 */
				vm_object_lock(object);

				/*
				 *	Relookup in case pager changed page.
				 *	Pager is responsible for disposition
				 *	of old page if moved.
				 */
				m = vm_page_lookup(object, offset);

				cnt.v_pageins++;
				m->fake = FALSE;
				m->clean = TRUE;
				pmap_clear_modify(VM_PAGE_TO_PHYS(m));
				break;
			}

			/*
			 *	Remove the bogus page (which does not
			 *	exist at this object/offset); before
			 *	doing so, we must get back our object
			 *	lock to preserve our invariant.
			 *
			 *	Also wake up any other thread that may want
			 *	to bring in this page.
			 *
			 *	If this is the top-level object, we must
			 *	leave the busy page to prevent another
			 *	thread from rushing past us, and inserting
			 *	the page in that object at the same time
			 *	that we are.
			 */

			vm_object_lock(object);
			/*
			 * Data outside the range of the pager; an error
			 */
			if (rv == VM_PAGER_BAD) {
				FREE_PAGE(m);
				UNLOCK_AND_DEALLOCATE;
				return(KERN_PROTECTION_FAILURE); /* XXX */
			}
			if (object != first_object) {
				FREE_PAGE(m);
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
				panic("vm_fault: free page");	/* XXX */
			}
		}

		/*
		 * We get here if the object has no pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (object == first_object)
			first_m = m;

		/*
		 *	Move on to the next object.  Lock the next
		 *	object before unlocking the current one.
		 */

		offset += object->shadow_offset;
		next_object = object->shadow;
		if (next_object == NULL) {
			/*
			 *	If there's no object left, fill the page
			 *	in the top object with zeros.
			 */
			if (object != first_object) {
				object->paging_in_progress--;
				vm_object_unlock(object);

				object = first_object;
				offset = first_offset;
				m = first_m;
				vm_object_lock(object);
			}
			first_m = NULL;

			vm_page_zero_fill(m);
			cnt.v_zfod++;
			m->fake = FALSE;
			m->absent = FALSE;
			break;
		}
		else {
			vm_object_lock(next_object);
			if (object != first_object)
				object->paging_in_progress--;
			vm_object_unlock(object);
			object = next_object;
			object->paging_in_progress++;
		}
	}

	if (m->absent || m->active || m->inactive || !m->busy)
		panic("vm_fault: absent or active or inactive or not busy after main loop");

	/*
	 *	PAGE HAS BEEN FOUND.
	 *	[Loop invariant still holds -- the object lock
	 *	is held.]
	 */

	old_m = m;	/* save page that would be copied */

	/*
	 *	If the page is being written, but isn't
	 *	already owned by the top-level object,
	 *	we have to copy it into a new page owned
	 *	by the top-level object.
	 */

	if (object != first_object) {
		/*
		 *	We only really need to copy if we
		 *	want to write it.
		 */

		if (fault_type & VM_PROT_WRITE) {

			/*
			 *	If we try to collapse first_object at this
			 *	point, we may deadlock when we try to get
			 *	the lock on an intermediate object (since we
			 *	have the bottom object locked).  We can't
			 *	unlock the bottom object, because the page
			 *	we found may move (by collapse) if we do.
			 *
			 *	Instead, we first copy the page.  Then, when
			 *	we have no more use for the bottom object,
			 *	we unlock it and try to collapse.
			 *
			 *	Note that we copy the page even if we didn't
			 *	need to... that's the breaks.
			 */

			/*
			 *	We already have an empty page in
			 *	first_object - use it.
			 */

			vm_page_copy(m, first_m);
			first_m->fake = FALSE;
			first_m->absent = FALSE;

			/*
			 *	If another map is truly sharing this
			 *	page with us, we have to flush all
			 *	uses of the original page, since we
			 *	can't distinguish those which want the
			 *	original from those which need the
			 *	new copy.
			 *
			 *	XXX If we know that only one map has
			 *	access to this page, then we could
			 *	avoid the pmap_page_protect() call.
			 */

			vm_page_lock_queues();
			vm_page_activate(m);
			pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
			vm_page_unlock_queues();

			/*
			 *	We no longer need the old page or object.
			 */
			PAGE_WAKEUP(m);
			object->paging_in_progress--;
			vm_object_unlock(object);

			/*
			 *	Only use the new page below...
			 */

			cnt.v_cow_faults++;
			m = first_m;
			object = first_object;
			offset = first_offset;

			/*
			 *	Now that we've gotten the copy out of the
			 *	way, let's try to collapse the top object.
			 */
			vm_object_lock(object);
			/*
			 *	But we have to play ugly games with
			 *	paging_in_progress to do that...
			 */
			object->paging_in_progress--;
			vm_object_collapse(object);
			object->paging_in_progress++;
		}
		else {
			prot &= (~VM_PROT_WRITE);
			m->copy_on_write = TRUE;
		}
	}

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before copy object handling");

	/*
	 *	If the page is being written, but hasn't been
	 *	copied to the copy-object, we have to copy it there.
	 */
	RetryCopy:
	if (first_object->copy != NULL) {
		vm_object_t	copy_object = first_object->copy;
		vm_offset_t	copy_offset;
		vm_page_t	copy_m;

		/*
		 *	We only need to copy if we want to write it.
		 */
		if ((fault_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
			m->copy_on_write = TRUE;
		}
		else {
			/*
			 *	Try to get the lock on the copy_object.
			 */
			if (!vm_object_lock_try(copy_object)) {
				vm_object_unlock(object);
				/* should spin a bit here... */
				vm_object_lock(object);
				goto RetryCopy;
			}

			/*
			 *	Make another reference to the copy-object,
			 *	to keep it from disappearing during the
			 *	copy.
			 */
			copy_object->ref_count++;

			/*
			 *	Does the page exist in the copy?
			 */
			copy_offset = first_offset
				- copy_object->shadow_offset;
			copy_m = vm_page_lookup(copy_object, copy_offset);
			if (page_exists = (copy_m != NULL)) {
				if (copy_m->busy) {
#ifdef DOTHREADS
					int	wait_result;

					/*
					 *	If the page is being brought
					 *	in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_block();
					wait_result = current_thread()->wait_result;
					vm_object_deallocate(first_object);
					if (wait_result != THREAD_AWAKENED)
						return(KERN_SUCCESS);
					goto RetryFault;
#else
					/*
					 *	If the page is being brought
					 *	in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_block();
					vm_object_deallocate(first_object);
					goto RetryFault;
#endif
				}
			}

			/*
			 *	If the page is not in memory (in the object)
			 *	and the object has a pager, we have to check
			 *	if the pager has the data in secondary
			 *	storage.
			 */
			if (!page_exists) {

				/*
				 *	If we don't allocate a (blank) page
				 *	here... another thread could try
				 *	to page it in, allocate a page, and
				 *	then block on the busy page in its
				 *	shadow (first_object).  Then we'd
				 *	trip over the busy page after we
				 *	found that the copy_object's pager
				 *	doesn't have the page...
				 */
				copy_m = vm_page_alloc(copy_object,
							copy_offset);
				if (copy_m == NULL) {
					/*
					 *	Wait for a page, then retry.
					 */
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_AND_DEALLOCATE;
					VM_WAIT;
					goto RetryFault;
				}

				if (copy_object->pager != NULL) {
					vm_object_unlock(object);
					vm_object_unlock(copy_object);
					UNLOCK_MAP;

					page_exists = vm_pager_has_page(
						copy_object->pager,
						(copy_offset + copy_object->paging_offset));

					vm_object_lock(copy_object);

					/*
					 * Since the map is unlocked, someone
					 * else could have copied this object
					 * and put a different copy_object
					 * between the two.  Or, the last
					 * reference to the copy-object (other
					 * than the one we have) may have
					 * disappeared - if that has happened,
					 * we don't need to make the copy.
					 */
					if (copy_object->shadow != object ||
					    copy_object->ref_count == 1) {
						/*
						 *	Gaah... start over!
						 */
						FREE_PAGE(copy_m);
						vm_object_unlock(copy_object);
						vm_object_deallocate(copy_object);
							/* may block */
						vm_object_lock(object);
						goto RetryCopy;
					}
					vm_object_lock(object);

					if (page_exists) {
						/*
						 *	We didn't need the page
						 */
						FREE_PAGE(copy_m);
					}
				}
			}
			if (!page_exists) {
				/*
				 *	Must copy page into copy-object.
				 */
				vm_page_copy(m, copy_m);
				copy_m->fake = FALSE;
				copy_m->absent = FALSE;

				/*
				 * Things to remember:
				 * 1. The copied page must be marked 'dirty'
				 *    so it will be paged out to the copy
				 *    object.
				 * 2. If the old page was in use by any users
				 *    of the copy-object, it must be removed
				 *    from all pmaps.  (We can't know which
				 *    pmaps use it.)
				 */
				vm_page_lock_queues();
				pmap_page_protect(VM_PAGE_TO_PHYS(old_m),
						  VM_PROT_NONE);
				copy_m->clean = FALSE;
				vm_page_activate(copy_m);	/* XXX */
				vm_page_unlock_queues();

				PAGE_WAKEUP(copy_m);
			}
			/*
			 *	The reference count on copy_object must be
			 *	at least 2: one for our extra reference,
			 *	and at least one from the outside world
			 *	(we checked that when we last locked
			 *	copy_object).
			 */
			copy_object->ref_count--;
			vm_object_unlock(copy_object);
			m->copy_on_write = FALSE;
		}
	}

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before retrying lookup");

	/*
	 *	We must verify that the maps have not changed
	 *	since our last lookup.
	 */

	if (!lookup_still_valid) {
		vm_object_t	retry_object;
		vm_offset_t	retry_offset;
		vm_prot_t	retry_prot;

		/*
		 *	Since map entries may be pageable, make sure we can
		 *	take a page fault on them.
		 */
		vm_object_unlock(object);

		/*
		 *	To avoid trying to write_lock the map while another
		 *	thread has it read_locked (in vm_map_pageable), we
		 *	do not try for write permission.  If the page is
		 *	still writable, we will get write permission.  If it
		 *	is not, or has been marked needs_copy, we enter the
		 *	mapping without write permission, and will merely
		 *	take another fault.
		 */
		result = vm_map_lookup(&map, vaddr,
				fault_type & ~VM_PROT_WRITE, &entry,
				&retry_object, &retry_offset, &retry_prot,
				&wired, &su);

		vm_object_lock(object);

		/*
		 *	If we don't need the page any longer, put it on the
		 *	active list (the easiest thing to do here).  If no
		 *	one needs it, pageout will grab it eventually.
		 */

		if (result != KERN_SUCCESS) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			return(result);
		}

		lookup_still_valid = TRUE;

		if ((retry_object != first_object) ||
				(retry_offset != first_offset)) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}

		/*
		 *	Check whether the protection has changed or the object
		 *	has been copied while we left the map unlocked.
		 *	Changing from read to write permission is OK - we leave
		 *	the page write-protected, and catch the write fault.
		 *	Changing from write to read permission means that we
		 *	can't mark the page write-enabled after all.
		 */
		prot &= retry_prot;
		if (m->copy_on_write)
			prot &= ~VM_PROT_WRITE;
	}

	/*
	 * (the various bits we're fiddling with here are locked by
	 * the object's lock)
	 */

	/* XXX This distorts the meaning of the copy_on_write bit */

	if (prot & VM_PROT_WRITE)
		m->copy_on_write = FALSE;

	/*
	 *	It's critically important that a wired-down page be faulted
	 *	only once in each map for which it is wired.
	 */

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before pmap_enter");

	vm_object_unlock(object);

	/*
	 *	Put this page into the physical map.
	 *	We had to do the unlock above because pmap_enter
	 *	may cause other faults.   We don't put the
	 *	page back on the active queue until later so
	 *	that the page-out daemon won't find us (yet).
	 */

	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m),
			prot & ~(m->page_lock), wired);

	/*
	 *	If the page is not wired down, then put it where the
	 *	pageout daemon can find it.
	 */
	vm_object_lock(object);
	vm_page_lock_queues();
	if (change_wiring) {
		if (wired)
			vm_page_wire(m);
		else
			vm_page_unwire(m);
	}
	else
		vm_page_activate(m);
	vm_page_unlock_queues();

	/*
	 *	Unlock everything, and return
	 */

	PAGE_WAKEUP(m);
	UNLOCK_AND_DEALLOCATE;

	return(KERN_SUCCESS);

}

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
void vm_fault_wire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{

	register vm_offset_t	va;
	register pmap_t		pmap;

	pmap = vm_map_pmap(map);

	/*
	 *	Inform the physical mapping system that the
	 *	range of addresses may not fault, so that
	 *	page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, start, end, FALSE);

	/*
	 *	We simulate a fault to get the page and enter it
	 *	in the physical map.
	 */

	for (va = start; va < end; va += PAGE_SIZE) {
		(void) vm_fault(map, va, VM_PROT_NONE, TRUE);
	}
}


/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void vm_fault_unwire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{

	register vm_offset_t	va, pa;
	register pmap_t		pmap;

	pmap = vm_map_pmap(map);

	/*
	 *	Since the pages are wired down, we must be able to
	 *	get their mappings from the physical map system.
	 */

	vm_page_lock_queues();

	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa == (vm_offset_t) 0) {
			panic("unwire: page not in pmap");
		}
		pmap_change_wiring(pmap, va, FALSE);
		vm_page_unwire(PHYS_TO_VM_PAGE(pa));
	}
	vm_page_unlock_queues();

	/*
	 *	Inform the physical mapping system that the range
	 *	of addresses may fault, so that page tables and
	 *	such may be unwired themselves.
	 */

	pmap_pageable(pmap, start, end, TRUE);

}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */

void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
	vm_map_t	dst_map;
	vm_map_t	src_map;
	vm_map_entry_t	dst_entry;
	vm_map_entry_t	src_entry;
{

	vm_object_t	dst_object;
	vm_object_t	src_object;
	vm_offset_t	dst_offset;
	vm_offset_t	src_offset;
	vm_prot_t	prot;
	vm_offset_t	vaddr;
	vm_page_t	dst_m;
	vm_page_t	src_m;

#ifdef	lint
	src_map++;
#endif	/* lint */

	src_object = src_entry->object.vm_object;
	src_offset = src_entry->offset;

	/*
	 *	Create the top-level object for the destination entry.
	 *	(Doesn't actually shadow anything - we copy the pages
	 *	directly.)
	 */
	dst_object = vm_object_allocate(
			(vm_size_t) (dst_entry->end - dst_entry->start));

	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;

	prot = dst_entry->max_protection;

	/*
	 *	Loop through all of the pages in the entry's range, copying
	 *	each one from the source object (it should be there) to the
	 *	destination object.
	 */
	for (vaddr = dst_entry->start, dst_offset = 0;
	     vaddr < dst_entry->end;
	     vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

		/*
		 *	Allocate a page in the destination object
		 */
		vm_object_lock(dst_object);
		do {
			dst_m = vm_page_alloc(dst_object, dst_offset);
			if (dst_m == NULL) {
				vm_object_unlock(dst_object);
				VM_WAIT;
				vm_object_lock(dst_object);
			}
		} while (dst_m == NULL);

		/*
		 *	Find the page in the source object, and copy it in.
		 *	(Because the source is wired down, the page will be
		 *	in memory.)
		 */
		vm_object_lock(src_object);
		src_m = vm_page_lookup(src_object, dst_offset + src_offset);
		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");

		vm_page_copy(src_m, dst_m);

		/*
		 *	Enter it in the pmap...
		 */
		vm_object_unlock(src_object);
		vm_object_unlock(dst_object);

		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
				prot, FALSE);

		/*
		 *	Mark it no longer busy, and put it on the active list.
		 */
		vm_object_lock(dst_object);
		vm_page_lock_queues();
		vm_page_activate(dst_m);
		vm_page_unlock_queues();
		PAGE_WAKEUP(dst_m);
		vm_object_unlock(dst_object);
	}

}