/*
 * Copyright (c) 1985, Avadis Tevanian, Jr., Michael Wayne Young
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * The CMU software License Agreement specifies the terms and conditions
 * for use and redistribution.
 *
 *      @(#)vm_fault.c  7.1 (Berkeley) 12/05/90
 */

/*
 *      Page fault handling module.
 */

#include "param.h"
#include "../vm/vm_param.h"
#include "../vm/vm_map.h"
#include "../vm/vm_object.h"
#include "../vm/vm_page.h"
#include "../vm/pmap.h"
#include "../vm/vm_statistics.h"
#include "../vm/vm_pageout.h"

/*
 *      vm_fault:
 *
 *      Handle a page fault occurring at the given address,
 *      requiring the given permissions, in the map specified.
 *      If successful, the page is inserted into the
 *      associated physical map.
 *
 *      NOTE: the given address should be truncated to the
 *      proper page address.
 *
 *      KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *      a standard error specifying why the fault is fatal is returned.
 *
 *      The map in question must be referenced, and remains so.
 *      Caller may hold no locks.
 */
vm_fault(map, vaddr, fault_type, change_wiring)
        vm_map_t        map;
        vm_offset_t     vaddr;
        vm_prot_t       fault_type;
        boolean_t       change_wiring;
{
        vm_object_t             first_object;
        vm_offset_t             first_offset;
        vm_map_entry_t          entry;
        register vm_object_t    object;
        register vm_offset_t    offset;
        register vm_page_t      m;
        vm_page_t               first_m;
        vm_prot_t               prot;
        int                     result;
        boolean_t               wired;
        boolean_t               su;
        boolean_t               lookup_still_valid;
        boolean_t               page_exists;
        vm_page_t               old_m;
        vm_object_t             next_object;

        vm_stat.faults++;               /* needs lock XXX */

/*
 *      Recovery actions
 */
#define FREE_PAGE(m)    {                               \
        PAGE_WAKEUP(m);                                 \
        vm_page_lock_queues();                          \
        vm_page_free(m);                                \
        vm_page_unlock_queues();                        \
}

#define RELEASE_PAGE(m) {                               \
        PAGE_WAKEUP(m);                                 \
        vm_page_lock_queues();                          \
        vm_page_activate(m);                            \
        vm_page_unlock_queues();                        \
}

#define UNLOCK_MAP      {                               \
        if (lookup_still_valid) {                       \
                vm_map_lookup_done(map, entry);         \
                lookup_still_valid = FALSE;             \
        }                                               \
}

#define UNLOCK_THINGS   {                               \
        object->paging_in_progress--;                   \
        vm_object_unlock(object);                       \
        if (object != first_object) {                   \
                vm_object_lock(first_object);           \
                FREE_PAGE(first_m);                     \
                first_object->paging_in_progress--;     \
                vm_object_unlock(first_object);         \
        }                                               \
        UNLOCK_MAP;                                     \
}

#define UNLOCK_AND_DEALLOCATE   {                       \
        UNLOCK_THINGS;                                  \
        vm_object_deallocate(first_object);             \
}

    RetryFault: ;

        /*
         *      Find the backing store object and offset into
         *      it to begin the search.
         */

        if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
                        &first_object, &first_offset,
                        &prot, &wired, &su)) != KERN_SUCCESS) {
                return(result);
        }
        lookup_still_valid = TRUE;

        if (wired)
                fault_type = prot;

        first_m = VM_PAGE_NULL;

        /*
         *      Make a reference to this object to
         *      prevent its disposal while we are messing with
         *      it.  Once we have the reference, the map is free
         *      to be diddled.  Since objects reference their
         *      shadows (and copies), they will stay around as well.
         */
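        /*
         *      (The reference and the paging_in_progress hold taken
         *      just below are paired with the recovery macros defined
         *      above: every error and retry path funnels through
         *      UNLOCK_AND_DEALLOCATE to undo them.)
         */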
        vm_object_lock(first_object);

        first_object->ref_count++;
        first_object->paging_in_progress++;

        /*
         *      INVARIANTS (through entire routine):
         *
         *      1)      At all times, we must either have the object
         *              lock or a busy page in some object to prevent
         *              some other thread from trying to bring in
         *              the same page.
         *
         *              Note that we cannot hold any locks during the
         *              pager access or when waiting for memory, so
         *              we use a busy page then.
         *
         *              Note also that we aren't as concerned about
         *              more than one thread attempting to pager_data_unlock
         *              the same page at once, so we don't hold the page
         *              as busy then, but do record the highest unlock
         *              value so far.  [Unlock requests may also be delivered
         *              out of order.]
         *
         *      2)      Once we have a busy page, we must remove it from
         *              the pageout queues, so that the pageout daemon
         *              will not grab it away.
         *
         *      3)      To prevent another thread from racing us down the
         *              shadow chain and entering a new page in the top
         *              object before we do, we must keep a busy page in
         *              the top object while following the shadow chain.
         *
         *      4)      We must increment paging_in_progress on any object
         *              for which we have a busy page, to prevent
         *              vm_object_collapse from removing the busy page
         *              without our noticing.
         */

        /*
         *      Search for the page at object/offset.
         */

        object = first_object;
        offset = first_offset;

        /*
         *      See whether this page is resident
         */

        while (TRUE) {
                m = vm_page_lookup(object, offset);
                if (m != VM_PAGE_NULL) {
                        /*
                         *      If the page is being brought in,
                         *      wait for it and then retry.
                         */
                        if (m->busy) {
#ifdef DOTHREADS
                                int     wait_result;

                                PAGE_ASSERT_WAIT(m, !change_wiring);
                                UNLOCK_THINGS;
                                thread_block();
                                wait_result = current_thread()->wait_result;
                                vm_object_deallocate(first_object);
                                if (wait_result != THREAD_AWAKENED)
                                        return(KERN_SUCCESS);
                                goto RetryFault;
#else
                                PAGE_ASSERT_WAIT(m, !change_wiring);
                                UNLOCK_THINGS;
                                thread_block();
                                vm_object_deallocate(first_object);
                                goto RetryFault;
#endif
                        }

                        if (m->absent)
                                panic("vm_fault: absent");

                        /*
                         *      If the desired access to this page has
                         *      been locked out, request that it be unlocked.
                         */

                        if (fault_type & m->page_lock) {
#ifdef DOTHREADS
                                int     wait_result;

                                if ((fault_type & m->unlock_request) != fault_type)
                                        panic("vm_fault: pager_data_unlock");

                                PAGE_ASSERT_WAIT(m, !change_wiring);
                                UNLOCK_THINGS;
                                thread_block();
                                wait_result = current_thread()->wait_result;
                                vm_object_deallocate(first_object);
                                if (wait_result != THREAD_AWAKENED)
                                        return(KERN_SUCCESS);
                                goto RetryFault;
#else
                                if ((fault_type & m->unlock_request) != fault_type)
                                        panic("vm_fault: pager_data_unlock");

                                PAGE_ASSERT_WAIT(m, !change_wiring);
                                UNLOCK_THINGS;
                                thread_block();
                                vm_object_deallocate(first_object);
                                goto RetryFault;
#endif
                        }

                        /*
                         *      Remove the page from the pageout daemon's
                         *      reach while we play with it.
                         */
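                        /*
                         *      (Invariant 2 in action: the page comes off
                         *      the paging queues before we rely on its
                         *      busy bit; finding it on the inactive queue
                         *      counts as a reactivation in the statistics.)
                         */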
                        vm_page_lock_queues();
                        if (m->inactive) {
                                queue_remove(&vm_page_queue_inactive, m,
                                                vm_page_t, pageq);
                                m->inactive = FALSE;
                                vm_page_inactive_count--;
                                vm_stat.reactivations++;
                        }

                        if (m->active) {
                                queue_remove(&vm_page_queue_active, m,
                                                vm_page_t, pageq);
                                m->active = FALSE;
                                vm_page_active_count--;
                        }
                        vm_page_unlock_queues();

                        /*
                         *      Mark page busy for other threads.
                         */
                        m->busy = TRUE;
                        m->absent = FALSE;
                        break;
                }

                if (((object->pager != vm_pager_null) &&
                                (!change_wiring || wired))
                    || (object == first_object)) {

                        /*
                         *      Allocate a new page for this object/offset
                         *      pair.
                         */

                        m = vm_page_alloc(object, offset);

                        if (m == VM_PAGE_NULL) {
                                UNLOCK_AND_DEALLOCATE;
                                VM_WAIT;
                                goto RetryFault;
                        }
                }

                if ((object->pager != vm_pager_null) &&
                                (!change_wiring || wired)) {
                        int rv;

                        /*
                         *      Now that we have a busy page, we can
                         *      release the object lock.
                         */
                        vm_object_unlock(object);

                        /*
                         *      Call the pager to retrieve the data, if any,
                         *      after releasing the lock on the map.
                         */
                        UNLOCK_MAP;

                        rv = vm_pager_get(object->pager, m, TRUE);
                        if (rv == VM_PAGER_OK) {
                                /*
                                 *      Found the page.
                                 *      Leave it busy while we play with it.
                                 */
                                vm_object_lock(object);

                                /*
                                 *      Relookup in case pager changed page.
                                 *      Pager is responsible for disposition
                                 *      of old page if moved.
                                 */
                                m = vm_page_lookup(object, offset);

                                vm_stat.pageins++;
                                m->fake = FALSE;
                                pmap_clear_modify(VM_PAGE_TO_PHYS(m));
                                break;
                        }

                        /*
                         *      Remove the bogus page (which does not
                         *      exist at this object/offset); before
                         *      doing so, we must get back our object
                         *      lock to preserve our invariant.
                         *
                         *      Also wake up any other thread that may want
                         *      to bring in this page.
                         *
                         *      If this is the top-level object, we must
                         *      leave the busy page to prevent another
                         *      thread from rushing past us, and inserting
                         *      the page in that object at the same time
                         *      that we are.
                         */

                        vm_object_lock(object);
                        /*
                         *      Data outside the range of the pager; an error
                         */
                        if (rv == VM_PAGER_BAD) {
                                FREE_PAGE(m);
                                UNLOCK_AND_DEALLOCATE;
                                return(KERN_PROTECTION_FAILURE);        /* XXX */
                        }
                        if (object != first_object) {
                                FREE_PAGE(m);
                                /*
                                 *      XXX - we cannot just fall out at this
                                 *      point, m has been freed and is invalid!
                                 */
                        }
                }

                /*
                 *      We get here if the object has no pager (or unwiring)
                 *      or the pager doesn't have the page.
                 */
                if (object == first_object)
                        first_m = m;

                /*
                 *      Move on to the next object.  Lock the next
                 *      object before unlocking the current one.
                 */

                offset += object->shadow_offset;
                next_object = object->shadow;
                if (next_object == VM_OBJECT_NULL) {
                        /*
                         *      If there's no object left, fill the page
                         *      in the top object with zeros.
                         */
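                        /*
                         *      (If the search moved down the shadow chain,
                         *      the blank page reserved in first_object --
                         *      first_m -- is the one to zero-fill, so back
                         *      up to it and drop the hold on the bottom
                         *      object first.)
                         */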
                        if (object != first_object) {
                                object->paging_in_progress--;
                                vm_object_unlock(object);

                                object = first_object;
                                offset = first_offset;
                                m = first_m;
                                vm_object_lock(object);
                        }
                        first_m = VM_PAGE_NULL;

                        vm_page_zero_fill(m);
                        vm_stat.zero_fill_count++;
                        m->fake = FALSE;
                        m->absent = FALSE;
                        break;
                }
                else {
                        vm_object_lock(next_object);
                        if (object != first_object)
                                object->paging_in_progress--;
                        vm_object_unlock(object);
                        object = next_object;
                        object->paging_in_progress++;
                }
        }

        if (m->absent || m->active || m->inactive || !m->busy)
                panic("vm_fault: absent or active or inactive or not busy after main loop");

        /*
         *      PAGE HAS BEEN FOUND.
         *      [Loop invariant still holds -- the object lock
         *      is held.]
         */

        old_m = m;      /* save page that would be copied */

        /*
         *      If the page is being written, but isn't
         *      already owned by the top-level object,
         *      we have to copy it into a new page owned
         *      by the top-level object.
         */

        if (object != first_object) {
                /*
                 *      We only really need to copy if we
                 *      want to write it.
                 */

                if (fault_type & VM_PROT_WRITE) {

                        /*
                         *      If we try to collapse first_object at this
                         *      point, we may deadlock when we try to get
                         *      the lock on an intermediate object (since we
                         *      have the bottom object locked).  We can't
                         *      unlock the bottom object, because the page
                         *      we found may move (by collapse) if we do.
                         *
                         *      Instead, we first copy the page.  Then, when
                         *      we have no more use for the bottom object,
                         *      we unlock it and try to collapse.
                         *
                         *      Note that we copy the page even if we didn't
                         *      need to... that's the breaks.
                         */

                        /*
                         *      We already have an empty page in
                         *      first_object - use it.
                         */

                        vm_page_copy(m, first_m);
                        first_m->fake = FALSE;
                        first_m->absent = FALSE;

                        /*
                         *      If another map is truly sharing this
                         *      page with us, we have to flush all
                         *      uses of the original page, since we
                         *      can't distinguish those which want the
                         *      original from those which need the
                         *      new copy.
                         */

                        vm_page_lock_queues();
                        if (!su) {
                                /*
                                 *      Also, once it's no longer in
                                 *      use by any maps, move it to
                                 *      the inactive queue instead.
                                 */

                                vm_page_deactivate(m);
                                pmap_remove_all(VM_PAGE_TO_PHYS(m));
                        }
                        else {
                                /*
                                 *      Old page is only (possibly)
                                 *      in use by faulting map.  We
                                 *      should do a pmap_remove on
                                 *      that mapping, but we know
                                 *      that pmap_enter will remove
                                 *      the old mapping before
                                 *      inserting the new one.
                                 */
                                vm_page_activate(m);
                        }
                        vm_page_unlock_queues();

                        /*
                         *      We no longer need the old page or object.
                         */
                        PAGE_WAKEUP(m);
                        object->paging_in_progress--;
                        vm_object_unlock(object);

                        /*
                         *      Only use the new page below...
                         */

                        vm_stat.cow_faults++;
                        m = first_m;
                        object = first_object;
                        offset = first_offset;

                        /*
                         *      Now that we've gotten the copy out of the
                         *      way, let's try to collapse the top object.
                         */
                        vm_object_lock(object);
                        /*
                         *      But we have to play ugly games with
                         *      paging_in_progress to do that...
                         */
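                        /*
                         *      (vm_object_collapse backs off from any
                         *      object with paging in progress, so our
                         *      own hold is dropped for the duration of
                         *      the call and then reasserted.)
                         */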
                        object->paging_in_progress--;
                        vm_object_collapse(object);
                        object->paging_in_progress++;
                }
                else {
                        prot &= (~VM_PROT_WRITE);
                        m->copy_on_write = TRUE;
                }
        }

        if (m->active || m->inactive)
                panic("vm_fault: active or inactive before copy object handling");

        /*
         *      If the page is being written, but hasn't been
         *      copied to the copy-object, we have to copy it there.
         */
    RetryCopy:
        if (first_object->copy != VM_OBJECT_NULL) {
                vm_object_t     copy_object = first_object->copy;
                vm_offset_t     copy_offset;
                vm_page_t       copy_m;

                /*
                 *      We only need to copy if we want to write it.
                 */
                if ((fault_type & VM_PROT_WRITE) == 0) {
                        prot &= ~VM_PROT_WRITE;
                        m->copy_on_write = TRUE;
                }
                else {
                        /*
                         *      Try to get the lock on the copy_object.
                         */
                        if (!vm_object_lock_try(copy_object)) {
                                vm_object_unlock(object);
                                /* should spin a bit here... */
                                vm_object_lock(object);
                                goto RetryCopy;
                        }

                        /*
                         *      Make another reference to the copy-object,
                         *      to keep it from disappearing during the
                         *      copy.
                         */
                        copy_object->ref_count++;

                        /*
                         *      Does the page exist in the copy?
                         */
                        copy_offset = first_offset
                                - copy_object->shadow_offset;
                        copy_m = vm_page_lookup(copy_object, copy_offset);
                        if (page_exists = (copy_m != VM_PAGE_NULL)) {
                                if (copy_m->busy) {
#ifdef DOTHREADS
                                        int     wait_result;

                                        /*
                                         *      If the page is being brought
                                         *      in, wait for it and then retry.
                                         */
                                        PAGE_ASSERT_WAIT(copy_m, !change_wiring);
                                        RELEASE_PAGE(m);
                                        copy_object->ref_count--;
                                        vm_object_unlock(copy_object);
                                        UNLOCK_THINGS;
                                        thread_block();
                                        wait_result = current_thread()->wait_result;
                                        vm_object_deallocate(first_object);
                                        if (wait_result != THREAD_AWAKENED)
                                                return(KERN_SUCCESS);
                                        goto RetryFault;
#else
                                        /*
                                         *      If the page is being brought
                                         *      in, wait for it and then retry.
                                         */
                                        PAGE_ASSERT_WAIT(copy_m, !change_wiring);
                                        RELEASE_PAGE(m);
                                        copy_object->ref_count--;
                                        vm_object_unlock(copy_object);
                                        UNLOCK_THINGS;
                                        thread_block();
                                        vm_object_deallocate(first_object);
                                        goto RetryFault;
#endif
                                }
                        }

                        /*
                         *      If the page is not in memory (in the object)
                         *      and the object has a pager, we have to check
                         *      if the pager has the data in secondary
                         *      storage.
                         */
                        if (!page_exists) {

                                /*
                                 *      If we don't allocate a (blank) page
                                 *      here... another thread could try
                                 *      to page it in, allocate a page, and
                                 *      then block on the busy page in its
                                 *      shadow (first_object).  Then we'd
                                 *      trip over the busy page after we
                                 *      found that the copy_object's pager
                                 *      doesn't have the page...
                                 */
                                copy_m = vm_page_alloc(copy_object,
                                                        copy_offset);
                                if (copy_m == VM_PAGE_NULL) {
                                        /*
                                         *      Wait for a page, then retry.
                                         */
                                        RELEASE_PAGE(m);
                                        copy_object->ref_count--;
                                        vm_object_unlock(copy_object);
                                        UNLOCK_AND_DEALLOCATE;
                                        VM_WAIT;
                                        goto RetryFault;
                                }

                                if (copy_object->pager != vm_pager_null) {
                                        vm_object_unlock(object);
                                        vm_object_unlock(copy_object);
                                        UNLOCK_MAP;

                                        page_exists = vm_pager_has_page(
                                                        copy_object->pager,
                                                        (copy_offset + copy_object->paging_offset));

                                        vm_object_lock(copy_object);

                                        /*
                                         *      Since the map is unlocked, someone
                                         *      else could have copied this object
                                         *      and put a different copy_object
                                         *      between the two.  Or, the last
                                         *      reference to the copy-object (other
                                         *      than the one we have) may have
                                         *      disappeared - if that has happened,
                                         *      we don't need to make the copy.
                                         */
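                                        /*
                                         *      (A ref_count of 1 means the
                                         *      only remaining reference is
                                         *      the one taken above; with no
                                         *      outside user left, the copy
                                         *      is not needed.)
                                         */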
                                        if (copy_object->shadow != object ||
                                            copy_object->ref_count == 1) {
                                                /*
                                                 *      Gaah... start over!
                                                 */
                                                FREE_PAGE(copy_m);
                                                vm_object_unlock(copy_object);
                                                vm_object_deallocate(copy_object);
                                                        /* may block */
                                                vm_object_lock(object);
                                                goto RetryCopy;
                                        }
                                        vm_object_lock(object);

                                        if (page_exists) {
                                                /*
                                                 *      We didn't need the page
                                                 */
                                                FREE_PAGE(copy_m);
                                        }
                                }
                        }
                        if (!page_exists) {
                                /*
                                 *      Must copy page into copy-object.
                                 */
                                vm_page_copy(m, copy_m);
                                copy_m->fake = FALSE;
                                copy_m->absent = FALSE;

                                /*
                                 *      Things to remember:
                                 *      1. The copied page must be marked 'dirty'
                                 *         so it will be paged out to the copy
                                 *         object.
                                 *      2. If the old page was in use by any users
                                 *         of the copy-object, it must be removed
                                 *         from all pmaps.  (We can't know which
                                 *         pmaps use it.)
                                 */
                                vm_page_lock_queues();
                                pmap_remove_all(VM_PAGE_TO_PHYS(old_m));
                                copy_m->clean = FALSE;
                                vm_page_activate(copy_m);       /* XXX */
                                vm_page_unlock_queues();

                                PAGE_WAKEUP(copy_m);
                        }
                        /*
                         *      The reference count on copy_object must be
                         *      at least 2: one for our extra reference,
                         *      and at least one from the outside world
                         *      (we checked that when we last locked
                         *      copy_object).
                         */
                        copy_object->ref_count--;
                        vm_object_unlock(copy_object);
                        m->copy_on_write = FALSE;
                }
        }

        if (m->active || m->inactive)
                panic("vm_fault: active or inactive before retrying lookup");

        /*
         *      We must verify that the maps have not changed
         *      since our last lookup.
         */

        if (!lookup_still_valid) {
                vm_object_t     retry_object;
                vm_offset_t     retry_offset;
                vm_prot_t       retry_prot;

                /*
                 *      Since map entries may be pageable, make sure we can
                 *      take a page fault on them.
                 */
                vm_object_unlock(object);

                /*
                 *      To avoid trying to write_lock the map while another
                 *      thread has it read_locked (in vm_map_pageable), we
                 *      do not try for write permission.  If the page is
                 *      still writable, we will get write permission.  If it
                 *      is not, or has been marked needs_copy, we enter the
                 *      mapping without write permission, and will merely
                 *      take another fault.
                 */
                result = vm_map_lookup(&map, vaddr,
                                fault_type & ~VM_PROT_WRITE, &entry,
                                &retry_object, &retry_offset, &retry_prot,
                                &wired, &su);

                vm_object_lock(object);

                /*
                 *      If we don't need the page any longer, put it on the
                 *      active list (the easiest thing to do here).  If no
                 *      one needs it, pageout will grab it eventually.
                 */

                if (result != KERN_SUCCESS) {
                        RELEASE_PAGE(m);
                        UNLOCK_AND_DEALLOCATE;
                        return(result);
                }

                lookup_still_valid = TRUE;

                if ((retry_object != first_object) ||
                                (retry_offset != first_offset)) {
                        RELEASE_PAGE(m);
                        UNLOCK_AND_DEALLOCATE;
                        goto RetryFault;
                }

                /*
                 *      Check whether the protection has changed or the object
                 *      has been copied while we left the map unlocked.
                 *      Changing from read to write permission is OK - we leave
                 *      the page write-protected, and catch the write fault.
                 *      Changing from write to read permission means that we
                 *      can't mark the page write-enabled after all.
                 */
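                /*
                 *      (Clearing VM_PROT_WRITE for a copy_on_write page
                 *      just means the next write faults again and takes
                 *      the copy path above.)
                 */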
                prot &= retry_prot;
                if (m->copy_on_write)
                        prot &= ~VM_PROT_WRITE;
        }

        /*
         *      (the various bits we're fiddling with here are locked by
         *      the object's lock)
         */

        /* XXX This distorts the meaning of the copy_on_write bit */

        if (prot & VM_PROT_WRITE)
                m->copy_on_write = FALSE;

        /*
         *      It's critically important that a wired-down page be faulted
         *      only once in each map for which it is wired.
         */

        if (m->active || m->inactive)
                panic("vm_fault: active or inactive before pmap_enter");

        vm_object_unlock(object);

        /*
         *      Put this page into the physical map.
         *      We had to do the unlock above because pmap_enter
         *      may cause other faults.  We don't put the
         *      page back on the active queue until later so
         *      that the page-out daemon won't find us (yet).
         */

        pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m),
                        prot & ~(m->page_lock), wired);

        /*
         *      If the page is not wired down, then put it where the
         *      pageout daemon can find it.
         */
        vm_object_lock(object);
        vm_page_lock_queues();
        if (change_wiring) {
                if (wired)
                        vm_page_wire(m);
                else
                        vm_page_unwire(m);
        }
        else
                vm_page_activate(m);
        vm_page_unlock_queues();

        /*
         *      Unlock everything, and return
         */

        PAGE_WAKEUP(m);
        UNLOCK_AND_DEALLOCATE;

        return(KERN_SUCCESS);
}

/*
 *      vm_fault_wire:
 *
 *      Wire down a range of virtual addresses in a map.
 */
void vm_fault_wire(map, start, end)
        vm_map_t        map;
        vm_offset_t     start, end;
{
        register vm_offset_t    va;
        register pmap_t         pmap;

        pmap = vm_map_pmap(map);

        /*
         *      Inform the physical mapping system that the
         *      range of addresses may not fault, so that
         *      page tables and such can be locked down as well.
         */

        pmap_pageable(pmap, start, end, FALSE);

        /*
         *      We simulate a fault to get the page and enter it
         *      in the physical map.
         */

        for (va = start; va < end; va += PAGE_SIZE) {
                (void) vm_fault(map, va, VM_PROT_NONE, TRUE);
        }
}

/*
 *      vm_fault_unwire:
 *
 *      Unwire a range of virtual addresses in a map.
 */
void vm_fault_unwire(map, start, end)
        vm_map_t        map;
        vm_offset_t     start, end;
{
        register vm_offset_t    va, pa;
        register pmap_t         pmap;

        pmap = vm_map_pmap(map);

        /*
         *      Since the pages are wired down, we must be able to
         *      get their mappings from the physical map system.
         */

        vm_page_lock_queues();

        for (va = start; va < end; va += PAGE_SIZE) {
                pa = pmap_extract(pmap, va);
                if (pa == (vm_offset_t) 0) {
                        panic("unwire: page not in pmap");
                }
                pmap_change_wiring(pmap, va, FALSE);
                vm_page_unwire(PHYS_TO_VM_PAGE(pa));
        }
        vm_page_unlock_queues();

        /*
         *      Inform the physical mapping system that the range
         *      of addresses may fault, so that page tables and
         *      such may be unwired themselves.
         */

        pmap_pageable(pmap, start, end, TRUE);
}

/*
 *      Routine:
 *              vm_fault_copy_entry
 *      Function:
 *              Copy all of the pages from a wired-down map entry to another.
 *
 *      In/out conditions:
 *              The source and destination maps must be locked for write.
 *              The source map entry must be wired down (or be a sharing map
 *              entry corresponding to a main map entry that is wired down).
 */
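/*
 *      (The copy here is eager, page by page, rather than deferred
 *      copy-on-write: a wired mapping must never fault, so the write
 *      fault that a deferred copy relies on could never be taken.)
 */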
void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
        vm_map_t        dst_map;
        vm_map_t        src_map;
        vm_map_entry_t  dst_entry;
        vm_map_entry_t  src_entry;
{
        vm_object_t     dst_object;
        vm_object_t     src_object;
        vm_offset_t     dst_offset;
        vm_offset_t     src_offset;
        vm_prot_t       prot;
        vm_offset_t     vaddr;
        vm_page_t       dst_m;
        vm_page_t       src_m;

#ifdef lint
        src_map++;              /* src_map is otherwise unused here */
#endif /* lint */

        src_object = src_entry->object.vm_object;
        src_offset = src_entry->offset;

        /*
         *      Create the top-level object for the destination entry.
         *      (Doesn't actually shadow anything - we copy the pages
         *      directly.)
         */
        dst_object = vm_object_allocate(
                        (vm_size_t) (dst_entry->end - dst_entry->start));

        dst_entry->object.vm_object = dst_object;
        dst_entry->offset = 0;

        prot = dst_entry->max_protection;

        /*
         *      Loop through all of the pages in the entry's range, copying
         *      each one from the source object (it should be there) to the
         *      destination object.
         */
        for (vaddr = dst_entry->start, dst_offset = 0;
             vaddr < dst_entry->end;
             vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

                /*
                 *      Allocate a page in the destination object
                 */
                vm_object_lock(dst_object);
                do {
                        dst_m = vm_page_alloc(dst_object, dst_offset);
                        if (dst_m == VM_PAGE_NULL) {
                                vm_object_unlock(dst_object);
                                VM_WAIT;
                                vm_object_lock(dst_object);
                        }
                } while (dst_m == VM_PAGE_NULL);

                /*
                 *      Find the page in the source object, and copy it in.
                 *      (Because the source is wired down, the page will be
                 *      in memory.)
                 */
                vm_object_lock(src_object);
                src_m = vm_page_lookup(src_object, dst_offset + src_offset);
                if (src_m == VM_PAGE_NULL)
                        panic("vm_fault_copy_entry: page missing");

                vm_page_copy(src_m, dst_m);

                /*
                 *      Enter it in the pmap...
                 */
                vm_object_unlock(src_object);
                vm_object_unlock(dst_object);

                pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
                                prot, FALSE);

                /*
                 *      Mark it no longer busy, and put it on the active list.
                 */
                vm_object_lock(dst_object);
                vm_page_lock_queues();
                vm_page_activate(dst_m);
                vm_page_unlock_queues();
                PAGE_WAKEUP(dst_m);
                vm_object_unlock(dst_object);
        }
}