/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_fault.c	8.1 (Berkeley) 06/11/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
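/*
 *	Illustrative usage sketch (not taken from any particular port):
 *	a machine-dependent trap handler is expected to call this
 *	routine roughly as
 *
 *		rv = vm_fault(map, trunc_page(va), ftype, FALSE);
 *
 *	where `map', `va' and `ftype' stand for the faulting map, address
 *	and access type recovered from the trap frame.  The exact call
 *	site is machine-dependent.
 */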
int
vm_fault(map, vaddr, fault_type, change_wiring)
	vm_map_t	map;
	vm_offset_t	vaddr;
	vm_prot_t	fault_type;
	boolean_t	change_wiring;
{
	vm_object_t		first_object;
	vm_offset_t		first_offset;
	vm_map_entry_t		entry;
	register vm_object_t	object;
	register vm_offset_t	offset;
	register vm_page_t	m;
	vm_page_t		first_m;
	vm_prot_t		prot;
	int			result;
	boolean_t		wired;
	boolean_t		su;
	boolean_t		lookup_still_valid;
	boolean_t		page_exists;
	vm_page_t		old_m;
	vm_object_t		next_object;

	cnt.v_vm_faults++;		/* needs lock XXX */
/*
 *	Recovery actions
 */
#define	FREE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_free(m);				\
	vm_page_unlock_queues();			\
}

#define	RELEASE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_activate(m);				\
	vm_page_unlock_queues();			\
}

#define	UNLOCK_MAP	{				\
	if (lookup_still_valid) {			\
		vm_map_lookup_done(map, entry);		\
		lookup_still_valid = FALSE;		\
	}						\
}

#define	UNLOCK_THINGS	{				\
	object->paging_in_progress--;			\
	vm_object_unlock(object);			\
	if (object != first_object) {			\
		vm_object_lock(first_object);		\
		FREE_PAGE(first_m);			\
		first_object->paging_in_progress--;	\
		vm_object_unlock(first_object);		\
	}						\
	UNLOCK_MAP;					\
}

#define	UNLOCK_AND_DEALLOCATE	{			\
	UNLOCK_THINGS;					\
	vm_object_deallocate(first_object);		\
}
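
/*
 * These cleanup macros refer directly to vm_fault's locals (object,
 * first_object, first_m, map, entry, lookup_still_valid), which is why
 * they are defined here, after the declarations; each error path below
 * uses them to back out whatever state has been built up so far.
 */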

 RetryFault: ;

	/*
	 * Find the backing store object and offset into
	 * it to begin the search.
	 */

	if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
			&first_object, &first_offset,
			&prot, &wired, &su)) != KERN_SUCCESS) {
		return(result);
	}
	lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot;

	first_m = NULL;

	/*
	 * Make a reference to this object to
	 * prevent its disposal while we are messing with
	 * it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their
	 * shadows (and copies), they will stay around as well.
	 */

	vm_object_lock(first_object);

	first_object->ref_count++;
	first_object->paging_in_progress++;

	/*
	 * INVARIANTS (through entire routine):
	 *
	 * 1)	At all times, we must either have the object
	 *	lock or a busy page in some object to prevent
	 *	some other thread from trying to bring in
	 *	the same page.
	 *
	 *	Note that we cannot hold any locks during the
	 *	pager access or when waiting for memory, so
	 *	we use a busy page then.
	 *
	 *	Note also that we aren't as concerned about
	 *	more than one thread attempting to pager_data_unlock
	 *	the same page at once, so we don't hold the page
	 *	as busy then, but do record the highest unlock
	 *	value so far.  [Unlock requests may also be delivered
	 *	out of order.]
	 *
	 * 2)	Once we have a busy page, we must remove it from
	 *	the pageout queues, so that the pageout daemon
	 *	will not grab it away.
	 *
	 * 3)	To prevent another thread from racing us down the
	 *	shadow chain and entering a new page in the top
	 *	object before we do, we must keep a busy page in
	 *	the top object while following the shadow chain.
	 *
	 * 4)	We must increment paging_in_progress on any object
	 *	for which we have a busy page, to prevent
	 *	vm_object_collapse from removing the busy page
	 *	without our noticing.
	 */

	/*
	 * Search for the page at object/offset.
	 */

	object = first_object;
	offset = first_offset;

	/*
	 * See whether this page is resident
	 */

	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != NULL) {
			/*
			 * If the page is being brought in,
			 * wait for it and then retry.
			 */
			if (m->flags & PG_BUSY) {
#ifdef DOTHREADS
				int	wait_result;

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			/*
			 * Remove the page from the pageout daemon's
			 * reach while we play with it.
			 */

			vm_page_lock_queues();
			if (m->flags & PG_INACTIVE) {
				queue_remove(&vm_page_queue_inactive, m,
					     vm_page_t, pageq);
				m->flags &= ~PG_INACTIVE;
				cnt.v_inactive_count--;
				cnt.v_reactivated++;
			}

			if (m->flags & PG_ACTIVE) {
				queue_remove(&vm_page_queue_active, m,
					     vm_page_t, pageq);
				m->flags &= ~PG_ACTIVE;
				cnt.v_active_count--;
			}
			vm_page_unlock_queues();

			/*
			 * Mark page busy for other threads.
			 */
			m->flags |= PG_BUSY;
			break;
		}

		if (((object->pager != NULL) &&
				(!change_wiring || wired))
			|| (object == first_object)) {

			/*
			 * Allocate a new page for this object/offset
			 * pair.
			 */

			m = vm_page_alloc(object, offset);

			if (m == NULL) {
				UNLOCK_AND_DEALLOCATE;
				VM_WAIT;
				goto RetryFault;
			}
		}

		if (object->pager != NULL && (!change_wiring || wired)) {
			int rv;

			/*
			 * Now that we have a busy page, we can
			 * release the object lock.
			 */
			vm_object_unlock(object);

			/*
			 * Call the pager to retrieve the data, if any,
			 * after releasing the lock on the map.
			 */
			UNLOCK_MAP;
			rv = vm_pager_get(object->pager, m, TRUE);

			/*
			 * Reacquire the object lock to preserve our
			 * invariant.
			 */
			vm_object_lock(object);

			/*
			 * Found the page.
			 * Leave it busy while we play with it.
			 */
			if (rv == VM_PAGER_OK) {
				/*
				 * Relookup in case pager changed page.
				 * Pager is responsible for disposition
				 * of old page if moved.
				 */
				m = vm_page_lookup(object, offset);

				cnt.v_pageins++;
				m->flags &= ~PG_FAKE;
				m->flags |= PG_CLEAN;
				pmap_clear_modify(VM_PAGE_TO_PHYS(m));
				break;
			}

			/*
			 * IO error or page outside the range of the pager:
			 * cleanup and return an error.
			 */
			if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
				FREE_PAGE(m);
				UNLOCK_AND_DEALLOCATE;
				return(KERN_PROTECTION_FAILURE); /* XXX */
			}
			/*
			 * rv == VM_PAGER_FAIL:
			 *
			 * Page does not exist at this object/offset.
			 * Free the bogus page (waking up anyone waiting
			 * for it) and continue on to the next object.
			 *
			 * If this is the top-level object, we must
			 * leave the busy page to prevent another
			 * thread from rushing past us, and inserting
			 * the page in that object at the same time
			 * that we are.
			 */
			if (object != first_object) {
				FREE_PAGE(m);
				/* note that `m' is not used after this */
			}
		}

		/*
		 * We get here if the object has no pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (object == first_object)
			first_m = m;

		/*
		 * Move on to the next object.  Lock the next
		 * object before unlocking the current one.
		 */

		offset += object->shadow_offset;
		next_object = object->shadow;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page
			 * in the top object with zeros.
			 */
			if (object != first_object) {
				object->paging_in_progress--;
				vm_object_unlock(object);

				object = first_object;
				offset = first_offset;
				m = first_m;
				vm_object_lock(object);
			}
			first_m = NULL;

			vm_page_zero_fill(m);
			cnt.v_zfod++;
			m->flags &= ~PG_FAKE;
			break;
		}
		else {
			vm_object_lock(next_object);
			if (object != first_object)
				object->paging_in_progress--;
			vm_object_unlock(object);
			object = next_object;
			object->paging_in_progress++;
		}
	}

	if ((m->flags & (PG_ACTIVE | PG_INACTIVE | PG_BUSY)) != PG_BUSY)
		panic("vm_fault: active, inactive or !busy after main loop");

	/*
	 * PAGE HAS BEEN FOUND.
	 * [Loop invariant still holds -- the object lock
	 * is held.]
	 */

	old_m = m;	/* save page that would be copied */

	/*
	 * If the page is being written, but isn't
	 * already owned by the top-level object,
	 * we have to copy it into a new page owned
	 * by the top-level object.
	 */

	if (object != first_object) {
		/*
		 * We only really need to copy if we
		 * want to write it.
		 */

		if (fault_type & VM_PROT_WRITE) {

			/*
			 * If we try to collapse first_object at this
			 * point, we may deadlock when we try to get
			 * the lock on an intermediate object (since we
			 * have the bottom object locked).  We can't
			 * unlock the bottom object, because the page
			 * we found may move (by collapse) if we do.
			 *
			 * Instead, we first copy the page.  Then, when
			 * we have no more use for the bottom object,
			 * we unlock it and try to collapse.
			 *
			 * Note that we copy the page even if we didn't
			 * need to... that's the breaks.
			 */

			/*
			 * We already have an empty page in
			 * first_object - use it.
			 */

			vm_page_copy(m, first_m);
			first_m->flags &= ~PG_FAKE;

			/*
			 * If another map is truly sharing this
			 * page with us, we have to flush all
			 * uses of the original page, since we
			 * can't distinguish those which want the
			 * original from those which need the
			 * new copy.
			 *
			 * XXX If we know that only one map has
			 * access to this page, then we could
			 * avoid the pmap_page_protect() call.
			 */

			vm_page_lock_queues();
			vm_page_activate(m);
			pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
			vm_page_unlock_queues();

			/*
			 * We no longer need the old page or object.
			 */
			PAGE_WAKEUP(m);
			object->paging_in_progress--;
			vm_object_unlock(object);

			/*
			 * Only use the new page below...
			 */

			cnt.v_cow_faults++;
			m = first_m;
			object = first_object;
			offset = first_offset;

			/*
			 * Now that we've gotten the copy out of the
			 * way, let's try to collapse the top object.
			 */
			vm_object_lock(object);
			/*
			 * But we have to play ugly games with
			 * paging_in_progress to do that...
			 */
			object->paging_in_progress--;
			vm_object_collapse(object);
			object->paging_in_progress++;
		}
		else {
			prot &= (~VM_PROT_WRITE);
			m->flags |= PG_COPYONWRITE;
		}
	}

	if (m->flags & (PG_ACTIVE|PG_INACTIVE))
		panic("vm_fault: active or inactive before copy object handling");
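
	/*
	 * A copy-object, if first_object has one, holds this object's
	 * pages as they were when some other map asked for a copy of it
	 * (see the vm_object copy machinery, e.g. vm_object_copy()).
	 * Before this fault may modify a page of first_object, the old
	 * contents must be pushed into the copy-object; otherwise the
	 * copy's users would see the modification.
	 */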

	/*
	 * If the page is being written, but hasn't been
	 * copied to the copy-object, we have to copy it there.
	 */
 RetryCopy:
	if (first_object->copy != NULL) {
		vm_object_t copy_object = first_object->copy;
		vm_offset_t copy_offset;
		vm_page_t copy_m;

		/*
		 * We only need to copy if we want to write it.
		 */
		if ((fault_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
			m->flags |= PG_COPYONWRITE;
		}
		else {
			/*
			 * Try to get the lock on the copy_object.
			 */
			if (!vm_object_lock_try(copy_object)) {
				vm_object_unlock(object);
				/* should spin a bit here... */
				vm_object_lock(object);
				goto RetryCopy;
			}

			/*
			 * Make another reference to the copy-object,
			 * to keep it from disappearing during the
			 * copy.
			 */
			copy_object->ref_count++;

			/*
			 * Does the page exist in the copy?
			 */
			copy_offset = first_offset
				- copy_object->shadow_offset;
			copy_m = vm_page_lookup(copy_object, copy_offset);
			if (page_exists = (copy_m != NULL)) {
				if (copy_m->flags & PG_BUSY) {
#ifdef DOTHREADS
					int	wait_result;

					/*
					 * If the page is being brought
					 * in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_block();
					wait_result = current_thread()->wait_result;
					vm_object_deallocate(first_object);
					if (wait_result != THREAD_AWAKENED)
						return(KERN_SUCCESS);
					goto RetryFault;
#else
					/*
					 * If the page is being brought
					 * in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_block();
					vm_object_deallocate(first_object);
					goto RetryFault;
#endif
				}
			}

			/*
			 * If the page is not in memory (in the object)
			 * and the object has a pager, we have to check
			 * if the pager has the data in secondary
			 * storage.
			 */
			if (!page_exists) {

				/*
				 * If we don't allocate a (blank) page
				 * here... another thread could try
				 * to page it in, allocate a page, and
				 * then block on the busy page in its
				 * shadow (first_object).  Then we'd
				 * trip over the busy page after we
				 * found that the copy_object's pager
				 * doesn't have the page...
				 */
				copy_m = vm_page_alloc(copy_object,
							copy_offset);
				if (copy_m == NULL) {
					/*
					 * Wait for a page, then retry.
					 */
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_AND_DEALLOCATE;
					VM_WAIT;
					goto RetryFault;
				}

				if (copy_object->pager != NULL) {
					vm_object_unlock(object);
					vm_object_unlock(copy_object);
					UNLOCK_MAP;

					page_exists = vm_pager_has_page(
							copy_object->pager,
							(copy_offset + copy_object->paging_offset));

					vm_object_lock(copy_object);

					/*
					 * Since the map is unlocked, someone
					 * else could have copied this object
					 * and put a different copy_object
					 * between the two.  Or, the last
					 * reference to the copy-object (other
					 * than the one we have) may have
					 * disappeared - if that has happened,
					 * we don't need to make the copy.
					 */
					if (copy_object->shadow != object ||
					    copy_object->ref_count == 1) {
						/*
						 * Gaah... start over!
						 */
						FREE_PAGE(copy_m);
						vm_object_unlock(copy_object);
						vm_object_deallocate(copy_object);
						/* may block */
						vm_object_lock(object);
						goto RetryCopy;
					}
					vm_object_lock(object);

					if (page_exists) {
						/*
						 * We didn't need the page
						 */
						FREE_PAGE(copy_m);
					}
				}
			}
			if (!page_exists) {
				/*
				 * Must copy page into copy-object.
				 */
				vm_page_copy(m, copy_m);
				copy_m->flags &= ~PG_FAKE;

				/*
				 * Things to remember:
				 * 1. The copied page must be marked 'dirty'
				 *    so it will be paged out to the copy
				 *    object.
				 * 2. If the old page was in use by any users
				 *    of the copy-object, it must be removed
				 *    from all pmaps.  (We can't know which
				 *    pmaps use it.)
				 */
				vm_page_lock_queues();
				pmap_page_protect(VM_PAGE_TO_PHYS(old_m),
						  VM_PROT_NONE);
				copy_m->flags &= ~PG_CLEAN;
				vm_page_activate(copy_m);	/* XXX */
				vm_page_unlock_queues();

				PAGE_WAKEUP(copy_m);
			}
			/*
			 * The reference count on copy_object must be
			 * at least 2: one for our extra reference,
			 * and at least one from the outside world
			 * (we checked that when we last locked
			 * copy_object).
			 */
			copy_object->ref_count--;
			vm_object_unlock(copy_object);
			m->flags &= ~PG_COPYONWRITE;
		}
	}

	if (m->flags & (PG_ACTIVE | PG_INACTIVE))
		panic("vm_fault: active or inactive before retrying lookup");

	/*
	 * We must verify that the maps have not changed
	 * since our last lookup.
	 */

	if (!lookup_still_valid) {
		vm_object_t	retry_object;
		vm_offset_t	retry_offset;
		vm_prot_t	retry_prot;

		/*
		 * Since map entries may be pageable, make sure we can
		 * take a page fault on them.
		 */
		vm_object_unlock(object);

		/*
		 * To avoid trying to write_lock the map while another
		 * thread has it read_locked (in vm_map_pageable), we
		 * do not try for write permission.  If the page is
		 * still writable, we will get write permission.  If it
		 * is not, or has been marked needs_copy, we enter the
		 * mapping without write permission, and will merely
		 * take another fault.
		 */
		result = vm_map_lookup(&map, vaddr,
				fault_type & ~VM_PROT_WRITE, &entry,
				&retry_object, &retry_offset, &retry_prot,
				&wired, &su);

		vm_object_lock(object);

		/*
		 * If we don't need the page any longer, put it on the
		 * active list (the easiest thing to do here).  If no
		 * one needs it, pageout will grab it eventually.
		 */

		if (result != KERN_SUCCESS) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			return(result);
		}

		lookup_still_valid = TRUE;

		if ((retry_object != first_object) ||
				(retry_offset != first_offset)) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}

		/*
		 * Check whether the protection has changed or the object
		 * has been copied while we left the map unlocked.
		 * Changing from read to write permission is OK - we leave
		 * the page write-protected, and catch the write fault.
		 * Changing from write to read permission means that we
		 * can't mark the page write-enabled after all.
		 */
		prot &= retry_prot;
		if (m->flags & PG_COPYONWRITE)
			prot &= ~VM_PROT_WRITE;
	}

	/*
	 * (the various bits we're fiddling with here are locked by
	 * the object's lock)
	 */

	/* XXX This distorts the meaning of the copy_on_write bit */

	if (prot & VM_PROT_WRITE)
		m->flags &= ~PG_COPYONWRITE;

	/*
	 * It's critically important that a wired-down page be faulted
	 * only once in each map for which it is wired.
	 */

	if (m->flags & (PG_ACTIVE | PG_INACTIVE))
		panic("vm_fault: active or inactive before pmap_enter");

	vm_object_unlock(object);

	/*
	 * Put this page into the physical map.
	 * We had to do the unlock above because pmap_enter
	 * may cause other faults.  We don't put the
	 * page back on the active queue until later so
	 * that the page-out daemon won't find us (yet).
	 */

	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);

	/*
	 * If the page is not wired down, then put it where the
	 * pageout daemon can find it.
	 */
	vm_object_lock(object);
	vm_page_lock_queues();
	if (change_wiring) {
		if (wired)
			vm_page_wire(m);
		else
			vm_page_unwire(m);
	}
	else
		vm_page_activate(m);
	vm_page_unlock_queues();

	/*
	 * Unlock everything, and return
	 */

	PAGE_WAKEUP(m);
	UNLOCK_AND_DEALLOCATE;

	return(KERN_SUCCESS);

}

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
int
vm_fault_wire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{
	register vm_offset_t	va;
	register pmap_t		pmap;
	int			rv;

	pmap = vm_map_pmap(map);

	/*
	 * Inform the physical mapping system that the
	 * range of addresses may not fault, so that
	 * page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, start, end, FALSE);

	/*
	 * We simulate a fault to get the page and enter it
	 * in the physical map.
	 */

	for (va = start; va < end; va += PAGE_SIZE) {
		rv = vm_fault(map, va, VM_PROT_NONE, TRUE);
		if (rv) {
			if (va != start)
				vm_fault_unwire(map, start, va);
			return(rv);
		}
	}
	return(KERN_SUCCESS);
}
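
/*
 * Note on the loop above: vm_fault() is called with VM_PROT_NONE and
 * change_wiring == TRUE.  Because the map entry is wired, vm_fault()
 * substitutes the entry's own protection for the fault type (see
 * "if (wired) fault_type = prot" there), so each page is faulted in
 * with whatever access the entry allows.  Callers such as
 * vm_map_pageable() rely on vm_fault_wire() and vm_fault_unwire()
 * when wiring and unwiring a region.
 */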

/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void vm_fault_unwire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{

	register vm_offset_t	va, pa;
	register pmap_t		pmap;

	pmap = vm_map_pmap(map);

	/*
	 * Since the pages are wired down, we must be able to
	 * get their mappings from the physical map system.
	 */

	vm_page_lock_queues();

	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa == (vm_offset_t) 0) {
			panic("unwire: page not in pmap");
		}
		pmap_change_wiring(pmap, va, FALSE);
		vm_page_unwire(PHYS_TO_VM_PAGE(pa));
	}
	vm_page_unlock_queues();

	/*
	 * Inform the physical mapping system that the range
	 * of addresses may fault, so that page tables and
	 * such may be unwired themselves.
	 */

	pmap_pageable(pmap, start, end, TRUE);

}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */

void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
	vm_map_t	dst_map;
	vm_map_t	src_map;
	vm_map_entry_t	dst_entry;
	vm_map_entry_t	src_entry;
{

	vm_object_t	dst_object;
	vm_object_t	src_object;
	vm_offset_t	dst_offset;
	vm_offset_t	src_offset;
	vm_prot_t	prot;
	vm_offset_t	vaddr;
	vm_page_t	dst_m;
	vm_page_t	src_m;

#ifdef	lint
	src_map++;
#endif

	src_object = src_entry->object.vm_object;
	src_offset = src_entry->offset;

	/*
	 * Create the top-level object for the destination entry.
	 * (Doesn't actually shadow anything - we copy the pages
	 * directly.)
	 */
	dst_object = vm_object_allocate(
			(vm_size_t) (dst_entry->end - dst_entry->start));

	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;

	prot = dst_entry->max_protection;

	/*
	 * Loop through all of the pages in the entry's range, copying
	 * each one from the source object (it should be there) to the
	 * destination object.
	 */
	for (vaddr = dst_entry->start, dst_offset = 0;
	     vaddr < dst_entry->end;
	     vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

		/*
		 * Allocate a page in the destination object
		 */
		vm_object_lock(dst_object);
		do {
			dst_m = vm_page_alloc(dst_object, dst_offset);
			if (dst_m == NULL) {
				vm_object_unlock(dst_object);
				VM_WAIT;
				vm_object_lock(dst_object);
			}
		} while (dst_m == NULL);

		/*
		 * Find the page in the source object, and copy it in.
		 * (Because the source is wired down, the page will be
		 * in memory.)
		 */
		vm_object_lock(src_object);
		src_m = vm_page_lookup(src_object, dst_offset + src_offset);
		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");

		vm_page_copy(src_m, dst_m);

		/*
		 * Enter it in the pmap...
		 */
		vm_object_unlock(src_object);
		vm_object_unlock(dst_object);

		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
		    prot, FALSE);

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		vm_object_lock(dst_object);
		vm_page_lock_queues();
		vm_page_activate(dst_m);
		vm_page_unlock_queues();
		PAGE_WAKEUP(dst_m);
		vm_object_unlock(dst_object);
	}

}
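
/*
 * Illustrative caller sketch: the map-copy code cannot make wired-down
 * pages copy-on-write, so it is expected to copy them eagerly, along
 * the lines of
 *
 *	if (src_entry->wired_count != 0)
 *		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
 *
 * (a sketch only; the actual decision is made by the map-copy code in
 * vm_map.c, e.g. vm_map_copy_entry()).
 */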