/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_fault.c	7.15 (Berkeley) 10/01/92
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault(map, vaddr, fault_type, change_wiring)
	vm_map_t	map;
	vm_offset_t	vaddr;
	vm_prot_t	fault_type;
	boolean_t	change_wiring;
{
	vm_object_t		first_object;
	vm_offset_t		first_offset;
	vm_map_entry_t		entry;
	register vm_object_t	object;
	register vm_offset_t	offset;
	register vm_page_t	m;
	vm_page_t		first_m;
	vm_prot_t		prot;
	int			result;
	boolean_t		wired;
	boolean_t		su;
	boolean_t		lookup_still_valid;
	boolean_t		page_exists;
	vm_page_t		old_m;
	vm_object_t		next_object;

	cnt.v_vm_faults++;		/* needs lock XXX */
/*
 *	Recovery actions
 */
#define	FREE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_free(m);				\
	vm_page_unlock_queues();			\
}

#define	RELEASE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_activate(m);				\
	vm_page_unlock_queues();			\
}

#define	UNLOCK_MAP	{				\
	if (lookup_still_valid) {			\
		vm_map_lookup_done(map, entry);		\
		lookup_still_valid = FALSE;		\
	}						\
}

#define	UNLOCK_THINGS	{				\
	object->paging_in_progress--;			\
	vm_object_unlock(object);			\
	if (object != first_object) {			\
		vm_object_lock(first_object);		\
		FREE_PAGE(first_m);			\
		first_object->paging_in_progress--;	\
		vm_object_unlock(first_object);		\
	}						\
	UNLOCK_MAP;					\
}

#define	UNLOCK_AND_DEALLOCATE	{			\
	UNLOCK_THINGS;					\
	vm_object_deallocate(first_object);		\
}

    RetryFault: ;

	/*
	 *	Find the backing store object and offset into
	 *	it to begin the search.
	 */

	if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
			&first_object, &first_offset,
			&prot, &wired, &su)) != KERN_SUCCESS) {
		return(result);
	}
	lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot;

	first_m = NULL;

	/*
	 *	Make a reference to this object to
	 *	prevent its disposal while we are messing with
	 *	it.  Once we have the reference, the map is free
	 *	to be diddled.  Since objects reference their
	 *	shadows (and copies), they will stay around as well.
	 */

	vm_object_lock(first_object);

	first_object->ref_count++;
	first_object->paging_in_progress++;

	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *		Note that we cannot hold any locks during the
	 *		pager access or when waiting for memory, so
	 *		we use a busy page then.
	 *
	 *		Note also that we aren't as concerned about
	 *		more than one thread attempting to pager_data_unlock
	 *		the same page at once, so we don't hold the page
	 *		as busy then, but do record the highest unlock
	 *		value so far.  [Unlock requests may also be delivered
	 *		out of order.]
	 *
	 *	2)	Once we have a busy page, we must remove it from
	 *		the pageout queues, so that the pageout daemon
	 *		will not grab it away.
	 *
	 *	3)	To prevent another thread from racing us down the
	 *		shadow chain and entering a new page in the top
	 *		object before we do, we must keep a busy page in
	 *		the top object while following the shadow chain.
	 *
	 *	4)	We must increment paging_in_progress on any object
	 *		for which we have a busy page, to prevent
	 *		vm_object_collapse from removing the busy page
	 *		without our noticing.
	 */
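
	/*
	 *	Overview of the loop below: starting at first_object, walk
	 *	the shadow chain.  At each object, use a resident page if
	 *	one exists, otherwise ask that object's pager for it; if
	 *	neither works, shift down to the shadow object.  Running
	 *	off the end of the chain means the page has never existed,
	 *	so it is zero-filled in the top-level object.
	 */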

	/*
	 *	Search for the page at object/offset.
	 */

	object = first_object;
	offset = first_offset;

	/*
	 *	See whether this page is resident
	 */

	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != NULL) {
			/*
			 *	If the page is being brought in,
			 *	wait for it and then retry.
			 */
			if (m->flags & PG_BUSY) {
#ifdef DOTHREADS
				int	wait_result;

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			if (m->flags & PG_ABSENT)
				panic("vm_fault: absent");

			/*
			 *	If the desired access to this page has
			 *	been locked out, request that it be unlocked.
			 */

			if (fault_type & m->page_lock) {
#ifdef DOTHREADS
				int	wait_result;

				if ((fault_type & m->unlock_request) != fault_type)
					panic("vm_fault: pager_data_unlock");

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				if ((fault_type & m->unlock_request) != fault_type)
					panic("vm_fault: pager_data_unlock");

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			/*
			 *	Remove the page from the pageout daemon's
			 *	reach while we play with it.
			 */

			vm_page_lock_queues();
			if (m->flags & PG_INACTIVE) {
				queue_remove(&vm_page_queue_inactive, m,
						vm_page_t, pageq);
				m->flags &= ~PG_INACTIVE;
				cnt.v_inactive_count--;
				cnt.v_reactivated++;
			}

			if (m->flags & PG_ACTIVE) {
				queue_remove(&vm_page_queue_active, m,
						vm_page_t, pageq);
				m->flags &= ~PG_ACTIVE;
				cnt.v_active_count--;
			}
			vm_page_unlock_queues();

			/*
			 *	Mark page busy for other threads.
			 */
			m->flags |= PG_BUSY;
			m->flags &= ~PG_ABSENT;
			break;
		}

		if (((object->pager != NULL) &&
				(!change_wiring || wired))
		    || (object == first_object)) {

			/*
			 *	Allocate a new page for this object/offset
			 *	pair.
			 */

			m = vm_page_alloc(object, offset);

			if (m == NULL) {
				UNLOCK_AND_DEALLOCATE;
				VM_WAIT;
				goto RetryFault;
			}
		}

		if (object->pager != NULL && (!change_wiring || wired)) {
			int rv;

			/*
			 *	Now that we have a busy page, we can
			 *	release the object lock.
			 */
			vm_object_unlock(object);

			/*
			 *	Call the pager to retrieve the data, if any,
			 *	after releasing the lock on the map.
			 */
			UNLOCK_MAP;
			rv = vm_pager_get(object->pager, m, TRUE);

			/*
			 *	Reacquire the object lock to preserve our
			 *	invariant.
			 */
			vm_object_lock(object);

			/*
			 *	Found the page.
			 *	Leave it busy while we play with it.
			 */
			if (rv == VM_PAGER_OK) {
				/*
				 *	Relookup in case pager changed page.
				 *	Pager is responsible for disposition
				 *	of old page if moved.
				 */
				m = vm_page_lookup(object, offset);

				cnt.v_pageins++;
				m->flags &= ~PG_FAKE;
				m->flags |= PG_CLEAN;
				pmap_clear_modify(VM_PAGE_TO_PHYS(m));
				break;
			}

			/*
			 *	IO error or page outside the range of the pager:
			 *	cleanup and return an error.
			 */
			if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
				FREE_PAGE(m);
				UNLOCK_AND_DEALLOCATE;
				return(KERN_PROTECTION_FAILURE); /* XXX */
			}
			/*
			 * rv == VM_PAGER_FAIL:
			 *
			 *	Page does not exist at this object/offset.
			 *	Free the bogus page (waking up anyone waiting
			 *	for it) and continue on to the next object.
			 *
			 *	If this is the top-level object, we must
			 *	leave the busy page to prevent another
			 *	thread from rushing past us, and inserting
			 *	the page in that object at the same time
			 *	that we are.
			 */
			if (object != first_object) {
				FREE_PAGE(m);
				/* note that `m' is not used after this */
			}
		}

		/*
		 * We get here if the object has no pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (object == first_object)
			first_m = m;

		/*
		 *	Move on to the next object.  Lock the next
		 *	object before unlocking the current one.
		 */

		offset += object->shadow_offset;
		next_object = object->shadow;
		if (next_object == NULL) {
			/*
			 *	If there's no object left, fill the page
			 *	in the top object with zeros.
			 */
			if (object != first_object) {
				object->paging_in_progress--;
				vm_object_unlock(object);

				object = first_object;
				offset = first_offset;
				m = first_m;
				vm_object_lock(object);
			}
			first_m = NULL;

			vm_page_zero_fill(m);
			cnt.v_zfod++;
			m->flags &= ~(PG_FAKE | PG_ABSENT);
			break;
		}
		else {
			vm_object_lock(next_object);
			if (object != first_object)
				object->paging_in_progress--;
			vm_object_unlock(object);
			object = next_object;
			object->paging_in_progress++;
		}
	}

	if ((m->flags & (PG_ABSENT | PG_ACTIVE | PG_INACTIVE)) ||
	    !(m->flags & PG_BUSY))
		panic("vm_fault: absent or active or inactive or not busy after main loop");

	/*
	 *	PAGE HAS BEEN FOUND.
	 *	[Loop invariant still holds -- the object lock
	 *	is held.]
	 */

	old_m = m;	/* save page that would be copied */

	/*
	 *	If the page is being written, but isn't
	 *	already owned by the top-level object,
	 *	we have to copy it into a new page owned
	 *	by the top-level object.
	 */

	if (object != first_object) {
		/*
		 *	We only really need to copy if we
		 *	want to write it.
		 */

		if (fault_type & VM_PROT_WRITE) {

			/*
			 *	If we try to collapse first_object at this
			 *	point, we may deadlock when we try to get
			 *	the lock on an intermediate object (since we
			 *	have the bottom object locked).  We can't
			 *	unlock the bottom object, because the page
			 *	we found may move (by collapse) if we do.
			 *
			 *	Instead, we first copy the page.  Then, when
			 *	we have no more use for the bottom object,
			 *	we unlock it and try to collapse.
			 *
			 *	Note that we copy the page even if we didn't
			 *	need to... that's the breaks.
			 */

			/*
			 *	We already have an empty page in
			 *	first_object - use it.
			 */

			vm_page_copy(m, first_m);
			first_m->flags &= ~(PG_FAKE | PG_ABSENT);

			/*
			 *	If another map is truly sharing this
			 *	page with us, we have to flush all
			 *	uses of the original page, since we
			 *	can't distinguish those which want the
			 *	original from those which need the
			 *	new copy.
			 *
			 *	XXX If we know that only one map has
			 *	access to this page, then we could
			 *	avoid the pmap_page_protect() call.
			 */

			vm_page_lock_queues();
			vm_page_activate(m);
			pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
			vm_page_unlock_queues();

			/*
			 *	We no longer need the old page or object.
			 */
			PAGE_WAKEUP(m);
			object->paging_in_progress--;
			vm_object_unlock(object);

			/*
			 *	Only use the new page below...
			 */

			cnt.v_cow_faults++;
			m = first_m;
			object = first_object;
			offset = first_offset;

			/*
			 *	Now that we've gotten the copy out of the
			 *	way, let's try to collapse the top object.
			 */
			vm_object_lock(object);
			/*
			 *	But we have to play ugly games with
			 *	paging_in_progress to do that...
			 */
			object->paging_in_progress--;
			vm_object_collapse(object);
			object->paging_in_progress++;
		}
		else {
			prot &= (~VM_PROT_WRITE);
			m->flags |= PG_COPYONWRITE;
		}
	}

	if (m->flags & (PG_ACTIVE | PG_INACTIVE))
		panic("vm_fault: active or inactive before copy object handling");

	/*
	 *	If the page is being written, but hasn't been
	 *	copied to the copy-object, we have to copy it there.
	 */
    RetryCopy:
	if (first_object->copy != NULL) {
		vm_object_t		copy_object = first_object->copy;
		vm_offset_t		copy_offset;
		vm_page_t		copy_m;

		/*
		 *	We only need to copy if we want to write it.
		 */
		if ((fault_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
			m->flags |= PG_COPYONWRITE;
		}
		else {
			/*
			 *	Try to get the lock on the copy_object.
			 */
			if (!vm_object_lock_try(copy_object)) {
				vm_object_unlock(object);
				/* should spin a bit here... */
				vm_object_lock(object);
				goto RetryCopy;
			}

			/*
			 *	Make another reference to the copy-object,
			 *	to keep it from disappearing during the
			 *	copy.
			 */
			copy_object->ref_count++;

			/*
			 *	Does the page exist in the copy?
			 */
			copy_offset = first_offset
				- copy_object->shadow_offset;
			copy_m = vm_page_lookup(copy_object, copy_offset);
			if (page_exists = (copy_m != NULL)) {
				if (copy_m->flags & PG_BUSY) {
#ifdef DOTHREADS
					int	wait_result;

					/*
					 *	If the page is being brought
					 *	in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_block();
					wait_result = current_thread()->wait_result;
					vm_object_deallocate(first_object);
					if (wait_result != THREAD_AWAKENED)
						return(KERN_SUCCESS);
					goto RetryFault;
#else
					/*
					 *	If the page is being brought
					 *	in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_block();
					vm_object_deallocate(first_object);
					goto RetryFault;
#endif
				}
			}

			/*
			 *	If the page is not in memory (in the object)
			 *	and the object has a pager, we have to check
			 *	if the pager has the data in secondary
			 *	storage.
			 */
			if (!page_exists) {

				/*
				 *	If we don't allocate a (blank) page
				 *	here... another thread could try
				 *	to page it in, allocate a page, and
				 *	then block on the busy page in its
				 *	shadow (first_object).  Then we'd
				 *	trip over the busy page after we
				 *	found that the copy_object's pager
				 *	doesn't have the page...
				 */
				copy_m = vm_page_alloc(copy_object,
								copy_offset);
				if (copy_m == NULL) {
					/*
					 *	Wait for a page, then retry.
					 */
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_AND_DEALLOCATE;
					VM_WAIT;
					goto RetryFault;
				}

				if (copy_object->pager != NULL) {
					vm_object_unlock(object);
					vm_object_unlock(copy_object);
					UNLOCK_MAP;

					page_exists = vm_pager_has_page(
							copy_object->pager,
							(copy_offset + copy_object->paging_offset));

					vm_object_lock(copy_object);

					/*
					 * Since the map is unlocked, someone
					 * else could have copied this object
					 * and put a different copy_object
					 * between the two.  Or, the last
					 * reference to the copy-object (other
					 * than the one we have) may have
					 * disappeared - if that has happened,
					 * we don't need to make the copy.
					 */
					if (copy_object->shadow != object ||
					    copy_object->ref_count == 1) {
						/*
						 *	Gaah... start over!
						 */
						FREE_PAGE(copy_m);
						vm_object_unlock(copy_object);
						vm_object_deallocate(copy_object);
							/* may block */
						vm_object_lock(object);
						goto RetryCopy;
					}
					vm_object_lock(object);

					if (page_exists) {
						/*
						 *	We didn't need the page
						 */
						FREE_PAGE(copy_m);
					}
				}
			}
			if (!page_exists) {
				/*
				 *	Must copy page into copy-object.
				 */
				vm_page_copy(m, copy_m);
				copy_m->flags &= ~(PG_FAKE | PG_ABSENT);

				/*
				 * Things to remember:
				 * 1. The copied page must be marked 'dirty'
				 *    so it will be paged out to the copy
				 *    object.
				 * 2. If the old page was in use by any users
				 *    of the copy-object, it must be removed
				 *    from all pmaps.  (We can't know which
				 *    pmaps use it.)
				 */
				vm_page_lock_queues();
				pmap_page_protect(VM_PAGE_TO_PHYS(old_m),
						  VM_PROT_NONE);
				copy_m->flags &= ~PG_CLEAN;
				vm_page_activate(copy_m);	/* XXX */
				vm_page_unlock_queues();

				PAGE_WAKEUP(copy_m);
			}
			/*
			 *	The reference count on copy_object must be
			 *	at least 2: one for our extra reference,
			 *	and at least one from the outside world
			 *	(we checked that when we last locked
			 *	copy_object).
			 */
			copy_object->ref_count--;
			vm_object_unlock(copy_object);
			m->flags &= ~PG_COPYONWRITE;
		}
	}

	if (m->flags & (PG_ACTIVE | PG_INACTIVE))
		panic("vm_fault: active or inactive before retrying lookup");

	/*
	 *	We must verify that the maps have not changed
	 *	since our last lookup.
	 */

	if (!lookup_still_valid) {
		vm_object_t	retry_object;
		vm_offset_t	retry_offset;
		vm_prot_t	retry_prot;

		/*
		 *	Since map entries may be pageable, make sure we can
		 *	take a page fault on them.
		 */
		vm_object_unlock(object);

		/*
		 *	To avoid trying to write_lock the map while another
		 *	thread has it read_locked (in vm_map_pageable), we
		 *	do not try for write permission.  If the page is
		 *	still writable, we will get write permission.  If it
		 *	is not, or has been marked needs_copy, we enter the
		 *	mapping without write permission, and will merely
		 *	take another fault.
		 */
		result = vm_map_lookup(&map, vaddr,
				fault_type & ~VM_PROT_WRITE, &entry,
				&retry_object, &retry_offset, &retry_prot,
				&wired, &su);

		vm_object_lock(object);

		/*
		 *	If we don't need the page any longer, put it on the
		 *	active list (the easiest thing to do here).  If no
		 *	one needs it, pageout will grab it eventually.
		 */

		if (result != KERN_SUCCESS) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			return(result);
		}

		lookup_still_valid = TRUE;

		if ((retry_object != first_object) ||
				(retry_offset != first_offset)) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}

		/*
		 *	Check whether the protection has changed or the object
		 *	has been copied while we left the map unlocked.
		 *	Changing from read to write permission is OK - we leave
		 *	the page write-protected, and catch the write fault.
		 *	Changing from write to read permission means that we
		 *	can't mark the page write-enabled after all.
		 */
		prot &= retry_prot;
		if (m->flags & PG_COPYONWRITE)
			prot &= ~VM_PROT_WRITE;
	}

	/*
	 * (the various bits we're fiddling with here are locked by
	 * the object's lock)
	 */

	/* XXX This distorts the meaning of the copy_on_write bit */

	if (prot & VM_PROT_WRITE)
		m->flags &= ~PG_COPYONWRITE;

	/*
	 *	It's critically important that a wired-down page be faulted
	 *	only once in each map for which it is wired.
	 */

	if (m->flags & (PG_ACTIVE | PG_INACTIVE))
		panic("vm_fault: active or inactive before pmap_enter");

	vm_object_unlock(object);

	/*
	 *	Put this page into the physical map.
	 *	We had to do the unlock above because pmap_enter
	 *	may cause other faults.   We don't put the
	 *	page back on the active queue until later so
	 *	that the page-out daemon won't find us (yet).
	 */

	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m),
			prot & ~(m->page_lock), wired);

	/*
	 *	If the page is not wired down, then put it where the
	 *	pageout daemon can find it.
	 */
	vm_object_lock(object);
	vm_page_lock_queues();
	if (change_wiring) {
		if (wired)
			vm_page_wire(m);
		else
			vm_page_unwire(m);
	}
	else
		vm_page_activate(m);
	vm_page_unlock_queues();

	/*
	 *	Unlock everything, and return
	 */

	PAGE_WAKEUP(m);
	UNLOCK_AND_DEALLOCATE;

	return(KERN_SUCCESS);

}

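/*
 * A sketch of the typical caller: the machine-dependent trap handler
 * resolves a user-mode page fault roughly as follows (variable names are
 * illustrative only; the real sequence lives in each port's trap.c):
 *
 *	va = trunc_page((vm_offset_t)fault_addr);
 *	ftype = write_access ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
 *	rv = vm_fault(&vm->vm_map, va, ftype, FALSE);
 *	if (rv != KERN_SUCCESS)
 *		deliver SIGSEGV (or SIGBUS) to the faulting process;
 */
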
/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
void vm_fault_wire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{

	register vm_offset_t	va;
	register pmap_t		pmap;

	pmap = vm_map_pmap(map);

	/*
	 *	Inform the physical mapping system that the
	 *	range of addresses may not fault, so that
	 *	page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, start, end, FALSE);

	/*
	 *	We simulate a fault to get the page and enter it
	 *	in the physical map.
	 */

	for (va = start; va < end; va += PAGE_SIZE) {
		(void) vm_fault(map, va, VM_PROT_NONE, TRUE);
	}
}


/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void vm_fault_unwire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{

	register vm_offset_t	va, pa;
	register pmap_t		pmap;

	pmap = vm_map_pmap(map);

	/*
	 *	Since the pages are wired down, we must be able to
	 *	get their mappings from the physical map system.
	 */

	vm_page_lock_queues();

	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa == (vm_offset_t) 0) {
			panic("unwire: page not in pmap");
		}
		pmap_change_wiring(pmap, va, FALSE);
		vm_page_unwire(PHYS_TO_VM_PAGE(pa));
	}
	vm_page_unlock_queues();

	/*
	 *	Inform the physical mapping system that the range
	 *	of addresses may fault, so that page tables and
	 *	such may be unwired themselves.
	 */

	pmap_pageable(pmap, start, end, TRUE);

}

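/*
 * Both routines above are normally driven from vm_map_pageable().  A rough
 * sketch of that caller (see vm_map.c for the real sequence, which walks
 * the affected map entries and handles errors):
 *
 *	if (new_pageable)
 *		vm_fault_unwire(map, entry->start, entry->end);
 *	else
 *		vm_fault_wire(map, entry->start, entry->end);
 */
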
/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */

void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
	vm_map_t	dst_map;
	vm_map_t	src_map;
	vm_map_entry_t	dst_entry;
	vm_map_entry_t	src_entry;
{

	vm_object_t	dst_object;
	vm_object_t	src_object;
	vm_offset_t	dst_offset;
	vm_offset_t	src_offset;
	vm_prot_t	prot;
	vm_offset_t	vaddr;
	vm_page_t	dst_m;
	vm_page_t	src_m;

#ifdef	lint
	src_map++;
#endif	/* lint */

	src_object = src_entry->object.vm_object;
	src_offset = src_entry->offset;

	/*
	 *	Create the top-level object for the destination entry.
	 *	(Doesn't actually shadow anything - we copy the pages
	 *	directly.)
	 */
	dst_object = vm_object_allocate(
			(vm_size_t) (dst_entry->end - dst_entry->start));

	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;

	prot = dst_entry->max_protection;

	/*
	 *	Loop through all of the pages in the entry's range, copying
	 *	each one from the source object (it should be there) to the
	 *	destination object.
	 */
	for (vaddr = dst_entry->start, dst_offset = 0;
	     vaddr < dst_entry->end;
	     vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

		/*
		 *	Allocate a page in the destination object
		 */
		vm_object_lock(dst_object);
		do {
			dst_m = vm_page_alloc(dst_object, dst_offset);
			if (dst_m == NULL) {
				vm_object_unlock(dst_object);
				VM_WAIT;
				vm_object_lock(dst_object);
			}
		} while (dst_m == NULL);

		/*
		 *	Find the page in the source object, and copy it in.
		 *	(Because the source is wired down, the page will be
		 *	in memory.)
		 */
		vm_object_lock(src_object);
		src_m = vm_page_lookup(src_object, dst_offset + src_offset);
		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");

		vm_page_copy(src_m, dst_m);

		/*
		 *	Enter it in the pmap...
		 */
		vm_object_unlock(src_object);
		vm_object_unlock(dst_object);

		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
				prot, FALSE);

		/*
		 *	Mark it no longer busy, and put it on the active list.
		 */
		vm_object_lock(dst_object);
		vm_page_lock_queues();
		vm_page_activate(dst_m);
		vm_page_unlock_queues();
		PAGE_WAKEUP(dst_m);
		vm_object_unlock(dst_object);
	}

}