/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_fault.c	8.6 (Berkeley) 05/11/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
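/*
 *	Overview of the fault path below: look up the faulting address in
 *	the map, walk the shadow chain from the resulting object until the
 *	page is found (paging it in or zero-filling as needed), perform any
 *	copy-on-write into the top-level object and any push into the
 *	copy-object, revalidate the map lookup, and finally enter the page
 *	into the physical map.
 */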
int
vm_fault(map, vaddr, fault_type, change_wiring)
	vm_map_t	map;
	vm_offset_t	vaddr;
	vm_prot_t	fault_type;
	boolean_t	change_wiring;
{
	vm_object_t		first_object;
	vm_offset_t		first_offset;
	vm_map_entry_t		entry;
	register vm_object_t	object;
	register vm_offset_t	offset;
	register vm_page_t	m;
	vm_page_t		first_m;
	vm_prot_t		prot;
	int			result;
	boolean_t		wired;
	boolean_t		su;
	boolean_t		lookup_still_valid;
	boolean_t		page_exists;
	vm_page_t		old_m;
	vm_object_t		next_object;

	cnt.v_faults++;		/* needs lock XXX */
	/*
	 *	Recovery actions
	 */
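	/*
	 *	These macros back out state the fault has built up so far:
	 *	FREE_PAGE frees a busy page outright, RELEASE_PAGE puts one
	 *	back on the active queue, UNLOCK_MAP releases the map lookup,
	 *	UNLOCK_THINGS additionally drops the object lock, the
	 *	paging_in_progress counts, and any placeholder page held in
	 *	the top-level object, and UNLOCK_AND_DEALLOCATE also releases
	 *	our extra reference to first_object.
	 */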
#define	FREE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_free(m);				\
	vm_page_unlock_queues();			\
}

#define	RELEASE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_activate(m);				\
	vm_page_unlock_queues();			\
}

#define	UNLOCK_MAP	{				\
	if (lookup_still_valid) {			\
		vm_map_lookup_done(map, entry);		\
		lookup_still_valid = FALSE;		\
	}						\
}

#define	UNLOCK_THINGS	{				\
	object->paging_in_progress--;			\
	vm_object_unlock(object);			\
	if (object != first_object) {			\
		vm_object_lock(first_object);		\
		FREE_PAGE(first_m);			\
		first_object->paging_in_progress--;	\
		vm_object_unlock(first_object);		\
	}						\
	UNLOCK_MAP;					\
}

#define	UNLOCK_AND_DEALLOCATE	{			\
	UNLOCK_THINGS;					\
	vm_object_deallocate(first_object);		\
}

    RetryFault: ;

	/*
	 *	Find the backing store object and offset into
	 *	it to begin the search.
	 */

	if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
			&first_object, &first_offset,
			&prot, &wired, &su)) != KERN_SUCCESS) {
		return(result);
	}
	lookup_still_valid = TRUE;

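	/*
	 *	If the mapping is wired, the page must be resident and
	 *	mapped for every access the entry allows (a wired page is
	 *	faulted only once per wiring), so treat this fault as a
	 *	request for the entry's full protection.
	 */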
	if (wired)
		fault_type = prot;

	first_m = NULL;

	/*
	 *	Make a reference to this object to
	 *	prevent its disposal while we are messing with
	 *	it.  Once we have the reference, the map is free
	 *	to be diddled.  Since objects reference their
	 *	shadows (and copies), they will stay around as well.
	 */

	vm_object_lock(first_object);

	first_object->ref_count++;
	first_object->paging_in_progress++;

	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *		Note that we cannot hold any locks during the
	 *		pager access or when waiting for memory, so
	 *		we use a busy page then.
	 *
	 *		Note also that we aren't as concerned about
	 *		more than one thread attempting to pager_data_unlock
	 *		the same page at once, so we don't hold the page
	 *		as busy then, but do record the highest unlock
	 *		value so far.  [Unlock requests may also be delivered
	 *		out of order.]
	 *
	 *	2)	Once we have a busy page, we must remove it from
	 *		the pageout queues, so that the pageout daemon
	 *		will not grab it away.
	 *
	 *	3)	To prevent another thread from racing us down the
	 *		shadow chain and entering a new page in the top
	 *		object before we do, we must keep a busy page in
	 *		the top object while following the shadow chain.
	 *
	 *	4)	We must increment paging_in_progress on any object
	 *		for which we have a busy page, to prevent
	 *		vm_object_collapse from removing the busy page
	 *		without our noticing.
	 */

	/*
	 *	Search for the page at object/offset.
	 */

	object = first_object;
	offset = first_offset;

	/*
	 *	See whether this page is resident
	 */
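	/*
	 *	The loop below walks the shadow chain starting at
	 *	first_object.  At each level we either find the page
	 *	resident, allocate a page and ask the object's pager for it,
	 *	or step down to the shadow object; if the chain ends without
	 *	finding the page, the placeholder page in the top-level
	 *	object is zero-filled.  We leave the loop with the object
	 *	locked and the page busy.
	 */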

	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != NULL) {
			/*
			 *	If the page is being brought in,
			 *	wait for it and then retry.
			 */
			if (m->flags & PG_BUSY) {
#ifdef DOTHREADS
				int	wait_result;

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				cnt.v_intrans++;
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			/*
			 *	Remove the page from the pageout daemon's
			 *	reach while we play with it.
			 */

			vm_page_lock_queues();
			if (m->flags & PG_INACTIVE) {
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				m->flags &= ~PG_INACTIVE;
				cnt.v_inactive_count--;
				cnt.v_reactivated++;
			}

			if (m->flags & PG_ACTIVE) {
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				m->flags &= ~PG_ACTIVE;
				cnt.v_active_count--;
			}
			vm_page_unlock_queues();

			/*
			 *	Mark page busy for other threads.
			 */
			m->flags |= PG_BUSY;
			break;
		}

		if (((object->pager != NULL) &&
				(!change_wiring || wired))
		    || (object == first_object)) {

			/*
			 *	Allocate a new page for this object/offset
			 *	pair.
			 */

			m = vm_page_alloc(object, offset);

			if (m == NULL) {
				UNLOCK_AND_DEALLOCATE;
				VM_WAIT;
				goto RetryFault;
			}
		}

		if (object->pager != NULL && (!change_wiring || wired)) {
			int rv;

			/*
			 *	Now that we have a busy page, we can
			 *	release the object lock.
			 */
			vm_object_unlock(object);

			/*
			 *	Call the pager to retrieve the data, if any,
			 *	after releasing the lock on the map.
			 */
			UNLOCK_MAP;
			cnt.v_pageins++;
			rv = vm_pager_get(object->pager, m, TRUE);

			/*
			 *	Reacquire the object lock to preserve our
			 *	invariant.
			 */
			vm_object_lock(object);

			/*
			 *	Found the page.
			 *	Leave it busy while we play with it.
			 */
			if (rv == VM_PAGER_OK) {
				/*
				 *	Relookup in case pager changed page.
				 *	Pager is responsible for disposition
				 *	of old page if moved.
				 */
				m = vm_page_lookup(object, offset);

				cnt.v_pgpgin++;
				m->flags &= ~PG_FAKE;
				m->flags |= PG_CLEAN;
				pmap_clear_modify(VM_PAGE_TO_PHYS(m));
				break;
			}

			/*
			 * IO error or page outside the range of the pager:
			 * cleanup and return an error.
			 */
			if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
				FREE_PAGE(m);
				UNLOCK_AND_DEALLOCATE;
				return(KERN_PROTECTION_FAILURE); /* XXX */
			}
			/*
			 * rv == VM_PAGER_FAIL:
			 *
			 * Page does not exist at this object/offset.
			 * Free the bogus page (waking up anyone waiting
			 * for it) and continue on to the next object.
			 *
			 * If this is the top-level object, we must
			 * leave the busy page to prevent another
			 * thread from rushing past us, and inserting
			 * the page in that object at the same time
			 * that we are.
			 */
			if (object != first_object) {
				FREE_PAGE(m);
				/* note that `m' is not used after this */
			}
		}

		/*
		 * We get here if the object has no pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (object == first_object)
			first_m = m;

		/*
		 *	Move on to the next object.  Lock the next
		 *	object before unlocking the current one.
		 */

		offset += object->shadow_offset;
		next_object = object->shadow;
		if (next_object == NULL) {
			/*
			 *	If there's no object left, fill the page
			 *	in the top object with zeros.
			 */
			if (object != first_object) {
				object->paging_in_progress--;
				vm_object_unlock(object);

				object = first_object;
				offset = first_offset;
				m = first_m;
				vm_object_lock(object);
			}
			first_m = NULL;

			vm_page_zero_fill(m);
			cnt.v_zfod++;
			m->flags &= ~PG_FAKE;
			break;
		}
		else {
			vm_object_lock(next_object);
			if (object != first_object)
				object->paging_in_progress--;
			vm_object_unlock(object);
			object = next_object;
			object->paging_in_progress++;
		}
	}

	if ((m->flags & (PG_ACTIVE | PG_INACTIVE | PG_BUSY)) != PG_BUSY)
		panic("vm_fault: active, inactive or !busy after main loop");

	/*
	 *	PAGE HAS BEEN FOUND.
	 *	[Loop invariant still holds -- the object lock
	 *	is held.]
	 */

	old_m = m;	/* save page that would be copied */

	/*
	 *	If the page is being written, but isn't
	 *	already owned by the top-level object,
	 *	we have to copy it into a new page owned
	 *	by the top-level object.
	 */

	if (object != first_object) {
		/*
		 *	We only really need to copy if we
		 *	want to write it.
		 */

		if (fault_type & VM_PROT_WRITE) {

			/*
			 *	If we try to collapse first_object at this
			 *	point, we may deadlock when we try to get
			 *	the lock on an intermediate object (since we
			 *	have the bottom object locked).  We can't
			 *	unlock the bottom object, because the page
			 *	we found may move (by collapse) if we do.
			 *
			 *	Instead, we first copy the page.  Then, when
			 *	we have no more use for the bottom object,
			 *	we unlock it and try to collapse.
			 *
			 *	Note that we copy the page even if we didn't
			 *	need to... that's the breaks.
			 */

			/*
			 *	We already have an empty page in
			 *	first_object - use it.
			 */

			vm_page_copy(m, first_m);
			first_m->flags &= ~PG_FAKE;

			/*
			 *	If another map is truly sharing this
			 *	page with us, we have to flush all
			 *	uses of the original page, since we
			 *	can't distinguish those which want the
			 *	original from those which need the
			 *	new copy.
			 *
			 *	XXX If we know that only one map has
			 *	access to this page, then we could
			 *	avoid the pmap_page_protect() call.
			 */

			vm_page_lock_queues();
			vm_page_activate(m);
			vm_page_deactivate(m);
			vm_page_unlock_queues();
			/*
			 * XXX gag!  The page protect has been moved out
			 * of the page queue lock section to avoid a deadlock
			 * in the hp300-style (recursive) pmap module.
			 * If you were on an MP, p_p_protect might result
			 * in a vm_map_pageable(..., TRUE) for the associated
			 * page table page.  This would call vm_fault_unwire
			 * which would try to lock the page queues.
			 * Moving the call out is safe here because the
			 * object is still locked and that will prevent
			 * the pageout daemon from messing with this page
			 * on the inactive list.  (It would move it back to
			 * the active list if it were referenced but
			 * v_p_deallocate clears the ref bit).
			 */
			pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);

			/*
			 *	We no longer need the old page or object.
			 */
			PAGE_WAKEUP(m);
			object->paging_in_progress--;
			vm_object_unlock(object);

			/*
			 *	Only use the new page below...
			 */

			cnt.v_cow_faults++;
			m = first_m;
			object = first_object;
			offset = first_offset;

			/*
			 *	Now that we've gotten the copy out of the
			 *	way, let's try to collapse the top object.
			 */
			vm_object_lock(object);
			/*
			 *	But we have to play ugly games with
			 *	paging_in_progress to do that...
			 */
			object->paging_in_progress--;
			vm_object_collapse(object);
			object->paging_in_progress++;
		}
		else {
			prot &= ~VM_PROT_WRITE;
			m->flags |= PG_COPYONWRITE;
		}
	}

	if (m->flags & (PG_ACTIVE|PG_INACTIVE))
		panic("vm_fault: active or inactive before copy object handling");

	/*
	 *	If the page is being written, but hasn't been
	 *	copied to the copy-object, we have to copy it there.
	 */
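	/*
	 *	first_object->copy, when present, holds the data seen by an
	 *	outstanding copy of this object.  Before the page may be
	 *	written, its current contents must be pushed into the
	 *	copy-object - either the copy already has its own page (or
	 *	its pager does), or a copy is made here - so that users of
	 *	the copy do not observe the coming modification.
	 */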
    RetryCopy:
	if (first_object->copy != NULL) {
		vm_object_t		copy_object = first_object->copy;
		vm_offset_t		copy_offset;
		vm_page_t		copy_m;

		/*
		 *	We only need to copy if we want to write it.
		 */
		if ((fault_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
			m->flags |= PG_COPYONWRITE;
		}
		else {
			/*
			 *	Try to get the lock on the copy_object.
			 */
			if (!vm_object_lock_try(copy_object)) {
				vm_object_unlock(object);
				/* should spin a bit here... */
				vm_object_lock(object);
				goto RetryCopy;
			}

			/*
			 *	Make another reference to the copy-object,
			 *	to keep it from disappearing during the
			 *	copy.
			 */
			copy_object->ref_count++;

			/*
			 *	Does the page exist in the copy?
			 */
			copy_offset = first_offset
				- copy_object->shadow_offset;
			copy_m = vm_page_lookup(copy_object, copy_offset);
			if (page_exists = (copy_m != NULL)) {
				if (copy_m->flags & PG_BUSY) {
#ifdef DOTHREADS
					int	wait_result;

					/*
					 *	If the page is being brought
					 *	in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_block();
					wait_result = current_thread()->wait_result;
					vm_object_deallocate(first_object);
					if (wait_result != THREAD_AWAKENED)
						return(KERN_SUCCESS);
					goto RetryFault;
#else
					/*
					 *	If the page is being brought
					 *	in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_block();
					vm_object_deallocate(first_object);
					goto RetryFault;
#endif
				}
			}

			/*
			 *	If the page is not in memory (in the object)
			 *	and the object has a pager, we have to check
			 *	if the pager has the data in secondary
			 *	storage.
			 */
			if (!page_exists) {

				/*
				 *	If we don't allocate a (blank) page
				 *	here... another thread could try
				 *	to page it in, allocate a page, and
				 *	then block on the busy page in its
				 *	shadow (first_object).  Then we'd
				 *	trip over the busy page after we
				 *	found that the copy_object's pager
				 *	doesn't have the page...
				 */
				copy_m = vm_page_alloc(copy_object,
							copy_offset);
				if (copy_m == NULL) {
					/*
					 *	Wait for a page, then retry.
					 */
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_AND_DEALLOCATE;
					VM_WAIT;
					goto RetryFault;
				}

				if (copy_object->pager != NULL) {
					vm_object_unlock(object);
					vm_object_unlock(copy_object);
					UNLOCK_MAP;

					page_exists = vm_pager_has_page(
						copy_object->pager,
						(copy_offset + copy_object->paging_offset));

					vm_object_lock(copy_object);

					/*
					 * Since the map is unlocked, someone
					 * else could have copied this object
					 * and put a different copy_object
					 * between the two.  Or, the last
					 * reference to the copy-object (other
					 * than the one we have) may have
					 * disappeared - if that has happened,
					 * we don't need to make the copy.
					 */
					if (copy_object->shadow != object ||
					    copy_object->ref_count == 1) {
						/*
						 *	Gaah... start over!
						 */
						FREE_PAGE(copy_m);
						vm_object_unlock(copy_object);
						vm_object_deallocate(copy_object);
							/* may block */
						vm_object_lock(object);
						goto RetryCopy;
					}
					vm_object_lock(object);

					if (page_exists) {
						/*
						 *	We didn't need the page
						 */
						FREE_PAGE(copy_m);
					}
				}
			}
			if (!page_exists) {
				/*
				 *	Must copy page into copy-object.
				 */
				vm_page_copy(m, copy_m);
				copy_m->flags &= ~PG_FAKE;

				/*
				 * Things to remember:
				 * 1. The copied page must be marked 'dirty'
				 *    so it will be paged out to the copy
				 *    object.
				 * 2. If the old page was in use by any users
				 *    of the copy-object, it must be removed
				 *    from all pmaps.  (We can't know which
				 *    pmaps use it.)
				 */
				vm_page_lock_queues();
				pmap_page_protect(VM_PAGE_TO_PHYS(old_m),
						  VM_PROT_NONE);
				copy_m->flags &= ~PG_CLEAN;
				vm_page_activate(copy_m);	/* XXX */
				vm_page_unlock_queues();

				PAGE_WAKEUP(copy_m);
			}
			/*
			 *	The reference count on copy_object must be
			 *	at least 2: one for our extra reference,
			 *	and at least one from the outside world
			 *	(we checked that when we last locked
			 *	copy_object).
			 */
			copy_object->ref_count--;
			vm_object_unlock(copy_object);
			m->flags &= ~PG_COPYONWRITE;
		}
	}

	if (m->flags & (PG_ACTIVE | PG_INACTIVE))
		panic("vm_fault: active or inactive before retrying lookup");

	/*
	 *	We must verify that the maps have not changed
	 *	since our last lookup.
	 */

	if (!lookup_still_valid) {
		vm_object_t	retry_object;
		vm_offset_t	retry_offset;
		vm_prot_t	retry_prot;

		/*
		 *	Since map entries may be pageable, make sure we can
		 *	take a page fault on them.
		 */
		vm_object_unlock(object);

		/*
		 *	To avoid trying to write_lock the map while another
		 *	thread has it read_locked (in vm_map_pageable), we
		 *	do not try for write permission.  If the page is
		 *	still writable, we will get write permission.  If it
		 *	is not, or has been marked needs_copy, we enter the
		 *	mapping without write permission, and will merely
		 *	take another fault.
		 */
		result = vm_map_lookup(&map, vaddr,
				fault_type & ~VM_PROT_WRITE, &entry,
				&retry_object, &retry_offset, &retry_prot,
				&wired, &su);

		vm_object_lock(object);

		/*
		 *	If we don't need the page any longer, put it on the
		 *	active list (the easiest thing to do here).  If no
		 *	one needs it, pageout will grab it eventually.
		 */

		if (result != KERN_SUCCESS) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			return(result);
		}

		lookup_still_valid = TRUE;

		if ((retry_object != first_object) ||
				(retry_offset != first_offset)) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}

		/*
		 *	Check whether the protection has changed or the object
		 *	has been copied while we left the map unlocked.
		 *	Changing from read to write permission is OK - we leave
		 *	the page write-protected, and catch the write fault.
		 *	Changing from write to read permission means that we
		 *	can't mark the page write-enabled after all.
		 */
		prot &= retry_prot;
		if (m->flags & PG_COPYONWRITE)
			prot &= ~VM_PROT_WRITE;
	}

	/*
	 * (the various bits we're fiddling with here are locked by
	 * the object's lock)
	 */

	/* XXX This distorts the meaning of the copy_on_write bit */

	if (prot & VM_PROT_WRITE)
		m->flags &= ~PG_COPYONWRITE;

	/*
	 *	It's critically important that a wired-down page be faulted
	 *	only once in each map for which it is wired.
	 */

	if (m->flags & (PG_ACTIVE | PG_INACTIVE))
		panic("vm_fault: active or inactive before pmap_enter");

	vm_object_unlock(object);

	/*
	 *	Put this page into the physical map.
	 *	We had to do the unlock above because pmap_enter
	 *	may cause other faults.   We don't put the
	 *	page back on the active queue until later so
	 *	that the page-out daemon won't find us (yet).
	 */

	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);

	/*
	 *	If the page is not wired down, then put it where the
	 *	pageout daemon can find it.
	 */
	vm_object_lock(object);
	vm_page_lock_queues();
	if (change_wiring) {
		if (wired)
			vm_page_wire(m);
		else
			vm_page_unwire(m);
	}
	else
		vm_page_activate(m);
	vm_page_unlock_queues();

	/*
	 *	Unlock everything, and return
	 */

	PAGE_WAKEUP(m);
	UNLOCK_AND_DEALLOCATE;

	return(KERN_SUCCESS);
}

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
int
vm_fault_wire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{
	register vm_offset_t	va;
	register pmap_t		pmap;
	int			rv;

	pmap = vm_map_pmap(map);

	/*
	 *	Inform the physical mapping system that the
	 *	range of addresses may not fault, so that
	 *	page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, start, end, FALSE);

	/*
	 *	We simulate a fault to get the page and enter it
	 *	in the physical map.
	 */

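	/*
	 *	VM_PROT_NONE suffices for the fault type here: once
	 *	vm_fault() sees that the mapping is wired, it substitutes
	 *	the map entry's protection for the fault type.
	 */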
	for (va = start; va < end; va += PAGE_SIZE) {
		rv = vm_fault(map, va, VM_PROT_NONE, TRUE);
		if (rv) {
			if (va != start)
				vm_fault_unwire(map, start, va);
			return(rv);
		}
	}
	return(KERN_SUCCESS);
}


/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{
	register vm_offset_t	va, pa;
	register pmap_t		pmap;

	pmap = vm_map_pmap(map);

	/*
	 *	Since the pages are wired down, we must be able to
	 *	get their mappings from the physical map system.
	 */

	vm_page_lock_queues();

	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa == (vm_offset_t) 0) {
			panic("unwire: page not in pmap");
		}
		pmap_change_wiring(pmap, va, FALSE);
		vm_page_unwire(PHYS_TO_VM_PAGE(pa));
	}
	vm_page_unlock_queues();

	/*
	 *	Inform the physical mapping system that the range
	 *	of addresses may fault, so that page tables and
	 *	such may be unwired themselves.
	 */

	pmap_pageable(pmap, start, end, TRUE);
}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */

void
vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
	vm_map_t	dst_map;
	vm_map_t	src_map;
	vm_map_entry_t	dst_entry;
	vm_map_entry_t	src_entry;
{
	vm_object_t	dst_object;
	vm_object_t	src_object;
	vm_offset_t	dst_offset;
	vm_offset_t	src_offset;
	vm_prot_t	prot;
	vm_offset_t	vaddr;
	vm_page_t	dst_m;
	vm_page_t	src_m;

#ifdef	lint
	src_map++;
#endif

	src_object = src_entry->object.vm_object;
	src_offset = src_entry->offset;

	/*
	 *	Create the top-level object for the destination entry.
	 *	(Doesn't actually shadow anything - we copy the pages
	 *	directly.)
	 */
	dst_object = vm_object_allocate(
			(vm_size_t) (dst_entry->end - dst_entry->start));

	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;

	prot = dst_entry->max_protection;

	/*
	 *	Loop through all of the pages in the entry's range, copying
	 *	each one from the source object (it should be there) to the
	 *	destination object.
	 */
	for (vaddr = dst_entry->start, dst_offset = 0;
	     vaddr < dst_entry->end;
	     vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

		/*
		 *	Allocate a page in the destination object
		 */
		vm_object_lock(dst_object);
		do {
			dst_m = vm_page_alloc(dst_object, dst_offset);
			if (dst_m == NULL) {
				vm_object_unlock(dst_object);
				VM_WAIT;
				vm_object_lock(dst_object);
			}
		} while (dst_m == NULL);

		/*
		 *	Find the page in the source object, and copy it in.
		 *	(Because the source is wired down, the page will be
		 *	in memory.)
		 */
		vm_object_lock(src_object);
		src_m = vm_page_lookup(src_object, dst_offset + src_offset);
		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");

		vm_page_copy(src_m, dst_m);

		/*
		 *	Enter it in the pmap...
		 */
		vm_object_unlock(src_object);
		vm_object_unlock(dst_object);

		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
				prot, FALSE);

		/*
		 *	Mark it no longer busy, and put it on the active list.
		 */
		vm_object_lock(dst_object);
		vm_page_lock_queues();
		vm_page_activate(dst_m);
		vm_page_unlock_queues();
		PAGE_WAKEUP(dst_m);
		vm_object_unlock(dst_object);
	}
}