xref: /original-bsd/sys/vm/vm_fault.c (revision 95a66346)
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_fault.c	7.4 (Berkeley) 03/27/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include "param.h"
#include "../vm/vm_param.h"
#include "../vm/vm_map.h"
#include "../vm/vm_object.h"
#include "../vm/vm_page.h"
#include "../vm/pmap.h"
#include "../vm/vm_statistics.h"
#include "../vm/vm_pageout.h"

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
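/*
 *	Usage sketch (editor's illustration, not part of the original
 *	source): a machine-dependent trap handler would typically resolve
 *	a user-mode fault with a call along these lines, passing a
 *	page-aligned address as required above:
 *
 *		rv = vm_fault(map, trunc_page(va), ftype, FALSE);
 *		if (rv != KERN_SUCCESS)
 *			(signal the process or panic, as appropriate)
 *
 *	Here map, va, ftype and rv are hypothetical names for the faulting
 *	map, fault address, required protection and result.
 */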
vm_fault(map, vaddr, fault_type, change_wiring)
	vm_map_t	map;
	vm_offset_t	vaddr;
	vm_prot_t	fault_type;
	boolean_t	change_wiring;
{
	vm_object_t		first_object;
	vm_offset_t		first_offset;
	vm_map_entry_t		entry;
	register vm_object_t	object;
	register vm_offset_t	offset;
	register vm_page_t	m;
	vm_page_t		first_m;
	vm_prot_t		prot;
	int			result;
	boolean_t		wired;
	boolean_t		su;
	boolean_t		lookup_still_valid;
	boolean_t		page_exists;
	vm_page_t		old_m;
	vm_object_t		next_object;

	vm_stat.faults++;		/* needs lock XXX */
/*
 *	Recovery actions
 */
#define	FREE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_free(m);				\
	vm_page_unlock_queues();			\
}

#define	RELEASE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_activate(m);				\
	vm_page_unlock_queues();			\
}

#define	UNLOCK_MAP	{				\
	if (lookup_still_valid) {			\
		vm_map_lookup_done(map, entry);		\
		lookup_still_valid = FALSE;		\
	}						\
}

#define	UNLOCK_THINGS	{				\
	object->paging_in_progress--;			\
	vm_object_unlock(object);			\
	if (object != first_object) {			\
		vm_object_lock(first_object);		\
		FREE_PAGE(first_m);			\
		first_object->paging_in_progress--;	\
		vm_object_unlock(first_object);		\
	}						\
	UNLOCK_MAP;					\
}

#define	UNLOCK_AND_DEALLOCATE	{			\
	UNLOCK_THINGS;					\
	vm_object_deallocate(first_object);		\
}

    RetryFault: ;

	/*
	 *	Find the backing store object and offset into
	 *	it to begin the search.
	 */

	if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
			&first_object, &first_offset,
			&prot, &wired, &su)) != KERN_SUCCESS) {
		return(result);
	}
	lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot;

	first_m = VM_PAGE_NULL;

   	/*
	 *	Make a reference to this object to
	 *	prevent its disposal while we are messing with
	 *	it.  Once we have the reference, the map is free
	 *	to be diddled.  Since objects reference their
	 *	shadows (and copies), they will stay around as well.
	 */

	vm_object_lock(first_object);

	first_object->ref_count++;
	first_object->paging_in_progress++;

	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *		Note that we cannot hold any locks during the
	 *		pager access or when waiting for memory, so
	 *		we use a busy page then.
	 *
	 *		Note also that we aren't as concerned about
	 *		more than one thread attempting to pager_data_unlock
	 *		the same page at once, so we don't hold the page
	 *		as busy then, but do record the highest unlock
	 *		value so far.  [Unlock requests may also be delivered
	 *		out of order.]
	 *
	 *	2)	Once we have a busy page, we must remove it from
	 *		the pageout queues, so that the pageout daemon
	 *		will not grab it away.
	 *
	 *	3)	To prevent another thread from racing us down the
	 *		shadow chain and entering a new page in the top
	 *		object before we do, we must keep a busy page in
	 *		the top object while following the shadow chain.
	 *
	 *	4)	We must increment paging_in_progress on any object
	 *		for which we have a busy page, to prevent
	 *		vm_object_collapse from removing the busy page
	 *		without our noticing.
	 */

	/*
	 *	Search for the page at object/offset.
	 */

	object = first_object;
	offset = first_offset;

	/*
	 *	See whether this page is resident
	 */

	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL) {
			/*
			 *	If the page is being brought in,
			 *	wait for it and then retry.
			 */
			if (m->busy) {
#ifdef DOTHREADS
				int	wait_result;

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			if (m->absent)
				panic("vm_fault: absent");

			/*
			 *	If the desired access to this page has
			 *	been locked out, request that it be unlocked.
			 */

			if (fault_type & m->page_lock) {
#ifdef DOTHREADS
				int	wait_result;

				if ((fault_type & m->unlock_request) != fault_type)
					panic("vm_fault: pager_data_unlock");

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				if ((fault_type & m->unlock_request) != fault_type)
					panic("vm_fault: pager_data_unlock");

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			/*
			 *	Remove the page from the pageout daemon's
			 *	reach while we play with it.
			 */

			vm_page_lock_queues();
			if (m->inactive) {
				queue_remove(&vm_page_queue_inactive, m,
						vm_page_t, pageq);
				m->inactive = FALSE;
				vm_page_inactive_count--;
				vm_stat.reactivations++;
			}

			if (m->active) {
				queue_remove(&vm_page_queue_active, m,
						vm_page_t, pageq);
				m->active = FALSE;
				vm_page_active_count--;
			}
			vm_page_unlock_queues();

			/*
			 *	Mark page busy for other threads.
			 */
			m->busy = TRUE;
			m->absent = FALSE;
			break;
		}

		if (((object->pager != vm_pager_null) &&
				(!change_wiring || wired))
		    || (object == first_object)) {

			/*
			 *	Allocate a new page for this object/offset
			 *	pair.
			 */

			m = vm_page_alloc(object, offset);

			if (m == VM_PAGE_NULL) {
				UNLOCK_AND_DEALLOCATE;
				VM_WAIT;
				goto RetryFault;
			}
		}

		if ((object->pager != vm_pager_null) &&
				(!change_wiring || wired)) {
			int rv;

			/*
			 *	Now that we have a busy page, we can
			 *	release the object lock.
			 */
			vm_object_unlock(object);

			/*
			 *	Call the pager to retrieve the data, if any,
			 *	after releasing the lock on the map.
			 */
			UNLOCK_MAP;

			rv = vm_pager_get(object->pager, m, TRUE);
			if (rv == VM_PAGER_OK) {
				/*
				 *	Found the page.
				 *	Leave it busy while we play with it.
				 */
				vm_object_lock(object);

				/*
				 *	Relookup in case pager changed page.
				 *	Pager is responsible for disposition
				 *	of old page if moved.
				 */
				m = vm_page_lookup(object, offset);

				vm_stat.pageins++;
				m->fake = FALSE;
				pmap_clear_modify(VM_PAGE_TO_PHYS(m));
				break;
			}

			/*
			 *	Remove the bogus page (which does not
			 *	exist at this object/offset); before
			 *	doing so, we must get back our object
			 *	lock to preserve our invariant.
			 *
			 *	Also wake up any other thread that may want
			 *	to bring in this page.
			 *
			 *	If this is the top-level object, we must
			 *	leave the busy page to prevent another
			 *	thread from rushing past us, and inserting
			 *	the page in that object at the same time
			 *	that we are.
			 */

			vm_object_lock(object);
			/*
			 * Data outside the range of the pager; an error
			 */
			if (rv == VM_PAGER_BAD) {
				FREE_PAGE(m);
				UNLOCK_AND_DEALLOCATE;
				return(KERN_PROTECTION_FAILURE); /* XXX */
			}
			if (object != first_object) {
				FREE_PAGE(m);
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
			}
		}

		/*
		 * We get here if the object has no pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (object == first_object)
			first_m = m;

		/*
		 *	Move on to the next object.  Lock the next
		 *	object before unlocking the current one.
		 */

		offset += object->shadow_offset;
		next_object = object->shadow;
		if (next_object == VM_OBJECT_NULL) {
			/*
			 *	If there's no object left, fill the page
			 *	in the top object with zeros.
			 */
			if (object != first_object) {
				object->paging_in_progress--;
				vm_object_unlock(object);

				object = first_object;
				offset = first_offset;
				m = first_m;
				vm_object_lock(object);
			}
			first_m = VM_PAGE_NULL;

			vm_page_zero_fill(m);
			vm_stat.zero_fill_count++;
			m->fake = FALSE;
			m->absent = FALSE;
			break;
		}
		else {
			vm_object_lock(next_object);
			if (object != first_object)
				object->paging_in_progress--;
			vm_object_unlock(object);
			object = next_object;
			object->paging_in_progress++;
		}
	}

	if (m->absent || m->active || m->inactive || !m->busy)
		panic("vm_fault: absent or active or inactive or not busy after main loop");

	/*
	 *	PAGE HAS BEEN FOUND.
	 *	[Loop invariant still holds -- the object lock
	 *	is held.]
	 */

	old_m = m;	/* save page that would be copied */

	/*
	 *	If the page is being written, but isn't
	 *	already owned by the top-level object,
	 *	we have to copy it into a new page owned
	 *	by the top-level object.
	 */

	if (object != first_object) {
	    	/*
		 *	We only really need to copy if we
		 *	want to write it.
		 */

	    	if (fault_type & VM_PROT_WRITE) {

			/*
			 *	If we try to collapse first_object at this
			 *	point, we may deadlock when we try to get
			 *	the lock on an intermediate object (since we
			 *	have the bottom object locked).  We can't
			 *	unlock the bottom object, because the page
			 *	we found may move (by collapse) if we do.
			 *
			 *	Instead, we first copy the page.  Then, when
			 *	we have no more use for the bottom object,
			 *	we unlock it and try to collapse.
			 *
			 *	Note that we copy the page even if we didn't
			 *	need to... that's the breaks.
			 */

		    	/*
			 *	We already have an empty page in
			 *	first_object - use it.
			 */

			vm_page_copy(m, first_m);
			first_m->fake = FALSE;
			first_m->absent = FALSE;

			/*
			 *	If another map is truly sharing this
			 *	page with us, we have to flush all
			 *	uses of the original page, since we
			 *	can't distinguish those which want the
			 *	original from those which need the
			 *	new copy.
			 */

			vm_page_lock_queues();
			if (!su) {
				/*
				 *	Also, once it's no longer in
				 *	use by any maps, move it to
				 *	the inactive queue instead.
				 */

				vm_page_deactivate(m);
				pmap_remove_all(VM_PAGE_TO_PHYS(m));
			}
			else {
				/*
				 *	Old page is only (possibly)
				 *	in use by faulting map.  We
				 *	should do a pmap_remove on
				 *	that mapping, but we know
				 *	that pmap_enter will remove
				 *	the old mapping before
				 *	inserting the new one.
				 */
				vm_page_activate(m);
			}
			vm_page_unlock_queues();

			/*
			 *	We no longer need the old page or object.
			 */
			PAGE_WAKEUP(m);
			object->paging_in_progress--;
			vm_object_unlock(object);

			/*
			 *	Only use the new page below...
			 */

			vm_stat.cow_faults++;
			m = first_m;
			object = first_object;
			offset = first_offset;

			/*
			 *	Now that we've gotten the copy out of the
			 *	way, let's try to collapse the top object.
			 */
			vm_object_lock(object);
			/*
			 *	But we have to play ugly games with
			 *	paging_in_progress to do that...
			 */
			object->paging_in_progress--;
			vm_object_collapse(object);
			object->paging_in_progress++;
		}
		else {
		    	prot &= (~VM_PROT_WRITE);
			m->copy_on_write = TRUE;
		}
	}

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before copy object handling");

	/*
	 *	If the page is being written, but hasn't been
	 *	copied to the copy-object, we have to copy it there.
	 */
    RetryCopy:
	if (first_object->copy != VM_OBJECT_NULL) {
		vm_object_t		copy_object = first_object->copy;
		vm_offset_t		copy_offset;
		vm_page_t		copy_m;

		/*
		 *	We only need to copy if we want to write it.
		 */
		if ((fault_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
			m->copy_on_write = TRUE;
		}
		else {
			/*
			 *	Try to get the lock on the copy_object.
			 */
			if (!vm_object_lock_try(copy_object)) {
				vm_object_unlock(object);
				/* should spin a bit here... */
				vm_object_lock(object);
				goto RetryCopy;
			}

			/*
			 *	Make another reference to the copy-object,
			 *	to keep it from disappearing during the
			 *	copy.
			 */
			copy_object->ref_count++;

			/*
			 *	Does the page exist in the copy?
			 */
			copy_offset = first_offset
				- copy_object->shadow_offset;
			copy_m = vm_page_lookup(copy_object, copy_offset);
			if (page_exists = (copy_m != VM_PAGE_NULL)) {
				if (copy_m->busy) {
#ifdef DOTHREADS
					int	wait_result;

					/*
					 *	If the page is being brought
					 *	in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_block();
					wait_result = current_thread()->wait_result;
					vm_object_deallocate(first_object);
					if (wait_result != THREAD_AWAKENED)
						return(KERN_SUCCESS);
					goto RetryFault;
#else
					/*
					 *	If the page is being brought
					 *	in, wait for it and then retry.
					 */
					PAGE_ASSERT_WAIT(copy_m, !change_wiring);
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_THINGS;
					thread_block();
					vm_object_deallocate(first_object);
					goto RetryFault;
#endif
				}
			}

			/*
			 *	If the page is not in memory (in the object)
			 *	and the object has a pager, we have to check
			 *	if the pager has the data in secondary
			 *	storage.
			 */
			if (!page_exists) {

				/*
				 *	If we don't allocate a (blank) page
				 *	here... another thread could try
				 *	to page it in, allocate a page, and
				 *	then block on the busy page in its
				 *	shadow (first_object).  Then we'd
				 *	trip over the busy page after we
				 *	found that the copy_object's pager
				 *	doesn't have the page...
				 */
				copy_m = vm_page_alloc(copy_object,
								copy_offset);
				if (copy_m == VM_PAGE_NULL) {
					/*
					 *	Wait for a page, then retry.
					 */
					RELEASE_PAGE(m);
					copy_object->ref_count--;
					vm_object_unlock(copy_object);
					UNLOCK_AND_DEALLOCATE;
					VM_WAIT;
					goto RetryFault;
				}

			 	if (copy_object->pager != vm_pager_null) {
					vm_object_unlock(object);
					vm_object_unlock(copy_object);
					UNLOCK_MAP;

					page_exists = vm_pager_has_page(
							copy_object->pager,
							(copy_offset + copy_object->paging_offset));

					vm_object_lock(copy_object);

					/*
					 * Since the map is unlocked, someone
					 * else could have copied this object
					 * and put a different copy_object
					 * between the two.  Or, the last
					 * reference to the copy-object (other
					 * than the one we have) may have
					 * disappeared - if that has happened,
					 * we don't need to make the copy.
					 */
					if (copy_object->shadow != object ||
					    copy_object->ref_count == 1) {
						/*
						 *	Gaah... start over!
						 */
						FREE_PAGE(copy_m);
						vm_object_unlock(copy_object);
						vm_object_deallocate(copy_object);
							/* may block */
						vm_object_lock(object);
						goto RetryCopy;
					}
					vm_object_lock(object);

					if (page_exists) {
						/*
						 *	We didn't need the page
						 */
						FREE_PAGE(copy_m);
					}
				}
			}
			if (!page_exists) {
				/*
				 *	Must copy page into copy-object.
				 */
				vm_page_copy(m, copy_m);
				copy_m->fake = FALSE;
				copy_m->absent = FALSE;

				/*
				 * Things to remember:
				 * 1. The copied page must be marked 'dirty'
				 *    so it will be paged out to the copy
				 *    object.
				 * 2. If the old page was in use by any users
				 *    of the copy-object, it must be removed
				 *    from all pmaps.  (We can't know which
				 *    pmaps use it.)
				 */
				vm_page_lock_queues();
				pmap_remove_all(VM_PAGE_TO_PHYS(old_m));
				copy_m->clean = FALSE;
				vm_page_activate(copy_m);	/* XXX */
				vm_page_unlock_queues();

				PAGE_WAKEUP(copy_m);
			}
			/*
			 *	The reference count on copy_object must be
			 *	at least 2: one for our extra reference,
			 *	and at least one from the outside world
			 *	(we checked that when we last locked
			 *	copy_object).
			 */
			copy_object->ref_count--;
			vm_object_unlock(copy_object);
			m->copy_on_write = FALSE;
		}
	}

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before retrying lookup");

	/*
	 *	We must verify that the maps have not changed
	 *	since our last lookup.
	 */

	if (!lookup_still_valid) {
		vm_object_t	retry_object;
		vm_offset_t	retry_offset;
		vm_prot_t	retry_prot;

		/*
		 *	Since map entries may be pageable, make sure we can
		 *	take a page fault on them.
		 */
		vm_object_unlock(object);

		/*
		 *	To avoid trying to write_lock the map while another
		 *	thread has it read_locked (in vm_map_pageable), we
		 *	do not try for write permission.  If the page is
		 *	still writable, we will get write permission.  If it
		 *	is not, or has been marked needs_copy, we enter the
		 *	mapping without write permission, and will merely
		 *	take another fault.
		 */
		result = vm_map_lookup(&map, vaddr,
				fault_type & ~VM_PROT_WRITE, &entry,
				&retry_object, &retry_offset, &retry_prot,
				&wired, &su);

		vm_object_lock(object);

		/*
		 *	If we don't need the page any longer, put it on the
		 *	active list (the easiest thing to do here).  If no
		 *	one needs it, pageout will grab it eventually.
		 */

		if (result != KERN_SUCCESS) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			return(result);
		}

		lookup_still_valid = TRUE;

		if ((retry_object != first_object) ||
				(retry_offset != first_offset)) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}

		/*
		 *	Check whether the protection has changed or the object
		 *	has been copied while we left the map unlocked.
		 *	Changing from read to write permission is OK - we leave
		 *	the page write-protected, and catch the write fault.
		 *	Changing from write to read permission means that we
		 *	can't mark the page write-enabled after all.
		 */
		prot &= retry_prot;
		if (m->copy_on_write)
			prot &= ~VM_PROT_WRITE;
	}

	/*
	 * (the various bits we're fiddling with here are locked by
	 * the object's lock)
	 */

	/* XXX This distorts the meaning of the copy_on_write bit */

	if (prot & VM_PROT_WRITE)
		m->copy_on_write = FALSE;

	/*
	 *	It's critically important that a wired-down page be faulted
	 *	only once in each map for which it is wired.
	 */

	if (m->active || m->inactive)
		panic("vm_fault: active or inactive before pmap_enter");

	vm_object_unlock(object);

	/*
	 *	Put this page into the physical map.
	 *	We had to do the unlock above because pmap_enter
	 *	may cause other faults.   We don't put the
	 *	page back on the active queue until later so
	 *	that the page-out daemon won't find us (yet).
	 */

	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m),
			prot & ~(m->page_lock), wired);

	/*
	 *	If the page is not wired down, then put it where the
	 *	pageout daemon can find it.
	 */
	vm_object_lock(object);
	vm_page_lock_queues();
	if (change_wiring) {
		if (wired)
			vm_page_wire(m);
		else
			vm_page_unwire(m);
	}
	else
		vm_page_activate(m);
	vm_page_unlock_queues();

	/*
	 *	Unlock everything, and return
	 */

	PAGE_WAKEUP(m);
	UNLOCK_AND_DEALLOCATE;

	return(KERN_SUCCESS);

}

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
void vm_fault_wire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{

	register vm_offset_t	va;
	register pmap_t		pmap;

	pmap = vm_map_pmap(map);

	/*
	 *	Inform the physical mapping system that the
	 *	range of addresses may not fault, so that
	 *	page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, start, end, FALSE);

	/*
	 *	We simulate a fault to get the page and enter it
	 *	in the physical map.
	 */

	for (va = start; va < end; va += PAGE_SIZE) {
		(void) vm_fault(map, va, VM_PROT_NONE, TRUE);
	}
}
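
/*
 *	Editor's note (an illustration, not part of the original source):
 *	vm_fault_wire and vm_fault_unwire are intended to be driven from the
 *	map layer when a region's wiring changes, so a caller wiring a user
 *	range would normally go through the map interface rather than call
 *	here directly, roughly:
 *
 *		rv = vm_map_pageable(map, trunc_page(addr),
 *				round_page(addr + size), FALSE);
 *
 *	where FALSE requests that the range be made non-pageable (wired).
 *	The names map, addr, size and rv are hypothetical.
 */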


/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void vm_fault_unwire(map, start, end)
	vm_map_t	map;
	vm_offset_t	start, end;
{

	register vm_offset_t	va, pa;
	register pmap_t		pmap;

	pmap = vm_map_pmap(map);

	/*
	 *	Since the pages are wired down, we must be able to
	 *	get their mappings from the physical map system.
	 */

	vm_page_lock_queues();

	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa == (vm_offset_t) 0) {
			panic("unwire: page not in pmap");
		}
		pmap_change_wiring(pmap, va, FALSE);
		vm_page_unwire(PHYS_TO_VM_PAGE(pa));
	}
	vm_page_unlock_queues();

	/*
	 *	Inform the physical mapping system that the range
	 *	of addresses may fault, so that page tables and
	 *	such may be unwired themselves.
	 */

	pmap_pageable(pmap, start, end, TRUE);

}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */
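/*
 *	Editor's illustration (hedged, not part of the original source):
 *	a caller duplicating a wired entry during a map copy would bypass
 *	the usual copy-on-write setup and invoke this routine directly,
 *	along the lines of
 *
 *		if (src_entry->wired_count != 0)
 *			vm_fault_copy_entry(dst_map, src_map,
 *					dst_entry, src_entry);
 *
 *	where wired_count is the map entry's wiring field in this VM system.
 */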

void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
	vm_map_t	dst_map;
	vm_map_t	src_map;
	vm_map_entry_t	dst_entry;
	vm_map_entry_t	src_entry;
{

	vm_object_t	dst_object;
	vm_object_t	src_object;
	vm_offset_t	dst_offset;
	vm_offset_t	src_offset;
	vm_prot_t	prot;
	vm_offset_t	vaddr;
	vm_page_t	dst_m;
	vm_page_t	src_m;

#ifdef	lint
	src_map++;
#endif	lint

	src_object = src_entry->object.vm_object;
	src_offset = src_entry->offset;

	/*
	 *	Create the top-level object for the destination entry.
	 *	(Doesn't actually shadow anything - we copy the pages
	 *	directly.)
	 */
	dst_object = vm_object_allocate(
			(vm_size_t) (dst_entry->end - dst_entry->start));

	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;

	prot  = dst_entry->max_protection;

	/*
	 *	Loop through all of the pages in the entry's range, copying
	 *	each one from the source object (it should be there) to the
	 *	destination object.
	 */
	for (vaddr = dst_entry->start, dst_offset = 0;
	     vaddr < dst_entry->end;
	     vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

		/*
		 *	Allocate a page in the destination object
		 */
		vm_object_lock(dst_object);
		do {
			dst_m = vm_page_alloc(dst_object, dst_offset);
			if (dst_m == VM_PAGE_NULL) {
				vm_object_unlock(dst_object);
				VM_WAIT;
				vm_object_lock(dst_object);
			}
		} while (dst_m == VM_PAGE_NULL);

		/*
		 *	Find the page in the source object, and copy it in.
		 *	(Because the source is wired down, the page will be
		 *	in memory.)
		 */
		vm_object_lock(src_object);
		src_m = vm_page_lookup(src_object, dst_offset + src_offset);
		if (src_m == VM_PAGE_NULL)
			panic("vm_fault_copy_wired: page missing");

		vm_page_copy(src_m, dst_m);

		/*
		 *	Enter it in the pmap...
		 */
		vm_object_unlock(src_object);
		vm_object_unlock(dst_object);

		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
				prot, FALSE);

		/*
		 *	Mark it no longer busy, and put it on the active list.
		 */
		vm_object_lock(dst_object);
		vm_page_lock_queues();
		vm_page_activate(dst_m);
		vm_page_unlock_queues();
		PAGE_WAKEUP(dst_m);
		vm_object_unlock(dst_object);
	}

}