xref: /dragonfly/sys/vm/vm_fault.c (revision 768af85b)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  *
9  *
10  * This code is derived from software contributed to Berkeley by
11  * The Mach Operating System project at Carnegie-Mellon University.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. All advertising materials mentioning features or use of this software
22  *    must display the following acknowledgement:
23  *	This product includes software developed by the University of
24  *	California, Berkeley and its contributors.
25  * 4. Neither the name of the University nor the names of its contributors
26  *    may be used to endorse or promote products derived from this software
27  *    without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
42  *
43  *
44  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
45  * All rights reserved.
46  *
47  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
48  *
49  * Permission to use, copy, modify and distribute this software and
50  * its documentation is hereby granted, provided that both the copyright
51  * notice and this permission notice appear in all copies of the
52  * software, derivative works or modified versions, and any portions
53  * thereof, and that both notices appear in supporting documentation.
54  *
55  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
56  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
57  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
58  *
59  * Carnegie Mellon requests users of this software to return to
60  *
61  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
62  *  School of Computer Science
63  *  Carnegie Mellon University
64  *  Pittsburgh PA 15213-3890
65  *
66  * any improvements or extensions that they make and grant Carnegie the
67  * rights to redistribute these changes.
68  *
69  * $FreeBSD: src/sys/vm/vm_fault.c,v 1.108.2.8 2002/02/26 05:49:27 silby Exp $
70  * $DragonFly: src/sys/vm/vm_fault.c,v 1.47 2008/07/01 02:02:56 dillon Exp $
71  */
72 
73 /*
74  *	Page fault handling module.
75  */
76 
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/kernel.h>
80 #include <sys/proc.h>
81 #include <sys/vnode.h>
82 #include <sys/resourcevar.h>
83 #include <sys/vmmeter.h>
84 #include <sys/vkernel.h>
85 #include <sys/sfbuf.h>
86 #include <sys/lock.h>
87 #include <sys/sysctl.h>
88 
89 #include <vm/vm.h>
90 #include <vm/vm_param.h>
91 #include <vm/pmap.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_pageout.h>
96 #include <vm/vm_kern.h>
97 #include <vm/vm_pager.h>
98 #include <vm/vnode_pager.h>
99 #include <vm/vm_extern.h>
100 
101 #include <sys/thread2.h>
102 #include <vm/vm_page2.h>
103 
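/*
 * faultstate holds the transient state for a single fault resolution:
 * the looked-up map/entry, the top-level object (first_object) and the
 * object currently being examined in the shadow chain (object), the page
 * being resolved (m), and bookkeeping such as the wired/hardfault flags,
 * the saved map generation, and the vnode locked via vnode_pager_lock()
 * for the duration of any paging I/O.
 */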
104 struct faultstate {
105 	vm_page_t m;
106 	vm_object_t object;
107 	vm_pindex_t pindex;
108 	vm_prot_t prot;
109 	vm_page_t first_m;
110 	vm_object_t first_object;
111 	vm_prot_t first_prot;
112 	vm_map_t map;
113 	vm_map_entry_t entry;
114 	int lookup_still_valid;
115 	int didlimit;
116 	int hardfault;
117 	int fault_flags;
118 	int map_generation;
119 	boolean_t wired;
120 	struct vnode *vp;
121 };
122 
123 static int vm_fast_fault = 1;
124 SYSCTL_INT(_vm, OID_AUTO, fast_fault, CTLFLAG_RW, &vm_fast_fault, 0, "");
125 static int debug_cluster = 0;
126 SYSCTL_INT(_vm, OID_AUTO, debug_cluster, CTLFLAG_RW, &debug_cluster, 0, "");
127 
128 static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t);
129 static int vm_fault_vpagetable(struct faultstate *, vm_pindex_t *, vpte_t, int);
130 #if 0
131 static int vm_fault_additional_pages (vm_page_t, int, int, vm_page_t *, int *);
132 #endif
133 static int vm_fault_ratelimit(struct vmspace *);
134 static void vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry,
135 			int prot);
136 
137 static __inline void
138 release_page(struct faultstate *fs)
139 {
140 	vm_page_deactivate(fs->m);
141 	vm_page_wakeup(fs->m);
142 	fs->m = NULL;
143 }
144 
145 static __inline void
146 unlock_map(struct faultstate *fs)
147 {
148 	if (fs->lookup_still_valid && fs->map) {
149 		vm_map_lookup_done(fs->map, fs->entry, 0);
150 		fs->lookup_still_valid = FALSE;
151 	}
152 }
153 
154 /*
155  * Clean up after a successful call to vm_fault_object() so another call
156  * to vm_fault_object() can be made.
157  */
158 static void
159 _cleanup_successful_fault(struct faultstate *fs, int relock)
160 {
161 	if (fs->object != fs->first_object) {
162 		vm_page_free(fs->first_m);
163 		vm_object_pip_wakeup(fs->object);
164 		fs->first_m = NULL;
165 	}
166 	fs->object = fs->first_object;
167 	if (relock && fs->lookup_still_valid == FALSE) {
168 		if (fs->map)
169 			vm_map_lock_read(fs->map);
170 		fs->lookup_still_valid = TRUE;
171 	}
172 }
173 
174 static void
175 _unlock_things(struct faultstate *fs, int dealloc)
176 {
177 	vm_object_pip_wakeup(fs->first_object);
178 	_cleanup_successful_fault(fs, 0);
179 	if (dealloc) {
180 		vm_object_deallocate(fs->first_object);
181 		fs->first_object = NULL;
182 	}
183 	unlock_map(fs);
184 	if (fs->vp != NULL) {
185 		vput(fs->vp);
186 		fs->vp = NULL;
187 	}
188 }
189 
190 #define unlock_things(fs) _unlock_things(fs, 0)
191 #define unlock_and_deallocate(fs) _unlock_things(fs, 1)
192 #define cleanup_successful_fault(fs) _cleanup_successful_fault(fs, 1)
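/*
 * Convenience wrappers: unlock_things() releases the map and vnode locks
 * but keeps the caller's reference on fs->first_object,
 * unlock_and_deallocate() drops that reference as well, and
 * cleanup_successful_fault() resets the shadow-chain state after a
 * successful vm_fault_object() call and re-acquires the map read lock if
 * it was dropped.
 */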
193 
194 /*
195  * TRYPAGER
196  *
197  * Determine if the pager for the current object *might* contain the page.
198  *
199  * We only need to try the pager if this is not a default object (default
200  * objects are zero-fill and have no real pager), and if we are not taking
201  * a wiring fault or if the FS entry is wired.
202  */
203 #define TRYPAGER(fs)	\
204 		(fs->object->type != OBJT_DEFAULT && \
205 		(((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) || fs->wired))
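/*
 * For example, an anonymous zero-fill mapping is backed by an
 * OBJT_DEFAULT object, so TRYPAGER() is false and the fault is satisfied
 * by allocating and zero-filling a page rather than by calling a pager.
 */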
206 
207 /*
208  * vm_fault:
209  *
210  * Handle a page fault occurring at the given address, requiring the given
211  * permissions, in the map specified.  If successful, the page is inserted
212  * into the associated physical map.
213  *
214  * NOTE: The given address should be truncated to the proper page address.
215  *
216  * KERN_SUCCESS is returned if the page fault is handled; otherwise,
217  * a standard error specifying why the fault is fatal is returned.
218  *
219  * The map in question must be referenced, and remains so.
220  * The caller may hold no locks.
221  * The caller may hold no locks.
 */
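/*
 * Illustrative call (a sketch only -- the real callers are the
 * machine-dependent trap handlers; 'vm' and 'iswrite' are placeholder
 * names):
 *
 *	rv = vm_fault(&vm->vm_map, trunc_page(va),
 *		      iswrite ? VM_PROT_WRITE : VM_PROT_READ,
 *		      VM_FAULT_NORMAL);
 *	if (rv != KERN_SUCCESS)
 *		... deliver SIGSEGV/SIGBUS, or panic for a kernel fault ...
 */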
222 int
223 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
224 {
225 	int result;
226 	vm_pindex_t first_pindex;
227 	struct faultstate fs;
228 
229 	mycpu->gd_cnt.v_vm_faults++;
230 
231 	fs.didlimit = 0;
232 	fs.hardfault = 0;
233 	fs.fault_flags = fault_flags;
234 
235 RetryFault:
236 	/*
237 	 * Find the vm_map_entry representing the backing store and resolve
238 	 * the top level object and page index.  This may have the side
239 	 * effect of executing a copy-on-write on the map entry and/or
240 	 * creating a shadow object, but will not COW any actual VM pages.
241 	 *
242 	 * On success fs.map is left read-locked and various other fields
243 	 * are initialized but not otherwise referenced or locked.
244 	 *
245 	 * NOTE!  vm_map_lookup will try to upgrade the fault_type to
246 	 * VM_FAULT_WRITE if the map entry is a virtual page table and also
247 	 * writable, so we can set the 'A' (accessed) bit in the virtual page
248 	 * table entry.
249 	 */
250 	fs.map = map;
251 	result = vm_map_lookup(&fs.map, vaddr, fault_type,
252 			       &fs.entry, &fs.first_object,
253 			       &first_pindex, &fs.first_prot, &fs.wired);
254 
255 	/*
256 	 * If the lookup failed or the map protections are incompatible,
257 	 * the fault generally fails.  However, if the caller is trying
258 	 * to do a user wiring we have more work to do.
259 	 */
260 	if (result != KERN_SUCCESS) {
261 		if (result != KERN_PROTECTION_FAILURE)
262 			return result;
263 		if ((fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
264 			return result;
265 
266 		/*
267 		 * If we are user-wiring a r/w segment, and it is COW, then
268 		 * we need to do the COW operation.  Note that we do not
269 		 * currently COW RO sections, because it is NOT desirable
270 		 * to COW .text.  We simply keep .text from ever being COW'ed
271 		 * and take the heat that one cannot debug wired .text sections.
272 		 */
273 		result = vm_map_lookup(&fs.map, vaddr,
274 				       VM_PROT_READ|VM_PROT_WRITE|
275 				        VM_PROT_OVERRIDE_WRITE,
276 				       &fs.entry, &fs.first_object,
277 				       &first_pindex, &fs.first_prot,
278 				       &fs.wired);
279 		if (result != KERN_SUCCESS)
280 			return result;
281 
282 		/*
283 		 * If we don't COW now, on a user wire, the user will never
284 		 * be able to write to the mapping.  If we don't make this
285 		 * restriction, the bookkeeping would be nearly impossible.
286 		 */
287 		if ((fs.entry->protection & VM_PROT_WRITE) == 0)
288 			fs.entry->max_protection &= ~VM_PROT_WRITE;
289 	}
290 
291 	/*
292 	 * fs.map is read-locked
293 	 *
294 	 * Misc checks.  Save the map generation number to detect races.
295 	 */
296 	fs.map_generation = fs.map->timestamp;
297 
298 	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
299 		panic("vm_fault: fault on nofault entry, addr: %lx",
300 		    (u_long)vaddr);
301 	}
302 
303 	/*
304 	 * A system map entry may return a NULL object.  No object means
305 	 * no pager means an unrecoverable kernel fault.
306 	 */
307 	if (fs.first_object == NULL) {
308 		panic("vm_fault: unrecoverable fault at %p in entry %p",
309 			(void *)vaddr, fs.entry);
310 	}
311 
312 	/*
313 	 * Make a reference to this object to prevent its disposal while we
314 	 * are messing with it.  Once we have the reference, the map is free
315 	 * to be diddled.  Since objects reference their shadows (and copies),
316 	 * they will stay around as well.
317 	 *
318 	 * Bump the paging-in-progress count to prevent size changes (e.g.
319 	 * truncation operations) during I/O.  This must be done after
320 	 * obtaining the vnode lock in order to avoid possible deadlocks.
321 	 */
322 	vm_object_reference(fs.first_object);
323 	fs.vp = vnode_pager_lock(fs.first_object);
324 	vm_object_pip_add(fs.first_object, 1);
325 
326 	fs.lookup_still_valid = TRUE;
327 	fs.first_m = NULL;
328 	fs.object = fs.first_object;	/* so unlock_and_deallocate works */
329 
330 	/*
331 	 * If the entry is wired we cannot change the page protection.
332 	 */
333 	if (fs.wired)
334 		fault_type = fs.first_prot;
335 
336 	/*
337 	 * The page we want is at (first_object, first_pindex), but if the
338 	 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
339 	 * page table to figure out the actual pindex.
340 	 *
341 	 * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
342 	 * ONLY
343 	 */
344 	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
345 		result = vm_fault_vpagetable(&fs, &first_pindex,
346 					     fs.entry->aux.master_pde,
347 					     fault_type);
348 		if (result == KERN_TRY_AGAIN)
349 			goto RetryFault;
350 		if (result != KERN_SUCCESS)
351 			return (result);
352 	}
353 
354 	/*
355 	 * Now we have the actual (object, pindex), fault in the page.  If
356 	 * vm_fault_object() fails it will unlock and deallocate the FS
357 	 * data.   If it succeeds everything remains locked and fs->object
358 	 * will have an additional PIP count if it is not equal to
359 	 * fs->first_object
360 	 *
361 	 * vm_fault_object will set fs->prot for the pmap operation.  It is
362 	 * allowed to set VM_PROT_WRITE if fault_type == VM_PROT_READ if the
363 	 * page can be safely written.  However, it will force a read-only
364 	 * mapping for a read fault if the memory is managed by a virtual
365 	 * page table.
366 	 */
367 	result = vm_fault_object(&fs, first_pindex, fault_type);
368 
369 	if (result == KERN_TRY_AGAIN)
370 		goto RetryFault;
371 	if (result != KERN_SUCCESS)
372 		return (result);
373 
374 	/*
375 	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
376 	 * will contain a busied page.
377 	 *
378 	 * Enter the page into the pmap and do pmap-related adjustments.
379 	 */
380 	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);
381 
382 	/*
383 	 * Burst in a few more pages if possible.  The fs.map should still
384 	 * be locked.
385 	 */
386 	if (fault_flags & VM_FAULT_BURST) {
387 		if ((fs.fault_flags & VM_FAULT_WIRE_MASK) == 0 &&
388 		    fs.wired == 0) {
389 			vm_prefault(fs.map->pmap, vaddr, fs.entry, fs.prot);
390 		}
391 	}
392 	unlock_things(&fs);
393 
394 	vm_page_flag_clear(fs.m, PG_ZERO);
395 	vm_page_flag_set(fs.m, PG_REFERENCED);
396 
397 	/*
398 	 * If the page is not wired down, then put it where the pageout daemon
399 	 * can find it.
400 	 */
401 	if (fs.fault_flags & VM_FAULT_WIRE_MASK) {
402 		if (fs.wired)
403 			vm_page_wire(fs.m);
404 		else
405 			vm_page_unwire(fs.m, 1);
406 	} else {
407 		vm_page_activate(fs.m);
408 	}
409 
410 	if (curthread->td_lwp) {
411 		if (fs.hardfault) {
412 			curthread->td_lwp->lwp_ru.ru_majflt++;
413 		} else {
414 			curthread->td_lwp->lwp_ru.ru_minflt++;
415 		}
416 	}
417 
418 	/*
419 	 * Unlock everything, and return
420 	 */
421 	vm_page_wakeup(fs.m);
422 	vm_object_deallocate(fs.first_object);
423 
424 	return (KERN_SUCCESS);
425 }
426 
427 /*
428  * Fault in the specified virtual address in the current process map,
429  * returning a held VM page or NULL.  See vm_fault_page() for more
430  * information.
431  * information.
 */
432 vm_page_t
433 vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type, int *errorp)
434 {
435 	struct lwp *lp = curthread->td_lwp;
436 	vm_page_t m;
437 
438 	m = vm_fault_page(&lp->lwp_vmspace->vm_map, va,
439 			  fault_type, VM_FAULT_NORMAL, errorp);
440 	return(m);
441 }
442 
443 /*
444  * Fault in the specified virtual address in the specified map, doing all
445  * necessary manipulation of the object store and all necessary I/O.  Return
446  * a held VM page or NULL, and set *errorp.  The related pmap is not
447  * updated.
448  *
449  * The returned page will be properly dirtied if VM_PROT_WRITE was specified,
450  * and marked PG_REFERENCED as well.
451  *
452  * If the page cannot be faulted writable and VM_PROT_WRITE was specified, an
453  * error will be returned.
454  */
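/*
 * Illustrative use (a sketch only; 'va' and 'error' are placeholder
 * names):
 *
 *	m = vm_fault_page(&curthread->td_lwp->lwp_vmspace->vm_map,
 *			  trunc_page(va), VM_PROT_READ, VM_FAULT_NORMAL,
 *			  &error);
 *	if (m) {
 *		... access the page contents, e.g. via an sf_buf ...
 *		vm_page_unhold(m);
 *	}
 */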
455 vm_page_t
456 vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
457 	      int fault_flags, int *errorp)
458 {
459 	vm_pindex_t first_pindex;
460 	struct faultstate fs;
461 	int result;
462 	vm_prot_t orig_fault_type = fault_type;
463 
464 	mycpu->gd_cnt.v_vm_faults++;
465 
466 	fs.didlimit = 0;
467 	fs.hardfault = 0;
468 	fs.fault_flags = fault_flags;
469 	KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);
470 
471 RetryFault:
472 	/*
473 	 * Find the vm_map_entry representing the backing store and resolve
474 	 * the top level object and page index.  This may have the side
475 	 * effect of executing a copy-on-write on the map entry and/or
476 	 * creating a shadow object, but will not COW any actual VM pages.
477 	 *
478 	 * On success fs.map is left read-locked and various other fields
479 	 * are initialized but not otherwise referenced or locked.
480 	 *
481 	 * NOTE!  vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE
482 	 * if the map entry is a virtual page table and also writable,
483 	 * so we can set the 'A' (accessed) bit in the virtual page table entry.
484 	 */
485 	fs.map = map;
486 	result = vm_map_lookup(&fs.map, vaddr, fault_type,
487 			       &fs.entry, &fs.first_object,
488 			       &first_pindex, &fs.first_prot, &fs.wired);
489 
490 	if (result != KERN_SUCCESS) {
491 		*errorp = result;
492 		return (NULL);
493 	}
494 
495 	/*
496 	 * fs.map is read-locked
497 	 *
498 	 * Misc checks.  Save the map generation number to detect races.
499 	 */
500 	fs.map_generation = fs.map->timestamp;
501 
502 	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
503 		panic("vm_fault: fault on nofault entry, addr: %lx",
504 		    (u_long)vaddr);
505 	}
506 
507 	/*
508 	 * A system map entry may return a NULL object.  No object means
509 	 * no pager means an unrecoverable kernel fault.
510 	 */
511 	if (fs.first_object == NULL) {
512 		panic("vm_fault: unrecoverable fault at %p in entry %p",
513 			(void *)vaddr, fs.entry);
514 	}
515 
516 	/*
517 	 * Make a reference to this object to prevent its disposal while we
518 	 * are messing with it.  Once we have the reference, the map is free
519 	 * to be diddled.  Since objects reference their shadows (and copies),
520 	 * they will stay around as well.
521 	 *
522 	 * Bump the paging-in-progress count to prevent size changes (e.g.
523 	 * truncation operations) during I/O.  This must be done after
524 	 * obtaining the vnode lock in order to avoid possible deadlocks.
525 	 */
526 	vm_object_reference(fs.first_object);
527 	fs.vp = vnode_pager_lock(fs.first_object);
528 	vm_object_pip_add(fs.first_object, 1);
529 
530 	fs.lookup_still_valid = TRUE;
531 	fs.first_m = NULL;
532 	fs.object = fs.first_object;	/* so unlock_and_deallocate works */
533 
534 	/*
535 	 * If the entry is wired we cannot change the page protection.
536 	 */
537 	if (fs.wired)
538 		fault_type = fs.first_prot;
539 
540 	/*
541 	 * The page we want is at (first_object, first_pindex), but if the
542 	 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
543 	 * page table to figure out the actual pindex.
544 	 *
545 	 * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
546 	 * ONLY
547 	 */
548 	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
549 		result = vm_fault_vpagetable(&fs, &first_pindex,
550 					     fs.entry->aux.master_pde,
551 					     fault_type);
552 		if (result == KERN_TRY_AGAIN)
553 			goto RetryFault;
554 		if (result != KERN_SUCCESS) {
555 			*errorp = result;
556 			return (NULL);
557 		}
558 	}
559 
560 	/*
561 	 * Now we have the actual (object, pindex), fault in the page.  If
562 	 * vm_fault_object() fails it will unlock and deallocate the FS
563 	 * data.   If it succeeds everything remains locked and fs->object
564 	 * will have an additional PIP count if it is not equal to
565 	 * fs->first_object
566 	 */
567 	result = vm_fault_object(&fs, first_pindex, fault_type);
568 
569 	if (result == KERN_TRY_AGAIN)
570 		goto RetryFault;
571 	if (result != KERN_SUCCESS) {
572 		*errorp = result;
573 		return(NULL);
574 	}
575 
576 	if ((orig_fault_type & VM_PROT_WRITE) &&
577 	    (fs.prot & VM_PROT_WRITE) == 0) {
578 		*errorp = KERN_PROTECTION_FAILURE;
579 		unlock_and_deallocate(&fs);
580 		return(NULL);
581 	}
582 
583 	/*
584 	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
585 	 * will contain a busied page.
586 	 */
587 	unlock_things(&fs);
588 
589 	/*
590 	 * Return a held page.  We are not doing any pmap manipulation so do
591 	 * not set PG_MAPPED.  However, adjust the page flags according to
592 	 * the fault type because the caller may not use a managed pmapping
593 	 * (so we don't want to lose the fact that the page will be dirtied
594 	 * if a write fault was specified).
595 	 */
596 	vm_page_hold(fs.m);
597 	vm_page_flag_clear(fs.m, PG_ZERO);
598 	if (fault_type & VM_PROT_WRITE)
599 		vm_page_dirty(fs.m);
600 
601 	/*
602 	 * Update the pmap.  We really only have to do this if a COW
603 	 * occurred to replace the read-only page with the new page.  For
604 	 * now just do it unconditionally. XXX
605 	 */
606 	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);
607 	vm_page_flag_set(fs.m, PG_REFERENCED);
608 
609 	/*
610 	 * Unbusy the page by activating it.  It remains held and will not
611 	 * be reclaimed.
612 	 */
613 	vm_page_activate(fs.m);
614 
615 	if (curthread->td_lwp) {
616 		if (fs.hardfault) {
617 			curthread->td_lwp->lwp_ru.ru_majflt++;
618 		} else {
619 			curthread->td_lwp->lwp_ru.ru_minflt++;
620 		}
621 	}
622 
623 	/*
624 	 * Unlock everything, and return the held page.
625 	 */
626 	vm_page_wakeup(fs.m);
627 	vm_object_deallocate(fs.first_object);
628 
629 	*errorp = 0;
630 	return(fs.m);
631 }
632 
633 /*
634  * Fault in the specified (object,offset), dirty the returned page as
635  * needed.  If the requested fault_type cannot be satisfied, NULL is
636  * returned and an error is set in *errorp.
637  */
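/*
 * Illustrative use (a sketch only; 'obj', 'off' and 'error' are
 * placeholder names):
 *
 *	m = vm_fault_object_page(obj, off, VM_PROT_WRITE, 0, &error);
 *	if (m) {
 *		... modify the page contents ...
 *		vm_page_unhold(m);
 *	}
 */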
638 vm_page_t
639 vm_fault_object_page(vm_object_t object, vm_ooffset_t offset,
640 		     vm_prot_t fault_type, int fault_flags, int *errorp)
641 {
642 	int result;
643 	vm_pindex_t first_pindex;
644 	struct faultstate fs;
645 	struct vm_map_entry entry;
646 
647 	bzero(&entry, sizeof(entry));
648 	entry.object.vm_object = object;
649 	entry.maptype = VM_MAPTYPE_NORMAL;
650 	entry.protection = entry.max_protection = fault_type;
651 
652 	fs.didlimit = 0;
653 	fs.hardfault = 0;
654 	fs.fault_flags = fault_flags;
655 	fs.map = NULL;
656 	KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);
657 
658 RetryFault:
659 
660 	fs.first_object = object;
661 	first_pindex = OFF_TO_IDX(offset);
662 	fs.entry = &entry;
663 	fs.first_prot = fault_type;
664 	fs.wired = 0;
665 	/*fs.map_generation = 0; unused */
666 
667 	/*
668 	 * Make a reference to this object to prevent its disposal while we
669 	 * are messing with it.  Once we have the reference, the map is free
670 	 * to be diddled.  Since objects reference their shadows (and copies),
671 	 * they will stay around as well.
672 	 *
673 	 * Bump the paging-in-progress count to prevent size changes (e.g.
674 	 * truncation operations) during I/O.  This must be done after
675 	 * obtaining the vnode lock in order to avoid possible deadlocks.
676 	 */
677 	vm_object_reference(fs.first_object);
678 	fs.vp = vnode_pager_lock(fs.first_object);
679 	vm_object_pip_add(fs.first_object, 1);
680 
681 	fs.lookup_still_valid = TRUE;
682 	fs.first_m = NULL;
683 	fs.object = fs.first_object;	/* so unlock_and_deallocate works */
684 
685 #if 0
686 	/* XXX future - ability to operate on VM object using vpagetable */
687 	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
688 		result = vm_fault_vpagetable(&fs, &first_pindex,
689 					     fs.entry->aux.master_pde,
690 					     fault_type);
691 		if (result == KERN_TRY_AGAIN)
692 			goto RetryFault;
693 		if (result != KERN_SUCCESS) {
694 			*errorp = result;
695 			return (NULL);
696 		}
697 	}
698 #endif
699 
700 	/*
701 	 * Now we have the actual (object, pindex), fault in the page.  If
702 	 * vm_fault_object() fails it will unlock and deallocate the FS
703 	 * data.   If it succeeds everything remains locked and fs->object
704 	 * will have an additional PIP count if it is not equal to
705 	 * fs->first_object
706 	 */
707 	result = vm_fault_object(&fs, first_pindex, fault_type);
708 
709 	if (result == KERN_TRY_AGAIN)
710 		goto RetryFault;
711 	if (result != KERN_SUCCESS) {
712 		*errorp = result;
713 		return(NULL);
714 	}
715 
716 	if ((fault_type & VM_PROT_WRITE) && (fs.prot & VM_PROT_WRITE) == 0) {
717 		*errorp = KERN_PROTECTION_FAILURE;
718 		unlock_and_deallocate(&fs);
719 		return(NULL);
720 	}
721 
722 	/*
723 	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
724 	 * will contain a busied page.
725 	 */
726 	unlock_things(&fs);
727 
728 	/*
729 	 * Return a held page.  We are not doing any pmap manipulation so do
730 	 * not set PG_MAPPED.  However, adjust the page flags according to
731 	 * the fault type because the caller may not use a managed pmapping
732 	 * (so we don't want to lose the fact that the page will be dirtied
733 	 * if a write fault was specified).
734 	 */
735 	vm_page_hold(fs.m);
736 	vm_page_flag_clear(fs.m, PG_ZERO);
737 	if (fault_type & VM_PROT_WRITE)
738 		vm_page_dirty(fs.m);
739 
740 	/*
741 	 * Indicate that the page was accessed.
742 	 */
743 	vm_page_flag_set(fs.m, PG_REFERENCED);
744 
745 	/*
746 	 * Unbusy the page by activating it.  It remains held and will not
747 	 * be reclaimed.
748 	 */
749 	vm_page_activate(fs.m);
750 
751 	if (curthread->td_lwp) {
752 		if (fs.hardfault) {
753 			mycpu->gd_cnt.v_vm_faults++;
754 			curthread->td_lwp->lwp_ru.ru_majflt++;
755 		} else {
756 			curthread->td_lwp->lwp_ru.ru_minflt++;
757 		}
758 	}
759 
760 	/*
761 	 * Unlock everything, and return the held page.
762 	 */
763 	vm_page_wakeup(fs.m);
764 	vm_object_deallocate(fs.first_object);
765 
766 	*errorp = 0;
767 	return(fs.m);
768 }
769 
770 /*
771  * Translate the virtual page number (first_pindex) that is relative
772  * to the address space into a logical page number that is relative to the
773  * backing object.  Use the virtual page table pointed to by (vpte).
774  *
775  * This implements an N-level page table.  Any level can terminate the
776  * scan by setting VPTE_PS.   A linear mapping is accomplished by setting
777  * VPTE_PS in the master page directory entry set via mcontrol(MADV_SETMAP).
778  */
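/*
 * Worked example (illustrative only, assuming a 32-bit configuration
 * with PAGE_SHIFT == 12 and VPTE_PAGE_BITS == 10): vshift starts at 20,
 * the first iteration of the loop below consumes the top 10 bits of
 * *pindex to index the page directory, the second consumes the next 10
 * bits to index a page table page, and the walk then terminates with
 * vshift == 0.  A VPTE_PS entry at any level terminates the walk early,
 * in which case the unconsumed low bits of *pindex are added to the page
 * frame taken from that vpte.
 */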
779 static
780 int
781 vm_fault_vpagetable(struct faultstate *fs, vm_pindex_t *pindex,
782 		    vpte_t vpte, int fault_type)
783 {
784 	struct sf_buf *sf;
785 	int vshift = 32 - PAGE_SHIFT;	/* page index bits remaining */
786 	int result = KERN_SUCCESS;
787 	vpte_t *ptep;
788 
789 	for (;;) {
790 		/*
791 		 * We cannot proceed if the vpte is not valid, not readable
792 		 * for a read fault, or not writable for a write fault.
793 		 */
794 		if ((vpte & VPTE_V) == 0) {
795 			unlock_and_deallocate(fs);
796 			return (KERN_FAILURE);
797 		}
798 		if ((fault_type & VM_PROT_READ) && (vpte & VPTE_R) == 0) {
799 			unlock_and_deallocate(fs);
800 			return (KERN_FAILURE);
801 		}
802 		if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_W) == 0) {
803 			unlock_and_deallocate(fs);
804 			return (KERN_FAILURE);
805 		}
806 		if ((vpte & VPTE_PS) || vshift == 0)
807 			break;
808 		KKASSERT(vshift >= VPTE_PAGE_BITS);
809 
810 		/*
811 		 * Get the page table page.  Nominally we only read the page
812 		 * table, but since we are actively setting VPTE_M and VPTE_A,
813 		 * tell vm_fault_object() that we are writing it.
814 		 *
815 		 * There is currently no real need to optimize this.
816 		 */
817 		result = vm_fault_object(fs, vpte >> PAGE_SHIFT,
818 					 VM_PROT_READ|VM_PROT_WRITE);
819 		if (result != KERN_SUCCESS)
820 			return (result);
821 
822 		/*
823 		 * Process the returned fs.m and look up the page table
824 		 * entry in the page table page.
825 		 */
826 		vshift -= VPTE_PAGE_BITS;
827 		sf = sf_buf_alloc(fs->m, SFB_CPUPRIVATE);
828 		ptep = ((vpte_t *)sf_buf_kva(sf) +
829 		        ((*pindex >> vshift) & VPTE_PAGE_MASK));
830 		vpte = *ptep;
831 
832 		/*
833 		 * Page table write-back.  If the vpte is valid for the
834 		 * requested operation, do a write-back to the page table.
835 		 *
836 		 * XXX VPTE_M is not set properly for page directory pages.
837 		 * It doesn't get set in the page directory if the page table
838 		 * is modified during a read access.
839 		 */
840 		if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_V) &&
841 		    (vpte & VPTE_W)) {
842 			if ((vpte & (VPTE_M|VPTE_A)) != (VPTE_M|VPTE_A)) {
843 				atomic_set_int(ptep, VPTE_M|VPTE_A);
844 				vm_page_dirty(fs->m);
845 			}
846 		}
847 		if ((fault_type & VM_PROT_READ) && (vpte & VPTE_V) &&
848 		    (vpte & VPTE_R)) {
849 			if ((vpte & VPTE_A) == 0) {
850 				atomic_set_int(ptep, VPTE_A);
851 				vm_page_dirty(fs->m);
852 			}
853 		}
854 		sf_buf_free(sf);
855 		vm_page_flag_set(fs->m, PG_REFERENCED);
856 		vm_page_activate(fs->m);
857 		vm_page_wakeup(fs->m);
858 		cleanup_successful_fault(fs);
859 	}
860 	/*
861 	 * Combine remaining address bits with the vpte.
862 	 */
863 	*pindex = (vpte >> PAGE_SHIFT) +
864 		  (*pindex & ((1 << vshift) - 1));
865 	return (KERN_SUCCESS);
866 }
867 
868 
869 /*
870  * Do all operations required to fault-in (fs.first_object, pindex).  Run
871  * through the shadow chain as necessary and do required COW or virtual
872  * copy operations.  The caller has already fully resolved the vm_map_entry
873  * and, if appropriate, has created a copy-on-write layer.  All we need to
874  * do is iterate the object chain.
875  *
876  * On failure (fs) is unlocked and deallocated and the caller may return or
877  * retry depending on the failure code.  On success (fs) is NOT unlocked or
878  * deallocated, fs.m will contain a resolved, busied page, and fs.object
879  * will have an additional PIP count if it is not equal to fs.first_object.
880  */
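/*
 * Rough flow of the loop below: look the page up in the current object;
 * if it is resident, busy it (sleeping and retrying if someone else has
 * it busy) and possibly drop into "readrest" to finish validating it; if
 * it is not resident, allocate a page and ask the pager for it; if the
 * pager does not have it, advance to the backing object; and when the
 * shadow chain is exhausted, zero-fill in the top-level object.  A write
 * fault on a page found below the top-level object is then COW'd into
 * first_object after the loop.
 */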
881 static
882 int
883 vm_fault_object(struct faultstate *fs,
884 		vm_pindex_t first_pindex, vm_prot_t fault_type)
885 {
886 	vm_object_t next_object;
887 	vm_pindex_t pindex;
888 
889 	fs->prot = fs->first_prot;
890 	fs->object = fs->first_object;
891 	pindex = first_pindex;
892 
893 	/*
894 	 * If a read fault occurs we try to make the page writable if
895 	 * possible.  There are three cases where we cannot make the
896 	 * page mapping writable:
897 	 *
898 	 * (1) The mapping is read-only or the VM object is read-only,
899 	 *     fs->prot above will simply not have VM_PROT_WRITE set.
900 	 *
901 	 * (2) If the mapping is a virtual page table we need to be able
902 	 *     to detect writes so we can set VPTE_M in the virtual page
903 	 *     table.
904 	 *
905 	 * (3) If the VM page is read-only or copy-on-write, upgrading would
906 	 *     just result in an unnecessary COW fault.
907 	 *
908 	 * VM_PROT_VPAGED is set if faulting via a virtual page table and
909 	 * causes adjustments to the 'M'odify bit to also turn off write
910 	 * access to force a re-fault.
911 	 */
912 	if (fs->entry->maptype == VM_MAPTYPE_VPAGETABLE) {
913 		if ((fault_type & VM_PROT_WRITE) == 0)
914 			fs->prot &= ~VM_PROT_WRITE;
915 	}
916 
917 	for (;;) {
918 		/*
919 		 * If the object is dead, we stop here
920 		 */
921 		if (fs->object->flags & OBJ_DEAD) {
922 			unlock_and_deallocate(fs);
923 			return (KERN_PROTECTION_FAILURE);
924 		}
925 
926 		/*
927 		 * See if page is resident.  spl protection is required
928 		 * to avoid an interrupt unbusy/free race against our
929 		 * lookup.  We must hold the protection through a page
930 		 * allocation or busy.
931 		 */
932 		crit_enter();
933 		fs->m = vm_page_lookup(fs->object, pindex);
934 		if (fs->m != NULL) {
935 			int queue;
936 			/*
937 			 * Wait/Retry if the page is busy.  We have to do this
938 			 * if the page is busy via either PG_BUSY or
939 			 * vm_page_t->busy because the vm_pager may be using
940 			 * vm_page_t->busy for pageouts ( and even pageins if
941 			 * it is the vnode pager ), and we could end up trying
942 			 * to pagein and pageout the same page simultaneously.
943 			 *
944 			 * We can theoretically allow the busy case on a read
945 			 * fault if the page is marked valid, but since such
946 			 * pages are typically already pmap'd, putting that
947 			 * special case in might be more effort than it is
948 			 * worth.  We cannot under any circumstances mess
949 			 * around with a vm_page_t->busy page except, perhaps,
950 			 * to pmap it.
951 			 */
952 			if ((fs->m->flags & PG_BUSY) || fs->m->busy) {
953 				unlock_things(fs);
954 				vm_page_sleep_busy(fs->m, TRUE, "vmpfw");
955 				mycpu->gd_cnt.v_intrans++;
956 				vm_object_deallocate(fs->first_object);
957 				fs->first_object = NULL;
958 				crit_exit();
959 				return (KERN_TRY_AGAIN);
960 			}
961 
962 			/*
963 			 * If reactivating a page from PQ_CACHE we may have
964 			 * to rate-limit.
965 			 */
966 			queue = fs->m->queue;
967 			vm_page_unqueue_nowakeup(fs->m);
968 
969 			if ((queue - fs->m->pc) == PQ_CACHE &&
970 			    vm_page_count_severe()) {
971 				vm_page_activate(fs->m);
972 				unlock_and_deallocate(fs);
973 				vm_waitpfault();
974 				crit_exit();
975 				return (KERN_TRY_AGAIN);
976 			}
977 
978 			/*
979 			 * Mark page busy for other processes, and the
980 			 * pagedaemon.  If it still isn't completely valid
981 			 * (readable), or if a read-ahead-mark is set on
982 			 * the VM page, jump to readrest, else we found the
983 			 * page and can return.
984 			 *
985 			 * We can release the spl once we have marked the
986 			 * page busy.
987 			 */
988 			vm_page_busy(fs->m);
989 			crit_exit();
990 
991 			if (fs->m->object != &kernel_object) {
992 				if ((fs->m->valid & VM_PAGE_BITS_ALL) !=
993 				    VM_PAGE_BITS_ALL) {
994 					goto readrest;
995 				}
996 				if (fs->m->flags & PG_RAM) {
997 					if (debug_cluster)
998 						kprintf("R");
999 					vm_page_flag_clear(fs->m, PG_RAM);
1000 					goto readrest;
1001 				}
1002 			}
1003 			break; /* break to PAGE HAS BEEN FOUND */
1004 		}
1005 
1006 		/*
1007 		 * Page is not resident.  If this is the search termination
1008 		 * or the pager might contain the page, allocate a new page.
1009 		 *
1010 		 * NOTE: We are still in a critical section.
1011 		 */
1012 		if (TRYPAGER(fs) || fs->object == fs->first_object) {
1013 			/*
1014 			 * If the page is beyond the object size we fail
1015 			 */
1016 			if (pindex >= fs->object->size) {
1017 				crit_exit();
1018 				unlock_and_deallocate(fs);
1019 				return (KERN_PROTECTION_FAILURE);
1020 			}
1021 
1022 			/*
1023 			 * Ratelimit.
1024 			 */
1025 			if (fs->didlimit == 0 && curproc != NULL) {
1026 				int limticks;
1027 
1028 				limticks = vm_fault_ratelimit(curproc->p_vmspace);
1029 				if (limticks) {
1030 					crit_exit();
1031 					unlock_and_deallocate(fs);
1032 					tsleep(curproc, 0, "vmrate", limticks);
1033 					fs->didlimit = 1;
1034 					return (KERN_TRY_AGAIN);
1035 				}
1036 			}
1037 
1038 			/*
1039 			 * Allocate a new page for this object/offset pair.
1040 			 */
1041 			fs->m = NULL;
1042 			if (!vm_page_count_severe()) {
1043 				fs->m = vm_page_alloc(fs->object, pindex,
1044 				    (fs->vp || fs->object->backing_object) ? VM_ALLOC_NORMAL : VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
1045 			}
1046 			if (fs->m == NULL) {
1047 				crit_exit();
1048 				unlock_and_deallocate(fs);
1049 				vm_waitpfault();
1050 				return (KERN_TRY_AGAIN);
1051 			}
1052 		}
1053 		crit_exit();
1054 
1055 readrest:
1056 		/*
1057 		 * We have found an invalid or partially valid page, a
1058 		 * page with a read-ahead mark which might be partially or
1059 		 * fully valid (and maybe dirty too), or we have allocated
1060 		 * a new page.
1061 		 *
1062 		 * Attempt to fault-in the page if there is a chance that the
1063 		 * pager has it, and potentially fault in additional pages
1064 		 * at the same time.
1065 		 *
1066 		 * We are NOT in splvm here and if TRYPAGER is true then
1067 		 * fs.m will be non-NULL and will be PG_BUSY for us.
1068 		 */
1069 		if (TRYPAGER(fs)) {
1070 			int rv;
1071 			int seqaccess;
1072 			u_char behavior = vm_map_entry_behavior(fs->entry);
1073 
1074 			if (behavior == MAP_ENTRY_BEHAV_RANDOM)
1075 				seqaccess = 0;
1076 			else
1077 				seqaccess = -1;
1078 
1079 			/*
1080 			 * If sequential access is detected then attempt
1081 			 * to deactivate/cache pages behind the scan to
1082 			 * prevent resource hogging.
1083 			 *
1084 			 * Use of PG_RAM to detect sequential access
1085 			 * also simulates multi-zone sequential access
1086 			 * detection for free.
1087 			 *
1088 			 * NOTE: Partially valid dirty pages cannot be
1089 			 *	 deactivated without causing NFS piecemeal
1090 			 *	 writes to barf.
1091 			 */
1092 			if ((fs->first_object->type != OBJT_DEVICE) &&
1093 			    (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
1094                                 (behavior != MAP_ENTRY_BEHAV_RANDOM &&
1095 				 (fs->m->flags & PG_RAM)))
1096 			) {
1097 				vm_pindex_t scan_pindex;
1098 				int scan_count = 16;
1099 
1100 				if (first_pindex < 16) {
1101 					scan_pindex = 0;
1102 					scan_count = 0;
1103 				} else {
1104 					scan_pindex = first_pindex - 16;
1105 					if (scan_pindex < 16)
1106 						scan_count = scan_pindex;
1107 					else
1108 						scan_count = 16;
1109 				}
1110 
1111 				crit_enter();
1112 				while (scan_count) {
1113 					vm_page_t mt;
1114 
1115 					mt = vm_page_lookup(fs->first_object,
1116 							    scan_pindex);
1117 					if (mt == NULL ||
1118 					    (mt->valid != VM_PAGE_BITS_ALL)) {
1119 						break;
1120 					}
1121 					if (mt->busy ||
1122 					    (mt->flags & (PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED)) ||
1123 					    mt->hold_count ||
1124 					    mt->wire_count)  {
1125 						goto skip;
1126 					}
1127 					if (mt->dirty == 0)
1128 						vm_page_test_dirty(mt);
1129 					if (mt->dirty) {
1130 						vm_page_busy(mt);
1131 						vm_page_protect(mt,
1132 								VM_PROT_NONE);
1133 						vm_page_deactivate(mt);
1134 						vm_page_wakeup(mt);
1135 					} else {
1136 						vm_page_cache(mt);
1137 					}
1138 skip:
1139 					--scan_count;
1140 					--scan_pindex;
1141 				}
1142 				crit_exit();
1143 
1144 				seqaccess = 1;
1145 			}
1146 
1147 			/*
1148 			 * Avoid deadlocking against the map when doing I/O.
1149 			 * We still hold a ref on fs.object and the page is PG_BUSY'd.
1150 			 */
1151 			unlock_map(fs);
1152 
1153 			/*
1154 			 * Acquire the page data.  We still hold a ref on
1155 			 * fs.object and the page has been PG_BUSY'd.
1156 			 *
1157 			 * The pager may replace the page (for example, in
1158 			 * order to enter a fictitious page into the
1159 			 * object).  If it does so it is responsible for
1160 			 * cleaning up the passed page and properly setting
1161 			 * the new page PG_BUSY.
1162 			 *
1163 			 * If we got here through a PG_RAM read-ahead
1164 			 * mark, the page may be partially dirty and thus
1165 			 * not freeable.  Don't bother checking to see
1166 			 * if the pager has the page because we can't free
1167 			 * it anyway.  We have to depend on the get_page
1168 			 * operation filling in any gaps whether there is
1169 			 * backing store or not.
1170 			 */
1171 			rv = vm_pager_get_page(fs->object, &fs->m, seqaccess);
1172 
1173 			if (rv == VM_PAGER_OK) {
1174 				/*
1175 				 * Relookup in case pager changed page. Pager
1176 				 * is responsible for disposition of old page
1177 				 * if moved.
1178 				 *
1179 				 * XXX other code segments do relookups too.
1180 				 * It's a bad abstraction that needs to be
1181 				 * fixed/removed.
1182 				 */
1183 				fs->m = vm_page_lookup(fs->object, pindex);
1184 				if (fs->m == NULL) {
1185 					unlock_and_deallocate(fs);
1186 					return (KERN_TRY_AGAIN);
1187 				}
1188 
1189 				++fs->hardfault;
1190 				break; /* break to PAGE HAS BEEN FOUND */
1191 			}
1192 
1193 			/*
1194 			 * Remove the bogus page (which does not exist at this
1195 			 * object/offset); before doing so, we must get back
1196 			 * our object lock to preserve our invariant.
1197 			 *
1198 			 * Also wake up any other process that may want to bring
1199 			 * in this page.
1200 			 *
1201 			 * If this is the top-level object, we must leave the
1202 			 * busy page to prevent another process from rushing
1203 			 * past us, and inserting the page in that object at
1204 			 * the same time that we are.
1205 			 */
1206 			if (rv == VM_PAGER_ERROR) {
1207 				if (curproc)
1208 					kprintf("vm_fault: pager read error, pid %d (%s)\n", curproc->p_pid, curproc->p_comm);
1209 				else
1210 					kprintf("vm_fault: pager read error, thread %p\n", curthread);
1211 			}
1212 
1213 			/*
1214 			 * Data outside the range of the pager or an I/O error
1215 			 *
1216 			 * The page may have been wired during the pagein,
1217 			 * e.g. by the buffer cache, and cannot simply be
1218 			 * freed.  Call vnode_pager_freepage() to deal with it.
1219 			 */
1220 			/*
1221 			 * XXX - the check for kernel_map is a kludge to work
1222 			 * around having the machine panic on a kernel space
1223 			 * fault w/ I/O error.
1224 			 */
1225 			if (((fs->map != &kernel_map) &&
1226 			    (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) {
1227 				vnode_pager_freepage(fs->m);
1228 				fs->m = NULL;
1229 				unlock_and_deallocate(fs);
1230 				if (rv == VM_PAGER_ERROR)
1231 					return (KERN_FAILURE);
1232 				else
1233 					return (KERN_PROTECTION_FAILURE);
1234 				/* NOT REACHED */
1235 			}
1236 			if (fs->object != fs->first_object) {
1237 				vnode_pager_freepage(fs->m);
1238 				fs->m = NULL;
1239 				/*
1240 				 * XXX - we cannot just fall out at this
1241 				 * point, m has been freed and is invalid!
1242 				 */
1243 			}
1244 		}
1245 
1246 		/*
1247 		 * We get here if the object has a default pager (or unwiring)
1248 		 * or the pager doesn't have the page.
1249 		 */
1250 		if (fs->object == fs->first_object)
1251 			fs->first_m = fs->m;
1252 
1253 		/*
1254 		 * Move on to the next object.  Lock the next object before
1255 		 * unlocking the current one.
1256 		 */
1257 		pindex += OFF_TO_IDX(fs->object->backing_object_offset);
1258 		next_object = fs->object->backing_object;
1259 		if (next_object == NULL) {
1260 			/*
1261 			 * If there's no object left, fill the page in the top
1262 			 * object with zeros.
1263 			 */
1264 			if (fs->object != fs->first_object) {
1265 				vm_object_pip_wakeup(fs->object);
1266 
1267 				fs->object = fs->first_object;
1268 				pindex = first_pindex;
1269 				fs->m = fs->first_m;
1270 			}
1271 			fs->first_m = NULL;
1272 
1273 			/*
1274 			 * Zero the page if necessary and mark it valid.
1275 			 */
1276 			if ((fs->m->flags & PG_ZERO) == 0) {
1277 				vm_page_zero_fill(fs->m);
1278 			} else {
1279 				mycpu->gd_cnt.v_ozfod++;
1280 			}
1281 			mycpu->gd_cnt.v_zfod++;
1282 			fs->m->valid = VM_PAGE_BITS_ALL;
1283 			break;	/* break to PAGE HAS BEEN FOUND */
1284 		} else {
1285 			if (fs->object != fs->first_object) {
1286 				vm_object_pip_wakeup(fs->object);
1287 			}
1288 			KASSERT(fs->object != next_object, ("object loop %p", next_object));
1289 			fs->object = next_object;
1290 			vm_object_pip_add(fs->object, 1);
1291 		}
1292 	}
1293 
1294 	/*
1295 	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
1296 	 * is held.]
1297 	 *
1298 	 * If the page is being written, but isn't already owned by the
1299 	 * top-level object, we have to copy it into a new page owned by the
1300 	 * top-level object.
1301 	 */
1302 	KASSERT((fs->m->flags & PG_BUSY) != 0,
1303 		("vm_fault: not busy after main loop"));
1304 
1305 	if (fs->object != fs->first_object) {
1306 		/*
1307 		 * We only really need to copy if we want to write it.
1308 		 */
1309 		if (fault_type & VM_PROT_WRITE) {
1310 			/*
1311 			 * This allows pages to be virtually copied from a
1312 			 * backing_object into the first_object, where the
1313 			 * backing object has no other refs to it, and cannot
1314 			 * gain any more refs.  Instead of a bcopy, we just
1315 			 * move the page from the backing object to the
1316 			 * first object.  Note that we must mark the page
1317 			 * dirty in the first object so that it will go out
1318 			 * to swap when needed.
1319 			 */
1320 			if (
1321 				/*
1322 				 * Map, if present, has not changed
1323 				 */
1324 				(fs->map == NULL ||
1325 				fs->map_generation == fs->map->timestamp) &&
1326 				/*
1327 				 * Only one shadow object
1328 				 */
1329 				(fs->object->shadow_count == 1) &&
1330 				/*
1331 				 * No COW refs, except us
1332 				 */
1333 				(fs->object->ref_count == 1) &&
1334 				/*
1335 				 * No one else can look this object up
1336 				 */
1337 				(fs->object->handle == NULL) &&
1338 				/*
1339 				 * No other ways to look the object up
1340 				 */
1341 				((fs->object->type == OBJT_DEFAULT) ||
1342 				 (fs->object->type == OBJT_SWAP)) &&
1343 				/*
1344 				 * We don't chase down the shadow chain
1345 				 */
1346 				(fs->object == fs->first_object->backing_object) &&
1347 
1348 				/*
1349 				 * grab the lock if we need to
1350 				 */
1351 				(fs->lookup_still_valid ||
1352 				 fs->map == NULL ||
1353 				 lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT) == 0)
1354 			    ) {
1355 
1356 				fs->lookup_still_valid = 1;
1357 				/*
1358 				 * get rid of the unnecessary page
1359 				 */
1360 				vm_page_protect(fs->first_m, VM_PROT_NONE);
1361 				vm_page_free(fs->first_m);
1362 				fs->first_m = NULL;
1363 
1364 				/*
1365 				 * grab the page and put it into the
1366 				 * process's object.  The page is
1367 				 * automatically made dirty.
1368 				 */
1369 				vm_page_rename(fs->m, fs->first_object, first_pindex);
1370 				fs->first_m = fs->m;
1371 				vm_page_busy(fs->first_m);
1372 				fs->m = NULL;
1373 				mycpu->gd_cnt.v_cow_optim++;
1374 			} else {
1375 				/*
1376 				 * Oh, well, lets copy it.
1377 				 */
1378 				vm_page_copy(fs->m, fs->first_m);
1379 				vm_page_event(fs->m, VMEVENT_COW);
1380 			}
1381 
1382 			if (fs->m) {
1383 				/*
1384 				 * We no longer need the old page or object.
1385 				 */
1386 				release_page(fs);
1387 			}
1388 
1389 			/*
1390 			 * fs->object != fs->first_object due to above
1391 			 * conditional
1392 			 */
1393 			vm_object_pip_wakeup(fs->object);
1394 
1395 			/*
1396 			 * Only use the new page below...
1397 			 */
1398 
1399 			mycpu->gd_cnt.v_cow_faults++;
1400 			fs->m = fs->first_m;
1401 			fs->object = fs->first_object;
1402 			pindex = first_pindex;
1403 		} else {
1404 			/*
1405 			 * If it wasn't a write fault avoid having to copy
1406 			 * the page by mapping it read-only.
1407 			 */
1408 			fs->prot &= ~VM_PROT_WRITE;
1409 		}
1410 	}
1411 
1412 	/*
1413 	 * We may have had to unlock a map to do I/O.  If we did then
1414 	 * lookup_still_valid will be FALSE.  If the map generation count
1415 	 * also changed then all sorts of things could have happened while
1416 	 * we were doing the I/O and we need to retry.
1417 	 */
1418 
1419 	if (!fs->lookup_still_valid &&
1420 	    fs->map != NULL &&
1421 	    (fs->map->timestamp != fs->map_generation)) {
1422 		release_page(fs);
1423 		unlock_and_deallocate(fs);
1424 		return (KERN_TRY_AGAIN);
1425 	}
1426 
1427 	/*
1428 	 * If the fault is a write, we know that this page is being
1429 	 * written NOW so dirty it explicitly to save on pmap_is_modified()
1430 	 * calls later.
1431 	 *
1432 	 * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
1433 	 * if the page is already dirty to prevent data written with
1434 	 * the expectation of being synced from not being synced.
1435 	 * Likewise if this entry does not request NOSYNC then make
1436 	 * sure the page isn't marked NOSYNC.  Applications sharing
1437 	 * data should use the same flags to avoid ping ponging.
1438 	 *
1439 	 * Also tell the backing pager, if any, that it should remove
1440 	 * any swap backing since the page is now dirty.
1441 	 */
1442 	if (fs->prot & VM_PROT_WRITE) {
1443 		vm_object_set_writeable_dirty(fs->m->object);
1444 		if (fs->entry->eflags & MAP_ENTRY_NOSYNC) {
1445 			if (fs->m->dirty == 0)
1446 				vm_page_flag_set(fs->m, PG_NOSYNC);
1447 		} else {
1448 			vm_page_flag_clear(fs->m, PG_NOSYNC);
1449 		}
1450 		if (fs->fault_flags & VM_FAULT_DIRTY) {
1451 			crit_enter();
1452 			vm_page_dirty(fs->m);
1453 			swap_pager_unswapped(fs->m);
1454 			crit_exit();
1455 		}
1456 	}
1457 
1458 	/*
1459 	 * Page had better still be busy.  We are still locked up and
1460 	 * fs->object will have another PIP reference if it is not equal
1461 	 * to fs->first_object.
1462 	 */
1463 	KASSERT(fs->m->flags & PG_BUSY,
1464 		("vm_fault: page %p not busy!", fs->m));
1465 
1466 	/*
1467 	 * Sanity check: page must be completely valid or it is not fit to
1468 	 * map into user space.  vm_pager_get_page() ensures this.
1469 	 */
1470 	if (fs->m->valid != VM_PAGE_BITS_ALL) {
1471 		vm_page_zero_invalid(fs->m, TRUE);
1472 		kprintf("Warning: page %p partially invalid on fault\n", fs->m);
1473 	}
1474 
1475 	return (KERN_SUCCESS);
1476 }
1477 
1478 /*
1479  * Wire down a range of virtual addresses in a map.  The entry in question
1480  * should be marked in-transition and the map must be locked.  We must
1481  * release the map temporarily while faulting-in the page to avoid a
1482  * deadlock.  Note that the entry may be clipped while we are blocked but
1483  * will never be freed.
1484  */
1485 int
1486 vm_fault_wire(vm_map_t map, vm_map_entry_t entry, boolean_t user_wire)
1487 {
1488 	boolean_t fictitious;
1489 	vm_offset_t start;
1490 	vm_offset_t end;
1491 	vm_offset_t va;
1492 	vm_paddr_t pa;
1493 	pmap_t pmap;
1494 	int rv;
1495 
1496 	pmap = vm_map_pmap(map);
1497 	start = entry->start;
1498 	end = entry->end;
1499 	fictitious = entry->object.vm_object &&
1500 			(entry->object.vm_object->type == OBJT_DEVICE);
1501 
1502 	vm_map_unlock(map);
1503 	map->timestamp++;
1504 
1505 	/*
1506 	 * We simulate a fault to get the page and enter it in the physical
1507 	 * map.
1508 	 */
1509 	for (va = start; va < end; va += PAGE_SIZE) {
1510 		if (user_wire) {
1511 			rv = vm_fault(map, va, VM_PROT_READ,
1512 					VM_FAULT_USER_WIRE);
1513 		} else {
1514 			rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
1515 					VM_FAULT_CHANGE_WIRING);
1516 		}
1517 		if (rv) {
1518 			while (va > start) {
1519 				va -= PAGE_SIZE;
1520 				if ((pa = pmap_extract(pmap, va)) == 0)
1521 					continue;
1522 				pmap_change_wiring(pmap, va, FALSE);
1523 				if (!fictitious)
1524 					vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1525 			}
1526 			vm_map_lock(map);
1527 			return (rv);
1528 		}
1529 	}
1530 	vm_map_lock(map);
1531 	return (KERN_SUCCESS);
1532 }
1533 
1534 /*
1535  * Unwire a range of virtual addresses in a map.  The map should be
1536  * locked.
1537  */
1538 void
1539 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry)
1540 {
1541 	boolean_t fictitious;
1542 	vm_offset_t start;
1543 	vm_offset_t end;
1544 	vm_offset_t va;
1545 	vm_paddr_t pa;
1546 	pmap_t pmap;
1547 
1548 	pmap = vm_map_pmap(map);
1549 	start = entry->start;
1550 	end = entry->end;
1551 	fictitious = entry->object.vm_object &&
1552 			(entry->object.vm_object->type == OBJT_DEVICE);
1553 
1554 	/*
1555 	 * Since the pages are wired down, we must be able to get their
1556 	 * mappings from the physical map system.
1557 	 */
1558 	for (va = start; va < end; va += PAGE_SIZE) {
1559 		pa = pmap_extract(pmap, va);
1560 		if (pa != 0) {
1561 			pmap_change_wiring(pmap, va, FALSE);
1562 			if (!fictitious)
1563 				vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1564 		}
1565 	}
1566 }
1567 
1568 /*
1569  * Reduce the rate at which memory is allocated to a process based
1570  * on the perceived load on the VM system. As the load increases
1571  * the allocation burst rate goes down and the delay increases.
1572  *
1573  * Rate limiting does not apply when faulting active or inactive
1574  * pages.  When faulting 'cache' pages, rate limiting only applies
1575  * if the system currently has a severe page deficit.
1576  *
1577  * XXX vm_pagesupply should be increased when a page is freed.
1578  *
1579  * We sleep up to 1/10 of a second.
1580  */
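/*
 * Example of the arithmetic below (assuming vm_load is on the 0-1000
 * scale implied by the formulas): at vm_load == 500 a process is granted
 * a burst of (1000 - 500) / 10 == 50 pages and then sleeps
 * 500 * hz / 10000 ticks, i.e. 1/20 of a second; at the maximum load of
 * 1000 the burst drops to 0 pages and the sleep reaches the 1/10 second
 * cap noted above.
 */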
1581 static int
1582 vm_fault_ratelimit(struct vmspace *vmspace)
1583 {
1584 	if (vm_load_enable == 0)
1585 		return(0);
1586 	if (vmspace->vm_pagesupply > 0) {
1587 		--vmspace->vm_pagesupply;
1588 		return(0);
1589 	}
1590 #ifdef INVARIANTS
1591 	if (vm_load_debug) {
1592 		kprintf("load %-4d give %d pgs, wait %d, pid %-5d (%s)\n",
1593 			vm_load,
1594 			(1000 - vm_load ) / 10, vm_load * hz / 10000,
1595 			curproc->p_pid, curproc->p_comm);
1596 	}
1597 #endif
1598 	vmspace->vm_pagesupply = (1000 - vm_load) / 10;
1599 	return(vm_load * hz / 10000);
1600 }
1601 
1602 /*
1603  *	Routine:
1604  *		vm_fault_copy_entry
1605  *	Function:
1606  *		Copy all of the pages from a wired-down map entry to another.
1607  *
1608  *	In/out conditions:
1609  *		The source and destination maps must be locked for write.
1610  *		The source map entry must be wired down (or be a sharing map
1611  *		entry corresponding to a main map entry that is wired down).
1612  */
1613 
1614 void
1615 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
1616     vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
1617 {
1618 	vm_object_t dst_object;
1619 	vm_object_t src_object;
1620 	vm_ooffset_t dst_offset;
1621 	vm_ooffset_t src_offset;
1622 	vm_prot_t prot;
1623 	vm_offset_t vaddr;
1624 	vm_page_t dst_m;
1625 	vm_page_t src_m;
1626 
1627 #ifdef	lint
1628 	src_map++;
1629 #endif	/* lint */
1630 
1631 	src_object = src_entry->object.vm_object;
1632 	src_offset = src_entry->offset;
1633 
1634 	/*
1635 	 * Create the top-level object for the destination entry. (Doesn't
1636 	 * actually shadow anything - we copy the pages directly.)
1637 	 */
1638 	vm_map_entry_allocate_object(dst_entry);
1639 	dst_object = dst_entry->object.vm_object;
1640 
1641 	prot = dst_entry->max_protection;
1642 
1643 	/*
1644 	 * Loop through all of the pages in the entry's range, copying each
1645 	 * one from the source object (it should be there) to the destination
1646 	 * object.
1647 	 */
1648 	for (vaddr = dst_entry->start, dst_offset = 0;
1649 	    vaddr < dst_entry->end;
1650 	    vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
1651 
1652 		/*
1653 		 * Allocate a page in the destination object
1654 		 */
1655 		do {
1656 			dst_m = vm_page_alloc(dst_object,
1657 				OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
1658 			if (dst_m == NULL) {
1659 				vm_wait(0);
1660 			}
1661 		} while (dst_m == NULL);
1662 
1663 		/*
1664 		 * Find the page in the source object, and copy it in.
1665 		 * (Because the source is wired down, the page will be in
1666 		 * memory.)
1667 		 */
1668 		src_m = vm_page_lookup(src_object,
1669 			OFF_TO_IDX(dst_offset + src_offset));
1670 		if (src_m == NULL)
1671 			panic("vm_fault_copy_wired: page missing");
1672 
1673 		vm_page_copy(src_m, dst_m);
1674 		vm_page_event(src_m, VMEVENT_COW);
1675 
1676 		/*
1677 		 * Enter it in the pmap...
1678 		 */
1679 
1680 		vm_page_flag_clear(dst_m, PG_ZERO);
1681 		pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
1682 
1683 		/*
1684 		 * Mark it no longer busy, and put it on the active list.
1685 		 */
1686 		vm_page_activate(dst_m);
1687 		vm_page_wakeup(dst_m);
1688 	}
1689 }
1690 
1691 #if 0
1692 
1693 /*
1694  * This routine checks around the requested page for other pages that
1695  * might be able to be faulted in.  This routine brackets the viable
1696  * pages for the pages to be paged in.
1697  *
1698  * Inputs:
1699  *	m, rbehind, rahead
1700  *
1701  * Outputs:
1702  *  marray (array of vm_page_t), reqpage (index of requested page)
1703  *
1704  * Return value:
1705  *  number of pages in marray
1706  */
1707 static int
1708 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
1709 			  vm_page_t *marray, int *reqpage)
1710 {
1711 	int i,j;
1712 	vm_object_t object;
1713 	vm_pindex_t pindex, startpindex, endpindex, tpindex;
1714 	vm_page_t rtm;
1715 	int cbehind, cahead;
1716 
1717 	object = m->object;
1718 	pindex = m->pindex;
1719 
1720 	/*
1721 	 * we don't fault-ahead for device pager
1722 	 */
1723 	if (object->type == OBJT_DEVICE) {
1724 		*reqpage = 0;
1725 		marray[0] = m;
1726 		return 1;
1727 	}
1728 
1729 	/*
1730 	 * if the requested page is not available, then give up now
1731 	 */
1732 	if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
1733 		*reqpage = 0;	/* not used by caller, fix compiler warn */
1734 		return 0;
1735 	}
1736 
1737 	if ((cbehind == 0) && (cahead == 0)) {
1738 		*reqpage = 0;
1739 		marray[0] = m;
1740 		return 1;
1741 	}
1742 
1743 	if (rahead > cahead) {
1744 		rahead = cahead;
1745 	}
1746 
1747 	if (rbehind > cbehind) {
1748 		rbehind = cbehind;
1749 	}
1750 
1751 	/*
1752 	 * Do not do any readahead if we have insufficient free memory.
1753 	 *
1754 	 * XXX this code was previously disabled as broken, and it still
1755 	 * shows instability with the conditional fixed, so shortcut for now.
1756 	 */
1757 	if (burst_fault == 0 || vm_page_count_severe()) {
1758 		marray[0] = m;
1759 		*reqpage = 0;
1760 		return 1;
1761 	}
1762 
1763 	/*
1764 	 * scan backward for the read behind pages -- in memory
1765 	 * Scan backward for the read-behind pages, stopping at the first
	 * page already resident in memory.
1766 	 * Assume that if the page is not found an interrupt will not
1767 	 * create it.  Theoretically interrupts can only remove (busy)
1768 	 * pages, not create new associations.
1769 	 */
1770 	if (pindex > 0) {
1771 		if (rbehind > pindex) {
1772 			rbehind = pindex;
1773 			startpindex = 0;
1774 		} else {
1775 			startpindex = pindex - rbehind;
1776 		}
1777 
1778 		crit_enter();
1779 		for (tpindex = pindex; tpindex > startpindex; --tpindex) {
1780 			if (vm_page_lookup(object, tpindex - 1))
1781 				break;
1782 		}
1783 
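		/*
		 * tpindex now points at the start of the contiguous
		 * non-resident run behind the faulting page.  Allocate
		 * pages forward from there up to (but not including) the
		 * requested page; if an allocation fails, free what we
		 * already allocated and fall back to the single page.
		 */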
1784 		i = 0;
1785 		while (tpindex < pindex) {
1786 			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM);
1787 			if (rtm == NULL) {
1788 				crit_exit();
1789 				for (j = 0; j < i; j++) {
1790 					vm_page_free(marray[j]);
1791 				}
1792 				marray[0] = m;
1793 				*reqpage = 0;
1794 				return 1;
1795 			}
1796 			marray[i] = rtm;
1797 			++i;
1798 			++tpindex;
1799 		}
1800 		crit_exit();
1801 	} else {
1802 		i = 0;
1803 	}
1804 
1805 	/*
1806 	 * Assign requested page
1807 	 */
1808 	marray[i] = m;
1809 	*reqpage = i;
1810 	++i;
1811 
1812 	/*
1813 	 * Scan forwards for read-ahead pages
1814 	 */
1815 	tpindex = pindex + 1;
1816 	endpindex = tpindex + rahead;
1817 	if (endpindex > object->size)
1818 		endpindex = object->size;
1819 
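	/*
	 * Stop at the first already-resident page or the first failed
	 * allocation so that marray remains a single contiguous run of
	 * pages bracketing the requested page.
	 */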
1820 	crit_enter();
1821 	while (tpindex < endpindex) {
1822 		if (vm_page_lookup(object, tpindex))
1823 			break;
1824 		rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM);
1825 		if (rtm == NULL)
1826 			break;
1827 		marray[i] = rtm;
1828 		++i;
1829 		++tpindex;
1830 	}
1831 	crit_exit();
1832 
1833 	return (i);
1834 }
1835 
1836 #endif
1837 
1838 /*
1839  * vm_prefault() provides a quick way of clustering pagefaults into a
1840  * processes address space.  It is a "cousin" of pmap_object_init_pt,
1841  * except it runs at page fault time instead of mmap time.
1842  *
1843  * This code used to be per-platform pmap_prefault().  It is now
1844  * machine-independent and enhanced to also pre-fault zero-fill pages
1845  * (see vm.fast_fault) as well as make them writable, which greatly
1846  * reduces the number of page faults programs incur.
1847  *
1848  * Application performance when pre-faulting zero-fill pages is heavily
1849  * dependent on the application.  Very tiny applications like /bin/echo
1850  * lose a little performance while applications of any appreciable size
1851  * gain performance.  Prefaulting multiple pages also reduces SMP
1852  * congestion and can improve SMP performance significantly.
1853  *
1854  * NOTE!  prot may allow writing but this only applies to the top level
1855  *	  object.  If we wind up mapping a page extracted from a backing
1856  *	  object we have to make sure it is read-only.
1857  *
1858  * NOTE!  The caller has already handled any COW operations on the
1859  *	  vm_map_entry via the normal fault code.  Do NOT call this
1860  *	  shortcut unless the normal fault code has run on this entry.
1861  */
1862 #define PFBAK 4
1863 #define PFFOR 4
1864 #define PAGEORDER_SIZE (PFBAK+PFFOR)
1865 
1866 static int vm_prefault_pageorder[] = {
1867 	-PAGE_SIZE, PAGE_SIZE,
1868 	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
1869 	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
1870 	-4 * PAGE_SIZE, 4 * PAGE_SIZE
1871 };
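/*
 * The probe order is nearest-first, alternating behind and ahead of the
 * faulting address: with 4K pages the successive candidate addresses are
 * addra - 4K, addra + 4K, addra - 8K, addra + 8K, ... out to PFBAK pages
 * behind and PFFOR pages ahead.
 */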
1872 
1873 static void
1874 vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry, int prot)
1875 {
1876 	struct lwp *lp;
1877 	vm_page_t m;
1878 	vm_offset_t starta;
1879 	vm_offset_t addr;
1880 	vm_pindex_t index;
1881 	vm_pindex_t pindex;
1882 	vm_object_t object;
1883 	int pprot;
1884 	int i;
1885 
1886 	/*
1887 	 * We do not currently prefault mappings that use virtual page
1888 	 * tables.  We do not prefault foreign pmaps.
1889 	 */
1890 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
1891 		return;
1892 	lp = curthread->td_lwp;
1893 	if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
1894 		return;
1895 
1896 	object = entry->object.vm_object;
1897 
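	/*
	 * starta is the lowest address the prefault will consider.  It is
	 * clamped to the start of the map entry; if addra lies within
	 * PFBAK pages of address 0 the subtraction wraps around (making
	 * starta > addra), in which case starta is simply set to 0.
	 */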
1898 	starta = addra - PFBAK * PAGE_SIZE;
1899 	if (starta < entry->start)
1900 		starta = entry->start;
1901 	else if (starta > addra)
1902 		starta = 0;
1903 
1904 	/*
1905 	 * Critical section protection is required to maintain the
1906 	 * page/object association; interrupts can free pages and remove
1907 	 * them from their objects.
1908 	 */
1909 	crit_enter();
1910 	for (i = 0; i < PAGEORDER_SIZE; i++) {
1911 		vm_object_t lobject;
1912 		int allocated = 0;
1913 
1914 		addr = addra + vm_prefault_pageorder[i];
1915 		if (addr > addra + (PFFOR * PAGE_SIZE))
1916 			addr = 0;
1917 
1918 		if (addr < starta || addr >= entry->end)
1919 			continue;
1920 
1921 		if (pmap_prefault_ok(pmap, addr) == 0)
1922 			continue;
1923 
1924 		/*
1925 		 * Follow the VM object chain to obtain the page to be mapped
1926 		 * into the pmap.
1927 		 *
1928 		 * If we reach the terminal object without finding a page
1929 		 * and we determine it would be advantageous, then allocate
1930 		 * a zero-fill page for the base object.  The base object
1931 		 * is guaranteed to be OBJT_DEFAULT for this case.
1932 		 *
1933 		 * To avoid having to check the pager via *haspage*(),
1934 		 * we stop if any non-default object is encountered; e.g.
1935 		 * a vnode or swap object would stop the loop.
1936 		 */
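		/*
		 * Convert the candidate address to a page index within the
		 * top-level object: the byte offset of addr into the map
		 * entry plus the entry's byte offset into the object,
		 * shifted down to a page index.  E.g. with 4K pages, an
		 * addr two pages past entry->start and an entry->offset of
		 * 0x3000 gives index 2 + 3 = 5.
		 */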
1937 		index = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
1938 		lobject = object;
1939 		pindex = index;
1940 		pprot = prot;
1941 
1942 		while ((m = vm_page_lookup(lobject, pindex)) == NULL) {
1943 			if (lobject->type != OBJT_DEFAULT)
1944 				break;
1945 			if (lobject->backing_object == NULL) {
1946 				if (vm_fast_fault == 0)
1947 					break;
1948 				if (vm_prefault_pageorder[i] < 0 ||
1949 				    (prot & VM_PROT_WRITE) == 0 ||
1950 				    vm_page_count_min(0)) {
1951 					break;
1952 				}
1953 				/* note: allocate from base object */
1954 				m = vm_page_alloc(object, index,
1955 					      VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
				/*
				 * vm_page_alloc() can still fail under severe
				 * memory pressure; just give up the prefault.
				 */
				if (m == NULL)
					break;
1956 
1957 				if ((m->flags & PG_ZERO) == 0) {
1958 					vm_page_zero_fill(m);
1959 				} else {
1960 					vm_page_flag_clear(m, PG_ZERO);
1961 					mycpu->gd_cnt.v_ozfod++;
1962 				}
1963 				mycpu->gd_cnt.v_zfod++;
1964 				m->valid = VM_PAGE_BITS_ALL;
1965 				allocated = 1;
1966 				pprot = prot;
1967 				/* lobject = object .. not needed */
1968 				break;
1969 			}
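			/*
			 * No page and not the terminal object: descend into
			 * the backing object.  The backing offset must be
			 * page aligned, and any page found deeper in the
			 * chain is mapped read-only so a later write fault
			 * runs the normal COW path (see the NOTE in the
			 * function header).
			 */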
1970 			if (lobject->backing_object_offset & PAGE_MASK)
1971 				break;
1972 			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
1973 			lobject = lobject->backing_object;
1974 			pprot &= ~VM_PROT_WRITE;
1975 		}
1976 		/*
1977 		 * NOTE: lobject now invalid (if we did a zero-fill we didn't
1978 		 *	 bother assigning lobject = object).
1979 		 *
1980 		 * Give-up if the page is not available.
1981 		 * Give up if the page is not available.
1982 		if (m == NULL)
1983 			break;
1984 
1985 		/*
1986 		 * Do not conditionalize on PG_RAM.  If pages are present in
1987 		 * the VM system we assume optimal caching.  If caching is
1988 		 * not optimal the I/O gravy train will be restarted when we
1989 		 * hit an unavailable page.  We do not want to try to restart
1990 		 * the gravy train now because we really don't know how much
1991 		 * of the object has been cached.  The cost for restarting
1992 		 * the gravy train should be low (since accesses will likely
1993 		 * be I/O bound anyway).
1994 		 *
1995 		 * The object must be marked dirty if we are mapping a
1996 		 * writable page.
1997 		 */
1998 		if (pprot & VM_PROT_WRITE)
1999 			vm_object_set_writeable_dirty(m->object);
2000 
2001 		/*
2002 		 * Enter the page into the pmap if appropriate.  If we had
2003 		 * allocated the page we have to place it on a queue.  If not
2004 		 * we just have to make sure it isn't on the cache queue
2005 		 * (pages on the cache queue are not allowed to be mapped).
2006 		 */
2007 		if (allocated) {
2008 			pmap_enter(pmap, addr, m, pprot, 0);
2009 			vm_page_deactivate(m);
2010 			vm_page_wakeup(m);
2011 		} else if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2012 		    (m->busy == 0) &&
2013 		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2014 
2015 			if ((m->queue - m->pc) == PQ_CACHE) {
2016 				vm_page_deactivate(m);
2017 			}
2018 			vm_page_busy(m);
2019 			pmap_enter(pmap, addr, m, pprot, 0);
2020 			vm_page_wakeup(m);
2021 		}
2022 	}
2023 	crit_exit();
2024 }
2025