xref: /dragonfly/sys/vm/vm_fault.c (revision 31c7ac8b)
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  *
11  *
12  * This code is derived from software contributed to Berkeley by
13  * The Mach Operating System project at Carnegie-Mellon University.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions and the following disclaimer.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. Neither the name of the University nor the names of its contributors
24  *    may be used to endorse or promote products derived from this software
25  *    without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
40  *
41  *
42  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
43  * All rights reserved.
44  *
45  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
46  *
47  * Permission to use, copy, modify and distribute this software and
48  * its documentation is hereby granted, provided that both the copyright
49  * notice and this permission notice appear in all copies of the
50  * software, derivative works or modified versions, and any portions
51  * thereof, and that both notices appear in supporting documentation.
52  *
53  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
54  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
55  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
56  *
57  * Carnegie Mellon requests users of this software to return to
58  *
59  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
60  *  School of Computer Science
61  *  Carnegie Mellon University
62  *  Pittsburgh PA 15213-3890
63  *
64  * any improvements or extensions that they make and grant Carnegie the
65  * rights to redistribute these changes.
66  *
67  * $FreeBSD: src/sys/vm/vm_fault.c,v 1.108.2.8 2002/02/26 05:49:27 silby Exp $
68  * $DragonFly: src/sys/vm/vm_fault.c,v 1.47 2008/07/01 02:02:56 dillon Exp $
69  */
70 
71 /*
72  *	Page fault handling module.
73  */
74 
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/kernel.h>
78 #include <sys/proc.h>
79 #include <sys/vnode.h>
80 #include <sys/resourcevar.h>
81 #include <sys/vmmeter.h>
82 #include <sys/vkernel.h>
83 #include <sys/lock.h>
84 #include <sys/sysctl.h>
85 
86 #include <cpu/lwbuf.h>
87 
88 #include <vm/vm.h>
89 #include <vm/vm_param.h>
90 #include <vm/pmap.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_object.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_pageout.h>
95 #include <vm/vm_kern.h>
96 #include <vm/vm_pager.h>
97 #include <vm/vnode_pager.h>
98 #include <vm/vm_extern.h>
99 
100 #include <sys/thread2.h>
101 #include <vm/vm_page2.h>
102 
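/*
 * Editorial summary (not part of the original source), inferred from how
 * the fields are used below:
 *
 *	m, object, pindex	- page, object, and page index currently being
 *				  resolved while walking the backing chain.
 *	first_m, first_object,	- page, object, and protection at the top of
 *	first_prot		  the chain as returned by vm_map_lookup().
 *	map, entry		- originating map and map entry, if any.
 *	lookup_still_valid	- the map read-lock from the lookup is held.
 *	hardfault		- set when real I/O was required.
 *	map_generation		- map timestamp used to detect races after
 *				  the map lock has been dropped.
 *	shared, first_shared	- the object locks are held shared rather
 *				  than exclusive.
 *	wired			- the mapping is wired.
 *	vp			- vnode locked via vnode_pager_lock(), if any.
 */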
103 struct faultstate {
104 	vm_page_t m;
105 	vm_object_t object;
106 	vm_pindex_t pindex;
107 	vm_prot_t prot;
108 	vm_page_t first_m;
109 	vm_object_t first_object;
110 	vm_prot_t first_prot;
111 	vm_map_t map;
112 	vm_map_entry_t entry;
113 	int lookup_still_valid;
114 	int hardfault;
115 	int fault_flags;
116 	int map_generation;
117 	int shared;
118 	int first_shared;
119 	boolean_t wired;
120 	struct vnode *vp;
121 };
122 
123 static int debug_fault = 0;
124 SYSCTL_INT(_vm, OID_AUTO, debug_fault, CTLFLAG_RW, &debug_fault, 0, "");
125 static int debug_cluster = 0;
126 SYSCTL_INT(_vm, OID_AUTO, debug_cluster, CTLFLAG_RW, &debug_cluster, 0, "");
127 int vm_shared_fault = 1;
128 TUNABLE_INT("vm.shared_fault", &vm_shared_fault);
129 SYSCTL_INT(_vm, OID_AUTO, shared_fault, CTLFLAG_RW, &vm_shared_fault, 0,
130 	   "Allow shared token on vm_object");
131 static long vm_shared_hit = 0;
132 SYSCTL_LONG(_vm, OID_AUTO, shared_hit, CTLFLAG_RW, &vm_shared_hit, 0,
133 	   "Successful shared faults");
134 static long vm_shared_count = 0;
135 SYSCTL_LONG(_vm, OID_AUTO, shared_count, CTLFLAG_RW, &vm_shared_count, 0,
136 	   "Shared fault attempts");
137 static long vm_shared_miss = 0;
138 SYSCTL_LONG(_vm, OID_AUTO, shared_miss, CTLFLAG_RW, &vm_shared_miss, 0,
139 	   "Unsuccessful shared faults");
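/*
 * Editorial example (not in the original source): the shared-fault fast
 * path can be tuned and monitored at run time, e.g.:
 *
 *	sysctl vm.shared_fault=0	# force exclusive object locks
 *	sysctl vm.shared_count vm.shared_hit vm.shared_miss
 *
 * vm.shared_fault may also be set as a boot-time tunable (TUNABLE_INT).
 */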
140 
141 static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t, int);
142 static int vm_fault_vpagetable(struct faultstate *, vm_pindex_t *,
143 			vpte_t, int, int);
144 #if 0
145 static int vm_fault_additional_pages (vm_page_t, int, int, vm_page_t *, int *);
146 #endif
147 static void vm_set_nosync(vm_page_t m, vm_map_entry_t entry);
148 static void vm_prefault(pmap_t pmap, vm_offset_t addra,
149 			vm_map_entry_t entry, int prot, int fault_flags);
150 static void vm_prefault_quick(pmap_t pmap, vm_offset_t addra,
151 			vm_map_entry_t entry, int prot, int fault_flags);
152 
153 static __inline void
154 release_page(struct faultstate *fs)
155 {
156 	vm_page_deactivate(fs->m);
157 	vm_page_wakeup(fs->m);
158 	fs->m = NULL;
159 }
160 
161 /*
162  * NOTE: Once unlocked any cached fs->entry becomes invalid, any reuse
163  *	 requires relocking and then checking the timestamp.
164  *
165  * NOTE: vm_map_lock_read() does not bump fs->map->timestamp so we do
166  *	 not have to update fs->map_generation here.
167  *
168  * NOTE: This function can fail due to a deadlock against the caller's
169  *	 holding of a vm_page BUSY.
170  */
171 static __inline int
172 relock_map(struct faultstate *fs)
173 {
174 	int error;
175 
176 	if (fs->lookup_still_valid == FALSE && fs->map) {
177 		error = vm_map_lock_read_to(fs->map);
178 		if (error == 0)
179 			fs->lookup_still_valid = TRUE;
180 	} else {
181 		error = 0;
182 	}
183 	return error;
184 }
185 
186 static __inline void
187 unlock_map(struct faultstate *fs)
188 {
189 	if (fs->lookup_still_valid && fs->map) {
190 		vm_map_lookup_done(fs->map, fs->entry, 0);
191 		fs->lookup_still_valid = FALSE;
192 	}
193 }
194 
195 /*
196  * Clean up after a successful call to vm_fault_object() so another call
197  * to vm_fault_object() can be made.
198  */
199 static void
200 _cleanup_successful_fault(struct faultstate *fs, int relock)
201 {
202 	/*
203 	 * We allocated a junk page for a COW operation that did
204 	 * not occur; the page must be freed.
205 	 */
206 	if (fs->object != fs->first_object) {
207 		KKASSERT(fs->first_shared == 0);
208 		vm_page_free(fs->first_m);
209 		vm_object_pip_wakeup(fs->object);
210 		fs->first_m = NULL;
211 	}
212 
213 	/*
214 	 * Reset fs->object.
215 	 */
216 	fs->object = fs->first_object;
217 	if (relock && fs->lookup_still_valid == FALSE) {
218 		if (fs->map)
219 			vm_map_lock_read(fs->map);
220 		fs->lookup_still_valid = TRUE;
221 	}
222 }
223 
224 static void
225 _unlock_things(struct faultstate *fs, int dealloc)
226 {
227 	_cleanup_successful_fault(fs, 0);
228 	if (dealloc) {
229 		/*vm_object_deallocate(fs->first_object);*/
230 		/*fs->first_object = NULL; drop used later on */
231 	}
232 	unlock_map(fs);
233 	if (fs->vp != NULL) {
234 		vput(fs->vp);
235 		fs->vp = NULL;
236 	}
237 }
238 
239 #define unlock_things(fs) _unlock_things(fs, 0)
240 #define unlock_and_deallocate(fs) _unlock_things(fs, 1)
241 #define cleanup_successful_fault(fs) _cleanup_successful_fault(fs, 1)
242 
243 /*
244  * TRYPAGER
245  *
246  * Determine if the pager for the current object *might* contain the page.
247  *
248  * We only need to try the pager if this is not a default object (default
249  * objects are zero-fill and have no real pager), and if we are not taking
250  * a wiring fault or if the FS entry is wired.
251  */
252 #define TRYPAGER(fs)	\
253 		(fs->object->type != OBJT_DEFAULT && \
254 		(((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) || fs->wired))
255 
256 /*
257  * vm_fault:
258  *
259  * Handle a page fault occurring at the given address, requiring the given
260  * permissions, in the map specified.  If successful, the page is inserted
261  * into the associated physical map.
262  *
263  * NOTE: The given address should be truncated to the proper page address.
264  *
265  * KERN_SUCCESS is returned if the page fault is handled; otherwise,
266  * a standard error specifying why the fault is fatal is returned.
267  *
268  * The map in question must be referenced, and remains so.
269  * The caller may hold no locks.
270  * No other requirements.
271  */
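/*
 * Editorial sketch (not part of the original source): a typical caller,
 * such as the machine-dependent trap code, resolves the faulting vmspace
 * and invokes this routine roughly as follows:
 *
 *	rv = vm_fault(&vm->vm_map, trunc_page(va), ftype, VM_FAULT_NORMAL);
 *	if (rv != KERN_SUCCESS)
 *		signal the process (or panic on an unrecoverable kernel fault);
 *
 * satisfying the requirements above: the address is page-truncated and the
 * map remains referenced across the call.
 */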
272 int
273 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
274 {
275 	int result;
276 	vm_pindex_t first_pindex;
277 	struct faultstate fs;
278 	struct lwp *lp;
279 	int growstack;
280 	int retry = 0;
281 
282 	vm_page_pcpu_cache();
283 	fs.hardfault = 0;
284 	fs.fault_flags = fault_flags;
285 	fs.vp = NULL;
286 	fs.shared = vm_shared_fault;
287 	fs.first_shared = vm_shared_fault;
288 	growstack = 1;
289 	if (vm_shared_fault)
290 		++vm_shared_count;
291 
292 	/*
293 	 * vm_map interactions
294 	 */
295 	if ((lp = curthread->td_lwp) != NULL)
296 		lp->lwp_flags |= LWP_PAGING;
297 	lwkt_gettoken(&map->token);
298 
299 RetryFault:
300 	/*
301 	 * Find the vm_map_entry representing the backing store and resolve
302 	 * the top level object and page index.  This may have the side
303 	 * effect of executing a copy-on-write on the map entry and/or
304 	 * creating a shadow object, but will not COW any actual VM pages.
305 	 *
306 	 * On success fs.map is left read-locked and various other fields
307 	 * are initialized but not otherwise referenced or locked.
308 	 *
309 	 * NOTE!  vm_map_lookup will try to upgrade the fault_type to
310 	 * VM_PROT_WRITE if the map entry is a virtual page table and also
311 	 * writable, so we can set the 'A' (accessed) bit in the virtual page
312 	 * table entry.
313 	 */
314 	fs.map = map;
315 	result = vm_map_lookup(&fs.map, vaddr, fault_type,
316 			       &fs.entry, &fs.first_object,
317 			       &first_pindex, &fs.first_prot, &fs.wired);
318 
319 	/*
320 	 * If the lookup failed or the map protections are incompatible,
321 	 * the fault generally fails.  However, if the caller is trying
322 	 * to do a user wiring we have more work to do.
323 	 */
324 	if (result != KERN_SUCCESS) {
325 		if (result != KERN_PROTECTION_FAILURE ||
326 		    (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
327 		{
328 			if (result == KERN_INVALID_ADDRESS && growstack &&
329 			    map != &kernel_map && curproc != NULL) {
330 				result = vm_map_growstack(curproc, vaddr);
331 				if (result == KERN_SUCCESS) {
332 					growstack = 0;
333 					++retry;
334 					goto RetryFault;
335 				}
336 				result = KERN_FAILURE;
337 			}
338 			goto done;
339 		}
340 
341 		/*
342 		 * If we are user-wiring a r/w segment, and it is COW, then
343 		 * we need to do the COW operation.  Note that we don't
344 		 * currently COW RO sections, because it is NOT desirable
345 		 * to COW .text.  We simply keep .text from ever being COW'ed
346 		 * and take the heat that one cannot debug wired .text sections.
347 		 */
348 		result = vm_map_lookup(&fs.map, vaddr,
349 				       VM_PROT_READ|VM_PROT_WRITE|
350 				        VM_PROT_OVERRIDE_WRITE,
351 				       &fs.entry, &fs.first_object,
352 				       &first_pindex, &fs.first_prot,
353 				       &fs.wired);
354 		if (result != KERN_SUCCESS) {
355 			result = KERN_FAILURE;
356 			goto done;
357 		}
358 
359 		/*
360 		 * If we don't COW now, on a user wire, the user will never
361 		 * be able to write to the mapping.  If we don't make this
362 		 * restriction, the bookkeeping would be nearly impossible.
363 		 *
364 		 * XXX We have a shared lock, this will have a MP race but
365 		 * I don't see how it can hurt anything.
366 		 */
367 		if ((fs.entry->protection & VM_PROT_WRITE) == 0)
368 			fs.entry->max_protection &= ~VM_PROT_WRITE;
369 	}
370 
371 	/*
372 	 * fs.map is read-locked
373 	 *
374 	 * Misc checks.  Save the map generation number to detect races.
375 	 */
376 	fs.map_generation = fs.map->timestamp;
377 	fs.lookup_still_valid = TRUE;
378 	fs.first_m = NULL;
379 	fs.object = fs.first_object;	/* so unlock_and_deallocate works */
380 
381 	if (fs.entry->eflags & (MAP_ENTRY_NOFAULT | MAP_ENTRY_KSTACK)) {
382 		if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
383 			panic("vm_fault: fault on nofault entry, addr: %p",
384 			      (void *)vaddr);
385 		}
386 		if ((fs.entry->eflags & MAP_ENTRY_KSTACK) &&
387 		    vaddr >= fs.entry->start &&
388 		    vaddr < fs.entry->start + PAGE_SIZE) {
389 			panic("vm_fault: fault on stack guard, addr: %p",
390 			      (void *)vaddr);
391 		}
392 	}
393 
394 	/*
395 	 * A system map entry may return a NULL object.  No object means
396 	 * no pager means an unrecoverable kernel fault.
397 	 */
398 	if (fs.first_object == NULL) {
399 		panic("vm_fault: unrecoverable fault at %p in entry %p",
400 			(void *)vaddr, fs.entry);
401 	}
402 
403 	/*
404 	 * Fail here if not a trivial anonymous page fault and TDF_NOFAULT
405 	 * is set.
406 	 */
407 	if ((curthread->td_flags & TDF_NOFAULT) &&
408 	    (retry ||
409 	     fs.first_object->type == OBJT_VNODE ||
410 	     fs.first_object->backing_object)) {
411 		result = KERN_FAILURE;
412 		unlock_things(&fs);
413 		goto done2;
414 	}
415 
416 	/*
417 	 * If the entry is wired we cannot change the page protection.
418 	 */
419 	if (fs.wired)
420 		fault_type = fs.first_prot;
421 
422 	/*
423 	 * We generally want to avoid unnecessary exclusive modes on backing
424 	 * and terminal objects because this can seriously interfere with
425 	 * heavily fork()'d processes (particularly /bin/sh scripts).
426 	 *
427 	 * However, we also want to avoid unnecessary retries due to needed
428 	 * shared->exclusive promotion for common faults.  Exclusive mode is
429 	 * always needed if any page insertion, rename, or free occurs in an
430 	 * object (and also indirectly if any I/O is done).
431 	 *
432 	 * The main issue here is going to be fs.first_shared.  If the
433 	 * first_object has a backing object which isn't shadowed and the
434 	 * process is single-threaded we might as well use an exclusive
435 	 * lock/chain right off the bat.
436 	 */
437 	if (fs.first_shared && fs.first_object->backing_object &&
438 	    LIST_EMPTY(&fs.first_object->shadow_head) &&
439 	    curthread->td_proc && curthread->td_proc->p_nthreads == 1) {
440 		fs.first_shared = 0;
441 	}
442 
443 	/*
444 	 * swap_pager_unswapped() needs an exclusive object
445 	 */
446 	if (fault_flags & (VM_FAULT_UNSWAP | VM_FAULT_DIRTY)) {
447 		fs.first_shared = 0;
448 	}
449 
450 	/*
451 	 * Obtain a top-level object lock, shared or exclusive depending
452 	 * on fs.first_shared.  If a shared lock winds up being insufficient
453 	 * we will retry with an exclusive lock.
454 	 *
455 	 * The vnode pager lock is always shared.
456 	 */
457 	if (fs.first_shared)
458 		vm_object_hold_shared(fs.first_object);
459 	else
460 		vm_object_hold(fs.first_object);
461 	if (fs.vp == NULL)
462 		fs.vp = vnode_pager_lock(fs.first_object);
463 
464 	/*
465 	 * The page we want is at (first_object, first_pindex), but if the
466 	 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
467 	 * page table to figure out the actual pindex.
468 	 *
469 	 * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
470 	 * ONLY
471 	 */
472 	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
473 		result = vm_fault_vpagetable(&fs, &first_pindex,
474 					     fs.entry->aux.master_pde,
475 					     fault_type, 1);
476 		if (result == KERN_TRY_AGAIN) {
477 			vm_object_drop(fs.first_object);
478 			++retry;
479 			goto RetryFault;
480 		}
481 		if (result != KERN_SUCCESS)
482 			goto done;
483 	}
484 
485 	/*
486 	 * Now we have the actual (object, pindex), fault in the page.  If
487 	 * vm_fault_object() fails it will unlock and deallocate the FS
488 	 * data.   If it succeeds everything remains locked and fs->object
489 	 * will have an additional PIP count if it is not equal to
490 	 * fs->first_object
491 	 *
492 	 * vm_fault_object will set fs->prot for the pmap operation.  It is
493 	 * allowed to set VM_PROT_WRITE if fault_type == VM_PROT_READ if the
494 	 * page can be safely written.  However, it will force a read-only
495 	 * mapping for a read fault if the memory is managed by a virtual
496 	 * page table.
497 	 *
498 	 * If the fault code uses the shared object lock shortcut
499 	 * we must not try to burst (we can't allocate VM pages).
500 	 */
501 	result = vm_fault_object(&fs, first_pindex, fault_type, 1);
502 
503 	if (debug_fault > 0) {
504 		--debug_fault;
505 		kprintf("VM_FAULT result %d addr=%jx type=%02x flags=%02x "
506 			"fs.m=%p fs.prot=%02x fs.wired=%02x fs.entry=%p\n",
507 			result, (intmax_t)vaddr, fault_type, fault_flags,
508 			fs.m, fs.prot, fs.wired, fs.entry);
509 	}
510 
511 	if (result == KERN_TRY_AGAIN) {
512 		vm_object_drop(fs.first_object);
513 		++retry;
514 		goto RetryFault;
515 	}
516 	if (result != KERN_SUCCESS)
517 		goto done;
518 
519 	/*
520 	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
521 	 * will contain a busied page.
522 	 *
523 	 * Enter the page into the pmap and do pmap-related adjustments.
524 	 */
525 	KKASSERT(fs.lookup_still_valid == TRUE);
526 	vm_page_flag_set(fs.m, PG_REFERENCED);
527 	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired, fs.entry);
528 	mycpu->gd_cnt.v_vm_faults++;
529 	if (curthread->td_lwp)
530 		++curthread->td_lwp->lwp_ru.ru_minflt;
531 
532 	/*KKASSERT(fs.m->queue == PQ_NONE); page-in op may deactivate page */
533 	KKASSERT(fs.m->flags & PG_BUSY);
534 
535 	/*
536 	 * If the page is not wired down, then put it where the pageout daemon
537 	 * can find it.
538 	 */
539 	if (fs.fault_flags & VM_FAULT_WIRE_MASK) {
540 		if (fs.wired)
541 			vm_page_wire(fs.m);
542 		else
543 			vm_page_unwire(fs.m, 1);
544 	} else {
545 		vm_page_activate(fs.m);
546 	}
547 	vm_page_wakeup(fs.m);
548 
549 	/*
550 	 * Burst in a few more pages if possible.  The fs.map should still
551 	 * be locked.  To avoid interlocking against a vnode->getblk
552 	 * operation we had to be sure to unbusy our primary vm_page above
553 	 * first.
554 	 *
555 	 * A normal burst can continue down backing store but should only be
556 	 * executed if we are holding an exclusive lock, otherwise the exclusive
557 	 * locks the burst code acquires might cause excessive SMP collisions.
558 	 *
559 	 * A quick burst can be utilized when there is no backing object
560 	 * (i.e. a shared file mmap).
561 	 */
562 	if ((fault_flags & VM_FAULT_BURST) &&
563 	    (fs.fault_flags & VM_FAULT_WIRE_MASK) == 0 &&
564 	    fs.wired == 0) {
565 		if (fs.first_shared == 0 && fs.shared == 0) {
566 			vm_prefault(fs.map->pmap, vaddr,
567 				    fs.entry, fs.prot, fault_flags);
568 		} else {
569 			vm_prefault_quick(fs.map->pmap, vaddr,
570 					  fs.entry, fs.prot, fault_flags);
571 		}
572 	}
573 
574 	/*
575 	 * Unlock everything, and return
576 	 */
577 	unlock_things(&fs);
578 
579 	if (curthread->td_lwp) {
580 		if (fs.hardfault) {
581 			curthread->td_lwp->lwp_ru.ru_majflt++;
582 		} else {
583 			curthread->td_lwp->lwp_ru.ru_minflt++;
584 		}
585 	}
586 
587 	/*vm_object_deallocate(fs.first_object);*/
588 	/*fs.m = NULL; */
589 	/*fs.first_object = NULL; must still drop later */
590 
591 	result = KERN_SUCCESS;
592 done:
593 	if (fs.first_object)
594 		vm_object_drop(fs.first_object);
595 done2:
596 	lwkt_reltoken(&map->token);
597 	if (lp)
598 		lp->lwp_flags &= ~LWP_PAGING;
599 	if (vm_shared_fault && fs.shared == 0)
600 		++vm_shared_miss;
601 	return (result);
602 }
603 
604 /*
605  * Fault in the specified virtual address in the current process map,
606  * returning a held VM page or NULL.  See vm_fault_page() for more
607  * information.
608  *
609  * No requirements.
610  */
611 vm_page_t
612 vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type, int *errorp)
613 {
614 	struct lwp *lp = curthread->td_lwp;
615 	vm_page_t m;
616 
617 	m = vm_fault_page(&lp->lwp_vmspace->vm_map, va,
618 			  fault_type, VM_FAULT_NORMAL, errorp);
619 	return(m);
620 }
621 
622 /*
623  * Fault in the specified virtual address in the specified map, doing all
624  * necessary manipulation of the object store and all necessary I/O.  Return
625  * a held VM page or NULL, and set *errorp.  The related pmap is not
626  * updated.
627  *
628  * The returned page will be properly dirtied if VM_PROT_WRITE was specified,
629  * and marked PG_REFERENCED as well.
630  *
631  * If the page cannot be faulted writable and VM_PROT_WRITE was specified, an
632  * error will be returned.
633  *
634  * No requirements.
635  */
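/*
 * Editorial usage note (not in the original source): the returned page is
 * held but not busied, so a caller typically does something like:
 *
 *	m = vm_fault_page(map, va, VM_PROT_READ, VM_FAULT_NORMAL, &error);
 *	if (m) {
 *		... access the page (e.g. via a temporary kernel mapping) ...
 *		vm_page_unhold(m);
 *	}
 *
 * i.e. the hold is dropped with vm_page_unhold() when the caller is done.
 */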
636 vm_page_t
637 vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
638 	      int fault_flags, int *errorp)
639 {
640 	vm_pindex_t first_pindex;
641 	struct faultstate fs;
642 	int result;
643 	int retry = 0;
644 	vm_prot_t orig_fault_type = fault_type;
645 
646 	fs.hardfault = 0;
647 	fs.fault_flags = fault_flags;
648 	KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);
649 
650 	/*
651 	 * Dive the pmap (concurrency possible).  If we find the
652 	 * appropriate page we can terminate early and quickly.
653 	 */
654 	fs.m = pmap_fault_page_quick(map->pmap, vaddr, fault_type);
655 	if (fs.m) {
656 		*errorp = 0;
657 		return(fs.m);
658 	}
659 
660 	/*
661 	 * Otherwise take a concurrency hit and do a formal page
662 	 * fault.
663 	 */
664 	fs.shared = vm_shared_fault;
665 	fs.first_shared = vm_shared_fault;
666 	fs.vp = NULL;
667 	lwkt_gettoken(&map->token);
668 
669 	/*
670 	 * swap_pager_unswapped() needs an exclusive object
671 	 */
672 	if (fault_flags & (VM_FAULT_UNSWAP | VM_FAULT_DIRTY)) {
673 		fs.first_shared = 0;
674 	}
675 
676 RetryFault:
677 	/*
678 	 * Find the vm_map_entry representing the backing store and resolve
679 	 * the top level object and page index.  This may have the side
680 	 * effect of executing a copy-on-write on the map entry and/or
681 	 * creating a shadow object, but will not COW any actual VM pages.
682 	 *
683 	 * On success fs.map is left read-locked and various other fields
684 	 * are initialized but not otherwise referenced or locked.
685 	 *
686 	 * NOTE!  vm_map_lookup will upgrade the fault_type to VM_PROT_WRITE
687 	 * if the map entry is a virtual page table and also writable,
688 	 * so we can set the 'A' (accessed) bit in the virtual page table entry.
689 	 */
690 	fs.map = map;
691 	result = vm_map_lookup(&fs.map, vaddr, fault_type,
692 			       &fs.entry, &fs.first_object,
693 			       &first_pindex, &fs.first_prot, &fs.wired);
694 
695 	if (result != KERN_SUCCESS) {
696 		*errorp = result;
697 		fs.m = NULL;
698 		goto done;
699 	}
700 
701 	/*
702 	 * fs.map is read-locked
703 	 *
704 	 * Misc checks.  Save the map generation number to detect races.
705 	 */
706 	fs.map_generation = fs.map->timestamp;
707 	fs.lookup_still_valid = TRUE;
708 	fs.first_m = NULL;
709 	fs.object = fs.first_object;	/* so unlock_and_deallocate works */
710 
711 	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
712 		panic("vm_fault: fault on nofault entry, addr: %lx",
713 		    (u_long)vaddr);
714 	}
715 
716 	/*
717 	 * A system map entry may return a NULL object.  No object means
718 	 * no pager means an unrecoverable kernel fault.
719 	 */
720 	if (fs.first_object == NULL) {
721 		panic("vm_fault: unrecoverable fault at %p in entry %p",
722 			(void *)vaddr, fs.entry);
723 	}
724 
725 	/*
726 	 * Fail here if not a trivial anonymous page fault and TDF_NOFAULT
727 	 * is set.
728 	 */
729 	if ((curthread->td_flags & TDF_NOFAULT) &&
730 	    (retry ||
731 	     fs.first_object->type == OBJT_VNODE ||
732 	     fs.first_object->backing_object)) {
733 		*errorp = KERN_FAILURE;
734 		unlock_things(&fs);
735 		goto done2;
736 	}
737 
738 	/*
739 	 * If the entry is wired we cannot change the page protection.
740 	 */
741 	if (fs.wired)
742 		fault_type = fs.first_prot;
743 
744 	/*
745 	 * Make a reference to this object to prevent its disposal while we
746 	 * are messing with it.  Once we have the reference, the map is free
747 	 * to be diddled.  Since objects reference their shadows (and copies),
748 	 * they will stay around as well.
749 	 *
750 	 * The reference should also prevent an unexpected collapse of the
751 	 * parent that might move pages from the current object into the
752 	 * parent unexpectedly, resulting in corruption.
753 	 *
754 	 * Bump the paging-in-progress count to prevent size changes (e.g.
755 	 * truncation operations) during I/O.  This must be done after
756 	 * obtaining the vnode lock in order to avoid possible deadlocks.
757 	 */
758 	if (fs.first_shared)
759 		vm_object_hold_shared(fs.first_object);
760 	else
761 		vm_object_hold(fs.first_object);
762 	if (fs.vp == NULL)
763 		fs.vp = vnode_pager_lock(fs.first_object);	/* shared */
764 
765 	/*
766 	 * The page we want is at (first_object, first_pindex), but if the
767 	 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
768 	 * page table to figure out the actual pindex.
769 	 *
770 	 * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
771 	 * ONLY
772 	 */
773 	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
774 		result = vm_fault_vpagetable(&fs, &first_pindex,
775 					     fs.entry->aux.master_pde,
776 					     fault_type, 1);
777 		if (result == KERN_TRY_AGAIN) {
778 			vm_object_drop(fs.first_object);
779 			++retry;
780 			goto RetryFault;
781 		}
782 		if (result != KERN_SUCCESS) {
783 			*errorp = result;
784 			fs.m = NULL;
785 			goto done;
786 		}
787 	}
788 
789 	/*
790 	 * Now we have the actual (object, pindex), fault in the page.  If
791 	 * vm_fault_object() fails it will unlock and deallocate the FS
792 	 * data.   If it succeeds everything remains locked and fs->object
793 	 * will have an additional PIP count if it is not equal to
794 	 * fs->first_object
795 	 */
796 	fs.m = NULL;
797 	result = vm_fault_object(&fs, first_pindex, fault_type, 1);
798 
799 	if (result == KERN_TRY_AGAIN) {
800 		vm_object_drop(fs.first_object);
801 		++retry;
802 		goto RetryFault;
803 	}
804 	if (result != KERN_SUCCESS) {
805 		*errorp = result;
806 		fs.m = NULL;
807 		goto done;
808 	}
809 
810 	if ((orig_fault_type & VM_PROT_WRITE) &&
811 	    (fs.prot & VM_PROT_WRITE) == 0) {
812 		*errorp = KERN_PROTECTION_FAILURE;
813 		unlock_and_deallocate(&fs);
814 		fs.m = NULL;
815 		goto done;
816 	}
817 
818 	/*
819 	 * DO NOT UPDATE THE PMAP!!!  This function may be called for
820 	 * a pmap unrelated to the current process pmap, in which case
821 	 * the current cpu core will not be listed in the pmap's pm_active
822 	 * mask.  Thus invalidation interlocks will fail to work properly.
823 	 *
824 	 * (for example, 'ps' uses procfs to read program arguments from
825 	 * each process's stack).
826 	 *
827 	 * In addition to the above this function will be called to acquire
828 	 * a page that might already be faulted in, re-faulting it
829 	 * continuously is a waste of time.
830 	 *
831 	 * XXX could this have been the cause of our random seg-fault
832 	 *     issues?  procfs accesses user stacks.
833 	 */
834 	vm_page_flag_set(fs.m, PG_REFERENCED);
835 #if 0
836 	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired, NULL);
837 	mycpu->gd_cnt.v_vm_faults++;
838 	if (curthread->td_lwp)
839 		++curthread->td_lwp->lwp_ru.ru_minflt;
840 #endif
841 
842 	/*
843 	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
844 	 * will contain a busied page.  So we must unlock here after having
845 	 * messed with the pmap.
846 	 */
847 	unlock_things(&fs);
848 
849 	/*
850 	 * Return a held page.  We are not doing any pmap manipulation so do
851 	 * not set PG_MAPPED.  However, adjust the page flags according to
852 	 * the fault type because the caller may not use a managed pmapping
853 	 * (so we don't want to lose the fact that the page will be dirtied
854 	 * if a write fault was specified).
855 	 */
856 	vm_page_hold(fs.m);
857 	vm_page_activate(fs.m);
858 	if (fault_type & VM_PROT_WRITE)
859 		vm_page_dirty(fs.m);
860 
861 	if (curthread->td_lwp) {
862 		if (fs.hardfault) {
863 			curthread->td_lwp->lwp_ru.ru_majflt++;
864 		} else {
865 			curthread->td_lwp->lwp_ru.ru_minflt++;
866 		}
867 	}
868 
869 	/*
870 	 * Unlock everything, and return the held page.
871 	 */
872 	vm_page_wakeup(fs.m);
873 	/*vm_object_deallocate(fs.first_object);*/
874 	/*fs.first_object = NULL; */
875 	*errorp = 0;
876 
877 done:
878 	if (fs.first_object)
879 		vm_object_drop(fs.first_object);
880 done2:
881 	lwkt_reltoken(&map->token);
882 	return(fs.m);
883 }
884 
885 /*
886  * Fault in the specified (object,offset), dirty the returned page as
887  * needed.  If the requested fault_type cannot be done NULL and an
888  * needed.  If the requested fault_type cannot be satisfied, NULL is
889  * returned and *errorp is set.
890  * A held (but not busied) page is returned.
891  *
892  * The passed in object must be held as specified by the shared
893  * argument.
894  */
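/*
 * Editorial note (inferred from the code below): *sharedp describes how the
 * passed-in object is held on entry and is updated on each retry, so on
 * return it reflects whether the object remained shared or had to be
 * upgraded to an exclusive lock.
 */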
895 vm_page_t
896 vm_fault_object_page(vm_object_t object, vm_ooffset_t offset,
897 		     vm_prot_t fault_type, int fault_flags,
898 		     int *sharedp, int *errorp)
899 {
900 	int result;
901 	vm_pindex_t first_pindex;
902 	struct faultstate fs;
903 	struct vm_map_entry entry;
904 
905 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
906 	bzero(&entry, sizeof(entry));
907 	entry.object.vm_object = object;
908 	entry.maptype = VM_MAPTYPE_NORMAL;
909 	entry.protection = entry.max_protection = fault_type;
910 
911 	fs.hardfault = 0;
912 	fs.fault_flags = fault_flags;
913 	fs.map = NULL;
914 	fs.shared = vm_shared_fault;
915 	fs.first_shared = *sharedp;
916 	fs.vp = NULL;
917 	KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);
918 
919 	/*
920 	 * Might require swap block adjustments
921 	 */
922 	if (fs.first_shared && (fault_flags & (VM_FAULT_UNSWAP | VM_FAULT_DIRTY))) {
923 		fs.first_shared = 0;
924 		vm_object_upgrade(object);
925 	}
926 
927 	/*
928 	 * Retry loop as needed (typically for shared->exclusive transitions)
929 	 */
930 RetryFault:
931 	*sharedp = fs.first_shared;
932 	first_pindex = OFF_TO_IDX(offset);
933 	fs.first_object = object;
934 	fs.entry = &entry;
935 	fs.first_prot = fault_type;
936 	fs.wired = 0;
937 	/*fs.map_generation = 0; unused */
938 
939 	/*
940 	 * Make a reference to this object to prevent its disposal while we
941 	 * are messing with it.  Once we have the reference, the map is free
942 	 * to be diddled.  Since objects reference their shadows (and copies),
943 	 * they will stay around as well.
944 	 *
945 	 * The reference should also prevent an unexpected collapse of the
946 	 * parent that might move pages from the current object into the
947 	 * parent unexpectedly, resulting in corruption.
948 	 *
949 	 * Bump the paging-in-progress count to prevent size changes (e.g.
950 	 * truncation operations) during I/O.  This must be done after
951 	 * obtaining the vnode lock in order to avoid possible deadlocks.
952 	 */
953 	if (fs.vp == NULL)
954 		fs.vp = vnode_pager_lock(fs.first_object);
955 
956 	fs.lookup_still_valid = TRUE;
957 	fs.first_m = NULL;
958 	fs.object = fs.first_object;	/* so unlock_and_deallocate works */
959 
960 #if 0
961 	/* XXX future - ability to operate on VM object using vpagetable */
962 	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
963 		result = vm_fault_vpagetable(&fs, &first_pindex,
964 					     fs.entry->aux.master_pde,
965 					     fault_type, 0);
966 		if (result == KERN_TRY_AGAIN) {
967 			if (fs.first_shared == 0 && *sharedp)
968 				vm_object_upgrade(object);
969 			goto RetryFault;
970 		}
971 		if (result != KERN_SUCCESS) {
972 			*errorp = result;
973 			return (NULL);
974 		}
975 	}
976 #endif
977 
978 	/*
979 	 * Now we have the actual (object, pindex), fault in the page.  If
980 	 * vm_fault_object() fails it will unlock and deallocate the FS
981 	 * data.   If it succeeds everything remains locked and fs->object
982 	 * will have an additional PIP count if it is not equal to
983 	 * fs->first_object
984 	 *
985 	 * On KERN_TRY_AGAIN vm_fault_object() leaves fs.first_object intact.
986 	 * We may have to upgrade its lock to handle the requested fault.
987 	 */
988 	result = vm_fault_object(&fs, first_pindex, fault_type, 0);
989 
990 	if (result == KERN_TRY_AGAIN) {
991 		if (fs.first_shared == 0 && *sharedp)
992 			vm_object_upgrade(object);
993 		goto RetryFault;
994 	}
995 	if (result != KERN_SUCCESS) {
996 		*errorp = result;
997 		return(NULL);
998 	}
999 
1000 	if ((fault_type & VM_PROT_WRITE) && (fs.prot & VM_PROT_WRITE) == 0) {
1001 		*errorp = KERN_PROTECTION_FAILURE;
1002 		unlock_and_deallocate(&fs);
1003 		return(NULL);
1004 	}
1005 
1006 	/*
1007 	 * On success vm_fault_object() does not unlock or deallocate, so we
1008 	 * do it here.  Note that the returned fs.m will be busied.
1009 	 */
1010 	unlock_things(&fs);
1011 
1012 	/*
1013 	 * Return a held page.  We are not doing any pmap manipulation so do
1014 	 * not set PG_MAPPED.  However, adjust the page flags according to
1015 	 * the fault type because the caller may not use a managed pmapping
1016 	 * (so we don't want to lose the fact that the page will be dirtied
1017 	 * if a write fault was specified).
1018 	 */
1019 	vm_page_hold(fs.m);
1020 	vm_page_activate(fs.m);
1021 	if ((fault_type & VM_PROT_WRITE) || (fault_flags & VM_FAULT_DIRTY))
1022 		vm_page_dirty(fs.m);
1023 	if (fault_flags & VM_FAULT_UNSWAP)
1024 		swap_pager_unswapped(fs.m);
1025 
1026 	/*
1027 	 * Indicate that the page was accessed.
1028 	 */
1029 	vm_page_flag_set(fs.m, PG_REFERENCED);
1030 
1031 	if (curthread->td_lwp) {
1032 		if (fs.hardfault) {
1033 			curthread->td_lwp->lwp_ru.ru_majflt++;
1034 		} else {
1035 			curthread->td_lwp->lwp_ru.ru_minflt++;
1036 		}
1037 	}
1038 
1039 	/*
1040 	 * Unlock everything, and return the held page.
1041 	 */
1042 	vm_page_wakeup(fs.m);
1043 	/*vm_object_deallocate(fs.first_object);*/
1044 	/*fs.first_object = NULL; */
1045 
1046 	*errorp = 0;
1047 	return(fs.m);
1048 }
1049 
1050 /*
1051  * Translate the virtual page number (first_pindex) that is relative
1052  * to the address space into a logical page number that is relative to the
1053  * backing object.  Use the virtual page table pointed to by (vpte).
1054  *
1055  * This implements an N-level page table.  Any level can terminate the
1056  * scan by setting VPTE_PS.   A linear mapping is accomplished by setting
1057  * VPTE_PS in the master page directory entry set via mcontrol(MADV_SETMAP).
1058  */
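/*
 * Editorial example (assuming the x86_64 vkernel constants PAGE_SHIFT == 12
 * and VPTE_PAGE_BITS == 9, i.e. 512 8-byte vptes per page-table page): each
 * loop iteration below consumes 9 index bits from *pindex to select a vpte
 * from the current page-table page, until VPTE_PS terminates the scan or
 * vshift reaches 0, after which the remaining low bits of *pindex are
 * combined with the frame taken from the final vpte.
 */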
1059 static
1060 int
1061 vm_fault_vpagetable(struct faultstate *fs, vm_pindex_t *pindex,
1062 		    vpte_t vpte, int fault_type, int allow_nofault)
1063 {
1064 	struct lwbuf *lwb;
1065 	struct lwbuf lwb_cache;
1066 	int vshift = VPTE_FRAME_END - PAGE_SHIFT; /* index bits remaining */
1067 	int result = KERN_SUCCESS;
1068 	vpte_t *ptep;
1069 
1070 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(fs->first_object));
1071 	for (;;) {
1072 		/*
1073 		 * We cannot proceed if the vpte is not valid, not readable
1074 		 * for a read fault, or not writable for a write fault.
1075 		 */
1076 		if ((vpte & VPTE_V) == 0) {
1077 			unlock_and_deallocate(fs);
1078 			return (KERN_FAILURE);
1079 		}
1080 		if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_RW) == 0) {
1081 			unlock_and_deallocate(fs);
1082 			return (KERN_FAILURE);
1083 		}
1084 		if ((vpte & VPTE_PS) || vshift == 0)
1085 			break;
1086 		KKASSERT(vshift >= VPTE_PAGE_BITS);
1087 
1088 		/*
1089 		 * Get the page table page.  Nominally we only read the page
1090 		 * table, but since we are actively setting VPTE_M and VPTE_A,
1091 		 * tell vm_fault_object() that we are writing it.
1092 		 *
1093 		 * There is currently no real need to optimize this.
1094 		 */
1095 		result = vm_fault_object(fs, (vpte & VPTE_FRAME) >> PAGE_SHIFT,
1096 					 VM_PROT_READ|VM_PROT_WRITE,
1097 					 allow_nofault);
1098 		if (result != KERN_SUCCESS)
1099 			return (result);
1100 
1101 		/*
1102 		 * Process the returned fs.m and look up the page table
1103 		 * entry in the page table page.
1104 		 */
1105 		vshift -= VPTE_PAGE_BITS;
1106 		lwb = lwbuf_alloc(fs->m, &lwb_cache);
1107 		ptep = ((vpte_t *)lwbuf_kva(lwb) +
1108 		        ((*pindex >> vshift) & VPTE_PAGE_MASK));
1109 		vpte = *ptep;
1110 
1111 		/*
1112 		 * Page table write-back.  If the vpte is valid for the
1113 		 * requested operation, do a write-back to the page table.
1114 		 *
1115 		 * XXX VPTE_M is not set properly for page directory pages.
1116 		 * It doesn't get set in the page directory if the page table
1117 		 * is modified during a read access.
1118 		 */
1119 		vm_page_activate(fs->m);
1120 		if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_V) &&
1121 		    (vpte & VPTE_RW)) {
1122 			if ((vpte & (VPTE_M|VPTE_A)) != (VPTE_M|VPTE_A)) {
1123 				atomic_set_long(ptep, VPTE_M | VPTE_A);
1124 				vm_page_dirty(fs->m);
1125 			}
1126 		}
1127 		if ((fault_type & VM_PROT_READ) && (vpte & VPTE_V)) {
1128 			if ((vpte & VPTE_A) == 0) {
1129 				atomic_set_long(ptep, VPTE_A);
1130 				vm_page_dirty(fs->m);
1131 			}
1132 		}
1133 		lwbuf_free(lwb);
1134 		vm_page_flag_set(fs->m, PG_REFERENCED);
1135 		vm_page_wakeup(fs->m);
1136 		fs->m = NULL;
1137 		cleanup_successful_fault(fs);
1138 	}
1139 	/*
1140 	 * Combine remaining address bits with the vpte.
1141 	 */
1142 	/* JG how many bits from each? */
1143 	*pindex = ((vpte & VPTE_FRAME) >> PAGE_SHIFT) +
1144 		  (*pindex & ((1L << vshift) - 1));
1145 	return (KERN_SUCCESS);
1146 }
1147 
1148 
1149 /*
1150  * This is the core of the vm_fault code.
1151  *
1152  * Do all operations required to fault-in (fs.first_object, pindex).  Run
1153  * through the shadow chain as necessary and do required COW or virtual
1154  * copy operations.  The caller has already fully resolved the vm_map_entry
1155  * and, if appropriate, has created a copy-on-write layer.  All we need to
1156  * do is iterate the object chain.
1157  *
1158  * On failure (fs) is unlocked and deallocated and the caller may return or
1159  * retry depending on the failure code.  On success (fs) is NOT unlocked or
1160  * deallocated, fs.m will contain a resolved, busied page, and fs.object
1161  * will have an additional PIP count if it is not equal to fs.first_object.
1162  *
1163  * If locks based on fs->first_shared or fs->shared are insufficient,
1164  * clear the appropriate field(s) and return RETRY.  COWs require that
1165  * first_shared be 0, while page allocations (or frees) require that
1166  * shared be 0.  Renames require that both be 0.
1167  *
1168  * fs->first_object must be held on call.
1169  */
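/*
 * Editorial note: the retry contract can be seen in vm_fault() above, which
 * handles KERN_TRY_AGAIN by dropping the top-level object and restarting the
 * entire lookup:
 *
 *	result = vm_fault_object(&fs, first_pindex, fault_type, 1);
 *	if (result == KERN_TRY_AGAIN) {
 *		vm_object_drop(fs.first_object);
 *		++retry;
 *		goto RetryFault;
 *	}
 */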
1170 static
1171 int
1172 vm_fault_object(struct faultstate *fs, vm_pindex_t first_pindex,
1173 		vm_prot_t fault_type, int allow_nofault)
1174 {
1175 	vm_object_t next_object;
1176 	vm_pindex_t pindex;
1177 	int error;
1178 
1179 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(fs->first_object));
1180 	fs->prot = fs->first_prot;
1181 	fs->object = fs->first_object;
1182 	pindex = first_pindex;
1183 
1184 	vm_object_chain_acquire(fs->first_object, fs->shared);
1185 	vm_object_pip_add(fs->first_object, 1);
1186 
1187 	/*
1188 	 * If a read fault occurs we try to make the page writable if
1189 	 * possible.  There are three cases where we cannot make the
1190 	 * page mapping writable:
1191 	 *
1192 	 * (1) The mapping is read-only or the VM object is read-only,
1193 	 *     fs->prot above will simply not have VM_PROT_WRITE set.
1194 	 *
1195 	 * (2) If the mapping is a virtual page table we need to be able
1196 	 *     to detect writes so we can set VPTE_M in the virtual page
1197 	 *     table.
1198 	 *
1199 	 * (3) If the VM page is read-only or copy-on-write, upgrading would
1200 	 *     just result in an unnecessary COW fault.
1201 	 *
1202 	 * VM_PROT_VPAGED is set if faulting via a virtual page table and
1203 	 * causes adjustments to the 'M'odify bit to also turn off write
1204 	 * access to force a re-fault.
1205 	 */
1206 	if (fs->entry->maptype == VM_MAPTYPE_VPAGETABLE) {
1207 		if ((fault_type & VM_PROT_WRITE) == 0)
1208 			fs->prot &= ~VM_PROT_WRITE;
1209 	}
1210 
1211 	if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
1212 	    pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
1213 		if ((fault_type & VM_PROT_WRITE) == 0)
1214 			fs->prot &= ~VM_PROT_WRITE;
1215 	}
1216 
1217 	/* vm_object_hold(fs->object); implied b/c object == first_object */
1218 
1219 	for (;;) {
1220 		/*
1221 		 * The entire backing chain from first_object to object
1222 		 * inclusive is chainlocked.
1223 		 *
1224 		 * If the object is dead, we stop here
1225 		 */
1226 		if (fs->object->flags & OBJ_DEAD) {
1227 			vm_object_pip_wakeup(fs->first_object);
1228 			vm_object_chain_release_all(fs->first_object,
1229 						    fs->object);
1230 			if (fs->object != fs->first_object)
1231 				vm_object_drop(fs->object);
1232 			unlock_and_deallocate(fs);
1233 			return (KERN_PROTECTION_FAILURE);
1234 		}
1235 
1236 		/*
1237 		 * See if the page is resident.  Wait/Retry if the page is
1238 		 * busy (lots of stuff may have changed so we can't continue
1239 		 * in that case).
1240 		 *
1241 		 * We can theoretically allow the soft-busy case on a read
1242 		 * fault if the page is marked valid, but since such
1243 		 * pages are typically already pmap'd, putting that
1244 		 * special case in might be more effort than it is
1245 		 * worth.  We cannot under any circumstances mess
1246 		 * around with a vm_page_t->busy page except, perhaps,
1247 		 * to pmap it.
1248 		 */
1249 		fs->m = vm_page_lookup_busy_try(fs->object, pindex,
1250 						TRUE, &error);
1251 		if (error) {
1252 			vm_object_pip_wakeup(fs->first_object);
1253 			vm_object_chain_release_all(fs->first_object,
1254 						    fs->object);
1255 			if (fs->object != fs->first_object)
1256 				vm_object_drop(fs->object);
1257 			unlock_things(fs);
1258 			vm_page_sleep_busy(fs->m, TRUE, "vmpfw");
1259 			mycpu->gd_cnt.v_intrans++;
1260 			/*vm_object_deallocate(fs->first_object);*/
1261 			/*fs->first_object = NULL;*/
1262 			fs->m = NULL;
1263 			return (KERN_TRY_AGAIN);
1264 		}
1265 		if (fs->m) {
1266 			/*
1267 			 * The page is busied for us.
1268 			 *
1269 			 * If reactivating a page from PQ_CACHE we may have
1270 			 * to rate-limit.
1271 			 */
1272 			int queue = fs->m->queue;
1273 			vm_page_unqueue_nowakeup(fs->m);
1274 
1275 			if ((queue - fs->m->pc) == PQ_CACHE &&
1276 			    vm_page_count_severe()) {
1277 				vm_page_activate(fs->m);
1278 				vm_page_wakeup(fs->m);
1279 				fs->m = NULL;
1280 				vm_object_pip_wakeup(fs->first_object);
1281 				vm_object_chain_release_all(fs->first_object,
1282 							    fs->object);
1283 				if (fs->object != fs->first_object)
1284 					vm_object_drop(fs->object);
1285 				unlock_and_deallocate(fs);
1286 				if (allow_nofault == 0 ||
1287 				    (curthread->td_flags & TDF_NOFAULT) == 0) {
1288 					vm_wait_pfault();
1289 				}
1290 				return (KERN_TRY_AGAIN);
1291 			}
1292 
1293 			/*
1294 			 * If it still isn't completely valid (readable),
1295 			 * or if a read-ahead-mark is set on the VM page,
1296 			 * jump to readrest, else we found the page and
1297 			 * can return.
1298 			 *
1299 			 * We can release the spl once we have marked the
1300 			 * page busy.
1301 			 */
1302 			if (fs->m->object != &kernel_object) {
1303 				if ((fs->m->valid & VM_PAGE_BITS_ALL) !=
1304 				    VM_PAGE_BITS_ALL) {
1305 					goto readrest;
1306 				}
1307 				if (fs->m->flags & PG_RAM) {
1308 					if (debug_cluster)
1309 						kprintf("R");
1310 					vm_page_flag_clear(fs->m, PG_RAM);
1311 					goto readrest;
1312 				}
1313 			}
1314 			break; /* break to PAGE HAS BEEN FOUND */
1315 		}
1316 
1317 		/*
1318 		 * Page is not resident.  If this is the search termination
1319 		 * case or the pager might contain the page, allocate a new page.
1320 		 */
1321 		if (TRYPAGER(fs) || fs->object == fs->first_object) {
1322 			/*
1323 			 * Allocating, must be exclusive.
1324 			 */
1325 			if (fs->object == fs->first_object &&
1326 			    fs->first_shared) {
1327 				fs->first_shared = 0;
1328 				vm_object_pip_wakeup(fs->first_object);
1329 				vm_object_chain_release_all(fs->first_object,
1330 							    fs->object);
1331 				if (fs->object != fs->first_object)
1332 					vm_object_drop(fs->object);
1333 				unlock_and_deallocate(fs);
1334 				return (KERN_TRY_AGAIN);
1335 			}
1336 			if (fs->object != fs->first_object &&
1337 			    fs->shared) {
1338 				fs->first_shared = 0;
1339 				fs->shared = 0;
1340 				vm_object_pip_wakeup(fs->first_object);
1341 				vm_object_chain_release_all(fs->first_object,
1342 							    fs->object);
1343 				if (fs->object != fs->first_object)
1344 					vm_object_drop(fs->object);
1345 				unlock_and_deallocate(fs);
1346 				return (KERN_TRY_AGAIN);
1347 			}
1348 
1349 			/*
1350 			 * If the page is beyond the object size we fail
1351 			 */
1352 			if (pindex >= fs->object->size) {
1353 				vm_object_pip_wakeup(fs->first_object);
1354 				vm_object_chain_release_all(fs->first_object,
1355 							    fs->object);
1356 				if (fs->object != fs->first_object)
1357 					vm_object_drop(fs->object);
1358 				unlock_and_deallocate(fs);
1359 				return (KERN_PROTECTION_FAILURE);
1360 			}
1361 
1362 			/*
1363 			 * Allocate a new page for this object/offset pair.
1364 			 *
1365 			 * It is possible for the allocation to race, so
1366 			 * handle the case.
1367 			 */
1368 			fs->m = NULL;
1369 			if (!vm_page_count_severe()) {
1370 				fs->m = vm_page_alloc(fs->object, pindex,
1371 				    ((fs->vp || fs->object->backing_object) ?
1372 					VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL :
1373 					VM_ALLOC_NULL_OK | VM_ALLOC_NORMAL |
1374 					VM_ALLOC_USE_GD | VM_ALLOC_ZERO));
1375 			}
1376 			if (fs->m == NULL) {
1377 				vm_object_pip_wakeup(fs->first_object);
1378 				vm_object_chain_release_all(fs->first_object,
1379 							    fs->object);
1380 				if (fs->object != fs->first_object)
1381 					vm_object_drop(fs->object);
1382 				unlock_and_deallocate(fs);
1383 				if (allow_nofault == 0 ||
1384 				    (curthread->td_flags & TDF_NOFAULT) == 0) {
1385 					vm_wait_pfault();
1386 				}
1387 				return (KERN_TRY_AGAIN);
1388 			}
1389 
1390 			/*
1391 			 * Fall through to readrest.  We have a new page which
1392 			 * will have to be paged (since m->valid will be 0).
1393 			 */
1394 		}
1395 
1396 readrest:
1397 		/*
1398 		 * We have found an invalid or partially valid page, a
1399 		 * page with a read-ahead mark which might be partially or
1400 		 * fully valid (and maybe dirty too), or we have allocated
1401 		 * a new page.
1402 		 *
1403 		 * Attempt to fault-in the page if there is a chance that the
1404 		 * pager has it, and potentially fault in additional pages
1405 		 * at the same time.
1406 		 *
1407 		 * If TRYPAGER is true then fs.m will be non-NULL and busied
1408 		 * for us.
1409 		 */
1410 		if (TRYPAGER(fs)) {
1411 			int rv;
1412 			int seqaccess;
1413 			u_char behavior = vm_map_entry_behavior(fs->entry);
1414 
1415 			if (behavior == MAP_ENTRY_BEHAV_RANDOM)
1416 				seqaccess = 0;
1417 			else
1418 				seqaccess = -1;
1419 
1420 			/*
1421 			 * Doing I/O may synchronously insert additional
1422 			 * pages so we can't be shared at this point either.
1423 			 *
1424 			 * NOTE: We can't free fs->m here in the allocated
1425 			 *	 case (fs->object != fs->first_object) as
1426 			 *	 this would require an exclusively locked
1427 			 *	 VM object.
1428 			 */
1429 			if (fs->object == fs->first_object &&
1430 			    fs->first_shared) {
1431 				vm_page_deactivate(fs->m);
1432 				vm_page_wakeup(fs->m);
1433 				fs->m = NULL;
1434 				fs->first_shared = 0;
1435 				vm_object_pip_wakeup(fs->first_object);
1436 				vm_object_chain_release_all(fs->first_object,
1437 							    fs->object);
1438 				if (fs->object != fs->first_object)
1439 					vm_object_drop(fs->object);
1440 				unlock_and_deallocate(fs);
1441 				return (KERN_TRY_AGAIN);
1442 			}
1443 			if (fs->object != fs->first_object &&
1444 			    fs->shared) {
1445 				vm_page_deactivate(fs->m);
1446 				vm_page_wakeup(fs->m);
1447 				fs->m = NULL;
1448 				fs->first_shared = 0;
1449 				fs->shared = 0;
1450 				vm_object_pip_wakeup(fs->first_object);
1451 				vm_object_chain_release_all(fs->first_object,
1452 							    fs->object);
1453 				if (fs->object != fs->first_object)
1454 					vm_object_drop(fs->object);
1455 				unlock_and_deallocate(fs);
1456 				return (KERN_TRY_AGAIN);
1457 			}
1458 
1459 			/*
1460 			 * Avoid deadlocking against the map when doing I/O.
1461 			 * We still hold fs.object and the page is PG_BUSY'd.
1462 			 *
1463 			 * NOTE: Once unlocked, fs->entry can become stale
1464 			 *	 so this will NULL it out.
1465 			 *
1466 			 * NOTE: fs->entry is invalid until we relock the
1467 			 *	 map and verify that the timestamp has not
1468 			 *	 changed.
1469 			 */
1470 			unlock_map(fs);
1471 
1472 			/*
1473 			 * Acquire the page data.  We still hold a ref on
1474 			 * fs.object and the page has been PG_BUSY'd.
1475 			 *
1476 			 * The pager may replace the page (for example, in
1477 			 * order to enter a fictitious page into the
1478 			 * object).  If it does so it is responsible for
1479 			 * cleaning up the passed page and properly setting
1480 			 * the new page PG_BUSY.
1481 			 *
1482 			 * If we got here through a PG_RAM read-ahead
1483 			 * mark, the page may be partially dirty and thus
1484 			 * not freeable.  Don't bother checking to see
1485 			 * if the pager has the page because we can't free
1486 			 * it anyway.  We have to depend on the get_page
1487 			 * operation filling in any gaps whether there is
1488 			 * backing store or not.
1489 			 */
1490 			rv = vm_pager_get_page(fs->object, &fs->m, seqaccess);
1491 
1492 			if (rv == VM_PAGER_OK) {
1493 				/*
1494 				 * Relookup in case pager changed page. Pager
1495 				 * is responsible for disposition of old page
1496 				 * if moved.
1497 				 *
1498 				 * XXX other code segments do relookups too.
1499 				 * It's a bad abstraction that needs to be
1500 				 * fixed/removed.
1501 				 */
1502 				fs->m = vm_page_lookup(fs->object, pindex);
1503 				if (fs->m == NULL) {
1504 					vm_object_pip_wakeup(fs->first_object);
1505 					vm_object_chain_release_all(
1506 						fs->first_object, fs->object);
1507 					if (fs->object != fs->first_object)
1508 						vm_object_drop(fs->object);
1509 					unlock_and_deallocate(fs);
1510 					return (KERN_TRY_AGAIN);
1511 				}
1512 				++fs->hardfault;
1513 				break; /* break to PAGE HAS BEEN FOUND */
1514 			}
1515 
1516 			/*
1517 			 * Remove the bogus page (which does not exist at this
1518 			 * object/offset); before doing so, we must get back
1519 			 * our object lock to preserve our invariant.
1520 			 *
1521 			 * Also wake up any other process that may want to bring
1522 			 * in this page.
1523 			 *
1524 			 * If this is the top-level object, we must leave the
1525 			 * busy page to prevent another process from rushing
1526 			 * past us, and inserting the page in that object at
1527 			 * the same time that we are.
1528 			 */
1529 			if (rv == VM_PAGER_ERROR) {
1530 				if (curproc) {
1531 					kprintf("vm_fault: pager read error, "
1532 						"pid %d (%s)\n",
1533 						curproc->p_pid,
1534 						curproc->p_comm);
1535 				} else {
1536 					kprintf("vm_fault: pager read error, "
1537 						"thread %p (%s)\n",
1538 						curthread,
1539 					curthread->td_comm);
1540 				}
1541 			}
1542 
1543 			/*
1544 			 * Data outside the range of the pager or an I/O error
1545 			 *
1546 			 * The page may have been wired during the pagein,
1547 			 * e.g. by the buffer cache, and cannot simply be
1548 			 * freed.  Call vnode_pager_freepage() to deal with it.
1549 			 *
1550 			 * Also note that we cannot free the page if we are
1551 			 * holding the related object shared. XXX not sure
1552 			 * what to do in that case.
1553 			 */
1554 			if (fs->object != fs->first_object) {
1555 				vnode_pager_freepage(fs->m);
1556 				fs->m = NULL;
1557 				/*
1558 				 * XXX - we cannot just fall out at this
1559 				 * point, m has been freed and is invalid!
1560 				 */
1561 			}
1562 			/*
1563 			 * XXX - the check for kernel_map is a kludge to work
1564 			 * around having the machine panic on a kernel space
1565 			 * fault w/ I/O error.
1566 			 */
1567 			if (((fs->map != &kernel_map) &&
1568 			    (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) {
1569 				if (fs->m) {
1570 					if (fs->first_shared) {
1571 						vm_page_deactivate(fs->m);
1572 						vm_page_wakeup(fs->m);
1573 					} else {
1574 						vnode_pager_freepage(fs->m);
1575 					}
1576 					fs->m = NULL;
1577 				}
1578 				vm_object_pip_wakeup(fs->first_object);
1579 				vm_object_chain_release_all(fs->first_object,
1580 							    fs->object);
1581 				if (fs->object != fs->first_object)
1582 					vm_object_drop(fs->object);
1583 				unlock_and_deallocate(fs);
1584 				if (rv == VM_PAGER_ERROR)
1585 					return (KERN_FAILURE);
1586 				else
1587 					return (KERN_PROTECTION_FAILURE);
1588 				/* NOT REACHED */
1589 			}
1590 		}
1591 
1592 		/*
1593 		 * We get here if the object has a default pager (or unwiring)
1594 		 * or the pager doesn't have the page.
1595 		 *
1596 		 * fs->first_m will be used for the COW unless we find a
1597 		 * deeper page to be mapped read-only, in which case the
1598 		 * unlock*(fs) will free first_m.
1599 		 */
1600 		if (fs->object == fs->first_object)
1601 			fs->first_m = fs->m;
1602 
1603 		/*
1604 		 * Move on to the next object.  The chain lock should prevent
1605 		 * the backing_object from getting ripped out from under us.
1606 		 *
1607 		 * The object lock for the next object is governed by
1608 		 * fs->shared.
1609 		 */
1610 		if ((next_object = fs->object->backing_object) != NULL) {
1611 			if (fs->shared)
1612 				vm_object_hold_shared(next_object);
1613 			else
1614 				vm_object_hold(next_object);
1615 			vm_object_chain_acquire(next_object, fs->shared);
1616 			KKASSERT(next_object == fs->object->backing_object);
1617 			pindex += OFF_TO_IDX(fs->object->backing_object_offset);
1618 		}
1619 
1620 		if (next_object == NULL) {
1621 			/*
1622 			 * If there's no object left, fill the page in the top
1623 			 * object with zeros.
1624 			 */
1625 			if (fs->object != fs->first_object) {
1626 #if 0
1627 				if (fs->first_object->backing_object !=
1628 				    fs->object) {
1629 					vm_object_hold(fs->first_object->backing_object);
1630 				}
1631 #endif
1632 				vm_object_chain_release_all(
1633 					fs->first_object->backing_object,
1634 					fs->object);
1635 #if 0
1636 				if (fs->first_object->backing_object !=
1637 				    fs->object) {
1638 					vm_object_drop(fs->first_object->backing_object);
1639 				}
1640 #endif
1641 				vm_object_pip_wakeup(fs->object);
1642 				vm_object_drop(fs->object);
1643 				fs->object = fs->first_object;
1644 				pindex = first_pindex;
1645 				fs->m = fs->first_m;
1646 			}
1647 			fs->first_m = NULL;
1648 
1649 			/*
1650 			 * Zero the page if necessary and mark it valid.
1651 			 */
1652 			if ((fs->m->flags & PG_ZERO) == 0) {
1653 				vm_page_zero_fill(fs->m);
1654 			} else {
1655 #ifdef PMAP_DEBUG
1656 				pmap_page_assertzero(VM_PAGE_TO_PHYS(fs->m));
1657 #endif
1658 				vm_page_flag_clear(fs->m, PG_ZERO);
1659 				mycpu->gd_cnt.v_ozfod++;
1660 			}
1661 			mycpu->gd_cnt.v_zfod++;
1662 			fs->m->valid = VM_PAGE_BITS_ALL;
1663 			break;	/* break to PAGE HAS BEEN FOUND */
1664 		}
1665 		if (fs->object != fs->first_object) {
1666 			vm_object_pip_wakeup(fs->object);
1667 			vm_object_lock_swap();
1668 			vm_object_drop(fs->object);
1669 		}
1670 		KASSERT(fs->object != next_object,
1671 			("object loop %p", next_object));
1672 		fs->object = next_object;
1673 		vm_object_pip_add(fs->object, 1);
1674 	}
1675 
1676 	/*
1677 	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
1678 	 * is held.]
1679 	 *
1680 	 * object still held.
1681 	 *
1682 	 * local shared variable may be different from fs->shared.
1683 	 *
1684 	 * If the page is being written, but isn't already owned by the
1685 	 * top-level object, we have to copy it into a new page owned by the
1686 	 * top-level object.
1687 	 */
1688 	KASSERT((fs->m->flags & PG_BUSY) != 0,
1689 		("vm_fault: not busy after main loop"));
1690 
1691 	if (fs->object != fs->first_object) {
1692 		/*
1693 		 * We only really need to copy if we want to write it.
1694 		 */
1695 		if (fault_type & VM_PROT_WRITE) {
1696 			/*
1697 			 * This allows pages to be virtually copied from a
1698 			 * backing_object into the first_object, where the
1699 			 * backing object has no other refs to it, and cannot
1700 			 * gain any more refs.  Instead of a bcopy, we just
1701 			 * move the page from the backing object to the
1702 			 * first object.  Note that we must mark the page
1703 			 * dirty in the first object so that it will go out
1704 			 * to swap when needed.
1705 			 */
1706 			if (
1707 				/*
1708 				 * Must be holding exclusive locks
1709 				 */
1710 				fs->first_shared == 0 &&
1711 				fs->shared == 0 &&
1712 				/*
1713 				 * Map, if present, has not changed
1714 				 */
1715 				(fs->map == NULL ||
1716 				fs->map_generation == fs->map->timestamp) &&
1717 				/*
1718 				 * Only one shadow object
1719 				 */
1720 				(fs->object->shadow_count == 1) &&
1721 				/*
1722 				 * No COW refs, except us
1723 				 */
1724 				(fs->object->ref_count == 1) &&
1725 				/*
1726 				 * No one else can look this object up
1727 				 */
1728 				(fs->object->handle == NULL) &&
1729 				/*
1730 				 * No other ways to look the object up
1731 				 */
1732 				((fs->object->type == OBJT_DEFAULT) ||
1733 				 (fs->object->type == OBJT_SWAP)) &&
1734 				/*
1735 				 * We don't chase down the shadow chain
1736 				 */
1737 				(fs->object == fs->first_object->backing_object) &&
1738 
1739 				/*
1740 				 * grab the lock if we need to
1741 				 */
1742 				(fs->lookup_still_valid ||
1743 				 fs->map == NULL ||
1744 				 lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT) == 0)
1745 			    ) {
1746 				/*
1747 				 * (first_m) and (m) are both busied.  We have
1748 				 * to move (m) into (first_m)'s object/pindex
1749 				 * in an atomic fashion, then free (first_m).
1750 				 *
1751 				 * first_object is held so the second remove
1752 				 * followed by the rename should wind
1753 				 * up being atomic.  vm_page_free() might
1754 				 * block so we don't do it until after the
1755 				 * rename.
1756 				 */
1757 				fs->lookup_still_valid = 1;
1758 				vm_page_protect(fs->first_m, VM_PROT_NONE);
1759 				vm_page_remove(fs->first_m);
1760 				vm_page_rename(fs->m, fs->first_object,
1761 					       first_pindex);
1762 				vm_page_free(fs->first_m);
1763 				fs->first_m = fs->m;
1764 				fs->m = NULL;
1765 				mycpu->gd_cnt.v_cow_optim++;
1766 			} else {
1767 				/*
1768 				 * Oh, well, let's copy it.
1769 				 *
1770 				 * Why are we unmapping the original page
1771 				 * here?  Well, in short, not all accessors
1772 				 * of user memory go through the pmap.  The
1773 				 * procfs code doesn't access user memory
1774 				 * via a local pmap, so vm_fault_page*()
1775 				 * can't call pmap_enter().  And the umtx*()
1776 				 * code may modify the COW'd page via a DMAP
1777 				 * or kernel mapping and not via the pmap,
1778 				 * leaving the original page still mapped
1779 				 * read-only into the pmap.
1780 				 *
1781 				 * So we have to remove the page from at
1782 				 * least the current pmap if it is in it.
1783 				 * Just remove it from all pmaps.
1784 				 */
1785 				KKASSERT(fs->first_shared == 0);
1786 				vm_page_copy(fs->m, fs->first_m);
1787 				vm_page_protect(fs->m, VM_PROT_NONE);
1788 				vm_page_event(fs->m, VMEVENT_COW);
1789 			}
1790 
1791 			/*
1792 			 * We no longer need the old page or object.
1793 			 */
1794 			if (fs->m)
1795 				release_page(fs);
1796 
1797 			/*
1798 			 * We intend to revert to first_object, undo the
1799 			 * chain lock through to that.
1800 			 */
1801 #if 0
1802 			if (fs->first_object->backing_object != fs->object)
1803 				vm_object_hold(fs->first_object->backing_object);
1804 #endif
1805 			vm_object_chain_release_all(
1806 					fs->first_object->backing_object,
1807 					fs->object);
1808 #if 0
1809 			if (fs->first_object->backing_object != fs->object)
1810 				vm_object_drop(fs->first_object->backing_object);
1811 #endif
1812 
1813 			/*
1814 			 * fs->object != fs->first_object due to above
1815 			 * conditional
1816 			 */
1817 			vm_object_pip_wakeup(fs->object);
1818 			vm_object_drop(fs->object);
1819 
1820 			/*
1821 			 * Only use the new page below...
1822 			 */
1823 			mycpu->gd_cnt.v_cow_faults++;
1824 			fs->m = fs->first_m;
1825 			fs->object = fs->first_object;
1826 			pindex = first_pindex;
1827 		} else {
1828 			/*
1829 			 * If it wasn't a write fault avoid having to copy
1830 			 * the page by mapping it read-only.
1831 			 */
1832 			fs->prot &= ~VM_PROT_WRITE;
1833 		}
1834 	}
1835 
1836 	/*
1837 	 * Relock the map if necessary, then check the generation count.
1838 	 * relock_map() will update fs->timestamp to account for the
1839 	 * relocking if necessary.
1840 	 *
1841 	 * If the count has changed after relocking then all sorts of
1842 	 * crap may have happened and we have to retry.
1843 	 *
1844 	 * NOTE: The relock_map() can fail due to a deadlock against
1845 	 *	 the vm_page we are holding BUSY.
1846 	 */
1847 	if (fs->lookup_still_valid == FALSE && fs->map) {
1848 		if (relock_map(fs) ||
1849 		    fs->map->timestamp != fs->map_generation) {
1850 			release_page(fs);
1851 			vm_object_pip_wakeup(fs->first_object);
1852 			vm_object_chain_release_all(fs->first_object,
1853 						    fs->object);
1854 			if (fs->object != fs->first_object)
1855 				vm_object_drop(fs->object);
1856 			unlock_and_deallocate(fs);
1857 			return (KERN_TRY_AGAIN);
1858 		}
1859 	}
1860 
1861 	/*
1862 	 * If the fault is a write, we know that this page is being
1863 	 * written NOW so dirty it explicitly to save on pmap_is_modified()
1864 	 * calls later.
1865 	 *
1866 	 * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
1867 	 * if the page is already dirty to prevent data written with
1868 	 * the expectation of being synced from not being synced.
1869 	 * Likewise if this entry does not request NOSYNC then make
1870 	 * sure the page isn't marked NOSYNC.  Applications sharing
1871 	 * data should use the same flags to avoid ping ponging.
1872 	 *
1873 	 * Also tell the backing pager, if any, that it should remove
1874 	 * any swap backing since the page is now dirty.
1875 	 */
1876 	vm_page_activate(fs->m);
1877 	if (fs->prot & VM_PROT_WRITE) {
1878 		vm_object_set_writeable_dirty(fs->m->object);
1879 		vm_set_nosync(fs->m, fs->entry);
1880 		if (fs->fault_flags & VM_FAULT_DIRTY) {
1881 			vm_page_dirty(fs->m);
1882 			swap_pager_unswapped(fs->m);
1883 		}
1884 	}
1885 
1886 	vm_object_pip_wakeup(fs->first_object);
1887 	vm_object_chain_release_all(fs->first_object, fs->object);
1888 	if (fs->object != fs->first_object)
1889 		vm_object_drop(fs->object);
1890 
1891 	/*
1892 	 * Page had better still be busy.  We are still locked up and
1893 	 * fs->object will have another PIP reference if it is not equal
1894 	 * to fs->first_object.
1895 	 */
1896 	KASSERT(fs->m->flags & PG_BUSY,
1897 		("vm_fault: page %p not busy!", fs->m));
1898 
1899 	/*
1900 	 * Sanity check: page must be completely valid or it is not fit to
1901 	 * map into user space.  vm_pager_get_pages() ensures this.
1902 	 */
1903 	if (fs->m->valid != VM_PAGE_BITS_ALL) {
1904 		vm_page_zero_invalid(fs->m, TRUE);
1905 		kprintf("Warning: page %p partially invalid on fault\n", fs->m);
1906 	}
1907 	vm_page_flag_clear(fs->m, PG_ZERO);
1908 
1909 	return (KERN_SUCCESS);
1910 }
1911 
1912 /*
1913  * Hold each of the physical pages that are mapped by the specified range of
1914  * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
1915  * and allow the specified types of access, "prot".  If all of the implied
1916  * pages are successfully held, then the number of held pages is returned
1917  * together with pointers to those pages in the array "ma".  However, if any
1918  * of the pages cannot be held, -1 is returned.
1919  */
1920 int
1921 vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
1922     vm_prot_t prot, vm_page_t *ma, int max_count)
1923 {
1924 	vm_offset_t start, end;
1925 	int i, npages, error;
1926 
1927 	start = trunc_page(addr);
1928 	end = round_page(addr + len);
1929 
1930 	npages = howmany(end - start, PAGE_SIZE);
1931 
1932 	if (npages > max_count)
1933 		return -1;
1934 
1935 	for (i = 0; i < npages; i++) {
1936 		// XXX error handling: vm_fault_page_quick() failures are ignored
1937 		ma[i] = vm_fault_page_quick(start + (i * PAGE_SIZE),
1938 			prot,
1939 			&error);
1940 	}
1941 
1942 	return npages;
1943 }
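
/*
 * Example (hypothetical caller sketch, not taken from this file): hold
 * the pages backing a user buffer before operating on them directly,
 * then drop the holds when done.  The map, buffer, and sizes below are
 * assumed for illustration only.
 *
 *	vm_page_t ma[16];
 *	int n, i;
 *
 *	n = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
 *				      uaddr, len, VM_PROT_READ, ma, 16);
 *	if (n < 0)
 *		return (EFAULT);
 *	... access the held pages ...
 *	for (i = 0; i < n; i++)
 *		vm_page_unhold(ma[i]);
 */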
1944 
1945 /*
1946  * Wire down a range of virtual addresses in a map.  The entry in question
1947  * should be marked in-transition and the map must be locked.  We must
1948  * release the map temporarily while faulting-in the page to avoid a
1949  * deadlock.  Note that the entry may be clipped while we are blocked but
1950  * will never be freed.
1951  *
1952  * No requirements.
1953  */
1954 int
1955 vm_fault_wire(vm_map_t map, vm_map_entry_t entry, boolean_t user_wire)
1956 {
1957 	boolean_t fictitious;
1958 	vm_offset_t start;
1959 	vm_offset_t end;
1960 	vm_offset_t va;
1961 	vm_paddr_t pa;
1962 	vm_page_t m;
1963 	pmap_t pmap;
1964 	int rv;
1965 
1966 	lwkt_gettoken(&map->token);
1967 
1968 	pmap = vm_map_pmap(map);
1969 	start = entry->start;
1970 	end = entry->end;
1971 	fictitious = entry->object.vm_object &&
1972 			((entry->object.vm_object->type == OBJT_DEVICE) ||
1973 			 (entry->object.vm_object->type == OBJT_MGTDEVICE));
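	/*
	 * MAP_ENTRY_KSTACK entries are assumed to reserve their first
	 * page as a guard page, so it is not wired here.
	 */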
1974 	if (entry->eflags & MAP_ENTRY_KSTACK)
1975 		start += PAGE_SIZE;
1976 	map->timestamp++;
1977 	vm_map_unlock(map);
1978 
1979 	/*
1980 	 * We simulate a fault to get the page and enter it in the physical
1981 	 * map.
1982 	 */
1983 	for (va = start; va < end; va += PAGE_SIZE) {
1984 		if (user_wire) {
1985 			rv = vm_fault(map, va, VM_PROT_READ,
1986 					VM_FAULT_USER_WIRE);
1987 		} else {
1988 			rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
1989 					VM_FAULT_CHANGE_WIRING);
1990 		}
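		/*
		 * The fault failed: back out the wirings already made on
		 * the preceding pages before returning the error.
		 */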
1991 		if (rv) {
1992 			while (va > start) {
1993 				va -= PAGE_SIZE;
1994 				if ((pa = pmap_extract(pmap, va)) == 0)
1995 					continue;
1996 				pmap_change_wiring(pmap, va, FALSE, entry);
1997 				if (!fictitious) {
1998 					m = PHYS_TO_VM_PAGE(pa);
1999 					vm_page_busy_wait(m, FALSE, "vmwrpg");
2000 					vm_page_unwire(m, 1);
2001 					vm_page_wakeup(m);
2002 				}
2003 			}
2004 			goto done;
2005 		}
2006 	}
2007 	rv = KERN_SUCCESS;
2008 done:
2009 	vm_map_lock(map);
2010 	lwkt_reltoken(&map->token);
2011 	return (rv);
2012 }
2013 
2014 /*
2015  * Unwire a range of virtual addresses in a map.  The map should be
2016  * locked.
2017  */
2018 void
2019 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry)
2020 {
2021 	boolean_t fictitious;
2022 	vm_offset_t start;
2023 	vm_offset_t end;
2024 	vm_offset_t va;
2025 	vm_paddr_t pa;
2026 	vm_page_t m;
2027 	pmap_t pmap;
2028 
2029 	lwkt_gettoken(&map->token);
2030 
2031 	pmap = vm_map_pmap(map);
2032 	start = entry->start;
2033 	end = entry->end;
2034 	fictitious = entry->object.vm_object &&
2035 			((entry->object.vm_object->type == OBJT_DEVICE) ||
2036 			 (entry->object.vm_object->type == OBJT_MGTDEVICE));
2037 	if (entry->eflags & MAP_ENTRY_KSTACK)
2038 		start += PAGE_SIZE;
2039 
2040 	/*
2041 	 * Since the pages are wired down, we must be able to get their
2042 	 * mappings from the physical map system.
2043 	 */
2044 	for (va = start; va < end; va += PAGE_SIZE) {
2045 		pa = pmap_extract(pmap, va);
2046 		if (pa != 0) {
2047 			pmap_change_wiring(pmap, va, FALSE, entry);
2048 			if (!fictitious) {
2049 				m = PHYS_TO_VM_PAGE(pa);
2050 				vm_page_busy_wait(m, FALSE, "vmwupg");
2051 				vm_page_unwire(m, 1);
2052 				vm_page_wakeup(m);
2053 			}
2054 		}
2055 	}
2056 	lwkt_reltoken(&map->token);
2057 }
2058 
2059 /*
2060  * Copy all of the pages from a wired-down map entry to another.
2061  *
2062  * The source and destination maps must be locked for write.
2063  * The source and destination map tokens must be held.
2064  * The source map entry must be wired down (or be a sharing map
2065  * entry corresponding to a main map entry that is wired down).
2066  *
2067  * No other requirements.
2068  *
2069  * XXX do segment optimization
2070  */
2071 void
2072 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
2073 		    vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
2074 {
2075 	vm_object_t dst_object;
2076 	vm_object_t src_object;
2077 	vm_ooffset_t dst_offset;
2078 	vm_ooffset_t src_offset;
2079 	vm_prot_t prot;
2080 	vm_offset_t vaddr;
2081 	vm_page_t dst_m;
2082 	vm_page_t src_m;
2083 
2084 	src_object = src_entry->object.vm_object;
2085 	src_offset = src_entry->offset;
2086 
2087 	/*
2088 	 * Create the top-level object for the destination entry. (Doesn't
2089 	 * actually shadow anything - we copy the pages directly.)
2090 	 */
2091 	vm_map_entry_allocate_object(dst_entry);
2092 	dst_object = dst_entry->object.vm_object;
2093 
2094 	prot = dst_entry->max_protection;
2095 
2096 	/*
2097 	 * Loop through all of the pages in the entry's range, copying each
2098 	 * one from the source object (it should be there) to the destination
2099 	 * object.
2100 	 */
2101 	vm_object_hold(src_object);
2102 	vm_object_hold(dst_object);
2103 	for (vaddr = dst_entry->start, dst_offset = 0;
2104 	    vaddr < dst_entry->end;
2105 	    vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
2106 
2107 		/*
2108 		 * Allocate a page in the destination object
2109 		 */
2110 		do {
2111 			dst_m = vm_page_alloc(dst_object,
2112 					      OFF_TO_IDX(dst_offset),
2113 					      VM_ALLOC_NORMAL);
2114 			if (dst_m == NULL) {
2115 				vm_wait(0);
2116 			}
2117 		} while (dst_m == NULL);
2118 
2119 		/*
2120 		 * Find the page in the source object, and copy it in.
2121 		 * (Because the source is wired down, the page will be in
2122 		 * memory.)
2123 		 */
2124 		src_m = vm_page_lookup(src_object,
2125 				       OFF_TO_IDX(dst_offset + src_offset));
2126 		if (src_m == NULL)
2127 			panic("vm_fault_copy_wired: page missing");
2128 
2129 		vm_page_copy(src_m, dst_m);
2130 		vm_page_event(src_m, VMEVENT_COW);
2131 
2132 		/*
2133 		 * Enter it in the pmap...
2134 		 */
2135 
2136 		vm_page_flag_clear(dst_m, PG_ZERO);
2137 		pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE, dst_entry);
2138 
2139 		/*
2140 		 * Mark it no longer busy, and put it on the active list.
2141 		 */
2142 		vm_page_activate(dst_m);
2143 		vm_page_wakeup(dst_m);
2144 	}
2145 	vm_object_drop(dst_object);
2146 	vm_object_drop(src_object);
2147 }
2148 
2149 #if 0
2150 
2151 /*
2152  * This routine checks around the requested page for other pages that
2153  * might be able to be faulted in.  This routine brackets the viable
2154  * pages for the pages to be paged in.
2155  *
2156  * Inputs:
2157  *	m, rbehind, rahead
2158  *
2159  * Outputs:
2160  *  marray (array of vm_page_t), reqpage (index of requested page)
2161  *
2162  * Return value:
2163  *  number of pages in marray
2164  */
2165 static int
2166 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
2167 			  vm_page_t *marray, int *reqpage)
2168 {
2169 	int i,j;
2170 	vm_object_t object;
2171 	vm_pindex_t pindex, startpindex, endpindex, tpindex;
2172 	vm_page_t rtm;
2173 	int cbehind, cahead;
2174 
2175 	object = m->object;
2176 	pindex = m->pindex;
2177 
2178 	/*
2179 	 * we don't fault-ahead for device pager
2180 	 */
2181 	if ((object->type == OBJT_DEVICE) ||
2182 	    (object->type == OBJT_MGTDEVICE)) {
2183 		*reqpage = 0;
2184 		marray[0] = m;
2185 		return 1;
2186 	}
2187 
2188 	/*
2189 	 * if the requested page is not available, then give up now
2190 	 */
2191 	if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
2192 		*reqpage = 0;	/* not used by caller, fix compiler warn */
2193 		return 0;
2194 	}
2195 
2196 	if ((cbehind == 0) && (cahead == 0)) {
2197 		*reqpage = 0;
2198 		marray[0] = m;
2199 		return 1;
2200 	}
2201 
2202 	if (rahead > cahead) {
2203 		rahead = cahead;
2204 	}
2205 
2206 	if (rbehind > cbehind) {
2207 		rbehind = cbehind;
2208 	}
2209 
2210 	/*
2211 	 * Do not do any readahead if we have insufficient free memory.
2212 	 *
2213 	 * XXX the code was broken/disabled before and has instability
2214 	 * with this conditional fixed, so shortcut for now.
2215 	 */
2216 	if (burst_fault == 0 || vm_page_count_severe()) {
2217 		marray[0] = m;
2218 		*reqpage = 0;
2219 		return 1;
2220 	}
2221 
2222 	/*
2223 	 * scan backward for the read behind pages -- in memory
2224 	 *
2225 	 * Assume that if the page is not found an interrupt will not
2226 	 * create it.  Theoretically interrupts can only remove (busy)
2227 	 * pages, not create new associations.
2228 	 */
2229 	if (pindex > 0) {
2230 		if (rbehind > pindex) {
2231 			rbehind = pindex;
2232 			startpindex = 0;
2233 		} else {
2234 			startpindex = pindex - rbehind;
2235 		}
2236 
2237 		vm_object_hold(object);
2238 		for (tpindex = pindex; tpindex > startpindex; --tpindex) {
2239 			if (vm_page_lookup(object, tpindex - 1))
2240 				break;
2241 		}
2242 
2243 		i = 0;
2244 		while (tpindex < pindex) {
2245 			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM |
2246 							     VM_ALLOC_NULL_OK);
2247 			if (rtm == NULL) {
2248 				for (j = 0; j < i; j++) {
2249 					vm_page_free(marray[j]);
2250 				}
2251 				vm_object_drop(object);
2252 				marray[0] = m;
2253 				*reqpage = 0;
2254 				return 1;
2255 			}
2256 			marray[i] = rtm;
2257 			++i;
2258 			++tpindex;
2259 		}
2260 		vm_object_drop(object);
2261 	} else {
2262 		i = 0;
2263 	}
2264 
2265 	/*
2266 	 * Assign requested page
2267 	 */
2268 	marray[i] = m;
2269 	*reqpage = i;
2270 	++i;
2271 
2272 	/*
2273 	 * Scan forwards for read-ahead pages
2274 	 */
2275 	tpindex = pindex + 1;
2276 	endpindex = tpindex + rahead;
2277 	if (endpindex > object->size)
2278 		endpindex = object->size;
2279 
2280 	vm_object_hold(object);
2281 	while (tpindex < endpindex) {
2282 		if (vm_page_lookup(object, tpindex))
2283 			break;
2284 		rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM |
2285 						     VM_ALLOC_NULL_OK);
2286 		if (rtm == NULL)
2287 			break;
2288 		marray[i] = rtm;
2289 		++i;
2290 		++tpindex;
2291 	}
2292 	vm_object_drop(object);
2293 
2294 	return (i);
2295 }
2296 
2297 #endif
2298 
2299 /*
2300  * vm_prefault() provides a quick way of clustering page faults into a
2301  * process's address space.  It is a "cousin" of pmap_object_init_pt,
2302  * except it runs at page fault time instead of mmap time.
2303  *
2304  * vm.fast_fault	Enables pre-faulting zero-fill pages
2305  *
2306  * vm.prefault_pages	Number of pages (1/2 negative, 1/2 positive) to
2307  *			prefault.  Scan stops in either direction when
2308  *			a page is found to already exist.
2309  *
2310  * This code used to be per-platform pmap_prefault().  It is now
2311  * machine-independent and enhanced to also pre-fault zero-fill pages
2312  * (see vm.fast_fault) as well as make them writable, which greatly
2313  * reduces the number of page faults programs incur.
2314  *
2315  * Application performance when pre-faulting zero-fill pages is heavily
2316  * dependent on the application.  Very tiny applications like /bin/echo
2317  * lose a little performance while applications of any appreciable size
2318  * gain performance.  Prefaulting multiple pages also reduces SMP
2319  * congestion and can improve SMP performance significantly.
2320  *
2321  * NOTE!  prot may allow writing but this only applies to the top level
2322  *	  object.  If we wind up mapping a page extracted from a backing
2323  *	  object we have to make sure it is read-only.
2324  *
2325  * NOTE!  The caller has already handled any COW operations on the
2326  *	  vm_map_entry via the normal fault code.  Do NOT call this
2327  *	  shortcut unless the normal fault code has run on this entry.
2328  *
2329  * The related map must be locked.
2330  * No other requirements.
2331  */
2332 static int vm_prefault_pages = 8;
2333 SYSCTL_INT(_vm, OID_AUTO, prefault_pages, CTLFLAG_RW, &vm_prefault_pages, 0,
2334 	   "Maximum number of pages to pre-fault");
2335 static int vm_fast_fault = 1;
2336 SYSCTL_INT(_vm, OID_AUTO, fast_fault, CTLFLAG_RW, &vm_fast_fault, 0,
2337 	   "Burst fault zero-fill regions");
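
/*
 * Both knobs are CTLFLAG_RW and may be tuned at run time with sysctl(8),
 * e.g. "sysctl vm.prefault_pages=16" (the value 16 is only illustrative).
 */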
2338 
2339 /*
2340  * Set PG_NOSYNC if the map entry indicates so, but only if the page
2341  * is not already dirty by other means.  This will prevent passive
2342  * filesystem syncing as well as 'sync' from writing out the page.
2343  */
2344 static void
2345 vm_set_nosync(vm_page_t m, vm_map_entry_t entry)
2346 {
2347 	if (entry->eflags & MAP_ENTRY_NOSYNC) {
2348 		if (m->dirty == 0)
2349 			vm_page_flag_set(m, PG_NOSYNC);
2350 	} else {
2351 		vm_page_flag_clear(m, PG_NOSYNC);
2352 	}
2353 }
2354 
2355 static void
2356 vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry, int prot,
2357 	    int fault_flags)
2358 {
2359 	struct lwp *lp;
2360 	vm_page_t m;
2361 	vm_offset_t addr;
2362 	vm_pindex_t index;
2363 	vm_pindex_t pindex;
2364 	vm_object_t object;
2365 	int pprot;
2366 	int i;
2367 	int noneg;
2368 	int nopos;
2369 	int maxpages;
2370 
2371 	/*
2372 	 * Get stable max count value, disabled if set to 0
2373 	 */
2374 	maxpages = vm_prefault_pages;
2375 	cpu_ccfence();
2376 	if (maxpages <= 0)
2377 		return;
2378 
2379 	/*
2380 	 * We do not currently prefault mappings that use virtual page
2381 	 * tables.  We do not prefault foreign pmaps.
2382 	 */
2383 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
2384 		return;
2385 	lp = curthread->td_lwp;
2386 	if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
2387 		return;
2388 
2389 	/*
2390 	 * Limit pre-fault count to 1024 pages.
2391 	 */
2392 	if (maxpages > 1024)
2393 		maxpages = 1024;
2394 
2395 	object = entry->object.vm_object;
2396 	KKASSERT(object != NULL);
2397 	KKASSERT(object == entry->object.vm_object);
2398 	vm_object_hold(object);
2399 	vm_object_chain_acquire(object, 0);
2400 
2401 	noneg = 0;
2402 	nopos = 0;
2403 	for (i = 0; i < maxpages; ++i) {
2404 		vm_object_t lobject;
2405 		vm_object_t nobject;
2406 		int allocated = 0;
2407 		int error;
2408 
2409 		/*
2410 		 * This can eat a lot of time on a heavily contended
2411 		 * machine so yield on the tick if needed.
2412 		 */
2413 		if ((i & 7) == 7)
2414 			lwkt_yield();
2415 
2416 		/*
2417 		 * Calculate the page to pre-fault, stopping the scan in
2418 		 * each direction separately if the limit is reached.
2419 		 */
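		/*
		 * Even passes probe forward of addra and odd passes probe
		 * backward, moving out 1, 1, 2, 2, ... pages per pass.
		 */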
2420 		if (i & 1) {
2421 			if (noneg)
2422 				continue;
2423 			addr = addra - ((i + 1) >> 1) * PAGE_SIZE;
2424 		} else {
2425 			if (nopos)
2426 				continue;
2427 			addr = addra + ((i + 2) >> 1) * PAGE_SIZE;
2428 		}
2429 		if (addr < entry->start) {
2430 			noneg = 1;
2431 			if (noneg && nopos)
2432 				break;
2433 			continue;
2434 		}
2435 		if (addr >= entry->end) {
2436 			nopos = 1;
2437 			if (noneg && nopos)
2438 				break;
2439 			continue;
2440 		}
2441 
2442 		/*
2443 		 * Skip pages already mapped, and stop scanning in that
2444 		 * direction.  When the scan terminates in both directions
2445 		 * we are done.
2446 		 */
2447 		if (pmap_prefault_ok(pmap, addr) == 0) {
2448 			if (i & 1)
2449 				noneg = 1;
2450 			else
2451 				nopos = 1;
2452 			if (noneg && nopos)
2453 				break;
2454 			continue;
2455 		}
2456 
2457 		/*
2458 		 * Follow the VM object chain to obtain the page to be mapped
2459 		 * into the pmap.
2460 		 *
2461 		 * If we reach the terminal object without finding a page
2462 		 * and we determine it would be advantageous, then allocate
2463 		 * a zero-fill page for the base object.  The base object
2464 		 * is guaranteed to be OBJT_DEFAULT for this case.
2465 		 *
2466 		 * In order to not have to check the pager via *haspage*()
2467 		 * we stop if any non-default object is encountered.  e.g.
2468 		 * a vnode or swap object would stop the loop.
2469 		 */
2470 		index = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
2471 		lobject = object;
2472 		pindex = index;
2473 		pprot = prot;
2474 
2475 		KKASSERT(lobject == entry->object.vm_object);
2476 		/*vm_object_hold(lobject); implied */
2477 
2478 		while ((m = vm_page_lookup_busy_try(lobject, pindex,
2479 						    TRUE, &error)) == NULL) {
2480 			if (lobject->type != OBJT_DEFAULT)
2481 				break;
2482 			if (lobject->backing_object == NULL) {
2483 				if (vm_fast_fault == 0)
2484 					break;
2485 				if ((prot & VM_PROT_WRITE) == 0 ||
2486 				    vm_page_count_min(0)) {
2487 					break;
2488 				}
2489 
2490 				/*
2491 				 * NOTE: Allocated from base object
2492 				 */
2493 				m = vm_page_alloc(object, index,
2494 						  VM_ALLOC_NORMAL |
2495 						  VM_ALLOC_ZERO |
2496 						  VM_ALLOC_USE_GD |
2497 						  VM_ALLOC_NULL_OK);
2498 				if (m == NULL)
2499 					break;
2500 				allocated = 1;
2501 				pprot = prot;
2502 				/* lobject = object .. not needed */
2503 				break;
2504 			}
2505 			if (lobject->backing_object_offset & PAGE_MASK)
2506 				break;
2507 			nobject = lobject->backing_object;
2508 			vm_object_hold(nobject);
2509 			KKASSERT(nobject == lobject->backing_object);
2510 			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
2511 			if (lobject != object) {
2512 				vm_object_lock_swap();
2513 				vm_object_drop(lobject);
2514 			}
2515 			lobject = nobject;
2516 			pprot &= ~VM_PROT_WRITE;
2517 			vm_object_chain_acquire(lobject, 0);
2518 		}
2519 
2520 		/*
2521 		 * NOTE: A non-NULL (m) will be associated with lobject if
2522 		 *	 it was found there, otherwise it is probably a
2523 		 *	 zero-fill page associated with the base object.
2524 		 *
2525 		 * Give up if no page is available.
2526 		 */
2527 		if (m == NULL) {
2528 			if (lobject != object) {
2529 #if 0
2530 				if (object->backing_object != lobject)
2531 					vm_object_hold(object->backing_object);
2532 #endif
2533 				vm_object_chain_release_all(
2534 					object->backing_object, lobject);
2535 #if 0
2536 				if (object->backing_object != lobject)
2537 					vm_object_drop(object->backing_object);
2538 #endif
2539 				vm_object_drop(lobject);
2540 			}
2541 			break;
2542 		}
2543 
2544 		/*
2545 		 * The object must be marked dirty if we are mapping a
2546 		 * writable page.  m->object is either lobject or object,
2547 		 * both of which are still held.  Do this before we
2548 		 * potentially drop the object.
2549 		 */
2550 		if (pprot & VM_PROT_WRITE)
2551 			vm_object_set_writeable_dirty(m->object);
2552 
2553 		/*
2554 		 * Do not conditionalize on PG_RAM.  If pages are present in
2555 		 * the VM system we assume optimal caching.  If caching is
2556 		 * not optimal the I/O gravy train will be restarted when we
2557 		 * hit an unavailable page.  We do not want to try to restart
2558 		 * the gravy train now because we really don't know how much
2559 		 * of the object has been cached.  The cost for restarting
2560 		 * the gravy train should be low (since accesses will likely
2561 		 * be I/O bound anyway).
2562 		 */
2563 		if (lobject != object) {
2564 #if 0
2565 			if (object->backing_object != lobject)
2566 				vm_object_hold(object->backing_object);
2567 #endif
2568 			vm_object_chain_release_all(object->backing_object,
2569 						    lobject);
2570 #if 0
2571 			if (object->backing_object != lobject)
2572 				vm_object_drop(object->backing_object);
2573 #endif
2574 			vm_object_drop(lobject);
2575 		}
2576 
2577 		/*
2578 		 * Enter the page into the pmap if appropriate.  If we had
2579 		 * allocated the page we have to place it on a queue.  If not
2580 		 * we just have to make sure it isn't on the cache queue
2581 		 * (pages on the cache queue are not allowed to be mapped).
2582 		 */
2583 		if (allocated) {
2584 			/*
2585 			 * Page must be zeroed.
2586 			 */
2587 			if ((m->flags & PG_ZERO) == 0) {
2588 				vm_page_zero_fill(m);
2589 			} else {
2590 #ifdef PMAP_DEBUG
2591 				pmap_page_assertzero(
2592 						VM_PAGE_TO_PHYS(m));
2593 #endif
2594 				vm_page_flag_clear(m, PG_ZERO);
2595 				mycpu->gd_cnt.v_ozfod++;
2596 			}
2597 			mycpu->gd_cnt.v_zfod++;
2598 			m->valid = VM_PAGE_BITS_ALL;
2599 
2600 			/*
2601 			 * Handle dirty page case
2602 			 */
2603 			if (pprot & VM_PROT_WRITE)
2604 				vm_set_nosync(m, entry);
2605 			pmap_enter(pmap, addr, m, pprot, 0, entry);
2606 			mycpu->gd_cnt.v_vm_faults++;
2607 			if (curthread->td_lwp)
2608 				++curthread->td_lwp->lwp_ru.ru_minflt;
2609 			vm_page_deactivate(m);
2610 			if (pprot & VM_PROT_WRITE) {
2611 				/*vm_object_set_writeable_dirty(m->object);*/
2612 				vm_set_nosync(m, entry);
2613 				if (fault_flags & VM_FAULT_DIRTY) {
2614 					vm_page_dirty(m);
2615 					/*XXX*/
2616 					swap_pager_unswapped(m);
2617 				}
2618 			}
2619 			vm_page_wakeup(m);
2620 		} else if (error) {
2621 			/* couldn't busy page, no wakeup */
2622 		} else if (
2623 		    ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2624 		    (m->flags & PG_FICTITIOUS) == 0) {
2625 			/*
2626 			 * A fully valid page not undergoing soft I/O can
2627 			 * be immediately entered into the pmap.
2628 			 */
2629 			if ((m->queue - m->pc) == PQ_CACHE)
2630 				vm_page_deactivate(m);
2631 			if (pprot & VM_PROT_WRITE) {
2632 				/*vm_object_set_writeable_dirty(m->object);*/
2633 				vm_set_nosync(m, entry);
2634 				if (fault_flags & VM_FAULT_DIRTY) {
2635 					vm_page_dirty(m);
2636 					/*XXX*/
2637 					swap_pager_unswapped(m);
2638 				}
2639 			}
2640 			if (pprot & VM_PROT_WRITE)
2641 				vm_set_nosync(m, entry);
2642 			pmap_enter(pmap, addr, m, pprot, 0, entry);
2643 			mycpu->gd_cnt.v_vm_faults++;
2644 			if (curthread->td_lwp)
2645 				++curthread->td_lwp->lwp_ru.ru_minflt;
2646 			vm_page_wakeup(m);
2647 		} else {
2648 			vm_page_wakeup(m);
2649 		}
2650 	}
2651 	vm_object_chain_release(object);
2652 	vm_object_drop(object);
2653 }
2654 
2655 /*
2656  * Object can be held shared
2657  */
2658 static void
2659 vm_prefault_quick(pmap_t pmap, vm_offset_t addra,
2660 		  vm_map_entry_t entry, int prot, int fault_flags)
2661 {
2662 	struct lwp *lp;
2663 	vm_page_t m;
2664 	vm_offset_t addr;
2665 	vm_pindex_t pindex;
2666 	vm_object_t object;
2667 	int i;
2668 	int noneg;
2669 	int nopos;
2670 	int maxpages;
2671 
2672 	/*
2673 	 * Get stable max count value, disabled if set to 0
2674 	 */
2675 	maxpages = vm_prefault_pages;
2676 	cpu_ccfence();
2677 	if (maxpages <= 0)
2678 		return;
2679 
2680 	/*
2681 	 * We do not currently prefault mappings that use virtual page
2682 	 * tables.  We do not prefault foreign pmaps.
2683 	 */
2684 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
2685 		return;
2686 	lp = curthread->td_lwp;
2687 	if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
2688 		return;
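	/*
	 * This quick path only handles terminal objects; punt if the
	 * entry's object has a backing chain.
	 */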
2689 	object = entry->object.vm_object;
2690 	if (object->backing_object != NULL)
2691 		return;
2692 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2693 
2694 	/*
2695 	 * Limit pre-fault count to 1024 pages.
2696 	 */
2697 	if (maxpages > 1024)
2698 		maxpages = 1024;
2699 
2700 	noneg = 0;
2701 	nopos = 0;
2702 	for (i = 0; i < maxpages; ++i) {
2703 		int error;
2704 
2705 		/*
2706 		 * Calculate the page to pre-fault, stopping the scan in
2707 		 * each direction separately if the limit is reached.
2708 		 */
2709 		if (i & 1) {
2710 			if (noneg)
2711 				continue;
2712 			addr = addra - ((i + 1) >> 1) * PAGE_SIZE;
2713 		} else {
2714 			if (nopos)
2715 				continue;
2716 			addr = addra + ((i + 2) >> 1) * PAGE_SIZE;
2717 		}
2718 		if (addr < entry->start) {
2719 			noneg = 1;
2720 			if (noneg && nopos)
2721 				break;
2722 			continue;
2723 		}
2724 		if (addr >= entry->end) {
2725 			nopos = 1;
2726 			if (noneg && nopos)
2727 				break;
2728 			continue;
2729 		}
2730 
2731 		/*
2732 		 * Skip pages already mapped, and stop scanning in that
2733 		 * direction.  When the scan terminates in both directions
2734 		 * we are done.
2735 		 */
2736 		if (pmap_prefault_ok(pmap, addr) == 0) {
2737 			if (i & 1)
2738 				noneg = 1;
2739 			else
2740 				nopos = 1;
2741 			if (noneg && nopos)
2742 				break;
2743 			continue;
2744 		}
2745 
2746 		/*
2747 		 * Follow the VM object chain to obtain the page to be mapped
2748 		 * into the pmap.  This version of the prefault code only
2749 		 * works with terminal objects.
2750 		 *
2751 		 * WARNING!  We cannot call swap_pager_unswapped() with a
2752 		 *	     shared token.
2753 		 */
2754 		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
2755 
2756 		m = vm_page_lookup_busy_try(object, pindex, TRUE, &error);
2757 		if (m == NULL || error)
2758 			continue;
2759 
2760 		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2761 		    (m->flags & PG_FICTITIOUS) == 0 &&
2762 		    ((m->flags & PG_SWAPPED) == 0 ||
2763 		     (prot & VM_PROT_WRITE) == 0 ||
2764 		     (fault_flags & VM_FAULT_DIRTY) == 0)) {
2765 			/*
2766 			 * A fully valid page not undergoing soft I/O can
2767 			 * be immediately entered into the pmap.
2768 			 */
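			/*
			 * Pages on the cache queue may not be mapped;
			 * deactivate first to pull the page off of it.
			 */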
2769 			if ((m->queue - m->pc) == PQ_CACHE)
2770 				vm_page_deactivate(m);
2771 			if (prot & VM_PROT_WRITE) {
2772 				vm_object_set_writeable_dirty(m->object);
2773 				vm_set_nosync(m, entry);
2774 				if (fault_flags & VM_FAULT_DIRTY) {
2775 					vm_page_dirty(m);
2776 					/*XXX*/
2777 					swap_pager_unswapped(m);
2778 				}
2779 			}
2780 			pmap_enter(pmap, addr, m, prot, 0, entry);
2781 			mycpu->gd_cnt.v_vm_faults++;
2782 			if (curthread->td_lwp)
2783 				++curthread->td_lwp->lwp_ru.ru_minflt;
2784 		}
2785 		vm_page_wakeup(m);
2786 	}
2787 }
2788