/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_fault.c,v 1.108.2.8 2002/02/26 05:49:27 silby Exp $
 * $DragonFly: src/sys/vm/vm_fault.c,v 1.47 2008/07/01 02:02:56 dillon Exp $
 */

/*
 *	Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/vkernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

struct faultstate {
	vm_page_t m;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	vm_page_t first_m;
	vm_object_t first_object;
	vm_prot_t first_prot;
	vm_map_t map;
	vm_map_entry_t entry;
	int lookup_still_valid;
	int didlimit;
	int hardfault;
	int fault_flags;
	int map_generation;
	boolean_t wired;
	struct vnode *vp;
};

static int vm_fast_fault = 1;
SYSCTL_INT(_vm, OID_AUTO, fast_fault, CTLFLAG_RW, &vm_fast_fault, 0,
	   "Burst fault zero-fill regions");
static int debug_cluster = 0;
SYSCTL_INT(_vm, OID_AUTO, debug_cluster, CTLFLAG_RW, &debug_cluster, 0, "");

static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t);
static int vm_fault_vpagetable(struct faultstate *, vm_pindex_t *, vpte_t, int);
#if 0
static int vm_fault_additional_pages (vm_page_t, int, int, vm_page_t *, int *);
#endif
static int vm_fault_ratelimit(struct vmspace *);
static void vm_set_nosync(vm_page_t m, vm_map_entry_t entry);
static void vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry,
			int prot);

/*
 * The caller must hold vm_token.
 */
static __inline void
release_page(struct faultstate *fs)
{
	vm_page_deactivate(fs->m);
	vm_page_wakeup(fs->m);
	fs->m = NULL;
}

/*
 * The caller must hold vm_token.
 *
 * NOTE: Once unlocked any cached fs->entry becomes invalid, any reuse
 *	 requires relocking and then checking the timestamp.
 *
 * NOTE: vm_map_lock_read() does not bump fs->map->timestamp so we do
 *	 not have to update fs->map_generation here.
 *
 * NOTE: This function can fail due to a deadlock against the caller's
 *	 holding of a vm_page BUSY.
 */
static __inline int
relock_map(struct faultstate *fs)
{
	int error;

	if (fs->lookup_still_valid == FALSE && fs->map) {
		error = vm_map_lock_read_to(fs->map);
		if (error == 0)
			fs->lookup_still_valid = TRUE;
	} else {
		error = 0;
	}
	return error;
}

static __inline void
unlock_map(struct faultstate *fs)
{
	if (fs->lookup_still_valid && fs->map) {
		vm_map_lookup_done(fs->map, fs->entry, 0);
		fs->lookup_still_valid = FALSE;
	}
}

/*
 * Clean up after a successful call to vm_fault_object() so another call
 * to vm_fault_object() can be made.
 *
 * The caller must hold vm_token.
 */
static void
_cleanup_successful_fault(struct faultstate *fs, int relock)
{
	if (fs->object != fs->first_object) {
		vm_page_free(fs->first_m);
		vm_object_pip_wakeup(fs->object);
		fs->first_m = NULL;
	}
	fs->object = fs->first_object;
	if (relock && fs->lookup_still_valid == FALSE) {
		if (fs->map)
			vm_map_lock_read(fs->map);
		fs->lookup_still_valid = TRUE;
	}
}

/*
 * The caller must hold vm_token.
 */
static void
_unlock_things(struct faultstate *fs, int dealloc)
{
	vm_object_pip_wakeup(fs->first_object);
	_cleanup_successful_fault(fs, 0);
	if (dealloc) {
		vm_object_deallocate(fs->first_object);
		fs->first_object = NULL;
	}
	unlock_map(fs);
	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

#define unlock_things(fs) _unlock_things(fs, 0)
#define unlock_and_deallocate(fs) _unlock_things(fs, 1)
#define cleanup_successful_fault(fs) _cleanup_successful_fault(fs, 1)

/*
 * TRYPAGER
 *
 * Determine if the pager for the current object *might* contain the page.
 *
 * We only need to try the pager if this is not a default object (default
 * objects are zero-fill and have no real pager), and if we are not taking
 * a wiring fault or if the FS entry is wired.
 */
#define TRYPAGER(fs)	\
		(fs->object->type != OBJT_DEFAULT && \
		(((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) || fs->wired))
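
/*
 * Illustration of TRYPAGER (descriptive only; the OBJT_VNODE case is an
 * assumption based on object types used elsewhere in the VM system):
 *
 *	fs->object->type == OBJT_VNODE, normal fault   -> consult the pager
 *	fs->object->type == OBJT_DEFAULT               -> zero-fill, skip pager
 *	wiring fault on an entry that is not yet wired -> skip pager
 */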

/*
 * vm_fault:
 *
 * Handle a page fault occurring at the given address, requiring the given
 * permissions, in the map specified.  If successful, the page is inserted
 * into the associated physical map.
 *
 * NOTE: The given address should be truncated to the proper page address.
 *
 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
 * a standard error specifying why the fault is fatal is returned.
 *
 * The map in question must be referenced, and remains so.
 * The caller may hold no locks.
 * No other requirements.
 */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
{
	int result;
	vm_pindex_t first_pindex;
	struct faultstate fs;
	int growstack;

	mycpu->gd_cnt.v_vm_faults++;

	fs.didlimit = 0;
	fs.hardfault = 0;
	fs.fault_flags = fault_flags;
	growstack = 1;

RetryFault:
	/*
	 * Find the vm_map_entry representing the backing store and resolve
	 * the top level object and page index.  This may have the side
	 * effect of executing a copy-on-write on the map entry and/or
	 * creating a shadow object, but will not COW any actual VM pages.
	 *
	 * On success fs.map is left read-locked and various other fields
	 * are initialized but not otherwise referenced or locked.
	 *
	 * NOTE!  vm_map_lookup will try to upgrade the fault_type to
	 * VM_FAULT_WRITE if the map entry is a virtual page table and also
	 * writable, so we can set the 'A'ccessed bit in the virtual page
	 * table entry.
	 */
	fs.map = map;
	result = vm_map_lookup(&fs.map, vaddr, fault_type,
			       &fs.entry, &fs.first_object,
			       &first_pindex, &fs.first_prot, &fs.wired);

	/*
	 * If the lookup failed or the map protections are incompatible,
	 * the fault generally fails.  However, if the caller is trying
	 * to do a user wiring we have more work to do.
	 */
	if (result != KERN_SUCCESS) {
		if (result != KERN_PROTECTION_FAILURE ||
		    (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
		{
			if (result == KERN_INVALID_ADDRESS && growstack &&
			    map != &kernel_map && curproc != NULL) {
				result = vm_map_growstack(curproc, vaddr);
				if (result != KERN_SUCCESS)
					return (KERN_FAILURE);
				growstack = 0;
				goto RetryFault;
			}
			return (result);
		}

		/*
		 * If we are user-wiring a r/w segment, and it is COW, then
		 * we need to do the COW operation.  Note that we don't
		 * currently COW RO sections, because it is NOT desirable
		 * to COW .text.  We simply keep .text from ever being COW'ed
		 * and take the heat that one cannot debug wired .text sections.
		 */
		result = vm_map_lookup(&fs.map, vaddr,
				       VM_PROT_READ|VM_PROT_WRITE|
				        VM_PROT_OVERRIDE_WRITE,
				       &fs.entry, &fs.first_object,
				       &first_pindex, &fs.first_prot,
				       &fs.wired);
		if (result != KERN_SUCCESS)
			return result;

		/*
		 * If we don't COW now, on a user wire, the user will never
		 * be able to write to the mapping.  If we don't make this
		 * restriction, the bookkeeping would be nearly impossible.
		 */
		if ((fs.entry->protection & VM_PROT_WRITE) == 0)
			fs.entry->max_protection &= ~VM_PROT_WRITE;
	}

	/*
	 * fs.map is read-locked
	 *
	 * Misc checks.  Save the map generation number to detect races.
	 */
	fs.map_generation = fs.map->timestamp;

	if (fs.entry->eflags & (MAP_ENTRY_NOFAULT | MAP_ENTRY_KSTACK)) {
		if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
			panic("vm_fault: fault on nofault entry, addr: %p",
			      (void *)vaddr);
		}
		if ((fs.entry->eflags & MAP_ENTRY_KSTACK) &&
		    vaddr >= fs.entry->start &&
		    vaddr < fs.entry->start + PAGE_SIZE) {
			panic("vm_fault: fault on stack guard, addr: %p",
			      (void *)vaddr);
		}
	}

	/*
	 * A system map entry may return a NULL object.  No object means
	 * no pager means an unrecoverable kernel fault.
	 */
	if (fs.first_object == NULL) {
		panic("vm_fault: unrecoverable fault at %p in entry %p",
			(void *)vaddr, fs.entry);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.  This must be done after
	 * obtaining the vnode lock in order to avoid possible deadlocks.
	 *
	 * The vm_object must be held before manipulation.
	 */
	lwkt_gettoken(&vm_token);
	vm_object_hold(fs.first_object);
	vm_object_reference(fs.first_object);
	fs.vp = vnode_pager_lock(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);
	vm_object_drop(fs.first_object);
	lwkt_reltoken(&vm_token);

	fs.lookup_still_valid = TRUE;
	fs.first_m = NULL;
	fs.object = fs.first_object;	/* so unlock_and_deallocate works */

	/*
	 * If the entry is wired we cannot change the page protection.
	 */
	if (fs.wired)
		fault_type = fs.first_prot;

	/*
	 * The page we want is at (first_object, first_pindex), but if the
	 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
	 * page table to figure out the actual pindex.
	 *
	 * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
	 * ONLY
	 */
	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		result = vm_fault_vpagetable(&fs, &first_pindex,
					     fs.entry->aux.master_pde,
					     fault_type);
		if (result == KERN_TRY_AGAIN)
			goto RetryFault;
		if (result != KERN_SUCCESS)
			return (result);
	}

	/*
	 * Now we have the actual (object, pindex), fault in the page.  If
	 * vm_fault_object() fails it will unlock and deallocate the FS
	 * data.  If it succeeds everything remains locked and fs->object
	 * will have an additional PIP count if it is not equal to
	 * fs->first_object.
	 *
	 * vm_fault_object will set fs->prot for the pmap operation.  It is
	 * allowed to set VM_PROT_WRITE if fault_type == VM_PROT_READ if the
	 * page can be safely written.  However, it will force a read-only
	 * mapping for a read fault if the memory is managed by a virtual
	 * page table.
	 */
	result = vm_fault_object(&fs, first_pindex, fault_type);

	if (result == KERN_TRY_AGAIN) {
		/*lwkt_reltoken(&vm_token);*/
		goto RetryFault;
	}
	if (result != KERN_SUCCESS) {
		/*lwkt_reltoken(&vm_token);*/
		return (result);
	}

	/*
	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
	 * will contain a busied page.
	 *
	 * Enter the page into the pmap and do pmap-related adjustments.
	 */
	vm_page_flag_set(fs.m, PG_REFERENCED);
	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);

	/*
	 * Burst in a few more pages if possible.  The fs.map should still
	 * be locked.
	 */
	if (fault_flags & VM_FAULT_BURST) {
		if ((fs.fault_flags & VM_FAULT_WIRE_MASK) == 0 &&
		    fs.wired == 0) {
			vm_prefault(fs.map->pmap, vaddr, fs.entry, fs.prot);
		}
	}
	lwkt_gettoken(&vm_token);
	unlock_things(&fs);

	/*KKASSERT(fs.m->queue == PQ_NONE); page-in op may deactivate page */
	KKASSERT(fs.m->flags & PG_BUSY);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 *
	 * We do not really need to get vm_token here, but since all the
	 * vm_*() calls have to, doing it here improves efficiency.
	 */
	/*lwkt_gettoken(&vm_token);*/

	if (fs.fault_flags & VM_FAULT_WIRE_MASK) {
		lwkt_reltoken(&vm_token); /* before wire activate does not */
		if (fs.wired)
			vm_page_wire(fs.m);
		else
			vm_page_unwire(fs.m, 1);
	} else {
		vm_page_activate(fs.m);
		lwkt_reltoken(&vm_token); /* before wire activate does not */
	}
	/*lwkt_reltoken(&vm_token); after wire/activate works */

	if (curthread->td_lwp) {
		if (fs.hardfault) {
			curthread->td_lwp->lwp_ru.ru_majflt++;
		} else {
			curthread->td_lwp->lwp_ru.ru_minflt++;
		}
	}

	/*
	 * Unlock everything, and return
	 */
	vm_page_wakeup(fs.m);
	vm_object_deallocate(fs.first_object);
	/*fs.m = NULL; */
	/*fs.first_object = NULL; */
	/*lwkt_reltoken(&vm_token);*/

	return (KERN_SUCCESS);
}
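
/*
 * Example (a sketch, not part of this file): a machine-dependent trap
 * handler typically resolves a user-mode page fault along these lines.
 * The trap-frame handling and signal delivery are assumed and omitted.
 *
 *	vm_map_t map = &curproc->p_vmspace->vm_map;
 *	int rv;
 *
 *	rv = vm_fault(map, trunc_page(fault_addr),
 *		      is_write ? VM_PROT_WRITE : VM_PROT_READ,
 *		      VM_FAULT_NORMAL);
 *	if (rv != KERN_SUCCESS)
 *		deliver SIGSEGV or SIGBUS to the process;
 */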

/*
 * Fault in the specified virtual address in the current process map,
 * returning a held VM page or NULL.  See vm_fault_page() for more
 * information.
 *
 * No requirements.
 */
vm_page_t
vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type, int *errorp)
{
	struct lwp *lp = curthread->td_lwp;
	vm_page_t m;

	m = vm_fault_page(&lp->lwp_vmspace->vm_map, va,
			  fault_type, VM_FAULT_NORMAL, errorp);
	return(m);
}
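
/*
 * Example usage (a sketch): code which must safely access a user address
 * can fault the backing page in and hold it across the access.  The
 * error handling shown is illustrative only.
 *
 *	int error;
 *	vm_page_t m;
 *
 *	m = vm_fault_page_quick(trunc_page(uva), VM_PROT_READ, &error);
 *	if (m == NULL)
 *		return (EFAULT);
 *	... access the page contents, e.g. via a temporary kernel
 *	    mapping such as lwbuf_alloc()/lwbuf_kva() ...
 *	vm_page_unhold(m);
 */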

/*
 * Fault in the specified virtual address in the specified map, doing all
 * necessary manipulation of the object store and all necessary I/O.  Return
 * a held VM page or NULL, and set *errorp.  The related pmap is not
 * updated.
 *
 * The returned page will be properly dirtied if VM_PROT_WRITE was specified,
 * and marked PG_REFERENCED as well.
 *
 * If the page cannot be faulted writable and VM_PROT_WRITE was specified, an
 * error will be returned.
 *
 * No requirements.
 */
vm_page_t
vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
	      int fault_flags, int *errorp)
{
	vm_pindex_t first_pindex;
	struct faultstate fs;
	int result;
	vm_prot_t orig_fault_type = fault_type;

	mycpu->gd_cnt.v_vm_faults++;

	fs.didlimit = 0;
	fs.hardfault = 0;
	fs.fault_flags = fault_flags;
	KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);

RetryFault:
	/*
	 * Find the vm_map_entry representing the backing store and resolve
	 * the top level object and page index.  This may have the side
	 * effect of executing a copy-on-write on the map entry and/or
	 * creating a shadow object, but will not COW any actual VM pages.
	 *
	 * On success fs.map is left read-locked and various other fields
	 * are initialized but not otherwise referenced or locked.
	 *
	 * NOTE!  vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE
	 * if the map entry is a virtual page table and also writable,
	 * so we can set the 'A'ccessed bit in the virtual page table entry.
	 */
	fs.map = map;
	result = vm_map_lookup(&fs.map, vaddr, fault_type,
			       &fs.entry, &fs.first_object,
			       &first_pindex, &fs.first_prot, &fs.wired);

	if (result != KERN_SUCCESS) {
		*errorp = result;
		return (NULL);
	}

	/*
	 * fs.map is read-locked
	 *
	 * Misc checks.  Save the map generation number to detect races.
	 */
	fs.map_generation = fs.map->timestamp;

	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("vm_fault: fault on nofault entry, addr: %lx",
		    (u_long)vaddr);
	}

	/*
	 * A system map entry may return a NULL object.  No object means
	 * no pager means an unrecoverable kernel fault.
	 */
	if (fs.first_object == NULL) {
		panic("vm_fault: unrecoverable fault at %p in entry %p",
			(void *)vaddr, fs.entry);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.  This must be done after
	 * obtaining the vnode lock in order to avoid possible deadlocks.
	 *
	 * The vm_object must be held before manipulation.
	 */
	lwkt_gettoken(&vm_token);
	vm_object_hold(fs.first_object);
	vm_object_reference(fs.first_object);
	fs.vp = vnode_pager_lock(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);
	vm_object_drop(fs.first_object);
	lwkt_reltoken(&vm_token);

	fs.lookup_still_valid = TRUE;
	fs.first_m = NULL;
	fs.object = fs.first_object;	/* so unlock_and_deallocate works */

	/*
	 * If the entry is wired we cannot change the page protection.
	 */
	if (fs.wired)
		fault_type = fs.first_prot;

	/*
	 * The page we want is at (first_object, first_pindex), but if the
	 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
	 * page table to figure out the actual pindex.
	 *
	 * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
	 * ONLY
	 */
	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		result = vm_fault_vpagetable(&fs, &first_pindex,
					     fs.entry->aux.master_pde,
					     fault_type);
		if (result == KERN_TRY_AGAIN)
			goto RetryFault;
		if (result != KERN_SUCCESS) {
			*errorp = result;
			return (NULL);
		}
	}

	/*
	 * Now we have the actual (object, pindex), fault in the page.  If
	 * vm_fault_object() fails it will unlock and deallocate the FS
	 * data.  If it succeeds everything remains locked and fs->object
	 * will have an additional PIP count if it is not equal to
	 * fs->first_object.
	 */
	result = vm_fault_object(&fs, first_pindex, fault_type);

	if (result == KERN_TRY_AGAIN)
		goto RetryFault;
	if (result != KERN_SUCCESS) {
		*errorp = result;
		return(NULL);
	}

	if ((orig_fault_type & VM_PROT_WRITE) &&
	    (fs.prot & VM_PROT_WRITE) == 0) {
		*errorp = KERN_PROTECTION_FAILURE;
		unlock_and_deallocate(&fs);
		return(NULL);
	}

	/*
	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
	 * will contain a busied page.
	 */
	unlock_things(&fs);

	/*
	 * Return a held page.  We are not doing any pmap manipulation so do
	 * not set PG_MAPPED.  However, adjust the page flags according to
	 * the fault type because the caller may not use a managed pmapping
	 * (so we don't want to lose the fact that the page will be dirtied
	 * if a write fault was specified).
	 */
	lwkt_gettoken(&vm_token);
	vm_page_hold(fs.m);
	if (fault_type & VM_PROT_WRITE)
		vm_page_dirty(fs.m);

	/*
	 * Update the pmap.  We really only have to do this if a COW
	 * occurred to replace the read-only page with the new page.  For
	 * now just do it unconditionally. XXX
	 */
	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);
	vm_page_flag_set(fs.m, PG_REFERENCED);

	/*
	 * Unbusy the page by activating it.  It remains held and will not
	 * be reclaimed.
	 */
	vm_page_activate(fs.m);

	if (curthread->td_lwp) {
		if (fs.hardfault) {
			curthread->td_lwp->lwp_ru.ru_majflt++;
		} else {
			curthread->td_lwp->lwp_ru.ru_minflt++;
		}
	}

	/*
	 * Unlock everything, and return the held page.
	 */
	vm_page_wakeup(fs.m);
	vm_object_deallocate(fs.first_object);
	/*fs.first_object = NULL; */
	lwkt_reltoken(&vm_token);

	*errorp = 0;
	return(fs.m);
}

/*
 * Fault in the specified (object,offset), dirty the returned page as
 * needed.  If the requested fault_type cannot be satisfied, NULL is
 * returned and *errorp is set.
 *
 * A held (but not busied) page is returned.
 *
 * No requirements.
 */
vm_page_t
vm_fault_object_page(vm_object_t object, vm_ooffset_t offset,
		     vm_prot_t fault_type, int fault_flags, int *errorp)
{
	int result;
	vm_pindex_t first_pindex;
	struct faultstate fs;
	struct vm_map_entry entry;

	bzero(&entry, sizeof(entry));
	entry.object.vm_object = object;
	entry.maptype = VM_MAPTYPE_NORMAL;
	entry.protection = entry.max_protection = fault_type;

	fs.didlimit = 0;
	fs.hardfault = 0;
	fs.fault_flags = fault_flags;
	fs.map = NULL;
	KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);

RetryFault:

	fs.first_object = object;
	first_pindex = OFF_TO_IDX(offset);
	fs.entry = &entry;
	fs.first_prot = fault_type;
	fs.wired = 0;
	/*fs.map_generation = 0; unused */

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.  This must be done after
	 * obtaining the vnode lock in order to avoid possible deadlocks.
	 */
	lwkt_gettoken(&vm_token);
	vm_object_hold(fs.first_object);
	vm_object_reference(fs.first_object);
	fs.vp = vnode_pager_lock(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);
	vm_object_drop(fs.first_object);
	lwkt_reltoken(&vm_token);

	fs.lookup_still_valid = TRUE;
	fs.first_m = NULL;
	fs.object = fs.first_object;	/* so unlock_and_deallocate works */

#if 0
	/* XXX future - ability to operate on VM object using vpagetable */
	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		result = vm_fault_vpagetable(&fs, &first_pindex,
					     fs.entry->aux.master_pde,
					     fault_type);
		if (result == KERN_TRY_AGAIN)
			goto RetryFault;
		if (result != KERN_SUCCESS) {
			*errorp = result;
			return (NULL);
		}
	}
#endif

	/*
	 * Now we have the actual (object, pindex), fault in the page.  If
	 * vm_fault_object() fails it will unlock and deallocate the FS
	 * data.  If it succeeds everything remains locked and fs->object
	 * will have an additional PIP count if it is not equal to
	 * fs->first_object.
	 */
	result = vm_fault_object(&fs, first_pindex, fault_type);

	if (result == KERN_TRY_AGAIN)
		goto RetryFault;
	if (result != KERN_SUCCESS) {
		*errorp = result;
		return(NULL);
	}

	if ((fault_type & VM_PROT_WRITE) && (fs.prot & VM_PROT_WRITE) == 0) {
		*errorp = KERN_PROTECTION_FAILURE;
		unlock_and_deallocate(&fs);
		return(NULL);
	}

	/*
	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
	 * will contain a busied page.
	 */
	unlock_things(&fs);

	/*
	 * Return a held page.  We are not doing any pmap manipulation so do
	 * not set PG_MAPPED.  However, adjust the page flags according to
	 * the fault type because the caller may not use a managed pmapping
	 * (so we don't want to lose the fact that the page will be dirtied
	 * if a write fault was specified).
	 */
	lwkt_gettoken(&vm_token);
	vm_page_hold(fs.m);
	if (fault_type & VM_PROT_WRITE)
		vm_page_dirty(fs.m);

	if (fault_flags & VM_FAULT_DIRTY)
		vm_page_dirty(fs.m);
	if (fault_flags & VM_FAULT_UNSWAP)
		swap_pager_unswapped(fs.m);

	/*
	 * Indicate that the page was accessed.
	 */
	vm_page_flag_set(fs.m, PG_REFERENCED);

	/*
	 * Unbusy the page by activating it.  It remains held and will not
	 * be reclaimed.
	 */
	vm_page_activate(fs.m);

	if (curthread->td_lwp) {
		if (fs.hardfault) {
			mycpu->gd_cnt.v_vm_faults++;
			curthread->td_lwp->lwp_ru.ru_majflt++;
		} else {
			curthread->td_lwp->lwp_ru.ru_minflt++;
		}
	}

	/*
	 * Unlock everything, and return the held page.
	 */
	vm_page_wakeup(fs.m);
	vm_object_deallocate(fs.first_object);
	/*fs.first_object = NULL; */
	lwkt_reltoken(&vm_token);

	*errorp = 0;
	return(fs.m);
}
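
/*
 * Example usage (a sketch): faulting a page directly from a VM object,
 * bypassing any vm_map, for a kernel consumer that holds its own
 * reference on the object.  The object and pindex are assumptions.
 *
 *	int error;
 *	vm_page_t m;
 *
 *	m = vm_fault_object_page(obj, IDX_TO_OFF(pindex),
 *				 VM_PROT_READ, 0, &error);
 *	if (m == NULL)
 *		handle error;	(error is a KERN_* code, not an errno)
 *	... use the held page ...
 *	vm_page_unhold(m);
 */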

/*
 * Translate the virtual page number (first_pindex) that is relative
 * to the address space into a logical page number that is relative to the
 * backing object.  Use the virtual page table pointed to by (vpte).
 *
 * This implements an N-level page table.  Any level can terminate the
 * scan by setting VPTE_PS.  A linear mapping is accomplished by setting
 * VPTE_PS in the master page directory entry set via mcontrol(MADV_SETMAP).
 *
 * No requirements (vm_token need not be held).
 */
static
int
vm_fault_vpagetable(struct faultstate *fs, vm_pindex_t *pindex,
		    vpte_t vpte, int fault_type)
{
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	int vshift = VPTE_FRAME_END - PAGE_SHIFT; /* index bits remaining */
	int result = KERN_SUCCESS;
	vpte_t *ptep;

	for (;;) {
		/*
		 * We cannot proceed if the vpte is not valid, not readable
		 * for a read fault, or not writable for a write fault.
		 */
		if ((vpte & VPTE_V) == 0) {
			unlock_and_deallocate(fs);
			return (KERN_FAILURE);
		}
		if ((fault_type & VM_PROT_READ) && (vpte & VPTE_R) == 0) {
			unlock_and_deallocate(fs);
			return (KERN_FAILURE);
		}
		if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_W) == 0) {
			unlock_and_deallocate(fs);
			return (KERN_FAILURE);
		}
		if ((vpte & VPTE_PS) || vshift == 0)
			break;
		KKASSERT(vshift >= VPTE_PAGE_BITS);

		/*
		 * Get the page table page.  Nominally we only read the page
		 * table, but since we are actively setting VPTE_M and VPTE_A,
		 * tell vm_fault_object() that we are writing it.
		 *
		 * There is currently no real need to optimize this.
		 */
		result = vm_fault_object(fs, (vpte & VPTE_FRAME) >> PAGE_SHIFT,
					 VM_PROT_READ|VM_PROT_WRITE);
		if (result != KERN_SUCCESS)
			return (result);

		/*
		 * Process the returned fs.m and look up the page table
		 * entry in the page table page.
		 */
		vshift -= VPTE_PAGE_BITS;
		lwb = lwbuf_alloc(fs->m, &lwb_cache);
		ptep = ((vpte_t *)lwbuf_kva(lwb) +
		        ((*pindex >> vshift) & VPTE_PAGE_MASK));
		vpte = *ptep;

		/*
		 * Page table write-back.  If the vpte is valid for the
		 * requested operation, do a write-back to the page table.
		 *
		 * XXX VPTE_M is not set properly for page directory pages.
		 * It doesn't get set in the page directory if the page table
		 * is modified during a read access.
		 */
		if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_V) &&
		    (vpte & VPTE_W)) {
			if ((vpte & (VPTE_M|VPTE_A)) != (VPTE_M|VPTE_A)) {
				atomic_set_long(ptep, VPTE_M | VPTE_A);
				vm_page_dirty(fs->m);
			}
		}
		if ((fault_type & VM_PROT_READ) && (vpte & VPTE_V) &&
		    (vpte & VPTE_R)) {
			if ((vpte & VPTE_A) == 0) {
				atomic_set_long(ptep, VPTE_A);
				vm_page_dirty(fs->m);
			}
		}
		lwbuf_free(lwb);
		vm_page_flag_set(fs->m, PG_REFERENCED);
		vm_page_activate(fs->m);
		vm_page_wakeup(fs->m);
		fs->m = NULL;
		cleanup_successful_fault(fs);
	}
	/*
	 * Combine remaining address bits with the vpte.
	 */
	/* JG how many bits from each? */
	*pindex = ((vpte & VPTE_FRAME) >> PAGE_SHIFT) +
		  (*pindex & ((1L << vshift) - 1));
	return (KERN_SUCCESS);
}
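
/*
 * Worked example (illustrative; assumes VPTE_PAGE_BITS == 9): for a
 * two-level table the first iteration reads the directory entry at
 * index (*pindex >> vshift) & VPTE_PAGE_MASK.  If that vpte has VPTE_PS
 * set the walk stops and the final translation folds the untranslated
 * low-order bits back in:
 *
 *	*pindex = ((vpte & VPTE_FRAME) >> PAGE_SHIFT) +
 *		  (*pindex & ((1L << vshift) - 1));
 *
 * i.e. the vpte supplies the high bits of the logical page index and
 * the remaining vshift low-order bits come from the original pindex.
 */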


/*
 * This is the core of the vm_fault code.
 *
 * Do all operations required to fault-in (fs.first_object, pindex).  Run
 * through the shadow chain as necessary and do required COW or virtual
 * copy operations.  The caller has already fully resolved the vm_map_entry
 * and, if appropriate, has created a copy-on-write layer.  All we need to
 * do is iterate the object chain.
 *
 * On failure (fs) is unlocked and deallocated and the caller may return or
 * retry depending on the failure code.  On success (fs) is NOT unlocked or
 * deallocated, fs.m will contain a resolved, busied page, and fs.object
 * will have an additional PIP count if it is not equal to fs.first_object.
 *
 * No requirements.
 */
static
int
vm_fault_object(struct faultstate *fs,
		vm_pindex_t first_pindex, vm_prot_t fault_type)
{
	vm_object_t next_object;
	vm_pindex_t pindex;

	fs->prot = fs->first_prot;
	fs->object = fs->first_object;
	pindex = first_pindex;

	/*
	 * If a read fault occurs we try to make the page writable if
	 * possible.  There are three cases where we cannot make the
	 * page mapping writable:
	 *
	 * (1) The mapping is read-only or the VM object is read-only,
	 *     fs->prot above will simply not have VM_PROT_WRITE set.
	 *
	 * (2) If the mapping is a virtual page table we need to be able
	 *     to detect writes so we can set VPTE_M in the virtual page
	 *     table.
	 *
	 * (3) If the VM page is read-only or copy-on-write, upgrading would
	 *     just result in an unnecessary COW fault.
	 *
	 * VM_PROT_VPAGED is set if faulting via a virtual page table and
	 * causes adjustments to the 'M'odify bit to also turn off write
	 * access to force a re-fault.
	 */
	if (fs->entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		if ((fault_type & VM_PROT_WRITE) == 0)
			fs->prot &= ~VM_PROT_WRITE;
	}

	lwkt_gettoken(&vm_token);

	for (;;) {
		/*
		 * If the object is dead, we stop here
		 */
		if (fs->object->flags & OBJ_DEAD) {
			unlock_and_deallocate(fs);
			lwkt_reltoken(&vm_token);
			return (KERN_PROTECTION_FAILURE);
		}

		/*
		 * See if the page is resident.
		 */
		fs->m = vm_page_lookup(fs->object, pindex);
		if (fs->m != NULL) {
			int queue;
			/*
			 * Wait/Retry if the page is busy.  We have to do this
			 * if the page is busy via either PG_BUSY or
			 * vm_page_t->busy because the vm_pager may be using
			 * vm_page_t->busy for pageouts ( and even pageins if
			 * it is the vnode pager ), and we could end up trying
			 * to pagein and pageout the same page simultaneously.
			 *
			 * We can theoretically allow the busy case on a read
			 * fault if the page is marked valid, but since such
			 * pages are typically already pmap'd, putting that
			 * special case in might be more effort than it is
			 * worth.  We cannot under any circumstances mess
			 * around with a vm_page_t->busy page except, perhaps,
			 * to pmap it.
			 */
			if ((fs->m->flags & PG_BUSY) || fs->m->busy) {
				unlock_things(fs);
				vm_page_sleep_busy(fs->m, TRUE, "vmpfw");
				mycpu->gd_cnt.v_intrans++;
				vm_object_deallocate(fs->first_object);
				fs->first_object = NULL;
				lwkt_reltoken(&vm_token);
				return (KERN_TRY_AGAIN);
			}

			/*
			 * If reactivating a page from PQ_CACHE we may have
			 * to rate-limit.
			 */
			queue = fs->m->queue;
			vm_page_unqueue_nowakeup(fs->m);

			if ((queue - fs->m->pc) == PQ_CACHE &&
			    vm_page_count_severe()) {
				vm_page_activate(fs->m);
				unlock_and_deallocate(fs);
				vm_waitpfault();
				lwkt_reltoken(&vm_token);
				return (KERN_TRY_AGAIN);
			}

			/*
			 * Mark page busy for other processes, and the
			 * pagedaemon.  If it still isn't completely valid
			 * (readable), or if a read-ahead-mark is set on
			 * the VM page, jump to readrest, else we found the
			 * page and can return.
			 *
			 * We can release the spl once we have marked the
			 * page busy.
			 */
			vm_page_busy(fs->m);

			if (fs->m->object != &kernel_object) {
				if ((fs->m->valid & VM_PAGE_BITS_ALL) !=
				    VM_PAGE_BITS_ALL) {
					goto readrest;
				}
				if (fs->m->flags & PG_RAM) {
					if (debug_cluster)
						kprintf("R");
					vm_page_flag_clear(fs->m, PG_RAM);
					goto readrest;
				}
			}
			break; /* break to PAGE HAS BEEN FOUND */
		}

		/*
		 * Page is not resident.  If this is the search termination
		 * or the pager might contain the page, allocate a new page.
		 */
		if (TRYPAGER(fs) || fs->object == fs->first_object) {
			/*
			 * If the page is beyond the object size we fail
			 */
			if (pindex >= fs->object->size) {
				lwkt_reltoken(&vm_token);
				unlock_and_deallocate(fs);
				return (KERN_PROTECTION_FAILURE);
			}

			/*
			 * Ratelimit.
			 */
			if (fs->didlimit == 0 && curproc != NULL) {
				int limticks;

				limticks = vm_fault_ratelimit(curproc->p_vmspace);
				if (limticks) {
					lwkt_reltoken(&vm_token);
					unlock_and_deallocate(fs);
					tsleep(curproc, 0, "vmrate", limticks);
					fs->didlimit = 1;
					return (KERN_TRY_AGAIN);
				}
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 */
			fs->m = NULL;
			if (!vm_page_count_severe()) {
				fs->m = vm_page_alloc(fs->object, pindex,
				    (fs->vp || fs->object->backing_object) ?
				     VM_ALLOC_NORMAL :
				     VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
			}
			if (fs->m == NULL) {
				lwkt_reltoken(&vm_token);
				unlock_and_deallocate(fs);
				vm_waitpfault();
				return (KERN_TRY_AGAIN);
			}
		}

readrest:
		/*
		 * We have found an invalid or partially valid page, a
		 * page with a read-ahead mark which might be partially or
		 * fully valid (and maybe dirty too), or we have allocated
		 * a new page.
		 *
		 * Attempt to fault-in the page if there is a chance that the
		 * pager has it, and potentially fault in additional pages
		 * at the same time.
		 *
		 * We are NOT in splvm here and if TRYPAGER is true then
		 * fs.m will be non-NULL and will be PG_BUSY for us.
		 */
		if (TRYPAGER(fs)) {
			int rv;
			int seqaccess;
			u_char behavior = vm_map_entry_behavior(fs->entry);

			if (behavior == MAP_ENTRY_BEHAV_RANDOM)
				seqaccess = 0;
			else
				seqaccess = -1;

			/*
			 * If sequential access is detected then attempt
			 * to deactivate/cache pages behind the scan to
			 * prevent resource hogging.
			 *
			 * Use of PG_RAM to detect sequential access
			 * also simulates multi-zone sequential access
			 * detection for free.
			 *
			 * NOTE: Partially valid dirty pages cannot be
			 *	 deactivated without causing NFS piecemeal
			 *	 writes to barf.
			 */
			if ((fs->first_object->type != OBJT_DEVICE) &&
			    (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
			     (behavior != MAP_ENTRY_BEHAV_RANDOM &&
			      (fs->m->flags & PG_RAM)))
			) {
				vm_pindex_t scan_pindex;
				int scan_count = 16;

				if (first_pindex < 16) {
					scan_pindex = 0;
					scan_count = 0;
				} else {
					scan_pindex = first_pindex - 16;
					if (scan_pindex < 16)
						scan_count = scan_pindex;
					else
						scan_count = 16;
				}

				while (scan_count) {
					vm_page_t mt;

					mt = vm_page_lookup(fs->first_object,
							    scan_pindex);
					if (mt == NULL ||
					    (mt->valid != VM_PAGE_BITS_ALL)) {
						break;
					}
					if (mt->busy ||
					    (mt->flags & (PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED)) ||
					    mt->hold_count ||
					    mt->wire_count)  {
						goto skip;
					}
					vm_page_busy(mt);
					if (mt->dirty == 0)
						vm_page_test_dirty(mt);
					if (mt->dirty) {
						vm_page_protect(mt,
								VM_PROT_NONE);
						vm_page_deactivate(mt);
						vm_page_wakeup(mt);
					} else {
						vm_page_cache(mt);
					}
skip:
					--scan_count;
					--scan_pindex;
				}

				seqaccess = 1;
			}

			/*
			 * Avoid deadlocking against the map when doing I/O.
			 * We still hold a ref on fs.object and the page is
			 * PG_BUSY'd.
			 *
			 * NOTE: Once unlocked, fs->entry can become stale
			 *	 so this will NULL it out.
			 *
			 * NOTE: fs->entry is invalid until we relock the
			 *	 map and verify that the timestamp has not
			 *	 changed.
			 */
			unlock_map(fs);

			/*
			 * Acquire the page data.  We still hold a ref on
			 * fs.object and the page has been PG_BUSY'd.
			 *
			 * The pager may replace the page (for example, in
			 * order to enter a fictitious page into the
			 * object).  If it does so it is responsible for
			 * cleaning up the passed page and properly setting
			 * the new page PG_BUSY.
			 *
			 * If we got here through a PG_RAM read-ahead
			 * mark the page may be partially dirty and thus
			 * not freeable.  Don't bother checking to see
			 * if the pager has the page because we can't free
			 * it anyway.  We have to depend on the get_page
			 * operation filling in any gaps whether there is
			 * backing store or not.
			 */
			rv = vm_pager_get_page(fs->object, &fs->m, seqaccess);

			if (rv == VM_PAGER_OK) {
				/*
				 * Relookup in case pager changed page. Pager
				 * is responsible for disposition of old page
				 * if moved.
				 *
				 * XXX other code segments do relookups too.
				 * It's a bad abstraction that needs to be
				 * fixed/removed.
				 */
				fs->m = vm_page_lookup(fs->object, pindex);
				if (fs->m == NULL) {
					lwkt_reltoken(&vm_token);
					unlock_and_deallocate(fs);
					return (KERN_TRY_AGAIN);
				}

				++fs->hardfault;
				break; /* break to PAGE HAS BEEN FOUND */
			}

			/*
			 * Remove the bogus page (which does not exist at this
			 * object/offset); before doing so, we must get back
			 * our object lock to preserve our invariant.
			 *
			 * Also wake up any other process that may want to bring
			 * in this page.
			 *
			 * If this is the top-level object, we must leave the
			 * busy page to prevent another process from rushing
			 * past us, and inserting the page in that object at
			 * the same time that we are.
			 */
			if (rv == VM_PAGER_ERROR) {
				if (curproc) {
					kprintf("vm_fault: pager read error, "
						"pid %d (%s)\n",
						curproc->p_pid,
						curproc->p_comm);
				} else {
					kprintf("vm_fault: pager read error, "
						"thread %p (%s)\n",
						curthread,
						curthread->td_comm);
				}
			}

			/*
			 * Data outside the range of the pager or an I/O error
			 *
			 * The page may have been wired during the pagein,
			 * e.g. by the buffer cache, and cannot simply be
			 * freed.  Call vnode_pager_freepage() to deal with it.
			 */
			/*
			 * XXX - the check for kernel_map is a kludge to work
			 * around having the machine panic on a kernel space
			 * fault w/ I/O error.
			 */
			if (((fs->map != &kernel_map) &&
			    (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) {
				vnode_pager_freepage(fs->m);
				lwkt_reltoken(&vm_token);
				fs->m = NULL;
				unlock_and_deallocate(fs);
				if (rv == VM_PAGER_ERROR)
					return (KERN_FAILURE);
				else
					return (KERN_PROTECTION_FAILURE);
				/* NOT REACHED */
			}
			if (fs->object != fs->first_object) {
				vnode_pager_freepage(fs->m);
				fs->m = NULL;
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
			}
		}

		/*
		 * We get here if the object has a default pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (fs->object == fs->first_object)
			fs->first_m = fs->m;

		/*
		 * Move on to the next object.  Lock the next object before
		 * unlocking the current one.
		 */
		pindex += OFF_TO_IDX(fs->object->backing_object_offset);
		next_object = fs->object->backing_object;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page in the top
			 * object with zeros.
			 */
			if (fs->object != fs->first_object) {
				vm_object_pip_wakeup(fs->object);

				fs->object = fs->first_object;
				pindex = first_pindex;
				fs->m = fs->first_m;
			}
			fs->first_m = NULL;

			/*
			 * Zero the page if necessary and mark it valid.
			 */
			if ((fs->m->flags & PG_ZERO) == 0) {
				vm_page_zero_fill(fs->m);
			} else {
#ifdef PMAP_DEBUG
				pmap_page_assertzero(VM_PAGE_TO_PHYS(fs->m));
#endif
				vm_page_flag_clear(fs->m, PG_ZERO);
				mycpu->gd_cnt.v_ozfod++;
			}
			mycpu->gd_cnt.v_zfod++;
			fs->m->valid = VM_PAGE_BITS_ALL;
			break;	/* break to PAGE HAS BEEN FOUND */
		}
		if (fs->object != fs->first_object) {
			vm_object_pip_wakeup(fs->object);
		}
		KASSERT(fs->object != next_object,
			("object loop %p", next_object));
		fs->object = next_object;
		vm_object_pip_add(fs->object, 1);
	}

	/*
	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
	 * is held.]
	 *
	 * vm_token is still held
	 *
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	KASSERT((fs->m->flags & PG_BUSY) != 0,
		("vm_fault: not busy after main loop"));

	if (fs->object != fs->first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if (fault_type & VM_PROT_WRITE) {
			/*
			 * This allows pages to be virtually copied from a
			 * backing_object into the first_object, where the
			 * backing object has no other refs to it, and cannot
			 * gain any more refs.  Instead of a bcopy, we just
			 * move the page from the backing object to the
			 * first object.  Note that we must mark the page
			 * dirty in the first object so that it will go out
			 * to swap when needed.
			 */
			if (
				/*
				 * Map, if present, has not changed
				 */
				(fs->map == NULL ||
				fs->map_generation == fs->map->timestamp) &&
				/*
				 * Only one shadow object
				 */
				(fs->object->shadow_count == 1) &&
				/*
				 * No COW refs, except us
				 */
				(fs->object->ref_count == 1) &&
				/*
				 * No one else can look this object up
				 */
				(fs->object->handle == NULL) &&
				/*
				 * No other ways to look the object up
				 */
				((fs->object->type == OBJT_DEFAULT) ||
				 (fs->object->type == OBJT_SWAP)) &&
				/*
				 * We don't chase down the shadow chain
				 */
				(fs->object == fs->first_object->backing_object) &&

				/*
				 * grab the lock if we need to
				 */
				(fs->lookup_still_valid ||
				 fs->map == NULL ||
				 lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT) == 0)
			    ) {

				fs->lookup_still_valid = 1;
				/*
				 * get rid of the unnecessary page
				 */
				vm_page_protect(fs->first_m, VM_PROT_NONE);
				vm_page_free(fs->first_m);
				fs->first_m = NULL;

				/*
				 * grab the page and put it into the
				 * process'es object.  The page is
				 * automatically made dirty.
				 */
				vm_page_rename(fs->m, fs->first_object, first_pindex);
				fs->first_m = fs->m;
				vm_page_busy(fs->first_m);
				fs->m = NULL;
				mycpu->gd_cnt.v_cow_optim++;
			} else {
				/*
				 * Oh, well, lets copy it.
				 */
				vm_page_copy(fs->m, fs->first_m);
				vm_page_event(fs->m, VMEVENT_COW);
			}

			if (fs->m) {
				/*
				 * We no longer need the old page or object.
				 */
				release_page(fs);
			}

			/*
			 * fs->object != fs->first_object due to above
			 * conditional
			 */
			vm_object_pip_wakeup(fs->object);

			/*
			 * Only use the new page below...
			 */

			mycpu->gd_cnt.v_cow_faults++;
			fs->m = fs->first_m;
			fs->object = fs->first_object;
			pindex = first_pindex;
		} else {
			/*
			 * If it wasn't a write fault avoid having to copy
			 * the page by mapping it read-only.
			 */
			fs->prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * Relock the map if necessary, then check the generation count.
	 * relock_map() will update fs->map_generation to account for the
	 * relocking if necessary.
	 *
	 * If the count has changed after relocking then all sorts of
	 * crap may have happened and we have to retry.
	 *
	 * NOTE: The relock_map() can fail due to a deadlock against
	 *	 the vm_page we are holding BUSY.
	 */
	if (fs->lookup_still_valid == FALSE && fs->map) {
		if (relock_map(fs) ||
		    fs->map->timestamp != fs->map_generation) {
			release_page(fs);
			lwkt_reltoken(&vm_token);
			unlock_and_deallocate(fs);
			return (KERN_TRY_AGAIN);
		}
	}

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on pmap_is_modified()
	 * calls later.
	 *
	 * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
	 * if the page is already dirty to prevent data written with
	 * the expectation of being synced from not being synced.
	 * Likewise if this entry does not request NOSYNC then make
	 * sure the page isn't marked NOSYNC.  Applications sharing
	 * data should use the same flags to avoid ping ponging.
	 *
	 * Also tell the backing pager, if any, that it should remove
	 * any swap backing since the page is now dirty.
	 */
	if (fs->prot & VM_PROT_WRITE) {
		vm_object_set_writeable_dirty(fs->m->object);
		vm_set_nosync(fs->m, fs->entry);
		if (fs->fault_flags & VM_FAULT_DIRTY) {
			vm_page_dirty(fs->m);
			swap_pager_unswapped(fs->m);
		}
	}

	lwkt_reltoken(&vm_token);

	/*
	 * Page had better still be busy.  We are still locked up and
	 * fs->object will have another PIP reference if it is not equal
	 * to fs->first_object.
	 */
	KASSERT(fs->m->flags & PG_BUSY,
		("vm_fault: page %p not busy!", fs->m));

	/*
	 * Sanity check: page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	if (fs->m->valid != VM_PAGE_BITS_ALL) {
		vm_page_zero_invalid(fs->m, TRUE);
		kprintf("Warning: page %p partially invalid on fault\n", fs->m);
	}
	vm_page_flag_clear(fs->m, PG_ZERO);

	return (KERN_SUCCESS);
}
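
/*
 * Shadow-chain walk (descriptive sketch of the loop above):
 *
 *	fs->first_object -> backing_object -> backing_object -> NULL
 *	      ^                                                  |
 *	      |                               end of chain: zero-fill
 *	   COW target: a page found in a deeper object is copied
 *	   (or virtually moved) up here on a write fault
 *
 * Each hop adds the backing_object_offset to the pindex so the same
 * logical page is looked up in the deeper object.
 */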

/*
 * Wire down a range of virtual addresses in a map.  The entry in question
 * should be marked in-transition and the map must be locked.  We must
 * release the map temporarily while faulting-in the page to avoid a
 * deadlock.  Note that the entry may be clipped while we are blocked but
 * will never be freed.
 *
 * No requirements.
 */
int
vm_fault_wire(vm_map_t map, vm_map_entry_t entry, boolean_t user_wire)
{
	boolean_t fictitious;
	vm_offset_t start;
	vm_offset_t end;
	vm_offset_t va;
	vm_paddr_t pa;
	pmap_t pmap;
	int rv;

	pmap = vm_map_pmap(map);
	start = entry->start;
	end = entry->end;
	fictitious = entry->object.vm_object &&
			(entry->object.vm_object->type == OBJT_DEVICE);
	if (entry->eflags & MAP_ENTRY_KSTACK)
		start += PAGE_SIZE;
	lwkt_gettoken(&vm_token);
	map->timestamp++;
	vm_map_unlock(map);

	/*
	 * We simulate a fault to get the page and enter it in the physical
	 * map.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		if (user_wire) {
			rv = vm_fault(map, va, VM_PROT_READ,
					VM_FAULT_USER_WIRE);
		} else {
			rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
					VM_FAULT_CHANGE_WIRING);
		}
		if (rv) {
			while (va > start) {
				va -= PAGE_SIZE;
				if ((pa = pmap_extract(pmap, va)) == 0)
					continue;
				pmap_change_wiring(pmap, va, FALSE);
				if (!fictitious)
					vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
			}
			vm_map_lock(map);
			lwkt_reltoken(&vm_token);
			return (rv);
		}
	}
	vm_map_lock(map);
	lwkt_reltoken(&vm_token);
	return (KERN_SUCCESS);
}
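
/*
 * Example caller pattern (a sketch): the vm_map wiring code marks the
 * entry in-transition while holding the map lock, then lets this routine
 * temporarily unlock the map around the faults.  The in-transition
 * bookkeeping details are omitted here.
 *
 *	entry->eflags |= MAP_ENTRY_IN_TRANSITION;
 *	rv = vm_fault_wire(map, entry, user_wire);
 *	... map is locked again on return; clear in-transition and
 *	    handle any clipping that occurred while unlocked ...
 */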
1669 
1670 /*
1671  * Unwire a range of virtual addresses in a map.  The map should be
1672  * locked.
1673  */
1674 void
1675 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry)
1676 {
1677 	boolean_t fictitious;
1678 	vm_offset_t start;
1679 	vm_offset_t end;
1680 	vm_offset_t va;
1681 	vm_paddr_t pa;
1682 	pmap_t pmap;
1683 
1684 	pmap = vm_map_pmap(map);
1685 	start = entry->start;
1686 	end = entry->end;
1687 	fictitious = entry->object.vm_object &&
1688 			(entry->object.vm_object->type == OBJT_DEVICE);
1689 	if (entry->eflags & MAP_ENTRY_KSTACK)
1690 		start += PAGE_SIZE;
1691 
1692 	/*
1693 	 * Since the pages are wired down, we must be able to get their
1694 	 * mappings from the physical map system.
1695 	 */
1696 	lwkt_gettoken(&vm_token);
1697 	for (va = start; va < end; va += PAGE_SIZE) {
1698 		pa = pmap_extract(pmap, va);
1699 		if (pa != 0) {
1700 			pmap_change_wiring(pmap, va, FALSE);
1701 			if (!fictitious)
1702 				vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1703 		}
1704 	}
1705 	lwkt_reltoken(&vm_token);
1706 }
1707 
1708 /*
1709  * Reduce the rate at which memory is allocated to a process based
1710  * on the perceived load on the VM system. As the load increases
1711  * the allocation burst rate goes down and the delay increases.
1712  *
1713  * Rate limiting does not apply when faulting active or inactive
1714  * pages.  When faulting 'cache' pages, rate limiting only applies
1715  * if the system currently has a severe page deficit.
1716  *
1717  * XXX vm_pagesupply should be increased when a page is freed.
1718  *
1719  * We sleep up to 1/10 of a second.
1720  */
1721 static int
1722 vm_fault_ratelimit(struct vmspace *vmspace)
1723 {
1724 	if (vm_load_enable == 0)
1725 		return(0);
1726 	if (vmspace->vm_pagesupply > 0) {
1727 		--vmspace->vm_pagesupply;	/* SMP race ok */
1728 		return(0);
1729 	}
1730 #ifdef INVARIANTS
1731 	if (vm_load_debug) {
1732 		kprintf("load %-4d give %d pgs, wait %d, pid %-5d (%s)\n",
1733 			vm_load,
1734 			(1000 - vm_load) / 10, vm_load * hz / 10000,
1735 			curproc->p_pid, curproc->p_comm);
1736 	}
1737 #endif
1738 	vmspace->vm_pagesupply = (1000 - vm_load) / 10;
1739 	return(vm_load * hz / 10000);
1740 }
1741 
1742 /*
1743  * Copy all of the pages from a wired-down map entry to another.
1744  *
1745  * The source and destination maps must be locked for write.
1746  * The source map entry must be wired down (or be a sharing map
1747  * entry corresponding to a main map entry that is wired down).
1748  *
1749  * No other requirements.
1750  */
1751 void
1752 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
1753 		    vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
1754 {
1755 	vm_object_t dst_object;
1756 	vm_object_t src_object;
1757 	vm_ooffset_t dst_offset;
1758 	vm_ooffset_t src_offset;
1759 	vm_prot_t prot;
1760 	vm_offset_t vaddr;
1761 	vm_page_t dst_m;
1762 	vm_page_t src_m;
1763 
1764 #ifdef	lint
1765 	src_map++;
1766 #endif	/* lint */
1767 
1768 	src_object = src_entry->object.vm_object;
1769 	src_offset = src_entry->offset;
1770 
1771 	/*
1772 	 * Create the top-level object for the destination entry. (Doesn't
1773 	 * actually shadow anything - we copy the pages directly.)
1774 	 */
1775 	vm_map_entry_allocate_object(dst_entry);
1776 	dst_object = dst_entry->object.vm_object;
1777 
1778 	prot = dst_entry->max_protection;
1779 
1780 	/*
1781 	 * Loop through all of the pages in the entry's range, copying each
1782 	 * one from the source object (it should be there) to the destination
1783 	 * object.
1784 	 */
1785 	for (vaddr = dst_entry->start, dst_offset = 0;
1786 	    vaddr < dst_entry->end;
1787 	    vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
1788 
1789 		/*
1790 		 * Allocate a page in the destination object
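		 * (vm_page_alloc() can fail transiently when memory is
		 * short; vm_wait() blocks until pages are freed, then
		 * we retry.)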
1791 		 */
1792 		do {
1793 			dst_m = vm_page_alloc(dst_object,
1794 				OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
1795 			if (dst_m == NULL) {
1796 				vm_wait(0);
1797 			}
1798 		} while (dst_m == NULL);
1799 
1800 		/*
1801 		 * Find the page in the source object, and copy it in.
1802 		 * (Because the source is wired down, the page will be in
1803 		 * memory.)
1804 		 */
1805 		src_m = vm_page_lookup(src_object,
1806 				       OFF_TO_IDX(dst_offset + src_offset));
1807 		if (src_m == NULL)
1808 			panic("vm_fault_copy_entry: page missing");
1809 
1810 		vm_page_copy(src_m, dst_m);
1811 		vm_page_event(src_m, VMEVENT_COW);
1812 
1813 		/*
1814 		 * Enter it in the pmap...
1815 		 */
1816 
1817 		vm_page_flag_clear(dst_m, PG_ZERO);
1818 		pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
1819 
1820 		/*
1821 		 * Mark it no longer busy, and put it on the active list.
1822 		 */
1823 		vm_page_activate(dst_m);
1824 		vm_page_wakeup(dst_m);
1825 	}
1826 }
1827 
1828 #if 0
1829 
1830 /*
1831  * This routine checks around the requested page for other pages that
1832  * might also be faulted in, bracketing the range of viable pages
1833  * around the requested page.
1834  *
1835  * Inputs:
1836  *	m, rbehind, rahead
1837  *
1838  * Outputs:
1839  *  marray (array of vm_page_t), reqpage (index of requested page)
1840  *
1841  * Return value:
1842  *  number of pages in marray
1843  */
1844 static int
1845 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
1846 			  vm_page_t *marray, int *reqpage)
1847 {
1848 	int i,j;
1849 	vm_object_t object;
1850 	vm_pindex_t pindex, startpindex, endpindex, tpindex;
1851 	vm_page_t rtm;
1852 	int cbehind, cahead;
1853 
1854 	object = m->object;
1855 	pindex = m->pindex;
1856 
1857 	/*
1858 	 * we don't fault-ahead for the device pager
1859 	 */
1860 	if (object->type == OBJT_DEVICE) {
1861 		*reqpage = 0;
1862 		marray[0] = m;
1863 		return 1;
1864 	}
1865 
1866 	/*
1867 	 * if the requested page is not available, then give up now
1868 	 */
1869 	if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
1870 		*reqpage = 0;	/* not used by caller, fix compiler warn */
1871 		return 0;
1872 	}
1873 
1874 	if ((cbehind == 0) && (cahead == 0)) {
1875 		*reqpage = 0;
1876 		marray[0] = m;
1877 		return 1;
1878 	}
1879 
1880 	if (rahead > cahead) {
1881 		rahead = cahead;
1882 	}
1883 
1884 	if (rbehind > cbehind) {
1885 		rbehind = cbehind;
1886 	}
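	/*
	 * rbehind/rahead are now clamped to the number of pages the
	 * pager reports it can supply behind/ahead of the request.
	 */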
1887 
1888 	/*
1889 	 * Do not do any readahead if we have insufficient free memory.
1890 	 *
1891 	 * XXX this code was broken and disabled before, and is unstable
1892 	 * with this conditional fixed, so shortcut for now.
1893 	 */
1894 	if (burst_fault == 0 || vm_page_count_severe()) {
1895 		marray[0] = m;
1896 		*reqpage = 0;
1897 		return 1;
1898 	}
1899 
1900 	/*
1901 	 * scan backward for the read-behind pages -- in memory
1902 	 *
1903 	 * Assume that if the page is not found an interrupt will not
1904 	 * create it.  Theoretically interrupts can only remove (busy)
1905 	 * pages, not create new associations.
1906 	 */
1907 	if (pindex > 0) {
1908 		if (rbehind > pindex) {
1909 			rbehind = pindex;
1910 			startpindex = 0;
1911 		} else {
1912 			startpindex = pindex - rbehind;
1913 		}
1914 
1915 		lwkt_gettoken(&vm_token);
1916 		for (tpindex = pindex; tpindex > startpindex; --tpindex) {
1917 			if (vm_page_lookup(object, tpindex - 1))
1918 				break;
1919 		}
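		/*
		 * tpindex now sits just above the closest pre-existing
		 * page below the request (or at startpindex).  Allocate
		 * the read-behind pages from there up to pindex.
		 */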
1920 
1921 		i = 0;
1922 		while (tpindex < pindex) {
1923 			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM);
1924 			if (rtm == NULL) {
1925 				lwkt_reltoken(&vm_token);
1926 				for (j = 0; j < i; j++) {
1927 					vm_page_free(marray[j]);
1928 				}
1929 				marray[0] = m;
1930 				*reqpage = 0;
1931 				return 1;
1932 			}
1933 			marray[i] = rtm;
1934 			++i;
1935 			++tpindex;
1936 		}
1937 		lwkt_reltoken(&vm_token);
1938 	} else {
1939 		i = 0;
1940 	}
1941 
1942 	/*
1943 	 * Assign requested page
1944 	 */
1945 	marray[i] = m;
1946 	*reqpage = i;
1947 	++i;
1948 
1949 	/*
1950 	 * Scan forwards for read-ahead pages
1951 	 */
1952 	tpindex = pindex + 1;
1953 	endpindex = tpindex + rahead;
1954 	if (endpindex > object->size)
1955 		endpindex = object->size;
1956 
1957 	lwkt_gettoken(&vm_token);
1958 	while (tpindex < endpindex) {
1959 		if (vm_page_lookup(object, tpindex))
1960 			break;
1961 		rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM);
1962 		if (rtm == NULL)
1963 			break;
1964 		marray[i] = rtm;
1965 		++i;
1966 		++tpindex;
1967 	}
1968 	lwkt_reltoken(&vm_token);
1969 
1970 	return (i);
1971 }
1972 
1973 #endif
1974 
1975 /*
1976  * vm_prefault() provides a quick way of clustering pagefaults into a
1977  * processes address space.  It is a "cousin" of pmap_object_init_pt,
1978  * except it runs at page fault time instead of mmap time.
1979  *
1980  * This code used to be per-platform pmap_prefault().  It is now
1981  * machine-independent and enhanced to also pre-fault zero-fill pages
1982  * (see vm.fast_fault) as well as make them writable, which greatly
1983  * reduces the number of page faults programs incur.
1984  *
1985  * Application performance when pre-faulting zero-fill pages is heavily
1986  * dependent on the application.  Very tiny applications like /bin/echo
1987  * lose a little performance while applications of any appreciable size
1988  * gain performance.  Prefaulting multiple pages also reduces SMP
1989  * congestion and can improve SMP performance significantly.
1990  *
1991  * NOTE!  prot may allow writing but this only applies to the top level
1992  *	  object.  If we wind up mapping a page extracted from a backing
1993  *	  object we have to make sure it is read-only.
1994  *
1995  * NOTE!  The caller has already handled any COW operations on the
1996  *	  vm_map_entry via the normal fault code.  Do NOT call this
1997  *	  shortcut unless the normal fault code has run on this entry.
1998  *
1999  * No other requirements.
2000  */
2001 #define PFBAK 4
2002 #define PFFOR 4
2003 #define PAGEORDER_SIZE (PFBAK+PFFOR)
2004 
2005 static int vm_prefault_pageorder[] = {
2006 	-PAGE_SIZE, PAGE_SIZE,
2007 	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
2008 	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
2009 	-4 * PAGE_SIZE, 4 * PAGE_SIZE
2010 };
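/*
 * The table above probes addresses alternately behind and ahead of the
 * faulting address, nearest first: -4K, +4K, -8K, +8K, -12K, +12K,
 * -16K, +16K (assuming 4KB pages).
 */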
2011 
2012 /*
2013  * Set PG_NOSYNC if the map entry indicates so, but only if the page
2014  * is not already dirty by other means.  This will prevent passive
2015  * filesystem syncing as well as 'sync' from writing out the page.
2016  */
2017 static void
2018 vm_set_nosync(vm_page_t m, vm_map_entry_t entry)
2019 {
2020 	if (entry->eflags & MAP_ENTRY_NOSYNC) {
2021 		if (m->dirty == 0)
2022 			vm_page_flag_set(m, PG_NOSYNC);
2023 	} else {
2024 		vm_page_flag_clear(m, PG_NOSYNC);
2025 	}
2026 }
2027 
2028 static void
2029 vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry, int prot)
2030 {
2031 	struct lwp *lp;
2032 	vm_page_t m;
2033 	vm_offset_t starta;
2034 	vm_offset_t addr;
2035 	vm_pindex_t index;
2036 	vm_pindex_t pindex;
2037 	vm_object_t object;
2038 	int pprot;
2039 	int i;
2040 
2041 	/*
2042 	 * We do not currently prefault mappings that use virtual page
2043 	 * tables.  We do not prefault foreign pmaps.
2044 	 */
2045 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
2046 		return;
2047 	lp = curthread->td_lwp;
2048 	if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
2049 		return;
2050 
2051 	lwkt_gettoken(&vm_token);
2052 
2053 	object = entry->object.vm_object;
2054 	KKASSERT(object != NULL);
2055 	vm_object_hold(object);
2056 
2057 	starta = addra - PFBAK * PAGE_SIZE;
2058 	if (starta < entry->start)
2059 		starta = entry->start;
2060 	else if (starta > addra)
2061 		starta = 0;
2062 
2063 	KKASSERT(object == entry->object.vm_object);
2064 	for (i = 0; i < PAGEORDER_SIZE; i++) {
2065 		vm_object_t lobject;
2066 		vm_object_t nobject;
2067 		int allocated = 0;
2068 
2069 		addr = addra + vm_prefault_pageorder[i];
2070 		if (addr > addra + (PFFOR * PAGE_SIZE))
2071 			addr = 0;
2072 
2073 		if (addr < starta || addr >= entry->end)
2074 			continue;
2075 
2076 		if (pmap_prefault_ok(pmap, addr) == 0)
2077 			continue;
2078 
2079 		/*
2080 		 * Follow the VM object chain to obtain the page to be mapped
2081 		 * into the pmap.
2082 		 *
2083 		 * If we reach the terminal object without finding a page
2084 		 * and we determine it would be advantageous, then allocate
2085 		 * a zero-fill page for the base object.  The base object
2086 		 * is guaranteed to be OBJT_DEFAULT for this case.
2087 		 *
2088 		 * To avoid having to query the pager via *haspage*() we
2089 		 * stop if any non-default object is encountered, e.g. a
2090 		 * vnode or swap object stops the loop.
2091 		 *
2092 		 * XXX It is unclear whether hold chaining is sufficient
2093 		 *     to maintain the validity of the backing object chain.
2094 		 */
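		/*
		 * Translate the candidate address into a page index within
		 * the top-level object; entry->offset is the byte offset
		 * of entry->start into that object.
		 */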
2095 		index = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
2096 		lobject = object;
2097 		pindex = index;
2098 		pprot = prot;
2099 
2100 		KKASSERT(lobject == entry->object.vm_object);
2101 		vm_object_hold(lobject);
2102 
2103 		while ((m = vm_page_lookup(lobject, pindex)) == NULL) {
2104 			if (lobject->type != OBJT_DEFAULT)
2105 				break;
2106 			if (lobject->backing_object == NULL) {
2107 				if (vm_fast_fault == 0)
2108 					break;
2109 				if (vm_prefault_pageorder[i] < 0 ||
2110 				    (prot & VM_PROT_WRITE) == 0 ||
2111 				    vm_page_count_min(0)) {
2112 					break;
2113 				}
2114 
2115 				/* NOTE: allocated from base object */
2116 				m = vm_page_alloc(object, index,
2117 					      VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
				if (m == NULL)	/* can fail despite the check above */
					break;
2118 
2119 				if ((m->flags & PG_ZERO) == 0) {
2120 					vm_page_zero_fill(m);
2121 				} else {
2122 #ifdef PMAP_DEBUG
2123 					pmap_page_assertzero(
2124 							VM_PAGE_TO_PHYS(m));
2125 #endif
2126 					vm_page_flag_clear(m, PG_ZERO);
2127 					mycpu->gd_cnt.v_ozfod++;
2128 				}
2129 				mycpu->gd_cnt.v_zfod++;
2130 				m->valid = VM_PAGE_BITS_ALL;
2131 				allocated = 1;
2132 				pprot = prot;
2133 				/* lobject = object .. not needed */
2134 				break;
2135 			}
2136 			if (lobject->backing_object_offset & PAGE_MASK)
2137 				break;
2138 			while ((nobject = lobject->backing_object) != NULL) {
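				/*
				 * Acquire a hold on the backing object,
				 * then re-check it; the chain can change
				 * while vm_object_hold() blocks.
				 */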
2139 				vm_object_hold(nobject);
2140 				if (nobject == lobject->backing_object) {
2141 					pindex +=
2142 					    lobject->backing_object_offset >>
2143 					    PAGE_SHIFT;
2144 					vm_object_lock_swap();
2145 					vm_object_drop(lobject);
2146 					lobject = nobject;
2147 					break;
2148 				}
2149 				vm_object_drop(nobject);
2150 			}
2151 			if (nobject == NULL) {
2152 				kprintf("vm_prefault: Warning, backing object "
2153 					"race averted lobject %p\n",
2154 					lobject);
2155 				continue;
2156 			}
2157 			pprot &= ~VM_PROT_WRITE;
2158 		}
2159 		vm_object_drop(lobject);
2160 
2161 		/*
2162 		 * NOTE: lobject now invalid (if we did a zero-fill we didn't
2163 		 *	 bother assigning lobject = object).
2164 		 *
2165 		 * Give up if the page is not available.
2166 		 */
2167 		if (m == NULL)
2168 			break;
2169 
2170 		/*
2171 		 * Do not conditionalize on PG_RAM.  If pages are present in
2172 		 * the VM system we assume optimal caching.  If caching is
2173 		 * not optimal the I/O gravy train will be restarted when we
2174 		 * hit an unavailable page.  We do not want to try to restart
2175 		 * the gravy train now because we really don't know how much
2176 		 * of the object has been cached.  The cost for restarting
2177 		 * the gravy train should be low (since accesses will likely
2178 		 * be I/O bound anyway).
2179 		 *
2180 		 * The object must be marked dirty if we are mapping a
2181 		 * writable page.
2182 		 */
2183 		if (pprot & VM_PROT_WRITE)
2184 			vm_object_set_writeable_dirty(m->object);
2185 
2186 		/*
2187 		 * Enter the page into the pmap if appropriate.  If we had
2188 		 * allocated the page we have to place it on a queue.  If not
2189 		 * we just have to make sure it isn't on the cache queue
2190 		 * (pages on the cache queue are not allowed to be mapped).
2191 		 */
2192 		if (allocated) {
2193 			if (pprot & VM_PROT_WRITE)
2194 				vm_set_nosync(m, entry);
2195 			pmap_enter(pmap, addr, m, pprot, 0);
2196 			vm_page_deactivate(m);
2197 			vm_page_wakeup(m);
2198 		} else if (
2199 		    ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2200 		    (m->busy == 0) &&
2201 		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2202 			/*
2203 			 * A fully valid page not undergoing soft I/O can
2204 			 * be immediately entered into the pmap.
2205 			 */
2206 			vm_page_busy(m);
2207 			if ((m->queue - m->pc) == PQ_CACHE) {
2208 				vm_page_deactivate(m);
2209 			}
2210 			if (pprot & VM_PROT_WRITE)
2211 				vm_set_nosync(m, entry);
2212 			pmap_enter(pmap, addr, m, pprot, 0);
2213 			vm_page_wakeup(m);
2214 		}
2215 	}
2216 	vm_object_drop(object);
2217 	lwkt_reltoken(&vm_token);
2218 }
2219