xref: /dragonfly/sys/vm/vm_fault.c (revision bcb3e04d)
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  *
11  *
12  * This code is derived from software contributed to Berkeley by
13  * The Mach Operating System project at Carnegie-Mellon University.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions and the following disclaimer.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. All advertising materials mentioning features or use of this software
24  *    must display the following acknowledgement:
25  *	This product includes software developed by the University of
26  *	California, Berkeley and its contributors.
27  * 4. Neither the name of the University nor the names of its contributors
28  *    may be used to endorse or promote products derived from this software
29  *    without specific prior written permission.
30  *
31  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41  * SUCH DAMAGE.
42  *
43  *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
44  *
45  *
46  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
47  * All rights reserved.
48  *
49  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
50  *
51  * Permission to use, copy, modify and distribute this software and
52  * its documentation is hereby granted, provided that both the copyright
53  * notice and this permission notice appear in all copies of the
54  * software, derivative works or modified versions, and any portions
55  * thereof, and that both notices appear in supporting documentation.
56  *
57  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
58  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
59  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
60  *
61  * Carnegie Mellon requests users of this software to return to
62  *
63  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
64  *  School of Computer Science
65  *  Carnegie Mellon University
66  *  Pittsburgh PA 15213-3890
67  *
68  * any improvements or extensions that they make and grant Carnegie the
69  * rights to redistribute these changes.
70  *
71  * $FreeBSD: src/sys/vm/vm_fault.c,v 1.108.2.8 2002/02/26 05:49:27 silby Exp $
72  * $DragonFly: src/sys/vm/vm_fault.c,v 1.47 2008/07/01 02:02:56 dillon Exp $
73  */
74 
75 /*
76  *	Page fault handling module.
77  */
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc.h>
83 #include <sys/vnode.h>
84 #include <sys/resourcevar.h>
85 #include <sys/vmmeter.h>
86 #include <sys/vkernel.h>
87 #include <sys/lock.h>
88 #include <sys/sysctl.h>
89 
90 #include <cpu/lwbuf.h>
91 
92 #include <vm/vm.h>
93 #include <vm/vm_param.h>
94 #include <vm/pmap.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_page.h>
98 #include <vm/vm_pageout.h>
99 #include <vm/vm_kern.h>
100 #include <vm/vm_pager.h>
101 #include <vm/vnode_pager.h>
102 #include <vm/vm_extern.h>
103 
104 #include <sys/thread2.h>
105 #include <vm/vm_page2.h>
106 
107 struct faultstate {
108 	vm_page_t m;
109 	vm_object_t object;
110 	vm_pindex_t pindex;
111 	vm_prot_t prot;
112 	vm_page_t first_m;
113 	vm_object_t first_object;
114 	vm_prot_t first_prot;
115 	vm_map_t map;
116 	vm_map_entry_t entry;
117 	int lookup_still_valid;
118 	int didlimit;
119 	int hardfault;
120 	int fault_flags;
121 	int map_generation;
122 	boolean_t wired;
123 	struct vnode *vp;
124 };
125 
126 static int vm_fast_fault = 1;
127 SYSCTL_INT(_vm, OID_AUTO, fast_fault, CTLFLAG_RW, &vm_fast_fault, 0,
128 	   "Burst fault zero-fill regions");
129 static int debug_cluster = 0;
130 SYSCTL_INT(_vm, OID_AUTO, debug_cluster, CTLFLAG_RW, &debug_cluster, 0, "");
131 
132 static int vm_fault_object(struct faultstate *, vm_pindex_t, vm_prot_t);
133 static int vm_fault_vpagetable(struct faultstate *, vm_pindex_t *, vpte_t, int);
134 #if 0
135 static int vm_fault_additional_pages (vm_page_t, int, int, vm_page_t *, int *);
136 #endif
137 static int vm_fault_ratelimit(struct vmspace *);
138 static void vm_set_nosync(vm_page_t m, vm_map_entry_t entry);
139 static void vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry,
140 			int prot);
141 
142 /*
143  * The caller must hold vm_token.
144  */
145 static __inline void
146 release_page(struct faultstate *fs)
147 {
148 	vm_page_deactivate(fs->m);
149 	vm_page_wakeup(fs->m);
150 	fs->m = NULL;
151 }
152 
153 /*
154  * The caller must hold vm_token.
155  */
156 static __inline void
157 unlock_map(struct faultstate *fs)
158 {
159 	if (fs->lookup_still_valid && fs->map) {
160 		vm_map_lookup_done(fs->map, fs->entry, 0);
161 		fs->lookup_still_valid = FALSE;
162 	}
163 }
164 
165 /*
166  * Clean up after a successful call to vm_fault_object() so another call
167  * to vm_fault_object() can be made.
168  *
169  * The caller must hold vm_token.
170  */
171 static void
172 _cleanup_successful_fault(struct faultstate *fs, int relock)
173 {
174 	if (fs->object != fs->first_object) {
175 		vm_page_free(fs->first_m);
176 		vm_object_pip_wakeup(fs->object);
177 		fs->first_m = NULL;
178 	}
179 	fs->object = fs->first_object;
180 	if (relock && fs->lookup_still_valid == FALSE) {
181 		if (fs->map)
182 			vm_map_lock_read(fs->map);
183 		fs->lookup_still_valid = TRUE;
184 	}
185 }
186 
187 /*
188  * The caller must hold vm_token.
189  */
190 static void
191 _unlock_things(struct faultstate *fs, int dealloc)
192 {
193 	vm_object_pip_wakeup(fs->first_object);
194 	_cleanup_successful_fault(fs, 0);
195 	if (dealloc) {
196 		vm_object_deallocate(fs->first_object);
197 		fs->first_object = NULL;
198 	}
199 	unlock_map(fs);
200 	if (fs->vp != NULL) {
201 		vput(fs->vp);
202 		fs->vp = NULL;
203 	}
204 }
205 
206 #define unlock_things(fs) _unlock_things(fs, 0)
207 #define unlock_and_deallocate(fs) _unlock_things(fs, 1)
208 #define cleanup_successful_fault(fs) _cleanup_successful_fault(fs, 1)
209 
210 /*
211  * TRYPAGER
212  *
213  * Determine if the pager for the current object *might* contain the page.
214  *
215  * We only need to try the pager if this is not a default object (default
216  * objects are zero-fill and have no real pager), and if we are not taking
217  * a wiring fault or if the FS entry is wired.
218  */
219 #define TRYPAGER(fs)	\
220 		(fs->object->type != OBJT_DEFAULT && \
221 		(((fs->fault_flags & VM_FAULT_WIRE_MASK) == 0) || fs->wired))
222 
223 /*
224  * vm_fault:
225  *
226  * Handle a page fault occuring at the given address, requiring the given
227  * permissions, in the map specified.  If successful, the page is inserted
228  * into the associated physical map.
229  *
230  * NOTE: The given address should be truncated to the proper page address.
231  *
232  * KERN_SUCCESS is returned if the page fault is handled; otherwise,
233  * a standard error specifying why the fault is fatal is returned.
234  *
235  * The map in question must be referenced, and remains so.
236  * The caller may hold no locks.
237  * No other requirements.
238  */
239 int
240 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
241 {
242 	int result;
243 	vm_pindex_t first_pindex;
244 	struct faultstate fs;
245 	int growstack;
246 
247 	mycpu->gd_cnt.v_vm_faults++;
248 
249 	fs.didlimit = 0;
250 	fs.hardfault = 0;
251 	fs.fault_flags = fault_flags;
252 	growstack = 1;
253 
254 RetryFault:
255 	/*
256 	 * Find the vm_map_entry representing the backing store and resolve
257 	 * the top level object and page index.  This may have the side
258 	 * effect of executing a copy-on-write on the map entry and/or
259 	 * creating a shadow object, but will not COW any actual VM pages.
260 	 *
261 	 * On success fs.map is left read-locked and various other fields
262 	 * are initialized but not otherwise referenced or locked.
263 	 *
264 	 * NOTE!  vm_map_lookup will try to upgrade the fault_type to
265 	 * VM_FAULT_WRITE if the map entry is a virtual page table and also
266 	 * writable, so we can set the 'A'accessed bit in the virtual page
267 	 * table entry.
268 	 */
269 	fs.map = map;
270 	result = vm_map_lookup(&fs.map, vaddr, fault_type,
271 			       &fs.entry, &fs.first_object,
272 			       &first_pindex, &fs.first_prot, &fs.wired);
273 
274 	/*
275 	 * If the lookup failed or the map protections are incompatible,
276 	 * the fault generally fails.  However, if the caller is trying
277 	 * to do a user wiring we have more work to do.
278 	 */
279 	if (result != KERN_SUCCESS) {
280 		if (result != KERN_PROTECTION_FAILURE ||
281 		    (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)
282 		{
283 			if (result == KERN_INVALID_ADDRESS && growstack &&
284 			    map != &kernel_map && curproc != NULL) {
285 				result = vm_map_growstack(curproc, vaddr);
286 				if (result != KERN_SUCCESS)
287 					return (KERN_FAILURE);
288 				growstack = 0;
289 				goto RetryFault;
290 			}
291 			return (result);
292 		}
293 
294 		/*
295    		 * If we are user-wiring a r/w segment, and it is COW, then
296    		 * we need to do the COW operation.  Note that we don't
297 		 * currently COW RO sections now, because it is NOT desirable
298    		 * to COW .text.  We simply keep .text from ever being COW'ed
299    		 * and take the heat that one cannot debug wired .text sections.
300    		 */
301 		result = vm_map_lookup(&fs.map, vaddr,
302 				       VM_PROT_READ|VM_PROT_WRITE|
303 				        VM_PROT_OVERRIDE_WRITE,
304 				       &fs.entry, &fs.first_object,
305 				       &first_pindex, &fs.first_prot,
306 				       &fs.wired);
307 		if (result != KERN_SUCCESS)
308 			return result;
309 
310 		/*
311 		 * If we don't COW now, on a user wire, the user will never
312 		 * be able to write to the mapping.  If we don't make this
313 		 * restriction, the bookkeeping would be nearly impossible.
314 		 */
315 		if ((fs.entry->protection & VM_PROT_WRITE) == 0)
316 			fs.entry->max_protection &= ~VM_PROT_WRITE;
317 	}
318 
319 	/*
320 	 * fs.map is read-locked
321 	 *
322 	 * Misc checks.  Save the map generation number to detect races.
323 	 */
324 	fs.map_generation = fs.map->timestamp;
325 
326 	if (fs.entry->eflags & (MAP_ENTRY_NOFAULT | MAP_ENTRY_KSTACK)) {
327 		if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
328 			panic("vm_fault: fault on nofault entry, addr: %p",
329 			      (void *)vaddr);
330 		}
331 		if ((fs.entry->eflags & MAP_ENTRY_KSTACK) &&
332 		    vaddr >= fs.entry->start &&
333 		    vaddr < fs.entry->start + PAGE_SIZE) {
334 			panic("vm_fault: fault on stack guard, addr: %p",
335 			      (void *)vaddr);
336 		}
337 	}
338 
339 	/*
340 	 * A system map entry may return a NULL object.  No object means
341 	 * no pager means an unrecoverable kernel fault.
342 	 */
343 	if (fs.first_object == NULL) {
344 		panic("vm_fault: unrecoverable fault at %p in entry %p",
345 			(void *)vaddr, fs.entry);
346 	}
347 
348 	/*
349 	 * Make a reference to this object to prevent its disposal while we
350 	 * are messing with it.  Once we have the reference, the map is free
351 	 * to be diddled.  Since objects reference their shadows (and copies),
352 	 * they will stay around as well.
353 	 *
354 	 * Bump the paging-in-progress count to prevent size changes (e.g.
355 	 * truncation operations) during I/O.  This must be done after
356 	 * obtaining the vnode lock in order to avoid possible deadlocks.
357 	 *
358 	 * The vm_token is needed to manipulate the vm_object
359 	 */
360 	lwkt_gettoken(&vm_token);
361 	vm_object_reference(fs.first_object);
362 	fs.vp = vnode_pager_lock(fs.first_object);
363 	vm_object_pip_add(fs.first_object, 1);
364 	lwkt_reltoken(&vm_token);
365 
366 	fs.lookup_still_valid = TRUE;
367 	fs.first_m = NULL;
368 	fs.object = fs.first_object;	/* so unlock_and_deallocate works */
369 
370 	/*
371 	 * If the entry is wired we cannot change the page protection.
372 	 */
373 	if (fs.wired)
374 		fault_type = fs.first_prot;
375 
376 	/*
377 	 * The page we want is at (first_object, first_pindex), but if the
378 	 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
379 	 * page table to figure out the actual pindex.
380 	 *
381 	 * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
382 	 * ONLY
383 	 */
384 	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
385 		result = vm_fault_vpagetable(&fs, &first_pindex,
386 					     fs.entry->aux.master_pde,
387 					     fault_type);
388 		if (result == KERN_TRY_AGAIN)
389 			goto RetryFault;
390 		if (result != KERN_SUCCESS)
391 			return (result);
392 	}
393 
394 	/*
395 	 * Now we have the actual (object, pindex), fault in the page.  If
396 	 * vm_fault_object() fails it will unlock and deallocate the FS
397 	 * data.   If it succeeds everything remains locked and fs->object
398 	 * will have an additional PIP count if it is not equal to
399 	 * fs->first_object
400 	 *
401 	 * vm_fault_object will set fs->prot for the pmap operation.  It is
402 	 * allowed to set VM_PROT_WRITE if fault_type == VM_PROT_READ if the
403 	 * page can be safely written.  However, it will force a read-only
404 	 * mapping for a read fault if the memory is managed by a virtual
405 	 * page table.
406 	 */
407 	result = vm_fault_object(&fs, first_pindex, fault_type);
408 
409 	if (result == KERN_TRY_AGAIN)
410 		goto RetryFault;
411 	if (result != KERN_SUCCESS)
412 		return (result);
413 
414 	/*
415 	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
416 	 * will contain a busied page.
417 	 *
418 	 * Enter the page into the pmap and do pmap-related adjustments.
419 	 */
420 	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);
421 
422 	/*
423 	 * Burst in a few more pages if possible.  The fs.map should still
424 	 * be locked.
425 	 */
426 	if (fault_flags & VM_FAULT_BURST) {
427 		if ((fs.fault_flags & VM_FAULT_WIRE_MASK) == 0 &&
428 		    fs.wired == 0) {
429 			vm_prefault(fs.map->pmap, vaddr, fs.entry, fs.prot);
430 		}
431 	}
432 	unlock_things(&fs);
433 
434 	vm_page_flag_clear(fs.m, PG_ZERO);
435 	vm_page_flag_set(fs.m, PG_REFERENCED);
436 
437 	/*
438 	 * If the page is not wired down, then put it where the pageout daemon
439 	 * can find it.
440 	 *
441 	 * We do not really need to get vm_token here but since all the
442 	 * vm_*() calls have to doing it here improves efficiency.
443 	 */
444 	lwkt_gettoken(&vm_token);
445 	if (fs.fault_flags & VM_FAULT_WIRE_MASK) {
446 		if (fs.wired)
447 			vm_page_wire(fs.m);
448 		else
449 			vm_page_unwire(fs.m, 1);
450 	} else {
451 		vm_page_activate(fs.m);
452 	}
453 
454 	if (curthread->td_lwp) {
455 		if (fs.hardfault) {
456 			curthread->td_lwp->lwp_ru.ru_majflt++;
457 		} else {
458 			curthread->td_lwp->lwp_ru.ru_minflt++;
459 		}
460 	}
461 
462 	/*
463 	 * Unlock everything, and return
464 	 */
465 	vm_page_wakeup(fs.m);
466 	vm_object_deallocate(fs.first_object);
467 	lwkt_reltoken(&vm_token);
468 
469 	return (KERN_SUCCESS);
470 }
471 
472 /*
473  * Fault in the specified virtual address in the current process map,
474  * returning a held VM page or NULL.  See vm_fault_page() for more
475  * information.
476  *
477  * No requirements.
478  */
479 vm_page_t
480 vm_fault_page_quick(vm_offset_t va, vm_prot_t fault_type, int *errorp)
481 {
482 	struct lwp *lp = curthread->td_lwp;
483 	vm_page_t m;
484 
485 	m = vm_fault_page(&lp->lwp_vmspace->vm_map, va,
486 			  fault_type, VM_FAULT_NORMAL, errorp);
487 	return(m);
488 }
489 
490 /*
491  * Fault in the specified virtual address in the specified map, doing all
492  * necessary manipulation of the object store and all necessary I/O.  Return
493  * a held VM page or NULL, and set *errorp.  The related pmap is not
494  * updated.
495  *
496  * The returned page will be properly dirtied if VM_PROT_WRITE was specified,
497  * and marked PG_REFERENCED as well.
498  *
499  * If the page cannot be faulted writable and VM_PROT_WRITE was specified, an
500  * error will be returned.
501  *
502  * No requirements.
503  */
504 vm_page_t
505 vm_fault_page(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
506 	      int fault_flags, int *errorp)
507 {
508 	vm_pindex_t first_pindex;
509 	struct faultstate fs;
510 	int result;
511 	vm_prot_t orig_fault_type = fault_type;
512 
513 	mycpu->gd_cnt.v_vm_faults++;
514 
515 	fs.didlimit = 0;
516 	fs.hardfault = 0;
517 	fs.fault_flags = fault_flags;
518 	KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);
519 
520 RetryFault:
521 	/*
522 	 * Find the vm_map_entry representing the backing store and resolve
523 	 * the top level object and page index.  This may have the side
524 	 * effect of executing a copy-on-write on the map entry and/or
525 	 * creating a shadow object, but will not COW any actual VM pages.
526 	 *
527 	 * On success fs.map is left read-locked and various other fields
528 	 * are initialized but not otherwise referenced or locked.
529 	 *
530 	 * NOTE!  vm_map_lookup will upgrade the fault_type to VM_FAULT_WRITE
531 	 * if the map entry is a virtual page table and also writable,
532 	 * so we can set the 'A'accessed bit in the virtual page table entry.
533 	 */
534 	fs.map = map;
535 	result = vm_map_lookup(&fs.map, vaddr, fault_type,
536 			       &fs.entry, &fs.first_object,
537 			       &first_pindex, &fs.first_prot, &fs.wired);
538 
539 	if (result != KERN_SUCCESS) {
540 		*errorp = result;
541 		return (NULL);
542 	}
543 
544 	/*
545 	 * fs.map is read-locked
546 	 *
547 	 * Misc checks.  Save the map generation number to detect races.
548 	 */
549 	fs.map_generation = fs.map->timestamp;
550 
551 	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
552 		panic("vm_fault: fault on nofault entry, addr: %lx",
553 		    (u_long)vaddr);
554 	}
555 
556 	/*
557 	 * A system map entry may return a NULL object.  No object means
558 	 * no pager means an unrecoverable kernel fault.
559 	 */
560 	if (fs.first_object == NULL) {
561 		panic("vm_fault: unrecoverable fault at %p in entry %p",
562 			(void *)vaddr, fs.entry);
563 	}
564 
565 	/*
566 	 * Make a reference to this object to prevent its disposal while we
567 	 * are messing with it.  Once we have the reference, the map is free
568 	 * to be diddled.  Since objects reference their shadows (and copies),
569 	 * they will stay around as well.
570 	 *
571 	 * Bump the paging-in-progress count to prevent size changes (e.g.
572 	 * truncation operations) during I/O.  This must be done after
573 	 * obtaining the vnode lock in order to avoid possible deadlocks.
574 	 *
575 	 * The vm_token is needed to manipulate the vm_object
576 	 */
577 	lwkt_gettoken(&vm_token);
578 	vm_object_reference(fs.first_object);
579 	fs.vp = vnode_pager_lock(fs.first_object);
580 	vm_object_pip_add(fs.first_object, 1);
581 	lwkt_reltoken(&vm_token);
582 
583 	fs.lookup_still_valid = TRUE;
584 	fs.first_m = NULL;
585 	fs.object = fs.first_object;	/* so unlock_and_deallocate works */
586 
587 	/*
588 	 * If the entry is wired we cannot change the page protection.
589 	 */
590 	if (fs.wired)
591 		fault_type = fs.first_prot;
592 
593 	/*
594 	 * The page we want is at (first_object, first_pindex), but if the
595 	 * vm_map_entry is VM_MAPTYPE_VPAGETABLE we have to traverse the
596 	 * page table to figure out the actual pindex.
597 	 *
598 	 * NOTE!  DEVELOPMENT IN PROGRESS, THIS IS AN INITIAL IMPLEMENTATION
599 	 * ONLY
600 	 */
601 	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
602 		result = vm_fault_vpagetable(&fs, &first_pindex,
603 					     fs.entry->aux.master_pde,
604 					     fault_type);
605 		if (result == KERN_TRY_AGAIN)
606 			goto RetryFault;
607 		if (result != KERN_SUCCESS) {
608 			*errorp = result;
609 			return (NULL);
610 		}
611 	}
612 
613 	/*
614 	 * Now we have the actual (object, pindex), fault in the page.  If
615 	 * vm_fault_object() fails it will unlock and deallocate the FS
616 	 * data.   If it succeeds everything remains locked and fs->object
617 	 * will have an additinal PIP count if it is not equal to
618 	 * fs->first_object
619 	 */
620 	result = vm_fault_object(&fs, first_pindex, fault_type);
621 
622 	if (result == KERN_TRY_AGAIN)
623 		goto RetryFault;
624 	if (result != KERN_SUCCESS) {
625 		*errorp = result;
626 		return(NULL);
627 	}
628 
629 	if ((orig_fault_type & VM_PROT_WRITE) &&
630 	    (fs.prot & VM_PROT_WRITE) == 0) {
631 		*errorp = KERN_PROTECTION_FAILURE;
632 		unlock_and_deallocate(&fs);
633 		return(NULL);
634 	}
635 
636 	/*
637 	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
638 	 * will contain a busied page.
639 	 */
640 	unlock_things(&fs);
641 
642 	/*
643 	 * Return a held page.  We are not doing any pmap manipulation so do
644 	 * not set PG_MAPPED.  However, adjust the page flags according to
645 	 * the fault type because the caller may not use a managed pmapping
646 	 * (so we don't want to lose the fact that the page will be dirtied
647 	 * if a write fault was specified).
648 	 */
649 	lwkt_gettoken(&vm_token);
650 	vm_page_hold(fs.m);
651 	vm_page_flag_clear(fs.m, PG_ZERO);
652 	if (fault_type & VM_PROT_WRITE)
653 		vm_page_dirty(fs.m);
654 
655 	/*
656 	 * Update the pmap.  We really only have to do this if a COW
657 	 * occured to replace the read-only page with the new page.  For
658 	 * now just do it unconditionally. XXX
659 	 */
660 	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, fs.wired);
661 	vm_page_flag_set(fs.m, PG_REFERENCED);
662 
663 	/*
664 	 * Unbusy the page by activating it.  It remains held and will not
665 	 * be reclaimed.
666 	 */
667 	vm_page_activate(fs.m);
668 
669 	if (curthread->td_lwp) {
670 		if (fs.hardfault) {
671 			curthread->td_lwp->lwp_ru.ru_majflt++;
672 		} else {
673 			curthread->td_lwp->lwp_ru.ru_minflt++;
674 		}
675 	}
676 
677 	/*
678 	 * Unlock everything, and return the held page.
679 	 */
680 	vm_page_wakeup(fs.m);
681 	vm_object_deallocate(fs.first_object);
682 	lwkt_reltoken(&vm_token);
683 
684 	*errorp = 0;
685 	return(fs.m);
686 }
687 
688 /*
689  * Fault in the specified (object,offset), dirty the returned page as
690  * needed.  If the requested fault_type cannot be done NULL and an
691  * error is returned.
692  *
693  * A held (but not busied) page is returned.
694  *
695  * No requirements.
696  */
697 vm_page_t
698 vm_fault_object_page(vm_object_t object, vm_ooffset_t offset,
699 		     vm_prot_t fault_type, int fault_flags, int *errorp)
700 {
701 	int result;
702 	vm_pindex_t first_pindex;
703 	struct faultstate fs;
704 	struct vm_map_entry entry;
705 
706 	bzero(&entry, sizeof(entry));
707 	entry.object.vm_object = object;
708 	entry.maptype = VM_MAPTYPE_NORMAL;
709 	entry.protection = entry.max_protection = fault_type;
710 
711 	fs.didlimit = 0;
712 	fs.hardfault = 0;
713 	fs.fault_flags = fault_flags;
714 	fs.map = NULL;
715 	KKASSERT((fault_flags & VM_FAULT_WIRE_MASK) == 0);
716 
717 RetryFault:
718 
719 	fs.first_object = object;
720 	first_pindex = OFF_TO_IDX(offset);
721 	fs.entry = &entry;
722 	fs.first_prot = fault_type;
723 	fs.wired = 0;
724 	/*fs.map_generation = 0; unused */
725 
726 	/*
727 	 * Make a reference to this object to prevent its disposal while we
728 	 * are messing with it.  Once we have the reference, the map is free
729 	 * to be diddled.  Since objects reference their shadows (and copies),
730 	 * they will stay around as well.
731 	 *
732 	 * Bump the paging-in-progress count to prevent size changes (e.g.
733 	 * truncation operations) during I/O.  This must be done after
734 	 * obtaining the vnode lock in order to avoid possible deadlocks.
735 	 */
736 	lwkt_gettoken(&vm_token);
737 	vm_object_reference(fs.first_object);
738 	fs.vp = vnode_pager_lock(fs.first_object);
739 	vm_object_pip_add(fs.first_object, 1);
740 	lwkt_reltoken(&vm_token);
741 
742 	fs.lookup_still_valid = TRUE;
743 	fs.first_m = NULL;
744 	fs.object = fs.first_object;	/* so unlock_and_deallocate works */
745 
746 #if 0
747 	/* XXX future - ability to operate on VM object using vpagetable */
748 	if (fs.entry->maptype == VM_MAPTYPE_VPAGETABLE) {
749 		result = vm_fault_vpagetable(&fs, &first_pindex,
750 					     fs.entry->aux.master_pde,
751 					     fault_type);
752 		if (result == KERN_TRY_AGAIN)
753 			goto RetryFault;
754 		if (result != KERN_SUCCESS) {
755 			*errorp = result;
756 			return (NULL);
757 		}
758 	}
759 #endif
760 
761 	/*
762 	 * Now we have the actual (object, pindex), fault in the page.  If
763 	 * vm_fault_object() fails it will unlock and deallocate the FS
764 	 * data.   If it succeeds everything remains locked and fs->object
765 	 * will have an additinal PIP count if it is not equal to
766 	 * fs->first_object
767 	 */
768 	result = vm_fault_object(&fs, first_pindex, fault_type);
769 
770 	if (result == KERN_TRY_AGAIN)
771 		goto RetryFault;
772 	if (result != KERN_SUCCESS) {
773 		*errorp = result;
774 		return(NULL);
775 	}
776 
777 	if ((fault_type & VM_PROT_WRITE) && (fs.prot & VM_PROT_WRITE) == 0) {
778 		*errorp = KERN_PROTECTION_FAILURE;
779 		unlock_and_deallocate(&fs);
780 		return(NULL);
781 	}
782 
783 	/*
784 	 * On success vm_fault_object() does not unlock or deallocate, and fs.m
785 	 * will contain a busied page.
786 	 */
787 	unlock_things(&fs);
788 
789 	/*
790 	 * Return a held page.  We are not doing any pmap manipulation so do
791 	 * not set PG_MAPPED.  However, adjust the page flags according to
792 	 * the fault type because the caller may not use a managed pmapping
793 	 * (so we don't want to lose the fact that the page will be dirtied
794 	 * if a write fault was specified).
795 	 */
796 	lwkt_gettoken(&vm_token);
797 	vm_page_hold(fs.m);
798 	vm_page_flag_clear(fs.m, PG_ZERO);
799 	if (fault_type & VM_PROT_WRITE)
800 		vm_page_dirty(fs.m);
801 
802 	if (fault_flags & VM_FAULT_DIRTY)
803 		vm_page_dirty(fs.m);
804 	if (fault_flags & VM_FAULT_UNSWAP)
805 		swap_pager_unswapped(fs.m);
806 
807 	/*
808 	 * Indicate that the page was accessed.
809 	 */
810 	vm_page_flag_set(fs.m, PG_REFERENCED);
811 
812 	/*
813 	 * Unbusy the page by activating it.  It remains held and will not
814 	 * be reclaimed.
815 	 */
816 	vm_page_activate(fs.m);
817 
818 	if (curthread->td_lwp) {
819 		if (fs.hardfault) {
820 			mycpu->gd_cnt.v_vm_faults++;
821 			curthread->td_lwp->lwp_ru.ru_majflt++;
822 		} else {
823 			curthread->td_lwp->lwp_ru.ru_minflt++;
824 		}
825 	}
826 
827 	/*
828 	 * Unlock everything, and return the held page.
829 	 */
830 	vm_page_wakeup(fs.m);
831 	vm_object_deallocate(fs.first_object);
832 	lwkt_reltoken(&vm_token);
833 
834 	*errorp = 0;
835 	return(fs.m);
836 }
837 
838 /*
839  * Translate the virtual page number (first_pindex) that is relative
840  * to the address space into a logical page number that is relative to the
841  * backing object.  Use the virtual page table pointed to by (vpte).
842  *
843  * This implements an N-level page table.  Any level can terminate the
844  * scan by setting VPTE_PS.   A linear mapping is accomplished by setting
845  * VPTE_PS in the master page directory entry set via mcontrol(MADV_SETMAP).
846  *
847  * No requirements (vm_token need not be held).
848  */
849 static
850 int
851 vm_fault_vpagetable(struct faultstate *fs, vm_pindex_t *pindex,
852 		    vpte_t vpte, int fault_type)
853 {
854 	struct lwbuf *lwb;
855 	int vshift = VPTE_FRAME_END - PAGE_SHIFT; /* index bits remaining */
856 	int result = KERN_SUCCESS;
857 	vpte_t *ptep;
858 
859 	for (;;) {
860 		/*
861 		 * We cannot proceed if the vpte is not valid, not readable
862 		 * for a read fault, or not writable for a write fault.
863 		 */
864 		if ((vpte & VPTE_V) == 0) {
865 			unlock_and_deallocate(fs);
866 			return (KERN_FAILURE);
867 		}
868 		if ((fault_type & VM_PROT_READ) && (vpte & VPTE_R) == 0) {
869 			unlock_and_deallocate(fs);
870 			return (KERN_FAILURE);
871 		}
872 		if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_W) == 0) {
873 			unlock_and_deallocate(fs);
874 			return (KERN_FAILURE);
875 		}
876 		if ((vpte & VPTE_PS) || vshift == 0)
877 			break;
878 		KKASSERT(vshift >= VPTE_PAGE_BITS);
879 
880 		/*
881 		 * Get the page table page.  Nominally we only read the page
882 		 * table, but since we are actively setting VPTE_M and VPTE_A,
883 		 * tell vm_fault_object() that we are writing it.
884 		 *
885 		 * There is currently no real need to optimize this.
886 		 */
887 		result = vm_fault_object(fs, (vpte & VPTE_FRAME) >> PAGE_SHIFT,
888 					 VM_PROT_READ|VM_PROT_WRITE);
889 		if (result != KERN_SUCCESS)
890 			return (result);
891 
892 		/*
893 		 * Process the returned fs.m and look up the page table
894 		 * entry in the page table page.
895 		 */
896 		vshift -= VPTE_PAGE_BITS;
897 		lwb = lwbuf_alloc(fs->m);
898 		ptep = ((vpte_t *)lwbuf_kva(lwb) +
899 		        ((*pindex >> vshift) & VPTE_PAGE_MASK));
900 		vpte = *ptep;
901 
902 		/*
903 		 * Page table write-back.  If the vpte is valid for the
904 		 * requested operation, do a write-back to the page table.
905 		 *
906 		 * XXX VPTE_M is not set properly for page directory pages.
907 		 * It doesn't get set in the page directory if the page table
908 		 * is modified during a read access.
909 		 */
910 		if ((fault_type & VM_PROT_WRITE) && (vpte & VPTE_V) &&
911 		    (vpte & VPTE_W)) {
912 			if ((vpte & (VPTE_M|VPTE_A)) != (VPTE_M|VPTE_A)) {
913 				atomic_set_long(ptep, VPTE_M | VPTE_A);
914 				vm_page_dirty(fs->m);
915 			}
916 		}
917 		if ((fault_type & VM_PROT_READ) && (vpte & VPTE_V) &&
918 		    (vpte & VPTE_R)) {
919 			if ((vpte & VPTE_A) == 0) {
920 				atomic_set_long(ptep, VPTE_A);
921 				vm_page_dirty(fs->m);
922 			}
923 		}
924 		lwbuf_free(lwb);
925 		vm_page_flag_set(fs->m, PG_REFERENCED);
926 		vm_page_activate(fs->m);
927 		vm_page_wakeup(fs->m);
928 		cleanup_successful_fault(fs);
929 	}
930 	/*
931 	 * Combine remaining address bits with the vpte.
932 	 */
933 	/* JG how many bits from each? */
934 	*pindex = ((vpte & VPTE_FRAME) >> PAGE_SHIFT) +
935 		  (*pindex & ((1L << vshift) - 1));
936 	return (KERN_SUCCESS);
937 }
938 
939 
940 /*
941  * This is the core of the vm_fault code.
942  *
943  * Do all operations required to fault-in (fs.first_object, pindex).  Run
944  * through the shadow chain as necessary and do required COW or virtual
945  * copy operations.  The caller has already fully resolved the vm_map_entry
946  * and, if appropriate, has created a copy-on-write layer.  All we need to
947  * do is iterate the object chain.
948  *
949  * On failure (fs) is unlocked and deallocated and the caller may return or
950  * retry depending on the failure code.  On success (fs) is NOT unlocked or
951  * deallocated, fs.m will contained a resolved, busied page, and fs.object
952  * will have an additional PIP count if it is not equal to fs.first_object.
953  *
954  * No requirements.
955  */
956 static
957 int
958 vm_fault_object(struct faultstate *fs,
959 		vm_pindex_t first_pindex, vm_prot_t fault_type)
960 {
961 	vm_object_t next_object;
962 	vm_pindex_t pindex;
963 
964 	fs->prot = fs->first_prot;
965 	fs->object = fs->first_object;
966 	pindex = first_pindex;
967 
968 	/*
969 	 * If a read fault occurs we try to make the page writable if
970 	 * possible.  There are three cases where we cannot make the
971 	 * page mapping writable:
972 	 *
973 	 * (1) The mapping is read-only or the VM object is read-only,
974 	 *     fs->prot above will simply not have VM_PROT_WRITE set.
975 	 *
976 	 * (2) If the mapping is a virtual page table we need to be able
977 	 *     to detect writes so we can set VPTE_M in the virtual page
978 	 *     table.
979 	 *
980 	 * (3) If the VM page is read-only or copy-on-write, upgrading would
981 	 *     just result in an unnecessary COW fault.
982 	 *
983 	 * VM_PROT_VPAGED is set if faulting via a virtual page table and
984 	 * causes adjustments to the 'M'odify bit to also turn off write
985 	 * access to force a re-fault.
986 	 */
987 	if (fs->entry->maptype == VM_MAPTYPE_VPAGETABLE) {
988 		if ((fault_type & VM_PROT_WRITE) == 0)
989 			fs->prot &= ~VM_PROT_WRITE;
990 	}
991 
992 	lwkt_gettoken(&vm_token);
993 
994 	for (;;) {
995 		/*
996 		 * If the object is dead, we stop here
997 		 */
998 		if (fs->object->flags & OBJ_DEAD) {
999 			unlock_and_deallocate(fs);
1000 			lwkt_reltoken(&vm_token);
1001 			return (KERN_PROTECTION_FAILURE);
1002 		}
1003 
1004 		/*
1005 		 * See if page is resident.  spl protection is required
1006 		 * to avoid an interrupt unbusy/free race against our
1007 		 * lookup.  We must hold the protection through a page
1008 		 * allocation or busy.
1009 		 */
1010 		crit_enter();
1011 		fs->m = vm_page_lookup(fs->object, pindex);
1012 		if (fs->m != NULL) {
1013 			int queue;
1014 			/*
1015 			 * Wait/Retry if the page is busy.  We have to do this
1016 			 * if the page is busy via either PG_BUSY or
1017 			 * vm_page_t->busy because the vm_pager may be using
1018 			 * vm_page_t->busy for pageouts ( and even pageins if
1019 			 * it is the vnode pager ), and we could end up trying
1020 			 * to pagein and pageout the same page simultaneously.
1021 			 *
1022 			 * We can theoretically allow the busy case on a read
1023 			 * fault if the page is marked valid, but since such
1024 			 * pages are typically already pmap'd, putting that
1025 			 * special case in might be more effort then it is
1026 			 * worth.  We cannot under any circumstances mess
1027 			 * around with a vm_page_t->busy page except, perhaps,
1028 			 * to pmap it.
1029 			 */
1030 			if ((fs->m->flags & PG_BUSY) || fs->m->busy) {
1031 				unlock_things(fs);
1032 				vm_page_sleep_busy(fs->m, TRUE, "vmpfw");
1033 				mycpu->gd_cnt.v_intrans++;
1034 				vm_object_deallocate(fs->first_object);
1035 				fs->first_object = NULL;
1036 				lwkt_reltoken(&vm_token);
1037 				crit_exit();
1038 				return (KERN_TRY_AGAIN);
1039 			}
1040 
1041 			/*
1042 			 * If reactivating a page from PQ_CACHE we may have
1043 			 * to rate-limit.
1044 			 */
1045 			queue = fs->m->queue;
1046 			vm_page_unqueue_nowakeup(fs->m);
1047 
1048 			if ((queue - fs->m->pc) == PQ_CACHE &&
1049 			    vm_page_count_severe()) {
1050 				vm_page_activate(fs->m);
1051 				unlock_and_deallocate(fs);
1052 				vm_waitpfault();
1053 				lwkt_reltoken(&vm_token);
1054 				crit_exit();
1055 				return (KERN_TRY_AGAIN);
1056 			}
1057 
1058 			/*
1059 			 * Mark page busy for other processes, and the
1060 			 * pagedaemon.  If it still isn't completely valid
1061 			 * (readable), or if a read-ahead-mark is set on
1062 			 * the VM page, jump to readrest, else we found the
1063 			 * page and can return.
1064 			 *
1065 			 * We can release the spl once we have marked the
1066 			 * page busy.
1067 			 */
1068 			vm_page_busy(fs->m);
1069 			crit_exit();
1070 
1071 			if (fs->m->object != &kernel_object) {
1072 				if ((fs->m->valid & VM_PAGE_BITS_ALL) !=
1073 				    VM_PAGE_BITS_ALL) {
1074 					goto readrest;
1075 				}
1076 				if (fs->m->flags & PG_RAM) {
1077 					if (debug_cluster)
1078 						kprintf("R");
1079 					vm_page_flag_clear(fs->m, PG_RAM);
1080 					goto readrest;
1081 				}
1082 			}
1083 			break; /* break to PAGE HAS BEEN FOUND */
1084 		}
1085 
1086 		/*
1087 		 * Page is not resident, If this is the search termination
1088 		 * or the pager might contain the page, allocate a new page.
1089 		 *
1090 		 * NOTE: We are still in a critical section.
1091 		 */
1092 		if (TRYPAGER(fs) || fs->object == fs->first_object) {
1093 			/*
1094 			 * If the page is beyond the object size we fail
1095 			 */
1096 			if (pindex >= fs->object->size) {
1097 				lwkt_reltoken(&vm_token);
1098 				crit_exit();
1099 				unlock_and_deallocate(fs);
1100 				return (KERN_PROTECTION_FAILURE);
1101 			}
1102 
1103 			/*
1104 			 * Ratelimit.
1105 			 */
1106 			if (fs->didlimit == 0 && curproc != NULL) {
1107 				int limticks;
1108 
1109 				limticks = vm_fault_ratelimit(curproc->p_vmspace);
1110 				if (limticks) {
1111 					lwkt_reltoken(&vm_token);
1112 					crit_exit();
1113 					unlock_and_deallocate(fs);
1114 					tsleep(curproc, 0, "vmrate", limticks);
1115 					fs->didlimit = 1;
1116 					return (KERN_TRY_AGAIN);
1117 				}
1118 			}
1119 
1120 			/*
1121 			 * Allocate a new page for this object/offset pair.
1122 			 */
1123 			fs->m = NULL;
1124 			if (!vm_page_count_severe()) {
1125 				fs->m = vm_page_alloc(fs->object, pindex,
1126 				    (fs->vp || fs->object->backing_object) ? VM_ALLOC_NORMAL : VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
1127 			}
1128 			if (fs->m == NULL) {
1129 				lwkt_reltoken(&vm_token);
1130 				crit_exit();
1131 				unlock_and_deallocate(fs);
1132 				vm_waitpfault();
1133 				return (KERN_TRY_AGAIN);
1134 			}
1135 		}
1136 		crit_exit();
1137 
1138 readrest:
1139 		/*
1140 		 * We have found an invalid or partially valid page, a
1141 		 * page with a read-ahead mark which might be partially or
1142 		 * fully valid (and maybe dirty too), or we have allocated
1143 		 * a new page.
1144 		 *
1145 		 * Attempt to fault-in the page if there is a chance that the
1146 		 * pager has it, and potentially fault in additional pages
1147 		 * at the same time.
1148 		 *
1149 		 * We are NOT in splvm here and if TRYPAGER is true then
1150 		 * fs.m will be non-NULL and will be PG_BUSY for us.
1151 		 */
1152 		if (TRYPAGER(fs)) {
1153 			int rv;
1154 			int seqaccess;
1155 			u_char behavior = vm_map_entry_behavior(fs->entry);
1156 
1157 			if (behavior == MAP_ENTRY_BEHAV_RANDOM)
1158 				seqaccess = 0;
1159 			else
1160 				seqaccess = -1;
1161 
1162 			/*
1163 			 * If sequential access is detected then attempt
1164 			 * to deactivate/cache pages behind the scan to
1165 			 * prevent resource hogging.
1166 			 *
1167 			 * Use of PG_RAM to detect sequential access
1168 			 * also simulates multi-zone sequential access
1169 			 * detection for free.
1170 			 *
1171 			 * NOTE: Partially valid dirty pages cannot be
1172 			 *	 deactivated without causing NFS picemeal
1173 			 *	 writes to barf.
1174 			 */
1175 			if ((fs->first_object->type != OBJT_DEVICE) &&
1176 			    (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
1177                                 (behavior != MAP_ENTRY_BEHAV_RANDOM &&
1178 				 (fs->m->flags & PG_RAM)))
1179 			) {
1180 				vm_pindex_t scan_pindex;
1181 				int scan_count = 16;
1182 
1183 				if (first_pindex < 16) {
1184 					scan_pindex = 0;
1185 					scan_count = 0;
1186 				} else {
1187 					scan_pindex = first_pindex - 16;
1188 					if (scan_pindex < 16)
1189 						scan_count = scan_pindex;
1190 					else
1191 						scan_count = 16;
1192 				}
1193 
1194 				crit_enter();
1195 				while (scan_count) {
1196 					vm_page_t mt;
1197 
1198 					mt = vm_page_lookup(fs->first_object,
1199 							    scan_pindex);
1200 					if (mt == NULL ||
1201 					    (mt->valid != VM_PAGE_BITS_ALL)) {
1202 						break;
1203 					}
1204 					if (mt->busy ||
1205 					    (mt->flags & (PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED)) ||
1206 					    mt->hold_count ||
1207 					    mt->wire_count)  {
1208 						goto skip;
1209 					}
1210 					if (mt->dirty == 0)
1211 						vm_page_test_dirty(mt);
1212 					if (mt->dirty) {
1213 						vm_page_busy(mt);
1214 						vm_page_protect(mt,
1215 								VM_PROT_NONE);
1216 						vm_page_deactivate(mt);
1217 						vm_page_wakeup(mt);
1218 					} else {
1219 						vm_page_cache(mt);
1220 					}
1221 skip:
1222 					--scan_count;
1223 					--scan_pindex;
1224 				}
1225 				crit_exit();
1226 
1227 				seqaccess = 1;
1228 			}
1229 
1230 			/*
1231 			 * Avoid deadlocking against the map when doing I/O.
1232 			 * fs.object and the page is PG_BUSY'd.
1233 			 */
1234 			unlock_map(fs);
1235 
1236 			/*
1237 			 * Acquire the page data.  We still hold a ref on
1238 			 * fs.object and the page has been PG_BUSY's.
1239 			 *
1240 			 * The pager may replace the page (for example, in
1241 			 * order to enter a fictitious page into the
1242 			 * object).  If it does so it is responsible for
1243 			 * cleaning up the passed page and properly setting
1244 			 * the new page PG_BUSY.
1245 			 *
1246 			 * If we got here through a PG_RAM read-ahead
1247 			 * mark the page may be partially dirty and thus
1248 			 * not freeable.  Don't bother checking to see
1249 			 * if the pager has the page because we can't free
1250 			 * it anyway.  We have to depend on the get_page
1251 			 * operation filling in any gaps whether there is
1252 			 * backing store or not.
1253 			 */
1254 			rv = vm_pager_get_page(fs->object, &fs->m, seqaccess);
1255 
1256 			if (rv == VM_PAGER_OK) {
1257 				/*
1258 				 * Relookup in case pager changed page. Pager
1259 				 * is responsible for disposition of old page
1260 				 * if moved.
1261 				 *
1262 				 * XXX other code segments do relookups too.
1263 				 * It's a bad abstraction that needs to be
1264 				 * fixed/removed.
1265 				 */
1266 				fs->m = vm_page_lookup(fs->object, pindex);
1267 				if (fs->m == NULL) {
1268 					lwkt_reltoken(&vm_token);
1269 					unlock_and_deallocate(fs);
1270 					return (KERN_TRY_AGAIN);
1271 				}
1272 
1273 				++fs->hardfault;
1274 				break; /* break to PAGE HAS BEEN FOUND */
1275 			}
1276 
1277 			/*
1278 			 * Remove the bogus page (which does not exist at this
1279 			 * object/offset); before doing so, we must get back
1280 			 * our object lock to preserve our invariant.
1281 			 *
1282 			 * Also wake up any other process that may want to bring
1283 			 * in this page.
1284 			 *
1285 			 * If this is the top-level object, we must leave the
1286 			 * busy page to prevent another process from rushing
1287 			 * past us, and inserting the page in that object at
1288 			 * the same time that we are.
1289 			 */
1290 			if (rv == VM_PAGER_ERROR) {
1291 				if (curproc)
1292 					kprintf("vm_fault: pager read error, pid %d (%s)\n", curproc->p_pid, curproc->p_comm);
1293 				else
1294 					kprintf("vm_fault: pager read error, thread %p (%s)\n", curthread, curproc->p_comm);
1295 			}
1296 
1297 			/*
1298 			 * Data outside the range of the pager or an I/O error
1299 			 *
1300 			 * The page may have been wired during the pagein,
1301 			 * e.g. by the buffer cache, and cannot simply be
1302 			 * freed.  Call vnode_pager_freepage() to deal with it.
1303 			 */
1304 			/*
1305 			 * XXX - the check for kernel_map is a kludge to work
1306 			 * around having the machine panic on a kernel space
1307 			 * fault w/ I/O error.
1308 			 */
1309 			if (((fs->map != &kernel_map) &&
1310 			    (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) {
1311 				vnode_pager_freepage(fs->m);
1312 				lwkt_reltoken(&vm_token);
1313 				fs->m = NULL;
1314 				unlock_and_deallocate(fs);
1315 				if (rv == VM_PAGER_ERROR)
1316 					return (KERN_FAILURE);
1317 				else
1318 					return (KERN_PROTECTION_FAILURE);
1319 				/* NOT REACHED */
1320 			}
1321 			if (fs->object != fs->first_object) {
1322 				vnode_pager_freepage(fs->m);
1323 				fs->m = NULL;
1324 				/*
1325 				 * XXX - we cannot just fall out at this
1326 				 * point, m has been freed and is invalid!
1327 				 */
1328 			}
1329 		}
1330 
1331 		/*
1332 		 * We get here if the object has a default pager (or unwiring)
1333 		 * or the pager doesn't have the page.
1334 		 */
1335 		if (fs->object == fs->first_object)
1336 			fs->first_m = fs->m;
1337 
1338 		/*
1339 		 * Move on to the next object.  Lock the next object before
1340 		 * unlocking the current one.
1341 		 */
1342 		pindex += OFF_TO_IDX(fs->object->backing_object_offset);
1343 		next_object = fs->object->backing_object;
1344 		if (next_object == NULL) {
1345 			/*
1346 			 * If there's no object left, fill the page in the top
1347 			 * object with zeros.
1348 			 */
1349 			if (fs->object != fs->first_object) {
1350 				vm_object_pip_wakeup(fs->object);
1351 
1352 				fs->object = fs->first_object;
1353 				pindex = first_pindex;
1354 				fs->m = fs->first_m;
1355 			}
1356 			fs->first_m = NULL;
1357 
1358 			/*
1359 			 * Zero the page if necessary and mark it valid.
1360 			 */
1361 			if ((fs->m->flags & PG_ZERO) == 0) {
1362 				vm_page_zero_fill(fs->m);
1363 			} else {
1364 				mycpu->gd_cnt.v_ozfod++;
1365 			}
1366 			mycpu->gd_cnt.v_zfod++;
1367 			fs->m->valid = VM_PAGE_BITS_ALL;
1368 			break;	/* break to PAGE HAS BEEN FOUND */
1369 		}
1370 		if (fs->object != fs->first_object) {
1371 			vm_object_pip_wakeup(fs->object);
1372 		}
1373 		KASSERT(fs->object != next_object,
1374 			("object loop %p", next_object));
1375 		fs->object = next_object;
1376 		vm_object_pip_add(fs->object, 1);
1377 	}
1378 
1379 	/*
1380 	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
1381 	 * is held.]
1382 	 *
1383 	 * vm_token is still held
1384 	 *
1385 	 * If the page is being written, but isn't already owned by the
1386 	 * top-level object, we have to copy it into a new page owned by the
1387 	 * top-level object.
1388 	 */
1389 	KASSERT((fs->m->flags & PG_BUSY) != 0,
1390 		("vm_fault: not busy after main loop"));
1391 
1392 	if (fs->object != fs->first_object) {
1393 		/*
1394 		 * We only really need to copy if we want to write it.
1395 		 */
1396 		if (fault_type & VM_PROT_WRITE) {
1397 			/*
1398 			 * This allows pages to be virtually copied from a
1399 			 * backing_object into the first_object, where the
1400 			 * backing object has no other refs to it, and cannot
1401 			 * gain any more refs.  Instead of a bcopy, we just
1402 			 * move the page from the backing object to the
1403 			 * first object.  Note that we must mark the page
1404 			 * dirty in the first object so that it will go out
1405 			 * to swap when needed.
1406 			 */
1407 			if (
1408 				/*
1409 				 * Map, if present, has not changed
1410 				 */
1411 				(fs->map == NULL ||
1412 				fs->map_generation == fs->map->timestamp) &&
1413 				/*
1414 				 * Only one shadow object
1415 				 */
1416 				(fs->object->shadow_count == 1) &&
1417 				/*
1418 				 * No COW refs, except us
1419 				 */
1420 				(fs->object->ref_count == 1) &&
1421 				/*
1422 				 * No one else can look this object up
1423 				 */
1424 				(fs->object->handle == NULL) &&
1425 				/*
1426 				 * No other ways to look the object up
1427 				 */
1428 				((fs->object->type == OBJT_DEFAULT) ||
1429 				 (fs->object->type == OBJT_SWAP)) &&
1430 				/*
1431 				 * We don't chase down the shadow chain
1432 				 */
1433 				(fs->object == fs->first_object->backing_object) &&
1434 
1435 				/*
1436 				 * grab the lock if we need to
1437 				 */
1438 				(fs->lookup_still_valid ||
1439 				 fs->map == NULL ||
1440 				 lockmgr(&fs->map->lock, LK_EXCLUSIVE|LK_NOWAIT) == 0)
1441 			    ) {
1442 
1443 				fs->lookup_still_valid = 1;
1444 				/*
1445 				 * get rid of the unnecessary page
1446 				 */
1447 				vm_page_protect(fs->first_m, VM_PROT_NONE);
1448 				vm_page_free(fs->first_m);
1449 				fs->first_m = NULL;
1450 
1451 				/*
1452 				 * grab the page and put it into the
1453 				 * process'es object.  The page is
1454 				 * automatically made dirty.
1455 				 */
1456 				vm_page_rename(fs->m, fs->first_object, first_pindex);
1457 				fs->first_m = fs->m;
1458 				vm_page_busy(fs->first_m);
1459 				fs->m = NULL;
1460 				mycpu->gd_cnt.v_cow_optim++;
1461 			} else {
1462 				/*
1463 				 * Oh, well, lets copy it.
1464 				 */
1465 				vm_page_copy(fs->m, fs->first_m);
1466 				vm_page_event(fs->m, VMEVENT_COW);
1467 			}
1468 
1469 			if (fs->m) {
1470 				/*
1471 				 * We no longer need the old page or object.
1472 				 */
1473 				release_page(fs);
1474 			}
1475 
1476 			/*
1477 			 * fs->object != fs->first_object due to above
1478 			 * conditional
1479 			 */
1480 			vm_object_pip_wakeup(fs->object);
1481 
1482 			/*
1483 			 * Only use the new page below...
1484 			 */
1485 
1486 			mycpu->gd_cnt.v_cow_faults++;
1487 			fs->m = fs->first_m;
1488 			fs->object = fs->first_object;
1489 			pindex = first_pindex;
1490 		} else {
1491 			/*
1492 			 * If it wasn't a write fault avoid having to copy
1493 			 * the page by mapping it read-only.
1494 			 */
1495 			fs->prot &= ~VM_PROT_WRITE;
1496 		}
1497 	}
1498 
1499 	/*
1500 	 * We may have had to unlock a map to do I/O.  If we did then
1501 	 * lookup_still_valid will be FALSE.  If the map generation count
1502 	 * also changed then all sorts of things could have happened while
1503 	 * we were doing the I/O and we need to retry.
1504 	 */
1505 
1506 	if (!fs->lookup_still_valid &&
1507 	    fs->map != NULL &&
1508 	    (fs->map->timestamp != fs->map_generation)) {
1509 		release_page(fs);
1510 		lwkt_reltoken(&vm_token);
1511 		unlock_and_deallocate(fs);
1512 		return (KERN_TRY_AGAIN);
1513 	}
1514 
1515 	/*
1516 	 * If the fault is a write, we know that this page is being
1517 	 * written NOW so dirty it explicitly to save on pmap_is_modified()
1518 	 * calls later.
1519 	 *
1520 	 * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
1521 	 * if the page is already dirty to prevent data written with
1522 	 * the expectation of being synced from not being synced.
1523 	 * Likewise if this entry does not request NOSYNC then make
1524 	 * sure the page isn't marked NOSYNC.  Applications sharing
1525 	 * data should use the same flags to avoid ping ponging.
1526 	 *
1527 	 * Also tell the backing pager, if any, that it should remove
1528 	 * any swap backing since the page is now dirty.
1529 	 */
1530 	if (fs->prot & VM_PROT_WRITE) {
1531 		vm_object_set_writeable_dirty(fs->m->object);
1532 		vm_set_nosync(fs->m, fs->entry);
1533 		if (fs->fault_flags & VM_FAULT_DIRTY) {
1534 			crit_enter();
1535 			vm_page_dirty(fs->m);
1536 			swap_pager_unswapped(fs->m);
1537 			crit_exit();
1538 		}
1539 	}
1540 
1541 	lwkt_reltoken(&vm_token);
1542 
1543 	/*
1544 	 * Page had better still be busy.  We are still locked up and
1545 	 * fs->object will have another PIP reference if it is not equal
1546 	 * to fs->first_object.
1547 	 */
1548 	KASSERT(fs->m->flags & PG_BUSY,
1549 		("vm_fault: page %p not busy!", fs->m));
1550 
1551 	/*
1552 	 * Sanity check: page must be completely valid or it is not fit to
1553 	 * map into user space.  vm_pager_get_pages() ensures this.
1554 	 */
1555 	if (fs->m->valid != VM_PAGE_BITS_ALL) {
1556 		vm_page_zero_invalid(fs->m, TRUE);
1557 		kprintf("Warning: page %p partially invalid on fault\n", fs->m);
1558 	}
1559 
1560 	return (KERN_SUCCESS);
1561 }
1562 
1563 /*
1564  * Wire down a range of virtual addresses in a map.  The entry in question
1565  * should be marked in-transition and the map must be locked.  We must
1566  * release the map temporarily while faulting-in the page to avoid a
1567  * deadlock.  Note that the entry may be clipped while we are blocked but
1568  * will never be freed.
1569  *
1570  * No requirements.
1571  */
1572 int
1573 vm_fault_wire(vm_map_t map, vm_map_entry_t entry, boolean_t user_wire)
1574 {
1575 	boolean_t fictitious;
1576 	vm_offset_t start;
1577 	vm_offset_t end;
1578 	vm_offset_t va;
1579 	vm_paddr_t pa;
1580 	pmap_t pmap;
1581 	int rv;
1582 
1583 	pmap = vm_map_pmap(map);
1584 	start = entry->start;
1585 	end = entry->end;
1586 	fictitious = entry->object.vm_object &&
1587 			(entry->object.vm_object->type == OBJT_DEVICE);
1588 	if (entry->eflags & MAP_ENTRY_KSTACK)
1589 		start += PAGE_SIZE;
1590 	lwkt_gettoken(&vm_token);
1591 	vm_map_unlock(map);
1592 	map->timestamp++;
1593 
1594 	/*
1595 	 * We simulate a fault to get the page and enter it in the physical
1596 	 * map.
1597 	 */
1598 	for (va = start; va < end; va += PAGE_SIZE) {
1599 		if (user_wire) {
1600 			rv = vm_fault(map, va, VM_PROT_READ,
1601 					VM_FAULT_USER_WIRE);
1602 		} else {
1603 			rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
1604 					VM_FAULT_CHANGE_WIRING);
1605 		}
1606 		if (rv) {
1607 			while (va > start) {
1608 				va -= PAGE_SIZE;
1609 				if ((pa = pmap_extract(pmap, va)) == 0)
1610 					continue;
1611 				pmap_change_wiring(pmap, va, FALSE);
1612 				if (!fictitious)
1613 					vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1614 			}
1615 			vm_map_lock(map);
1616 			lwkt_reltoken(&vm_token);
1617 			return (rv);
1618 		}
1619 	}
1620 	vm_map_lock(map);
1621 	lwkt_reltoken(&vm_token);
1622 	return (KERN_SUCCESS);
1623 }
1624 
1625 /*
1626  * Unwire a range of virtual addresses in a map.  The map should be
1627  * locked.
1628  */
1629 void
1630 vm_fault_unwire(vm_map_t map, vm_map_entry_t entry)
1631 {
1632 	boolean_t fictitious;
1633 	vm_offset_t start;
1634 	vm_offset_t end;
1635 	vm_offset_t va;
1636 	vm_paddr_t pa;
1637 	pmap_t pmap;
1638 
1639 	pmap = vm_map_pmap(map);
1640 	start = entry->start;
1641 	end = entry->end;
1642 	fictitious = entry->object.vm_object &&
1643 			(entry->object.vm_object->type == OBJT_DEVICE);
1644 	if (entry->eflags & MAP_ENTRY_KSTACK)
1645 		start += PAGE_SIZE;
1646 
1647 	/*
1648 	 * Since the pages are wired down, we must be able to get their
1649 	 * mappings from the physical map system.
1650 	 */
1651 	lwkt_gettoken(&vm_token);
1652 	for (va = start; va < end; va += PAGE_SIZE) {
1653 		pa = pmap_extract(pmap, va);
1654 		if (pa != 0) {
1655 			pmap_change_wiring(pmap, va, FALSE);
1656 			if (!fictitious)
1657 				vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
1658 		}
1659 	}
1660 	lwkt_reltoken(&vm_token);
1661 }
1662 
1663 /*
1664  * Reduce the rate at which memory is allocated to a process based
1665  * on the perceived load on the VM system. As the load increases
1666  * the allocation burst rate goes down and the delay increases.
1667  *
1668  * Rate limiting does not apply when faulting active or inactive
1669  * pages.  When faulting 'cache' pages, rate limiting only applies
1670  * if the system currently has a severe page deficit.
1671  *
1672  * XXX vm_pagesupply should be increased when a page is freed.
1673  *
1674  * We sleep up to 1/10 of a second.
1675  */
1676 static int
1677 vm_fault_ratelimit(struct vmspace *vmspace)
1678 {
1679 	if (vm_load_enable == 0)
1680 		return(0);
1681 	if (vmspace->vm_pagesupply > 0) {
1682 		--vmspace->vm_pagesupply;	/* SMP race ok */
1683 		return(0);
1684 	}
1685 #ifdef INVARIANTS
1686 	if (vm_load_debug) {
1687 		kprintf("load %-4d give %d pgs, wait %d, pid %-5d (%s)\n",
1688 			vm_load,
1689 			(1000 - vm_load ) / 10, vm_load * hz / 10000,
1690 			curproc->p_pid, curproc->p_comm);
1691 	}
1692 #endif
1693 	vmspace->vm_pagesupply = (1000 - vm_load) / 10;
1694 	return(vm_load * hz / 10000);
1695 }
1696 
1697 /*
1698  * Copy all of the pages from a wired-down map entry to another.
1699  *
1700  * The source and destination maps must be locked for write.
1701  * The source map entry must be wired down (or be a sharing map
1702  * entry corresponding to a main map entry that is wired down).
1703  *
1704  * No other requirements.
1705  */
1706 void
1707 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
1708 		    vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
1709 {
1710 	vm_object_t dst_object;
1711 	vm_object_t src_object;
1712 	vm_ooffset_t dst_offset;
1713 	vm_ooffset_t src_offset;
1714 	vm_prot_t prot;
1715 	vm_offset_t vaddr;
1716 	vm_page_t dst_m;
1717 	vm_page_t src_m;
1718 
1719 #ifdef	lint
1720 	src_map++;
1721 #endif	/* lint */
1722 
1723 	src_object = src_entry->object.vm_object;
1724 	src_offset = src_entry->offset;
1725 
1726 	/*
1727 	 * Create the top-level object for the destination entry. (Doesn't
1728 	 * actually shadow anything - we copy the pages directly.)
1729 	 */
1730 	vm_map_entry_allocate_object(dst_entry);
1731 	dst_object = dst_entry->object.vm_object;
1732 
1733 	prot = dst_entry->max_protection;
1734 
1735 	/*
1736 	 * Loop through all of the pages in the entry's range, copying each
1737 	 * one from the source object (it should be there) to the destination
1738 	 * object.
1739 	 */
1740 	for (vaddr = dst_entry->start, dst_offset = 0;
1741 	    vaddr < dst_entry->end;
1742 	    vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
1743 
1744 		/*
1745 		 * Allocate a page in the destination object
1746 		 */
1747 		do {
1748 			dst_m = vm_page_alloc(dst_object,
1749 				OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
1750 			if (dst_m == NULL) {
1751 				vm_wait(0);
1752 			}
1753 		} while (dst_m == NULL);
1754 
1755 		/*
1756 		 * Find the page in the source object, and copy it in.
1757 		 * (Because the source is wired down, the page will be in
1758 		 * memory.)
1759 		 */
1760 		src_m = vm_page_lookup(src_object,
1761 			OFF_TO_IDX(dst_offset + src_offset));
1762 		if (src_m == NULL)
1763 			panic("vm_fault_copy_wired: page missing");
1764 
1765 		vm_page_copy(src_m, dst_m);
1766 		vm_page_event(src_m, VMEVENT_COW);
1767 
1768 		/*
1769 		 * Enter it in the pmap...
1770 		 */
1771 
1772 		vm_page_flag_clear(dst_m, PG_ZERO);
1773 		pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
1774 
1775 		/*
1776 		 * Mark it no longer busy, and put it on the active list.
1777 		 */
1778 		vm_page_activate(dst_m);
1779 		vm_page_wakeup(dst_m);
1780 	}
1781 }
1782 
1783 #if 0
1784 
1785 /*
1786  * This routine checks around the requested page for other pages that
1787  * might be able to be faulted in.  This routine brackets the viable
1788  * pages for the pages to be paged in.
1789  *
1790  * Inputs:
1791  *	m, rbehind, rahead
1792  *
1793  * Outputs:
1794  *  marray (array of vm_page_t), reqpage (index of requested page)
1795  *
1796  * Return value:
1797  *  number of pages in marray
1798  */
1799 static int
1800 vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
1801 			  vm_page_t *marray, int *reqpage)
1802 {
1803 	int i,j;
1804 	vm_object_t object;
1805 	vm_pindex_t pindex, startpindex, endpindex, tpindex;
1806 	vm_page_t rtm;
1807 	int cbehind, cahead;
1808 
1809 	object = m->object;
1810 	pindex = m->pindex;
1811 
1812 	/*
1813 	 * we don't fault-ahead for device pager
1814 	 */
1815 	if (object->type == OBJT_DEVICE) {
1816 		*reqpage = 0;
1817 		marray[0] = m;
1818 		return 1;
1819 	}
1820 
1821 	/*
1822 	 * if the requested page is not available, then give up now
1823 	 */
1824 	if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
1825 		*reqpage = 0;	/* not used by caller, fix compiler warn */
1826 		return 0;
1827 	}
1828 
1829 	if ((cbehind == 0) && (cahead == 0)) {
1830 		*reqpage = 0;
1831 		marray[0] = m;
1832 		return 1;
1833 	}
1834 
1835 	if (rahead > cahead) {
1836 		rahead = cahead;
1837 	}
1838 
1839 	if (rbehind > cbehind) {
1840 		rbehind = cbehind;
1841 	}
1842 
1843 	/*
1844 	 * Do not do any readahead if we have insufficient free memory.
1845 	 *
1846 	 * XXX code was broken disabled before and has instability
1847 	 * with this conditonal fixed, so shortcut for now.
1848 	 */
1849 	if (burst_fault == 0 || vm_page_count_severe()) {
1850 		marray[0] = m;
1851 		*reqpage = 0;
1852 		return 1;
1853 	}
1854 
1855 	/*
1856 	 * scan backward for the read behind pages -- in memory
1857 	 *
1858 	 * Assume that if the page is not found an interrupt will not
1859 	 * create it.  Theoretically interrupts can only remove (busy)
1860 	 * pages, not create new associations.
1861 	 */
1862 	if (pindex > 0) {
1863 		if (rbehind > pindex) {
1864 			rbehind = pindex;
1865 			startpindex = 0;
1866 		} else {
1867 			startpindex = pindex - rbehind;
1868 		}
1869 
1870 		crit_enter();
1871 		lwkt_gettoken(&vm_token);
1872 		for (tpindex = pindex; tpindex > startpindex; --tpindex) {
1873 			if (vm_page_lookup(object, tpindex - 1))
1874 				break;
1875 		}
1876 
1877 		i = 0;
1878 		while (tpindex < pindex) {
1879 			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM);
1880 			if (rtm == NULL) {
1881 				lwkt_reltoken(&vm_token);
1882 				crit_exit();
1883 				for (j = 0; j < i; j++) {
1884 					vm_page_free(marray[j]);
1885 				}
1886 				marray[0] = m;
1887 				*reqpage = 0;
1888 				return 1;
1889 			}
1890 			marray[i] = rtm;
1891 			++i;
1892 			++tpindex;
1893 		}
1894 		lwkt_reltoken(&vm_token);
1895 		crit_exit();
1896 	} else {
1897 		i = 0;
1898 	}
1899 
1900 	/*
1901 	 * Assign requested page
1902 	 */
1903 	marray[i] = m;
1904 	*reqpage = i;
1905 	++i;
1906 
1907 	/*
1908 	 * Scan forwards for read-ahead pages
1909 	 */
1910 	tpindex = pindex + 1;
1911 	endpindex = tpindex + rahead;
1912 	if (endpindex > object->size)
1913 		endpindex = object->size;
1914 
1915 	crit_enter();
1916 	lwkt_gettoken(&vm_token);
1917 	while (tpindex < endpindex) {
1918 		if (vm_page_lookup(object, tpindex))
1919 			break;
1920 		rtm = vm_page_alloc(object, tpindex, VM_ALLOC_SYSTEM);
1921 		if (rtm == NULL)
1922 			break;
1923 		marray[i] = rtm;
1924 		++i;
1925 		++tpindex;
1926 	}
1927 	lwkt_reltoken(&vm_token);
1928 	crit_exit();
1929 
1930 	return (i);
1931 }
1932 
1933 #endif
1934 
1935 /*
1936  * vm_prefault() provides a quick way of clustering pagefaults into a
1937  * processes address space.  It is a "cousin" of pmap_object_init_pt,
1938  * except it runs at page fault time instead of mmap time.
1939  *
1940  * This code used to be per-platform pmap_prefault().  It is now
1941  * machine-independent and enhanced to also pre-fault zero-fill pages
1942  * (see vm.fast_fault) as well as make them writable, which greatly
1943  * reduces the number of page faults programs incur.
1944  *
1945  * Application performance when pre-faulting zero-fill pages is heavily
1946  * dependent on the application.  Very tiny applications like /bin/echo
1947  * lose a little performance while applications of any appreciable size
1948  * gain performance.  Prefaulting multiple pages also reduces SMP
1949  * congestion and can improve SMP performance significantly.
1950  *
1951  * NOTE!  prot may allow writing but this only applies to the top level
1952  *	  object.  If we wind up mapping a page extracted from a backing
1953  *	  object we have to make sure it is read-only.
1954  *
1955  * NOTE!  The caller has already handled any COW operations on the
1956  *	  vm_map_entry via the normal fault code.  Do NOT call this
1957  *	  shortcut unless the normal fault code has run on this entry.
1958  *
1959  * No other requirements.
1960  */
1961 #define PFBAK 4
1962 #define PFFOR 4
1963 #define PAGEORDER_SIZE (PFBAK+PFFOR)
1964 
1965 static int vm_prefault_pageorder[] = {
1966 	-PAGE_SIZE, PAGE_SIZE,
1967 	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
1968 	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
1969 	-4 * PAGE_SIZE, 4 * PAGE_SIZE
1970 };
1971 
1972 /*
1973  * Set PG_NOSYNC if the map entry indicates so, but only if the page
1974  * is not already dirty by other means.  This will prevent passive
1975  * filesystem syncing as well as 'sync' from writing out the page.
1976  */
1977 static void
1978 vm_set_nosync(vm_page_t m, vm_map_entry_t entry)
1979 {
1980 	if (entry->eflags & MAP_ENTRY_NOSYNC) {
1981 		if (m->dirty == 0)
1982 			vm_page_flag_set(m, PG_NOSYNC);
1983 	} else {
1984 		vm_page_flag_clear(m, PG_NOSYNC);
1985 	}
1986 }
1987 
1988 static void
1989 vm_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry, int prot)
1990 {
1991 	struct lwp *lp;
1992 	vm_page_t m;
1993 	vm_offset_t starta;
1994 	vm_offset_t addr;
1995 	vm_pindex_t index;
1996 	vm_pindex_t pindex;
1997 	vm_object_t object;
1998 	int pprot;
1999 	int i;
2000 
2001 	/*
2002 	 * We do not currently prefault mappings that use virtual page
2003 	 * tables.  We do not prefault foreign pmaps.
2004 	 */
2005 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
2006 		return;
2007 	lp = curthread->td_lwp;
2008 	if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
2009 		return;
2010 
2011 	object = entry->object.vm_object;
2012 
2013 	starta = addra - PFBAK * PAGE_SIZE;
2014 	if (starta < entry->start)
2015 		starta = entry->start;
2016 	else if (starta > addra)
2017 		starta = 0;
2018 
2019 	/*
2020 	 * critical section protection is required to maintain the
2021 	 * page/object association, interrupts can free pages and remove
2022 	 * them from their objects.
2023 	 */
2024 	crit_enter();
2025 	lwkt_gettoken(&vm_token);
2026 	for (i = 0; i < PAGEORDER_SIZE; i++) {
2027 		vm_object_t lobject;
2028 		int allocated = 0;
2029 
2030 		addr = addra + vm_prefault_pageorder[i];
2031 		if (addr > addra + (PFFOR * PAGE_SIZE))
2032 			addr = 0;
2033 
2034 		if (addr < starta || addr >= entry->end)
2035 			continue;
2036 
2037 		if (pmap_prefault_ok(pmap, addr) == 0)
2038 			continue;
2039 
2040 		/*
2041 		 * Follow the VM object chain to obtain the page to be mapped
2042 		 * into the pmap.
2043 		 *
2044 		 * If we reach the terminal object without finding a page
2045 		 * and we determine it would be advantageous, then allocate
2046 		 * a zero-fill page for the base object.  The base object
2047 		 * is guaranteed to be OBJT_DEFAULT for this case.
2048 		 *
2049 		 * In order to not have to check the pager via *haspage*()
2050 		 * we stop if any non-default object is encountered.  e.g.
2051 		 * a vnode or swap object would stop the loop.
2052 		 */
2053 		index = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
2054 		lobject = object;
2055 		pindex = index;
2056 		pprot = prot;
2057 
2058 		while ((m = vm_page_lookup(lobject, pindex)) == NULL) {
2059 			if (lobject->type != OBJT_DEFAULT)
2060 				break;
2061 			if (lobject->backing_object == NULL) {
2062 				if (vm_fast_fault == 0)
2063 					break;
2064 				if (vm_prefault_pageorder[i] < 0 ||
2065 				    (prot & VM_PROT_WRITE) == 0 ||
2066 				    vm_page_count_min(0)) {
2067 					break;
2068 				}
2069 				/* note: allocate from base object */
2070 				m = vm_page_alloc(object, index,
2071 					      VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
2072 
2073 				if ((m->flags & PG_ZERO) == 0) {
2074 					vm_page_zero_fill(m);
2075 				} else {
2076 					vm_page_flag_clear(m, PG_ZERO);
2077 					mycpu->gd_cnt.v_ozfod++;
2078 				}
2079 				mycpu->gd_cnt.v_zfod++;
2080 				m->valid = VM_PAGE_BITS_ALL;
2081 				allocated = 1;
2082 				pprot = prot;
2083 				/* lobject = object .. not needed */
2084 				break;
2085 			}
2086 			if (lobject->backing_object_offset & PAGE_MASK)
2087 				break;
2088 			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
2089 			lobject = lobject->backing_object;
2090 			pprot &= ~VM_PROT_WRITE;
2091 		}
2092 		/*
2093 		 * NOTE: lobject now invalid (if we did a zero-fill we didn't
2094 		 *	 bother assigning lobject = object).
2095 		 *
2096 		 * Give-up if the page is not available.
2097 		 */
2098 		if (m == NULL)
2099 			break;
2100 
2101 		/*
2102 		 * Do not conditionalize on PG_RAM.  If pages are present in
2103 		 * the VM system we assume optimal caching.  If caching is
2104 		 * not optimal the I/O gravy train will be restarted when we
2105 		 * hit an unavailable page.  We do not want to try to restart
2106 		 * the gravy train now because we really don't know how much
2107 		 * of the object has been cached.  The cost for restarting
2108 		 * the gravy train should be low (since accesses will likely
2109 		 * be I/O bound anyway).
2110 		 *
2111 		 * The object must be marked dirty if we are mapping a
2112 		 * writable page.
2113 		 */
2114 		if (pprot & VM_PROT_WRITE)
2115 			vm_object_set_writeable_dirty(m->object);
2116 
2117 		/*
2118 		 * Enter the page into the pmap if appropriate.  If we had
2119 		 * allocated the page we have to place it on a queue.  If not
2120 		 * we just have to make sure it isn't on the cache queue
2121 		 * (pages on the cache queue are not allowed to be mapped).
2122 		 */
2123 		if (allocated) {
2124 			if (pprot & VM_PROT_WRITE)
2125 				vm_set_nosync(m, entry);
2126 			pmap_enter(pmap, addr, m, pprot, 0);
2127 			vm_page_deactivate(m);
2128 			vm_page_wakeup(m);
2129 		} else if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2130 		    (m->busy == 0) &&
2131 		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2132 
2133 			if ((m->queue - m->pc) == PQ_CACHE) {
2134 				vm_page_deactivate(m);
2135 			}
2136 			vm_page_busy(m);
2137 			if (pprot & VM_PROT_WRITE)
2138 				vm_set_nosync(m, entry);
2139 			pmap_enter(pmap, addr, m, pprot, 0);
2140 			vm_page_wakeup(m);
2141 		}
2142 	}
2143 	lwkt_reltoken(&vm_token);
2144 	crit_exit();
2145 }
2146