xref: /dragonfly/sys/vm/vm_map.c (revision f503b4c4)
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
35  *
36  *
37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38  * All rights reserved.
39  *
40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  *
62  * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
63  */
64 
65 /*
66  *	Virtual memory mapping module.
67  */
68 
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kernel.h>
72 #include <sys/proc.h>
73 #include <sys/serialize.h>
74 #include <sys/lock.h>
75 #include <sys/vmmeter.h>
76 #include <sys/mman.h>
77 #include <sys/vnode.h>
78 #include <sys/resourcevar.h>
79 #include <sys/shm.h>
80 #include <sys/tree.h>
81 #include <sys/malloc.h>
82 #include <sys/objcache.h>
83 
84 #include <vm/vm.h>
85 #include <vm/vm_param.h>
86 #include <vm/pmap.h>
87 #include <vm/vm_map.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_object.h>
90 #include <vm/vm_pager.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/swap_pager.h>
94 #include <vm/vm_zone.h>
95 
96 #include <sys/thread2.h>
97 #include <sys/random.h>
98 #include <sys/sysctl.h>
99 
100 /*
101  * Virtual memory maps provide for the mapping, protection, and sharing
102  * of virtual memory objects.  In addition, this module provides for an
103  * efficient virtual copy of memory from one map to another.
104  *
105  * Synchronization is required prior to most operations.
106  *
107  * Maps consist of an ordered doubly-linked list of simple entries.
108  * A hint and a RB tree is used to speed-up lookups.
109  *
110  * Callers looking to modify maps specify start/end addresses which cause
111  * the related map entry to be clipped if necessary, and then later
112  * recombined if the pieces remained compatible.
113  *
114  * Virtual copy operations are performed by copying VM object references
115  * from one map to another, and then marking both regions as copy-on-write.
116  */
117 static __boolean_t vmspace_ctor(void *obj, void *privdata, int ocflags);
118 static void vmspace_dtor(void *obj, void *privdata);
119 static void vmspace_terminate(struct vmspace *vm, int final);
120 
121 MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore");
122 static struct objcache *vmspace_cache;
123 
124 /*
125  * per-cpu page table cross mappings are initialized in early boot
126  * and might require a considerable number of vm_map_entry structures.
127  */
128 #define MAPENTRYBSP_CACHE	(MAXCPU+1)
129 #define MAPENTRYAP_CACHE	8
130 
131 static struct vm_zone mapentzone_store, mapzone_store;
132 static vm_zone_t mapentzone, mapzone;
133 static struct vm_object mapentobj, mapobj;
134 
135 static struct vm_map_entry map_entry_init[MAX_MAPENT];
136 static struct vm_map_entry cpu_map_entry_init_bsp[MAPENTRYBSP_CACHE];
137 static struct vm_map_entry cpu_map_entry_init_ap[MAXCPU][MAPENTRYAP_CACHE];
138 static struct vm_map map_init[MAX_KMAP];
139 
140 static int randomize_mmap;
141 SYSCTL_INT(_vm, OID_AUTO, randomize_mmap, CTLFLAG_RW, &randomize_mmap, 0,
142     "Randomize mmap offsets");
143 static int vm_map_relock_enable = 1;
144 SYSCTL_INT(_vm, OID_AUTO, map_relock_enable, CTLFLAG_RW,
145 	   &vm_map_relock_enable, 0, "Randomize mmap offsets");
146 
147 static void vm_map_entry_shadow(vm_map_entry_t entry, int addref);
148 static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *);
149 static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
150 static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
151 static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
152 static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
153 static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
154 static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
155 		vm_map_entry_t);
156 static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry, vm_offset_t start, vm_offset_t end, int *count, int flags);
157 
158 /*
159  * Initialize the vm_map module.  Must be called before any other vm_map
160  * routines.
161  *
162  * Map and entry structures are allocated from the general purpose
163  * memory pool with some exceptions:
164  *
165  *	- The kernel map is allocated statically.
166  *	- Initial kernel map entries are allocated out of a static pool.
167  *	- We must set ZONE_SPECIAL here or the early boot code can get
168  *	  stuck if there are >63 cores.
169  *
170  *	These restrictions are necessary since malloc() uses the
171  *	maps and requires map entries.
172  *
173  * Called from the low level boot code only.
174  */
175 void
176 vm_map_startup(void)
177 {
178 	mapzone = &mapzone_store;
179 	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
180 		map_init, MAX_KMAP);
181 	mapentzone = &mapentzone_store;
182 	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
183 		  map_entry_init, MAX_MAPENT);
184 	mapentzone_store.zflags |= ZONE_SPECIAL;
185 }
186 
187 /*
188  * Called prior to any vmspace allocations.
189  *
190  * Called from the low level boot code only.
191  */
192 void
193 vm_init2(void)
194 {
195 	vmspace_cache = objcache_create_mbacked(M_VMSPACE,
196 						sizeof(struct vmspace),
197 						0, ncpus * 4,
198 						vmspace_ctor, vmspace_dtor,
199 						NULL);
200 	zinitna(mapentzone, &mapentobj, NULL, 0, 0,
201 		ZONE_USE_RESERVE | ZONE_SPECIAL, 1);
202 	zinitna(mapzone, &mapobj, NULL, 0, 0, 0, 1);
203 	pmap_init2();
204 	vm_object_init2();
205 }
206 
207 /*
208  * objcache support.  We leave the pmap root cached as long as possible
209  * for performance reasons.
210  */
211 static
212 __boolean_t
213 vmspace_ctor(void *obj, void *privdata, int ocflags)
214 {
215 	struct vmspace *vm = obj;
216 
217 	bzero(vm, sizeof(*vm));
218 	vm->vm_refcnt = (u_int)-1;
219 
220 	return 1;
221 }
222 
223 static
224 void
225 vmspace_dtor(void *obj, void *privdata)
226 {
227 	struct vmspace *vm = obj;
228 
229 	KKASSERT(vm->vm_refcnt == (u_int)-1);
230 	pmap_puninit(vmspace_pmap(vm));
231 }
232 
233 /*
234  * Red black tree functions
235  *
236  * The caller must hold the related map lock.
237  */
238 static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
239 RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);
240 
241 /* a->start is address, and the only field has to be initialized */
242 static int
243 rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
244 {
245 	if (a->start < b->start)
246 		return(-1);
247 	else if (a->start > b->start)
248 		return(1);
249 	return(0);
250 }
251 
252 /*
253  * Initialize vmspace ref/hold counts vmspace0.  There is a holdcnt for
254  * every refcnt.
255  */
256 void
257 vmspace_initrefs(struct vmspace *vm)
258 {
259 	vm->vm_refcnt = 1;
260 	vm->vm_holdcnt = 1;
261 }
262 
263 /*
264  * Allocate a vmspace structure, including a vm_map and pmap.
265  * Initialize numerous fields.  While the initial allocation is zerod,
266  * subsequence reuse from the objcache leaves elements of the structure
267  * intact (particularly the pmap), so portions must be zerod.
268  *
269  * Returns a referenced vmspace.
270  *
271  * No requirements.
272  */
273 struct vmspace *
274 vmspace_alloc(vm_offset_t min, vm_offset_t max)
275 {
276 	struct vmspace *vm;
277 
278 	vm = objcache_get(vmspace_cache, M_WAITOK);
279 
280 	bzero(&vm->vm_startcopy,
281 	      (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
282 	vm_map_init(&vm->vm_map, min, max, NULL);	/* initializes token */
283 
284 	/*
285 	 * NOTE: hold to acquires token for safety.
286 	 *
287 	 * On return vmspace is referenced (refs=1, hold=1).  That is,
288 	 * each refcnt also has a holdcnt.  There can be additional holds
289 	 * (holdcnt) above and beyond the refcnt.  Finalization is handled in
290 	 * two stages, one on refs 1->0, and the the second on hold 1->0.
291 	 */
292 	KKASSERT(vm->vm_holdcnt == 0);
293 	KKASSERT(vm->vm_refcnt == (u_int)-1);
294 	vmspace_initrefs(vm);
295 	vmspace_hold(vm);
296 	pmap_pinit(vmspace_pmap(vm));		/* (some fields reused) */
297 	vm->vm_map.pmap = vmspace_pmap(vm);	/* XXX */
298 	vm->vm_shm = NULL;
299 	vm->vm_flags = 0;
300 	cpu_vmspace_alloc(vm);
301 	vmspace_drop(vm);
302 
303 	return (vm);
304 }
305 
306 /*
307  * NOTE: Can return -1 if the vmspace is exiting.
308  */
309 int
310 vmspace_getrefs(struct vmspace *vm)
311 {
312 	return ((int)vm->vm_refcnt);
313 }
314 
315 /*
316  * A vmspace object must already have a non-zero hold to be able to gain
317  * further holds on it.
318  */
319 static void
320 vmspace_hold_notoken(struct vmspace *vm)
321 {
322 	KKASSERT(vm->vm_holdcnt != 0);
323 	refcount_acquire(&vm->vm_holdcnt);
324 }
325 
326 static void
327 vmspace_drop_notoken(struct vmspace *vm)
328 {
329 	if (refcount_release(&vm->vm_holdcnt)) {
330 		if (vm->vm_refcnt == (u_int)-1) {
331 			vmspace_terminate(vm, 1);
332 		}
333 	}
334 }
335 
336 void
337 vmspace_hold(struct vmspace *vm)
338 {
339 	vmspace_hold_notoken(vm);
340 	lwkt_gettoken(&vm->vm_map.token);
341 }
342 
343 void
344 vmspace_drop(struct vmspace *vm)
345 {
346 	lwkt_reltoken(&vm->vm_map.token);
347 	vmspace_drop_notoken(vm);
348 }
349 
350 /*
351  * A vmspace object must not be in a terminated state to be able to obtain
352  * additional refs on it.
353  *
354  * Ref'ing a vmspace object also increments its hold count.
355  */
356 void
357 vmspace_ref(struct vmspace *vm)
358 {
359 	KKASSERT((int)vm->vm_refcnt >= 0);
360 	vmspace_hold_notoken(vm);
361 	refcount_acquire(&vm->vm_refcnt);
362 }
363 
364 /*
365  * Release a ref on the vmspace.  On the 1->0 transition we do stage-1
366  * termination of the vmspace.  Then, on the final drop of the hold we
367  * will do stage-2 final termination.
368  */
369 void
370 vmspace_rel(struct vmspace *vm)
371 {
372 	if (refcount_release(&vm->vm_refcnt)) {
373 		vm->vm_refcnt = (u_int)-1;	/* no other refs possible */
374 		vmspace_terminate(vm, 0);
375 	}
376 	vmspace_drop_notoken(vm);
377 }
378 
379 /*
380  * This is called during exit indicating that the vmspace is no
381  * longer in used by an exiting process, but the process has not yet
382  * been reaped.
383  *
384  * We release the refcnt but not the associated holdcnt.
385  *
386  * No requirements.
387  */
388 void
389 vmspace_relexit(struct vmspace *vm)
390 {
391 	if (refcount_release(&vm->vm_refcnt)) {
392 		vm->vm_refcnt = (u_int)-1;	/* no other refs possible */
393 		vmspace_terminate(vm, 0);
394 	}
395 }
396 
397 /*
398  * Called during reap to disconnect the remainder of the vmspace from
399  * the process.  On the hold drop the vmspace termination is finalized.
400  *
401  * No requirements.
402  */
403 void
404 vmspace_exitfree(struct proc *p)
405 {
406 	struct vmspace *vm;
407 
408 	vm = p->p_vmspace;
409 	p->p_vmspace = NULL;
410 	vmspace_drop_notoken(vm);
411 }
412 
413 /*
414  * Called in two cases:
415  *
416  * (1) When the last refcnt is dropped and the vmspace becomes inactive,
417  *     called with final == 0.  refcnt will be (u_int)-1 at this point,
418  *     and holdcnt will still be non-zero.
419  *
420  * (2) When holdcnt becomes 0, called with final == 1.  There should no
421  *     longer be anyone with access to the vmspace.
422  *
423  * VMSPACE_EXIT1 flags the primary deactivation
424  * VMSPACE_EXIT2 flags the last reap
425  */
426 static void
427 vmspace_terminate(struct vmspace *vm, int final)
428 {
429 	int count;
430 
431 	lwkt_gettoken(&vm->vm_map.token);
432 	if (final == 0) {
433 		KKASSERT((vm->vm_flags & VMSPACE_EXIT1) == 0);
434 
435 		/*
436 		 * Get rid of most of the resources.  Leave the kernel pmap
437 		 * intact.
438 		 */
439 		vm->vm_flags |= VMSPACE_EXIT1;
440 		shmexit(vm);
441 		pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
442 				  VM_MAX_USER_ADDRESS);
443 		vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
444 			      VM_MAX_USER_ADDRESS);
445 		lwkt_reltoken(&vm->vm_map.token);
446 	} else {
447 		KKASSERT((vm->vm_flags & VMSPACE_EXIT1) != 0);
448 		KKASSERT((vm->vm_flags & VMSPACE_EXIT2) == 0);
449 
450 		/*
451 		 * Get rid of remaining basic resources.
452 		 */
453 		vm->vm_flags |= VMSPACE_EXIT2;
454 		cpu_vmspace_free(vm);
455 		shmexit(vm);
456 
457 		/*
458 		 * Lock the map, to wait out all other references to it.
459 		 * Delete all of the mappings and pages they hold, then call
460 		 * the pmap module to reclaim anything left.
461 		 */
462 		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
463 		vm_map_lock(&vm->vm_map);
464 		vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
465 			      vm->vm_map.max_offset, &count);
466 		vm_map_unlock(&vm->vm_map);
467 		vm_map_entry_release(count);
468 
469 		lwkt_gettoken(&vmspace_pmap(vm)->pm_token);
470 		pmap_release(vmspace_pmap(vm));
471 		lwkt_reltoken(&vmspace_pmap(vm)->pm_token);
472 		lwkt_reltoken(&vm->vm_map.token);
473 		objcache_put(vmspace_cache, vm);
474 	}
475 }
476 
477 /*
478  * Swap useage is determined by taking the proportional swap used by
479  * VM objects backing the VM map.  To make up for fractional losses,
480  * if the VM object has any swap use at all the associated map entries
481  * count for at least 1 swap page.
482  *
483  * No requirements.
484  */
485 int
486 vmspace_swap_count(struct vmspace *vm)
487 {
488 	vm_map_t map = &vm->vm_map;
489 	vm_map_entry_t cur;
490 	vm_object_t object;
491 	int count = 0;
492 	int n;
493 
494 	vmspace_hold(vm);
495 	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
496 		switch(cur->maptype) {
497 		case VM_MAPTYPE_NORMAL:
498 		case VM_MAPTYPE_VPAGETABLE:
499 			if ((object = cur->object.vm_object) == NULL)
500 				break;
501 			if (object->swblock_count) {
502 				n = (cur->end - cur->start) / PAGE_SIZE;
503 				count += object->swblock_count *
504 				    SWAP_META_PAGES * n / object->size + 1;
505 			}
506 			break;
507 		default:
508 			break;
509 		}
510 	}
511 	vmspace_drop(vm);
512 
513 	return(count);
514 }
515 
516 /*
517  * Calculate the approximate number of anonymous pages in use by
518  * this vmspace.  To make up for fractional losses, we count each
519  * VM object as having at least 1 anonymous page.
520  *
521  * No requirements.
522  */
523 int
524 vmspace_anonymous_count(struct vmspace *vm)
525 {
526 	vm_map_t map = &vm->vm_map;
527 	vm_map_entry_t cur;
528 	vm_object_t object;
529 	int count = 0;
530 
531 	vmspace_hold(vm);
532 	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
533 		switch(cur->maptype) {
534 		case VM_MAPTYPE_NORMAL:
535 		case VM_MAPTYPE_VPAGETABLE:
536 			if ((object = cur->object.vm_object) == NULL)
537 				break;
538 			if (object->type != OBJT_DEFAULT &&
539 			    object->type != OBJT_SWAP) {
540 				break;
541 			}
542 			count += object->resident_page_count;
543 			break;
544 		default:
545 			break;
546 		}
547 	}
548 	vmspace_drop(vm);
549 
550 	return(count);
551 }
552 
553 /*
554  * Creates and returns a new empty VM map with the given physical map
555  * structure, and having the given lower and upper address bounds.
556  *
557  * No requirements.
558  */
559 vm_map_t
560 vm_map_create(vm_map_t result, pmap_t pmap, vm_offset_t min, vm_offset_t max)
561 {
562 	if (result == NULL)
563 		result = zalloc(mapzone);
564 	vm_map_init(result, min, max, pmap);
565 	return (result);
566 }
567 
568 /*
569  * Initialize an existing vm_map structure such as that in the vmspace
570  * structure.  The pmap is initialized elsewhere.
571  *
572  * No requirements.
573  */
574 void
575 vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max, pmap_t pmap)
576 {
577 	map->header.next = map->header.prev = &map->header;
578 	RB_INIT(&map->rb_root);
579 	map->nentries = 0;
580 	map->size = 0;
581 	map->system_map = 0;
582 	map->min_offset = min;
583 	map->max_offset = max;
584 	map->pmap = pmap;
585 	map->first_free = &map->header;
586 	map->hint = &map->header;
587 	map->timestamp = 0;
588 	map->flags = 0;
589 	lwkt_token_init(&map->token, "vm_map");
590 	lockinit(&map->lock, "vm_maplk", (hz + 9) / 10, 0);
591 }
592 
593 /*
594  * Shadow the vm_map_entry's object.  This typically needs to be done when
595  * a write fault is taken on an entry which had previously been cloned by
596  * fork().  The shared object (which might be NULL) must become private so
597  * we add a shadow layer above it.
598  *
599  * Object allocation for anonymous mappings is defered as long as possible.
600  * When creating a shadow, however, the underlying object must be instantiated
601  * so it can be shared.
602  *
603  * If the map segment is governed by a virtual page table then it is
604  * possible to address offsets beyond the mapped area.  Just allocate
605  * a maximally sized object for this case.
606  *
607  * The vm_map must be exclusively locked.
608  * No other requirements.
609  */
610 static
611 void
612 vm_map_entry_shadow(vm_map_entry_t entry, int addref)
613 {
614 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
615 		vm_object_shadow(&entry->object.vm_object, &entry->offset,
616 				 0x7FFFFFFF, addref);	/* XXX */
617 	} else {
618 		vm_object_shadow(&entry->object.vm_object, &entry->offset,
619 				 atop(entry->end - entry->start), addref);
620 	}
621 	entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
622 }
623 
624 /*
625  * Allocate an object for a vm_map_entry.
626  *
627  * Object allocation for anonymous mappings is defered as long as possible.
628  * This function is called when we can defer no longer, generally when a map
629  * entry might be split or forked or takes a page fault.
630  *
631  * If the map segment is governed by a virtual page table then it is
632  * possible to address offsets beyond the mapped area.  Just allocate
633  * a maximally sized object for this case.
634  *
635  * The vm_map must be exclusively locked.
636  * No other requirements.
637  */
638 void
639 vm_map_entry_allocate_object(vm_map_entry_t entry)
640 {
641 	vm_object_t obj;
642 
643 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
644 		obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF); /* XXX */
645 	} else {
646 		obj = vm_object_allocate(OBJT_DEFAULT,
647 					 atop(entry->end - entry->start));
648 	}
649 	entry->object.vm_object = obj;
650 	entry->offset = 0;
651 }
652 
653 /*
654  * Set an initial negative count so the first attempt to reserve
655  * space preloads a bunch of vm_map_entry's for this cpu.  Also
656  * pre-allocate 2 vm_map_entries which will be needed by zalloc() to
657  * map a new page for vm_map_entry structures.  SMP systems are
658  * particularly sensitive.
659  *
660  * This routine is called in early boot so we cannot just call
661  * vm_map_entry_reserve().
662  *
663  * Called from the low level boot code only (for each cpu)
664  *
665  * WARNING! Take care not to have too-big a static/BSS structure here
666  *	    as MAXCPU can be 256+, otherwise the loader's 64MB heap
667  *	    can get blown out by the kernel plus the initrd image.
668  */
669 void
670 vm_map_entry_reserve_cpu_init(globaldata_t gd)
671 {
672 	vm_map_entry_t entry;
673 	int count;
674 	int i;
675 
676 	gd->gd_vme_avail -= MAP_RESERVE_COUNT * 2;
677 	if (gd->gd_cpuid == 0) {
678 		entry = &cpu_map_entry_init_bsp[0];
679 		count = MAPENTRYBSP_CACHE;
680 	} else {
681 		entry = &cpu_map_entry_init_ap[gd->gd_cpuid][0];
682 		count = MAPENTRYAP_CACHE;
683 	}
684 	for (i = 0; i < count; ++i, ++entry) {
685 		entry->next = gd->gd_vme_base;
686 		gd->gd_vme_base = entry;
687 	}
688 }
689 
690 /*
691  * Reserves vm_map_entry structures so code later on can manipulate
692  * map_entry structures within a locked map without blocking trying
693  * to allocate a new vm_map_entry.
694  *
695  * No requirements.
696  */
697 int
698 vm_map_entry_reserve(int count)
699 {
700 	struct globaldata *gd = mycpu;
701 	vm_map_entry_t entry;
702 
703 	/*
704 	 * Make sure we have enough structures in gd_vme_base to handle
705 	 * the reservation request.
706 	 *
707 	 * The critical section protects access to the per-cpu gd.
708 	 */
709 	crit_enter();
710 	while (gd->gd_vme_avail < count) {
711 		entry = zalloc(mapentzone);
712 		entry->next = gd->gd_vme_base;
713 		gd->gd_vme_base = entry;
714 		++gd->gd_vme_avail;
715 	}
716 	gd->gd_vme_avail -= count;
717 	crit_exit();
718 
719 	return(count);
720 }
721 
722 /*
723  * Releases previously reserved vm_map_entry structures that were not
724  * used.  If we have too much junk in our per-cpu cache clean some of
725  * it out.
726  *
727  * No requirements.
728  */
729 void
730 vm_map_entry_release(int count)
731 {
732 	struct globaldata *gd = mycpu;
733 	vm_map_entry_t entry;
734 
735 	crit_enter();
736 	gd->gd_vme_avail += count;
737 	while (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
738 		entry = gd->gd_vme_base;
739 		KKASSERT(entry != NULL);
740 		gd->gd_vme_base = entry->next;
741 		--gd->gd_vme_avail;
742 		crit_exit();
743 		zfree(mapentzone, entry);
744 		crit_enter();
745 	}
746 	crit_exit();
747 }
748 
749 /*
750  * Reserve map entry structures for use in kernel_map itself.  These
751  * entries have *ALREADY* been reserved on a per-cpu basis when the map
752  * was inited.  This function is used by zalloc() to avoid a recursion
753  * when zalloc() itself needs to allocate additional kernel memory.
754  *
755  * This function works like the normal reserve but does not load the
756  * vm_map_entry cache (because that would result in an infinite
757  * recursion).  Note that gd_vme_avail may go negative.  This is expected.
758  *
759  * Any caller of this function must be sure to renormalize after
760  * potentially eating entries to ensure that the reserve supply
761  * remains intact.
762  *
763  * No requirements.
764  */
765 int
766 vm_map_entry_kreserve(int count)
767 {
768 	struct globaldata *gd = mycpu;
769 
770 	crit_enter();
771 	gd->gd_vme_avail -= count;
772 	crit_exit();
773 	KASSERT(gd->gd_vme_base != NULL,
774 		("no reserved entries left, gd_vme_avail = %d",
775 		gd->gd_vme_avail));
776 	return(count);
777 }
778 
779 /*
780  * Release previously reserved map entries for kernel_map.  We do not
781  * attempt to clean up like the normal release function as this would
782  * cause an unnecessary (but probably not fatal) deep procedure call.
783  *
784  * No requirements.
785  */
786 void
787 vm_map_entry_krelease(int count)
788 {
789 	struct globaldata *gd = mycpu;
790 
791 	crit_enter();
792 	gd->gd_vme_avail += count;
793 	crit_exit();
794 }
795 
796 /*
797  * Allocates a VM map entry for insertion.  No entry fields are filled in.
798  *
799  * The entries should have previously been reserved.  The reservation count
800  * is tracked in (*countp).
801  *
802  * No requirements.
803  */
804 static vm_map_entry_t
805 vm_map_entry_create(vm_map_t map, int *countp)
806 {
807 	struct globaldata *gd = mycpu;
808 	vm_map_entry_t entry;
809 
810 	KKASSERT(*countp > 0);
811 	--*countp;
812 	crit_enter();
813 	entry = gd->gd_vme_base;
814 	KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
815 	gd->gd_vme_base = entry->next;
816 	crit_exit();
817 
818 	return(entry);
819 }
820 
821 /*
822  * Dispose of a vm_map_entry that is no longer being referenced.
823  *
824  * No requirements.
825  */
826 static void
827 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
828 {
829 	struct globaldata *gd = mycpu;
830 
831 	KKASSERT(map->hint != entry);
832 	KKASSERT(map->first_free != entry);
833 
834 	++*countp;
835 	crit_enter();
836 	entry->next = gd->gd_vme_base;
837 	gd->gd_vme_base = entry;
838 	crit_exit();
839 }
840 
841 
842 /*
843  * Insert/remove entries from maps.
844  *
845  * The related map must be exclusively locked.
846  * The caller must hold map->token
847  * No other requirements.
848  */
849 static __inline void
850 vm_map_entry_link(vm_map_t map,
851 		  vm_map_entry_t after_where,
852 		  vm_map_entry_t entry)
853 {
854 	ASSERT_VM_MAP_LOCKED(map);
855 
856 	map->nentries++;
857 	entry->prev = after_where;
858 	entry->next = after_where->next;
859 	entry->next->prev = entry;
860 	after_where->next = entry;
861 	if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry))
862 		panic("vm_map_entry_link: dup addr map %p ent %p", map, entry);
863 }
864 
865 static __inline void
866 vm_map_entry_unlink(vm_map_t map,
867 		    vm_map_entry_t entry)
868 {
869 	vm_map_entry_t prev;
870 	vm_map_entry_t next;
871 
872 	ASSERT_VM_MAP_LOCKED(map);
873 
874 	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
875 		panic("vm_map_entry_unlink: attempt to mess with "
876 		      "locked entry! %p", entry);
877 	}
878 	prev = entry->prev;
879 	next = entry->next;
880 	next->prev = prev;
881 	prev->next = next;
882 	vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry);
883 	map->nentries--;
884 }
885 
886 /*
887  * Finds the map entry containing (or immediately preceding) the specified
888  * address in the given map.  The entry is returned in (*entry).
889  *
890  * The boolean result indicates whether the address is actually contained
891  * in the map.
892  *
893  * The related map must be locked.
894  * No other requirements.
895  */
896 boolean_t
897 vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry)
898 {
899 	vm_map_entry_t tmp;
900 	vm_map_entry_t last;
901 
902 	ASSERT_VM_MAP_LOCKED(map);
903 #if 0
904 	/*
905 	 * XXX TEMPORARILY DISABLED.  For some reason our attempt to revive
906 	 * the hint code with the red-black lookup meets with system crashes
907 	 * and lockups.  We do not yet know why.
908 	 *
909 	 * It is possible that the problem is related to the setting
910 	 * of the hint during map_entry deletion, in the code specified
911 	 * at the GGG comment later on in this file.
912 	 *
913 	 * YYY More likely it's because this function can be called with
914 	 * a shared lock on the map, resulting in map->hint updates possibly
915 	 * racing.  Fixed now but untested.
916 	 */
917 	/*
918 	 * Quickly check the cached hint, there's a good chance of a match.
919 	 */
920 	tmp = map->hint;
921 	cpu_ccfence();
922 	if (tmp != &map->header) {
923 		if (address >= tmp->start && address < tmp->end) {
924 			*entry = tmp;
925 			return(TRUE);
926 		}
927 	}
928 #endif
929 
930 	/*
931 	 * Locate the record from the top of the tree.  'last' tracks the
932 	 * closest prior record and is returned if no match is found, which
933 	 * in binary tree terms means tracking the most recent right-branch
934 	 * taken.  If there is no prior record, &map->header is returned.
935 	 */
936 	last = &map->header;
937 	tmp = RB_ROOT(&map->rb_root);
938 
939 	while (tmp) {
940 		if (address >= tmp->start) {
941 			if (address < tmp->end) {
942 				*entry = tmp;
943 				map->hint = tmp;
944 				return(TRUE);
945 			}
946 			last = tmp;
947 			tmp = RB_RIGHT(tmp, rb_entry);
948 		} else {
949 			tmp = RB_LEFT(tmp, rb_entry);
950 		}
951 	}
952 	*entry = last;
953 	return (FALSE);
954 }
955 
956 /*
957  * Inserts the given whole VM object into the target map at the specified
958  * address range.  The object's size should match that of the address range.
959  *
960  * The map must be exclusively locked.
961  * The object must be held.
962  * The caller must have reserved sufficient vm_map_entry structures.
963  *
964  * If object is non-NULL, ref count must be bumped by caller prior to
965  * making call to account for the new entry.
966  */
967 int
968 vm_map_insert(vm_map_t map, int *countp,
969 	      vm_object_t object, vm_ooffset_t offset,
970 	      vm_offset_t start, vm_offset_t end,
971 	      vm_maptype_t maptype,
972 	      vm_prot_t prot, vm_prot_t max,
973 	      int cow)
974 {
975 	vm_map_entry_t new_entry;
976 	vm_map_entry_t prev_entry;
977 	vm_map_entry_t temp_entry;
978 	vm_eflags_t protoeflags;
979 	int must_drop = 0;
980 
981 	ASSERT_VM_MAP_LOCKED(map);
982 	if (object)
983 		ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
984 
985 	/*
986 	 * Check that the start and end points are not bogus.
987 	 */
988 	if ((start < map->min_offset) || (end > map->max_offset) ||
989 	    (start >= end))
990 		return (KERN_INVALID_ADDRESS);
991 
992 	/*
993 	 * Find the entry prior to the proposed starting address; if it's part
994 	 * of an existing entry, this range is bogus.
995 	 */
996 	if (vm_map_lookup_entry(map, start, &temp_entry))
997 		return (KERN_NO_SPACE);
998 
999 	prev_entry = temp_entry;
1000 
1001 	/*
1002 	 * Assert that the next entry doesn't overlap the end point.
1003 	 */
1004 
1005 	if ((prev_entry->next != &map->header) &&
1006 	    (prev_entry->next->start < end))
1007 		return (KERN_NO_SPACE);
1008 
1009 	protoeflags = 0;
1010 
1011 	if (cow & MAP_COPY_ON_WRITE)
1012 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
1013 
1014 	if (cow & MAP_NOFAULT) {
1015 		protoeflags |= MAP_ENTRY_NOFAULT;
1016 
1017 		KASSERT(object == NULL,
1018 			("vm_map_insert: paradoxical MAP_NOFAULT request"));
1019 	}
1020 	if (cow & MAP_DISABLE_SYNCER)
1021 		protoeflags |= MAP_ENTRY_NOSYNC;
1022 	if (cow & MAP_DISABLE_COREDUMP)
1023 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
1024 	if (cow & MAP_IS_STACK)
1025 		protoeflags |= MAP_ENTRY_STACK;
1026 	if (cow & MAP_IS_KSTACK)
1027 		protoeflags |= MAP_ENTRY_KSTACK;
1028 
1029 	lwkt_gettoken(&map->token);
1030 
1031 	if (object) {
1032 		/*
1033 		 * When object is non-NULL, it could be shared with another
1034 		 * process.  We have to set or clear OBJ_ONEMAPPING
1035 		 * appropriately.
1036 		 *
1037 		 * NOTE: This flag is only applicable to DEFAULT and SWAP
1038 		 *	 objects and will already be clear in other types
1039 		 *	 of objects, so a shared object lock is ok for
1040 		 *	 VNODE objects.
1041 		 */
1042 		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
1043 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
1044 		}
1045 	}
1046 	else if ((prev_entry != &map->header) &&
1047 		 (prev_entry->eflags == protoeflags) &&
1048 		 (prev_entry->end == start) &&
1049 		 (prev_entry->wired_count == 0) &&
1050 		 prev_entry->maptype == maptype &&
1051 		 ((prev_entry->object.vm_object == NULL) ||
1052 		  vm_object_coalesce(prev_entry->object.vm_object,
1053 				     OFF_TO_IDX(prev_entry->offset),
1054 				     (vm_size_t)(prev_entry->end - prev_entry->start),
1055 				     (vm_size_t)(end - prev_entry->end)))) {
1056 		/*
1057 		 * We were able to extend the object.  Determine if we
1058 		 * can extend the previous map entry to include the
1059 		 * new range as well.
1060 		 */
1061 		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
1062 		    (prev_entry->protection == prot) &&
1063 		    (prev_entry->max_protection == max)) {
1064 			map->size += (end - prev_entry->end);
1065 			prev_entry->end = end;
1066 			vm_map_simplify_entry(map, prev_entry, countp);
1067 			lwkt_reltoken(&map->token);
1068 			return (KERN_SUCCESS);
1069 		}
1070 
1071 		/*
1072 		 * If we can extend the object but cannot extend the
1073 		 * map entry, we have to create a new map entry.  We
1074 		 * must bump the ref count on the extended object to
1075 		 * account for it.  object may be NULL.
1076 		 */
1077 		object = prev_entry->object.vm_object;
1078 		offset = prev_entry->offset +
1079 			(prev_entry->end - prev_entry->start);
1080 		if (object) {
1081 			vm_object_hold(object);
1082 			vm_object_chain_wait(object, 0);
1083 			vm_object_reference_locked(object);
1084 			must_drop = 1;
1085 		}
1086 	}
1087 
1088 	/*
1089 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
1090 	 * in things like the buffer map where we manage kva but do not manage
1091 	 * backing objects.
1092 	 */
1093 
1094 	/*
1095 	 * Create a new entry
1096 	 */
1097 
1098 	new_entry = vm_map_entry_create(map, countp);
1099 	new_entry->start = start;
1100 	new_entry->end = end;
1101 
1102 	new_entry->maptype = maptype;
1103 	new_entry->eflags = protoeflags;
1104 	new_entry->object.vm_object = object;
1105 	new_entry->offset = offset;
1106 	new_entry->aux.master_pde = 0;
1107 
1108 	new_entry->inheritance = VM_INHERIT_DEFAULT;
1109 	new_entry->protection = prot;
1110 	new_entry->max_protection = max;
1111 	new_entry->wired_count = 0;
1112 
1113 	/*
1114 	 * Insert the new entry into the list
1115 	 */
1116 
1117 	vm_map_entry_link(map, prev_entry, new_entry);
1118 	map->size += new_entry->end - new_entry->start;
1119 
1120 	/*
1121 	 * Update the free space hint.  Entries cannot overlap.
1122 	 * An exact comparison is needed to avoid matching
1123 	 * against the map->header.
1124 	 */
1125 	if ((map->first_free == prev_entry) &&
1126 	    (prev_entry->end == new_entry->start)) {
1127 		map->first_free = new_entry;
1128 	}
1129 
1130 #if 0
1131 	/*
1132 	 * Temporarily removed to avoid MAP_STACK panic, due to
1133 	 * MAP_STACK being a huge hack.  Will be added back in
1134 	 * when MAP_STACK (and the user stack mapping) is fixed.
1135 	 */
1136 	/*
1137 	 * It may be possible to simplify the entry
1138 	 */
1139 	vm_map_simplify_entry(map, new_entry, countp);
1140 #endif
1141 
1142 	/*
1143 	 * Try to pre-populate the page table.  Mappings governed by virtual
1144 	 * page tables cannot be prepopulated without a lot of work, so
1145 	 * don't try.
1146 	 */
1147 	if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
1148 	    maptype != VM_MAPTYPE_VPAGETABLE) {
1149 		int dorelock = 0;
1150 		if (vm_map_relock_enable && (cow & MAP_PREFAULT_RELOCK)) {
1151 			dorelock = 1;
1152 			vm_object_lock_swap();
1153 			vm_object_drop(object);
1154 		}
1155 		pmap_object_init_pt(map->pmap, start, prot,
1156 				    object, OFF_TO_IDX(offset), end - start,
1157 				    cow & MAP_PREFAULT_PARTIAL);
1158 		if (dorelock) {
1159 			vm_object_hold(object);
1160 			vm_object_lock_swap();
1161 		}
1162 	}
1163 	if (must_drop)
1164 		vm_object_drop(object);
1165 
1166 	lwkt_reltoken(&map->token);
1167 	return (KERN_SUCCESS);
1168 }
1169 
1170 /*
1171  * Find sufficient space for `length' bytes in the given map, starting at
1172  * `start'.  Returns 0 on success, 1 on no space.
1173  *
1174  * This function will returned an arbitrarily aligned pointer.  If no
1175  * particular alignment is required you should pass align as 1.  Note that
1176  * the map may return PAGE_SIZE aligned pointers if all the lengths used in
1177  * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
1178  * argument.
1179  *
1180  * 'align' should be a power of 2 but is not required to be.
1181  *
1182  * The map must be exclusively locked.
1183  * No other requirements.
1184  */
1185 int
1186 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1187 		 vm_size_t align, int flags, vm_offset_t *addr)
1188 {
1189 	vm_map_entry_t entry, next;
1190 	vm_offset_t end;
1191 	vm_offset_t align_mask;
1192 
1193 	if (start < map->min_offset)
1194 		start = map->min_offset;
1195 	if (start > map->max_offset)
1196 		return (1);
1197 
1198 	/*
1199 	 * If the alignment is not a power of 2 we will have to use
1200 	 * a mod/division, set align_mask to a special value.
1201 	 */
1202 	if ((align | (align - 1)) + 1 != (align << 1))
1203 		align_mask = (vm_offset_t)-1;
1204 	else
1205 		align_mask = align - 1;
1206 
1207 	/*
1208 	 * Look for the first possible address; if there's already something
1209 	 * at this address, we have to start after it.
1210 	 */
1211 	if (start == map->min_offset) {
1212 		if ((entry = map->first_free) != &map->header)
1213 			start = entry->end;
1214 	} else {
1215 		vm_map_entry_t tmp;
1216 
1217 		if (vm_map_lookup_entry(map, start, &tmp))
1218 			start = tmp->end;
1219 		entry = tmp;
1220 	}
1221 
1222 	/*
1223 	 * Look through the rest of the map, trying to fit a new region in the
1224 	 * gap between existing regions, or after the very last region.
1225 	 */
1226 	for (;; start = (entry = next)->end) {
1227 		/*
1228 		 * Adjust the proposed start by the requested alignment,
1229 		 * be sure that we didn't wrap the address.
1230 		 */
1231 		if (align_mask == (vm_offset_t)-1)
1232 			end = ((start + align - 1) / align) * align;
1233 		else
1234 			end = (start + align_mask) & ~align_mask;
1235 		if (end < start)
1236 			return (1);
1237 		start = end;
1238 		/*
1239 		 * Find the end of the proposed new region.  Be sure we didn't
1240 		 * go beyond the end of the map, or wrap around the address.
1241 		 * Then check to see if this is the last entry or if the
1242 		 * proposed end fits in the gap between this and the next
1243 		 * entry.
1244 		 */
1245 		end = start + length;
1246 		if (end > map->max_offset || end < start)
1247 			return (1);
1248 		next = entry->next;
1249 
1250 		/*
1251 		 * If the next entry's start address is beyond the desired
1252 		 * end address we may have found a good entry.
1253 		 *
1254 		 * If the next entry is a stack mapping we do not map into
1255 		 * the stack's reserved space.
1256 		 *
1257 		 * XXX continue to allow mapping into the stack's reserved
1258 		 * space if doing a MAP_STACK mapping inside a MAP_STACK
1259 		 * mapping, for backwards compatibility.  But the caller
1260 		 * really should use MAP_STACK | MAP_TRYFIXED if they
1261 		 * want to do that.
1262 		 */
1263 		if (next == &map->header)
1264 			break;
1265 		if (next->start >= end) {
1266 			if ((next->eflags & MAP_ENTRY_STACK) == 0)
1267 				break;
1268 			if (flags & MAP_STACK)
1269 				break;
1270 			if (next->start - next->aux.avail_ssize >= end)
1271 				break;
1272 		}
1273 	}
1274 	map->hint = entry;
1275 
1276 	/*
1277 	 * Grow the kernel_map if necessary.  pmap_growkernel() will panic
1278 	 * if it fails.  The kernel_map is locked and nothing can steal
1279 	 * our address space if pmap_growkernel() blocks.
1280 	 *
1281 	 * NOTE: This may be unconditionally called for kldload areas on
1282 	 *	 x86_64 because these do not bump kernel_vm_end (which would
1283 	 *	 fill 128G worth of page tables!).  Therefore we must not
1284 	 *	 retry.
1285 	 */
1286 	if (map == &kernel_map) {
1287 		vm_offset_t kstop;
1288 
1289 		kstop = round_page(start + length);
1290 		if (kstop > kernel_vm_end)
1291 			pmap_growkernel(start, kstop);
1292 	}
1293 	*addr = start;
1294 	return (0);
1295 }
1296 
1297 /*
1298  * vm_map_find finds an unallocated region in the target address map with
1299  * the given length and allocates it.  The search is defined to be first-fit
1300  * from the specified address; the region found is returned in the same
1301  * parameter.
1302  *
1303  * If object is non-NULL, ref count must be bumped by caller
1304  * prior to making call to account for the new entry.
1305  *
1306  * No requirements.  This function will lock the map temporarily.
1307  */
1308 int
1309 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1310 	    vm_offset_t *addr,	vm_size_t length, vm_size_t align,
1311 	    boolean_t fitit,
1312 	    vm_maptype_t maptype,
1313 	    vm_prot_t prot, vm_prot_t max,
1314 	    int cow)
1315 {
1316 	vm_offset_t start;
1317 	int result;
1318 	int count;
1319 
1320 	start = *addr;
1321 
1322 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1323 	vm_map_lock(map);
1324 	if (object)
1325 		vm_object_hold_shared(object);
1326 	if (fitit) {
1327 		if (vm_map_findspace(map, start, length, align, 0, addr)) {
1328 			if (object)
1329 				vm_object_drop(object);
1330 			vm_map_unlock(map);
1331 			vm_map_entry_release(count);
1332 			return (KERN_NO_SPACE);
1333 		}
1334 		start = *addr;
1335 	}
1336 	result = vm_map_insert(map, &count, object, offset,
1337 			       start, start + length,
1338 			       maptype,
1339 			       prot, max,
1340 			       cow);
1341 	if (object)
1342 		vm_object_drop(object);
1343 	vm_map_unlock(map);
1344 	vm_map_entry_release(count);
1345 
1346 	return (result);
1347 }
1348 
1349 /*
1350  * Simplify the given map entry by merging with either neighbor.  This
1351  * routine also has the ability to merge with both neighbors.
1352  *
1353  * This routine guarentees that the passed entry remains valid (though
1354  * possibly extended).  When merging, this routine may delete one or
1355  * both neighbors.  No action is taken on entries which have their
1356  * in-transition flag set.
1357  *
1358  * The map must be exclusively locked.
1359  */
1360 void
1361 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
1362 {
1363 	vm_map_entry_t next, prev;
1364 	vm_size_t prevsize, esize;
1365 
1366 	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1367 		++mycpu->gd_cnt.v_intrans_coll;
1368 		return;
1369 	}
1370 
1371 	if (entry->maptype == VM_MAPTYPE_SUBMAP)
1372 		return;
1373 
1374 	prev = entry->prev;
1375 	if (prev != &map->header) {
1376 		prevsize = prev->end - prev->start;
1377 		if ( (prev->end == entry->start) &&
1378 		     (prev->maptype == entry->maptype) &&
1379 		     (prev->object.vm_object == entry->object.vm_object) &&
1380 		     (!prev->object.vm_object ||
1381 			(prev->offset + prevsize == entry->offset)) &&
1382 		     (prev->eflags == entry->eflags) &&
1383 		     (prev->protection == entry->protection) &&
1384 		     (prev->max_protection == entry->max_protection) &&
1385 		     (prev->inheritance == entry->inheritance) &&
1386 		     (prev->wired_count == entry->wired_count)) {
1387 			if (map->first_free == prev)
1388 				map->first_free = entry;
1389 			if (map->hint == prev)
1390 				map->hint = entry;
1391 			vm_map_entry_unlink(map, prev);
1392 			entry->start = prev->start;
1393 			entry->offset = prev->offset;
1394 			if (prev->object.vm_object)
1395 				vm_object_deallocate(prev->object.vm_object);
1396 			vm_map_entry_dispose(map, prev, countp);
1397 		}
1398 	}
1399 
1400 	next = entry->next;
1401 	if (next != &map->header) {
1402 		esize = entry->end - entry->start;
1403 		if ((entry->end == next->start) &&
1404 		    (next->maptype == entry->maptype) &&
1405 		    (next->object.vm_object == entry->object.vm_object) &&
1406 		     (!entry->object.vm_object ||
1407 			(entry->offset + esize == next->offset)) &&
1408 		    (next->eflags == entry->eflags) &&
1409 		    (next->protection == entry->protection) &&
1410 		    (next->max_protection == entry->max_protection) &&
1411 		    (next->inheritance == entry->inheritance) &&
1412 		    (next->wired_count == entry->wired_count)) {
1413 			if (map->first_free == next)
1414 				map->first_free = entry;
1415 			if (map->hint == next)
1416 				map->hint = entry;
1417 			vm_map_entry_unlink(map, next);
1418 			entry->end = next->end;
1419 			if (next->object.vm_object)
1420 				vm_object_deallocate(next->object.vm_object);
1421 			vm_map_entry_dispose(map, next, countp);
1422 	        }
1423 	}
1424 }
1425 
1426 /*
1427  * Asserts that the given entry begins at or after the specified address.
1428  * If necessary, it splits the entry into two.
1429  */
1430 #define vm_map_clip_start(map, entry, startaddr, countp)		\
1431 {									\
1432 	if (startaddr > entry->start)					\
1433 		_vm_map_clip_start(map, entry, startaddr, countp);	\
1434 }
1435 
1436 /*
1437  * This routine is called only when it is known that the entry must be split.
1438  *
1439  * The map must be exclusively locked.
1440  */
1441 static void
1442 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start,
1443 		   int *countp)
1444 {
1445 	vm_map_entry_t new_entry;
1446 
1447 	/*
1448 	 * Split off the front portion -- note that we must insert the new
1449 	 * entry BEFORE this one, so that this entry has the specified
1450 	 * starting address.
1451 	 */
1452 
1453 	vm_map_simplify_entry(map, entry, countp);
1454 
1455 	/*
1456 	 * If there is no object backing this entry, we might as well create
1457 	 * one now.  If we defer it, an object can get created after the map
1458 	 * is clipped, and individual objects will be created for the split-up
1459 	 * map.  This is a bit of a hack, but is also about the best place to
1460 	 * put this improvement.
1461 	 */
1462 	if (entry->object.vm_object == NULL && !map->system_map) {
1463 		vm_map_entry_allocate_object(entry);
1464 	}
1465 
1466 	new_entry = vm_map_entry_create(map, countp);
1467 	*new_entry = *entry;
1468 
1469 	new_entry->end = start;
1470 	entry->offset += (start - entry->start);
1471 	entry->start = start;
1472 
1473 	vm_map_entry_link(map, entry->prev, new_entry);
1474 
1475 	switch(entry->maptype) {
1476 	case VM_MAPTYPE_NORMAL:
1477 	case VM_MAPTYPE_VPAGETABLE:
1478 		if (new_entry->object.vm_object) {
1479 			vm_object_hold(new_entry->object.vm_object);
1480 			vm_object_chain_wait(new_entry->object.vm_object, 0);
1481 			vm_object_reference_locked(new_entry->object.vm_object);
1482 			vm_object_drop(new_entry->object.vm_object);
1483 		}
1484 		break;
1485 	default:
1486 		break;
1487 	}
1488 }
1489 
1490 /*
1491  * Asserts that the given entry ends at or before the specified address.
1492  * If necessary, it splits the entry into two.
1493  *
1494  * The map must be exclusively locked.
1495  */
1496 #define vm_map_clip_end(map, entry, endaddr, countp)		\
1497 {								\
1498 	if (endaddr < entry->end)				\
1499 		_vm_map_clip_end(map, entry, endaddr, countp);	\
1500 }
1501 
1502 /*
1503  * This routine is called only when it is known that the entry must be split.
1504  *
1505  * The map must be exclusively locked.
1506  */
1507 static void
1508 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end,
1509 		 int *countp)
1510 {
1511 	vm_map_entry_t new_entry;
1512 
1513 	/*
1514 	 * If there is no object backing this entry, we might as well create
1515 	 * one now.  If we defer it, an object can get created after the map
1516 	 * is clipped, and individual objects will be created for the split-up
1517 	 * map.  This is a bit of a hack, but is also about the best place to
1518 	 * put this improvement.
1519 	 */
1520 
1521 	if (entry->object.vm_object == NULL && !map->system_map) {
1522 		vm_map_entry_allocate_object(entry);
1523 	}
1524 
1525 	/*
1526 	 * Create a new entry and insert it AFTER the specified entry
1527 	 */
1528 
1529 	new_entry = vm_map_entry_create(map, countp);
1530 	*new_entry = *entry;
1531 
1532 	new_entry->start = entry->end = end;
1533 	new_entry->offset += (end - entry->start);
1534 
1535 	vm_map_entry_link(map, entry, new_entry);
1536 
1537 	switch(entry->maptype) {
1538 	case VM_MAPTYPE_NORMAL:
1539 	case VM_MAPTYPE_VPAGETABLE:
1540 		if (new_entry->object.vm_object) {
1541 			vm_object_hold(new_entry->object.vm_object);
1542 			vm_object_chain_wait(new_entry->object.vm_object, 0);
1543 			vm_object_reference_locked(new_entry->object.vm_object);
1544 			vm_object_drop(new_entry->object.vm_object);
1545 		}
1546 		break;
1547 	default:
1548 		break;
1549 	}
1550 }
1551 
1552 /*
1553  * Asserts that the starting and ending region addresses fall within the
1554  * valid range for the map.
1555  */
1556 #define	VM_MAP_RANGE_CHECK(map, start, end)	\
1557 {						\
1558 	if (start < vm_map_min(map))		\
1559 		start = vm_map_min(map);	\
1560 	if (end > vm_map_max(map))		\
1561 		end = vm_map_max(map);		\
1562 	if (start > end)			\
1563 		start = end;			\
1564 }
1565 
1566 /*
1567  * Used to block when an in-transition collison occurs.  The map
1568  * is unlocked for the sleep and relocked before the return.
1569  */
1570 void
1571 vm_map_transition_wait(vm_map_t map)
1572 {
1573 	tsleep_interlock(map, 0);
1574 	vm_map_unlock(map);
1575 	tsleep(map, PINTERLOCKED, "vment", 0);
1576 	vm_map_lock(map);
1577 }
1578 
1579 /*
1580  * When we do blocking operations with the map lock held it is
1581  * possible that a clip might have occured on our in-transit entry,
1582  * requiring an adjustment to the entry in our loop.  These macros
1583  * help the pageable and clip_range code deal with the case.  The
1584  * conditional costs virtually nothing if no clipping has occured.
1585  */
1586 
1587 #define CLIP_CHECK_BACK(entry, save_start)		\
1588     do {						\
1589 	    while (entry->start != save_start) {	\
1590 		    entry = entry->prev;		\
1591 		    KASSERT(entry != &map->header, ("bad entry clip")); \
1592 	    }						\
1593     } while(0)
1594 
1595 #define CLIP_CHECK_FWD(entry, save_end)			\
1596     do {						\
1597 	    while (entry->end != save_end) {		\
1598 		    entry = entry->next;		\
1599 		    KASSERT(entry != &map->header, ("bad entry clip")); \
1600 	    }						\
1601     } while(0)
1602 
1603 
1604 /*
1605  * Clip the specified range and return the base entry.  The
1606  * range may cover several entries starting at the returned base
1607  * and the first and last entry in the covering sequence will be
1608  * properly clipped to the requested start and end address.
1609  *
1610  * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
1611  * flag.
1612  *
1613  * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
1614  * covered by the requested range.
1615  *
1616  * The map must be exclusively locked on entry and will remain locked
1617  * on return. If no range exists or the range contains holes and you
1618  * specified that no holes were allowed, NULL will be returned.  This
1619  * routine may temporarily unlock the map in order avoid a deadlock when
1620  * sleeping.
1621  */
1622 static
1623 vm_map_entry_t
1624 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end,
1625 		  int *countp, int flags)
1626 {
1627 	vm_map_entry_t start_entry;
1628 	vm_map_entry_t entry;
1629 
1630 	/*
1631 	 * Locate the entry and effect initial clipping.  The in-transition
1632 	 * case does not occur very often so do not try to optimize it.
1633 	 */
1634 again:
1635 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
1636 		return (NULL);
1637 	entry = start_entry;
1638 	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1639 		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1640 		++mycpu->gd_cnt.v_intrans_coll;
1641 		++mycpu->gd_cnt.v_intrans_wait;
1642 		vm_map_transition_wait(map);
1643 		/*
1644 		 * entry and/or start_entry may have been clipped while
1645 		 * we slept, or may have gone away entirely.  We have
1646 		 * to restart from the lookup.
1647 		 */
1648 		goto again;
1649 	}
1650 
1651 	/*
1652 	 * Since we hold an exclusive map lock we do not have to restart
1653 	 * after clipping, even though clipping may block in zalloc.
1654 	 */
1655 	vm_map_clip_start(map, entry, start, countp);
1656 	vm_map_clip_end(map, entry, end, countp);
1657 	entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1658 
1659 	/*
1660 	 * Scan entries covered by the range.  When working on the next
1661 	 * entry a restart need only re-loop on the current entry which
1662 	 * we have already locked, since 'next' may have changed.  Also,
1663 	 * even though entry is safe, it may have been clipped so we
1664 	 * have to iterate forwards through the clip after sleeping.
1665 	 */
1666 	while (entry->next != &map->header && entry->next->start < end) {
1667 		vm_map_entry_t next = entry->next;
1668 
1669 		if (flags & MAP_CLIP_NO_HOLES) {
1670 			if (next->start > entry->end) {
1671 				vm_map_unclip_range(map, start_entry,
1672 					start, entry->end, countp, flags);
1673 				return(NULL);
1674 			}
1675 		}
1676 
1677 		if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
1678 			vm_offset_t save_end = entry->end;
1679 			next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1680 			++mycpu->gd_cnt.v_intrans_coll;
1681 			++mycpu->gd_cnt.v_intrans_wait;
1682 			vm_map_transition_wait(map);
1683 
1684 			/*
1685 			 * clips might have occured while we blocked.
1686 			 */
1687 			CLIP_CHECK_FWD(entry, save_end);
1688 			CLIP_CHECK_BACK(start_entry, start);
1689 			continue;
1690 		}
1691 		/*
1692 		 * No restart necessary even though clip_end may block, we
1693 		 * are holding the map lock.
1694 		 */
1695 		vm_map_clip_end(map, next, end, countp);
1696 		next->eflags |= MAP_ENTRY_IN_TRANSITION;
1697 		entry = next;
1698 	}
1699 	if (flags & MAP_CLIP_NO_HOLES) {
1700 		if (entry->end != end) {
1701 			vm_map_unclip_range(map, start_entry,
1702 				start, entry->end, countp, flags);
1703 			return(NULL);
1704 		}
1705 	}
1706 	return(start_entry);
1707 }
1708 
1709 /*
1710  * Undo the effect of vm_map_clip_range().  You should pass the same
1711  * flags and the same range that you passed to vm_map_clip_range().
1712  * This code will clear the in-transition flag on the entries and
1713  * wake up anyone waiting.  This code will also simplify the sequence
1714  * and attempt to merge it with entries before and after the sequence.
1715  *
1716  * The map must be locked on entry and will remain locked on return.
1717  *
1718  * Note that you should also pass the start_entry returned by
1719  * vm_map_clip_range().  However, if you block between the two calls
1720  * with the map unlocked please be aware that the start_entry may
1721  * have been clipped and you may need to scan it backwards to find
1722  * the entry corresponding with the original start address.  You are
1723  * responsible for this, vm_map_unclip_range() expects the correct
1724  * start_entry to be passed to it and will KASSERT otherwise.
1725  */
1726 static
1727 void
1728 vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry,
1729 		    vm_offset_t start, vm_offset_t end,
1730 		    int *countp, int flags)
1731 {
1732 	vm_map_entry_t entry;
1733 
1734 	entry = start_entry;
1735 
1736 	KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
1737 	while (entry != &map->header && entry->start < end) {
1738 		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
1739 			("in-transition flag not set during unclip on: %p",
1740 			entry));
1741 		KASSERT(entry->end <= end,
1742 			("unclip_range: tail wasn't clipped"));
1743 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1744 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1745 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1746 			wakeup(map);
1747 		}
1748 		entry = entry->next;
1749 	}
1750 
1751 	/*
1752 	 * Simplification does not block so there is no restart case.
1753 	 */
1754 	entry = start_entry;
1755 	while (entry != &map->header && entry->start < end) {
1756 		vm_map_simplify_entry(map, entry, countp);
1757 		entry = entry->next;
1758 	}
1759 }
1760 
1761 /*
1762  * Mark the given range as handled by a subordinate map.
1763  *
1764  * This range must have been created with vm_map_find(), and no other
1765  * operations may have been performed on this range prior to calling
1766  * vm_map_submap().
1767  *
1768  * Submappings cannot be removed.
1769  *
1770  * No requirements.
1771  */
1772 int
1773 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
1774 {
1775 	vm_map_entry_t entry;
1776 	int result = KERN_INVALID_ARGUMENT;
1777 	int count;
1778 
1779 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1780 	vm_map_lock(map);
1781 
1782 	VM_MAP_RANGE_CHECK(map, start, end);
1783 
1784 	if (vm_map_lookup_entry(map, start, &entry)) {
1785 		vm_map_clip_start(map, entry, start, &count);
1786 	} else {
1787 		entry = entry->next;
1788 	}
1789 
1790 	vm_map_clip_end(map, entry, end, &count);
1791 
1792 	if ((entry->start == start) && (entry->end == end) &&
1793 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1794 	    (entry->object.vm_object == NULL)) {
1795 		entry->object.sub_map = submap;
1796 		entry->maptype = VM_MAPTYPE_SUBMAP;
1797 		result = KERN_SUCCESS;
1798 	}
1799 	vm_map_unlock(map);
1800 	vm_map_entry_release(count);
1801 
1802 	return (result);
1803 }
1804 
1805 /*
1806  * Sets the protection of the specified address region in the target map.
1807  * If "set_max" is specified, the maximum protection is to be set;
1808  * otherwise, only the current protection is affected.
1809  *
1810  * The protection is not applicable to submaps, but is applicable to normal
1811  * maps and maps governed by virtual page tables.  For example, when operating
1812  * on a virtual page table our protection basically controls how COW occurs
1813  * on the backing object, whereas the virtual page table abstraction itself
1814  * is an abstraction for userland.
1815  *
1816  * No requirements.
1817  */
1818 int
1819 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1820 	       vm_prot_t new_prot, boolean_t set_max)
1821 {
1822 	vm_map_entry_t current;
1823 	vm_map_entry_t entry;
1824 	int count;
1825 
1826 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1827 	vm_map_lock(map);
1828 
1829 	VM_MAP_RANGE_CHECK(map, start, end);
1830 
1831 	if (vm_map_lookup_entry(map, start, &entry)) {
1832 		vm_map_clip_start(map, entry, start, &count);
1833 	} else {
1834 		entry = entry->next;
1835 	}
1836 
1837 	/*
1838 	 * Make a first pass to check for protection violations.
1839 	 */
1840 	current = entry;
1841 	while ((current != &map->header) && (current->start < end)) {
1842 		if (current->maptype == VM_MAPTYPE_SUBMAP) {
1843 			vm_map_unlock(map);
1844 			vm_map_entry_release(count);
1845 			return (KERN_INVALID_ARGUMENT);
1846 		}
1847 		if ((new_prot & current->max_protection) != new_prot) {
1848 			vm_map_unlock(map);
1849 			vm_map_entry_release(count);
1850 			return (KERN_PROTECTION_FAILURE);
1851 		}
1852 		current = current->next;
1853 	}
1854 
1855 	/*
1856 	 * Go back and fix up protections. [Note that clipping is not
1857 	 * necessary the second time.]
1858 	 */
1859 	current = entry;
1860 
1861 	while ((current != &map->header) && (current->start < end)) {
1862 		vm_prot_t old_prot;
1863 
1864 		vm_map_clip_end(map, current, end, &count);
1865 
1866 		old_prot = current->protection;
1867 		if (set_max) {
1868 			current->protection =
1869 			    (current->max_protection = new_prot) &
1870 			    old_prot;
1871 		} else {
1872 			current->protection = new_prot;
1873 		}
1874 
1875 		/*
1876 		 * Update physical map if necessary. Worry about copy-on-write
1877 		 * here -- CHECK THIS XXX
1878 		 */
1879 
1880 		if (current->protection != old_prot) {
1881 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1882 							VM_PROT_ALL)
1883 
1884 			pmap_protect(map->pmap, current->start,
1885 			    current->end,
1886 			    current->protection & MASK(current));
1887 #undef	MASK
1888 		}
1889 
1890 		vm_map_simplify_entry(map, current, &count);
1891 
1892 		current = current->next;
1893 	}
1894 
1895 	vm_map_unlock(map);
1896 	vm_map_entry_release(count);
1897 	return (KERN_SUCCESS);
1898 }
1899 
1900 /*
1901  * This routine traverses a processes map handling the madvise
1902  * system call.  Advisories are classified as either those effecting
1903  * the vm_map_entry structure, or those effecting the underlying
1904  * objects.
1905  *
1906  * The <value> argument is used for extended madvise calls.
1907  *
1908  * No requirements.
1909  */
1910 int
1911 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end,
1912 	       int behav, off_t value)
1913 {
1914 	vm_map_entry_t current, entry;
1915 	int modify_map = 0;
1916 	int error = 0;
1917 	int count;
1918 
1919 	/*
1920 	 * Some madvise calls directly modify the vm_map_entry, in which case
1921 	 * we need to use an exclusive lock on the map and we need to perform
1922 	 * various clipping operations.  Otherwise we only need a read-lock
1923 	 * on the map.
1924 	 */
1925 
1926 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1927 
1928 	switch(behav) {
1929 	case MADV_NORMAL:
1930 	case MADV_SEQUENTIAL:
1931 	case MADV_RANDOM:
1932 	case MADV_NOSYNC:
1933 	case MADV_AUTOSYNC:
1934 	case MADV_NOCORE:
1935 	case MADV_CORE:
1936 	case MADV_SETMAP:
1937 	case MADV_INVAL:
1938 		modify_map = 1;
1939 		vm_map_lock(map);
1940 		break;
1941 	case MADV_WILLNEED:
1942 	case MADV_DONTNEED:
1943 	case MADV_FREE:
1944 		vm_map_lock_read(map);
1945 		break;
1946 	default:
1947 		vm_map_entry_release(count);
1948 		return (EINVAL);
1949 	}
1950 
1951 	/*
1952 	 * Locate starting entry and clip if necessary.
1953 	 */
1954 
1955 	VM_MAP_RANGE_CHECK(map, start, end);
1956 
1957 	if (vm_map_lookup_entry(map, start, &entry)) {
1958 		if (modify_map)
1959 			vm_map_clip_start(map, entry, start, &count);
1960 	} else {
1961 		entry = entry->next;
1962 	}
1963 
1964 	if (modify_map) {
1965 		/*
1966 		 * madvise behaviors that are implemented in the vm_map_entry.
1967 		 *
1968 		 * We clip the vm_map_entry so that behavioral changes are
1969 		 * limited to the specified address range.
1970 		 */
1971 		for (current = entry;
1972 		     (current != &map->header) && (current->start < end);
1973 		     current = current->next
1974 		) {
1975 			if (current->maptype == VM_MAPTYPE_SUBMAP)
1976 				continue;
1977 
1978 			vm_map_clip_end(map, current, end, &count);
1979 
1980 			switch (behav) {
1981 			case MADV_NORMAL:
1982 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1983 				break;
1984 			case MADV_SEQUENTIAL:
1985 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1986 				break;
1987 			case MADV_RANDOM:
1988 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1989 				break;
1990 			case MADV_NOSYNC:
1991 				current->eflags |= MAP_ENTRY_NOSYNC;
1992 				break;
1993 			case MADV_AUTOSYNC:
1994 				current->eflags &= ~MAP_ENTRY_NOSYNC;
1995 				break;
1996 			case MADV_NOCORE:
1997 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
1998 				break;
1999 			case MADV_CORE:
2000 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2001 				break;
2002 			case MADV_INVAL:
2003 				/*
2004 				 * Invalidate the related pmap entries, used
2005 				 * to flush portions of the real kernel's
2006 				 * pmap when the caller has removed or
2007 				 * modified existing mappings in a virtual
2008 				 * page table.
2009 				 */
2010 				pmap_remove(map->pmap,
2011 					    current->start, current->end);
2012 				break;
2013 			case MADV_SETMAP:
2014 				/*
2015 				 * Set the page directory page for a map
2016 				 * governed by a virtual page table.  Mark
2017 				 * the entry as being governed by a virtual
2018 				 * page table if it is not.
2019 				 *
2020 				 * XXX the page directory page is stored
2021 				 * in the avail_ssize field if the map_entry.
2022 				 *
2023 				 * XXX the map simplification code does not
2024 				 * compare this field so weird things may
2025 				 * happen if you do not apply this function
2026 				 * to the entire mapping governed by the
2027 				 * virtual page table.
2028 				 */
2029 				if (current->maptype != VM_MAPTYPE_VPAGETABLE) {
2030 					error = EINVAL;
2031 					break;
2032 				}
2033 				current->aux.master_pde = value;
2034 				pmap_remove(map->pmap,
2035 					    current->start, current->end);
2036 				break;
2037 			default:
2038 				error = EINVAL;
2039 				break;
2040 			}
2041 			vm_map_simplify_entry(map, current, &count);
2042 		}
2043 		vm_map_unlock(map);
2044 	} else {
2045 		vm_pindex_t pindex;
2046 		int count;
2047 
2048 		/*
2049 		 * madvise behaviors that are implemented in the underlying
2050 		 * vm_object.
2051 		 *
2052 		 * Since we don't clip the vm_map_entry, we have to clip
2053 		 * the vm_object pindex and count.
2054 		 *
2055 		 * NOTE!  We currently do not support these functions on
2056 		 * virtual page tables.
2057 		 */
2058 		for (current = entry;
2059 		     (current != &map->header) && (current->start < end);
2060 		     current = current->next
2061 		) {
2062 			vm_offset_t useStart;
2063 
2064 			if (current->maptype != VM_MAPTYPE_NORMAL)
2065 				continue;
2066 
2067 			pindex = OFF_TO_IDX(current->offset);
2068 			count = atop(current->end - current->start);
2069 			useStart = current->start;
2070 
2071 			if (current->start < start) {
2072 				pindex += atop(start - current->start);
2073 				count -= atop(start - current->start);
2074 				useStart = start;
2075 			}
2076 			if (current->end > end)
2077 				count -= atop(current->end - end);
2078 
2079 			if (count <= 0)
2080 				continue;
2081 
2082 			vm_object_madvise(current->object.vm_object,
2083 					  pindex, count, behav);
2084 
2085 			/*
2086 			 * Try to populate the page table.  Mappings governed
2087 			 * by virtual page tables cannot be pre-populated
2088 			 * without a lot of work so don't try.
2089 			 */
2090 			if (behav == MADV_WILLNEED &&
2091 			    current->maptype != VM_MAPTYPE_VPAGETABLE) {
2092 				pmap_object_init_pt(
2093 				    map->pmap,
2094 				    useStart,
2095 				    current->protection,
2096 				    current->object.vm_object,
2097 				    pindex,
2098 				    (count << PAGE_SHIFT),
2099 				    MAP_PREFAULT_MADVISE
2100 				);
2101 			}
2102 		}
2103 		vm_map_unlock_read(map);
2104 	}
2105 	vm_map_entry_release(count);
2106 	return(error);
2107 }
2108 
2109 
2110 /*
2111  * Sets the inheritance of the specified address range in the target map.
2112  * Inheritance affects how the map will be shared with child maps at the
2113  * time of vm_map_fork.
2114  */
2115 int
2116 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2117 	       vm_inherit_t new_inheritance)
2118 {
2119 	vm_map_entry_t entry;
2120 	vm_map_entry_t temp_entry;
2121 	int count;
2122 
2123 	switch (new_inheritance) {
2124 	case VM_INHERIT_NONE:
2125 	case VM_INHERIT_COPY:
2126 	case VM_INHERIT_SHARE:
2127 		break;
2128 	default:
2129 		return (KERN_INVALID_ARGUMENT);
2130 	}
2131 
2132 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2133 	vm_map_lock(map);
2134 
2135 	VM_MAP_RANGE_CHECK(map, start, end);
2136 
2137 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2138 		entry = temp_entry;
2139 		vm_map_clip_start(map, entry, start, &count);
2140 	} else
2141 		entry = temp_entry->next;
2142 
2143 	while ((entry != &map->header) && (entry->start < end)) {
2144 		vm_map_clip_end(map, entry, end, &count);
2145 
2146 		entry->inheritance = new_inheritance;
2147 
2148 		vm_map_simplify_entry(map, entry, &count);
2149 
2150 		entry = entry->next;
2151 	}
2152 	vm_map_unlock(map);
2153 	vm_map_entry_release(count);
2154 	return (KERN_SUCCESS);
2155 }
2156 
2157 /*
2158  * Implement the semantics of mlock
2159  */
2160 int
2161 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
2162 	      boolean_t new_pageable)
2163 {
2164 	vm_map_entry_t entry;
2165 	vm_map_entry_t start_entry;
2166 	vm_offset_t end;
2167 	int rv = KERN_SUCCESS;
2168 	int count;
2169 
2170 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2171 	vm_map_lock(map);
2172 	VM_MAP_RANGE_CHECK(map, start, real_end);
2173 	end = real_end;
2174 
2175 	start_entry = vm_map_clip_range(map, start, end, &count,
2176 					MAP_CLIP_NO_HOLES);
2177 	if (start_entry == NULL) {
2178 		vm_map_unlock(map);
2179 		vm_map_entry_release(count);
2180 		return (KERN_INVALID_ADDRESS);
2181 	}
2182 
2183 	if (new_pageable == 0) {
2184 		entry = start_entry;
2185 		while ((entry != &map->header) && (entry->start < end)) {
2186 			vm_offset_t save_start;
2187 			vm_offset_t save_end;
2188 
2189 			/*
2190 			 * Already user wired or hard wired (trivial cases)
2191 			 */
2192 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
2193 				entry = entry->next;
2194 				continue;
2195 			}
2196 			if (entry->wired_count != 0) {
2197 				entry->wired_count++;
2198 				entry->eflags |= MAP_ENTRY_USER_WIRED;
2199 				entry = entry->next;
2200 				continue;
2201 			}
2202 
2203 			/*
2204 			 * A new wiring requires instantiation of appropriate
2205 			 * management structures and the faulting in of the
2206 			 * page.
2207 			 */
2208 			if (entry->maptype != VM_MAPTYPE_SUBMAP) {
2209 				int copyflag = entry->eflags &
2210 					       MAP_ENTRY_NEEDS_COPY;
2211 				if (copyflag && ((entry->protection &
2212 						  VM_PROT_WRITE) != 0)) {
2213 					vm_map_entry_shadow(entry, 0);
2214 				} else if (entry->object.vm_object == NULL &&
2215 					   !map->system_map) {
2216 					vm_map_entry_allocate_object(entry);
2217 				}
2218 			}
2219 			entry->wired_count++;
2220 			entry->eflags |= MAP_ENTRY_USER_WIRED;
2221 
2222 			/*
2223 			 * Now fault in the area.  Note that vm_fault_wire()
2224 			 * may release the map lock temporarily, it will be
2225 			 * relocked on return.  The in-transition
2226 			 * flag protects the entries.
2227 			 */
2228 			save_start = entry->start;
2229 			save_end = entry->end;
2230 			rv = vm_fault_wire(map, entry, TRUE, 0);
2231 			if (rv) {
2232 				CLIP_CHECK_BACK(entry, save_start);
2233 				for (;;) {
2234 					KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
2235 					entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2236 					entry->wired_count = 0;
2237 					if (entry->end == save_end)
2238 						break;
2239 					entry = entry->next;
2240 					KASSERT(entry != &map->header, ("bad entry clip during backout"));
2241 				}
2242 				end = save_start;	/* unwire the rest */
2243 				break;
2244 			}
2245 			/*
2246 			 * note that even though the entry might have been
2247 			 * clipped, the USER_WIRED flag we set prevents
2248 			 * duplication so we do not have to do a
2249 			 * clip check.
2250 			 */
2251 			entry = entry->next;
2252 		}
2253 
2254 		/*
2255 		 * If we failed fall through to the unwiring section to
2256 		 * unwire what we had wired so far.  'end' has already
2257 		 * been adjusted.
2258 		 */
2259 		if (rv)
2260 			new_pageable = 1;
2261 
2262 		/*
2263 		 * start_entry might have been clipped if we unlocked the
2264 		 * map and blocked.  No matter how clipped it has gotten
2265 		 * there should be a fragment that is on our start boundary.
2266 		 */
2267 		CLIP_CHECK_BACK(start_entry, start);
2268 	}
2269 
2270 	/*
2271 	 * Deal with the unwiring case.
2272 	 */
2273 	if (new_pageable) {
2274 		/*
2275 		 * This is the unwiring case.  We must first ensure that the
2276 		 * range to be unwired is really wired down.  We know there
2277 		 * are no holes.
2278 		 */
2279 		entry = start_entry;
2280 		while ((entry != &map->header) && (entry->start < end)) {
2281 			if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2282 				rv = KERN_INVALID_ARGUMENT;
2283 				goto done;
2284 			}
2285 			KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
2286 			entry = entry->next;
2287 		}
2288 
2289 		/*
2290 		 * Now decrement the wiring count for each region. If a region
2291 		 * becomes completely unwired, unwire its physical pages and
2292 		 * mappings.
2293 		 */
2294 		/*
2295 		 * The map entries are processed in a loop, checking to
2296 		 * make sure the entry is wired and asserting it has a wired
2297 		 * count. However, another loop was inserted more-or-less in
2298 		 * the middle of the unwiring path. This loop picks up the
2299 		 * "entry" loop variable from the first loop without first
2300 		 * setting it to start_entry. Naturally, the secound loop
2301 		 * is never entered and the pages backing the entries are
2302 		 * never unwired. This can lead to a leak of wired pages.
2303 		 */
2304 		entry = start_entry;
2305 		while ((entry != &map->header) && (entry->start < end)) {
2306 			KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED,
2307 				("expected USER_WIRED on entry %p", entry));
2308 			entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2309 			entry->wired_count--;
2310 			if (entry->wired_count == 0)
2311 				vm_fault_unwire(map, entry);
2312 			entry = entry->next;
2313 		}
2314 	}
2315 done:
2316 	vm_map_unclip_range(map, start_entry, start, real_end, &count,
2317 		MAP_CLIP_NO_HOLES);
2318 	map->timestamp++;
2319 	vm_map_unlock(map);
2320 	vm_map_entry_release(count);
2321 	return (rv);
2322 }
2323 
2324 /*
2325  * Sets the pageability of the specified address range in the target map.
2326  * Regions specified as not pageable require locked-down physical
2327  * memory and physical page maps.
2328  *
2329  * The map must not be locked, but a reference must remain to the map
2330  * throughout the call.
2331  *
2332  * This function may be called via the zalloc path and must properly
2333  * reserve map entries for kernel_map.
2334  *
2335  * No requirements.
2336  */
2337 int
2338 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
2339 {
2340 	vm_map_entry_t entry;
2341 	vm_map_entry_t start_entry;
2342 	vm_offset_t end;
2343 	int rv = KERN_SUCCESS;
2344 	int count;
2345 
2346 	if (kmflags & KM_KRESERVE)
2347 		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
2348 	else
2349 		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2350 	vm_map_lock(map);
2351 	VM_MAP_RANGE_CHECK(map, start, real_end);
2352 	end = real_end;
2353 
2354 	start_entry = vm_map_clip_range(map, start, end, &count,
2355 					MAP_CLIP_NO_HOLES);
2356 	if (start_entry == NULL) {
2357 		vm_map_unlock(map);
2358 		rv = KERN_INVALID_ADDRESS;
2359 		goto failure;
2360 	}
2361 	if ((kmflags & KM_PAGEABLE) == 0) {
2362 		/*
2363 		 * Wiring.
2364 		 *
2365 		 * 1.  Holding the write lock, we create any shadow or zero-fill
2366 		 * objects that need to be created. Then we clip each map
2367 		 * entry to the region to be wired and increment its wiring
2368 		 * count.  We create objects before clipping the map entries
2369 		 * to avoid object proliferation.
2370 		 *
2371 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
2372 		 * fault in the pages for any newly wired area (wired_count is
2373 		 * 1).
2374 		 *
2375 		 * Downgrading to a read lock for vm_fault_wire avoids a
2376 		 * possible deadlock with another process that may have faulted
2377 		 * on one of the pages to be wired (it would mark the page busy,
2378 		 * blocking us, then in turn block on the map lock that we
2379 		 * hold).  Because of problems in the recursive lock package,
2380 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
2381 		 * any actions that require the write lock must be done
2382 		 * beforehand.  Because we keep the read lock on the map, the
2383 		 * copy-on-write status of the entries we modify here cannot
2384 		 * change.
2385 		 */
2386 		entry = start_entry;
2387 		while ((entry != &map->header) && (entry->start < end)) {
2388 			/*
2389 			 * Trivial case if the entry is already wired
2390 			 */
2391 			if (entry->wired_count) {
2392 				entry->wired_count++;
2393 				entry = entry->next;
2394 				continue;
2395 			}
2396 
2397 			/*
2398 			 * The entry is being newly wired, we have to setup
2399 			 * appropriate management structures.  A shadow
2400 			 * object is required for a copy-on-write region,
2401 			 * or a normal object for a zero-fill region.  We
2402 			 * do not have to do this for entries that point to sub
2403 			 * maps because we won't hold the lock on the sub map.
2404 			 */
2405 			if (entry->maptype != VM_MAPTYPE_SUBMAP) {
2406 				int copyflag = entry->eflags &
2407 					       MAP_ENTRY_NEEDS_COPY;
2408 				if (copyflag && ((entry->protection &
2409 						  VM_PROT_WRITE) != 0)) {
2410 					vm_map_entry_shadow(entry, 0);
2411 				} else if (entry->object.vm_object == NULL &&
2412 					   !map->system_map) {
2413 					vm_map_entry_allocate_object(entry);
2414 				}
2415 			}
2416 
2417 			entry->wired_count++;
2418 			entry = entry->next;
2419 		}
2420 
2421 		/*
2422 		 * Pass 2.
2423 		 */
2424 
2425 		/*
2426 		 * HACK HACK HACK HACK
2427 		 *
2428 		 * vm_fault_wire() temporarily unlocks the map to avoid
2429 		 * deadlocks.  The in-transition flag from vm_map_clip_range
2430 		 * call should protect us from changes while the map is
2431 		 * unlocked.  T
2432 		 *
2433 		 * NOTE: Previously this comment stated that clipping might
2434 		 *	 still occur while the entry is unlocked, but from
2435 		 *	 what I can tell it actually cannot.
2436 		 *
2437 		 *	 It is unclear whether the CLIP_CHECK_*() calls
2438 		 *	 are still needed but we keep them in anyway.
2439 		 *
2440 		 * HACK HACK HACK HACK
2441 		 */
2442 
2443 		entry = start_entry;
2444 		while (entry != &map->header && entry->start < end) {
2445 			/*
2446 			 * If vm_fault_wire fails for any page we need to undo
2447 			 * what has been done.  We decrement the wiring count
2448 			 * for those pages which have not yet been wired (now)
2449 			 * and unwire those that have (later).
2450 			 */
2451 			vm_offset_t save_start = entry->start;
2452 			vm_offset_t save_end = entry->end;
2453 
2454 			if (entry->wired_count == 1)
2455 				rv = vm_fault_wire(map, entry, FALSE, kmflags);
2456 			if (rv) {
2457 				CLIP_CHECK_BACK(entry, save_start);
2458 				for (;;) {
2459 					KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
2460 					entry->wired_count = 0;
2461 					if (entry->end == save_end)
2462 						break;
2463 					entry = entry->next;
2464 					KASSERT(entry != &map->header, ("bad entry clip during backout"));
2465 				}
2466 				end = save_start;
2467 				break;
2468 			}
2469 			CLIP_CHECK_FWD(entry, save_end);
2470 			entry = entry->next;
2471 		}
2472 
2473 		/*
2474 		 * If a failure occured undo everything by falling through
2475 		 * to the unwiring code.  'end' has already been adjusted
2476 		 * appropriately.
2477 		 */
2478 		if (rv)
2479 			kmflags |= KM_PAGEABLE;
2480 
2481 		/*
2482 		 * start_entry is still IN_TRANSITION but may have been
2483 		 * clipped since vm_fault_wire() unlocks and relocks the
2484 		 * map.  No matter how clipped it has gotten there should
2485 		 * be a fragment that is on our start boundary.
2486 		 */
2487 		CLIP_CHECK_BACK(start_entry, start);
2488 	}
2489 
2490 	if (kmflags & KM_PAGEABLE) {
2491 		/*
2492 		 * This is the unwiring case.  We must first ensure that the
2493 		 * range to be unwired is really wired down.  We know there
2494 		 * are no holes.
2495 		 */
2496 		entry = start_entry;
2497 		while ((entry != &map->header) && (entry->start < end)) {
2498 			if (entry->wired_count == 0) {
2499 				rv = KERN_INVALID_ARGUMENT;
2500 				goto done;
2501 			}
2502 			entry = entry->next;
2503 		}
2504 
2505 		/*
2506 		 * Now decrement the wiring count for each region. If a region
2507 		 * becomes completely unwired, unwire its physical pages and
2508 		 * mappings.
2509 		 */
2510 		entry = start_entry;
2511 		while ((entry != &map->header) && (entry->start < end)) {
2512 			entry->wired_count--;
2513 			if (entry->wired_count == 0)
2514 				vm_fault_unwire(map, entry);
2515 			entry = entry->next;
2516 		}
2517 	}
2518 done:
2519 	vm_map_unclip_range(map, start_entry, start, real_end,
2520 			    &count, MAP_CLIP_NO_HOLES);
2521 	map->timestamp++;
2522 	vm_map_unlock(map);
2523 failure:
2524 	if (kmflags & KM_KRESERVE)
2525 		vm_map_entry_krelease(count);
2526 	else
2527 		vm_map_entry_release(count);
2528 	return (rv);
2529 }
2530 
2531 /*
2532  * Mark a newly allocated address range as wired but do not fault in
2533  * the pages.  The caller is expected to load the pages into the object.
2534  *
2535  * The map must be locked on entry and will remain locked on return.
2536  * No other requirements.
2537  */
2538 void
2539 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size,
2540 		       int *countp)
2541 {
2542 	vm_map_entry_t scan;
2543 	vm_map_entry_t entry;
2544 
2545 	entry = vm_map_clip_range(map, addr, addr + size,
2546 				  countp, MAP_CLIP_NO_HOLES);
2547 	for (scan = entry;
2548 	     scan != &map->header && scan->start < addr + size;
2549 	     scan = scan->next) {
2550 	    KKASSERT(scan->wired_count == 0);
2551 	    scan->wired_count = 1;
2552 	}
2553 	vm_map_unclip_range(map, entry, addr, addr + size,
2554 			    countp, MAP_CLIP_NO_HOLES);
2555 }
2556 
2557 /*
2558  * Push any dirty cached pages in the address range to their pager.
2559  * If syncio is TRUE, dirty pages are written synchronously.
2560  * If invalidate is TRUE, any cached pages are freed as well.
2561  *
2562  * This routine is called by sys_msync()
2563  *
2564  * Returns an error if any part of the specified range is not mapped.
2565  *
2566  * No requirements.
2567  */
2568 int
2569 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end,
2570 	     boolean_t syncio, boolean_t invalidate)
2571 {
2572 	vm_map_entry_t current;
2573 	vm_map_entry_t entry;
2574 	vm_size_t size;
2575 	vm_object_t object;
2576 	vm_object_t tobj;
2577 	vm_ooffset_t offset;
2578 
2579 	vm_map_lock_read(map);
2580 	VM_MAP_RANGE_CHECK(map, start, end);
2581 	if (!vm_map_lookup_entry(map, start, &entry)) {
2582 		vm_map_unlock_read(map);
2583 		return (KERN_INVALID_ADDRESS);
2584 	}
2585 	lwkt_gettoken(&map->token);
2586 
2587 	/*
2588 	 * Make a first pass to check for holes.
2589 	 */
2590 	for (current = entry; current->start < end; current = current->next) {
2591 		if (current->maptype == VM_MAPTYPE_SUBMAP) {
2592 			lwkt_reltoken(&map->token);
2593 			vm_map_unlock_read(map);
2594 			return (KERN_INVALID_ARGUMENT);
2595 		}
2596 		if (end > current->end &&
2597 		    (current->next == &map->header ||
2598 			current->end != current->next->start)) {
2599 			lwkt_reltoken(&map->token);
2600 			vm_map_unlock_read(map);
2601 			return (KERN_INVALID_ADDRESS);
2602 		}
2603 	}
2604 
2605 	if (invalidate)
2606 		pmap_remove(vm_map_pmap(map), start, end);
2607 
2608 	/*
2609 	 * Make a second pass, cleaning/uncaching pages from the indicated
2610 	 * objects as we go.
2611 	 */
2612 	for (current = entry; current->start < end; current = current->next) {
2613 		offset = current->offset + (start - current->start);
2614 		size = (end <= current->end ? end : current->end) - start;
2615 		if (current->maptype == VM_MAPTYPE_SUBMAP) {
2616 			vm_map_t smap;
2617 			vm_map_entry_t tentry;
2618 			vm_size_t tsize;
2619 
2620 			smap = current->object.sub_map;
2621 			vm_map_lock_read(smap);
2622 			vm_map_lookup_entry(smap, offset, &tentry);
2623 			tsize = tentry->end - offset;
2624 			if (tsize < size)
2625 				size = tsize;
2626 			object = tentry->object.vm_object;
2627 			offset = tentry->offset + (offset - tentry->start);
2628 			vm_map_unlock_read(smap);
2629 		} else {
2630 			object = current->object.vm_object;
2631 		}
2632 
2633 		if (object)
2634 			vm_object_hold(object);
2635 
2636 		/*
2637 		 * Note that there is absolutely no sense in writing out
2638 		 * anonymous objects, so we track down the vnode object
2639 		 * to write out.
2640 		 * We invalidate (remove) all pages from the address space
2641 		 * anyway, for semantic correctness.
2642 		 *
2643 		 * note: certain anonymous maps, such as MAP_NOSYNC maps,
2644 		 * may start out with a NULL object.
2645 		 */
2646 		while (object && (tobj = object->backing_object) != NULL) {
2647 			vm_object_hold(tobj);
2648 			if (tobj == object->backing_object) {
2649 				vm_object_lock_swap();
2650 				offset += object->backing_object_offset;
2651 				vm_object_drop(object);
2652 				object = tobj;
2653 				if (object->size < OFF_TO_IDX(offset + size))
2654 					size = IDX_TO_OFF(object->size) -
2655 					       offset;
2656 				break;
2657 			}
2658 			vm_object_drop(tobj);
2659 		}
2660 		if (object && (object->type == OBJT_VNODE) &&
2661 		    (current->protection & VM_PROT_WRITE) &&
2662 		    (object->flags & OBJ_NOMSYNC) == 0) {
2663 			/*
2664 			 * Flush pages if writing is allowed, invalidate them
2665 			 * if invalidation requested.  Pages undergoing I/O
2666 			 * will be ignored by vm_object_page_remove().
2667 			 *
2668 			 * We cannot lock the vnode and then wait for paging
2669 			 * to complete without deadlocking against vm_fault.
2670 			 * Instead we simply call vm_object_page_remove() and
2671 			 * allow it to block internally on a page-by-page
2672 			 * basis when it encounters pages undergoing async
2673 			 * I/O.
2674 			 */
2675 			int flags;
2676 
2677 			/* no chain wait needed for vnode objects */
2678 			vm_object_reference_locked(object);
2679 			vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY);
2680 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2681 			flags |= invalidate ? OBJPC_INVAL : 0;
2682 
2683 			/*
2684 			 * When operating on a virtual page table just
2685 			 * flush the whole object.  XXX we probably ought
2686 			 * to
2687 			 */
2688 			switch(current->maptype) {
2689 			case VM_MAPTYPE_NORMAL:
2690 				vm_object_page_clean(object,
2691 				    OFF_TO_IDX(offset),
2692 				    OFF_TO_IDX(offset + size + PAGE_MASK),
2693 				    flags);
2694 				break;
2695 			case VM_MAPTYPE_VPAGETABLE:
2696 				vm_object_page_clean(object, 0, 0, flags);
2697 				break;
2698 			}
2699 			vn_unlock(((struct vnode *)object->handle));
2700 			vm_object_deallocate_locked(object);
2701 		}
2702 		if (object && invalidate &&
2703 		   ((object->type == OBJT_VNODE) ||
2704 		    (object->type == OBJT_DEVICE) ||
2705 		    (object->type == OBJT_MGTDEVICE))) {
2706 			int clean_only =
2707 				((object->type == OBJT_DEVICE) ||
2708 				(object->type == OBJT_MGTDEVICE)) ? FALSE : TRUE;
2709 			/* no chain wait needed for vnode/device objects */
2710 			vm_object_reference_locked(object);
2711 			switch(current->maptype) {
2712 			case VM_MAPTYPE_NORMAL:
2713 				vm_object_page_remove(object,
2714 				    OFF_TO_IDX(offset),
2715 				    OFF_TO_IDX(offset + size + PAGE_MASK),
2716 				    clean_only);
2717 				break;
2718 			case VM_MAPTYPE_VPAGETABLE:
2719 				vm_object_page_remove(object, 0, 0, clean_only);
2720 				break;
2721 			}
2722 			vm_object_deallocate_locked(object);
2723 		}
2724 		start += size;
2725 		if (object)
2726 			vm_object_drop(object);
2727 	}
2728 
2729 	lwkt_reltoken(&map->token);
2730 	vm_map_unlock_read(map);
2731 
2732 	return (KERN_SUCCESS);
2733 }
2734 
2735 /*
2736  * Make the region specified by this entry pageable.
2737  *
2738  * The vm_map must be exclusively locked.
2739  */
2740 static void
2741 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2742 {
2743 	entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2744 	entry->wired_count = 0;
2745 	vm_fault_unwire(map, entry);
2746 }
2747 
2748 /*
2749  * Deallocate the given entry from the target map.
2750  *
2751  * The vm_map must be exclusively locked.
2752  */
2753 static void
2754 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
2755 {
2756 	vm_map_entry_unlink(map, entry);
2757 	map->size -= entry->end - entry->start;
2758 
2759 	switch(entry->maptype) {
2760 	case VM_MAPTYPE_NORMAL:
2761 	case VM_MAPTYPE_VPAGETABLE:
2762 		vm_object_deallocate(entry->object.vm_object);
2763 		break;
2764 	default:
2765 		break;
2766 	}
2767 
2768 	vm_map_entry_dispose(map, entry, countp);
2769 }
2770 
2771 /*
2772  * Deallocates the given address range from the target map.
2773  *
2774  * The vm_map must be exclusively locked.
2775  */
2776 int
2777 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
2778 {
2779 	vm_object_t object;
2780 	vm_map_entry_t entry;
2781 	vm_map_entry_t first_entry;
2782 
2783 	ASSERT_VM_MAP_LOCKED(map);
2784 	lwkt_gettoken(&map->token);
2785 again:
2786 	/*
2787 	 * Find the start of the region, and clip it.  Set entry to point
2788 	 * at the first record containing the requested address or, if no
2789 	 * such record exists, the next record with a greater address.  The
2790 	 * loop will run from this point until a record beyond the termination
2791 	 * address is encountered.
2792 	 *
2793 	 * map->hint must be adjusted to not point to anything we delete,
2794 	 * so set it to the entry prior to the one being deleted.
2795 	 *
2796 	 * GGG see other GGG comment.
2797 	 */
2798 	if (vm_map_lookup_entry(map, start, &first_entry)) {
2799 		entry = first_entry;
2800 		vm_map_clip_start(map, entry, start, countp);
2801 		map->hint = entry->prev;	/* possible problem XXX */
2802 	} else {
2803 		map->hint = first_entry;	/* possible problem XXX */
2804 		entry = first_entry->next;
2805 	}
2806 
2807 	/*
2808 	 * If a hole opens up prior to the current first_free then
2809 	 * adjust first_free.  As with map->hint, map->first_free
2810 	 * cannot be left set to anything we might delete.
2811 	 */
2812 	if (entry == &map->header) {
2813 		map->first_free = &map->header;
2814 	} else if (map->first_free->start >= start) {
2815 		map->first_free = entry->prev;
2816 	}
2817 
2818 	/*
2819 	 * Step through all entries in this region
2820 	 */
2821 	while ((entry != &map->header) && (entry->start < end)) {
2822 		vm_map_entry_t next;
2823 		vm_offset_t s, e;
2824 		vm_pindex_t offidxstart, offidxend, count;
2825 
2826 		/*
2827 		 * If we hit an in-transition entry we have to sleep and
2828 		 * retry.  It's easier (and not really slower) to just retry
2829 		 * since this case occurs so rarely and the hint is already
2830 		 * pointing at the right place.  We have to reset the
2831 		 * start offset so as not to accidently delete an entry
2832 		 * another process just created in vacated space.
2833 		 */
2834 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2835 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2836 			start = entry->start;
2837 			++mycpu->gd_cnt.v_intrans_coll;
2838 			++mycpu->gd_cnt.v_intrans_wait;
2839 			vm_map_transition_wait(map);
2840 			goto again;
2841 		}
2842 		vm_map_clip_end(map, entry, end, countp);
2843 
2844 		s = entry->start;
2845 		e = entry->end;
2846 		next = entry->next;
2847 
2848 		offidxstart = OFF_TO_IDX(entry->offset);
2849 		count = OFF_TO_IDX(e - s);
2850 		object = entry->object.vm_object;
2851 
2852 		/*
2853 		 * Unwire before removing addresses from the pmap; otherwise,
2854 		 * unwiring will put the entries back in the pmap.
2855 		 */
2856 		if (entry->wired_count != 0)
2857 			vm_map_entry_unwire(map, entry);
2858 
2859 		offidxend = offidxstart + count;
2860 
2861 		if (object == &kernel_object) {
2862 			vm_object_hold(object);
2863 			vm_object_page_remove(object, offidxstart,
2864 					      offidxend, FALSE);
2865 			vm_object_drop(object);
2866 		} else if (object && object->type != OBJT_DEFAULT &&
2867 			   object->type != OBJT_SWAP) {
2868 			/*
2869 			 * vnode object routines cannot be chain-locked,
2870 			 * but since we aren't removing pages from the
2871 			 * object here we can use a shared hold.
2872 			 */
2873 			vm_object_hold_shared(object);
2874 			pmap_remove(map->pmap, s, e);
2875 			vm_object_drop(object);
2876 		} else if (object) {
2877 			vm_object_hold(object);
2878 			vm_object_chain_acquire(object, 0);
2879 			pmap_remove(map->pmap, s, e);
2880 
2881 			if (object != NULL &&
2882 			    object->ref_count != 1 &&
2883 			    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) ==
2884 			     OBJ_ONEMAPPING &&
2885 			    (object->type == OBJT_DEFAULT ||
2886 			     object->type == OBJT_SWAP)) {
2887 				vm_object_collapse(object, NULL);
2888 				vm_object_page_remove(object, offidxstart,
2889 						      offidxend, FALSE);
2890 				if (object->type == OBJT_SWAP) {
2891 					swap_pager_freespace(object,
2892 							     offidxstart,
2893 							     count);
2894 				}
2895 				if (offidxend >= object->size &&
2896 				    offidxstart < object->size) {
2897 					object->size = offidxstart;
2898 				}
2899 			}
2900 			vm_object_chain_release(object);
2901 			vm_object_drop(object);
2902 		}
2903 
2904 		/*
2905 		 * Delete the entry (which may delete the object) only after
2906 		 * removing all pmap entries pointing to its pages.
2907 		 * (Otherwise, its page frames may be reallocated, and any
2908 		 * modify bits will be set in the wrong object!)
2909 		 */
2910 		vm_map_entry_delete(map, entry, countp);
2911 		entry = next;
2912 	}
2913 	lwkt_reltoken(&map->token);
2914 	return (KERN_SUCCESS);
2915 }
2916 
2917 /*
2918  * Remove the given address range from the target map.
2919  * This is the exported form of vm_map_delete.
2920  *
2921  * No requirements.
2922  */
2923 int
2924 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2925 {
2926 	int result;
2927 	int count;
2928 
2929 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2930 	vm_map_lock(map);
2931 	VM_MAP_RANGE_CHECK(map, start, end);
2932 	result = vm_map_delete(map, start, end, &count);
2933 	vm_map_unlock(map);
2934 	vm_map_entry_release(count);
2935 
2936 	return (result);
2937 }
2938 
2939 /*
2940  * Assert that the target map allows the specified privilege on the
2941  * entire address region given.  The entire region must be allocated.
2942  *
2943  * The caller must specify whether the vm_map is already locked or not.
2944  */
2945 boolean_t
2946 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2947 			vm_prot_t protection, boolean_t have_lock)
2948 {
2949 	vm_map_entry_t entry;
2950 	vm_map_entry_t tmp_entry;
2951 	boolean_t result;
2952 
2953 	if (have_lock == FALSE)
2954 		vm_map_lock_read(map);
2955 
2956 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2957 		if (have_lock == FALSE)
2958 			vm_map_unlock_read(map);
2959 		return (FALSE);
2960 	}
2961 	entry = tmp_entry;
2962 
2963 	result = TRUE;
2964 	while (start < end) {
2965 		if (entry == &map->header) {
2966 			result = FALSE;
2967 			break;
2968 		}
2969 		/*
2970 		 * No holes allowed!
2971 		 */
2972 
2973 		if (start < entry->start) {
2974 			result = FALSE;
2975 			break;
2976 		}
2977 		/*
2978 		 * Check protection associated with entry.
2979 		 */
2980 
2981 		if ((entry->protection & protection) != protection) {
2982 			result = FALSE;
2983 			break;
2984 		}
2985 		/* go to next entry */
2986 
2987 		start = entry->end;
2988 		entry = entry->next;
2989 	}
2990 	if (have_lock == FALSE)
2991 		vm_map_unlock_read(map);
2992 	return (result);
2993 }
2994 
2995 /*
2996  * If appropriate this function shadows the original object with a new object
2997  * and moves the VM pages from the original object to the new object.
2998  * The original object will also be collapsed, if possible.
2999  *
3000  * We can only do this for normal memory objects with a single mapping, and
3001  * it only makes sense to do it if there are 2 or more refs on the original
3002  * object.  i.e. typically a memory object that has been extended into
3003  * multiple vm_map_entry's with non-overlapping ranges.
3004  *
3005  * This makes it easier to remove unused pages and keeps object inheritance
3006  * from being a negative impact on memory usage.
3007  *
3008  * On return the (possibly new) entry->object.vm_object will have an
3009  * additional ref on it for the caller to dispose of (usually by cloning
3010  * the vm_map_entry).  The additional ref had to be done in this routine
3011  * to avoid racing a collapse.  The object's ONEMAPPING flag will also be
3012  * cleared.
3013  *
3014  * The vm_map must be locked and its token held.
3015  */
3016 static void
3017 vm_map_split(vm_map_entry_t entry)
3018 {
3019 	/* OPTIMIZED */
3020 	vm_object_t oobject, nobject, bobject;
3021 	vm_offset_t s, e;
3022 	vm_page_t m;
3023 	vm_pindex_t offidxstart, offidxend, idx;
3024 	vm_size_t size;
3025 	vm_ooffset_t offset;
3026 	int useshadowlist;
3027 
3028 	/*
3029 	 * Optimize away object locks for vnode objects.  Important exit/exec
3030 	 * critical path.
3031 	 *
3032 	 * OBJ_ONEMAPPING doesn't apply to vnode objects but clear the flag
3033 	 * anyway.
3034 	 */
3035 	oobject = entry->object.vm_object;
3036 	if (oobject->type != OBJT_DEFAULT && oobject->type != OBJT_SWAP) {
3037 		vm_object_reference_quick(oobject);
3038 		vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3039 		return;
3040 	}
3041 
3042 	/*
3043 	 * Setup.  Chain lock the original object throughout the entire
3044 	 * routine to prevent new page faults from occuring.
3045 	 *
3046 	 * XXX can madvise WILLNEED interfere with us too?
3047 	 */
3048 	vm_object_hold(oobject);
3049 	vm_object_chain_acquire(oobject, 0);
3050 
3051 	/*
3052 	 * Original object cannot be split?  Might have also changed state.
3053 	 */
3054 	if (oobject->handle == NULL || (oobject->type != OBJT_DEFAULT &&
3055 					oobject->type != OBJT_SWAP)) {
3056 		vm_object_chain_release(oobject);
3057 		vm_object_reference_locked(oobject);
3058 		vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3059 		vm_object_drop(oobject);
3060 		return;
3061 	}
3062 
3063 	/*
3064 	 * Collapse original object with its backing store as an
3065 	 * optimization to reduce chain lengths when possible.
3066 	 *
3067 	 * If ref_count <= 1 there aren't other non-overlapping vm_map_entry's
3068 	 * for oobject, so there's no point collapsing it.
3069 	 *
3070 	 * Then re-check whether the object can be split.
3071 	 */
3072 	vm_object_collapse(oobject, NULL);
3073 
3074 	if (oobject->ref_count <= 1 ||
3075 	    (oobject->type != OBJT_DEFAULT && oobject->type != OBJT_SWAP) ||
3076 	    (oobject->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) != OBJ_ONEMAPPING) {
3077 		vm_object_chain_release(oobject);
3078 		vm_object_reference_locked(oobject);
3079 		vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3080 		vm_object_drop(oobject);
3081 		return;
3082 	}
3083 
3084 	/*
3085 	 * Acquire the chain lock on the backing object.
3086 	 *
3087 	 * Give bobject an additional ref count for when it will be shadowed
3088 	 * by nobject.
3089 	 */
3090 	useshadowlist = 0;
3091 	if ((bobject = oobject->backing_object) != NULL) {
3092 		if (bobject->type != OBJT_VNODE) {
3093 			useshadowlist = 1;
3094 			vm_object_hold(bobject);
3095 			vm_object_chain_wait(bobject, 0);
3096 			vm_object_reference_locked(bobject);
3097 			vm_object_chain_acquire(bobject, 0);
3098 			KKASSERT(bobject->backing_object == bobject);
3099 			KKASSERT((bobject->flags & OBJ_DEAD) == 0);
3100 		} else {
3101 			vm_object_reference_quick(bobject);
3102 		}
3103 	}
3104 
3105 	/*
3106 	 * Calculate the object page range and allocate the new object.
3107 	 */
3108 	offset = entry->offset;
3109 	s = entry->start;
3110 	e = entry->end;
3111 
3112 	offidxstart = OFF_TO_IDX(offset);
3113 	offidxend = offidxstart + OFF_TO_IDX(e - s);
3114 	size = offidxend - offidxstart;
3115 
3116 	switch(oobject->type) {
3117 	case OBJT_DEFAULT:
3118 		nobject = default_pager_alloc(NULL, IDX_TO_OFF(size),
3119 					      VM_PROT_ALL, 0);
3120 		break;
3121 	case OBJT_SWAP:
3122 		nobject = swap_pager_alloc(NULL, IDX_TO_OFF(size),
3123 					   VM_PROT_ALL, 0);
3124 		break;
3125 	default:
3126 		/* not reached */
3127 		nobject = NULL;
3128 		KKASSERT(0);
3129 	}
3130 
3131 	if (nobject == NULL) {
3132 		if (bobject) {
3133 			if (useshadowlist) {
3134 				vm_object_chain_release(bobject);
3135 				vm_object_deallocate(bobject);
3136 				vm_object_drop(bobject);
3137 			} else {
3138 				vm_object_deallocate(bobject);
3139 			}
3140 		}
3141 		vm_object_chain_release(oobject);
3142 		vm_object_reference_locked(oobject);
3143 		vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3144 		vm_object_drop(oobject);
3145 		return;
3146 	}
3147 
3148 	/*
3149 	 * The new object will replace entry->object.vm_object so it needs
3150 	 * a second reference (the caller expects an additional ref).
3151 	 */
3152 	vm_object_hold(nobject);
3153 	vm_object_reference_locked(nobject);
3154 	vm_object_chain_acquire(nobject, 0);
3155 
3156 	/*
3157 	 * nobject shadows bobject (oobject already shadows bobject).
3158 	 */
3159 	if (bobject) {
3160 		nobject->backing_object_offset =
3161 		    oobject->backing_object_offset + IDX_TO_OFF(offidxstart);
3162 		nobject->backing_object = bobject;
3163 		if (useshadowlist) {
3164 			bobject->shadow_count++;
3165 			bobject->generation++;
3166 			LIST_INSERT_HEAD(&bobject->shadow_head,
3167 					 nobject, shadow_list);
3168 			vm_object_clear_flag(bobject, OBJ_ONEMAPPING); /*XXX*/
3169 			vm_object_chain_release(bobject);
3170 			vm_object_drop(bobject);
3171 			vm_object_set_flag(nobject, OBJ_ONSHADOW);
3172 		}
3173 	}
3174 
3175 	/*
3176 	 * Move the VM pages from oobject to nobject
3177 	 */
3178 	for (idx = 0; idx < size; idx++) {
3179 		vm_page_t m;
3180 
3181 		m = vm_page_lookup_busy_wait(oobject, offidxstart + idx,
3182 					     TRUE, "vmpg");
3183 		if (m == NULL)
3184 			continue;
3185 
3186 		/*
3187 		 * We must wait for pending I/O to complete before we can
3188 		 * rename the page.
3189 		 *
3190 		 * We do not have to VM_PROT_NONE the page as mappings should
3191 		 * not be changed by this operation.
3192 		 *
3193 		 * NOTE: The act of renaming a page updates chaingen for both
3194 		 *	 objects.
3195 		 */
3196 		vm_page_rename(m, nobject, idx);
3197 		/* page automatically made dirty by rename and cache handled */
3198 		/* page remains busy */
3199 	}
3200 
3201 	if (oobject->type == OBJT_SWAP) {
3202 		vm_object_pip_add(oobject, 1);
3203 		/*
3204 		 * copy oobject pages into nobject and destroy unneeded
3205 		 * pages in shadow object.
3206 		 */
3207 		swap_pager_copy(oobject, nobject, offidxstart, 0);
3208 		vm_object_pip_wakeup(oobject);
3209 	}
3210 
3211 	/*
3212 	 * Wakeup the pages we played with.  No spl protection is needed
3213 	 * for a simple wakeup.
3214 	 */
3215 	for (idx = 0; idx < size; idx++) {
3216 		m = vm_page_lookup(nobject, idx);
3217 		if (m) {
3218 			KKASSERT(m->flags & PG_BUSY);
3219 			vm_page_wakeup(m);
3220 		}
3221 	}
3222 	entry->object.vm_object = nobject;
3223 	entry->offset = 0LL;
3224 
3225 	/*
3226 	 * Cleanup
3227 	 *
3228 	 * NOTE: There is no need to remove OBJ_ONEMAPPING from oobject, the
3229 	 *	 related pages were moved and are no longer applicable to the
3230 	 *	 original object.
3231 	 *
3232 	 * NOTE: Deallocate oobject (due to its entry->object.vm_object being
3233 	 *	 replaced by nobject).
3234 	 */
3235 	vm_object_chain_release(nobject);
3236 	vm_object_drop(nobject);
3237 	if (bobject && useshadowlist) {
3238 		vm_object_chain_release(bobject);
3239 		vm_object_drop(bobject);
3240 	}
3241 	vm_object_chain_release(oobject);
3242 	/*vm_object_clear_flag(oobject, OBJ_ONEMAPPING);*/
3243 	vm_object_deallocate_locked(oobject);
3244 	vm_object_drop(oobject);
3245 }
3246 
3247 /*
3248  * Copies the contents of the source entry to the destination
3249  * entry.  The entries *must* be aligned properly.
3250  *
3251  * The vm_maps must be exclusively locked.
3252  * The vm_map's token must be held.
3253  *
3254  * Because the maps are locked no faults can be in progress during the
3255  * operation.
3256  */
3257 static void
3258 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
3259 		  vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
3260 {
3261 	vm_object_t src_object;
3262 
3263 	if (dst_entry->maptype == VM_MAPTYPE_SUBMAP)
3264 		return;
3265 	if (src_entry->maptype == VM_MAPTYPE_SUBMAP)
3266 		return;
3267 
3268 	if (src_entry->wired_count == 0) {
3269 		/*
3270 		 * If the source entry is marked needs_copy, it is already
3271 		 * write-protected.
3272 		 */
3273 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
3274 			pmap_protect(src_map->pmap,
3275 			    src_entry->start,
3276 			    src_entry->end,
3277 			    src_entry->protection & ~VM_PROT_WRITE);
3278 		}
3279 
3280 		/*
3281 		 * Make a copy of the object.
3282 		 *
3283 		 * The object must be locked prior to checking the object type
3284 		 * and for the call to vm_object_collapse() and vm_map_split().
3285 		 * We cannot use *_hold() here because the split code will
3286 		 * probably try to destroy the object.  The lock is a pool
3287 		 * token and doesn't care.
3288 		 *
3289 		 * We must bump src_map->timestamp when setting
3290 		 * MAP_ENTRY_NEEDS_COPY to force any concurrent fault
3291 		 * to retry, otherwise the concurrent fault might improperly
3292 		 * install a RW pte when its supposed to be a RO(COW) pte.
3293 		 * This race can occur because a vnode-backed fault may have
3294 		 * to temporarily release the map lock.
3295 		 */
3296 		if (src_entry->object.vm_object != NULL) {
3297 			vm_map_split(src_entry);
3298 			src_object = src_entry->object.vm_object;
3299 			dst_entry->object.vm_object = src_object;
3300 			src_entry->eflags |= (MAP_ENTRY_COW |
3301 					      MAP_ENTRY_NEEDS_COPY);
3302 			dst_entry->eflags |= (MAP_ENTRY_COW |
3303 					      MAP_ENTRY_NEEDS_COPY);
3304 			dst_entry->offset = src_entry->offset;
3305 			++src_map->timestamp;
3306 		} else {
3307 			dst_entry->object.vm_object = NULL;
3308 			dst_entry->offset = 0;
3309 		}
3310 
3311 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3312 		    dst_entry->end - dst_entry->start, src_entry->start);
3313 	} else {
3314 		/*
3315 		 * Of course, wired down pages can't be set copy-on-write.
3316 		 * Cause wired pages to be copied into the new map by
3317 		 * simulating faults (the new pages are pageable)
3318 		 */
3319 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
3320 	}
3321 }
3322 
3323 /*
3324  * vmspace_fork:
3325  * Create a new process vmspace structure and vm_map
3326  * based on those of an existing process.  The new map
3327  * is based on the old map, according to the inheritance
3328  * values on the regions in that map.
3329  *
3330  * The source map must not be locked.
3331  * No requirements.
3332  */
3333 struct vmspace *
3334 vmspace_fork(struct vmspace *vm1)
3335 {
3336 	struct vmspace *vm2;
3337 	vm_map_t old_map = &vm1->vm_map;
3338 	vm_map_t new_map;
3339 	vm_map_entry_t old_entry;
3340 	vm_map_entry_t new_entry;
3341 	vm_object_t object;
3342 	int count;
3343 
3344 	lwkt_gettoken(&vm1->vm_map.token);
3345 	vm_map_lock(old_map);
3346 
3347 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
3348 	lwkt_gettoken(&vm2->vm_map.token);
3349 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
3350 	    (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
3351 	new_map = &vm2->vm_map;	/* XXX */
3352 	new_map->timestamp = 1;
3353 
3354 	vm_map_lock(new_map);
3355 
3356 	count = 0;
3357 	old_entry = old_map->header.next;
3358 	while (old_entry != &old_map->header) {
3359 		++count;
3360 		old_entry = old_entry->next;
3361 	}
3362 
3363 	count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);
3364 
3365 	old_entry = old_map->header.next;
3366 	while (old_entry != &old_map->header) {
3367 		if (old_entry->maptype == VM_MAPTYPE_SUBMAP)
3368 			panic("vm_map_fork: encountered a submap");
3369 
3370 		switch (old_entry->inheritance) {
3371 		case VM_INHERIT_NONE:
3372 			break;
3373 		case VM_INHERIT_SHARE:
3374 			/*
3375 			 * Clone the entry, creating the shared object if
3376 			 * necessary.
3377 			 */
3378 			if (old_entry->object.vm_object == NULL)
3379 				vm_map_entry_allocate_object(old_entry);
3380 
3381 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3382 				/*
3383 				 * Shadow a map_entry which needs a copy,
3384 				 * replacing its object with a new object
3385 				 * that points to the old one.  Ask the
3386 				 * shadow code to automatically add an
3387 				 * additional ref.  We can't do it afterwords
3388 				 * because we might race a collapse.  The call
3389 				 * to vm_map_entry_shadow() will also clear
3390 				 * OBJ_ONEMAPPING.
3391 				 */
3392 				vm_map_entry_shadow(old_entry, 1);
3393 			} else if (old_entry->object.vm_object) {
3394 				/*
3395 				 * We will make a shared copy of the object,
3396 				 * and must clear OBJ_ONEMAPPING.
3397 				 *
3398 				 * Optimize vnode objects.  OBJ_ONEMAPPING
3399 				 * is non-applicable but clear it anyway,
3400 				 * and its terminal so we don'th ave to deal
3401 				 * with chains.  Reduces SMP conflicts.
3402 				 *
3403 				 * XXX assert that object.vm_object != NULL
3404 				 *     since we allocate it above.
3405 				 */
3406 				object = old_entry->object.vm_object;
3407 				if (object->type == OBJT_VNODE) {
3408 					vm_object_reference_quick(object);
3409 					vm_object_clear_flag(object,
3410 							     OBJ_ONEMAPPING);
3411 				} else {
3412 					vm_object_hold(object);
3413 					vm_object_chain_wait(object, 0);
3414 					vm_object_reference_locked(object);
3415 					vm_object_clear_flag(object,
3416 							     OBJ_ONEMAPPING);
3417 					vm_object_drop(object);
3418 				}
3419 			}
3420 
3421 			/*
3422 			 * Clone the entry.  We've already bumped the ref on
3423 			 * any vm_object.
3424 			 */
3425 			new_entry = vm_map_entry_create(new_map, &count);
3426 			*new_entry = *old_entry;
3427 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3428 			new_entry->wired_count = 0;
3429 
3430 			/*
3431 			 * Insert the entry into the new map -- we know we're
3432 			 * inserting at the end of the new map.
3433 			 */
3434 
3435 			vm_map_entry_link(new_map, new_map->header.prev,
3436 					  new_entry);
3437 
3438 			/*
3439 			 * Update the physical map
3440 			 */
3441 			pmap_copy(new_map->pmap, old_map->pmap,
3442 				  new_entry->start,
3443 				  (old_entry->end - old_entry->start),
3444 				  old_entry->start);
3445 			break;
3446 		case VM_INHERIT_COPY:
3447 			/*
3448 			 * Clone the entry and link into the map.
3449 			 */
3450 			new_entry = vm_map_entry_create(new_map, &count);
3451 			*new_entry = *old_entry;
3452 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3453 			new_entry->wired_count = 0;
3454 			new_entry->object.vm_object = NULL;
3455 			vm_map_entry_link(new_map, new_map->header.prev,
3456 					  new_entry);
3457 			vm_map_copy_entry(old_map, new_map, old_entry,
3458 					  new_entry);
3459 			break;
3460 		}
3461 		old_entry = old_entry->next;
3462 	}
3463 
3464 	new_map->size = old_map->size;
3465 	vm_map_unlock(old_map);
3466 	vm_map_unlock(new_map);
3467 	vm_map_entry_release(count);
3468 
3469 	lwkt_reltoken(&vm2->vm_map.token);
3470 	lwkt_reltoken(&vm1->vm_map.token);
3471 
3472 	return (vm2);
3473 }
3474 
3475 /*
3476  * Create an auto-grow stack entry
3477  *
3478  * No requirements.
3479  */
3480 int
3481 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3482 	      int flags, vm_prot_t prot, vm_prot_t max, int cow)
3483 {
3484 	vm_map_entry_t	prev_entry;
3485 	vm_map_entry_t	new_stack_entry;
3486 	vm_size_t	init_ssize;
3487 	int		rv;
3488 	int		count;
3489 	vm_offset_t	tmpaddr;
3490 
3491 	cow |= MAP_IS_STACK;
3492 
3493 	if (max_ssize < sgrowsiz)
3494 		init_ssize = max_ssize;
3495 	else
3496 		init_ssize = sgrowsiz;
3497 
3498 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3499 	vm_map_lock(map);
3500 
3501 	/*
3502 	 * Find space for the mapping
3503 	 */
3504 	if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
3505 		if (vm_map_findspace(map, addrbos, max_ssize, 1,
3506 				     flags, &tmpaddr)) {
3507 			vm_map_unlock(map);
3508 			vm_map_entry_release(count);
3509 			return (KERN_NO_SPACE);
3510 		}
3511 		addrbos = tmpaddr;
3512 	}
3513 
3514 	/* If addr is already mapped, no go */
3515 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
3516 		vm_map_unlock(map);
3517 		vm_map_entry_release(count);
3518 		return (KERN_NO_SPACE);
3519 	}
3520 
3521 #if 0
3522 	/* XXX already handled by kern_mmap() */
3523 	/* If we would blow our VMEM resource limit, no go */
3524 	if (map->size + init_ssize >
3525 	    curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3526 		vm_map_unlock(map);
3527 		vm_map_entry_release(count);
3528 		return (KERN_NO_SPACE);
3529 	}
3530 #endif
3531 
3532 	/*
3533 	 * If we can't accomodate max_ssize in the current mapping,
3534 	 * no go.  However, we need to be aware that subsequent user
3535 	 * mappings might map into the space we have reserved for
3536 	 * stack, and currently this space is not protected.
3537 	 *
3538 	 * Hopefully we will at least detect this condition
3539 	 * when we try to grow the stack.
3540 	 */
3541 	if ((prev_entry->next != &map->header) &&
3542 	    (prev_entry->next->start < addrbos + max_ssize)) {
3543 		vm_map_unlock(map);
3544 		vm_map_entry_release(count);
3545 		return (KERN_NO_SPACE);
3546 	}
3547 
3548 	/*
3549 	 * We initially map a stack of only init_ssize.  We will
3550 	 * grow as needed later.  Since this is to be a grow
3551 	 * down stack, we map at the top of the range.
3552 	 *
3553 	 * Note: we would normally expect prot and max to be
3554 	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
3555 	 * eliminate these as input parameters, and just
3556 	 * pass these values here in the insert call.
3557 	 */
3558 	rv = vm_map_insert(map, &count,
3559 			   NULL, 0, addrbos + max_ssize - init_ssize,
3560 	                   addrbos + max_ssize,
3561 			   VM_MAPTYPE_NORMAL,
3562 			   prot, max,
3563 			   cow);
3564 
3565 	/* Now set the avail_ssize amount */
3566 	if (rv == KERN_SUCCESS) {
3567 		if (prev_entry != &map->header)
3568 			vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize, &count);
3569 		new_stack_entry = prev_entry->next;
3570 		if (new_stack_entry->end   != addrbos + max_ssize ||
3571 		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
3572 			panic ("Bad entry start/end for new stack entry");
3573 		else
3574 			new_stack_entry->aux.avail_ssize = max_ssize - init_ssize;
3575 	}
3576 
3577 	vm_map_unlock(map);
3578 	vm_map_entry_release(count);
3579 	return (rv);
3580 }
3581 
3582 /*
3583  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3584  * desired address is already mapped, or if we successfully grow
3585  * the stack.  Also returns KERN_SUCCESS if addr is outside the
3586  * stack range (this is strange, but preserves compatibility with
3587  * the grow function in vm_machdep.c).
3588  *
3589  * No requirements.
3590  */
3591 int
3592 vm_map_growstack (struct proc *p, vm_offset_t addr)
3593 {
3594 	vm_map_entry_t prev_entry;
3595 	vm_map_entry_t stack_entry;
3596 	vm_map_entry_t new_stack_entry;
3597 	struct vmspace *vm = p->p_vmspace;
3598 	vm_map_t map = &vm->vm_map;
3599 	vm_offset_t    end;
3600 	int grow_amount;
3601 	int rv = KERN_SUCCESS;
3602 	int is_procstack;
3603 	int use_read_lock = 1;
3604 	int count;
3605 
3606 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3607 Retry:
3608 	if (use_read_lock)
3609 		vm_map_lock_read(map);
3610 	else
3611 		vm_map_lock(map);
3612 
3613 	/* If addr is already in the entry range, no need to grow.*/
3614 	if (vm_map_lookup_entry(map, addr, &prev_entry))
3615 		goto done;
3616 
3617 	if ((stack_entry = prev_entry->next) == &map->header)
3618 		goto done;
3619 	if (prev_entry == &map->header)
3620 		end = stack_entry->start - stack_entry->aux.avail_ssize;
3621 	else
3622 		end = prev_entry->end;
3623 
3624 	/*
3625 	 * This next test mimics the old grow function in vm_machdep.c.
3626 	 * It really doesn't quite make sense, but we do it anyway
3627 	 * for compatibility.
3628 	 *
3629 	 * If not growable stack, return success.  This signals the
3630 	 * caller to proceed as he would normally with normal vm.
3631 	 */
3632 	if (stack_entry->aux.avail_ssize < 1 ||
3633 	    addr >= stack_entry->start ||
3634 	    addr <  stack_entry->start - stack_entry->aux.avail_ssize) {
3635 		goto done;
3636 	}
3637 
3638 	/* Find the minimum grow amount */
3639 	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
3640 	if (grow_amount > stack_entry->aux.avail_ssize) {
3641 		rv = KERN_NO_SPACE;
3642 		goto done;
3643 	}
3644 
3645 	/*
3646 	 * If there is no longer enough space between the entries
3647 	 * nogo, and adjust the available space.  Note: this
3648 	 * should only happen if the user has mapped into the
3649 	 * stack area after the stack was created, and is
3650 	 * probably an error.
3651 	 *
3652 	 * This also effectively destroys any guard page the user
3653 	 * might have intended by limiting the stack size.
3654 	 */
3655 	if (grow_amount > stack_entry->start - end) {
3656 		if (use_read_lock && vm_map_lock_upgrade(map)) {
3657 			/* lost lock */
3658 			use_read_lock = 0;
3659 			goto Retry;
3660 		}
3661 		use_read_lock = 0;
3662 		stack_entry->aux.avail_ssize = stack_entry->start - end;
3663 		rv = KERN_NO_SPACE;
3664 		goto done;
3665 	}
3666 
3667 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
3668 
3669 	/* If this is the main process stack, see if we're over the
3670 	 * stack limit.
3671 	 */
3672 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3673 			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3674 		rv = KERN_NO_SPACE;
3675 		goto done;
3676 	}
3677 
3678 	/* Round up the grow amount modulo SGROWSIZ */
3679 	grow_amount = roundup (grow_amount, sgrowsiz);
3680 	if (grow_amount > stack_entry->aux.avail_ssize) {
3681 		grow_amount = stack_entry->aux.avail_ssize;
3682 	}
3683 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3684 	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3685 		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
3686 		              ctob(vm->vm_ssize);
3687 	}
3688 
3689 	/* If we would blow our VMEM resource limit, no go */
3690 	if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3691 		rv = KERN_NO_SPACE;
3692 		goto done;
3693 	}
3694 
3695 	if (use_read_lock && vm_map_lock_upgrade(map)) {
3696 		/* lost lock */
3697 		use_read_lock = 0;
3698 		goto Retry;
3699 	}
3700 	use_read_lock = 0;
3701 
3702 	/* Get the preliminary new entry start value */
3703 	addr = stack_entry->start - grow_amount;
3704 
3705 	/* If this puts us into the previous entry, cut back our growth
3706 	 * to the available space.  Also, see the note above.
3707 	 */
3708 	if (addr < end) {
3709 		stack_entry->aux.avail_ssize = stack_entry->start - end;
3710 		addr = end;
3711 	}
3712 
3713 	rv = vm_map_insert(map, &count,
3714 			   NULL, 0, addr, stack_entry->start,
3715 			   VM_MAPTYPE_NORMAL,
3716 			   VM_PROT_ALL, VM_PROT_ALL,
3717 			   0);
3718 
3719 	/* Adjust the available stack space by the amount we grew. */
3720 	if (rv == KERN_SUCCESS) {
3721 		if (prev_entry != &map->header)
3722 			vm_map_clip_end(map, prev_entry, addr, &count);
3723 		new_stack_entry = prev_entry->next;
3724 		if (new_stack_entry->end   != stack_entry->start  ||
3725 		    new_stack_entry->start != addr)
3726 			panic ("Bad stack grow start/end in new stack entry");
3727 		else {
3728 			new_stack_entry->aux.avail_ssize =
3729 				stack_entry->aux.avail_ssize -
3730 				(new_stack_entry->end - new_stack_entry->start);
3731 			if (is_procstack)
3732 				vm->vm_ssize += btoc(new_stack_entry->end -
3733 						     new_stack_entry->start);
3734 		}
3735 
3736 		if (map->flags & MAP_WIREFUTURE)
3737 			vm_map_unwire(map, new_stack_entry->start,
3738 				      new_stack_entry->end, FALSE);
3739 	}
3740 
3741 done:
3742 	if (use_read_lock)
3743 		vm_map_unlock_read(map);
3744 	else
3745 		vm_map_unlock(map);
3746 	vm_map_entry_release(count);
3747 	return (rv);
3748 }
3749 
3750 /*
3751  * Unshare the specified VM space for exec.  If other processes are
3752  * mapped to it, then create a new one.  The new vmspace is null.
3753  *
3754  * No requirements.
3755  */
3756 void
3757 vmspace_exec(struct proc *p, struct vmspace *vmcopy)
3758 {
3759 	struct vmspace *oldvmspace = p->p_vmspace;
3760 	struct vmspace *newvmspace;
3761 	vm_map_t map = &p->p_vmspace->vm_map;
3762 
3763 	/*
3764 	 * If we are execing a resident vmspace we fork it, otherwise
3765 	 * we create a new vmspace.  Note that exitingcnt is not
3766 	 * copied to the new vmspace.
3767 	 */
3768 	lwkt_gettoken(&oldvmspace->vm_map.token);
3769 	if (vmcopy)  {
3770 		newvmspace = vmspace_fork(vmcopy);
3771 		lwkt_gettoken(&newvmspace->vm_map.token);
3772 	} else {
3773 		newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
3774 		lwkt_gettoken(&newvmspace->vm_map.token);
3775 		bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
3776 		      (caddr_t)&oldvmspace->vm_endcopy -
3777 		       (caddr_t)&oldvmspace->vm_startcopy);
3778 	}
3779 
3780 	/*
3781 	 * Finish initializing the vmspace before assigning it
3782 	 * to the process.  The vmspace will become the current vmspace
3783 	 * if p == curproc.
3784 	 */
3785 	pmap_pinit2(vmspace_pmap(newvmspace));
3786 	pmap_replacevm(p, newvmspace, 0);
3787 	lwkt_reltoken(&newvmspace->vm_map.token);
3788 	lwkt_reltoken(&oldvmspace->vm_map.token);
3789 	vmspace_rel(oldvmspace);
3790 }
3791 
3792 /*
3793  * Unshare the specified VM space for forcing COW.  This
3794  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3795  */
3796 void
3797 vmspace_unshare(struct proc *p)
3798 {
3799 	struct vmspace *oldvmspace = p->p_vmspace;
3800 	struct vmspace *newvmspace;
3801 
3802 	lwkt_gettoken(&oldvmspace->vm_map.token);
3803 	if (vmspace_getrefs(oldvmspace) == 1) {
3804 		lwkt_reltoken(&oldvmspace->vm_map.token);
3805 		return;
3806 	}
3807 	newvmspace = vmspace_fork(oldvmspace);
3808 	lwkt_gettoken(&newvmspace->vm_map.token);
3809 	pmap_pinit2(vmspace_pmap(newvmspace));
3810 	pmap_replacevm(p, newvmspace, 0);
3811 	lwkt_reltoken(&newvmspace->vm_map.token);
3812 	lwkt_reltoken(&oldvmspace->vm_map.token);
3813 	vmspace_rel(oldvmspace);
3814 }
3815 
3816 /*
3817  * vm_map_hint: return the beginning of the best area suitable for
3818  * creating a new mapping with "prot" protection.
3819  *
3820  * No requirements.
3821  */
3822 vm_offset_t
3823 vm_map_hint(struct proc *p, vm_offset_t addr, vm_prot_t prot)
3824 {
3825 	struct vmspace *vms = p->p_vmspace;
3826 
3827 	if (!randomize_mmap || addr != 0) {
3828 		/*
3829 		 * Set a reasonable start point for the hint if it was
3830 		 * not specified or if it falls within the heap space.
3831 		 * Hinted mmap()s do not allocate out of the heap space.
3832 		 */
3833 		if (addr == 0 ||
3834 		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
3835 		     addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))) {
3836 			addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
3837 		}
3838 
3839 		return addr;
3840 	}
3841 
3842 #ifdef notyet
3843 #ifdef __i386__
3844 	/*
3845 	 * If executable skip first two pages, otherwise start
3846 	 * after data + heap region.
3847 	 */
3848 	if ((prot & VM_PROT_EXECUTE) &&
3849 	    ((vm_offset_t)vms->vm_daddr >= I386_MAX_EXE_ADDR)) {
3850 		addr = (PAGE_SIZE * 2) +
3851 		    (karc4random() & (I386_MAX_EXE_ADDR / 2 - 1));
3852 		return (round_page(addr));
3853 	}
3854 #endif /* __i386__ */
3855 #endif /* notyet */
3856 
3857 	addr = (vm_offset_t)vms->vm_daddr + MAXDSIZ;
3858 	addr += karc4random() & (MIN((256 * 1024 * 1024), MAXDSIZ) - 1);
3859 
3860 	return (round_page(addr));
3861 }
3862 
3863 /*
3864  * Finds the VM object, offset, and protection for a given virtual address
3865  * in the specified map, assuming a page fault of the type specified.
3866  *
3867  * Leaves the map in question locked for read; return values are guaranteed
3868  * until a vm_map_lookup_done call is performed.  Note that the map argument
3869  * is in/out; the returned map must be used in the call to vm_map_lookup_done.
3870  *
3871  * A handle (out_entry) is returned for use in vm_map_lookup_done, to make
3872  * that fast.
3873  *
3874  * If a lookup is requested with "write protection" specified, the map may
3875  * be changed to perform virtual copying operations, although the data
3876  * referenced will remain the same.
3877  *
3878  * No requirements.
3879  */
3880 int
3881 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
3882 	      vm_offset_t vaddr,
3883 	      vm_prot_t fault_typea,
3884 	      vm_map_entry_t *out_entry,	/* OUT */
3885 	      vm_object_t *object,		/* OUT */
3886 	      vm_pindex_t *pindex,		/* OUT */
3887 	      vm_prot_t *out_prot,		/* OUT */
3888 	      boolean_t *wired)			/* OUT */
3889 {
3890 	vm_map_entry_t entry;
3891 	vm_map_t map = *var_map;
3892 	vm_prot_t prot;
3893 	vm_prot_t fault_type = fault_typea;
3894 	int use_read_lock = 1;
3895 	int rv = KERN_SUCCESS;
3896 
3897 RetryLookup:
3898 	if (use_read_lock)
3899 		vm_map_lock_read(map);
3900 	else
3901 		vm_map_lock(map);
3902 
3903 	/*
3904 	 * If the map has an interesting hint, try it before calling full
3905 	 * blown lookup routine.
3906 	 */
3907 	entry = map->hint;
3908 	cpu_ccfence();
3909 	*out_entry = entry;
3910 	*object = NULL;
3911 
3912 	if ((entry == &map->header) ||
3913 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
3914 		vm_map_entry_t tmp_entry;
3915 
3916 		/*
3917 		 * Entry was either not a valid hint, or the vaddr was not
3918 		 * contained in the entry, so do a full lookup.
3919 		 */
3920 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
3921 			rv = KERN_INVALID_ADDRESS;
3922 			goto done;
3923 		}
3924 
3925 		entry = tmp_entry;
3926 		*out_entry = entry;
3927 	}
3928 
3929 	/*
3930 	 * Handle submaps.
3931 	 */
3932 	if (entry->maptype == VM_MAPTYPE_SUBMAP) {
3933 		vm_map_t old_map = map;
3934 
3935 		*var_map = map = entry->object.sub_map;
3936 		if (use_read_lock)
3937 			vm_map_unlock_read(old_map);
3938 		else
3939 			vm_map_unlock(old_map);
3940 		use_read_lock = 1;
3941 		goto RetryLookup;
3942 	}
3943 
3944 	/*
3945 	 * Check whether this task is allowed to have this page.
3946 	 * Note the special case for MAP_ENTRY_COW
3947 	 * pages with an override.  This is to implement a forced
3948 	 * COW for debuggers.
3949 	 */
3950 
3951 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
3952 		prot = entry->max_protection;
3953 	else
3954 		prot = entry->protection;
3955 
3956 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3957 	if ((fault_type & prot) != fault_type) {
3958 		rv = KERN_PROTECTION_FAILURE;
3959 		goto done;
3960 	}
3961 
3962 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3963 	    (entry->eflags & MAP_ENTRY_COW) &&
3964 	    (fault_type & VM_PROT_WRITE) &&
3965 	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
3966 		rv = KERN_PROTECTION_FAILURE;
3967 		goto done;
3968 	}
3969 
3970 	/*
3971 	 * If this page is not pageable, we have to get it for all possible
3972 	 * accesses.
3973 	 */
3974 	*wired = (entry->wired_count != 0);
3975 	if (*wired)
3976 		prot = fault_type = entry->protection;
3977 
3978 	/*
3979 	 * Virtual page tables may need to update the accessed (A) bit
3980 	 * in a page table entry.  Upgrade the fault to a write fault for
3981 	 * that case if the map will support it.  If the map does not support
3982 	 * it the page table entry simply will not be updated.
3983 	 */
3984 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
3985 		if (prot & VM_PROT_WRITE)
3986 			fault_type |= VM_PROT_WRITE;
3987 	}
3988 
3989 	if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
3990 	    pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
3991 		if ((prot & VM_PROT_WRITE) == 0)
3992 			fault_type |= VM_PROT_WRITE;
3993 	}
3994 
3995 	/*
3996 	 * If the entry was copy-on-write, we either ...
3997 	 */
3998 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3999 		/*
4000 		 * If we want to write the page, we may as well handle that
4001 		 * now since we've got the map locked.
4002 		 *
4003 		 * If we don't need to write the page, we just demote the
4004 		 * permissions allowed.
4005 		 */
4006 
4007 		if (fault_type & VM_PROT_WRITE) {
4008 			/*
4009 			 * Make a new object, and place it in the object
4010 			 * chain.  Note that no new references have appeared
4011 			 * -- one just moved from the map to the new
4012 			 * object.
4013 			 */
4014 
4015 			if (use_read_lock && vm_map_lock_upgrade(map)) {
4016 				/* lost lock */
4017 				use_read_lock = 0;
4018 				goto RetryLookup;
4019 			}
4020 			use_read_lock = 0;
4021 
4022 			vm_map_entry_shadow(entry, 0);
4023 		} else {
4024 			/*
4025 			 * We're attempting to read a copy-on-write page --
4026 			 * don't allow writes.
4027 			 */
4028 
4029 			prot &= ~VM_PROT_WRITE;
4030 		}
4031 	}
4032 
4033 	/*
4034 	 * Create an object if necessary.
4035 	 */
4036 	if (entry->object.vm_object == NULL && !map->system_map) {
4037 		if (use_read_lock && vm_map_lock_upgrade(map))  {
4038 			/* lost lock */
4039 			use_read_lock = 0;
4040 			goto RetryLookup;
4041 		}
4042 		use_read_lock = 0;
4043 		vm_map_entry_allocate_object(entry);
4044 	}
4045 
4046 	/*
4047 	 * Return the object/offset from this entry.  If the entry was
4048 	 * copy-on-write or empty, it has been fixed up.
4049 	 */
4050 
4051 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4052 	*object = entry->object.vm_object;
4053 
4054 	/*
4055 	 * Return whether this is the only map sharing this data.  On
4056 	 * success we return with a read lock held on the map.  On failure
4057 	 * we return with the map unlocked.
4058 	 */
4059 	*out_prot = prot;
4060 done:
4061 	if (rv == KERN_SUCCESS) {
4062 		if (use_read_lock == 0)
4063 			vm_map_lock_downgrade(map);
4064 	} else if (use_read_lock) {
4065 		vm_map_unlock_read(map);
4066 	} else {
4067 		vm_map_unlock(map);
4068 	}
4069 	return (rv);
4070 }
4071 
4072 /*
4073  * Releases locks acquired by a vm_map_lookup()
4074  * (according to the handle returned by that lookup).
4075  *
4076  * No other requirements.
4077  */
4078 void
4079 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
4080 {
4081 	/*
4082 	 * Unlock the main-level map
4083 	 */
4084 	vm_map_unlock_read(map);
4085 	if (count)
4086 		vm_map_entry_release(count);
4087 }
4088 
4089 #include "opt_ddb.h"
4090 #ifdef DDB
4091 #include <sys/kernel.h>
4092 
4093 #include <ddb/ddb.h>
4094 
4095 /*
4096  * Debugging only
4097  */
4098 DB_SHOW_COMMAND(map, vm_map_print)
4099 {
4100 	static int nlines;
4101 	/* XXX convert args. */
4102 	vm_map_t map = (vm_map_t)addr;
4103 	boolean_t full = have_addr;
4104 
4105 	vm_map_entry_t entry;
4106 
4107 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4108 	    (void *)map,
4109 	    (void *)map->pmap, map->nentries, map->timestamp);
4110 	nlines++;
4111 
4112 	if (!full && db_indent)
4113 		return;
4114 
4115 	db_indent += 2;
4116 	for (entry = map->header.next; entry != &map->header;
4117 	    entry = entry->next) {
4118 		db_iprintf("map entry %p: start=%p, end=%p\n",
4119 		    (void *)entry, (void *)entry->start, (void *)entry->end);
4120 		nlines++;
4121 		{
4122 			static char *inheritance_name[4] =
4123 			{"share", "copy", "none", "donate_copy"};
4124 
4125 			db_iprintf(" prot=%x/%x/%s",
4126 			    entry->protection,
4127 			    entry->max_protection,
4128 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
4129 			if (entry->wired_count != 0)
4130 				db_printf(", wired");
4131 		}
4132 		if (entry->maptype == VM_MAPTYPE_SUBMAP) {
4133 			/* XXX no %qd in kernel.  Truncate entry->offset. */
4134 			db_printf(", share=%p, offset=0x%lx\n",
4135 			    (void *)entry->object.sub_map,
4136 			    (long)entry->offset);
4137 			nlines++;
4138 			if ((entry->prev == &map->header) ||
4139 			    (entry->prev->object.sub_map !=
4140 				entry->object.sub_map)) {
4141 				db_indent += 2;
4142 				vm_map_print((db_expr_t)(intptr_t)
4143 					     entry->object.sub_map,
4144 					     full, 0, NULL);
4145 				db_indent -= 2;
4146 			}
4147 		} else {
4148 			/* XXX no %qd in kernel.  Truncate entry->offset. */
4149 			db_printf(", object=%p, offset=0x%lx",
4150 			    (void *)entry->object.vm_object,
4151 			    (long)entry->offset);
4152 			if (entry->eflags & MAP_ENTRY_COW)
4153 				db_printf(", copy (%s)",
4154 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4155 			db_printf("\n");
4156 			nlines++;
4157 
4158 			if ((entry->prev == &map->header) ||
4159 			    (entry->prev->object.vm_object !=
4160 				entry->object.vm_object)) {
4161 				db_indent += 2;
4162 				vm_object_print((db_expr_t)(intptr_t)
4163 						entry->object.vm_object,
4164 						full, 0, NULL);
4165 				nlines += 4;
4166 				db_indent -= 2;
4167 			}
4168 		}
4169 	}
4170 	db_indent -= 2;
4171 	if (db_indent == 0)
4172 		nlines = 0;
4173 }
4174 
4175 /*
4176  * Debugging only
4177  */
4178 DB_SHOW_COMMAND(procvm, procvm)
4179 {
4180 	struct proc *p;
4181 
4182 	if (have_addr) {
4183 		p = (struct proc *) addr;
4184 	} else {
4185 		p = curproc;
4186 	}
4187 
4188 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4189 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4190 	    (void *)vmspace_pmap(p->p_vmspace));
4191 
4192 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
4193 }
4194 
4195 #endif /* DDB */
4196