xref: /dragonfly/sys/vm/vm_map.c (revision e32fb2aa)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * Copyright (c) 2003-2019 The DragonFly Project.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * The Mach Operating System project at Carnegie-Mellon University.
8  *
9  * This code is derived from software contributed to The DragonFly Project
10  * by Matthew Dillon <dillon@backplane.com>
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37  *
38  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
39  * All rights reserved.
40  *
41  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
42  *
43  * Permission to use, copy, modify and distribute this software and
44  * its documentation is hereby granted, provided that both the copyright
45  * notice and this permission notice appear in all copies of the
46  * software, derivative works or modified versions, and any portions
47  * thereof, and that both notices appear in supporting documentation.
48  *
49  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
51  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52  *
53  * Carnegie Mellon requests users of this software to return to
54  *
55  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
56  *  School of Computer Science
57  *  Carnegie Mellon University
58  *  Pittsburgh PA 15213-3890
59  *
60  * any improvements or extensions that they make and grant Carnegie the
61  * rights to redistribute these changes.
62  */
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/proc.h>
67 #include <sys/serialize.h>
68 #include <sys/lock.h>
69 #include <sys/vmmeter.h>
70 #include <sys/mman.h>
71 #include <sys/vnode.h>
72 #include <sys/resourcevar.h>
73 #include <sys/shm.h>
74 #include <sys/tree.h>
75 #include <sys/malloc.h>
76 #include <sys/objcache.h>
77 #include <sys/kern_syscall.h>
78 
79 #include <vm/vm.h>
80 #include <vm/vm_param.h>
81 #include <vm/pmap.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_page.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_pager.h>
86 #include <vm/vm_kern.h>
87 #include <vm/vm_extern.h>
88 #include <vm/swap_pager.h>
89 #include <vm/vm_zone.h>
90 
91 #include <sys/random.h>
92 #include <sys/sysctl.h>
93 #include <sys/spinlock.h>
94 
95 #include <sys/thread2.h>
96 #include <sys/spinlock2.h>
97 
98 /*
99  * Virtual memory maps provide for the mapping, protection, and sharing
100  * of virtual memory objects.  In addition, this module provides for an
101  * efficient virtual copy of memory from one map to another.
102  *
103  * Synchronization is required prior to most operations.
104  *
105  * Maps consist of an ordered doubly-linked list of simple entries.
106  * A hint and an RB tree are used to speed up lookups.
107  *
108  * Callers looking to modify maps specify start/end addresses which cause
109  * the related map entry to be clipped if necessary, and then later
110  * recombined if the pieces remain compatible.
111  *
112  * Virtual copy operations are performed by copying VM object references
113  * from one map to another, and then marking both regions as copy-on-write.
114  */
115 static boolean_t vmspace_ctor(void *obj, void *privdata, int ocflags);
116 static void vmspace_dtor(void *obj, void *privdata);
117 static void vmspace_terminate(struct vmspace *vm, int final);
118 
119 MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore");
120 MALLOC_DEFINE(M_MAP_BACKING, "map_backing", "vm_map_backing to entry");
121 static struct objcache *vmspace_cache;
122 
123 /*
124  * per-cpu page table cross mappings are initialized in early boot
125  * and might require a considerable number of vm_map_entry structures.
126  */
127 #define MAPENTRYBSP_CACHE	(MAXCPU+1)
128 #define MAPENTRYAP_CACHE	8
129 
130 /*
131  * Partitioning threaded programs with large anonymous memory areas can
132  * improve concurrent fault performance.
133  */
134 #define MAP_ENTRY_PARTITION_SIZE	((vm_offset_t)(32 * 1024 * 1024))
135 #define MAP_ENTRY_PARTITION_MASK	(MAP_ENTRY_PARTITION_SIZE - 1)
136 
137 #define VM_MAP_ENTRY_WITHIN_PARTITION(entry)	\
138 	((((entry)->ba.start ^ (entry)->ba.end) & ~MAP_ENTRY_PARTITION_MASK) == 0)
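/*
 * The test above is true when the entry's start and end addresses fall
 * within the same MAP_ENTRY_PARTITION_SIZE-aligned window, i.e. when
 * (start ^ end) has no bits set above the partition mask.  For example,
 * with the 32MB partition size, start = 0x0200000 and end = 0x0400000
 * differ only in the low-order bits and test as within one partition,
 * while start = 0x1ff0000 and end = 0x2010000 straddle the 32MB boundary
 * at 0x2000000 and do not.
 */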
139 
140 static struct vm_zone mapentzone_store;
141 static vm_zone_t mapentzone;
142 
143 static struct vm_map_entry map_entry_init[MAX_MAPENT];
144 static struct vm_map_entry cpu_map_entry_init_bsp[MAPENTRYBSP_CACHE];
145 static struct vm_map_entry cpu_map_entry_init_ap[MAXCPU][MAPENTRYAP_CACHE];
146 
147 static int randomize_mmap;
148 SYSCTL_INT(_vm, OID_AUTO, randomize_mmap, CTLFLAG_RW, &randomize_mmap, 0,
149     "Randomize mmap offsets");
150 static int vm_map_relock_enable = 1;
151 SYSCTL_INT(_vm, OID_AUTO, map_relock_enable, CTLFLAG_RW,
152 	   &vm_map_relock_enable, 0, "insert pop pgtable optimization");
153 static int vm_map_partition_enable = 1;
154 SYSCTL_INT(_vm, OID_AUTO, map_partition_enable, CTLFLAG_RW,
155 	   &vm_map_partition_enable, 0, "Break up larger vm_map_entry's");
156 static int vm_map_backing_limit = 5;
157 SYSCTL_INT(_vm, OID_AUTO, map_backing_limit, CTLFLAG_RW,
158 	   &vm_map_backing_limit, 0, "ba.backing_ba link depth");
159 static int vm_map_backing_shadow_test = 1;
160 SYSCTL_INT(_vm, OID_AUTO, map_backing_shadow_test, CTLFLAG_RW,
161 	   &vm_map_backing_shadow_test, 0, "ba.object shadow test");
162 
163 static void vmspace_drop_notoken(struct vmspace *vm);
164 static void vm_map_entry_shadow(vm_map_entry_t entry);
165 static vm_map_entry_t vm_map_entry_create(int *);
166 static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
167 static void vm_map_entry_dispose_ba (vm_map_backing_t ba);
168 static void vm_map_backing_replicated(vm_map_t map,
169 		vm_map_entry_t entry, int flags);
170 static void vm_map_backing_adjust_start(vm_map_entry_t entry,
171 		vm_ooffset_t start);
172 static void vm_map_backing_adjust_end(vm_map_entry_t entry,
173 		vm_ooffset_t end);
174 static void vm_map_backing_attach (vm_map_backing_t ba);
175 static void vm_map_backing_detach (vm_map_backing_t ba);
176 static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
177 static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
178 static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
179 static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
180 static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
181 		vm_map_entry_t);
182 static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry,
183 		vm_offset_t start, vm_offset_t end, int *countp, int flags);
184 static void vm_map_entry_partition(vm_map_t map, vm_map_entry_t entry,
185 		vm_offset_t vaddr, int *countp);
186 
187 #define MAP_BACK_CLIPPED	0x0001
188 #define MAP_BACK_BASEOBJREFD	0x0002
189 
190 /*
191  * Initialize the vm_map module.  Must be called before any other vm_map
192  * routines.
193  *
194  * Map and entry structures are allocated from the general purpose
195  * memory pool with some exceptions:
196  *
197  *	- The kernel map is allocated statically.
198  *	- Initial kernel map entries are allocated out of a static pool.
199  *	- We must set ZONE_SPECIAL here or the early boot code can get
200  *	  stuck if there are >63 cores.
201  *
202  *	These restrictions are necessary since malloc() uses the
203  *	maps and requires map entries.
204  *
205  * Called from the low level boot code only.
206  */
207 void
208 vm_map_startup(void)
209 {
210 	mapentzone = &mapentzone_store;
211 	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
212 		  map_entry_init, MAX_MAPENT);
213 	mapentzone_store.zflags |= ZONE_SPECIAL;
214 }
215 
216 /*
217  * Called prior to any vmspace allocations.
218  *
219  * Called from the low level boot code only.
220  */
221 void
222 vm_init2(void)
223 {
224 	vmspace_cache = objcache_create_mbacked(M_VMSPACE,
225 						sizeof(struct vmspace),
226 						0, ncpus * 4,
227 						vmspace_ctor, vmspace_dtor,
228 						NULL);
229 	zinitna(mapentzone, NULL, 0, 0, ZONE_USE_RESERVE | ZONE_SPECIAL);
230 	pmap_init2();
231 	vm_object_init2();
232 }
233 
234 /*
235  * objcache support.  We leave the pmap root cached as long as possible
236  * for performance reasons.
237  */
238 static
239 boolean_t
240 vmspace_ctor(void *obj, void *privdata, int ocflags)
241 {
242 	struct vmspace *vm = obj;
243 
244 	bzero(vm, sizeof(*vm));
245 	vm->vm_refcnt = VM_REF_DELETED;
246 
247 	return 1;
248 }
249 
250 static
251 void
252 vmspace_dtor(void *obj, void *privdata)
253 {
254 	struct vmspace *vm = obj;
255 
256 	KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
257 	pmap_puninit(vmspace_pmap(vm));
258 }
259 
260 /*
261  * Red black tree functions
262  *
263  * The caller must hold the related map lock.
264  */
265 static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
266 RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);
267 
268 /* a->ba.start is the address, and the only field which must be initialized */
269 static int
270 rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
271 {
272 	if (a->ba.start < b->ba.start)
273 		return(-1);
274 	else if (a->ba.start > b->ba.start)
275 		return(1);
276 	return(0);
277 }
278 
279 /*
280  * Initialize vmspace ref/hold counts for vmspace0.  There is a holdcnt for
281  * every refcnt.
282  */
283 void
284 vmspace_initrefs(struct vmspace *vm)
285 {
286 	vm->vm_refcnt = 1;
287 	vm->vm_holdcnt = 1;
288 }
289 
290 /*
291  * Allocate a vmspace structure, including a vm_map and pmap.
292  * Initialize numerous fields.  While the initial allocation is zeroed,
293  * subsequent reuse from the objcache leaves elements of the structure
294  * intact (particularly the pmap), so portions must be zeroed.
295  *
296  * Returns a referenced vmspace.
297  *
298  * No requirements.
299  */
300 struct vmspace *
301 vmspace_alloc(vm_offset_t min, vm_offset_t max)
302 {
303 	struct vmspace *vm;
304 
305 	vm = objcache_get(vmspace_cache, M_WAITOK);
306 
307 	bzero(&vm->vm_startcopy,
308 	      (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
309 	vm_map_init(&vm->vm_map, min, max, NULL);	/* initializes token */
310 
311 	/*
312 	 * NOTE: hold acquires the token for safety.
313 	 *
314 	 * On return vmspace is referenced (refs=1, hold=1).  That is,
315 	 * each refcnt also has a holdcnt.  There can be additional holds
316 	 * (holdcnt) above and beyond the refcnt.  Finalization is handled in
317 	 * two stages, one on refs 1->0, and the second on hold 1->0.
318 	 */
319 	KKASSERT(vm->vm_holdcnt == 0);
320 	KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
321 	vmspace_initrefs(vm);
322 	vmspace_hold(vm);
323 	pmap_pinit(vmspace_pmap(vm));		/* (some fields reused) */
324 	vm->vm_map.pmap = vmspace_pmap(vm);	/* XXX */
325 	vm->vm_shm = NULL;
326 	vm->vm_flags = 0;
327 	cpu_vmspace_alloc(vm);
328 	vmspace_drop(vm);
329 
330 	return (vm);
331 }
332 
333 /*
334  * NOTE: Can return 0 if the vmspace is exiting.
335  */
336 int
337 vmspace_getrefs(struct vmspace *vm)
338 {
339 	int32_t n;
340 
341 	n = vm->vm_refcnt;
342 	cpu_ccfence();
343 	if (n & VM_REF_DELETED)
344 		n = -1;
345 	return n;
346 }
347 
348 void
349 vmspace_hold(struct vmspace *vm)
350 {
351 	atomic_add_int(&vm->vm_holdcnt, 1);
352 	lwkt_gettoken(&vm->vm_map.token);
353 }
354 
355 /*
356  * Drop with final termination interlock.
357  */
358 void
359 vmspace_drop(struct vmspace *vm)
360 {
361 	lwkt_reltoken(&vm->vm_map.token);
362 	vmspace_drop_notoken(vm);
363 }
364 
365 static void
366 vmspace_drop_notoken(struct vmspace *vm)
367 {
368 	if (atomic_fetchadd_int(&vm->vm_holdcnt, -1) == 1) {
369 		if (vm->vm_refcnt & VM_REF_DELETED)
370 			vmspace_terminate(vm, 1);
371 	}
372 }
373 
374 /*
375  * A vmspace object must not be in a terminated state to be able to obtain
376  * additional refs on it.
377  *
378  * These are official references to the vmspace; the count is used to check
379  * for vmspace sharing.  Foreign accessors should use 'hold' and not 'ref'.
380  *
381  * XXX we need to combine hold & ref together into one 64-bit field to allow
382  * holds to prevent stage-1 termination.
383  */
384 void
385 vmspace_ref(struct vmspace *vm)
386 {
387 	uint32_t n;
388 
389 	atomic_add_int(&vm->vm_holdcnt, 1);
390 	n = atomic_fetchadd_int(&vm->vm_refcnt, 1);
391 	KKASSERT((n & VM_REF_DELETED) == 0);
392 }
393 
394 /*
395  * Release a ref on the vmspace.  On the 1->0 transition we do stage-1
396  * termination of the vmspace.  Then, on the final drop of the hold we
397  * will do stage-2 final termination.
398  */
399 void
400 vmspace_rel(struct vmspace *vm)
401 {
402 	uint32_t n;
403 
404 	/*
405 	 * Drop refs.  Each ref also has a hold which is also dropped.
406 	 *
407 	 * When refs hits 0, compete to set the VM_REF_DELETED flag (holds
408 	 * prevent finalization) to start termination processing.
409 	 * Finalization occurs when the last hold count drops to 0.
410 	 */
411 	n = atomic_fetchadd_int(&vm->vm_refcnt, -1) - 1;
412 	while (n == 0) {
413 		if (atomic_cmpset_int(&vm->vm_refcnt, 0, VM_REF_DELETED)) {
414 			vmspace_terminate(vm, 0);
415 			break;
416 		}
417 		n = vm->vm_refcnt;
418 		cpu_ccfence();
419 	}
420 	vmspace_drop_notoken(vm);
421 }
422 
423 /*
424  * This is called during exit indicating that the vmspace is no
425  * longer in use by an exiting process, but the process has not yet
426  * been reaped.
427  *
428  * We drop refs, allowing for stage-1 termination, but maintain a holdcnt
429  * to prevent stage-2 until the process is reaped.  Note the order of
430  * operations: we must hold first.
431  *
432  * No requirements.
433  */
434 void
435 vmspace_relexit(struct vmspace *vm)
436 {
437 	atomic_add_int(&vm->vm_holdcnt, 1);
438 	vmspace_rel(vm);
439 }
440 
441 /*
442  * Called during reap to disconnect the remainder of the vmspace from
443  * the process.  When the hold is dropped the vmspace termination is finalized.
444  *
445  * No requirements.
446  */
447 void
448 vmspace_exitfree(struct proc *p)
449 {
450 	struct vmspace *vm;
451 
452 	vm = p->p_vmspace;
453 	p->p_vmspace = NULL;
454 	vmspace_drop_notoken(vm);
455 }
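/*
 * Putting the pieces above together, the rough life cycle of a process
 * vmspace looks like this (a sketch of the call sequence, not a literal
 * code path):
 *
 *	vm = vmspace_alloc(min, max);	refs=1, hold=1
 *	vmspace_ref(vm);		an additional sharer (e.g. RFMEM)
 *	...
 *	vmspace_relexit(vm);		at exit: hold++ then rel; stage-1
 *					termination runs when refs reach 0
 *	vmspace_exitfree(p);		at reap: drops the exit hold; stage-2
 *					(final) termination runs when the
 *					last hold goes away
 */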
456 
457 /*
458  * Called in two cases:
459  *
460  * (1) When the last refcnt is dropped and the vmspace becomes inactive,
461  *     called with final == 0.  refcnt will be (u_int)-1 at this point,
462  *     and holdcnt will still be non-zero.
463  *
464  * (2) When holdcnt becomes 0, called with final == 1.  There should no
465  *     longer be anyone with access to the vmspace.
466  *
467  * VMSPACE_EXIT1 flags the primary deactivation
468  * VMSPACE_EXIT2 flags the last reap
469  */
470 static void
471 vmspace_terminate(struct vmspace *vm, int final)
472 {
473 	int count;
474 
475 	lwkt_gettoken(&vm->vm_map.token);
476 	if (final == 0) {
477 		KKASSERT((vm->vm_flags & VMSPACE_EXIT1) == 0);
478 		vm->vm_flags |= VMSPACE_EXIT1;
479 
480 		/*
481 		 * Get rid of most of the resources.  Leave the kernel pmap
482 		 * intact.
483 		 *
484 		 * If the pmap does not contain wired pages we can bulk-delete
485 		 * the pmap as a performance optimization before removing the
486 		 * related mappings.
487 		 *
488 		 * If the pmap contains wired pages we cannot do this
489 		 * pre-optimization because currently vm_fault_unwire()
490 		 * expects the pmap pages to exist and will not decrement
491 		 * p->wire_count if they do not.
492 		 */
493 		shmexit(vm);
494 		if (vmspace_pmap(vm)->pm_stats.wired_count) {
495 			vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
496 				      VM_MAX_USER_ADDRESS);
497 			pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
498 					  VM_MAX_USER_ADDRESS);
499 		} else {
500 			pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
501 					  VM_MAX_USER_ADDRESS);
502 			vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
503 				      VM_MAX_USER_ADDRESS);
504 		}
505 		lwkt_reltoken(&vm->vm_map.token);
506 	} else {
507 		KKASSERT((vm->vm_flags & VMSPACE_EXIT1) != 0);
508 		KKASSERT((vm->vm_flags & VMSPACE_EXIT2) == 0);
509 
510 		/*
511 		 * Get rid of remaining basic resources.
512 		 */
513 		vm->vm_flags |= VMSPACE_EXIT2;
514 		shmexit(vm);
515 
516 		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
517 		vm_map_lock(&vm->vm_map);
518 		cpu_vmspace_free(vm);
519 
520 		/*
521 		 * Lock the map, to wait out all other references to it.
522 		 * Delete all of the mappings and pages they hold, then call
523 		 * the pmap module to reclaim anything left.
524 		 */
525 		vm_map_delete(&vm->vm_map,
526 			      vm_map_min(&vm->vm_map),
527 			      vm_map_max(&vm->vm_map),
528 			      &count);
529 		vm_map_unlock(&vm->vm_map);
530 		vm_map_entry_release(count);
531 
532 		pmap_release(vmspace_pmap(vm));
533 		lwkt_reltoken(&vm->vm_map.token);
534 		objcache_put(vmspace_cache, vm);
535 	}
536 }
537 
538 /*
539  * Swap usage is determined by taking the proportional swap used by
540  * VM objects backing the VM map.  To make up for fractional losses,
541  * if the VM object has any swap use at all the associated map entries
542  * count for at least 1 swap page.
543  *
544  * No requirements.
545  */
546 vm_offset_t
547 vmspace_swap_count(struct vmspace *vm)
548 {
549 	vm_map_t map = &vm->vm_map;
550 	vm_map_entry_t cur;
551 	vm_object_t object;
552 	vm_offset_t count = 0;
553 	vm_offset_t n;
554 
555 	vmspace_hold(vm);
556 
557 	RB_FOREACH(cur, vm_map_rb_tree, &map->rb_root) {
558 		switch(cur->maptype) {
559 		case VM_MAPTYPE_NORMAL:
560 		case VM_MAPTYPE_VPAGETABLE:
561 			if ((object = cur->ba.object) == NULL)
562 				break;
563 			if (object->swblock_count) {
564 				n = (cur->ba.end - cur->ba.start) / PAGE_SIZE;
565 				count += object->swblock_count *
566 				    SWAP_META_PAGES * n / object->size + 1;
567 			}
568 			break;
569 		default:
570 			break;
571 		}
572 	}
573 	vmspace_drop(vm);
574 
575 	return(count);
576 }
577 
578 /*
579  * Calculate the approximate number of anonymous pages in use by
580  * this vmspace.  To make up for fractional losses, we count each
581  * VM object as having at least 1 anonymous page.
582  *
583  * No requirements.
584  */
585 vm_offset_t
586 vmspace_anonymous_count(struct vmspace *vm)
587 {
588 	vm_map_t map = &vm->vm_map;
589 	vm_map_entry_t cur;
590 	vm_object_t object;
591 	vm_offset_t count = 0;
592 
593 	vmspace_hold(vm);
594 	RB_FOREACH(cur, vm_map_rb_tree, &map->rb_root) {
595 		switch(cur->maptype) {
596 		case VM_MAPTYPE_NORMAL:
597 		case VM_MAPTYPE_VPAGETABLE:
598 			if ((object = cur->ba.object) == NULL)
599 				break;
600 			if (object->type != OBJT_DEFAULT &&
601 			    object->type != OBJT_SWAP) {
602 				break;
603 			}
604 			count += object->resident_page_count;
605 			break;
606 		default:
607 			break;
608 		}
609 	}
610 	vmspace_drop(vm);
611 
612 	return(count);
613 }
614 
615 /*
616  * Initialize an existing vm_map structure such as that in the vmspace
617  * structure.  The pmap is initialized elsewhere.
618  *
619  * No requirements.
620  */
621 void
622 vm_map_init(struct vm_map *map, vm_offset_t min_addr, vm_offset_t max_addr,
623 	    pmap_t pmap)
624 {
625 	RB_INIT(&map->rb_root);
626 	spin_init(&map->ilock_spin, "ilock");
627 	map->ilock_base = NULL;
628 	map->nentries = 0;
629 	map->size = 0;
630 	map->system_map = 0;
631 	vm_map_min(map) = min_addr;
632 	vm_map_max(map) = max_addr;
633 	map->pmap = pmap;
634 	map->timestamp = 0;
635 	map->flags = 0;
636 	bzero(&map->freehint, sizeof(map->freehint));
637 	lwkt_token_init(&map->token, "vm_map");
638 	lockinit(&map->lock, "vm_maplk", (hz + 9) / 10, 0);
639 }
640 
641 /*
642  * Find the first possible free address for the specified request length.
643  * Returns 0 if we don't have one cached.
644  */
645 static
646 vm_offset_t
647 vm_map_freehint_find(vm_map_t map, vm_size_t length, vm_size_t align)
648 {
649 	vm_map_freehint_t *scan;
650 
651 	scan = &map->freehint[0];
652 	while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
653 		if (scan->length == length && scan->align == align)
654 			return(scan->start);
655 		++scan;
656 	}
657 	return 0;
658 }
659 
660 /*
661  * Unconditionally set the freehint.  Called by vm_map_findspace() after
662  * it finds an address.  This will help us iterate optimally on the next
663  * similar findspace.
664  */
665 static
666 void
667 vm_map_freehint_update(vm_map_t map, vm_offset_t start,
668 		       vm_size_t length, vm_size_t align)
669 {
670 	vm_map_freehint_t *scan;
671 
672 	scan = &map->freehint[0];
673 	while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
674 		if (scan->length == length && scan->align == align) {
675 			scan->start = start;
676 			return;
677 		}
678 		++scan;
679 	}
680 	scan = &map->freehint[map->freehint_newindex & VM_MAP_FFMASK];
681 	scan->start = start;
682 	scan->align = align;
683 	scan->length = length;
684 	++map->freehint_newindex;
685 }
686 
687 /*
688  * Update any existing freehints (for any alignment), for the hole we just
689  * added.
690  */
691 static
692 void
693 vm_map_freehint_hole(vm_map_t map, vm_offset_t start, vm_size_t length)
694 {
695 	vm_map_freehint_t *scan;
696 
697 	scan = &map->freehint[0];
698 	while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
699 		if (scan->length <= length && scan->start > start)
700 			scan->start = start;
701 		++scan;
702 	}
703 }
704 
705 /*
706  * This function handles MAP_ENTRY_NEEDS_COPY by inserting a fronting
707  * object in the entry for COW faults.
708  *
709  * The entire chain including entry->ba (prior to inserting the fronting
710  * object) essentially becomes set in stone... elements of it can be paged
711  * in or out, but cannot be further modified.
712  *
713  * NOTE: If we do not optimize the backing chain then a unique copy is not
714  *	 needed.  Note, however, that because portions of the chain are
715  *	 shared across pmaps we cannot make any changes to the vm_map_backing
716  *	 elements themselves.
717  *
718  * If the map segment is governed by a virtual page table then it is
719  * possible to address offsets beyond the mapped area.  Just allocate
720  * a maximally sized object for this case.
721  *
722  * If addref is non-zero an additional reference is added to the returned
723  * entry.  This mechanic exists because the additional reference might have
724  * to be added atomically and not after return to prevent a premature
725  * collapse.  XXX currently there is no collapse code.
726  *
727  * The vm_map must be exclusively locked.
728  * No other requirements.
729  */
730 static
731 void
732 vm_map_entry_shadow(vm_map_entry_t entry)
733 {
734 	vm_map_backing_t ba;
735 	vm_size_t length;
736 	vm_object_t source;
737 	vm_object_t result;
738 
739 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
740 		length = 0x7FFFFFFF;
741 	else
742 		length = atop(entry->ba.end - entry->ba.start);
743 
744 	/*
745 	 * Don't create the new object if the old object isn't shared.
746 	 * This case occurs quite often when programs fork/exec/wait.
747 	 *
748 	 * Caller ensures source exists (all backing_ba's must have objects),
749 	 * typically indirectly by virtue of the NEEDS_COPY flag being set.
750 	 * We have a ref on source by virtue of the entry and do not need
751 	 * to lock it to do this test.
752 	 */
753 	source = entry->ba.object;
754 	KKASSERT(source);
755 
756 	if (source->type != OBJT_VNODE) {
757 		if (source->ref_count == 1 &&
758 		    source->handle == NULL &&
759 		    (source->type == OBJT_DEFAULT ||
760 		     source->type == OBJT_SWAP)) {
761 			goto done;
762 		}
763 	}
764 	ba = kmalloc(sizeof(*ba), M_MAP_BACKING, M_INTWAIT); /* copied later */
765 	vm_object_hold_shared(source);
766 
767 	/*
768 	 * Once it becomes part of a backing_ba chain it can wind up anywhere,
769 	 * drop the ONEMAPPING flag now.
770 	 */
771 	vm_object_clear_flag(source, OBJ_ONEMAPPING);
772 
773 	/*
774 	 * Allocate a new object with the given length.  The new object
775 	 * is returned referenced but we may have to add another one.
776 	 * If we are adding a second reference we must clear OBJ_ONEMAPPING.
777 	 * (typically because the caller is about to clone a vm_map_entry).
778 	 *
779 	 * The source object currently has an extra reference to prevent
780 	 * collapses into it while we mess with its shadow list, which
781 	 * we will remove later in this routine.
782 	 *
783 	 * The target object may require a second reference if asked for one
784 	 * by the caller.
785 	 */
786 	result = vm_object_allocate_hold(OBJT_DEFAULT, length);
787 	if (result == NULL)
788 		panic("vm_object_shadow: no object for shadowing");
789 
790 	/*
791 	 * The new object shadows the source object.
792 	 *
793 	 * Try to optimize the result object's page color when shadowing
794 	 * in order to maintain page coloring consistency in the combined
795 	 * shadowed object.
796 	 *
797 	 * The source object is moved to ba, retaining its existing ref-count.
798 	 * No additional ref is needed.
799 	 *
800 	 * SHADOWING IS NOT APPLICABLE TO OBJT_VNODE OBJECTS
801 	 */
802 	vm_map_backing_detach(&entry->ba);
803 	*ba = entry->ba;		/* previous ba */
804 	entry->ba.object = result;	/* new ba (at head of entry) */
805 	entry->ba.backing_ba = ba;
806 	entry->ba.backing_count = ba->backing_count + 1;
807 	entry->ba.offset = 0;
808 
809 	/* cpu localization twist */
810 	result->pg_color = vm_quickcolor();
811 
812 	vm_map_backing_attach(&entry->ba);
813 	vm_map_backing_attach(ba);
814 
815 	/*
816 	 * Drop our holds on the result and source objects before
817 	 * returning.
818 	 */
819 	vm_object_drop(result);
820 	vm_object_drop(source);
821 done:
822 	entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
823 }
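/*
 * A rough sketch of what vm_map_entry_shadow() does to an entry's backing
 * chain when a copy is actually needed (simplest case shown; any existing
 * backing chain simply moves down one level):
 *
 *   before:  entry->ba { object = source, offset = X, backing_ba = NULL }
 *
 *   after:   entry->ba { object = new OBJT_DEFAULT, offset = 0 }
 *		  backing_ba -> { object = source, offset = X }
 *
 * The new anonymous object absorbs future COW faults for this entry while
 * the original source object (and anything behind it) is left untouched.
 */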
824 
825 /*
826  * Allocate an object for a vm_map_entry.
827  *
828  * Object allocation for anonymous mappings is deferred as long as possible.
829  * This function is called when we can defer no longer, generally when a map
830  * entry might be split or forked or takes a page fault.
831  *
832  * If the map segment is governed by a virtual page table then it is
833  * possible to address offsets beyond the mapped area.  Just allocate
834  * a maximally sized object for this case.
835  *
836  * The vm_map must be exclusively locked.
837  * No other requirements.
838  */
839 void
840 vm_map_entry_allocate_object(vm_map_entry_t entry)
841 {
842 	vm_object_t obj;
843 
844 	/*
845 	 * ba.offset is NOT cumulatively added in the backing_ba scan like
846 	 * it was in the old object chain, so we can assign whatever offset
847 	 * we like to the new object.
848 	 *
849 	 * For now assign a value of 0 to make debugging object sizes
850 	 * easier.
851 	 */
852 	entry->ba.offset = 0;
853 
854 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
855 		/* XXX */
856 		obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF);
857 	} else {
858 		obj = vm_object_allocate(OBJT_DEFAULT,
859 					 atop(entry->ba.end - entry->ba.start) +
860 					 entry->ba.offset);
861 	}
862 	entry->ba.object = obj;
863 	vm_map_backing_attach(&entry->ba);
864 }
865 
866 /*
867  * Set an initial negative count so the first attempt to reserve
868  * space preloads a bunch of vm_map_entry's for this cpu.  Also
869  * pre-allocate 2 vm_map_entries which will be needed by zalloc() to
870  * map a new page for vm_map_entry structures.  SMP systems are
871  * particularly sensitive.
872  *
873  * This routine is called in early boot so we cannot just call
874  * vm_map_entry_reserve().
875  *
876  * Called from the low level boot code only (for each cpu)
877  *
878  * WARNING! Take care not to have too-big a static/BSS structure here
879  *	    as MAXCPU can be 256+, otherwise the loader's 64MB heap
880  *	    can get blown out by the kernel plus the initrd image.
881  */
882 void
883 vm_map_entry_reserve_cpu_init(globaldata_t gd)
884 {
885 	vm_map_entry_t entry;
886 	int count;
887 	int i;
888 
889 	atomic_add_int(&gd->gd_vme_avail, -MAP_RESERVE_COUNT * 2);
890 	if (gd->gd_cpuid == 0) {
891 		entry = &cpu_map_entry_init_bsp[0];
892 		count = MAPENTRYBSP_CACHE;
893 	} else {
894 		entry = &cpu_map_entry_init_ap[gd->gd_cpuid][0];
895 		count = MAPENTRYAP_CACHE;
896 	}
897 	for (i = 0; i < count; ++i, ++entry) {
898 		MAPENT_FREELIST(entry) = gd->gd_vme_base;
899 		gd->gd_vme_base = entry;
900 	}
901 }
902 
903 /*
904  * Reserves vm_map_entry structures so that code later on can manipulate
905  * map_entry structures within a locked map without blocking on the
906  * allocation of a new vm_map_entry.
907  *
908  * No requirements.
909  *
910  * WARNING!  We must not decrement gd_vme_avail until after we have
911  *	     ensured that sufficient entries exist, otherwise we can
912  *	     get into an endless call recursion in the zalloc code
913  *	     itself.
914  */
915 int
916 vm_map_entry_reserve(int count)
917 {
918 	struct globaldata *gd = mycpu;
919 	vm_map_entry_t entry;
920 
921 	/*
922 	 * Make sure we have enough structures in gd_vme_base to handle
923 	 * the reservation request.
924 	 *
925 	 * Use a critical section to protect against VM faults.  It might
926 	 * not be needed, but we have to be careful here.
927 	 */
928 	if (gd->gd_vme_avail < count) {
929 		crit_enter();
930 		while (gd->gd_vme_avail < count) {
931 			entry = zalloc(mapentzone);
932 			MAPENT_FREELIST(entry) = gd->gd_vme_base;
933 			gd->gd_vme_base = entry;
934 			atomic_add_int(&gd->gd_vme_avail, 1);
935 		}
936 		crit_exit();
937 	}
938 	atomic_add_int(&gd->gd_vme_avail, -count);
939 
940 	return(count);
941 }
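/*
 * Typical usage pattern for the reservation API (see vmspace_terminate()
 * and vm_map_find() in this file for real examples):
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	...clip/insert/delete entries, passing &count down...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */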
942 
943 /*
944  * Releases previously reserved vm_map_entry structures that were not
945  * used.  If we have too much junk in our per-cpu cache clean some of
946  * it out.
947  *
948  * No requirements.
949  */
950 void
951 vm_map_entry_release(int count)
952 {
953 	struct globaldata *gd = mycpu;
954 	vm_map_entry_t entry;
955 	vm_map_entry_t efree;
956 
957 	count = atomic_fetchadd_int(&gd->gd_vme_avail, count) + count;
958 	if (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
959 		efree = NULL;
960 		crit_enter();
961 		while (gd->gd_vme_avail > MAP_RESERVE_HYST) {
962 			entry = gd->gd_vme_base;
963 			KKASSERT(entry != NULL);
964 			gd->gd_vme_base = MAPENT_FREELIST(entry);
965 			atomic_add_int(&gd->gd_vme_avail, -1);
966 			MAPENT_FREELIST(entry) = efree;
967 			efree = entry;
968 		}
969 		crit_exit();
970 		while ((entry = efree) != NULL) {
971 			efree = MAPENT_FREELIST(efree);
972 			zfree(mapentzone, entry);
973 		}
974 	}
975 }
976 
977 /*
978  * Reserve map entry structures for use in kernel_map itself.  These
979  * entries have *ALREADY* been reserved on a per-cpu basis when the map
980  * was inited.  This function is used by zalloc() to avoid a recursion
981  * when zalloc() itself needs to allocate additional kernel memory.
982  *
983  * This function works like the normal reserve but does not load the
984  * vm_map_entry cache (because that would result in an infinite
985  * recursion).  Note that gd_vme_avail may go negative.  This is expected.
986  *
987  * Any caller of this function must be sure to renormalize after
988  * potentially eating entries to ensure that the reserve supply
989  * remains intact.
990  *
991  * No requirements.
992  */
993 int
994 vm_map_entry_kreserve(int count)
995 {
996 	struct globaldata *gd = mycpu;
997 
998 	atomic_add_int(&gd->gd_vme_avail, -count);
999 	KASSERT(gd->gd_vme_base != NULL,
1000 		("no reserved entries left, gd_vme_avail = %d",
1001 		gd->gd_vme_avail));
1002 	return(count);
1003 }
1004 
1005 /*
1006  * Release previously reserved map entries for kernel_map.  We do not
1007  * attempt to clean up like the normal release function as this would
1008  * cause an unnecessary (but probably not fatal) deep procedure call.
1009  *
1010  * No requirements.
1011  */
1012 void
1013 vm_map_entry_krelease(int count)
1014 {
1015 	struct globaldata *gd = mycpu;
1016 
1017 	atomic_add_int(&gd->gd_vme_avail, count);
1018 }
1019 
1020 /*
1021  * Allocates a VM map entry for insertion.  No entry fields are filled in.
1022  *
1023  * The entries should have previously been reserved.  The reservation count
1024  * is tracked in (*countp).
1025  *
1026  * No requirements.
1027  */
1028 static vm_map_entry_t
1029 vm_map_entry_create(int *countp)
1030 {
1031 	struct globaldata *gd = mycpu;
1032 	vm_map_entry_t entry;
1033 
1034 	KKASSERT(*countp > 0);
1035 	--*countp;
1036 	crit_enter();
1037 	entry = gd->gd_vme_base;
1038 	KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
1039 	gd->gd_vme_base = MAPENT_FREELIST(entry);
1040 	crit_exit();
1041 
1042 	return(entry);
1043 }
1044 
1045 /*
1046  * Attach a vm_map_backing structure to its vm_object's backing_list.
1047  */
1048 static void
1049 vm_map_backing_attach(vm_map_backing_t ba)
1050 {
1051 	vm_object_t obj = ba->object;
1052 
1053 	lockmgr(&obj->backing_lk, LK_EXCLUSIVE);
1054 	TAILQ_INSERT_TAIL(&obj->backing_list, ba, entry);
1055 	lockmgr(&obj->backing_lk, LK_RELEASE);
1056 }
1057 
1058 static void
1059 vm_map_backing_detach(vm_map_backing_t ba)
1060 {
1061 	vm_object_t obj = ba->object;
1062 
1063 	lockmgr(&obj->backing_lk, LK_EXCLUSIVE);
1064 	TAILQ_REMOVE(&obj->backing_list, ba, entry);
1065 	lockmgr(&obj->backing_lk, LK_RELEASE);
1066 }
1067 
1068 /*
1069  * Dispose of the dynamically allocated backing_ba chain associated
1070  * with a vm_map_entry.
1071  *
1072  * We decrement the (possibly shared) element and kfree() on the
1073  * 1->0 transition.  We only iterate to the next backing_ba when
1074  * the previous one went through a 1->0 transition.
1075  */
1076 static void
1077 vm_map_entry_dispose_ba(vm_map_backing_t ba)
1078 {
1079 	vm_map_backing_t next;
1080 
1081 	while (ba) {
1082 		if (ba->object) {
1083 			vm_map_backing_detach(ba);
1084 			vm_object_deallocate(ba->object);
1085 		}
1086 		next = ba->backing_ba;
1087 		kfree(ba, M_MAP_BACKING);
1088 		ba = next;
1089 	}
1090 }
1091 
1092 /*
1093  * Dispose of a vm_map_entry that is no longer being referenced.
1094  *
1095  * No requirements.
1096  */
1097 static void
1098 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
1099 {
1100 	struct globaldata *gd = mycpu;
1101 
1102 	/*
1103 	 * Dispose of the base object and the backing link.
1104 	 */
1105 	switch(entry->maptype) {
1106 	case VM_MAPTYPE_NORMAL:
1107 	case VM_MAPTYPE_VPAGETABLE:
1108 		if (entry->ba.object) {
1109 			vm_map_backing_detach(&entry->ba);
1110 			vm_object_deallocate(entry->ba.object);
1111 		}
1112 		break;
1113 	case VM_MAPTYPE_SUBMAP:
1114 	case VM_MAPTYPE_UKSMAP:
1115 		/* XXX TODO */
1116 		break;
1117 	default:
1118 		break;
1119 	}
1120 	vm_map_entry_dispose_ba(entry->ba.backing_ba);
1121 
1122 	/*
1123 	 * Cleanup for safety.
1124 	 */
1125 	entry->ba.backing_ba = NULL;
1126 	entry->ba.object = NULL;
1127 	entry->ba.offset = 0;
1128 
1129 	++*countp;
1130 	crit_enter();
1131 	MAPENT_FREELIST(entry) = gd->gd_vme_base;
1132 	gd->gd_vme_base = entry;
1133 	crit_exit();
1134 }
1135 
1136 
1137 /*
1138  * Insert/remove entries from maps.
1139  *
1140  * The related map must be exclusively locked.
1141  * The caller must hold map->token
1142  * No other requirements.
1143  */
1144 static __inline void
1145 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
1146 {
1147 	ASSERT_VM_MAP_LOCKED(map);
1148 
1149 	map->nentries++;
1150 	if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry))
1151 		panic("vm_map_entry_link: dup addr map %p ent %p", map, entry);
1152 }
1153 
1154 static __inline void
1155 vm_map_entry_unlink(vm_map_t map,
1156 		    vm_map_entry_t entry)
1157 {
1158 	ASSERT_VM_MAP_LOCKED(map);
1159 
1160 	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1161 		panic("vm_map_entry_unlink: attempt to mess with "
1162 		      "locked entry! %p", entry);
1163 	}
1164 	vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry);
1165 	map->nentries--;
1166 }
1167 
1168 /*
1169  * Finds the map entry containing (or immediately preceding) the specified
1170  * address in the given map.  The entry is returned in (*entry).
1171  *
1172  * The boolean result indicates whether the address is actually contained
1173  * in the map.
1174  *
1175  * The related map must be locked.
1176  * No other requirements.
1177  */
1178 boolean_t
1179 vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry)
1180 {
1181 	vm_map_entry_t tmp;
1182 	vm_map_entry_t last;
1183 
1184 	ASSERT_VM_MAP_LOCKED(map);
1185 
1186 	/*
1187 	 * Locate the record from the top of the tree.  'last' tracks the
1188 	 * closest prior record and is returned if no match is found, which
1189 	 * in binary tree terms means tracking the most recent right-branch
1190 	 * taken.  If there is no prior record, *entry is set to NULL.
1191 	 */
1192 	last = NULL;
1193 	tmp = RB_ROOT(&map->rb_root);
1194 
1195 	while (tmp) {
1196 		if (address >= tmp->ba.start) {
1197 			if (address < tmp->ba.end) {
1198 				*entry = tmp;
1199 				return(TRUE);
1200 			}
1201 			last = tmp;
1202 			tmp = RB_RIGHT(tmp, rb_entry);
1203 		} else {
1204 			tmp = RB_LEFT(tmp, rb_entry);
1205 		}
1206 	}
1207 	*entry = last;
1208 	return (FALSE);
1209 }
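/*
 * Example of the lookup semantics: given entries covering [0x1000,0x2000)
 * and [0x4000,0x5000),
 *
 *	vm_map_lookup_entry(map, 0x1800, &e)  -> TRUE,  *e = [0x1000,0x2000)
 *	vm_map_lookup_entry(map, 0x3000, &e)  -> FALSE, *e = [0x1000,0x2000)
 *	vm_map_lookup_entry(map, 0x0800, &e)  -> FALSE, *e = NULL
 */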
1210 
1211 /*
1212  * Inserts the given whole VM object into the target map at the specified
1213  * address range.  The object's size should match that of the address range.
1214  *
1215  * The map must be exclusively locked.
1216  * The object must be held.
1217  * The caller must have reserved sufficient vm_map_entry structures.
1218  *
1219  * If object is non-NULL, its ref count must be bumped by the caller prior
1220  * to making the call, to account for the new entry.  XXX API is a bit messy.
1221  */
1222 int
1223 vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux,
1224 	      vm_ooffset_t offset, vm_offset_t start, vm_offset_t end,
1225 	      vm_maptype_t maptype, vm_subsys_t id,
1226 	      vm_prot_t prot, vm_prot_t max, int cow)
1227 {
1228 	vm_map_entry_t new_entry;
1229 	vm_map_entry_t prev_entry;
1230 	vm_map_entry_t next;
1231 	vm_map_entry_t temp_entry;
1232 	vm_eflags_t protoeflags;
1233 	vm_object_t object;
1234 	int must_drop = 0;
1235 
1236 	if (maptype == VM_MAPTYPE_UKSMAP)
1237 		object = NULL;
1238 	else
1239 		object = map_object;
1240 
1241 	ASSERT_VM_MAP_LOCKED(map);
1242 	if (object)
1243 		ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1244 
1245 	/*
1246 	 * Check that the start and end points are not bogus.
1247 	 */
1248 	if ((start < vm_map_min(map)) || (end > vm_map_max(map)) ||
1249 	    (start >= end)) {
1250 		return (KERN_INVALID_ADDRESS);
1251 	}
1252 
1253 	/*
1254 	 * Find the entry prior to the proposed starting address; if it's part
1255 	 * of an existing entry, this range is bogus.
1256 	 */
1257 	if (vm_map_lookup_entry(map, start, &temp_entry))
1258 		return (KERN_NO_SPACE);
1259 	prev_entry = temp_entry;
1260 
1261 	/*
1262 	 * Assert that the next entry doesn't overlap the end point.
1263 	 */
1264 	if (prev_entry)
1265 		next = vm_map_rb_tree_RB_NEXT(prev_entry);
1266 	else
1267 		next = RB_MIN(vm_map_rb_tree, &map->rb_root);
1268 	if (next && next->ba.start < end)
1269 		return (KERN_NO_SPACE);
1270 
1271 	protoeflags = 0;
1272 
1273 	if (cow & MAP_COPY_ON_WRITE)
1274 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
1275 
1276 	if (cow & MAP_NOFAULT) {
1277 		protoeflags |= MAP_ENTRY_NOFAULT;
1278 
1279 		KASSERT(object == NULL,
1280 			("vm_map_insert: paradoxical MAP_NOFAULT request"));
1281 	}
1282 	if (cow & MAP_DISABLE_SYNCER)
1283 		protoeflags |= MAP_ENTRY_NOSYNC;
1284 	if (cow & MAP_DISABLE_COREDUMP)
1285 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
1286 	if (cow & MAP_IS_STACK)
1287 		protoeflags |= MAP_ENTRY_STACK;
1288 	if (cow & MAP_IS_KSTACK)
1289 		protoeflags |= MAP_ENTRY_KSTACK;
1290 
1291 	lwkt_gettoken(&map->token);
1292 
1293 	if (object) {
1294 		;
1295 	} else if (prev_entry &&
1296 		 (prev_entry->eflags == protoeflags) &&
1297 		 (prev_entry->ba.end == start) &&
1298 		 (prev_entry->wired_count == 0) &&
1299 		 (prev_entry->id == id) &&
1300 		 prev_entry->maptype == maptype &&
1301 		 maptype == VM_MAPTYPE_NORMAL &&
1302 		 prev_entry->ba.backing_ba == NULL &&	/* not backed */
1303 		 ((prev_entry->ba.object == NULL) ||
1304 		  vm_object_coalesce(prev_entry->ba.object,
1305 				     OFF_TO_IDX(prev_entry->ba.offset),
1306 				     (vm_size_t)(prev_entry->ba.end - prev_entry->ba.start),
1307 				     (vm_size_t)(end - prev_entry->ba.end)))) {
1308 		/*
1309 		 * We were able to extend the object.  Determine if we
1310 		 * can extend the previous map entry to include the
1311 		 * new range as well.
1312 		 */
1313 		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
1314 		    (prev_entry->protection == prot) &&
1315 		    (prev_entry->max_protection == max)) {
1316 			map->size += (end - prev_entry->ba.end);
1317 			vm_map_backing_adjust_end(prev_entry, end);
1318 			vm_map_simplify_entry(map, prev_entry, countp);
1319 			lwkt_reltoken(&map->token);
1320 			return (KERN_SUCCESS);
1321 		}
1322 
1323 		/*
1324 		 * If we can extend the object but cannot extend the
1325 		 * map entry, we have to create a new map entry.  We
1326 		 * must bump the ref count on the extended object to
1327 		 * account for it.  object may be NULL.
1328 		 */
1329 		object = prev_entry->ba.object;
1330 		offset = prev_entry->ba.offset +
1331 			(prev_entry->ba.end - prev_entry->ba.start);
1332 		if (object) {
1333 			vm_object_hold(object);
1334 			vm_object_lock_swap(); /* map->token order */
1335 			vm_object_reference_locked(object);
1336 			map_object = object;
1337 			must_drop = 1;
1338 		}
1339 	}
1340 
1341 	/*
1342 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
1343 	 * in things like the buffer map where we manage kva but do not manage
1344 	 * backing objects.
1345 	 */
1346 
1347 	/*
1348 	 * Create a new entry
1349 	 */
1350 	new_entry = vm_map_entry_create(countp);
1351 	new_entry->ba.pmap = map->pmap;
1352 	new_entry->ba.start = start;
1353 	new_entry->ba.end = end;
1354 	new_entry->id = id;
1355 
1356 	new_entry->maptype = maptype;
1357 	new_entry->eflags = protoeflags;
1358 	new_entry->aux.master_pde = 0;		/* in case size is different */
1359 	new_entry->aux.map_aux = map_aux;
1360 	new_entry->ba.map_object = map_object;
1361 	new_entry->ba.backing_ba = NULL;
1362 	new_entry->ba.backing_count = 0;
1363 	new_entry->ba.offset = offset;
1364 	new_entry->ba.flags = 0;
1365 	new_entry->ba.pmap = map->pmap;
1366 
1367 	new_entry->inheritance = VM_INHERIT_DEFAULT;
1368 	new_entry->protection = prot;
1369 	new_entry->max_protection = max;
1370 	new_entry->wired_count = 0;
1371 
1372 	/*
1373 	 * Insert the new entry into the list
1374 	 */
1375 	vm_map_backing_replicated(map, new_entry, MAP_BACK_BASEOBJREFD);
1376 	vm_map_entry_link(map, new_entry);
1377 	map->size += new_entry->ba.end - new_entry->ba.start;
1378 
1379 	/*
1380 	 * Don't worry about updating freehint[] when inserting, allow
1381 	 * addresses to be lower than the actual first free spot.
1382 	 */
1383 #if 0
1384 	/*
1385 	 * Temporarily removed to avoid MAP_STACK panic, due to
1386 	 * MAP_STACK being a huge hack.  Will be added back in
1387 	 * when MAP_STACK (and the user stack mapping) is fixed.
1388 	 */
1389 	/*
1390 	 * It may be possible to simplify the entry
1391 	 */
1392 	vm_map_simplify_entry(map, new_entry, countp);
1393 #endif
1394 
1395 	/*
1396 	 * Try to pre-populate the page table.  Mappings governed by virtual
1397 	 * page tables cannot be prepopulated without a lot of work, so
1398 	 * don't try.
1399 	 */
1400 	if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
1401 	    maptype != VM_MAPTYPE_VPAGETABLE &&
1402 	    maptype != VM_MAPTYPE_UKSMAP) {
1403 		int dorelock = 0;
1404 		if (vm_map_relock_enable && (cow & MAP_PREFAULT_RELOCK)) {
1405 			dorelock = 1;
1406 			vm_object_lock_swap();
1407 			vm_object_drop(object);
1408 		}
1409 		pmap_object_init_pt(map->pmap, new_entry,
1410 				    new_entry->ba.start,
1411 				    new_entry->ba.end - new_entry->ba.start,
1412 				    cow & MAP_PREFAULT_PARTIAL);
1413 		if (dorelock) {
1414 			vm_object_hold(object);
1415 			vm_object_lock_swap();
1416 		}
1417 	}
1418 	lwkt_reltoken(&map->token);
1419 	if (must_drop)
1420 		vm_object_drop(object);
1421 
1422 	return (KERN_SUCCESS);
1423 }
1424 
1425 /*
1426  * Find sufficient space for `length' bytes in the given map, starting at
1427  * `start'.  Returns 0 on success, 1 on no space.
1428  *
1429  * This function will return an arbitrarily aligned pointer.  If no
1430  * particular alignment is required you should pass align as 1.  Note that
1431  * the map may return PAGE_SIZE aligned pointers if all the lengths used in
1432  * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
1433  * argument.
1434  *
1435  * 'align' should be a power of 2 but is not required to be.
1436  *
1437  * The map must be exclusively locked.
1438  * No other requirements.
1439  */
1440 int
1441 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1442 		 vm_size_t align, int flags, vm_offset_t *addr)
1443 {
1444 	vm_map_entry_t entry;
1445 	vm_map_entry_t tmp;
1446 	vm_offset_t hole_start;
1447 	vm_offset_t end;
1448 	vm_offset_t align_mask;
1449 
1450 	if (start < vm_map_min(map))
1451 		start = vm_map_min(map);
1452 	if (start > vm_map_max(map))
1453 		return (1);
1454 
1455 	/*
1456 	 * If the alignment is not a power of 2 we will have to use
1457 	 * a mod/division, set align_mask to a special value.
1458 	 */
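	/*
	 * For example, align = 0x1000 gives (0x1000 | 0xfff) + 1 == 0x2000
	 * == (0x1000 << 1), so the cheap mask path below is taken with
	 * align_mask = 0xfff, while align = 24 gives (24 | 23) + 1 == 32
	 * != 48 and forces the slower roundup() path.
	 */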
1459 	if ((align | (align - 1)) + 1 != (align << 1))
1460 		align_mask = (vm_offset_t)-1;
1461 	else
1462 		align_mask = align - 1;
1463 
1464 	/*
1465 	 * Use freehint to adjust the start point, hopefully reducing
1466 	 * the iteration to O(1).
1467 	 */
1468 	hole_start = vm_map_freehint_find(map, length, align);
1469 	if (start < hole_start)
1470 		start = hole_start;
1471 	if (vm_map_lookup_entry(map, start, &tmp))
1472 		start = tmp->ba.end;
1473 	entry = tmp;	/* may be NULL */
1474 
1475 	/*
1476 	 * Look through the rest of the map, trying to fit a new region in the
1477 	 * gap between existing regions, or after the very last region.
1478 	 */
1479 	for (;;) {
1480 		/*
1481 		 * Adjust the proposed start by the requested alignment,
1482 		 * Adjust the proposed start by the requested alignment and
1483 		 * be sure that we didn't wrap the address.
1484 		if (align_mask == (vm_offset_t)-1)
1485 			end = roundup(start, align);
1486 		else
1487 			end = (start + align_mask) & ~align_mask;
1488 		if (end < start)
1489 			return (1);
1490 		start = end;
1491 
1492 		/*
1493 		 * Find the end of the proposed new region.  Be sure we didn't
1494 		 * go beyond the end of the map, or wrap around the address.
1495 		 * Then check to see if this is the last entry or if the
1496 		 * proposed end fits in the gap between this and the next
1497 		 * entry.
1498 		 */
1499 		end = start + length;
1500 		if (end > vm_map_max(map) || end < start)
1501 			return (1);
1502 
1503 		/*
1504 		 * Locate the next entry; we can stop if this is the
1505 		 * last entry (we know we are in-bounds so that would
1506 		 * be a success).
1507 		 */
1508 		if (entry)
1509 			entry = vm_map_rb_tree_RB_NEXT(entry);
1510 		else
1511 			entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
1512 		if (entry == NULL)
1513 			break;
1514 
1515 		/*
1516 		 * Determine if the proposed area would overlap the
1517 		 * next entry.
1518 		 *
1519 		 * When matching against a STACK entry, only allow the
1520 		 * memory map to intrude on the ungrown portion of the
1521 		 * STACK entry when MAP_TRYFIXED is set.
1522 		 */
1523 		if (entry->ba.start >= end) {
1524 			if ((entry->eflags & MAP_ENTRY_STACK) == 0)
1525 				break;
1526 			if (flags & MAP_TRYFIXED)
1527 				break;
1528 			if (entry->ba.start - entry->aux.avail_ssize >= end)
1529 				break;
1530 		}
1531 		start = entry->ba.end;
1532 	}
1533 
1534 	/*
1535 	 * Update the freehint
1536 	 */
1537 	vm_map_freehint_update(map, start, length, align);
1538 
1539 	/*
1540 	 * Grow the kernel_map if necessary.  pmap_growkernel() will panic
1541 	 * if it fails.  The kernel_map is locked and nothing can steal
1542 	 * our address space if pmap_growkernel() blocks.
1543 	 *
1544 	 * NOTE: This may be unconditionally called for kldload areas on
1545 	 *	 x86_64 because these do not bump kernel_vm_end (which would
1546 	 *	 fill 128G worth of page tables!).  Therefore we must not
1547 	 *	 retry.
1548 	 */
1549 	if (map == &kernel_map) {
1550 		vm_offset_t kstop;
1551 
1552 		kstop = round_page(start + length);
1553 		if (kstop > kernel_vm_end)
1554 			pmap_growkernel(start, kstop);
1555 	}
1556 	*addr = start;
1557 	return (0);
1558 }
1559 
1560 /*
1561  * vm_map_find finds an unallocated region in the target address map with
1562  * the given length and allocates it.  The search is defined to be first-fit
1563  * from the specified address; the region found is returned in the same
1564  * parameter.
1565  *
1566  * If object is non-NULL, its ref count must be bumped by the caller
1567  * prior to making the call, to account for the new entry.
1568  *
1569  * No requirements.  This function will lock the map temporarily.
1570  */
1571 int
1572 vm_map_find(vm_map_t map, void *map_object, void *map_aux,
1573 	    vm_ooffset_t offset, vm_offset_t *addr,
1574 	    vm_size_t length, vm_size_t align, boolean_t fitit,
1575 	    vm_maptype_t maptype, vm_subsys_t id,
1576 	    vm_prot_t prot, vm_prot_t max, int cow)
1577 {
1578 	vm_offset_t start;
1579 	vm_object_t object;
1580 	int result;
1581 	int count;
1582 
1583 	if (maptype == VM_MAPTYPE_UKSMAP)
1584 		object = NULL;
1585 	else
1586 		object = map_object;
1587 
1588 	start = *addr;
1589 
1590 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1591 	vm_map_lock(map);
1592 	if (object)
1593 		vm_object_hold_shared(object);
1594 	if (fitit) {
1595 		if (vm_map_findspace(map, start, length, align, 0, addr)) {
1596 			if (object)
1597 				vm_object_drop(object);
1598 			vm_map_unlock(map);
1599 			vm_map_entry_release(count);
1600 			return (KERN_NO_SPACE);
1601 		}
1602 		start = *addr;
1603 	}
1604 	result = vm_map_insert(map, &count, map_object, map_aux,
1605 			       offset, start, start + length,
1606 			       maptype, id, prot, max, cow);
1607 	if (object)
1608 		vm_object_drop(object);
1609 	vm_map_unlock(map);
1610 	vm_map_entry_release(count);
1611 
1612 	return (result);
1613 }
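/*
 * A hypothetical example of a fitted kernel allocation using the function
 * above (the subsystem id, protections and sizes are illustrative only):
 *
 *	vm_size_t size = 16 * PAGE_SIZE;
 *	vm_offset_t addr = vm_map_min(&kernel_map);
 *
 *	if (vm_map_find(&kernel_map, NULL, NULL, 0, &addr, size,
 *			PAGE_SIZE, TRUE, VM_MAPTYPE_NORMAL,
 *			VM_SUBSYS_UNKNOWN, VM_PROT_ALL, VM_PROT_ALL,
 *			0) != KERN_SUCCESS) {
 *		return (ENOMEM);
 *	}
 */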
1614 
1615 /*
1616  * Simplify the given map entry by merging with either neighbor.  This
1617  * routine also has the ability to merge with both neighbors.
1618  *
1619  * This routine guarantees that the passed entry remains valid (though
1620  * possibly extended).  When merging, this routine may delete one or
1621  * both neighbors.  No action is taken on entries which have their
1622  * in-transition flag set.
1623  *
1624  * The map must be exclusively locked.
1625  */
1626 void
1627 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
1628 {
1629 	vm_map_entry_t next, prev;
1630 	vm_size_t prevsize, esize;
1631 
1632 	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1633 		++mycpu->gd_cnt.v_intrans_coll;
1634 		return;
1635 	}
1636 
1637 	if (entry->maptype == VM_MAPTYPE_SUBMAP)
1638 		return;
1639 	if (entry->maptype == VM_MAPTYPE_UKSMAP)
1640 		return;
1641 
1642 	prev = vm_map_rb_tree_RB_PREV(entry);
1643 	if (prev) {
1644 		prevsize = prev->ba.end - prev->ba.start;
1645 		if ( (prev->ba.end == entry->ba.start) &&
1646 		     (prev->maptype == entry->maptype) &&
1647 		     (prev->ba.object == entry->ba.object) &&
1648 		     (prev->ba.backing_ba == entry->ba.backing_ba) &&
1649 		     (!prev->ba.object ||
1650 			(prev->ba.offset + prevsize == entry->ba.offset)) &&
1651 		     (prev->eflags == entry->eflags) &&
1652 		     (prev->protection == entry->protection) &&
1653 		     (prev->max_protection == entry->max_protection) &&
1654 		     (prev->inheritance == entry->inheritance) &&
1655 		     (prev->id == entry->id) &&
1656 		     (prev->wired_count == entry->wired_count)) {
1657 			/*
1658 			 * NOTE: order important.  Unlink before gumming up
1659 			 *	 the RBTREE w/adjust, adjust before disposal
1660 			 *	 of prior entry, to avoid pmap snafus.
1661 			 */
1662 			vm_map_entry_unlink(map, prev);
1663 			vm_map_backing_adjust_start(entry, prev->ba.start);
1664 			if (entry->ba.object == NULL)
1665 				entry->ba.offset = 0;
1666 			vm_map_entry_dispose(map, prev, countp);
1667 		}
1668 	}
1669 
1670 	next = vm_map_rb_tree_RB_NEXT(entry);
1671 	if (next) {
1672 		esize = entry->ba.end - entry->ba.start;
1673 		if ((entry->ba.end == next->ba.start) &&
1674 		    (next->maptype == entry->maptype) &&
1675 		    (next->ba.object == entry->ba.object) &&
1676 		    (next->ba.backing_ba == entry->ba.backing_ba) &&
1677 		    (!entry->ba.object ||
1678 			(entry->ba.offset + esize == next->ba.offset)) &&
1679 		    (next->eflags == entry->eflags) &&
1680 		    (next->protection == entry->protection) &&
1681 		    (next->max_protection == entry->max_protection) &&
1682 		    (next->inheritance == entry->inheritance) &&
1683 		    (next->id == entry->id) &&
1684 		    (next->wired_count == entry->wired_count)) {
1685 			/*
1686 			 * NOTE: order important.  Unlink before gumming up
1687 			 *	 the RBTREE w/adjust, adjust before disposal
1688 			 *	 of prior entry, to avoid pmap snafus.
1689 			 */
1690 			vm_map_entry_unlink(map, next);
1691 			vm_map_backing_adjust_end(entry, next->ba.end);
1692 			vm_map_entry_dispose(map, next, countp);
1693 	        }
1694 	}
1695 }
1696 
1697 /*
1698  * Asserts that the given entry begins at or after the specified address.
1699  * If necessary, it splits the entry into two.
1700  */
1701 #define vm_map_clip_start(map, entry, startaddr, countp)		\
1702 {									\
1703 	if (startaddr > entry->ba.start)				\
1704 		_vm_map_clip_start(map, entry, startaddr, countp);	\
1705 }
1706 
1707 /*
1708  * This routine is called only when it is known that the entry must be split.
1709  *
1710  * The map must be exclusively locked.
1711  */
1712 static void
1713 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start,
1714 		   int *countp)
1715 {
1716 	vm_map_entry_t new_entry;
1717 
1718 	/*
1719 	 * Split off the front portion -- note that we must insert the new
1720 	 * entry BEFORE this one, so that this entry has the specified
1721 	 * starting address.
1722 	 */
1723 
1724 	vm_map_simplify_entry(map, entry, countp);
1725 
1726 	/*
1727 	 * If there is no object backing this entry, we might as well create
1728 	 * one now.  If we defer it, an object can get created after the map
1729 	 * is clipped, and individual objects will be created for the split-up
1730 	 * map.  This is a bit of a hack, but is also about the best place to
1731 	 * put this improvement.
1732 	 */
1733 	if (entry->ba.object == NULL && !map->system_map &&
1734 	    VM_MAP_ENTRY_WITHIN_PARTITION(entry)) {
1735 		vm_map_entry_allocate_object(entry);
1736 	}
1737 
1738 	/*
1739 	 * NOTE: The replicated function will adjust start, end, and offset
1740 	 *	 for the remainder of the backing_ba linkages.  We must fixup
1741 	 *	 the embedded ba.
1742 	 */
1743 	new_entry = vm_map_entry_create(countp);
1744 	*new_entry = *entry;
1745 	new_entry->ba.end = start;
1746 
1747 	/*
1748 	 * Ordering is important: make sure the new entry is replicated
1749 	 * before we cut the existing entry.
1750 	 */
1751 	vm_map_backing_replicated(map, new_entry, MAP_BACK_CLIPPED);
1752 	vm_map_backing_adjust_start(entry, start);
1753 	vm_map_entry_link(map, new_entry);
1754 }
1755 
1756 /*
1757  * Asserts that the given entry ends at or before the specified address.
1758  * If necessary, it splits the entry into two.
1759  *
1760  * The map must be exclusively locked.
1761  */
1762 #define vm_map_clip_end(map, entry, endaddr, countp)		\
1763 {								\
1764 	if (endaddr < entry->ba.end)				\
1765 		_vm_map_clip_end(map, entry, endaddr, countp);	\
1766 }
1767 
1768 /*
1769  * This routine is called only when it is known that the entry must be split.
1770  *
1771  * The map must be exclusively locked.
1772  */
1773 static void
1774 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end,
1775 		 int *countp)
1776 {
1777 	vm_map_entry_t new_entry;
1778 
1779 	/*
1780 	 * If there is no object backing this entry, we might as well create
1781 	 * one now.  If we defer it, an object can get created after the map
1782 	 * is clipped, and individual objects will be created for the split-up
1783 	 * map.  This is a bit of a hack, but is also about the best place to
1784 	 * put this improvement.
1785 	 */
1786 
1787 	if (entry->ba.object == NULL && !map->system_map &&
1788 	    VM_MAP_ENTRY_WITHIN_PARTITION(entry)) {
1789 		vm_map_entry_allocate_object(entry);
1790 	}
1791 
1792 	/*
1793 	 * Create a new entry and insert it AFTER the specified entry
1794 	 *
1795 	 * NOTE: The replicated function will adjust start, end, and offset
1796 	 *	 for the remainder of the backing_ba linkages.  We must fixup
1797 	 *	 the embedded ba.
1798 	 */
1799 	new_entry = vm_map_entry_create(countp);
1800 	*new_entry = *entry;
1801 	new_entry->ba.start = end;
1802 	new_entry->ba.offset += (new_entry->ba.start - entry->ba.start);
1803 
1804 	/*
1805 	 * Ordering is important: make sure the new entry is replicated
1806 	 * before we cut the existing entry.
1807 	 */
1808 	vm_map_backing_replicated(map, new_entry, MAP_BACK_CLIPPED);
1809 	vm_map_backing_adjust_end(entry, end);
1810 	vm_map_entry_link(map, new_entry);
1811 }
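
/*
 * Illustrative sketch (simplified): a typical caller clips the first and
 * last entries covering [start, end) while holding the map exclusively
 * locked, roughly as vm_map_submap() and vm_map_protect() do below:
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start, &count);
 *	vm_map_clip_end(map, entry, end, &count);
 *	... operate on the clipped range ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */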
1812 
1813 /*
1814  * Asserts that the starting and ending region addresses fall within the
1815  * valid range for the map.
1816  */
1817 #define	VM_MAP_RANGE_CHECK(map, start, end)	\
1818 {						\
1819 	if (start < vm_map_min(map))		\
1820 		start = vm_map_min(map);	\
1821 	if (end > vm_map_max(map))		\
1822 		end = vm_map_max(map);		\
1823 	if (start > end)			\
1824 		start = end;			\
1825 }
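
/*
 * For example (illustrative): a request of [0, ~0UL) against a user map is
 * clamped to [vm_map_min(map), vm_map_max(map)), while a request lying
 * entirely above vm_map_max(map) degenerates to the empty range
 * [vm_map_max(map), vm_map_max(map)).
 */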
1826 
1827 /*
1828  * Used to block when an in-transition collision occurs.  The map
1829  * is unlocked for the sleep and relocked before the return.
1830  */
1831 void
1832 vm_map_transition_wait(vm_map_t map, int relock)
1833 {
1834 	tsleep_interlock(map, 0);
1835 	vm_map_unlock(map);
1836 	tsleep(map, PINTERLOCKED, "vment", 0);
1837 	if (relock)
1838 		vm_map_lock(map);
1839 }
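
/*
 * Illustrative sketch (simplified): callers typically flag the colliding
 * entry for wakeup, wait, and then restart their lookup, as
 * vm_map_clip_range() does below:
 *
 *	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 *		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 *		vm_map_transition_wait(map, 1);
 *		goto again;		(re-lookup the entry)
 *	}
 */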
1840 
1841 /*
1842  * When we do blocking operations with the map lock held it is
1843  * possible that a clip might have occurred on our in-transition entry,
1844  * requiring an adjustment to the entry in our loop.  These macros
1845  * help the pageable and clip_range code deal with the case.  The
1846  * conditional costs virtually nothing if no clipping has occurred.
1847  */
1848 
1849 #define CLIP_CHECK_BACK(entry, save_start)			\
1850     do {							\
1851 	    while (entry->ba.start != save_start) {		\
1852 		    entry = vm_map_rb_tree_RB_PREV(entry);	\
1853 		    KASSERT(entry, ("bad entry clip")); 	\
1854 	    }							\
1855     } while(0)
1856 
1857 #define CLIP_CHECK_FWD(entry, save_end)				\
1858     do {							\
1859 	    while (entry->ba.end != save_end) {			\
1860 		    entry = vm_map_rb_tree_RB_NEXT(entry);	\
1861 		    KASSERT(entry, ("bad entry clip")); 	\
1862 	    }							\
1863     } while(0)
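
/*
 * Illustrative sketch (simplified): the pattern is to save the entry's
 * bounds before a blocking call and then re-resolve the possibly-clipped
 * entry afterwards, compare vm_map_unwire() and vm_map_wire() below:
 *
 *	save_start = entry->ba.start;
 *	save_end = entry->ba.end;
 *	rv = vm_fault_wire(map, entry, TRUE, 0);   (may unlock/relock map)
 *	CLIP_CHECK_BACK(entry, save_start);        (back up to save_start)
 *	CLIP_CHECK_FWD(entry, save_end);           (or forward to save_end)
 */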
1864 
1865 
1866 /*
1867  * Clip the specified range and return the base entry.  The
1868  * range may cover several entries starting at the returned base
1869  * and the first and last entry in the covering sequence will be
1870  * properly clipped to the requested start and end address.
1871  *
1872  * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
1873  * flag.
1874  *
1875  * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
1876  * covered by the requested range.
1877  *
1878  * The map must be exclusively locked on entry and will remain locked
1879  * on return. If no range exists or the range contains holes and you
1880  * specified that no holes were allowed, NULL will be returned.  This
1881  * routine may temporarily unlock the map in order to avoid a deadlock when
1882  * sleeping.
1883  */
1884 static
1885 vm_map_entry_t
1886 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end,
1887 		  int *countp, int flags)
1888 {
1889 	vm_map_entry_t start_entry;
1890 	vm_map_entry_t entry;
1891 	vm_map_entry_t next;
1892 
1893 	/*
1894 	 * Locate the entry and effect initial clipping.  The in-transition
1895 	 * case does not occur very often so do not try to optimize it.
1896 	 */
1897 again:
1898 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
1899 		return (NULL);
1900 	entry = start_entry;
1901 	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1902 		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1903 		++mycpu->gd_cnt.v_intrans_coll;
1904 		++mycpu->gd_cnt.v_intrans_wait;
1905 		vm_map_transition_wait(map, 1);
1906 		/*
1907 		 * entry and/or start_entry may have been clipped while
1908 		 * we slept, or may have gone away entirely.  We have
1909 		 * to restart from the lookup.
1910 		 */
1911 		goto again;
1912 	}
1913 
1914 	/*
1915 	 * Since we hold an exclusive map lock we do not have to restart
1916 	 * after clipping, even though clipping may block in zalloc.
1917 	 */
1918 	vm_map_clip_start(map, entry, start, countp);
1919 	vm_map_clip_end(map, entry, end, countp);
1920 	entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1921 
1922 	/*
1923 	 * Scan entries covered by the range.  When working on the next
1924 	 * entry a restart need only re-loop on the current entry which
1925 	 * we have already locked, since 'next' may have changed.  Also,
1926 	 * even though entry is safe, it may have been clipped so we
1927 	 * have to iterate forwards through the clip after sleeping.
1928 	 */
1929 	for (;;) {
1930 		next = vm_map_rb_tree_RB_NEXT(entry);
1931 		if (next == NULL || next->ba.start >= end)
1932 			break;
1933 		if (flags & MAP_CLIP_NO_HOLES) {
1934 			if (next->ba.start > entry->ba.end) {
1935 				vm_map_unclip_range(map, start_entry,
1936 					start, entry->ba.end, countp, flags);
1937 				return(NULL);
1938 			}
1939 		}
1940 
1941 		if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
1942 			vm_offset_t save_end = entry->ba.end;
1943 			next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1944 			++mycpu->gd_cnt.v_intrans_coll;
1945 			++mycpu->gd_cnt.v_intrans_wait;
1946 			vm_map_transition_wait(map, 1);
1947 
1948 			/*
1949 			 * Clips might have occurred while we blocked.
1950 			 */
1951 			CLIP_CHECK_FWD(entry, save_end);
1952 			CLIP_CHECK_BACK(start_entry, start);
1953 			continue;
1954 		}
1955 
1956 		/*
1957 		 * No restart necessary even though clip_end may block, we
1958 		 * are holding the map lock.
1959 		 */
1960 		vm_map_clip_end(map, next, end, countp);
1961 		next->eflags |= MAP_ENTRY_IN_TRANSITION;
1962 		entry = next;
1963 	}
1964 	if (flags & MAP_CLIP_NO_HOLES) {
1965 		if (entry->ba.end != end) {
1966 			vm_map_unclip_range(map, start_entry,
1967 				start, entry->ba.end, countp, flags);
1968 			return(NULL);
1969 		}
1970 	}
1971 	return(start_entry);
1972 }
1973 
1974 /*
1975  * Undo the effect of vm_map_clip_range().  You should pass the same
1976  * flags and the same range that you passed to vm_map_clip_range().
1977  * This code will clear the in-transition flag on the entries and
1978  * wake up anyone waiting.  This code will also simplify the sequence
1979  * and attempt to merge it with entries before and after the sequence.
1980  *
1981  * The map must be locked on entry and will remain locked on return.
1982  *
1983  * Note that you should also pass the start_entry returned by
1984  * vm_map_clip_range().  However, if you block between the two calls
1985  * with the map unlocked please be aware that the start_entry may
1986  * have been clipped and you may need to scan it backwards to find
1987  * the entry corresponding with the original start address.  You are
1988  * responsible for this, vm_map_unclip_range() expects the correct
1989  * start_entry to be passed to it and will KASSERT otherwise.
1990  */
1991 static
1992 void
1993 vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry,
1994 		    vm_offset_t start, vm_offset_t end,
1995 		    int *countp, int flags)
1996 {
1997 	vm_map_entry_t entry;
1998 
1999 	entry = start_entry;
2000 
2001 	KASSERT(entry->ba.start == start, ("unclip_range: illegal base entry"));
2002 	while (entry && entry->ba.start < end) {
2003 		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
2004 			("in-transition flag not set during unclip on: %p",
2005 			entry));
2006 		KASSERT(entry->ba.end <= end,
2007 			("unclip_range: tail wasn't clipped"));
2008 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2009 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2010 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2011 			wakeup(map);
2012 		}
2013 		entry = vm_map_rb_tree_RB_NEXT(entry);
2014 	}
2015 
2016 	/*
2017 	 * Simplification does not block so there is no restart case.
2018 	 */
2019 	entry = start_entry;
2020 	while (entry && entry->ba.start < end) {
2021 		vm_map_simplify_entry(map, entry, countp);
2022 		entry = vm_map_rb_tree_RB_NEXT(entry);
2023 	}
2024 }
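
/*
 * Illustrative sketch (simplified): vm_map_clip_range() and
 * vm_map_unclip_range() bracket an operation that may block, compare
 * vm_map_unwire() below:
 *
 *	start_entry = vm_map_clip_range(map, start, end, &count,
 *					MAP_CLIP_NO_HOLES);
 *	if (start_entry == NULL)
 *		return (KERN_INVALID_ADDRESS);
 *	... operate, possibly blocking; the covered entries remain
 *	    flagged MAP_ENTRY_IN_TRANSITION ...
 *	CLIP_CHECK_BACK(start_entry, start);
 *	vm_map_unclip_range(map, start_entry, start, end, &count,
 *			    MAP_CLIP_NO_HOLES);
 */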
2025 
2026 /*
2027  * Mark the given range as handled by a subordinate map.
2028  *
2029  * This range must have been created with vm_map_find(), and no other
2030  * operations may have been performed on this range prior to calling
2031  * vm_map_submap().
2032  *
2033  * Submappings cannot be removed.
2034  *
2035  * No requirements.
2036  */
2037 int
2038 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
2039 {
2040 	vm_map_entry_t entry;
2041 	int result = KERN_INVALID_ARGUMENT;
2042 	int count;
2043 
2044 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2045 	vm_map_lock(map);
2046 
2047 	VM_MAP_RANGE_CHECK(map, start, end);
2048 
2049 	if (vm_map_lookup_entry(map, start, &entry)) {
2050 		vm_map_clip_start(map, entry, start, &count);
2051 	} else if (entry) {
2052 		entry = vm_map_rb_tree_RB_NEXT(entry);
2053 	} else {
2054 		entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
2055 	}
2056 
2057 	vm_map_clip_end(map, entry, end, &count);
2058 
2059 	if ((entry->ba.start == start) && (entry->ba.end == end) &&
2060 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
2061 	    (entry->ba.object == NULL)) {
2062 		entry->ba.sub_map = submap;
2063 		entry->maptype = VM_MAPTYPE_SUBMAP;
2064 		result = KERN_SUCCESS;
2065 	}
2066 	vm_map_unlock(map);
2067 	vm_map_entry_release(count);
2068 
2069 	return (result);
2070 }
2071 
2072 /*
2073  * Sets the protection of the specified address region in the target map.
2074  * If "set_max" is specified, the maximum protection is to be set;
2075  * otherwise, only the current protection is affected.
2076  *
2077  * The protection is not applicable to submaps, but is applicable to normal
2078  * maps and maps governed by virtual page tables.  For example, when operating
2079  * on a virtual page table our protection basically controls how COW occurs
2080  * on the backing object, whereas the virtual page table itself is an
2081  * abstraction exposed to userland.
2082  *
2083  * No requirements.
2084  */
2085 int
2086 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
2087 	       vm_prot_t new_prot, boolean_t set_max)
2088 {
2089 	vm_map_entry_t current;
2090 	vm_map_entry_t entry;
2091 	int count;
2092 
2093 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2094 	vm_map_lock(map);
2095 
2096 	VM_MAP_RANGE_CHECK(map, start, end);
2097 
2098 	if (vm_map_lookup_entry(map, start, &entry)) {
2099 		vm_map_clip_start(map, entry, start, &count);
2100 	} else if (entry) {
2101 		entry = vm_map_rb_tree_RB_NEXT(entry);
2102 	} else {
2103 		entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
2104 	}
2105 
2106 	/*
2107 	 * Make a first pass to check for protection violations.
2108 	 */
2109 	current = entry;
2110 	while (current && current->ba.start < end) {
2111 		if (current->maptype == VM_MAPTYPE_SUBMAP) {
2112 			vm_map_unlock(map);
2113 			vm_map_entry_release(count);
2114 			return (KERN_INVALID_ARGUMENT);
2115 		}
2116 		if ((new_prot & current->max_protection) != new_prot) {
2117 			vm_map_unlock(map);
2118 			vm_map_entry_release(count);
2119 			return (KERN_PROTECTION_FAILURE);
2120 		}
2121 
2122 		/*
2123 		 * When making a SHARED+RW file mmap writable, update
2124 		 * v_lastwrite_ts.
2125 		 */
2126 		if (new_prot & PROT_WRITE &&
2127 		    (current->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
2128 		    (current->maptype == VM_MAPTYPE_NORMAL ||
2129 		     current->maptype == VM_MAPTYPE_VPAGETABLE) &&
2130 		    current->ba.object &&
2131 		    current->ba.object->type == OBJT_VNODE) {
2132 			struct vnode *vp;
2133 
2134 			vp = current->ba.object->handle;
2135 			if (vp && vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT) == 0) {
2136 				vfs_timestamp(&vp->v_lastwrite_ts);
2137 				vsetflags(vp, VLASTWRITETS);
2138 				vn_unlock(vp);
2139 			}
2140 		}
2141 		current = vm_map_rb_tree_RB_NEXT(current);
2142 	}
2143 
2144 	/*
2145 	 * Go back and fix up protections. [Note that clipping is not
2146 	 * necessary the second time.]
2147 	 */
2148 	current = entry;
2149 
2150 	while (current && current->ba.start < end) {
2151 		vm_prot_t old_prot;
2152 
2153 		vm_map_clip_end(map, current, end, &count);
2154 
2155 		old_prot = current->protection;
2156 		if (set_max) {
2157 			current->max_protection = new_prot;
2158 			current->protection = new_prot & old_prot;
2159 		} else {
2160 			current->protection = new_prot;
2161 		}
2162 
2163 		/*
2164 		 * Update physical map if necessary. Worry about copy-on-write
2165 		 * here -- CHECK THIS XXX
2166 		 */
2167 		if (current->protection != old_prot) {
2168 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2169 							VM_PROT_ALL)
2170 
2171 			pmap_protect(map->pmap, current->ba.start,
2172 			    current->ba.end,
2173 			    current->protection & MASK(current));
2174 #undef	MASK
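			/*
			 * e.g. (illustrative): for an entry marked
			 * MAP_ENTRY_COW whose new protection includes
			 * VM_PROT_WRITE, MASK() strips the write bit, so
			 * the pmap is left read-only here and the first
			 * write faults, letting the fault path perform any
			 * needed copy-on-write.
			 */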
2175 		}
2176 
2177 		vm_map_simplify_entry(map, current, &count);
2178 
2179 		current = vm_map_rb_tree_RB_NEXT(current);
2180 	}
2181 	vm_map_unlock(map);
2182 	vm_map_entry_release(count);
2183 	return (KERN_SUCCESS);
2184 }
2185 
2186 /*
2187  * This routine traverses a process's map handling the madvise
2188  * system call.  Advisories are classified as either those affecting
2189  * the vm_map_entry structure, or those affecting the underlying
2190  * objects.
2191  *
2192  * The <value> argument is used for extended madvise calls.
2193  *
2194  * No requirements.
2195  */
2196 int
2197 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end,
2198 	       int behav, off_t value)
2199 {
2200 	vm_map_entry_t current, entry;
2201 	int modify_map = 0;
2202 	int error = 0;
2203 	int count;
2204 
2205 	/*
2206 	 * Some madvise calls directly modify the vm_map_entry, in which case
2207 	 * we need to use an exclusive lock on the map and we need to perform
2208 	 * various clipping operations.  Otherwise we only need a read-lock
2209 	 * on the map.
2210 	 */
2211 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2212 
2213 	switch(behav) {
2214 	case MADV_NORMAL:
2215 	case MADV_SEQUENTIAL:
2216 	case MADV_RANDOM:
2217 	case MADV_NOSYNC:
2218 	case MADV_AUTOSYNC:
2219 	case MADV_NOCORE:
2220 	case MADV_CORE:
2221 	case MADV_SETMAP:
2222 		modify_map = 1;
2223 		vm_map_lock(map);
2224 		break;
2225 	case MADV_INVAL:
2226 	case MADV_WILLNEED:
2227 	case MADV_DONTNEED:
2228 	case MADV_FREE:
2229 		vm_map_lock_read(map);
2230 		break;
2231 	default:
2232 		vm_map_entry_release(count);
2233 		return (EINVAL);
2234 	}
2235 
2236 	/*
2237 	 * Locate starting entry and clip if necessary.
2238 	 */
2239 
2240 	VM_MAP_RANGE_CHECK(map, start, end);
2241 
2242 	if (vm_map_lookup_entry(map, start, &entry)) {
2243 		if (modify_map)
2244 			vm_map_clip_start(map, entry, start, &count);
2245 	} else if (entry) {
2246 		entry = vm_map_rb_tree_RB_NEXT(entry);
2247 	} else {
2248 		entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
2249 	}
2250 
2251 	if (modify_map) {
2252 		/*
2253 		 * madvise behaviors that are implemented in the vm_map_entry.
2254 		 *
2255 		 * We clip the vm_map_entry so that behavioral changes are
2256 		 * limited to the specified address range.
2257 		 */
2258 		for (current = entry;
2259 		     current && current->ba.start < end;
2260 		     current = vm_map_rb_tree_RB_NEXT(current)) {
2261 			/*
2262 			 * Ignore submaps
2263 			 */
2264 			if (current->maptype == VM_MAPTYPE_SUBMAP)
2265 				continue;
2266 
2267 			vm_map_clip_end(map, current, end, &count);
2268 
2269 			switch (behav) {
2270 			case MADV_NORMAL:
2271 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2272 				break;
2273 			case MADV_SEQUENTIAL:
2274 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2275 				break;
2276 			case MADV_RANDOM:
2277 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2278 				break;
2279 			case MADV_NOSYNC:
2280 				current->eflags |= MAP_ENTRY_NOSYNC;
2281 				break;
2282 			case MADV_AUTOSYNC:
2283 				current->eflags &= ~MAP_ENTRY_NOSYNC;
2284 				break;
2285 			case MADV_NOCORE:
2286 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
2287 				break;
2288 			case MADV_CORE:
2289 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2290 				break;
2291 			case MADV_SETMAP:
2292 				/*
2293 				 * Set the page directory page for a map
2294 				 * governed by a virtual page table.  Mark
2295 				 * the entry as being governed by a virtual
2296 				 * page table if it is not.
2297 				 *
2298 				 * XXX the page directory page is stored
2299 				 * in the avail_ssize field of the map_entry.
2300 				 *
2301 				 * XXX the map simplification code does not
2302 				 * compare this field so weird things may
2303 				 * happen if you do not apply this function
2304 				 * to the entire mapping governed by the
2305 				 * virtual page table.
2306 				 */
2307 				if (current->maptype != VM_MAPTYPE_VPAGETABLE) {
2308 					error = EINVAL;
2309 					break;
2310 				}
2311 				current->aux.master_pde = value;
2312 				pmap_remove(map->pmap,
2313 					    current->ba.start, current->ba.end);
2314 				break;
2315 			case MADV_INVAL:
2316 				/*
2317 				 * Invalidate the related pmap entries, used
2318 				 * to flush portions of the real kernel's
2319 				 * pmap when the caller has removed or
2320 				 * modified existing mappings in a virtual
2321 				 * page table.
2322 				 *
2323 				 * (exclusive locked map version does not
2324 				 * need the range interlock).
2325 				 */
2326 				pmap_remove(map->pmap,
2327 					    current->ba.start, current->ba.end);
2328 				break;
2329 			default:
2330 				error = EINVAL;
2331 				break;
2332 			}
2333 			vm_map_simplify_entry(map, current, &count);
2334 		}
2335 		vm_map_unlock(map);
2336 	} else {
2337 		vm_pindex_t pindex;
2338 		vm_pindex_t delta;
2339 
2340 		/*
2341 		 * madvise behaviors that are implemented in the underlying
2342 		 * vm_object.
2343 		 *
2344 		 * Since we don't clip the vm_map_entry, we have to clip
2345 		 * the vm_object pindex and count.
2346 		 *
2347 		 * NOTE!  These functions are only supported on normal maps,
2348 		 *	  except MADV_INVAL which is also supported on
2349 		 *	  virtual page tables.
2350 		 *
2351 		 * NOTE!  These functions only apply to the top-most object.
2352 		 *	  They are not applicable to backing objects.
2353 		 */
2354 		for (current = entry;
2355 		     current && current->ba.start < end;
2356 		     current = vm_map_rb_tree_RB_NEXT(current)) {
2357 			vm_offset_t useStart;
2358 
2359 			if (current->maptype != VM_MAPTYPE_NORMAL &&
2360 			    (current->maptype != VM_MAPTYPE_VPAGETABLE ||
2361 			     behav != MADV_INVAL)) {
2362 				continue;
2363 			}
2364 
2365 			pindex = OFF_TO_IDX(current->ba.offset);
2366 			delta = atop(current->ba.end - current->ba.start);
2367 			useStart = current->ba.start;
2368 
2369 			if (current->ba.start < start) {
2370 				pindex += atop(start - current->ba.start);
2371 				delta -= atop(start - current->ba.start);
2372 				useStart = start;
2373 			}
2374 			if (current->ba.end > end)
2375 				delta -= atop(current->ba.end - end);
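			/*
			 * Worked example (illustrative, assuming 4K pages):
			 * if the entry covers [0x10000, 0x20000) with
			 * ba.offset 0 and the request is [0x14000, 0x18000),
			 * pindex starts at 0 and delta at 16 pages; the
			 * clipping above yields pindex = 4, useStart =
			 * 0x14000, and delta = 16 - 4 - 8 = 4 pages.
			 */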
2376 
2377 			if ((vm_spindex_t)delta <= 0)
2378 				continue;
2379 
2380 			if (behav == MADV_INVAL) {
2381 				/*
2382 				 * Invalidate the related pmap entries, used
2383 				 * to flush portions of the real kernel's
2384 				 * pmap when the caller has removed or
2385 				 * modified existing mappings in a virtual
2386 				 * page table.
2387 				 *
2388 				 * (shared locked map version needs the
2389 				 * interlock, see vm_fault()).
2390 				 */
2391 				struct vm_map_ilock ilock;
2392 
2393 				KASSERT(useStart >= VM_MIN_USER_ADDRESS &&
2394 					    useStart + ptoa(delta) <=
2395 					    VM_MAX_USER_ADDRESS,
2396 					 ("Bad range %016jx-%016jx (%016jx)",
2397 					 useStart, useStart + ptoa(delta),
2398 					 delta));
2399 				vm_map_interlock(map, &ilock,
2400 						 useStart,
2401 						 useStart + ptoa(delta));
2402 				pmap_remove(map->pmap,
2403 					    useStart,
2404 					    useStart + ptoa(delta));
2405 				vm_map_deinterlock(map, &ilock);
2406 			} else {
2407 				vm_object_madvise(current->ba.object,
2408 						  pindex, delta, behav);
2409 			}
2410 
2411 			/*
2412 			 * Try to populate the page table.  Mappings governed
2413 			 * by virtual page tables cannot be pre-populated
2414 			 * without a lot of work so don't try.
2415 			 */
2416 			if (behav == MADV_WILLNEED &&
2417 			    current->maptype != VM_MAPTYPE_VPAGETABLE) {
2418 				pmap_object_init_pt(
2419 				    map->pmap, current,
2420 				    useStart,
2421 				    (delta << PAGE_SHIFT),
2422 				    MAP_PREFAULT_MADVISE
2423 				);
2424 			}
2425 		}
2426 		vm_map_unlock_read(map);
2427 	}
2428 	vm_map_entry_release(count);
2429 	return(error);
2430 }
2431 
2432 
2433 /*
2434  * Sets the inheritance of the specified address range in the target map.
2435  * Inheritance affects how the map will be shared with child maps at the
2436  * time of vm_map_fork.
2437  */
2438 int
2439 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2440 	       vm_inherit_t new_inheritance)
2441 {
2442 	vm_map_entry_t entry;
2443 	vm_map_entry_t temp_entry;
2444 	int count;
2445 
2446 	switch (new_inheritance) {
2447 	case VM_INHERIT_NONE:
2448 	case VM_INHERIT_COPY:
2449 	case VM_INHERIT_SHARE:
2450 		break;
2451 	default:
2452 		return (KERN_INVALID_ARGUMENT);
2453 	}
2454 
2455 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2456 	vm_map_lock(map);
2457 
2458 	VM_MAP_RANGE_CHECK(map, start, end);
2459 
2460 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2461 		entry = temp_entry;
2462 		vm_map_clip_start(map, entry, start, &count);
2463 	} else if (temp_entry) {
2464 		entry = vm_map_rb_tree_RB_NEXT(temp_entry);
2465 	} else {
2466 		entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
2467 	}
2468 
2469 	while (entry && entry->ba.start < end) {
2470 		vm_map_clip_end(map, entry, end, &count);
2471 
2472 		entry->inheritance = new_inheritance;
2473 
2474 		vm_map_simplify_entry(map, entry, &count);
2475 
2476 		entry = vm_map_rb_tree_RB_NEXT(entry);
2477 	}
2478 	vm_map_unlock(map);
2479 	vm_map_entry_release(count);
2480 	return (KERN_SUCCESS);
2481 }
2482 
2483 /*
2484  * Implement the semantics of mlock
2485  */
2486 int
2487 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
2488 	      boolean_t new_pageable)
2489 {
2490 	vm_map_entry_t entry;
2491 	vm_map_entry_t start_entry;
2492 	vm_offset_t end;
2493 	int rv = KERN_SUCCESS;
2494 	int count;
2495 
2496 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2497 	vm_map_lock(map);
2498 	VM_MAP_RANGE_CHECK(map, start, real_end);
2499 	end = real_end;
2500 
2501 	start_entry = vm_map_clip_range(map, start, end, &count,
2502 					MAP_CLIP_NO_HOLES);
2503 	if (start_entry == NULL) {
2504 		vm_map_unlock(map);
2505 		vm_map_entry_release(count);
2506 		return (KERN_INVALID_ADDRESS);
2507 	}
2508 
2509 	if (new_pageable == 0) {
2510 		entry = start_entry;
2511 		while (entry && entry->ba.start < end) {
2512 			vm_offset_t save_start;
2513 			vm_offset_t save_end;
2514 
2515 			/*
2516 			 * Already user wired or hard wired (trivial cases)
2517 			 */
2518 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
2519 				entry = vm_map_rb_tree_RB_NEXT(entry);
2520 				continue;
2521 			}
2522 			if (entry->wired_count != 0) {
2523 				entry->wired_count++;
2524 				entry->eflags |= MAP_ENTRY_USER_WIRED;
2525 				entry = vm_map_rb_tree_RB_NEXT(entry);
2526 				continue;
2527 			}
2528 
2529 			/*
2530 			 * A new wiring requires instantiation of appropriate
2531 			 * management structures and the faulting in of the
2532 			 * page.
2533 			 */
2534 			if (entry->maptype == VM_MAPTYPE_NORMAL ||
2535 			    entry->maptype == VM_MAPTYPE_VPAGETABLE) {
2536 				int copyflag = entry->eflags &
2537 					       MAP_ENTRY_NEEDS_COPY;
2538 				if (copyflag && ((entry->protection &
2539 						  VM_PROT_WRITE) != 0)) {
2540 					vm_map_entry_shadow(entry);
2541 				} else if (entry->ba.object == NULL &&
2542 					   !map->system_map) {
2543 					vm_map_entry_allocate_object(entry);
2544 				}
2545 			}
2546 			entry->wired_count++;
2547 			entry->eflags |= MAP_ENTRY_USER_WIRED;
2548 
2549 			/*
2550 			 * Now fault in the area.  Note that vm_fault_wire()
2551 			 * may release the map lock temporarily, it will be
2552 			 * relocked on return.  The in-transition
2553 			 * flag protects the entries.
2554 			 */
2555 			save_start = entry->ba.start;
2556 			save_end = entry->ba.end;
2557 			rv = vm_fault_wire(map, entry, TRUE, 0);
2558 			if (rv) {
2559 				CLIP_CHECK_BACK(entry, save_start);
2560 				for (;;) {
2561 					KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
2562 					entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2563 					entry->wired_count = 0;
2564 					if (entry->ba.end == save_end)
2565 						break;
2566 					entry = vm_map_rb_tree_RB_NEXT(entry);
2567 					KASSERT(entry,
2568 					     ("bad entry clip during backout"));
2569 				}
2570 				end = save_start;	/* unwire the rest */
2571 				break;
2572 			}
2573 			/*
2574 			 * note that even though the entry might have been
2575 			 * clipped, the USER_WIRED flag we set prevents
2576 			 * duplication so we do not have to do a
2577 			 * clip check.
2578 			 */
2579 			entry = vm_map_rb_tree_RB_NEXT(entry);
2580 		}
2581 
2582 		/*
2583 		 * If we failed fall through to the unwiring section to
2584 		 * unwire what we had wired so far.  'end' has already
2585 		 * been adjusted.
2586 		 */
2587 		if (rv)
2588 			new_pageable = 1;
2589 
2590 		/*
2591 		 * start_entry might have been clipped if we unlocked the
2592 		 * map and blocked.  No matter how clipped it has gotten
2593 		 * there should be a fragment that is on our start boundary.
2594 		 */
2595 		CLIP_CHECK_BACK(start_entry, start);
2596 	}
2597 
2598 	/*
2599 	 * Deal with the unwiring case.
2600 	 */
2601 	if (new_pageable) {
2602 		/*
2603 		 * This is the unwiring case.  We must first ensure that the
2604 		 * range to be unwired is really wired down.  We know there
2605 		 * are no holes.
2606 		 */
2607 		entry = start_entry;
2608 		while (entry && entry->ba.start < end) {
2609 			if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2610 				rv = KERN_INVALID_ARGUMENT;
2611 				goto done;
2612 			}
2613 			KASSERT(entry->wired_count != 0,
2614 				("wired count was 0 with USER_WIRED set! %p",
2615 				 entry));
2616 			entry = vm_map_rb_tree_RB_NEXT(entry);
2617 		}
2618 
2619 		/*
2620 		 * Now decrement the wiring count for each region. If a region
2621 		 * becomes completely unwired, unwire its physical pages and
2622 		 * mappings.
2623 		 */
2624 		/*
2625 		 * NOTE: The unwiring is done in a second loop below,
2626 		 *	 separate from the verification loop above.  The
2627 		 *	 'entry' variable must be reset to start_entry
2628 		 *	 before entering it.  If this loop instead picked
2629 		 *	 up the "entry" value left over from the first
2630 		 *	 loop, it would never be entered, the pages backing
2631 		 *	 the entries would never be unwired, and wired
2632 		 *	 pages would be leaked.
2633 		 */
2634 		entry = start_entry;
2635 		while (entry && entry->ba.start < end) {
2636 			KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED,
2637 				("expected USER_WIRED on entry %p", entry));
2638 			entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2639 			entry->wired_count--;
2640 			if (entry->wired_count == 0)
2641 				vm_fault_unwire(map, entry);
2642 			entry = vm_map_rb_tree_RB_NEXT(entry);
2643 		}
2644 	}
2645 done:
2646 	vm_map_unclip_range(map, start_entry, start, real_end, &count,
2647 		MAP_CLIP_NO_HOLES);
2648 	vm_map_unlock(map);
2649 	vm_map_entry_release(count);
2650 
2651 	return (rv);
2652 }
2653 
2654 /*
2655  * Sets the pageability of the specified address range in the target map.
2656  * Regions specified as not pageable require locked-down physical
2657  * memory and physical page maps.
2658  *
2659  * The map must not be locked, but a reference must remain to the map
2660  * throughout the call.
2661  *
2662  * This function may be called via the zalloc path and must properly
2663  * reserve map entries for kernel_map.
2664  *
2665  * No requirements.
2666  */
2667 int
2668 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
2669 {
2670 	vm_map_entry_t entry;
2671 	vm_map_entry_t start_entry;
2672 	vm_offset_t end;
2673 	int rv = KERN_SUCCESS;
2674 	int count;
2675 
2676 	if (kmflags & KM_KRESERVE)
2677 		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
2678 	else
2679 		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2680 	vm_map_lock(map);
2681 	VM_MAP_RANGE_CHECK(map, start, real_end);
2682 	end = real_end;
2683 
2684 	start_entry = vm_map_clip_range(map, start, end, &count,
2685 					MAP_CLIP_NO_HOLES);
2686 	if (start_entry == NULL) {
2687 		vm_map_unlock(map);
2688 		rv = KERN_INVALID_ADDRESS;
2689 		goto failure;
2690 	}
2691 	if ((kmflags & KM_PAGEABLE) == 0) {
2692 		/*
2693 		 * Wiring.
2694 		 *
2695 		 * 1.  Holding the write lock, we create any shadow or zero-fill
2696 		 * objects that need to be created. Then we clip each map
2697 		 * entry to the region to be wired and increment its wiring
2698 		 * count.  We create objects before clipping the map entries
2699 		 * to avoid object proliferation.
2700 		 *
2701 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
2702 		 * fault in the pages for any newly wired area (wired_count is
2703 		 * 1).
2704 		 *
2705 		 * Downgrading to a read lock for vm_fault_wire avoids a
2706 		 * possible deadlock with another process that may have faulted
2707 		 * on one of the pages to be wired (it would mark the page busy,
2708 		 * blocking us, then in turn block on the map lock that we
2709 		 * hold).  Because of problems in the recursive lock package,
2710 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
2711 		 * any actions that require the write lock must be done
2712 		 * beforehand.  Because we keep the read lock on the map, the
2713 		 * copy-on-write status of the entries we modify here cannot
2714 		 * change.
2715 		 */
2716 		entry = start_entry;
2717 		while (entry && entry->ba.start < end) {
2718 			/*
2719 			 * Trivial case if the entry is already wired
2720 			 */
2721 			if (entry->wired_count) {
2722 				entry->wired_count++;
2723 				entry = vm_map_rb_tree_RB_NEXT(entry);
2724 				continue;
2725 			}
2726 
2727 			/*
2728 			 * The entry is being newly wired, we have to setup
2729 			 * appropriate management structures.  A shadow
2730 			 * object is required for a copy-on-write region,
2731 			 * or a normal object for a zero-fill region.  We
2732 			 * do not have to do this for entries that point to sub
2733 			 * maps because we won't hold the lock on the sub map.
2734 			 */
2735 			if (entry->maptype == VM_MAPTYPE_NORMAL ||
2736 			    entry->maptype == VM_MAPTYPE_VPAGETABLE) {
2737 				int copyflag = entry->eflags &
2738 					       MAP_ENTRY_NEEDS_COPY;
2739 				if (copyflag && ((entry->protection &
2740 						  VM_PROT_WRITE) != 0)) {
2741 					vm_map_entry_shadow(entry);
2742 				} else if (entry->ba.object == NULL &&
2743 					   !map->system_map) {
2744 					vm_map_entry_allocate_object(entry);
2745 				}
2746 			}
2747 			entry->wired_count++;
2748 			entry = vm_map_rb_tree_RB_NEXT(entry);
2749 		}
2750 
2751 		/*
2752 		 * Pass 2.
2753 		 */
2754 
2755 		/*
2756 		 * HACK HACK HACK HACK
2757 		 *
2758 		 * vm_fault_wire() temporarily unlocks the map to avoid
2759 		 * deadlocks.  The in-transition flag from the vm_map_clip_range
2760 		 * call should protect us from changes while the map is
2761 		 * unlocked.
2762 		 *
2763 		 * NOTE: Previously this comment stated that clipping might
2764 		 *	 still occur while the entry is unlocked, but from
2765 		 *	 what I can tell it actually cannot.
2766 		 *
2767 		 *	 It is unclear whether the CLIP_CHECK_*() calls
2768 		 *	 are still needed but we keep them in anyway.
2769 		 *
2770 		 * HACK HACK HACK HACK
2771 		 */
2772 
2773 		entry = start_entry;
2774 		while (entry && entry->ba.start < end) {
2775 			/*
2776 			 * If vm_fault_wire fails for any page we need to undo
2777 			 * what has been done.  We decrement the wiring count
2778 			 * for those pages which have not yet been wired (now)
2779 			 * and unwire those that have (later).
2780 			 */
2781 			vm_offset_t save_start = entry->ba.start;
2782 			vm_offset_t save_end = entry->ba.end;
2783 
2784 			if (entry->wired_count == 1)
2785 				rv = vm_fault_wire(map, entry, FALSE, kmflags);
2786 			if (rv) {
2787 				CLIP_CHECK_BACK(entry, save_start);
2788 				for (;;) {
2789 					KASSERT(entry->wired_count == 1,
2790 					  ("wired_count changed unexpectedly"));
2791 					entry->wired_count = 0;
2792 					if (entry->ba.end == save_end)
2793 						break;
2794 					entry = vm_map_rb_tree_RB_NEXT(entry);
2795 					KASSERT(entry,
2796 					  ("bad entry clip during backout"));
2797 				}
2798 				end = save_start;
2799 				break;
2800 			}
2801 			CLIP_CHECK_FWD(entry, save_end);
2802 			entry = vm_map_rb_tree_RB_NEXT(entry);
2803 		}
2804 
2805 		/*
2806 		 * If a failure occurred, undo everything by falling through
2807 		 * to the unwiring code.  'end' has already been adjusted
2808 		 * appropriately.
2809 		 */
2810 		if (rv)
2811 			kmflags |= KM_PAGEABLE;
2812 
2813 		/*
2814 		 * start_entry is still IN_TRANSITION but may have been
2815 		 * clipped since vm_fault_wire() unlocks and relocks the
2816 		 * map.  No matter how clipped it has gotten there should
2817 		 * be a fragment that is on our start boundary.
2818 		 */
2819 		CLIP_CHECK_BACK(start_entry, start);
2820 	}
2821 
2822 	if (kmflags & KM_PAGEABLE) {
2823 		/*
2824 		 * This is the unwiring case.  We must first ensure that the
2825 		 * range to be unwired is really wired down.  We know there
2826 		 * are no holes.
2827 		 */
2828 		entry = start_entry;
2829 		while (entry && entry->ba.start < end) {
2830 			if (entry->wired_count == 0) {
2831 				rv = KERN_INVALID_ARGUMENT;
2832 				goto done;
2833 			}
2834 			entry = vm_map_rb_tree_RB_NEXT(entry);
2835 		}
2836 
2837 		/*
2838 		 * Now decrement the wiring count for each region. If a region
2839 		 * becomes completely unwired, unwire its physical pages and
2840 		 * mappings.
2841 		 */
2842 		entry = start_entry;
2843 		while (entry && entry->ba.start < end) {
2844 			entry->wired_count--;
2845 			if (entry->wired_count == 0)
2846 				vm_fault_unwire(map, entry);
2847 			entry = vm_map_rb_tree_RB_NEXT(entry);
2848 		}
2849 	}
2850 done:
2851 	vm_map_unclip_range(map, start_entry, start, real_end,
2852 			    &count, MAP_CLIP_NO_HOLES);
2853 	vm_map_unlock(map);
2854 failure:
2855 	if (kmflags & KM_KRESERVE)
2856 		vm_map_entry_krelease(count);
2857 	else
2858 		vm_map_entry_release(count);
2859 	return (rv);
2860 }
2861 
2862 /*
2863  * Mark a newly allocated address range as wired but do not fault in
2864  * the pages.  The caller is expected to load the pages into the object.
2865  *
2866  * The map must be locked on entry and will remain locked on return.
2867  * No other requirements.
2868  */
2869 void
2870 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size,
2871 		       int *countp)
2872 {
2873 	vm_map_entry_t scan;
2874 	vm_map_entry_t entry;
2875 
2876 	entry = vm_map_clip_range(map, addr, addr + size,
2877 				  countp, MAP_CLIP_NO_HOLES);
2878 	scan = entry;
2879 	while (scan && scan->ba.start < addr + size) {
2880 		KKASSERT(scan->wired_count == 0);
2881 		scan->wired_count = 1;
2882 		scan = vm_map_rb_tree_RB_NEXT(scan);
2883 	}
2884 	vm_map_unclip_range(map, entry, addr, addr + size,
2885 			    countp, MAP_CLIP_NO_HOLES);
2886 }
2887 
2888 /*
2889  * Push any dirty cached pages in the address range to their pager.
2890  * If syncio is TRUE, dirty pages are written synchronously.
2891  * If invalidate is TRUE, any cached pages are freed as well.
2892  *
2893  * This routine is called by sys_msync()
2894  *
2895  * Returns an error if any part of the specified range is not mapped.
2896  *
2897  * No requirements.
2898  */
2899 int
2900 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end,
2901 	     boolean_t syncio, boolean_t invalidate)
2902 {
2903 	vm_map_entry_t current;
2904 	vm_map_entry_t next;
2905 	vm_map_entry_t entry;
2906 	vm_map_backing_t ba;
2907 	vm_size_t size;
2908 	vm_object_t object;
2909 	vm_ooffset_t offset;
2910 
2911 	vm_map_lock_read(map);
2912 	VM_MAP_RANGE_CHECK(map, start, end);
2913 	if (!vm_map_lookup_entry(map, start, &entry)) {
2914 		vm_map_unlock_read(map);
2915 		return (KERN_INVALID_ADDRESS);
2916 	}
2917 	lwkt_gettoken(&map->token);
2918 
2919 	/*
2920 	 * Make a first pass to check for holes.
2921 	 */
2922 	current = entry;
2923 	while (current && current->ba.start < end) {
2924 		if (current->maptype == VM_MAPTYPE_SUBMAP) {
2925 			lwkt_reltoken(&map->token);
2926 			vm_map_unlock_read(map);
2927 			return (KERN_INVALID_ARGUMENT);
2928 		}
2929 		next = vm_map_rb_tree_RB_NEXT(current);
2930 		if (end > current->ba.end &&
2931 		    (next == NULL ||
2932 		     current->ba.end != next->ba.start)) {
2933 			lwkt_reltoken(&map->token);
2934 			vm_map_unlock_read(map);
2935 			return (KERN_INVALID_ADDRESS);
2936 		}
2937 		current = next;
2938 	}
2939 
2940 	if (invalidate)
2941 		pmap_remove(vm_map_pmap(map), start, end);
2942 
2943 	/*
2944 	 * Make a second pass, cleaning/uncaching pages from the indicated
2945 	 * objects as we go.
2946 	 */
2947 	current = entry;
2948 	while (current && current->ba.start < end) {
2949 		offset = current->ba.offset + (start - current->ba.start);
2950 		size = (end <= current->ba.end ? end : current->ba.end) - start;
2951 
2952 		switch(current->maptype) {
2953 		case VM_MAPTYPE_SUBMAP:
2954 		{
2955 			vm_map_t smap;
2956 			vm_map_entry_t tentry;
2957 			vm_size_t tsize;
2958 
2959 			smap = current->ba.sub_map;
2960 			vm_map_lock_read(smap);
2961 			vm_map_lookup_entry(smap, offset, &tentry);
2962 			if (tentry == NULL) {
2963 				tsize = vm_map_max(smap) - offset;
2964 				ba = NULL;
2965 				offset = 0 + (offset - vm_map_min(smap));
2966 			} else {
2967 				tsize = tentry->ba.end - offset;
2968 				ba = &tentry->ba;
2969 				offset = tentry->ba.offset +
2970 					 (offset - tentry->ba.start);
2971 			}
2972 			vm_map_unlock_read(smap);
2973 			if (tsize < size)
2974 				size = tsize;
2975 			break;
2976 		}
2977 		case VM_MAPTYPE_NORMAL:
2978 		case VM_MAPTYPE_VPAGETABLE:
2979 			ba = &current->ba;
2980 			break;
2981 		default:
2982 			ba = NULL;
2983 			break;
2984 		}
2985 		if (ba) {
2986 			object = ba->object;
2987 			if (object)
2988 				vm_object_hold(object);
2989 		} else {
2990 			object = NULL;
2991 		}
2992 
2993 		/*
2994 		 * Note that there is absolutely no sense in writing out
2995 		 * anonymous objects, so we track down the vnode object
2996 		 * to write out.
2997 		 * We invalidate (remove) all pages from the address space
2998 		 * anyway, for semantic correctness.
2999 		 *
3000 		 * note: certain anonymous maps, such as MAP_NOSYNC maps,
3001 		 * may start out with a NULL object.
3002 		 *
3003 		 * XXX do we really want to stop at the first backing store
3004 		 * here if there are more? XXX
3005 		 */
3006 		if (ba) {
3007 			vm_object_t tobj;
3008 
3009 			tobj = object;
3010 			while (ba->backing_ba != NULL) {
3011 				offset -= ba->offset;
3012 				ba = ba->backing_ba;
3013 				offset += ba->offset;
3014 				tobj = ba->object;
3015 				if (tobj->size < OFF_TO_IDX(offset + size))
3016 					size = IDX_TO_OFF(tobj->size) - offset;
3017 				break; /* XXX this break is not correct */
3018 			}
3019 			if (object != tobj) {
3020 				if (object)
3021 					vm_object_drop(object);
3022 				object = tobj;
3023 				vm_object_hold(object);
3024 			}
3025 		}
3026 
3027 		if (object && (object->type == OBJT_VNODE) &&
3028 		    (current->protection & VM_PROT_WRITE) &&
3029 		    (object->flags & OBJ_NOMSYNC) == 0) {
3030 			/*
3031 			 * Flush pages if writing is allowed, invalidate them
3032 			 * if invalidation requested.  Pages undergoing I/O
3033 			 * will be ignored by vm_object_page_remove().
3034 			 *
3035 			 * We cannot lock the vnode and then wait for paging
3036 			 * to complete without deadlocking against vm_fault.
3037 			 * Instead we simply call vm_object_page_remove() and
3038 			 * allow it to block internally on a page-by-page
3039 			 * basis when it encounters pages undergoing async
3040 			 * I/O.
3041 			 */
3042 			int flags;
3043 
3044 			/* no chain wait needed for vnode objects */
3045 			vm_object_reference_locked(object);
3046 			vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY);
3047 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
3048 			flags |= invalidate ? OBJPC_INVAL : 0;
3049 
3050 			/*
3051 			 * When operating on a virtual page table just
3052 			 * flush the whole object.  XXX we probably ought
3053 			 * to
3054 			 */
3055 			switch(current->maptype) {
3056 			case VM_MAPTYPE_NORMAL:
3057 				vm_object_page_clean(object,
3058 				    OFF_TO_IDX(offset),
3059 				    OFF_TO_IDX(offset + size + PAGE_MASK),
3060 				    flags);
3061 				break;
3062 			case VM_MAPTYPE_VPAGETABLE:
3063 				vm_object_page_clean(object, 0, 0, flags);
3064 				break;
3065 			}
3066 			vn_unlock(((struct vnode *)object->handle));
3067 			vm_object_deallocate_locked(object);
3068 		}
3069 		if (object && invalidate &&
3070 		   ((object->type == OBJT_VNODE) ||
3071 		    (object->type == OBJT_DEVICE) ||
3072 		    (object->type == OBJT_MGTDEVICE))) {
3073 			int clean_only =
3074 				((object->type == OBJT_DEVICE) ||
3075 				(object->type == OBJT_MGTDEVICE)) ? FALSE : TRUE;
3076 			/* no chain wait needed for vnode/device objects */
3077 			vm_object_reference_locked(object);
3078 			switch(current->maptype) {
3079 			case VM_MAPTYPE_NORMAL:
3080 				vm_object_page_remove(object,
3081 				    OFF_TO_IDX(offset),
3082 				    OFF_TO_IDX(offset + size + PAGE_MASK),
3083 				    clean_only);
3084 				break;
3085 			case VM_MAPTYPE_VPAGETABLE:
3086 				vm_object_page_remove(object, 0, 0, clean_only);
3087 				break;
3088 			}
3089 			vm_object_deallocate_locked(object);
3090 		}
3091 		start += size;
3092 		if (object)
3093 			vm_object_drop(object);
3094 		current = vm_map_rb_tree_RB_NEXT(current);
3095 	}
3096 
3097 	lwkt_reltoken(&map->token);
3098 	vm_map_unlock_read(map);
3099 
3100 	return (KERN_SUCCESS);
3101 }
3102 
3103 /*
3104  * Make the region specified by this entry pageable.
3105  *
3106  * The vm_map must be exclusively locked.
3107  */
3108 static void
3109 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
3110 {
3111 	entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3112 	entry->wired_count = 0;
3113 	vm_fault_unwire(map, entry);
3114 }
3115 
3116 /*
3117  * Deallocate the given entry from the target map.
3118  *
3119  * The vm_map must be exclusively locked.
3120  */
3121 static void
3122 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
3123 {
3124 	vm_map_entry_unlink(map, entry);
3125 	map->size -= entry->ba.end - entry->ba.start;
3126 	vm_map_entry_dispose(map, entry, countp);
3127 }
3128 
3129 /*
3130  * Deallocates the given address range from the target map.
3131  *
3132  * The vm_map must be exclusively locked.
3133  */
3134 int
3135 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
3136 {
3137 	vm_object_t object;
3138 	vm_map_entry_t entry;
3139 	vm_map_entry_t first_entry;
3140 	vm_offset_t hole_start;
3141 
3142 	ASSERT_VM_MAP_LOCKED(map);
3143 	lwkt_gettoken(&map->token);
3144 again:
3145 	/*
3146 	 * Find the start of the region, and clip it.  Set entry to point
3147 	 * at the first record containing the requested address or, if no
3148 	 * such record exists, the next record with a greater address.  The
3149 	 * loop will run from this point until a record beyond the termination
3150 	 * address is encountered.
3151 	 *
3152 	 * Adjust freehint[] for either the clip case or the extension case.
3153 	 *
3154 	 * GGG see other GGG comment.
3155 	 */
3156 	if (vm_map_lookup_entry(map, start, &first_entry)) {
3157 		entry = first_entry;
3158 		vm_map_clip_start(map, entry, start, countp);
3159 		hole_start = start;
3160 	} else {
3161 		if (first_entry) {
3162 			entry = vm_map_rb_tree_RB_NEXT(first_entry);
3163 			if (entry == NULL)
3164 				hole_start = first_entry->ba.start;
3165 			else
3166 				hole_start = first_entry->ba.end;
3167 		} else {
3168 			entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
3169 			if (entry == NULL)
3170 				hole_start = vm_map_min(map);
3171 			else
3172 				hole_start = vm_map_max(map);
3173 		}
3174 	}
3175 
3176 	/*
3177 	 * Step through all entries in this region
3178 	 */
3179 	while (entry && entry->ba.start < end) {
3180 		vm_map_entry_t next;
3181 		vm_offset_t s, e;
3182 		vm_pindex_t offidxstart, offidxend, count;
3183 
3184 		/*
3185 		 * If we hit an in-transition entry we have to sleep and
3186 		 * retry.  It's easier (and not really slower) to just retry
3187 		 * since this case occurs so rarely and the hint is already
3188 		 * pointing at the right place.  We have to reset the
3189 		 * start offset so as not to accidentally delete an entry
3190 		 * another process just created in vacated space.
3191 		 */
3192 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3193 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3194 			start = entry->ba.start;
3195 			++mycpu->gd_cnt.v_intrans_coll;
3196 			++mycpu->gd_cnt.v_intrans_wait;
3197 			vm_map_transition_wait(map, 1);
3198 			goto again;
3199 		}
3200 		vm_map_clip_end(map, entry, end, countp);
3201 
3202 		s = entry->ba.start;
3203 		e = entry->ba.end;
3204 		next = vm_map_rb_tree_RB_NEXT(entry);
3205 
3206 		offidxstart = OFF_TO_IDX(entry->ba.offset);
3207 		count = OFF_TO_IDX(e - s);
3208 
3209 		switch(entry->maptype) {
3210 		case VM_MAPTYPE_NORMAL:
3211 		case VM_MAPTYPE_VPAGETABLE:
3212 		case VM_MAPTYPE_SUBMAP:
3213 			object = entry->ba.object;
3214 			break;
3215 		default:
3216 			object = NULL;
3217 			break;
3218 		}
3219 
3220 		/*
3221 		 * Unwire before removing addresses from the pmap; otherwise,
3222 		 * unwiring will put the entries back in the pmap.
3223 		 *
3224 		 * Generally speaking, doing a bulk pmap_remove() before
3225 		 * removing the pages from the VM object is better at
3226 		 * reducing unnecessary IPIs.  The pmap code is now optimized
3227 		 * to not blindly iterate the range when pt and pd pages
3228 		 * are missing.
3229 		 */
3230 		if (entry->wired_count != 0)
3231 			vm_map_entry_unwire(map, entry);
3232 
3233 		offidxend = offidxstart + count;
3234 
3235 		if (object == &kernel_object) {
3236 			pmap_remove(map->pmap, s, e);
3237 			vm_object_hold(object);
3238 			vm_object_page_remove(object, offidxstart,
3239 					      offidxend, FALSE);
3240 			vm_object_drop(object);
3241 		} else if (object && object->type != OBJT_DEFAULT &&
3242 			   object->type != OBJT_SWAP) {
3243 			/*
3244 			 * vnode object routines cannot be chain-locked,
3245 			 * but since we aren't removing pages from the
3246 			 * object here we can use a shared hold.
3247 			 */
3248 			vm_object_hold_shared(object);
3249 			pmap_remove(map->pmap, s, e);
3250 			vm_object_drop(object);
3251 		} else if (object) {
3252 			vm_object_hold(object);
3253 			pmap_remove(map->pmap, s, e);
3254 
3255 			if (object != NULL &&
3256 			    object->ref_count != 1 &&
3257 			    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) ==
3258 			     OBJ_ONEMAPPING &&
3259 			    (object->type == OBJT_DEFAULT ||
3260 			     object->type == OBJT_SWAP)) {
3261 				/*
3262 				 * When ONEMAPPING is set we can destroy the
3263 				 * pages underlying the entry's range.
3264 				 */
3265 				vm_object_page_remove(object, offidxstart,
3266 						      offidxend, FALSE);
3267 				if (object->type == OBJT_SWAP) {
3268 					swap_pager_freespace(object,
3269 							     offidxstart,
3270 							     count);
3271 				}
3272 				if (offidxend >= object->size &&
3273 				    offidxstart < object->size) {
3274 					object->size = offidxstart;
3275 				}
3276 			}
3277 			vm_object_drop(object);
3278 		} else if (entry->maptype == VM_MAPTYPE_UKSMAP) {
3279 			pmap_remove(map->pmap, s, e);
3280 		}
3281 
3282 		/*
3283 		 * Delete the entry (which may delete the object) only after
3284 		 * removing all pmap entries pointing to its pages.
3285 		 * (Otherwise, its page frames may be reallocated, and any
3286 		 * modify bits will be set in the wrong object!)
3287 		 */
3288 		vm_map_entry_delete(map, entry, countp);
3289 		entry = next;
3290 	}
3291 
3292 	/*
3293 	 * We either reached the end and use vm_map_max as the end
3294 	 * address, or we didn't and we use the next entry as the
3295 	 * end address.
3296 	 */
3297 	if (entry == NULL) {
3298 		vm_map_freehint_hole(map, hole_start,
3299 				     vm_map_max(map) - hole_start);
3300 	} else {
3301 		vm_map_freehint_hole(map, hole_start,
3302 				     entry->ba.start - hole_start);
3303 	}
3304 
3305 	lwkt_reltoken(&map->token);
3306 
3307 	return (KERN_SUCCESS);
3308 }
3309 
3310 /*
3311  * Remove the given address range from the target map.
3312  * This is the exported form of vm_map_delete.
3313  *
3314  * No requirements.
3315  */
3316 int
3317 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3318 {
3319 	int result;
3320 	int count;
3321 
3322 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3323 	vm_map_lock(map);
3324 	VM_MAP_RANGE_CHECK(map, start, end);
3325 	result = vm_map_delete(map, start, end, &count);
3326 	vm_map_unlock(map);
3327 	vm_map_entry_release(count);
3328 
3329 	return (result);
3330 }
3331 
3332 /*
3333  * Assert that the target map allows the specified privilege on the
3334  * entire address region given.  The entire region must be allocated.
3335  *
3336  * The caller must specify whether the vm_map is already locked or not.
3337  */
3338 boolean_t
3339 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3340 			vm_prot_t protection, boolean_t have_lock)
3341 {
3342 	vm_map_entry_t entry;
3343 	vm_map_entry_t tmp_entry;
3344 	boolean_t result;
3345 
3346 	if (have_lock == FALSE)
3347 		vm_map_lock_read(map);
3348 
3349 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
3350 		if (have_lock == FALSE)
3351 			vm_map_unlock_read(map);
3352 		return (FALSE);
3353 	}
3354 	entry = tmp_entry;
3355 
3356 	result = TRUE;
3357 	while (start < end) {
3358 		if (entry == NULL) {
3359 			result = FALSE;
3360 			break;
3361 		}
3362 
3363 		/*
3364 		 * No holes allowed!
3365 		 */
3366 
3367 		if (start < entry->ba.start) {
3368 			result = FALSE;
3369 			break;
3370 		}
3371 		/*
3372 		 * Check protection associated with entry.
3373 		 */
3374 
3375 		if ((entry->protection & protection) != protection) {
3376 			result = FALSE;
3377 			break;
3378 		}
3379 		/* go to next entry */
3380 		start = entry->ba.end;
3381 		entry = vm_map_rb_tree_RB_NEXT(entry);
3382 	}
3383 	if (have_lock == FALSE)
3384 		vm_map_unlock_read(map);
3385 	return (result);
3386 }
3387 
3388 /*
3389  * vm_map_backing structures are not shared across forks and must be
3390  * replicated.
3391  *
3392  * Generally speaking we must reallocate the backing_ba sequence and
3393  * also adjust it for any changes made to the base entry->ba.start and
3394  * entry->ba.end.  The first ba in the chain is of course &entry->ba,
3395  * so we only need to adjust subsequent ba's start, end, and offset.
3396  *
3397  * MAP_BACK_CLIPPED	- Called as part of a clipping replication.
3398  *			  Do not clear OBJ_ONEMAPPING.
3399  *
3400  * MAP_BACK_BASEOBJREFD - Called from vm_map_insert().  The base object
3401  *			  has already been referenced.
3402  */
3403 static
3404 void
3405 vm_map_backing_replicated(vm_map_t map, vm_map_entry_t entry, int flags)
3406 {
3407 	vm_map_backing_t ba;
3408 	vm_map_backing_t nba;
3409 	vm_object_t object;
3410 
3411 	ba = &entry->ba;
3412 	for (;;) {
3413 		object = ba->object;
3414 		ba->pmap = map->pmap;
3415 		if (object &&
3416 		    (entry->maptype == VM_MAPTYPE_VPAGETABLE ||
3417 		     entry->maptype == VM_MAPTYPE_NORMAL)) {
3418 			if (ba != &entry->ba ||
3419 			    (flags & MAP_BACK_BASEOBJREFD) == 0) {
3420 				vm_object_reference_quick(object);
3421 			}
3422 			vm_map_backing_attach(ba);
3423 			if ((flags & MAP_BACK_CLIPPED) == 0 &&
3424 			    object->ref_count > 1) {
3425 				vm_object_clear_flag(object, OBJ_ONEMAPPING);
3426 			}
3427 		}
3428 		if (ba->backing_ba == NULL)
3429 			break;
3430 		nba = kmalloc(sizeof(*nba), M_MAP_BACKING, M_INTWAIT);
3431 		*nba = *ba->backing_ba;
3432 		nba->offset += (ba->start - nba->start);  /* += (new - old) */
3433 		nba->start = ba->start;
3434 		nba->end = ba->end;
3435 		ba->backing_ba = nba;
3436 		ba = nba;
3437 		/* pmap is replaced at the top of the loop */
3438 	}
3439 }
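
/*
 * Worked example (illustrative): if a backing ba originally covered
 * [0x1000, 0x9000) at offset 0x2000 and the clipped entry chain now starts
 * at 0x3000, the replicated ba's offset becomes 0x2000 + (0x3000 - 0x1000)
 * = 0x4000, preserving the virtual-address-to-object-offset relationship.
 * The same arithmetic is applied in vm_map_backing_adjust_start() below.
 */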
3440 
3441 static
3442 void
3443 vm_map_backing_adjust_start(vm_map_entry_t entry, vm_ooffset_t start)
3444 {
3445 	vm_map_backing_t ba;
3446 
3447 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE ||
3448 	    entry->maptype == VM_MAPTYPE_NORMAL) {
3449 		for (ba = &entry->ba; ba; ba = ba->backing_ba) {
3450 			if (ba->object) {
3451 				lockmgr(&ba->object->backing_lk, LK_EXCLUSIVE);
3452 				ba->offset += (start - ba->start);
3453 				ba->start = start;
3454 				lockmgr(&ba->object->backing_lk, LK_RELEASE);
3455 			} else {
3456 				ba->offset += (start - ba->start);
3457 				ba->start = start;
3458 			}
3459 		}
3460 	} else {
3461 		/* not an object and can't be shadowed */
3462 	}
3463 }
3464 
3465 static
3466 void
3467 vm_map_backing_adjust_end(vm_map_entry_t entry, vm_ooffset_t end)
3468 {
3469 	vm_map_backing_t ba;
3470 
3471 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE ||
3472 	    entry->maptype == VM_MAPTYPE_NORMAL) {
3473 		for (ba = &entry->ba; ba; ba = ba->backing_ba) {
3474 			if (ba->object) {
3475 				lockmgr(&ba->object->backing_lk, LK_EXCLUSIVE);
3476 				ba->end = end;
3477 				lockmgr(&ba->object->backing_lk, LK_RELEASE);
3478 			} else {
3479 				ba->end = end;
3480 			}
3481 		}
3482 	} else {
3483 		/* not an object and can't be shadowed */
3484 	}
3485 }
3486 
3487 /*
3488  * Handles the dirty work of making src_entry and dst_entry copy-on-write
3489  * after src_entry has been cloned to dst_entry.  For normal entries only.
3490  *
3491  * The vm_maps must be exclusively locked.
3492  * The vm_map's token must be held.
3493  *
3494  * Because the maps are locked no faults can be in progress during the
3495  * operation.
3496  */
3497 static void
3498 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
3499 		  vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
3500 {
3501 	vm_object_t obj;
3502 
3503 	KKASSERT(dst_entry->maptype == VM_MAPTYPE_NORMAL ||
3504 		 dst_entry->maptype == VM_MAPTYPE_VPAGETABLE);
3505 
3506 	if (src_entry->wired_count &&
3507 	    src_entry->maptype != VM_MAPTYPE_VPAGETABLE) {
3508 		/*
3509 		 * Of course, wired down pages can't be set copy-on-write.
3510 		 * Cause wired pages to be copied into the new map by
3511 		 * simulating faults (the new pages are pageable)
3512 		 *
3513 		 * Scrap ba.object (its ref-count has not yet been adjusted
3514 		 * so we can just NULL out the field).  Remove the backing
3515 		 * store.
3516 		 *
3517 		 * Then call vm_fault_copy_entry() to create a new object
3518 		 * in dst_entry and copy the wired pages from src to dst.
3519 		 *
3520 		 * The fault-copy code doesn't work with virtual page
3521 		 * tables.
3522 		 */
3523 		if ((obj = dst_entry->ba.object) != NULL) {
3524 			vm_map_backing_detach(&dst_entry->ba);
3525 			dst_entry->ba.object = NULL;
3526 			vm_map_entry_dispose_ba(dst_entry->ba.backing_ba);
3527 			dst_entry->ba.backing_ba = NULL;
3528 			dst_entry->ba.backing_count = 0;
3529 		}
3530 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
3531 	} else {
3532 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
3533 			/*
3534 			 * If the source entry is not already marked NEEDS_COPY
3535 			 * we need to write-protect the PTEs.
3536 			 */
3537 			pmap_protect(src_map->pmap,
3538 				     src_entry->ba.start,
3539 				     src_entry->ba.end,
3540 				     src_entry->protection & ~VM_PROT_WRITE);
3541 		}
3542 
3543 		/*
3544 		 * dst_entry->ba.object might be stale.  Update it (its
3545 		 * ref-count has not yet been updated so just overwrite
3546 		 * the field).
3547 		 *
3548 		 * If there is no object then we are golden.  Also, in
3549 		 * this situation if there are no backing_ba linkages then
3550 		 * we can set ba.offset to whatever we want.  For now we
3551 		 * set the offset to 0 to make debugging object sizes
3552 		 * easier.
3553 		 */
3554 		obj = src_entry->ba.object;
3555 
3556 		if (obj) {
3557 			src_entry->eflags |= (MAP_ENTRY_COW |
3558 					      MAP_ENTRY_NEEDS_COPY);
3559 			dst_entry->eflags |= (MAP_ENTRY_COW |
3560 					      MAP_ENTRY_NEEDS_COPY);
3561 			KKASSERT(dst_entry->ba.offset == src_entry->ba.offset);
3562 		} else {
3563 			dst_entry->ba.offset = 0;
3564 		}
3565 
3566 		/*
3567 		 * Normal, allow the backing_ba link depth to
3568 		 * increase.
3569 		 */
3570 		pmap_copy(dst_map->pmap, src_map->pmap,
3571 			  dst_entry->ba.start,
3572 			  dst_entry->ba.end - dst_entry->ba.start,
3573 			  src_entry->ba.start);
3574 	}
3575 }
3576 
3577 /*
3578  * Create a vmspace for a new process and its related vm_map based on an
3579  * existing vmspace.  The new map inherits information from the old map
3580  * according to inheritance settings.
3581  *
3582  * The source map must not be locked.
3583  * No requirements.
3584  */
3585 static void vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map,
3586 			  vm_map_entry_t old_entry, int *countp);
3587 static void vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map,
3588 			  vm_map_entry_t old_entry, int *countp);
3589 
3590 struct vmspace *
3591 vmspace_fork(struct vmspace *vm1)
3592 {
3593 	struct vmspace *vm2;
3594 	vm_map_t old_map = &vm1->vm_map;
3595 	vm_map_t new_map;
3596 	vm_map_entry_t old_entry;
3597 	int count;
3598 
3599 	lwkt_gettoken(&vm1->vm_map.token);
3600 	vm_map_lock(old_map);
3601 
3602 	vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
3603 	lwkt_gettoken(&vm2->vm_map.token);
3604 
3605 	/*
3606 	 * We must bump the timestamp to force any concurrent fault
3607 	 * to retry.
3608 	 */
3609 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
3610 	      (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
3611 	new_map = &vm2->vm_map;	/* XXX */
3612 	new_map->timestamp = 1;
3613 
3614 	vm_map_lock(new_map);
3615 
3616 	count = old_map->nentries;
3617 	count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);
3618 
3619 	RB_FOREACH(old_entry, vm_map_rb_tree, &old_map->rb_root) {
3620 		switch(old_entry->maptype) {
3621 		case VM_MAPTYPE_SUBMAP:
3622 			panic("vm_map_fork: encountered a submap");
3623 			break;
3624 		case VM_MAPTYPE_UKSMAP:
3625 			vmspace_fork_uksmap_entry(old_map, new_map,
3626 						  old_entry, &count);
3627 			break;
3628 		case VM_MAPTYPE_NORMAL:
3629 		case VM_MAPTYPE_VPAGETABLE:
3630 			vmspace_fork_normal_entry(old_map, new_map,
3631 						  old_entry, &count);
3632 			break;
3633 		}
3634 	}
3635 
3636 	new_map->size = old_map->size;
3637 	vm_map_unlock(new_map);
3638 	vm_map_unlock(old_map);
3639 	vm_map_entry_release(count);
3640 
3641 	lwkt_reltoken(&vm2->vm_map.token);
3642 	lwkt_reltoken(&vm1->vm_map.token);
3643 
3644 	return (vm2);
3645 }
3646 
3647 static
3648 void
3649 vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map,
3650 			  vm_map_entry_t old_entry, int *countp)
3651 {
3652 	vm_map_entry_t new_entry;
3653 	vm_map_backing_t ba;
3654 	vm_object_t object;
3655 
3656 	/*
3657 	 * If the backing_ba link list gets too long then fault it
3658 	 * all into the head object and dispose of the list.  We do
3659 	 * this in old_entry prior to cloning in order to benefit both
3660 	 * parent and child.
3661 	 *
3662 	 * We can test our fronting object's size against its
3663 	 * resident_page_count for a really cheap (but probably not perfect)
3664 	 * all-shadowed test, allowing us to disconnect the backing_ba
3665 	 * link list early.
3666 	 *
3667 	 * XXX Currently doesn't work for VPAGETABLEs (the entire object
3668 	 *     would have to be copied).
3669 	 */
3670 	object = old_entry->ba.object;
3671 	if (old_entry->ba.backing_ba &&
3672 	    old_entry->maptype != VM_MAPTYPE_VPAGETABLE &&
3673 	    (old_entry->ba.backing_count >= vm_map_backing_limit ||
3674 	     (vm_map_backing_shadow_test && object &&
3675 	      object->size == object->resident_page_count))) {
3676 		/*
3677 		 * If there are too many backing_ba linkages we
3678 		 * collapse everything into the head.
3679 		 *
3680 		 * This will also remove all the PTEs.
3681 		 */
3682 		if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY)
3683 			vm_map_entry_shadow(old_entry);
3684 		if (object == NULL)
3685 			vm_map_entry_allocate_object(old_entry);
3686 		if (vm_fault_collapse(old_map, old_entry) == KERN_SUCCESS) {
3687 			ba = old_entry->ba.backing_ba;
3688 			old_entry->ba.backing_ba = NULL;
3689 			old_entry->ba.backing_count = 0;
3690 			vm_map_entry_dispose_ba(ba);
3691 		}
3692 	}
3693 	object = NULL;	/* object variable is now invalid */
3694 
3695 	/*
3696 	 * Fork the entry
3697 	 */
3698 	switch (old_entry->inheritance) {
3699 	case VM_INHERIT_NONE:
3700 		break;
3701 	case VM_INHERIT_SHARE:
3702 		/*
3703 		 * Clone the entry as a shared entry.  This will look like
3704 		 * shared memory across the old and the new process.  We must
3705 		 * ensure that the object is allocated.
3706 		 */
3707 		if (old_entry->ba.object == NULL)
3708 			vm_map_entry_allocate_object(old_entry);
3709 
3710 		if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3711 			/*
3712 			 * Create the fronting vm_map_backing for
3713 			 * an entry which needs a copy, plus an extra
3714 			 * ref because we are going to duplicate it
3715 			 * in the fork.
3716 			 *
3717 			 * The call to vm_map_entry_shadow() will also clear
3718 			 * OBJ_ONEMAPPING.
3719 			 *
3720 			 * XXX no more collapse.  Still need extra ref
3721 			 * for the fork.
3722 			 */
3723 			vm_map_entry_shadow(old_entry);
3724 		} else if (old_entry->ba.object) {
3725 			object = old_entry->ba.object;
3726 		}
3727 
3728 		/*
3729 		 * Clone the entry.  We've already bumped the ref on
3730 		 * the vm_object for our new entry.
3731 		 */
3732 		new_entry = vm_map_entry_create(countp);
3733 		*new_entry = *old_entry;
3734 
3735 		new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3736 		new_entry->wired_count = 0;
3737 
3738 		/*
3739 		 * Replicate and index the vm_map_backing.  Don't share
3740 		 * the vm_map_backing across vm_map's (only across clips).
3741 		 *
3742 		 * Insert the entry into the new map -- we know we're
3743 		 * inserting at the end of the new map.
3744 		 */
3745 		vm_map_backing_replicated(new_map, new_entry, 0);
3746 		vm_map_entry_link(new_map, new_entry);
3747 
3748 		/*
3749 		 * Update the physical map
3750 		 */
3751 		pmap_copy(new_map->pmap, old_map->pmap,
3752 			  new_entry->ba.start,
3753 			  (old_entry->ba.end - old_entry->ba.start),
3754 			  old_entry->ba.start);
3755 		break;
3756 	case VM_INHERIT_COPY:
3757 		/*
3758 		 * Clone the entry and link the copy into the new map.
3759 		 *
3760 		 * Note that ref-counting adjustment for old_entry->ba.object
3761 		 * (if it isn't a special map that is) is handled by
3762 		 * vm_map_copy_entry().
3763 		 */
3764 		new_entry = vm_map_entry_create(countp);
3765 		*new_entry = *old_entry;
3766 
3767 		new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3768 		new_entry->wired_count = 0;
3769 
3770 		vm_map_backing_replicated(new_map, new_entry, 0);
3771 		vm_map_entry_link(new_map, new_entry);
3772 
3773 		/*
3774 		 * This does the actual dirty work of making both entries
3775 		 * copy-on-write, and will also handle the fronting object.
3776 		 */
3777 		vm_map_copy_entry(old_map, new_map, old_entry, new_entry);
3778 		break;
3779 	}
3780 }
3781 
3782 /*
3783  * When forking user-kernel shared maps, the map might change in the
3784  * child so do not try to copy the underlying pmap entries.
3785  */
3786 static
3787 void
3788 vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map,
3789 			  vm_map_entry_t old_entry, int *countp)
3790 {
3791 	vm_map_entry_t new_entry;
3792 
3793 	new_entry = vm_map_entry_create(countp);
3794 	*new_entry = *old_entry;
3795 
3796 	new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3797 	new_entry->wired_count = 0;
3798 	KKASSERT(new_entry->ba.backing_ba == NULL);
3799 	vm_map_backing_replicated(new_map, new_entry, 0);
3800 
3801 	vm_map_entry_link(new_map, new_entry);
3802 }
3803 
3804 /*
3805  * Create an auto-grow stack entry
3806  *
3807  * No requirements.
3808  */
3809 int
3810 vm_map_stack (vm_map_t map, vm_offset_t *addrbos, vm_size_t max_ssize,
3811 	      int flags, vm_prot_t prot, vm_prot_t max, int cow)
3812 {
3813 	vm_map_entry_t	prev_entry;
3814 	vm_map_entry_t	next;
3815 	vm_size_t	init_ssize;
3816 	int		rv;
3817 	int		count;
3818 	vm_offset_t	tmpaddr;
3819 
3820 	cow |= MAP_IS_STACK;
3821 
3822 	if (max_ssize < sgrowsiz)
3823 		init_ssize = max_ssize;
3824 	else
3825 		init_ssize = sgrowsiz;
3826 
3827 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3828 	vm_map_lock(map);
3829 
3830 	/*
3831 	 * Find space for the mapping
3832 	 */
3833 	if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
3834 		if (vm_map_findspace(map, *addrbos, max_ssize, 1,
3835 				     flags, &tmpaddr)) {
3836 			vm_map_unlock(map);
3837 			vm_map_entry_release(count);
3838 			return (KERN_NO_SPACE);
3839 		}
3840 		*addrbos = tmpaddr;
3841 	}
3842 
3843 	/* If addr is already mapped, no go */
3844 	if (vm_map_lookup_entry(map, *addrbos, &prev_entry)) {
3845 		vm_map_unlock(map);
3846 		vm_map_entry_release(count);
3847 		return (KERN_NO_SPACE);
3848 	}
3849 
3850 #if 0
3851 	/* XXX already handled by kern_mmap() */
3852 	/* If we would blow our VMEM resource limit, no go */
3853 	if (map->size + init_ssize >
3854 	    curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3855 		vm_map_unlock(map);
3856 		vm_map_entry_release(count);
3857 		return (KERN_NO_SPACE);
3858 	}
3859 #endif
3860 
3861 	/*
3862 	 * If we can't accommodate max_ssize in the current mapping,
3863 	 * no go.  However, we need to be aware that subsequent user
3864 	 * mappings might map into the space we have reserved for
3865 	 * stack, and currently this space is not protected.
3866 	 *
3867 	 * Hopefully we will at least detect this condition
3868 	 * when we try to grow the stack.
3869 	 */
3870 	if (prev_entry)
3871 		next = vm_map_rb_tree_RB_NEXT(prev_entry);
3872 	else
3873 		next = RB_MIN(vm_map_rb_tree, &map->rb_root);
3874 
3875 	if (next && next->ba.start < *addrbos + max_ssize) {
3876 		vm_map_unlock(map);
3877 		vm_map_entry_release(count);
3878 		return (KERN_NO_SPACE);
3879 	}
3880 
3881 	/*
3882 	 * We initially map a stack of only init_ssize.  We will
3883 	 * grow as needed later.  Since this is to be a grow
3884 	 * down stack, we map at the top of the range.
3885 	 *
3886 	 * Note: we would normally expect prot and max to be
3887 	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
3888 	 * eliminate these as input parameters, and just
3889 	 * pass these values here in the insert call.
3890 	 */
3891 	rv = vm_map_insert(map, &count, NULL, NULL,
3892 			   0, *addrbos + max_ssize - init_ssize,
3893 	                   *addrbos + max_ssize,
3894 			   VM_MAPTYPE_NORMAL,
3895 			   VM_SUBSYS_STACK, prot, max, cow);
3896 
3897 	/* Now set the avail_ssize amount */
3898 	if (rv == KERN_SUCCESS) {
3899 		if (prev_entry)
3900 			next = vm_map_rb_tree_RB_NEXT(prev_entry);
3901 		else
3902 			next = RB_MIN(vm_map_rb_tree, &map->rb_root);
3903 		if (prev_entry != NULL) {
3904 			vm_map_clip_end(map,
3905 					prev_entry,
3906 					*addrbos + max_ssize - init_ssize,
3907 					&count);
3908 		}
3909 		if (next->ba.end   != *addrbos + max_ssize ||
3910 		    next->ba.start != *addrbos + max_ssize - init_ssize){
3911 			panic ("Bad entry start/end for new stack entry");
3912 		} else {
3913 			next->aux.avail_ssize = max_ssize - init_ssize;
3914 		}
3915 	}
3916 
3917 	vm_map_unlock(map);
3918 	vm_map_entry_release(count);
3919 	return (rv);
3920 }
3921 
3922 /*
3923  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3924  * desired address is already mapped, or if we successfully grow
3925  * the stack.  Also returns KERN_SUCCESS if addr is outside the
3926  * stack range (this is strange, but preserves compatibility with
3927  * the grow function in vm_machdep.c).
3928  *
3929  * No requirements.
3930  */
3931 int
3932 vm_map_growstack (vm_map_t map, vm_offset_t addr)
3933 {
3934 	vm_map_entry_t prev_entry;
3935 	vm_map_entry_t stack_entry;
3936 	vm_map_entry_t next;
3937 	struct vmspace *vm;
3938 	struct lwp *lp;
3939 	struct proc *p;
3940 	vm_offset_t    end;
3941 	int grow_amount;
3942 	int rv = KERN_SUCCESS;
3943 	int is_procstack;
3944 	int use_read_lock = 1;
3945 	int count;
3946 
3947 	/*
3948 	 * Find the vm
3949 	 */
3950 	lp = curthread->td_lwp;
3951 	p = curthread->td_proc;
3952 	KKASSERT(lp != NULL);
3953 	vm = lp->lwp_vmspace;
3954 
3955 	/*
3956 	 * Growstack is only allowed on the current process.  We disallow
3957 	 * other use cases, e.g. trying to access memory via procfs that
3958 	 * the stack hasn't grown into.
3959 	 */
3960 	if (map != &vm->vm_map) {
3961 		return KERN_FAILURE;
3962 	}
3963 
3964 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
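	/*
	 * We start with a shared (read) lock and upgrade to an exclusive
	 * lock only when the map must actually be modified.  A failed
	 * upgrade means the lock was lost, so we come back here and
	 * retry holding the exclusive lock.
	 */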
3965 Retry:
3966 	if (use_read_lock)
3967 		vm_map_lock_read(map);
3968 	else
3969 		vm_map_lock(map);
3970 
3971 	/*
3972 	 * If addr is already in the entry range, no need to grow.
3973 	 * prev_entry returns NULL if addr is at the head.
3974 	 */
3975 	if (vm_map_lookup_entry(map, addr, &prev_entry))
3976 		goto done;
3977 	if (prev_entry)
3978 		stack_entry = vm_map_rb_tree_RB_NEXT(prev_entry);
3979 	else
3980 		stack_entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
3981 
3982 	if (stack_entry == NULL)
3983 		goto done;
3984 	if (prev_entry == NULL)
3985 		end = stack_entry->ba.start - stack_entry->aux.avail_ssize;
3986 	else
3987 		end = prev_entry->ba.end;
3988 
3989 	/*
3990 	 * This next test mimics the old grow function in vm_machdep.c.
3991 	 * It really doesn't quite make sense, but we do it anyway
3992 	 * for compatibility.
3993 	 *
3994 	 * If the stack is not growable, return success.  This signals the
3995 	 * caller to proceed as it normally would with normal vm.
3996 	 */
3997 	if (stack_entry->aux.avail_ssize < 1 ||
3998 	    addr >= stack_entry->ba.start ||
3999 	    addr <  stack_entry->ba.start - stack_entry->aux.avail_ssize) {
4000 		goto done;
4001 	}
4002 
4003 	/* Find the minimum grow amount */
4004 	grow_amount = roundup (stack_entry->ba.start - addr, PAGE_SIZE);
4005 	if (grow_amount > stack_entry->aux.avail_ssize) {
4006 		rv = KERN_NO_SPACE;
4007 		goto done;
4008 	}
4009 
4010 	/*
4011 	 * If there is no longer enough space between the entries,
4012 	 * no go; adjust the available space.  Note: this
4013 	 * should only happen if the user has mapped into the
4014 	 * stack area after the stack was created, and is
4015 	 * probably an error.
4016 	 *
4017 	 * This also effectively destroys any guard page the user
4018 	 * might have intended by limiting the stack size.
4019 	 */
4020 	if (grow_amount > stack_entry->ba.start - end) {
4021 		if (use_read_lock && vm_map_lock_upgrade(map)) {
4022 			/* lost lock */
4023 			use_read_lock = 0;
4024 			goto Retry;
4025 		}
4026 		use_read_lock = 0;
4027 		stack_entry->aux.avail_ssize = stack_entry->ba.start - end;
4028 		rv = KERN_NO_SPACE;
4029 		goto done;
4030 	}
4031 
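	/*
	 * The faulting address belongs to the main process stack if it
	 * lies at or above vm_maxsaddr, the lowest address the stack is
	 * allowed to grow down to.
	 */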
4032 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
4033 
4034 	/* If this is the main process stack, see if we're over the
4035 	 * stack limit.
4036 	 */
4037 	if (is_procstack && (vm->vm_ssize + grow_amount >
4038 			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
4039 		rv = KERN_NO_SPACE;
4040 		goto done;
4041 	}
4042 
4043 	/* Round up the grow amount to a multiple of sgrowsiz */
4044 	grow_amount = roundup (grow_amount, sgrowsiz);
4045 	if (grow_amount > stack_entry->aux.avail_ssize) {
4046 		grow_amount = stack_entry->aux.avail_ssize;
4047 	}
4048 	if (is_procstack && (vm->vm_ssize + grow_amount >
4049 	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
4050 		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur - vm->vm_ssize;
4051 	}
4052 
4053 	/* If we would blow our VMEM resource limit, no go */
4054 	if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
4055 		rv = KERN_NO_SPACE;
4056 		goto done;
4057 	}
4058 
4059 	if (use_read_lock && vm_map_lock_upgrade(map)) {
4060 		/* lost lock */
4061 		use_read_lock = 0;
4062 		goto Retry;
4063 	}
4064 	use_read_lock = 0;
4065 
4066 	/* Get the preliminary new entry start value */
4067 	addr = stack_entry->ba.start - grow_amount;
4068 
4069 	/* If this puts us into the previous entry, cut back our growth
4070 	 * to the available space.  Also, see the note above.
4071 	 */
4072 	if (addr < end) {
4073 		stack_entry->aux.avail_ssize = stack_entry->ba.start - end;
4074 		addr = end;
4075 	}
4076 
4077 	rv = vm_map_insert(map, &count, NULL, NULL,
4078 			   0, addr, stack_entry->ba.start,
4079 			   VM_MAPTYPE_NORMAL,
4080 			   VM_SUBSYS_STACK, VM_PROT_ALL, VM_PROT_ALL, 0);
4081 
4082 	/* Adjust the available stack space by the amount we grew. */
4083 	if (rv == KERN_SUCCESS) {
4084 		if (prev_entry) {
4085 			vm_map_clip_end(map, prev_entry, addr, &count);
4086 			next = vm_map_rb_tree_RB_NEXT(prev_entry);
4087 		} else {
4088 			next = RB_MIN(vm_map_rb_tree, &map->rb_root);
4089 		}
4090 		if (next->ba.end != stack_entry->ba.start  ||
4091 		    next->ba.start != addr) {
4092 			panic ("Bad stack grow start/end in new stack entry");
4093 		} else {
4094 			next->aux.avail_ssize =
4095 				stack_entry->aux.avail_ssize -
4096 				(next->ba.end - next->ba.start);
4097 			if (is_procstack) {
4098 				vm->vm_ssize += next->ba.end -
4099 						next->ba.start;
4100 			}
4101 		}
4102 
4103 		if (map->flags & MAP_WIREFUTURE)
4104 			vm_map_unwire(map, next->ba.start, next->ba.end, FALSE);
4105 	}
4106 
4107 done:
4108 	if (use_read_lock)
4109 		vm_map_unlock_read(map);
4110 	else
4111 		vm_map_unlock(map);
4112 	vm_map_entry_release(count);
4113 	return (rv);
4114 }
4115 
4116 /*
4117  * Unshare the specified VM space for exec.  If other processes are
4118  * mapped to it, then create a new one.  The new vmspace starts out empty.
4119  *
4120  * No requirements.
4121  */
4122 void
4123 vmspace_exec(struct proc *p, struct vmspace *vmcopy)
4124 {
4125 	struct vmspace *oldvmspace = p->p_vmspace;
4126 	struct vmspace *newvmspace;
4127 	vm_map_t map = &p->p_vmspace->vm_map;
4128 
4129 	/*
4130 	 * If we are execing a resident vmspace we fork it, otherwise
4131 	 * we create a new vmspace.  Note that exitingcnt is not
4132 	 * copied to the new vmspace.
4133 	 */
4134 	lwkt_gettoken(&oldvmspace->vm_map.token);
4135 	if (vmcopy)  {
4136 		newvmspace = vmspace_fork(vmcopy);
4137 		lwkt_gettoken(&newvmspace->vm_map.token);
4138 	} else {
4139 		newvmspace = vmspace_alloc(vm_map_min(map), vm_map_max(map));
4140 		lwkt_gettoken(&newvmspace->vm_map.token);
4141 		bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
4142 		      (caddr_t)&oldvmspace->vm_endcopy -
4143 		       (caddr_t)&oldvmspace->vm_startcopy);
4144 	}
4145 
4146 	/*
4147 	 * Finish initializing the vmspace before assigning it
4148 	 * to the process.  The vmspace will become the current vmspace
4149 	 * if p == curproc.
4150 	 */
4151 	pmap_pinit2(vmspace_pmap(newvmspace));
4152 	pmap_replacevm(p, newvmspace, 0);
4153 	lwkt_reltoken(&newvmspace->vm_map.token);
4154 	lwkt_reltoken(&oldvmspace->vm_map.token);
4155 	vmspace_rel(oldvmspace);
4156 }
4157 
4158 /*
4159  * Unshare the specified VM space for forcing COW.  This
4160  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
4161  */
4162 void
4163 vmspace_unshare(struct proc *p)
4164 {
4165 	struct vmspace *oldvmspace = p->p_vmspace;
4166 	struct vmspace *newvmspace;
4167 
4168 	lwkt_gettoken(&oldvmspace->vm_map.token);
4169 	if (vmspace_getrefs(oldvmspace) == 1) {
4170 		lwkt_reltoken(&oldvmspace->vm_map.token);
4171 		return;
4172 	}
4173 	newvmspace = vmspace_fork(oldvmspace);
4174 	lwkt_gettoken(&newvmspace->vm_map.token);
4175 	pmap_pinit2(vmspace_pmap(newvmspace));
4176 	pmap_replacevm(p, newvmspace, 0);
4177 	lwkt_reltoken(&newvmspace->vm_map.token);
4178 	lwkt_reltoken(&oldvmspace->vm_map.token);
4179 	vmspace_rel(oldvmspace);
4180 }
4181 
4182 /*
4183  * vm_map_hint: return the beginning of the best area suitable for
4184  * creating a new mapping with "prot" protection.
4185  *
4186  * No requirements.
4187  */
4188 vm_offset_t
4189 vm_map_hint(struct proc *p, vm_offset_t addr, vm_prot_t prot)
4190 {
4191 	struct vmspace *vms = p->p_vmspace;
4192 	struct rlimit limit;
4193 	rlim_t dsiz;
4194 
4195 	/*
4196 	 * Acquire the datasize limit for the mmap() operation.  It is
4197 	 * used to place default hints beyond the data segment reservation.
4198 	 */
4199 	if (kern_getrlimit(RLIMIT_DATA, &limit))
4200 		limit.rlim_cur = maxdsiz;
4201 	dsiz = limit.rlim_cur;
4202 
4203 	if (!randomize_mmap || addr != 0) {
4204 		/*
4205 		 * Set a reasonable start point for the hint if it was
4206 		 * not specified or if it falls within the heap space.
4207 		 * Hinted mmap()s do not allocate out of the heap space.
4208 		 */
4209 		if (addr == 0 ||
4210 		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
4211 		     addr < round_page((vm_offset_t)vms->vm_daddr + dsiz))) {
4212 			addr = round_page((vm_offset_t)vms->vm_daddr + dsiz);
4213 		}
4214 
4215 		return addr;
4216 	}
4217 
4218 	/*
4219 	 * randomize_mmap && addr == 0.  For now randomize the
4220 	 * address within a dsiz range beyond the data limit.
4221 	 */
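	/*
	 * For example, with a 64MB datasize limit the randomized hint
	 * lands somewhere in [vm_daddr + 64MB, vm_daddr + 128MB).
	 */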
4222 	addr = (vm_offset_t)vms->vm_daddr + dsiz;
4223 	if (dsiz)
4224 		addr += (karc4random64() & 0x7FFFFFFFFFFFFFFFLU) % dsiz;
4225 	return (round_page(addr));
4226 }
4227 
4228 /*
4229  * Finds the VM object, offset, and protection for a given virtual address
4230  * in the specified map, assuming a page fault of the type specified.
4231  *
4232  * Leaves the map in question locked for read; return values are guaranteed
4233  * until a vm_map_lookup_done call is performed.  Note that the map argument
4234  * is in/out; the returned map must be used in the call to vm_map_lookup_done.
4235  *
4236  * A handle (out_entry) is returned for use in vm_map_lookup_done, to make
4237  * that fast.
4238  *
4239  * If a lookup is requested with "write protection" specified, the map may
4240  * be changed to perform virtual copying operations, although the data
4241  * referenced will remain the same.
4242  *
4243  * No requirements.
4244  */
4245 int
4246 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
4247 	      vm_offset_t vaddr,
4248 	      vm_prot_t fault_typea,
4249 	      vm_map_entry_t *out_entry,	/* OUT */
4250 	      struct vm_map_backing **bap,	/* OUT */
4251 	      vm_pindex_t *pindex,		/* OUT */
4252 	      vm_prot_t *out_prot,		/* OUT */
4253 	      int *wflags)			/* OUT */
4254 {
4255 	vm_map_entry_t entry;
4256 	vm_map_t map = *var_map;
4257 	vm_prot_t prot;
4258 	vm_prot_t fault_type = fault_typea;
4259 	int use_read_lock = 1;
4260 	int rv = KERN_SUCCESS;
4261 	int count;
4262 	thread_t td = curthread;
4263 
4264 	/*
4265 	 * vm_map_entry_reserve() implements an important mitigation
4266 	 * against mmap() spam running the kernel out of vm_map_entry
4267 	 * structures, but it can also cause an infinite call recursion.
4268 	 * Use td_nest_count to prevent an infinite recursion (allows
4269 	 * the vm_map code to dig into the pcpu vm_map_entry reserve).
4270 	 */
4271 	count = 0;
4272 	if (td->td_nest_count == 0) {
4273 		++td->td_nest_count;
4274 		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
4275 		--td->td_nest_count;
4276 	}
4277 RetryLookup:
4278 	if (use_read_lock)
4279 		vm_map_lock_read(map);
4280 	else
4281 		vm_map_lock(map);
4282 
4283 	/*
4284 	 * Always do a full lookup.  The hint doesn't get us much anymore
4285 	 * now that the map is RB'd.
4286 	 */
4287 	cpu_ccfence();
4288 	*out_entry = NULL;
4289 	*bap = NULL;
4290 
4291 	{
4292 		vm_map_entry_t tmp_entry;
4293 
4294 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
4295 			rv = KERN_INVALID_ADDRESS;
4296 			goto done;
4297 		}
4298 		entry = tmp_entry;
4299 		*out_entry = entry;
4300 	}
4301 
4302 	/*
4303 	 * Handle submaps.
4304 	 */
4305 	if (entry->maptype == VM_MAPTYPE_SUBMAP) {
4306 		vm_map_t old_map = map;
4307 
4308 		*var_map = map = entry->ba.sub_map;
4309 		if (use_read_lock)
4310 			vm_map_unlock_read(old_map);
4311 		else
4312 			vm_map_unlock(old_map);
4313 		use_read_lock = 1;
4314 		goto RetryLookup;
4315 	}
4316 
4317 	/*
4318 	 * Check whether this task is allowed to have this page.
4319 	 * Note the special case for MAP_ENTRY_COW pages with an override.
4320 	 * This is to implement a forced COW for debuggers.
4321 	 */
4322 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
4323 		prot = entry->max_protection;
4324 	else
4325 		prot = entry->protection;
4326 
4327 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
4328 	if ((fault_type & prot) != fault_type) {
4329 		rv = KERN_PROTECTION_FAILURE;
4330 		goto done;
4331 	}
4332 
4333 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
4334 	    (entry->eflags & MAP_ENTRY_COW) &&
4335 	    (fault_type & VM_PROT_WRITE) &&
4336 	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
4337 		rv = KERN_PROTECTION_FAILURE;
4338 		goto done;
4339 	}
4340 
4341 	/*
4342 	 * If this page is not pageable, we have to get it for all possible
4343 	 * accesses.
4344 	 */
4345 	*wflags = 0;
4346 	if (entry->wired_count) {
4347 		*wflags |= FW_WIRED;
4348 		prot = fault_type = entry->protection;
4349 	}
4350 
4351 	/*
4352 	 * Virtual page tables may need to update the accessed (A) bit
4353 	 * in a page table entry.  Upgrade the fault to a write fault for
4354 	 * that case if the map will support it.  If the map does not support
4355 	 * it the page table entry simply will not be updated.
4356 	 */
4357 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
4358 		if (prot & VM_PROT_WRITE)
4359 			fault_type |= VM_PROT_WRITE;
4360 	}
4361 
4362 	if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
4363 	    pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
4364 		if ((prot & VM_PROT_WRITE) == 0)
4365 			fault_type |= VM_PROT_WRITE;
4366 	}
4367 
4368 	/*
4369 	 * Only NORMAL and VPAGETABLE maps are object-based.  UKSMAPs are not.
4370 	 */
4371 	if (entry->maptype != VM_MAPTYPE_NORMAL &&
4372 	    entry->maptype != VM_MAPTYPE_VPAGETABLE) {
4373 		*bap = NULL;
4374 		goto skip;
4375 	}
4376 
4377 	/*
4378 	 * If the entry was copy-on-write, we either shadow it or demote access.
4379 	 */
4380 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4381 		/*
4382 		 * If we want to write the page, we may as well handle that
4383 		 * now since we've got the map locked.
4384 		 *
4385 		 * If we don't need to write the page, we just demote the
4386 		 * permissions allowed.
4387 		 */
4388 		if (fault_type & VM_PROT_WRITE) {
4389 			/*
4390 			 * Not allowed if TDF_NOFAULT is set as the shadowing
4391 			 * operation can deadlock against the faulting
4392 			 * function due to the copy-on-write.
4393 			 */
4394 			if (curthread->td_flags & TDF_NOFAULT) {
4395 				rv = KERN_FAILURE_NOFAULT;
4396 				goto done;
4397 			}
4398 
4399 			/*
4400 			 * Make a new vm_map_backing + object, and place it
4401 			 * in the object chain.  Note that no new references
4402 			 * have appeared -- one just moved from the map to
4403 			 * the new object.
4404 			 */
4405 			if (use_read_lock && vm_map_lock_upgrade(map)) {
4406 				/* lost lock */
4407 				use_read_lock = 0;
4408 				goto RetryLookup;
4409 			}
4410 			use_read_lock = 0;
4411 			vm_map_entry_shadow(entry);
4412 			*wflags |= FW_DIDCOW;
4413 		} else {
4414 			/*
4415 			 * We're attempting to read a copy-on-write page --
4416 			 * don't allow writes.
4417 			 */
4418 			prot &= ~VM_PROT_WRITE;
4419 		}
4420 	}
4421 
4422 	/*
4423 	 * Create an object if necessary.  This code also handles
4424 	 * partitioning large entries to improve vm_fault performance.
4425 	 */
4426 	if (entry->ba.object == NULL && !map->system_map) {
4427 		if (use_read_lock && vm_map_lock_upgrade(map))  {
4428 			/* lost lock */
4429 			use_read_lock = 0;
4430 			goto RetryLookup;
4431 		}
4432 		use_read_lock = 0;
4433 
4434 		/*
4435 		 * Partition large entries, giving each its own VM object,
4436 		 * to improve concurrent fault performance.  This is only
4437 		 * applicable to userspace.
4438 		 */
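		/*
		 * The XOR test below is non-zero only when ba.start and
		 * ba.end do not fall within the same partition-sized
		 * block, i.e. only entries crossing a partition boundary
		 * are worth partitioning.
		 */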
4439 		if (map != &kernel_map &&
4440 		    entry->maptype == VM_MAPTYPE_NORMAL &&
4441 		    ((entry->ba.start ^ entry->ba.end) &
4442 		     ~MAP_ENTRY_PARTITION_MASK) &&
4443 		    vm_map_partition_enable) {
4444 			if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
4445 				entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
4446 				++mycpu->gd_cnt.v_intrans_coll;
4447 				++mycpu->gd_cnt.v_intrans_wait;
4448 				vm_map_transition_wait(map, 0);
4449 				goto RetryLookup;
4450 			}
4451 			vm_map_entry_partition(map, entry, vaddr, &count);
4452 		}
4453 		vm_map_entry_allocate_object(entry);
4454 	}
4455 
4456 	/*
4457 	 * Return the object/offset from this entry.  If the entry was
4458 	 * copy-on-write or empty, it has been fixed up.
4459 	 */
4460 	*bap = &entry->ba;
4461 
4462 skip:
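	/*
	 * Translate the virtual address into a page index relative to the
	 * backing store: the byte offset within the entry plus the entry's
	 * offset into the backing object, converted to pages.
	 */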
4463 	*pindex = OFF_TO_IDX((vaddr - entry->ba.start) + entry->ba.offset);
4464 
4465 	/*
4466 	 * Return the resulting protection.  On success we return with a
4467 	 * read lock held on the map.  On failure we return with the map
4468 	 * unlocked.
4469 	 */
4470 	*out_prot = prot;
4471 done:
4472 	if (rv == KERN_SUCCESS) {
4473 		if (use_read_lock == 0)
4474 			vm_map_lock_downgrade(map);
4475 	} else if (use_read_lock) {
4476 		vm_map_unlock_read(map);
4477 	} else {
4478 		vm_map_unlock(map);
4479 	}
4480 	if (count > 0)
4481 		vm_map_entry_release(count);
4482 
4483 	return (rv);
4484 }
4485 
4486 /*
4487  * Releases locks acquired by a vm_map_lookup()
4488  * (according to the handle returned by that lookup).
4489  *
4490  * No other requirements.
4491  */
4492 void
4493 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
4494 {
4495 	/*
4496 	 * Unlock the main-level map
4497 	 */
4498 	vm_map_unlock_read(map);
4499 	if (count)
4500 		vm_map_entry_release(count);
4501 }
4502 
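/*
 * Clip the entry down to the partition-aligned block containing vaddr so
 * that concurrent faults against a large mapping operate on smaller,
 * independent entries.
 */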
4503 static void
4504 vm_map_entry_partition(vm_map_t map, vm_map_entry_t entry,
4505 		       vm_offset_t vaddr, int *countp)
4506 {
4507 	vaddr &= ~MAP_ENTRY_PARTITION_MASK;
4508 	vm_map_clip_start(map, entry, vaddr, countp);
4509 	vaddr += MAP_ENTRY_PARTITION_SIZE;
4510 	vm_map_clip_end(map, entry, vaddr, countp);
4511 }
4512 
4513 /*
4514  * Quick hack, needs some help to make it more SMP friendly.
4515  */
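/*
 * Typical usage (sketch): an interlock blocks out any other interlock
 * request that overlaps the given address range until it is released.
 *
 *	struct vm_map_ilock ilock;
 *
 *	vm_map_interlock(map, &ilock, start, end);
 *	... operate on the address range [start, end) ...
 *	vm_map_deinterlock(map, &ilock);
 */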
4516 void
4517 vm_map_interlock(vm_map_t map, struct vm_map_ilock *ilock,
4518 		 vm_offset_t ran_beg, vm_offset_t ran_end)
4519 {
4520 	struct vm_map_ilock *scan;
4521 
4522 	ilock->ran_beg = ran_beg;
4523 	ilock->ran_end = ran_end;
4524 	ilock->flags = 0;
4525 
4526 	spin_lock(&map->ilock_spin);
4527 restart:
4528 	for (scan = map->ilock_base; scan; scan = scan->next) {
4529 		if (ran_end > scan->ran_beg && ran_beg < scan->ran_end) {
4530 			scan->flags |= ILOCK_WAITING;
4531 			ssleep(scan, &map->ilock_spin, 0, "ilock", 0);
4532 			goto restart;
4533 		}
4534 	}
4535 	ilock->next = map->ilock_base;
4536 	map->ilock_base = ilock;
4537 	spin_unlock(&map->ilock_spin);
4538 }
4539 
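/*
 * Remove the ilock from the map's list and wake up any thread blocked
 * in vm_map_interlock() waiting for the range to be released.
 */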
4540 void
4541 vm_map_deinterlock(vm_map_t map, struct  vm_map_ilock *ilock)
4542 {
4543 	struct vm_map_ilock *scan;
4544 	struct vm_map_ilock **scanp;
4545 
4546 	spin_lock(&map->ilock_spin);
4547 	scanp = &map->ilock_base;
4548 	while ((scan = *scanp) != NULL) {
4549 		if (scan == ilock) {
4550 			*scanp = ilock->next;
4551 			spin_unlock(&map->ilock_spin);
4552 			if (ilock->flags & ILOCK_WAITING)
4553 				wakeup(ilock);
4554 			return;
4555 		}
4556 		scanp = &scan->next;
4557 	}
4558 	spin_unlock(&map->ilock_spin);
4559 	panic("vm_map_deinterlock: missing ilock!");
4560 }
4561 
4562 #include "opt_ddb.h"
4563 #ifdef DDB
4564 #include <ddb/ddb.h>
4565 
4566 /*
4567  * Debugging only
4568  */
4569 DB_SHOW_COMMAND(map, vm_map_print)
4570 {
4571 	static int nlines;
4572 	/* XXX convert args. */
4573 	vm_map_t map = (vm_map_t)addr;
4574 	boolean_t full = have_addr;
4575 
4576 	vm_map_entry_t entry;
4577 
4578 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4579 	    (void *)map,
4580 	    (void *)map->pmap, map->nentries, map->timestamp);
4581 	nlines++;
4582 
4583 	if (!full && db_indent)
4584 		return;
4585 
4586 	db_indent += 2;
4587 	RB_FOREACH(entry, vm_map_rb_tree, &map->rb_root) {
4588 		db_iprintf("map entry %p: start=%p, end=%p\n",
4589 		    (void *)entry,
4590 		    (void *)entry->ba.start, (void *)entry->ba.end);
4591 		nlines++;
4592 		{
4593 			static char *inheritance_name[4] =
4594 			{"share", "copy", "none", "donate_copy"};
4595 
4596 			db_iprintf(" prot=%x/%x/%s",
4597 			    entry->protection,
4598 			    entry->max_protection,
4599 			    inheritance_name[(int)(unsigned char)
4600 						entry->inheritance]);
4601 			if (entry->wired_count != 0)
4602 				db_printf(", wired");
4603 		}
4604 		switch(entry->maptype) {
4605 		case VM_MAPTYPE_SUBMAP:
4606 			/* XXX no %qd in kernel.  Truncate entry->ba.offset. */
4607 			db_printf(", share=%p, offset=0x%lx\n",
4608 			    (void *)entry->ba.sub_map,
4609 			    (long)entry->ba.offset);
4610 			nlines++;
4611 
4612 			db_indent += 2;
4613 			vm_map_print((db_expr_t)(intptr_t)entry->ba.sub_map,
4614 				     full, 0, NULL);
4615 			db_indent -= 2;
4616 			break;
4617 		case VM_MAPTYPE_NORMAL:
4618 		case VM_MAPTYPE_VPAGETABLE:
4619 			/* XXX no %qd in kernel.  Truncate entry->ba.offset. */
4620 			db_printf(", object=%p, offset=0x%lx",
4621 			    (void *)entry->ba.object,
4622 			    (long)entry->ba.offset);
4623 			if (entry->eflags & MAP_ENTRY_COW)
4624 				db_printf(", copy (%s)",
4625 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4626 			db_printf("\n");
4627 			nlines++;
4628 
4629 			if (entry->ba.object) {
4630 				db_indent += 2;
4631 				vm_object_print((db_expr_t)(intptr_t)
4632 						entry->ba.object,
4633 						full, 0, NULL);
4634 				nlines += 4;
4635 				db_indent -= 2;
4636 			}
4637 			break;
4638 		case VM_MAPTYPE_UKSMAP:
4639 			db_printf(", uksmap=%p, offset=0x%lx",
4640 			    (void *)entry->ba.uksmap,
4641 			    (long)entry->ba.offset);
4642 			if (entry->eflags & MAP_ENTRY_COW)
4643 				db_printf(", copy (%s)",
4644 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4645 			db_printf("\n");
4646 			nlines++;
4647 			break;
4648 		default:
4649 			break;
4650 		}
4651 	}
4652 	db_indent -= 2;
4653 	if (db_indent == 0)
4654 		nlines = 0;
4655 }
4656 
4657 /*
4658  * Debugging only
4659  */
4660 DB_SHOW_COMMAND(procvm, procvm)
4661 {
4662 	struct proc *p;
4663 
4664 	if (have_addr) {
4665 		p = (struct proc *) addr;
4666 	} else {
4667 		p = curproc;
4668 	}
4669 
4670 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4671 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4672 	    (void *)vmspace_pmap(p->p_vmspace));
4673 
4674 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
4675 }
4676 
4677 #endif /* DDB */
4678