xref: /dragonfly/sys/vm/vm_map.c (revision 6a3cbbc2)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * Copyright (c) 2003-2019 The DragonFly Project.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * The Mach Operating System project at Carnegie-Mellon University.
8  *
9  * This code is derived from software contributed to The DragonFly Project
10  * by Matthew Dillon <dillon@backplane.com>
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37  *
38  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
39  * All rights reserved.
40  *
41  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
42  *
43  * Permission to use, copy, modify and distribute this software and
44  * its documentation is hereby granted, provided that both the copyright
45  * notice and this permission notice appear in all copies of the
46  * software, derivative works or modified versions, and any portions
47  * thereof, and that both notices appear in supporting documentation.
48  *
49  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
51  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52  *
53  * Carnegie Mellon requests users of this software to return to
54  *
55  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
56  *  School of Computer Science
57  *  Carnegie Mellon University
58  *  Pittsburgh PA 15213-3890
59  *
60  * any improvements or extensions that they make and grant Carnegie the
61  * rights to redistribute these changes.
62  */
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/proc.h>
67 #include <sys/serialize.h>
68 #include <sys/lock.h>
69 #include <sys/vmmeter.h>
70 #include <sys/mman.h>
71 #include <sys/vnode.h>
72 #include <sys/resourcevar.h>
73 #include <sys/shm.h>
74 #include <sys/tree.h>
75 #include <sys/malloc.h>
76 #include <sys/objcache.h>
77 #include <sys/kern_syscall.h>
78 
79 #include <vm/vm.h>
80 #include <vm/vm_param.h>
81 #include <vm/pmap.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_page.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_pager.h>
86 #include <vm/vm_kern.h>
87 #include <vm/vm_extern.h>
88 #include <vm/swap_pager.h>
89 #include <vm/vm_zone.h>
90 
91 #include <sys/random.h>
92 #include <sys/sysctl.h>
93 #include <sys/spinlock.h>
94 
95 #include <sys/thread2.h>
96 #include <sys/spinlock2.h>
97 
98 /*
99  * Virtual memory maps provide for the mapping, protection, and sharing
100  * of virtual memory objects.  In addition, this module provides for an
101  * efficient virtual copy of memory from one map to another.
102  *
103  * Synchronization is required prior to most operations.
104  *
105  * Maps consist of an ordered doubly-linked list of simple entries.
106  * A hint and an RB tree are used to speed up lookups.
107  *
108  * Callers looking to modify maps specify start/end addresses which cause
109  * the related map entry to be clipped if necessary, and then later
110  * recombined if the pieces remain compatible.
111  *
112  * Virtual copy operations are performed by copying VM object references
113  * from one map to another, and then marking both regions as copy-on-write.
114  */
115 static boolean_t vmspace_ctor(void *obj, void *privdata, int ocflags);
116 static void vmspace_dtor(void *obj, void *privdata);
117 static void vmspace_terminate(struct vmspace *vm, int final);
118 
119 MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore");
120 MALLOC_DEFINE(M_MAP_BACKING, "map_backing", "vm_map_backing to entry");
121 static struct objcache *vmspace_cache;
122 
123 /*
124  * per-cpu page table cross mappings are initialized in early boot
125  * and might require a considerable number of vm_map_entry structures.
126  */
127 #define MAPENTRYBSP_CACHE	(MAXCPU+1)
128 #define MAPENTRYAP_CACHE	8
129 
130 /*
131  * Partitioning threaded programs with large anonymous memory areas can
132  * improve concurrent fault performance.
133  */
134 #define MAP_ENTRY_PARTITION_SIZE	((vm_offset_t)(32 * 1024 * 1024))
135 #define MAP_ENTRY_PARTITION_MASK	(MAP_ENTRY_PARTITION_SIZE - 1)
136 
137 #define VM_MAP_ENTRY_WITHIN_PARTITION(entry)	\
138 	((((entry)->ba.start ^ (entry)->ba.end) & ~MAP_ENTRY_PARTITION_MASK) == 0)
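/*
 * Illustrative example (hypothetical addresses): with the 32MB partition
 * size above, an entry lies within a single partition when its start and
 * end agree in every bit above the partition mask.
 *
 *	[0x00100000, 0x00200000): start ^ end = 0x00300000,
 *		masked with ~MAP_ENTRY_PARTITION_MASK -> 0 (within)
 *	[0x01F00000, 0x02100000): start ^ end = 0x03E00000,
 *		masked -> 0x02000000 (crosses the 32MB boundary, not within)
 */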
139 
140 static struct vm_zone mapentzone_store;
141 __read_mostly static vm_zone_t mapentzone;
142 
143 static struct vm_map_entry map_entry_init[MAX_MAPENT];
144 static struct vm_map_entry cpu_map_entry_init_bsp[MAPENTRYBSP_CACHE];
145 static struct vm_map_entry cpu_map_entry_init_ap[MAXCPU][MAPENTRYAP_CACHE];
146 
147 __read_mostly static int randomize_mmap;
148 SYSCTL_INT(_vm, OID_AUTO, randomize_mmap, CTLFLAG_RW, &randomize_mmap, 0,
149     "Randomize mmap offsets");
150 __read_mostly static int vm_map_relock_enable = 1;
151 SYSCTL_INT(_vm, OID_AUTO, map_relock_enable, CTLFLAG_RW,
152 	   &vm_map_relock_enable, 0, "insert pop pgtable optimization");
153 __read_mostly static int vm_map_partition_enable = 1;
154 SYSCTL_INT(_vm, OID_AUTO, map_partition_enable, CTLFLAG_RW,
155 	   &vm_map_partition_enable, 0, "Break up larger vm_map_entry's");
156 __read_mostly static int vm_map_backing_limit = 5;
157 SYSCTL_INT(_vm, OID_AUTO, map_backing_limit, CTLFLAG_RW,
158 	   &vm_map_backing_limit, 0, "ba.backing_ba link depth");
159 __read_mostly static int vm_map_backing_shadow_test = 1;
160 SYSCTL_INT(_vm, OID_AUTO, map_backing_shadow_test, CTLFLAG_RW,
161 	   &vm_map_backing_shadow_test, 0, "ba.object shadow test");
162 
163 static void vmspace_drop_notoken(struct vmspace *vm);
164 static void vm_map_entry_shadow(vm_map_entry_t entry);
165 static vm_map_entry_t vm_map_entry_create(int *);
166 static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
167 static void vm_map_entry_dispose_ba (vm_map_entry_t entry, vm_map_backing_t ba);
168 static void vm_map_backing_replicated(vm_map_t map,
169 		vm_map_entry_t entry, int flags);
170 static void vm_map_backing_adjust_start(vm_map_entry_t entry,
171 		vm_ooffset_t start);
172 static void vm_map_backing_adjust_end(vm_map_entry_t entry,
173 		vm_ooffset_t end);
174 static void vm_map_backing_attach (vm_map_entry_t entry, vm_map_backing_t ba);
175 static void vm_map_backing_detach (vm_map_entry_t entry, vm_map_backing_t ba);
176 static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
177 static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
178 static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
179 static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
180 static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
181 		vm_map_entry_t);
182 static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry,
183 		vm_offset_t start, vm_offset_t end, int *countp, int flags);
184 static void vm_map_entry_partition(vm_map_t map, vm_map_entry_t entry,
185 		vm_offset_t vaddr, int *countp);
186 
187 #define MAP_BACK_CLIPPED	0x0001
188 #define MAP_BACK_BASEOBJREFD	0x0002
189 
190 /*
191  * Initialize the vm_map module.  Must be called before any other vm_map
192  * routines.
193  *
194  * Map and entry structures are allocated from the general purpose
195  * memory pool with some exceptions:
196  *
197  *	- The kernel map is allocated statically.
198  *	- Initial kernel map entries are allocated out of a static pool.
199  *	- We must set ZONE_SPECIAL here or the early boot code can get
200  *	  stuck if there are >63 cores.
201  *
202  *	These restrictions are necessary since malloc() uses the
203  *	maps and requires map entries.
204  *
205  * Called from the low level boot code only.
206  */
207 void
208 vm_map_startup(void)
209 {
210 	mapentzone = &mapentzone_store;
211 	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
212 		  map_entry_init, MAX_MAPENT);
213 	mapentzone_store.zflags |= ZONE_SPECIAL;
214 }
215 
216 /*
217  * Called prior to any vmspace allocations.
218  *
219  * Called from the low level boot code only.
220  */
221 void
222 vm_init2(void)
223 {
224 	vmspace_cache = objcache_create_mbacked(M_VMSPACE,
225 						sizeof(struct vmspace),
226 						0, ncpus * 4,
227 						vmspace_ctor, vmspace_dtor,
228 						NULL);
229 	zinitna(mapentzone, NULL, 0, 0, ZONE_USE_RESERVE | ZONE_SPECIAL);
230 	pmap_init2();
231 	vm_object_init2();
232 }
233 
234 /*
235  * objcache support.  We leave the pmap root cached as long as possible
236  * for performance reasons.
237  */
238 static
239 boolean_t
240 vmspace_ctor(void *obj, void *privdata, int ocflags)
241 {
242 	struct vmspace *vm = obj;
243 
244 	bzero(vm, sizeof(*vm));
245 	vm->vm_refcnt = VM_REF_DELETED;
246 
247 	return 1;
248 }
249 
250 static
251 void
252 vmspace_dtor(void *obj, void *privdata)
253 {
254 	struct vmspace *vm = obj;
255 
256 	KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
257 	pmap_puninit(vmspace_pmap(vm));
258 }
259 
260 /*
261  * Red black tree functions
262  *
263  * The caller must hold the related map lock.
264  */
265 static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
266 RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);
267 
268 /* a->ba.start is the address and the only field which must be initialized */
269 static int
270 rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
271 {
272 	if (a->ba.start < b->ba.start)
273 		return(-1);
274 	else if (a->ba.start > b->ba.start)
275 		return(1);
276 	return(0);
277 }
278 
279 /*
280  * Initialize vmspace ref/hold counts for vmspace0.  There is a holdcnt for
281  * every refcnt.
282  */
283 void
284 vmspace_initrefs(struct vmspace *vm)
285 {
286 	vm->vm_refcnt = 1;
287 	vm->vm_holdcnt = 1;
288 }
289 
290 /*
291  * Allocate a vmspace structure, including a vm_map and pmap.
292  * Initialize numerous fields.  While the initial allocation is zeroed,
293  * subsequent reuse from the objcache leaves elements of the structure
294  * intact (particularly the pmap), so portions must be zeroed.
295  *
296  * Returns a referenced vmspace.
297  *
298  * No requirements.
299  */
300 struct vmspace *
301 vmspace_alloc(vm_offset_t min, vm_offset_t max)
302 {
303 	struct vmspace *vm;
304 
305 	vm = objcache_get(vmspace_cache, M_WAITOK);
306 
307 	bzero(&vm->vm_startcopy,
308 	      (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
309 	vm_map_init(&vm->vm_map, min, max, NULL);	/* initializes token */
310 
311 	/*
312 	 * NOTE: hold to acquires token for safety.
313 	 *
314 	 * On return vmspace is referenced (refs=1, hold=1).  That is,
315 	 * each refcnt also has a holdcnt.  There can be additional holds
316 	 * (holdcnt) above and beyond the refcnt.  Finalization is handled in
317 	 * two stages, one on refs 1->0, and the the second on hold 1->0.
318 	 */
319 	KKASSERT(vm->vm_holdcnt == 0);
320 	KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
321 	vmspace_initrefs(vm);
322 	vmspace_hold(vm);
323 	pmap_pinit(vmspace_pmap(vm));		/* (some fields reused) */
324 	vm->vm_map.pmap = vmspace_pmap(vm);	/* XXX */
325 	vm->vm_shm = NULL;
326 	vm->vm_flags = 0;
327 	cpu_vmspace_alloc(vm);
328 	vmspace_drop(vm);
329 
330 	return (vm);
331 }
332 
333 /*
334  * NOTE: Can return 0 if the vmspace is exiting.
335  */
336 int
337 vmspace_getrefs(struct vmspace *vm)
338 {
339 	int32_t n;
340 
341 	n = vm->vm_refcnt;
342 	cpu_ccfence();
343 	if (n & VM_REF_DELETED)
344 		n = -1;
345 	return n;
346 }
347 
348 void
349 vmspace_hold(struct vmspace *vm)
350 {
351 	atomic_add_int(&vm->vm_holdcnt, 1);
352 	lwkt_gettoken(&vm->vm_map.token);
353 }
354 
355 /*
356  * Drop with final termination interlock.
357  */
358 void
359 vmspace_drop(struct vmspace *vm)
360 {
361 	lwkt_reltoken(&vm->vm_map.token);
362 	vmspace_drop_notoken(vm);
363 }
364 
365 static void
366 vmspace_drop_notoken(struct vmspace *vm)
367 {
368 	if (atomic_fetchadd_int(&vm->vm_holdcnt, -1) == 1) {
369 		if (vm->vm_refcnt & VM_REF_DELETED)
370 			vmspace_terminate(vm, 1);
371 	}
372 }
373 
374 /*
375  * A vmspace object must not be in a terminated state to be able to obtain
376  * additional refs on it.
377  *
378  * These are official references to the vmspace, the count is used to check
379  * for vmspace sharing.  Foreign accessors should use 'hold' and not 'ref'.
380  *
381  * XXX we need to combine hold & ref together into one 64-bit field to allow
382  * holds to prevent stage-1 termination.
383  */
384 void
385 vmspace_ref(struct vmspace *vm)
386 {
387 	uint32_t n;
388 
389 	atomic_add_int(&vm->vm_holdcnt, 1);
390 	n = atomic_fetchadd_int(&vm->vm_refcnt, 1);
391 	KKASSERT((n & VM_REF_DELETED) == 0);
392 }
393 
394 /*
395  * Release a ref on the vmspace.  On the 1->0 transition we do stage-1
396  * termination of the vmspace.  Then, on the final drop of the hold we
397  * will do stage-2 final termination.
398  */
399 void
400 vmspace_rel(struct vmspace *vm)
401 {
402 	uint32_t n;
403 
404 	/*
405 	 * Drop refs.  Each ref also has a hold which is also dropped.
406 	 *
407 	 * When refs hits 0, compete to set the VM_REF_DELETED flag (holds
408 	 * prevent finalization) to start termination processing.
409 	 * Finalization occurs when the last hold count drops to 0.
410 	 */
411 	n = atomic_fetchadd_int(&vm->vm_refcnt, -1) - 1;
412 	while (n == 0) {
413 		if (atomic_cmpset_int(&vm->vm_refcnt, 0, VM_REF_DELETED)) {
414 			vmspace_terminate(vm, 0);
415 			break;
416 		}
417 		n = vm->vm_refcnt;
418 		cpu_ccfence();
419 	}
420 	vmspace_drop_notoken(vm);
421 }
422 
423 /*
424  * This is called during exit indicating that the vmspace is no
425  * longer in use by an exiting process, but the process has not yet
426  * been reaped.
427  *
428  * We drop refs, allowing for stage-1 termination, but maintain a holdcnt
429  * to prevent stage-2 until the process is reaped.  Note hte order of
430  * operation, we must hold first.
431  *
432  * No requirements.
433  */
434 void
435 vmspace_relexit(struct vmspace *vm)
436 {
437 	atomic_add_int(&vm->vm_holdcnt, 1);
438 	vmspace_rel(vm);
439 }
440 
441 /*
442  * Called during reap to disconnect the remainder of the vmspace from
443  * the process.  On the hold drop the vmspace termination is finalized.
444  *
445  * No requirements.
446  */
447 void
448 vmspace_exitfree(struct proc *p)
449 {
450 	struct vmspace *vm;
451 
452 	vm = p->p_vmspace;
453 	p->p_vmspace = NULL;
454 	vmspace_drop_notoken(vm);
455 }
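/*
 * Illustrative timeline, assuming a single reference on the vmspace:
 *
 *	exit:	vmspace_relexit()
 *			holdcnt 1->2, refcnt 1->0 (VM_REF_DELETED set)
 *			-> stage-1 vmspace_terminate(vm, 0)
 *			-> holdcnt 2->1 via vmspace_drop_notoken()
 *	reap:	vmspace_exitfree()
 *			holdcnt 1->0
 *			-> stage-2 vmspace_terminate(vm, 1)
 *			-> vmspace returned to the objcache
 */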
456 
457 /*
458  * Called in two cases:
459  *
460  * (1) When the last refcnt is dropped and the vmspace becomes inactive,
461  *     called with final == 0.  refcnt will be (u_int)-1 at this point,
462  *     and holdcnt will still be non-zero.
463  *
464  * (2) When holdcnt becomes 0, called with final == 1.  There should no
465  *     longer be anyone with access to the vmspace.
466  *
467  * VMSPACE_EXIT1 flags the primary deactivation
468  * VMSPACE_EXIT2 flags the last reap
469  */
470 static void
471 vmspace_terminate(struct vmspace *vm, int final)
472 {
473 	int count;
474 
475 	lwkt_gettoken(&vm->vm_map.token);
476 	if (final == 0) {
477 		KKASSERT((vm->vm_flags & VMSPACE_EXIT1) == 0);
478 		vm->vm_flags |= VMSPACE_EXIT1;
479 
480 		/*
481 		 * Get rid of most of the resources.  Leave the kernel pmap
482 		 * intact.
483 		 *
484 		 * If the pmap does not contain wired pages we can bulk-delete
485 		 * the pmap as a performance optimization before removing the
486 		 * related mappings.
487 		 *
488 		 * If the pmap contains wired pages we cannot do this
489 		 * pre-optimization because currently vm_fault_unwire()
490 		 * expects the pmap pages to exist and will not decrement
491 		 * p->wire_count if they do not.
492 		 */
493 		shmexit(vm);
494 		if (vmspace_pmap(vm)->pm_stats.wired_count) {
495 			vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
496 				      VM_MAX_USER_ADDRESS);
497 			pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
498 					  VM_MAX_USER_ADDRESS);
499 		} else {
500 			pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
501 					  VM_MAX_USER_ADDRESS);
502 			vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
503 				      VM_MAX_USER_ADDRESS);
504 		}
505 		lwkt_reltoken(&vm->vm_map.token);
506 	} else {
507 		KKASSERT((vm->vm_flags & VMSPACE_EXIT1) != 0);
508 		KKASSERT((vm->vm_flags & VMSPACE_EXIT2) == 0);
509 
510 		/*
511 		 * Get rid of remaining basic resources.
512 		 */
513 		vm->vm_flags |= VMSPACE_EXIT2;
514 		shmexit(vm);
515 
516 		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
517 		vm_map_lock(&vm->vm_map);
518 		cpu_vmspace_free(vm);
519 
520 		/*
521 		 * Lock the map, to wait out all other references to it.
522 		 * Delete all of the mappings and pages they hold, then call
523 		 * the pmap module to reclaim anything left.
524 		 */
525 		vm_map_delete(&vm->vm_map,
526 			      vm_map_min(&vm->vm_map),
527 			      vm_map_max(&vm->vm_map),
528 			      &count);
529 		vm_map_unlock(&vm->vm_map);
530 		vm_map_entry_release(count);
531 
532 		pmap_release(vmspace_pmap(vm));
533 		lwkt_reltoken(&vm->vm_map.token);
534 		objcache_put(vmspace_cache, vm);
535 	}
536 }
537 
538 /*
539  * Swap usage is determined by taking the proportional swap used by
540  * VM objects backing the VM map.  To make up for fractional losses,
541  * if the VM object has any swap use at all the associated map entries
542  * count for at least 1 swap page.
543  *
544  * No requirements.
545  */
546 vm_offset_t
547 vmspace_swap_count(struct vmspace *vm)
548 {
549 	vm_map_t map = &vm->vm_map;
550 	vm_map_entry_t cur;
551 	vm_object_t object;
552 	vm_offset_t count = 0;
553 	vm_offset_t n;
554 
555 	vmspace_hold(vm);
556 
557 	RB_FOREACH(cur, vm_map_rb_tree, &map->rb_root) {
558 		switch(cur->maptype) {
559 		case VM_MAPTYPE_NORMAL:
560 		case VM_MAPTYPE_VPAGETABLE:
561 			if ((object = cur->ba.object) == NULL)
562 				break;
563 			if (object->swblock_count) {
564 				n = (cur->ba.end - cur->ba.start) / PAGE_SIZE;
565 				count += object->swblock_count *
566 				    SWAP_META_PAGES * n / object->size + 1;
567 			}
568 			break;
569 		default:
570 			break;
571 		}
572 	}
573 	vmspace_drop(vm);
574 
575 	return(count);
576 }
577 
578 /*
579  * Calculate the approximate number of anonymous pages in use by
580  * this vmspace.  To make up for fractional losses, we count each
581  * VM object as having at least 1 anonymous page.
582  *
583  * No requirements.
584  */
585 vm_offset_t
586 vmspace_anonymous_count(struct vmspace *vm)
587 {
588 	vm_map_t map = &vm->vm_map;
589 	vm_map_entry_t cur;
590 	vm_object_t object;
591 	vm_offset_t count = 0;
592 
593 	vmspace_hold(vm);
594 	RB_FOREACH(cur, vm_map_rb_tree, &map->rb_root) {
595 		switch(cur->maptype) {
596 		case VM_MAPTYPE_NORMAL:
597 		case VM_MAPTYPE_VPAGETABLE:
598 			if ((object = cur->ba.object) == NULL)
599 				break;
600 			if (object->type != OBJT_DEFAULT &&
601 			    object->type != OBJT_SWAP) {
602 				break;
603 			}
604 			count += object->resident_page_count;
605 			break;
606 		default:
607 			break;
608 		}
609 	}
610 	vmspace_drop(vm);
611 
612 	return(count);
613 }
614 
615 /*
616  * Initialize an existing vm_map structure such as that in the vmspace
617  * structure.  The pmap is initialized elsewhere.
618  *
619  * No requirements.
620  */
621 void
622 vm_map_init(struct vm_map *map, vm_offset_t min_addr, vm_offset_t max_addr,
623 	    pmap_t pmap)
624 {
625 	RB_INIT(&map->rb_root);
626 	spin_init(&map->ilock_spin, "ilock");
627 	map->ilock_base = NULL;
628 	map->nentries = 0;
629 	map->size = 0;
630 	map->system_map = 0;
631 	vm_map_min(map) = min_addr;
632 	vm_map_max(map) = max_addr;
633 	map->pmap = pmap;
634 	map->timestamp = 0;
635 	map->flags = 0;
636 	bzero(&map->freehint, sizeof(map->freehint));
637 	lwkt_token_init(&map->token, "vm_map");
638 	lockinit(&map->lock, "vm_maplk", (hz + 9) / 10, 0);
639 }
640 
641 /*
642  * Find the first possible free address for the specified request length.
643  * Returns 0 if we don't have one cached.
644  */
645 static
646 vm_offset_t
647 vm_map_freehint_find(vm_map_t map, vm_size_t length, vm_size_t align)
648 {
649 	vm_map_freehint_t *scan;
650 
651 	scan = &map->freehint[0];
652 	while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
653 		if (scan->length == length && scan->align == align)
654 			return(scan->start);
655 		++scan;
656 	}
657 	return 0;
658 }
659 
660 /*
661  * Unconditionally set the freehint.  Called by vm_map_findspace() after
662  * it finds an address.  This will help us iterate optimally on the next
663  * similar findspace.
664  */
665 static
666 void
667 vm_map_freehint_update(vm_map_t map, vm_offset_t start,
668 		       vm_size_t length, vm_size_t align)
669 {
670 	vm_map_freehint_t *scan;
671 
672 	scan = &map->freehint[0];
673 	while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
674 		if (scan->length == length && scan->align == align) {
675 			scan->start = start;
676 			return;
677 		}
678 		++scan;
679 	}
680 	scan = &map->freehint[map->freehint_newindex & VM_MAP_FFMASK];
681 	scan->start = start;
682 	scan->align = align;
683 	scan->length = length;
684 	++map->freehint_newindex;
685 }
686 
687 /*
688  * Update any existing freehints (for any alignment), for the hole we just
689  * added.
690  */
691 static
692 void
693 vm_map_freehint_hole(vm_map_t map, vm_offset_t start, vm_size_t length)
694 {
695 	vm_map_freehint_t *scan;
696 
697 	scan = &map->freehint[0];
698 	while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
699 		if (scan->length <= length && scan->start > start)
700 			scan->start = start;
701 		++scan;
702 	}
703 }
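/*
 * Illustrative note: the freehint array is a small per-map cache keyed
 * on (length, align).  A successful vm_map_findspace() records the
 * address it found via vm_map_freehint_update(), so the next search
 * with the same length and alignment can begin scanning there instead
 * of at the bottom of the map.  When a hole opens up at a lower
 * address, vm_map_freehint_hole() pulls any hint that could fit into
 * the hole back down so the freed space is not permanently skipped.
 */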
704 
705 /*
706  * This function handles MAP_ENTRY_NEEDS_COPY by inserting a fronting
707  * object in the entry for COW faults.
708  *
709  * The entire chain including entry->ba (prior to inserting the fronting
710  * object) essentially becomes set in stone... elements of it can be paged
711  * in or out, but cannot be further modified.
712  *
713  * NOTE: If we do not optimize the backing chain then a unique copy is not
714  *	 needed.  Note, however, that because portions of the chain are
715  *	 shared across pmaps we cannot make any changes to the vm_map_backing
716  *	 elements themselves.
717  *
718  * If the map segment is governed by a virtual page table then it is
719  * possible to address offsets beyond the mapped area.  Just allocate
720  * a maximally sized object for this case.
721  *
722  * If addref is non-zero an additional reference is added to the returned
723  * entry.  This mechanic exists because the additional reference might have
724  * to be added atomically and not after return to prevent a premature
725  * collapse.  XXX currently there is no collapse code.
726  *
727  * The vm_map must be exclusively locked.
728  * No other requirements.
729  */
730 static
731 void
732 vm_map_entry_shadow(vm_map_entry_t entry)
733 {
734 	vm_map_backing_t ba;
735 	vm_size_t length;
736 	vm_object_t source;
737 	vm_object_t result;
738 
739 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
740 		length = 0x7FFFFFFF;
741 	else
742 		length = atop(entry->ba.end - entry->ba.start);
743 
744 	/*
745 	 * Don't create the new object if the old object isn't shared.
746 	 * This case occurs quite often when programs fork/exec/wait.
747 	 *
748 	 * Caller ensures source exists (all backing_ba's must have objects),
749 	 * typically indirectly by virtue of the NEEDS_COPY flag being set.
750 	 * We have a ref on source by virtue of the entry and do not need
751 	 * to lock it to do this test.
752 	 */
753 	source = entry->ba.object;
754 	KKASSERT(source);
755 
756 	if (source->type != OBJT_VNODE) {
757 		if (source->ref_count == 1 &&
758 		    source->handle == NULL &&
759 		    (source->type == OBJT_DEFAULT ||
760 		     source->type == OBJT_SWAP)) {
761 			goto done;
762 		}
763 	}
764 	ba = kmalloc(sizeof(*ba), M_MAP_BACKING, M_INTWAIT); /* copied later */
765 	vm_object_hold_shared(source);
766 
767 	/*
768 	 * Once it becomes part of a backing_ba chain it can wind up anywhere,
769 	 * drop the ONEMAPPING flag now.
770 	 */
771 	vm_object_clear_flag(source, OBJ_ONEMAPPING);
772 
773 	/*
774 	 * Allocate a new object with the given length.  The new object
775 	 * is returned referenced but we may have to add another one.
776 	 * If we are adding a second reference we must clear OBJ_ONEMAPPING.
777 	 * (typically because the caller is about to clone a vm_map_entry).
778 	 *
779 	 * The source object currently has an extra reference to prevent
780 	 * collapses into it while we mess with its shadow list, which
781 	 * we will remove later in this routine.
782 	 *
783 	 * The target object may require a second reference if asked for one
784 	 * by the caller.
785 	 */
786 	result = vm_object_allocate_hold(OBJT_DEFAULT, length);
787 	if (result == NULL)
788 		panic("vm_object_shadow: no object for shadowing");
789 
790 	/*
791 	 * The new object shadows the source object.
792 	 *
793 	 * Try to optimize the result object's page color when shadowing
794 	 * in order to maintain page coloring consistency in the combined
795 	 * shadowed object.
796 	 *
797 	 * The source object is moved to ba, retaining its existing ref-count.
798 	 * No additional ref is needed.
799 	 *
800 	 * SHADOWING IS NOT APPLICABLE TO OBJT_VNODE OBJECTS
801 	 */
802 	vm_map_backing_detach(entry, &entry->ba);
803 	*ba = entry->ba;		/* previous ba */
804 	entry->ba.object = result;	/* new ba (at head of entry) */
805 	entry->ba.backing_ba = ba;
806 	entry->ba.backing_count = ba->backing_count + 1;
807 	entry->ba.offset = 0;
808 
809 	/* cpu localization twist */
810 	result->pg_color = vm_quickcolor();
811 
812 	vm_map_backing_attach(entry, &entry->ba);
813 	vm_map_backing_attach(entry, ba);
814 
815 	/*
816 	 * Adjust the return storage.  Drop the ref on source before
817 	 * returning.
818 	 */
819 	vm_object_drop(result);
820 	vm_object_drop(source);
821 done:
822 	entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
823 }
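/*
 * Illustrative sketch of the backing chain transformation performed
 * above, assuming the entry originally fronts a shared anonymous
 * object 'source':
 *
 *	before:	entry->ba.object     = source
 *		entry->ba.backing_ba = NULL
 *
 *	after:	entry->ba.object     = result  (new OBJT_DEFAULT object
 *						which absorbs COW faults)
 *		entry->ba.backing_ba = ba      (kmalloc'd copy of the old
 *						ba, still referencing
 *						source, now read-only)
 *
 * The chain behind entry->ba is never modified again; write faults
 * copy pages up into 'result'.
 */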
824 
825 /*
826  * Allocate an object for a vm_map_entry.
827  *
828  * Object allocation for anonymous mappings is deferred as long as possible.
829  * This function is called when we can defer no longer, generally when a map
830  * entry might be split or forked or takes a page fault.
831  *
832  * If the map segment is governed by a virtual page table then it is
833  * possible to address offsets beyond the mapped area.  Just allocate
834  * a maximally sized object for this case.
835  *
836  * The vm_map must be exclusively locked.
837  * No other requirements.
838  */
839 void
840 vm_map_entry_allocate_object(vm_map_entry_t entry)
841 {
842 	vm_object_t obj;
843 
844 	/*
845 	 * ba.offset is NOT cumulatively added in the backing_ba scan like
846 	 * it was in the old object chain, so we can assign whatever offset
847 	 * we like to the new object.
848 	 *
849 	 * For now assign a value of 0 to make debugging object sizes
850 	 * easier.
851 	 */
852 	entry->ba.offset = 0;
853 
854 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
855 		/* XXX */
856 		obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF);
857 	} else {
858 		obj = vm_object_allocate(OBJT_DEFAULT,
859 					 atop(entry->ba.end - entry->ba.start) +
860 					 entry->ba.offset);
861 	}
862 	entry->ba.object = obj;
863 	vm_map_backing_attach(entry, &entry->ba);
864 }
865 
866 /*
867  * Set an initial negative count so the first attempt to reserve
868  * space preloads a bunch of vm_map_entry's for this cpu.  Also
869  * pre-allocate 2 vm_map_entries which will be needed by zalloc() to
870  * map a new page for vm_map_entry structures.  SMP systems are
871  * particularly sensitive.
872  *
873  * This routine is called in early boot so we cannot just call
874  * vm_map_entry_reserve().
875  *
876  * Called from the low level boot code only (for each cpu)
877  *
878  * WARNING! Take care not to have too-big a static/BSS structure here
879  *	    as MAXCPU can be 256+, otherwise the loader's 64MB heap
880  *	    can get blown out by the kernel plus the initrd image.
881  */
882 void
883 vm_map_entry_reserve_cpu_init(globaldata_t gd)
884 {
885 	vm_map_entry_t entry;
886 	int count;
887 	int i;
888 
889 	atomic_add_int(&gd->gd_vme_avail, -MAP_RESERVE_COUNT * 2);
890 	if (gd->gd_cpuid == 0) {
891 		entry = &cpu_map_entry_init_bsp[0];
892 		count = MAPENTRYBSP_CACHE;
893 	} else {
894 		entry = &cpu_map_entry_init_ap[gd->gd_cpuid][0];
895 		count = MAPENTRYAP_CACHE;
896 	}
897 	for (i = 0; i < count; ++i, ++entry) {
898 		MAPENT_FREELIST(entry) = gd->gd_vme_base;
899 		gd->gd_vme_base = entry;
900 	}
901 }
902 
903 /*
904  * Reserves vm_map_entry structures so code later on can manipulate
905  * map_entry structures within a locked map without blocking trying
906  * to allocate a new vm_map_entry.
907  *
908  * No requirements.
909  *
910  * WARNING!  We must not decrement gd_vme_avail until after we have
911  *	     ensured that sufficient entries exist, otherwise we can
912  *	     get into an endless call recursion in the zalloc code
913  *	     itself.
914  */
915 int
916 vm_map_entry_reserve(int count)
917 {
918 	struct globaldata *gd = mycpu;
919 	vm_map_entry_t entry;
920 
921 	/*
922 	 * Make sure we have enough structures in gd_vme_base to handle
923 	 * the reservation request.
924 	 *
925 	 * Use a critical section to protect against VM faults.  It might
926 	 * not be needed, but we have to be careful here.
927 	 */
928 	if (gd->gd_vme_avail < count) {
929 		crit_enter();
930 		while (gd->gd_vme_avail < count) {
931 			entry = zalloc(mapentzone);
932 			MAPENT_FREELIST(entry) = gd->gd_vme_base;
933 			gd->gd_vme_base = entry;
934 			atomic_add_int(&gd->gd_vme_avail, 1);
935 		}
936 		crit_exit();
937 	}
938 	atomic_add_int(&gd->gd_vme_avail, -count);
939 
940 	return(count);
941 }
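/*
 * Illustrative usage sketch, mirroring the pattern used by
 * vmspace_terminate() above:
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	... clip/insert/delete entries without blocking in zalloc() ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */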
942 
943 /*
944  * Releases previously reserved vm_map_entry structures that were not
945  * used.  If we have too much junk in our per-cpu cache clean some of
946  * it out.
947  *
948  * No requirements.
949  */
950 void
951 vm_map_entry_release(int count)
952 {
953 	struct globaldata *gd = mycpu;
954 	vm_map_entry_t entry;
955 	vm_map_entry_t efree;
956 
957 	count = atomic_fetchadd_int(&gd->gd_vme_avail, count) + count;
958 	if (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
959 		efree = NULL;
960 		crit_enter();
961 		while (gd->gd_vme_avail > MAP_RESERVE_HYST) {
962 			entry = gd->gd_vme_base;
963 			KKASSERT(entry != NULL);
964 			gd->gd_vme_base = MAPENT_FREELIST(entry);
965 			atomic_add_int(&gd->gd_vme_avail, -1);
966 			MAPENT_FREELIST(entry) = efree;
967 			efree = entry;
968 		}
969 		crit_exit();
970 		while ((entry = efree) != NULL) {
971 			efree = MAPENT_FREELIST(efree);
972 			zfree(mapentzone, entry);
973 		}
974 	}
975 }
976 
977 /*
978  * Reserve map entry structures for use in kernel_map itself.  These
979  * entries have *ALREADY* been reserved on a per-cpu basis when the map
980  * was inited.  This function is used by zalloc() to avoid a recursion
981  * when zalloc() itself needs to allocate additional kernel memory.
982  *
983  * This function works like the normal reserve but does not load the
984  * vm_map_entry cache (because that would result in an infinite
985  * recursion).  Note that gd_vme_avail may go negative.  This is expected.
986  *
987  * Any caller of this function must be sure to renormalize after
988  * potentially eating entries to ensure that the reserve supply
989  * remains intact.
990  *
991  * No requirements.
992  */
993 int
994 vm_map_entry_kreserve(int count)
995 {
996 	struct globaldata *gd = mycpu;
997 
998 	atomic_add_int(&gd->gd_vme_avail, -count);
999 	KASSERT(gd->gd_vme_base != NULL,
1000 		("no reserved entries left, gd_vme_avail = %d",
1001 		gd->gd_vme_avail));
1002 	return(count);
1003 }
1004 
1005 /*
1006  * Release previously reserved map entries for kernel_map.  We do not
1007  * attempt to clean up like the normal release function as this would
1008  * cause an unnecessary (but probably not fatal) deep procedure call.
1009  *
1010  * No requirements.
1011  */
1012 void
1013 vm_map_entry_krelease(int count)
1014 {
1015 	struct globaldata *gd = mycpu;
1016 
1017 	atomic_add_int(&gd->gd_vme_avail, count);
1018 }
1019 
1020 /*
1021  * Allocates a VM map entry for insertion.  No entry fields are filled in.
1022  *
1023  * The entries should have previously been reserved.  The reservation count
1024  * is tracked in (*countp).
1025  *
1026  * No requirements.
1027  */
1028 static vm_map_entry_t
1029 vm_map_entry_create(int *countp)
1030 {
1031 	struct globaldata *gd = mycpu;
1032 	vm_map_entry_t entry;
1033 
1034 	KKASSERT(*countp > 0);
1035 	--*countp;
1036 	crit_enter();
1037 	entry = gd->gd_vme_base;
1038 	KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
1039 	gd->gd_vme_base = MAPENT_FREELIST(entry);
1040 	crit_exit();
1041 
1042 	return(entry);
1043 }
1044 
1045 /*
1046  * Attach and detach backing store elements
1047  */
1048 static void
1049 vm_map_backing_attach(vm_map_entry_t entry, vm_map_backing_t ba)
1050 {
1051 	vm_object_t obj;
1052 
1053 	switch(entry->maptype) {
1054 	case VM_MAPTYPE_VPAGETABLE:
1055 	case VM_MAPTYPE_NORMAL:
1056 		obj = ba->object;
1057 		lockmgr(&obj->backing_lk, LK_EXCLUSIVE);
1058 		TAILQ_INSERT_TAIL(&obj->backing_list, ba, entry);
1059 		lockmgr(&obj->backing_lk, LK_RELEASE);
1060 		break;
1061 	case VM_MAPTYPE_UKSMAP:
1062 		ba->uksmap(ba, UKSMAPOP_ADD, entry->aux.dev, NULL);
1063 		break;
1064 	}
1065 }
1066 
1067 static void
1068 vm_map_backing_detach(vm_map_entry_t entry, vm_map_backing_t ba)
1069 {
1070 	vm_object_t obj;
1071 
1072 	switch(entry->maptype) {
1073 	case VM_MAPTYPE_VPAGETABLE:
1074 	case VM_MAPTYPE_NORMAL:
1075 		obj = ba->object;
1076 		lockmgr(&obj->backing_lk, LK_EXCLUSIVE);
1077 		TAILQ_REMOVE(&obj->backing_list, ba, entry);
1078 		lockmgr(&obj->backing_lk, LK_RELEASE);
1079 		break;
1080 	case VM_MAPTYPE_UKSMAP:
1081 		ba->uksmap(ba, UKSMAPOP_REM, entry->aux.dev, NULL);
1082 		break;
1083 	}
1084 }
1085 
1086 /*
1087  * Dispose of the dynamically allocated backing_ba chain associated
1088  * with a vm_map_entry.
1089  *
1090  * We decrement the (possibly shared) element and kfree() on the
1091  * 1->0 transition.  We only iterate to the next backing_ba when
1092  * the previous one went through a 1->0 transition.
1093  *
1094  * These can only be normal vm_object based backings.
1095  */
1096 static void
1097 vm_map_entry_dispose_ba(vm_map_entry_t entry, vm_map_backing_t ba)
1098 {
1099 	vm_map_backing_t next;
1100 
1101 	while (ba) {
1102 		if (ba->map_object) {
1103 			vm_map_backing_detach(entry, ba);
1104 			vm_object_deallocate(ba->object);
1105 		}
1106 		next = ba->backing_ba;
1107 		kfree(ba, M_MAP_BACKING);
1108 		ba = next;
1109 	}
1110 }
1111 
1112 /*
1113  * Dispose of a vm_map_entry that is no longer being referenced.
1114  *
1115  * No requirements.
1116  */
1117 static void
1118 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
1119 {
1120 	struct globaldata *gd = mycpu;
1121 
1122 	/*
1123 	 * Dispose of the base object and the backing link.
1124 	 */
1125 	switch(entry->maptype) {
1126 	case VM_MAPTYPE_NORMAL:
1127 	case VM_MAPTYPE_VPAGETABLE:
1128 		if (entry->ba.map_object) {
1129 			vm_map_backing_detach(entry, &entry->ba);
1130 			vm_object_deallocate(entry->ba.object);
1131 		}
1132 		break;
1133 	case VM_MAPTYPE_SUBMAP:
1134 		break;
1135 	case VM_MAPTYPE_UKSMAP:
1136 		vm_map_backing_detach(entry, &entry->ba);
1137 		break;
1138 	default:
1139 		break;
1140 	}
1141 	vm_map_entry_dispose_ba(entry, entry->ba.backing_ba);
1142 
1143 	/*
1144 	 * Cleanup for safety.
1145 	 */
1146 	entry->ba.backing_ba = NULL;
1147 	entry->ba.object = NULL;
1148 	entry->ba.offset = 0;
1149 
1150 	++*countp;
1151 	crit_enter();
1152 	MAPENT_FREELIST(entry) = gd->gd_vme_base;
1153 	gd->gd_vme_base = entry;
1154 	crit_exit();
1155 }
1156 
1157 
1158 /*
1159  * Insert/remove entries from maps.
1160  *
1161  * The related map must be exclusively locked.
1162  * The caller must hold map->token
1163  * No other requirements.
1164  */
1165 static __inline void
1166 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
1167 {
1168 	ASSERT_VM_MAP_LOCKED(map);
1169 
1170 	map->nentries++;
1171 	if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry))
1172 		panic("vm_map_entry_link: dup addr map %p ent %p", map, entry);
1173 }
1174 
1175 static __inline void
1176 vm_map_entry_unlink(vm_map_t map,
1177 		    vm_map_entry_t entry)
1178 {
1179 	ASSERT_VM_MAP_LOCKED(map);
1180 
1181 	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1182 		panic("vm_map_entry_unlink: attempt to mess with "
1183 		      "locked entry! %p", entry);
1184 	}
1185 	vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry);
1186 	map->nentries--;
1187 }
1188 
1189 /*
1190  * Finds the map entry containing (or immediately preceding) the specified
1191  * address in the given map.  The entry is returned in (*entry).
1192  *
1193  * The boolean result indicates whether the address is actually contained
1194  * in the map.
1195  *
1196  * The related map must be locked.
1197  * No other requirements.
1198  */
1199 boolean_t
1200 vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry)
1201 {
1202 	vm_map_entry_t tmp;
1203 	vm_map_entry_t last;
1204 
1205 	ASSERT_VM_MAP_LOCKED(map);
1206 
1207 	/*
1208 	 * Locate the record from the top of the tree.  'last' tracks the
1209 	 * closest prior record and is returned if no match is found, which
1210 	 * in binary tree terms means tracking the most recent right-branch
1211 	 * taken.  If there is no prior record, *entry is set to NULL.
1212 	 */
1213 	last = NULL;
1214 	tmp = RB_ROOT(&map->rb_root);
1215 
1216 	while (tmp) {
1217 		if (address >= tmp->ba.start) {
1218 			if (address < tmp->ba.end) {
1219 				*entry = tmp;
1220 				return(TRUE);
1221 			}
1222 			last = tmp;
1223 			tmp = RB_RIGHT(tmp, rb_entry);
1224 		} else {
1225 			tmp = RB_LEFT(tmp, rb_entry);
1226 		}
1227 	}
1228 	*entry = last;
1229 	return (FALSE);
1230 }
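/*
 * Illustrative usage sketch, similar to the probe done by
 * vm_map_insert() below:
 *
 *	if (vm_map_lookup_entry(map, start, &entry)) {
 *		... start lies inside an existing entry ...
 *	} else if (entry) {
 *		... entry is the closest entry preceding start ...
 *	} else {
 *		... start precedes every entry in the map ...
 *	}
 */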
1231 
1232 /*
1233  * Inserts the given whole VM object into the target map at the specified
1234  * address range.  The object's size should match that of the address range.
1235  *
1236  * The map must be exclusively locked.
1237  * The object must be held.
1238  * The caller must have reserved sufficient vm_map_entry structures.
1239  *
1240  * If object is non-NULL, ref count must be bumped by caller prior to
1241  * making call to account for the new entry.  XXX API is a bit messy.
1242  */
1243 int
1244 vm_map_insert(vm_map_t map, int *countp,
1245 	      void *map_object, void *map_aux,
1246 	      vm_ooffset_t offset, void *aux_info,
1247 	      vm_offset_t start, vm_offset_t end,
1248 	      vm_maptype_t maptype, vm_subsys_t id,
1249 	      vm_prot_t prot, vm_prot_t max, int cow)
1250 {
1251 	vm_map_entry_t new_entry;
1252 	vm_map_entry_t prev_entry;
1253 	vm_map_entry_t next;
1254 	vm_map_entry_t temp_entry;
1255 	vm_eflags_t protoeflags;
1256 	vm_object_t object;
1257 	int must_drop = 0;
1258 
1259 	if (maptype == VM_MAPTYPE_UKSMAP)
1260 		object = NULL;
1261 	else
1262 		object = map_object;
1263 
1264 	ASSERT_VM_MAP_LOCKED(map);
1265 	if (object)
1266 		ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1267 
1268 	/*
1269 	 * Check that the start and end points are not bogus.
1270 	 */
1271 	if ((start < vm_map_min(map)) || (end > vm_map_max(map)) ||
1272 	    (start >= end)) {
1273 		return (KERN_INVALID_ADDRESS);
1274 	}
1275 
1276 	/*
1277 	 * Find the entry prior to the proposed starting address; if it's part
1278 	 * of an existing entry, this range is bogus.
1279 	 */
1280 	if (vm_map_lookup_entry(map, start, &temp_entry))
1281 		return (KERN_NO_SPACE);
1282 	prev_entry = temp_entry;
1283 
1284 	/*
1285 	 * Assert that the next entry doesn't overlap the end point.
1286 	 */
1287 	if (prev_entry)
1288 		next = vm_map_rb_tree_RB_NEXT(prev_entry);
1289 	else
1290 		next = RB_MIN(vm_map_rb_tree, &map->rb_root);
1291 	if (next && next->ba.start < end)
1292 		return (KERN_NO_SPACE);
1293 
1294 	protoeflags = 0;
1295 
1296 	if (cow & MAP_COPY_ON_WRITE)
1297 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
1298 
1299 	if (cow & MAP_NOFAULT) {
1300 		protoeflags |= MAP_ENTRY_NOFAULT;
1301 
1302 		KASSERT(object == NULL,
1303 			("vm_map_insert: paradoxical MAP_NOFAULT request"));
1304 	}
1305 	if (cow & MAP_DISABLE_SYNCER)
1306 		protoeflags |= MAP_ENTRY_NOSYNC;
1307 	if (cow & MAP_DISABLE_COREDUMP)
1308 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
1309 	if (cow & MAP_IS_STACK)
1310 		protoeflags |= MAP_ENTRY_STACK;
1311 	if (cow & MAP_IS_KSTACK)
1312 		protoeflags |= MAP_ENTRY_KSTACK;
1313 
1314 	lwkt_gettoken(&map->token);
1315 
1316 	if (object) {
1317 		;
1318 	} else if (prev_entry &&
1319 		 (prev_entry->eflags == protoeflags) &&
1320 		 (prev_entry->ba.end == start) &&
1321 		 (prev_entry->wired_count == 0) &&
1322 		 (prev_entry->id == id) &&
1323 		 prev_entry->maptype == maptype &&
1324 		 maptype == VM_MAPTYPE_NORMAL &&
1325 		 prev_entry->ba.backing_ba == NULL &&	/* not backed */
1326 		 ((prev_entry->ba.object == NULL) ||
1327 		  vm_object_coalesce(prev_entry->ba.object,
1328 				     OFF_TO_IDX(prev_entry->ba.offset),
1329 				     (vm_size_t)(prev_entry->ba.end - prev_entry->ba.start),
1330 				     (vm_size_t)(end - prev_entry->ba.end)))) {
1331 		/*
1332 		 * We were able to extend the object.  Determine if we
1333 		 * can extend the previous map entry to include the
1334 		 * new range as well.
1335 		 */
1336 		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
1337 		    (prev_entry->protection == prot) &&
1338 		    (prev_entry->max_protection == max)) {
1339 			map->size += (end - prev_entry->ba.end);
1340 			vm_map_backing_adjust_end(prev_entry, end);
1341 			vm_map_simplify_entry(map, prev_entry, countp);
1342 			lwkt_reltoken(&map->token);
1343 			return (KERN_SUCCESS);
1344 		}
1345 
1346 		/*
1347 		 * If we can extend the object but cannot extend the
1348 		 * map entry, we have to create a new map entry.  We
1349 		 * must bump the ref count on the extended object to
1350 		 * account for it.  object may be NULL.
1351 		 */
1352 		object = prev_entry->ba.object;
1353 		offset = prev_entry->ba.offset +
1354 			(prev_entry->ba.end - prev_entry->ba.start);
1355 		if (object) {
1356 			vm_object_hold(object);
1357 			vm_object_lock_swap(); /* map->token order */
1358 			vm_object_reference_locked(object);
1359 			map_object = object;
1360 			must_drop = 1;
1361 		}
1362 	}
1363 
1364 	/*
1365 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
1366 	 * in things like the buffer map where we manage kva but do not manage
1367 	 * backing objects.
1368 	 */
1369 
1370 	/*
1371 	 * Create a new entry
1372 	 */
1373 	new_entry = vm_map_entry_create(countp);
1374 	new_entry->ba.pmap = map->pmap;
1375 	new_entry->ba.start = start;
1376 	new_entry->ba.end = end;
1377 	new_entry->id = id;
1378 
1379 	new_entry->maptype = maptype;
1380 	new_entry->eflags = protoeflags;
1381 	new_entry->aux.master_pde = 0;		/* in case size is different */
1382 	new_entry->aux.map_aux = map_aux;
1383 	new_entry->ba.map_object = map_object;
1384 	new_entry->ba.backing_ba = NULL;
1385 	new_entry->ba.backing_count = 0;
1386 	new_entry->ba.offset = offset;
1387 	new_entry->ba.aux_info = aux_info;
1388 	new_entry->ba.flags = 0;
1389 	new_entry->ba.pmap = map->pmap;
1390 
1391 	new_entry->inheritance = VM_INHERIT_DEFAULT;
1392 	new_entry->protection = prot;
1393 	new_entry->max_protection = max;
1394 	new_entry->wired_count = 0;
1395 
1396 	/*
1397 	 * Insert the new entry into the list
1398 	 */
1399 	vm_map_backing_replicated(map, new_entry, MAP_BACK_BASEOBJREFD);
1400 	vm_map_entry_link(map, new_entry);
1401 	map->size += new_entry->ba.end - new_entry->ba.start;
1402 
1403 	/*
1404 	 * Don't worry about updating freehint[] when inserting, allow
1405 	 * addresses to be lower than the actual first free spot.
1406 	 */
1407 #if 0
1408 	/*
1409 	 * Temporarily removed to avoid MAP_STACK panic, due to
1410 	 * MAP_STACK being a huge hack.  Will be added back in
1411 	 * when MAP_STACK (and the user stack mapping) is fixed.
1412 	 */
1413 	/*
1414 	 * It may be possible to simplify the entry
1415 	 */
1416 	vm_map_simplify_entry(map, new_entry, countp);
1417 #endif
1418 
1419 	/*
1420 	 * Try to pre-populate the page table.  Mappings governed by virtual
1421 	 * page tables cannot be prepopulated without a lot of work, so
1422 	 * don't try.
1423 	 */
1424 	if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
1425 	    maptype != VM_MAPTYPE_VPAGETABLE &&
1426 	    maptype != VM_MAPTYPE_UKSMAP) {
1427 		int dorelock = 0;
1428 		if (vm_map_relock_enable && (cow & MAP_PREFAULT_RELOCK)) {
1429 			dorelock = 1;
1430 			vm_object_lock_swap();
1431 			vm_object_drop(object);
1432 		}
1433 		pmap_object_init_pt(map->pmap, new_entry,
1434 				    new_entry->ba.start,
1435 				    new_entry->ba.end - new_entry->ba.start,
1436 				    cow & MAP_PREFAULT_PARTIAL);
1437 		if (dorelock) {
1438 			vm_object_hold(object);
1439 			vm_object_lock_swap();
1440 		}
1441 	}
1442 	lwkt_reltoken(&map->token);
1443 	if (must_drop)
1444 		vm_object_drop(object);
1445 
1446 	return (KERN_SUCCESS);
1447 }
1448 
1449 /*
1450  * Find sufficient space for `length' bytes in the given map, starting at
1451  * `start'.  Returns 0 on success, 1 on no space.
1452  *
1453  * This function will return an arbitrarily aligned pointer.  If no
1454  * particular alignment is required you should pass align as 1.  Note that
1455  * the map may return PAGE_SIZE aligned pointers if all the lengths used in
1456  * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
1457  * argument.
1458  *
1459  * 'align' should be a power of 2 but is not required to be.
1460  *
1461  * The map must be exclusively locked.
1462  * No other requirements.
1463  */
1464 int
1465 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1466 		 vm_size_t align, int flags, vm_offset_t *addr)
1467 {
1468 	vm_map_entry_t entry;
1469 	vm_map_entry_t tmp;
1470 	vm_offset_t hole_start;
1471 	vm_offset_t end;
1472 	vm_offset_t align_mask;
1473 
1474 	if (start < vm_map_min(map))
1475 		start = vm_map_min(map);
1476 	if (start > vm_map_max(map))
1477 		return (1);
1478 
1479 	/*
1480 	 * If the alignment is not a power of 2 we will have to use
1481 	 * a mod/division, set align_mask to a special value.
1482 	 */
1483 	if ((align | (align - 1)) + 1 != (align << 1))
1484 		align_mask = (vm_offset_t)-1;
1485 	else
1486 		align_mask = align - 1;
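	/*
	 * Worked example: for a power of 2 such as align = 0x1000,
	 * (align | (align - 1)) + 1 = 0x1fff + 1 = 0x2000 = align << 1,
	 * so align_mask becomes 0x0fff and the masked round-up below is
	 * used.  For align = 3, (3 | 2) + 1 = 4 != 6, so align_mask is
	 * set to -1 and roundup() (a division) is used instead.
	 */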
1487 
1488 	/*
1489 	 * Use freehint to adjust the start point, hopefully reducing
1490 	 * the iteration to O(1).
1491 	 */
1492 	hole_start = vm_map_freehint_find(map, length, align);
1493 	if (start < hole_start)
1494 		start = hole_start;
1495 	if (vm_map_lookup_entry(map, start, &tmp))
1496 		start = tmp->ba.end;
1497 	entry = tmp;	/* may be NULL */
1498 
1499 	/*
1500 	 * Look through the rest of the map, trying to fit a new region in the
1501 	 * gap between existing regions, or after the very last region.
1502 	 */
1503 	for (;;) {
1504 		/*
1505 		 * Adjust the proposed start by the requested alignment,
1506 		 * be sure that we didn't wrap the address.
1507 		 */
1508 		if (align_mask == (vm_offset_t)-1)
1509 			end = roundup(start, align);
1510 		else
1511 			end = (start + align_mask) & ~align_mask;
1512 		if (end < start)
1513 			return (1);
1514 		start = end;
1515 
1516 		/*
1517 		 * Find the end of the proposed new region.  Be sure we didn't
1518 		 * go beyond the end of the map, or wrap around the address.
1519 		 * Then check to see if this is the last entry or if the
1520 		 * proposed end fits in the gap between this and the next
1521 		 * entry.
1522 		 */
1523 		end = start + length;
1524 		if (end > vm_map_max(map) || end < start)
1525 			return (1);
1526 
1527 		/*
1528 		 * Locate the next entry, we can stop if this is the
1529 		 * last entry (we know we are in-bounds so that would
1530 		 * be a sucess).
1531 		 * be a success).
1532 		if (entry)
1533 			entry = vm_map_rb_tree_RB_NEXT(entry);
1534 		else
1535 			entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
1536 		if (entry == NULL)
1537 			break;
1538 
1539 		/*
1540 		 * Determine if the proposed area would overlap the
1541 		 * next entry.
1542 		 *
1543 		 * When matching against a STACK entry, only allow the
1544 		 * memory map to intrude on the ungrown portion of the
1545 		 * STACK entry when MAP_TRYFIXED is set.
1546 		 */
1547 		if (entry->ba.start >= end) {
1548 			if ((entry->eflags & MAP_ENTRY_STACK) == 0)
1549 				break;
1550 			if (flags & MAP_TRYFIXED)
1551 				break;
1552 			if (entry->ba.start - entry->aux.avail_ssize >= end)
1553 				break;
1554 		}
1555 		start = entry->ba.end;
1556 	}
1557 
1558 	/*
1559 	 * Update the freehint
1560 	 */
1561 	vm_map_freehint_update(map, start, length, align);
1562 
1563 	/*
1564 	 * Grow the kernel_map if necessary.  pmap_growkernel() will panic
1565 	 * if it fails.  The kernel_map is locked and nothing can steal
1566 	 * our address space if pmap_growkernel() blocks.
1567 	 *
1568 	 * NOTE: This may be unconditionally called for kldload areas on
1569 	 *	 x86_64 because these do not bump kernel_vm_end (which would
1570 	 *	 fill 128G worth of page tables!).  Therefore we must not
1571 	 *	 retry.
1572 	 */
1573 	if (map == &kernel_map) {
1574 		vm_offset_t kstop;
1575 
1576 		kstop = round_page(start + length);
1577 		if (kstop > kernel_vm_end)
1578 			pmap_growkernel(start, kstop);
1579 	}
1580 	*addr = start;
1581 	return (0);
1582 }
1583 
1584 /*
1585  * vm_map_find finds an unallocated region in the target address map with
1586  * the given length and allocates it.  The search is defined to be first-fit
1587  * from the specified address; the region found is returned in the same
1588  * parameter.
1589  *
1590  * If object is non-NULL, ref count must be bumped by caller
1591  * prior to making call to account for the new entry.
1592  *
1593  * No requirements.  This function will lock the map temporarily.
1594  */
1595 int
1596 vm_map_find(vm_map_t map, void *map_object, void *map_aux,
1597 	    vm_ooffset_t offset, vm_offset_t *addr,
1598 	    vm_size_t length, vm_size_t align, boolean_t fitit,
1599 	    vm_maptype_t maptype, vm_subsys_t id,
1600 	    vm_prot_t prot, vm_prot_t max, int cow)
1601 {
1602 	vm_offset_t start;
1603 	vm_object_t object;
1604 	void *aux_info;
1605 	int result;
1606 	int count;
1607 
1608 	/*
1609 	 * Certain UKSMAPs may need aux_info.
1610 	 *
1611 	 * (map_object is the callback function, aux_info is the process
1612 	 *  or thread, if necessary).
1613 	 */
1614 	aux_info = NULL;
1615 	if (maptype == VM_MAPTYPE_UKSMAP) {
1616 		KKASSERT(map_aux != NULL && map_object != NULL);
1617 
1618 		switch(minor(((struct cdev *)map_aux))) {
1619 		case 5:
1620 			/*
1621 			 * /dev/upmap
1622 			 */
1623 			aux_info = curproc;
1624 			break;
1625 		case 6:
1626 			/*
1627 			 * /dev/kpmap
1628 			 */
1629 			break;
1630 		case 7:
1631 			/*
1632 			 * /dev/lpmap
1633 			 */
1634 			aux_info = curthread->td_lwp;
1635 			break;
1636 		}
1637 		object = NULL;
1638 	} else {
1639 		object = map_object;
1640 	}
1641 
1642 	start = *addr;
1643 
1644 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1645 	vm_map_lock(map);
1646 	if (object)
1647 		vm_object_hold_shared(object);
1648 	if (fitit) {
1649 		if (vm_map_findspace(map, start, length, align, 0, addr)) {
1650 			if (object)
1651 				vm_object_drop(object);
1652 			vm_map_unlock(map);
1653 			vm_map_entry_release(count);
1654 			return (KERN_NO_SPACE);
1655 		}
1656 		start = *addr;
1657 	}
1658 	result = vm_map_insert(map, &count,
1659 			       map_object, map_aux,
1660 			       offset, aux_info,
1661 			       start, start + length,
1662 			       maptype, id, prot, max, cow);
1663 	if (object)
1664 		vm_object_drop(object);
1665 	vm_map_unlock(map);
1666 	vm_map_entry_release(count);
1667 
1668 	return (result);
1669 }
1670 
1671 /*
1672  * Simplify the given map entry by merging with either neighbor.  This
1673  * routine also has the ability to merge with both neighbors.
1674  *
1675  * This routine guarantees that the passed entry remains valid (though
1676  * possibly extended).  When merging, this routine may delete one or
1677  * both neighbors.  No action is taken on entries which have their
1678  * in-transition flag set.
1679  *
1680  * The map must be exclusively locked.
1681  */
1682 void
1683 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
1684 {
1685 	vm_map_entry_t next, prev;
1686 	vm_size_t prevsize, esize;
1687 
1688 	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1689 		++mycpu->gd_cnt.v_intrans_coll;
1690 		return;
1691 	}
1692 
1693 	if (entry->maptype == VM_MAPTYPE_SUBMAP)
1694 		return;
1695 	if (entry->maptype == VM_MAPTYPE_UKSMAP)
1696 		return;
1697 
1698 	prev = vm_map_rb_tree_RB_PREV(entry);
1699 	if (prev) {
1700 		prevsize = prev->ba.end - prev->ba.start;
1701 		if ( (prev->ba.end == entry->ba.start) &&
1702 		     (prev->maptype == entry->maptype) &&
1703 		     (prev->ba.object == entry->ba.object) &&
1704 		     (prev->ba.backing_ba == entry->ba.backing_ba) &&
1705 		     (!prev->ba.object ||
1706 			(prev->ba.offset + prevsize == entry->ba.offset)) &&
1707 		     (prev->eflags == entry->eflags) &&
1708 		     (prev->protection == entry->protection) &&
1709 		     (prev->max_protection == entry->max_protection) &&
1710 		     (prev->inheritance == entry->inheritance) &&
1711 		     (prev->id == entry->id) &&
1712 		     (prev->wired_count == entry->wired_count)) {
1713 			/*
1714 			 * NOTE: order important.  Unlink before gumming up
1715 			 *	 the RBTREE w/adjust, adjust before disposal
1716 			 *	 of prior entry, to avoid pmap snafus.
1717 			 */
1718 			vm_map_entry_unlink(map, prev);
1719 			vm_map_backing_adjust_start(entry, prev->ba.start);
1720 			if (entry->ba.object == NULL)
1721 				entry->ba.offset = 0;
1722 			vm_map_entry_dispose(map, prev, countp);
1723 		}
1724 	}
1725 
1726 	next = vm_map_rb_tree_RB_NEXT(entry);
1727 	if (next) {
1728 		esize = entry->ba.end - entry->ba.start;
1729 		if ((entry->ba.end == next->ba.start) &&
1730 		    (next->maptype == entry->maptype) &&
1731 		    (next->ba.object == entry->ba.object) &&
1732 		    (next->ba.backing_ba == entry->ba.backing_ba) &&
1733 		    (!entry->ba.object ||
1734 			(entry->ba.offset + esize == next->ba.offset)) &&
1735 		    (next->eflags == entry->eflags) &&
1736 		    (next->protection == entry->protection) &&
1737 		    (next->max_protection == entry->max_protection) &&
1738 		    (next->inheritance == entry->inheritance) &&
1739 		    (next->id == entry->id) &&
1740 		    (next->wired_count == entry->wired_count)) {
1741 			/*
1742 			 * NOTE: order important.  Unlink before gumming up
1743 			 *	 the RBTREE w/adjust, adjust before disposal
1744 			 *	 of prior entry, to avoid pmap snafus.
1745 			 */
1746 			vm_map_entry_unlink(map, next);
1747 			vm_map_backing_adjust_end(entry, next->ba.end);
1748 			vm_map_entry_dispose(map, next, countp);
1749 	        }
1750 	}
1751 }
1752 
1753 /*
1754  * Asserts that the given entry begins at or after the specified address.
1755  * If necessary, it splits the entry into two.
1756  */
1757 #define vm_map_clip_start(map, entry, startaddr, countp)		\
1758 {									\
1759 	if (startaddr > entry->ba.start)				\
1760 		_vm_map_clip_start(map, entry, startaddr, countp);	\
1761 }
1762 
1763 /*
1764  * This routine is called only when it is known that the entry must be split.
1765  *
1766  * The map must be exclusively locked.
1767  */
1768 static void
1769 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start,
1770 		   int *countp)
1771 {
1772 	vm_map_entry_t new_entry;
1773 
1774 	/*
1775 	 * Split off the front portion -- note that we must insert the new
1776 	 * entry BEFORE this one, so that this entry has the specified
1777 	 * starting address.
1778 	 */
1779 
1780 	vm_map_simplify_entry(map, entry, countp);
1781 
1782 	/*
1783 	 * If there is no object backing this entry, we might as well create
1784 	 * one now.  If we defer it, an object can get created after the map
1785 	 * is clipped, and individual objects will be created for the split-up
1786 	 * map.  This is a bit of a hack, but is also about the best place to
1787 	 * put this improvement.
1788 	 */
1789 	if (entry->ba.object == NULL && !map->system_map &&
1790 	    VM_MAP_ENTRY_WITHIN_PARTITION(entry)) {
1791 		vm_map_entry_allocate_object(entry);
1792 	}
1793 
1794 	/*
1795 	 * NOTE: The replicated function will adjust start, end, and offset
1796 	 *	 for the remainder of the backing_ba linkages.  We must fixup
1797 	 *	 the embedded ba.
1798 	 */
1799 	new_entry = vm_map_entry_create(countp);
1800 	*new_entry = *entry;
1801 	new_entry->ba.end = start;
1802 
1803 	/*
1804 	 * Ordering is important: make sure the new entry is replicated
1805 	 * before we cut the existing entry.
1806 	 */
1807 	vm_map_backing_replicated(map, new_entry, MAP_BACK_CLIPPED);
1808 	vm_map_backing_adjust_start(entry, start);
1809 	vm_map_entry_link(map, new_entry);
1810 }
1811 
1812 /*
1813  * Asserts that the given entry ends at or before the specified address.
1814  * If necessary, it splits the entry into two.
1815  *
1816  * The map must be exclusively locked.
1817  */
1818 #define vm_map_clip_end(map, entry, endaddr, countp)		\
1819 {								\
1820 	if (endaddr < entry->ba.end)				\
1821 		_vm_map_clip_end(map, entry, endaddr, countp);	\
1822 }
1823 
1824 /*
1825  * This routine is called only when it is known that the entry must be split.
1826  *
1827  * The map must be exclusively locked.
1828  */
1829 static void
1830 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end,
1831 		 int *countp)
1832 {
1833 	vm_map_entry_t new_entry;
1834 
1835 	/*
1836 	 * If there is no object backing this entry, we might as well create
1837 	 * one now.  If we defer it, an object can get created after the map
1838 	 * is clipped, and individual objects will be created for the split-up
1839 	 * map.  This is a bit of a hack, but is also about the best place to
1840 	 * put this improvement.
1841 	 */
1842 
1843 	if (entry->ba.object == NULL && !map->system_map &&
1844 	    VM_MAP_ENTRY_WITHIN_PARTITION(entry)) {
1845 		vm_map_entry_allocate_object(entry);
1846 	}
1847 
1848 	/*
1849 	 * Create a new entry and insert it AFTER the specified entry
1850 	 *
1851 	 * NOTE: The replicated function will adjust start, end, and offset
1852 	 *	 for the remainder of the backing_ba linkages.  We must fixup
1853 	 *	 the embedded ba.
1854 	 */
1855 	new_entry = vm_map_entry_create(countp);
1856 	*new_entry = *entry;
1857 	new_entry->ba.start = end;
1858 	new_entry->ba.offset += (new_entry->ba.start - entry->ba.start);
1859 
1860 	/*
1861 	 * Ordering is important, make sure the new entry is replicated
1862 	 * before we cut the exiting entry.
1863 	 */
1864 	vm_map_backing_replicated(map, new_entry, MAP_BACK_CLIPPED);
1865 	vm_map_backing_adjust_end(entry, end);
1866 	vm_map_entry_link(map, new_entry);
1867 }
1868 
1869 /*
1870  * Asserts that the starting and ending region addresses fall within the
1871  * valid range for the map.
1872  */
1873 #define	VM_MAP_RANGE_CHECK(map, start, end)	\
1874 {						\
1875 	if (start < vm_map_min(map))		\
1876 		start = vm_map_min(map);	\
1877 	if (end > vm_map_max(map))		\
1878 		end = vm_map_max(map);		\
1879 	if (start > end)			\
1880 		start = end;			\
1881 }
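
/*
 * Illustrative example (hypothetical addresses, not taken from any caller):
 * for a map spanning [0x1000, 0x8000), VM_MAP_RANGE_CHECK clamps a request
 * for [0x0500, 0x9000) to [0x1000, 0x8000), and a request entirely outside
 * the map collapses to an empty range (start == end), so callers simply
 * find nothing to operate on.
 */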
1882 
1883 /*
1884  * Used to block when an in-transition collision occurs.  The map is
1885  * unlocked for the sleep and relocked before returning if relock is set.
1886  */
1887 void
1888 vm_map_transition_wait(vm_map_t map, int relock)
1889 {
1890 	tsleep_interlock(map, 0);
1891 	vm_map_unlock(map);
1892 	tsleep(map, PINTERLOCKED, "vment", 0);
1893 	if (relock)
1894 		vm_map_lock(map);
1895 }
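
/*
 * Illustrative caller sketch (condensed from the pattern used by
 * vm_map_clip_range() and vm_map_delete() below); the wakeup flag and
 * counters are the ones those callers already use:
 *
 *	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 *		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 *		++mycpu->gd_cnt.v_intrans_coll;
 *		++mycpu->gd_cnt.v_intrans_wait;
 *		vm_map_transition_wait(map, 1);
 *		goto again;	(re-lookup; entries may have changed)
 *	}
 */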
1896 
1897 /*
1898  * When we do blocking operations with the map lock held, a clip might
1899  * have occurred on our in-transition entry, requiring an adjustment to
1900  * the entry in our loop.  These macros help the pageable and clip_range
1901  * code deal with the case; see the usage sketch after the macros.  The
1902  * conditional costs virtually nothing if no clipping has occurred.
1903  */
1904 
1905 #define CLIP_CHECK_BACK(entry, save_start)			\
1906     do {							\
1907 	    while (entry->ba.start != save_start) {		\
1908 		    entry = vm_map_rb_tree_RB_PREV(entry);	\
1909 		    KASSERT(entry, ("bad entry clip")); 	\
1910 	    }							\
1911     } while(0)
1912 
1913 #define CLIP_CHECK_FWD(entry, save_end)				\
1914     do {							\
1915 	    while (entry->ba.end != save_end) {			\
1916 		    entry = vm_map_rb_tree_RB_NEXT(entry);	\
1917 		    KASSERT(entry, ("bad entry clip")); 	\
1918 	    }							\
1919     } while(0)
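
/*
 * Illustrative usage sketch (condensed from vm_map_clip_range() below):
 * record the boundary of interest before a blocking call, then walk back
 * to the matching fragment afterwards:
 *
 *	save_end = entry->ba.end;
 *	vm_map_transition_wait(map, 1);		(map was unlocked; clips
 *						 may have occurred)
 *	CLIP_CHECK_FWD(entry, save_end);
 *	CLIP_CHECK_BACK(start_entry, start);
 */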
1920 
1921 
1922 /*
1923  * Clip the specified range and return the base entry.  The
1924  * range may cover several entries starting at the returned base
1925  * and the first and last entry in the covering sequence will be
1926  * properly clipped to the requested start and end address.
1927  *
1928  * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
1929  * flag.
1930  *
1931  * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
1932  * covered by the requested range.
1933  *
1934  * The map must be exclusively locked on entry and will remain locked
1935  * on return. If no range exists or the range contains holes and you
1936  * specified that no holes were allowed, NULL will be returned.  This
1937  * routine may temporarily unlock the map in order to avoid a deadlock when
1938  * sleeping.
1939  */
1940 static
1941 vm_map_entry_t
1942 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end,
1943 		  int *countp, int flags)
1944 {
1945 	vm_map_entry_t start_entry;
1946 	vm_map_entry_t entry;
1947 	vm_map_entry_t next;
1948 
1949 	/*
1950 	 * Locate the entry and effect initial clipping.  The in-transition
1951 	 * case does not occur very often so do not try to optimize it.
1952 	 */
1953 again:
1954 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
1955 		return (NULL);
1956 	entry = start_entry;
1957 	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1958 		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1959 		++mycpu->gd_cnt.v_intrans_coll;
1960 		++mycpu->gd_cnt.v_intrans_wait;
1961 		vm_map_transition_wait(map, 1);
1962 		/*
1963 		 * entry and/or start_entry may have been clipped while
1964 		 * we slept, or may have gone away entirely.  We have
1965 		 * to restart from the lookup.
1966 		 */
1967 		goto again;
1968 	}
1969 
1970 	/*
1971 	 * Since we hold an exclusive map lock we do not have to restart
1972 	 * after clipping, even though clipping may block in zalloc.
1973 	 */
1974 	vm_map_clip_start(map, entry, start, countp);
1975 	vm_map_clip_end(map, entry, end, countp);
1976 	entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1977 
1978 	/*
1979 	 * Scan entries covered by the range.  When working on the next
1980 	 * entry a restart need only re-loop on the current entry which
1981 	 * we have already locked, since 'next' may have changed.  Also,
1982 	 * even though entry is safe, it may have been clipped so we
1983 	 * have to iterate forwards through the clip after sleeping.
1984 	 */
1985 	for (;;) {
1986 		next = vm_map_rb_tree_RB_NEXT(entry);
1987 		if (next == NULL || next->ba.start >= end)
1988 			break;
1989 		if (flags & MAP_CLIP_NO_HOLES) {
1990 			if (next->ba.start > entry->ba.end) {
1991 				vm_map_unclip_range(map, start_entry,
1992 					start, entry->ba.end, countp, flags);
1993 				return(NULL);
1994 			}
1995 		}
1996 
1997 		if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
1998 			vm_offset_t save_end = entry->ba.end;
1999 			next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2000 			++mycpu->gd_cnt.v_intrans_coll;
2001 			++mycpu->gd_cnt.v_intrans_wait;
2002 			vm_map_transition_wait(map, 1);
2003 
2004 			/*
2005 			 * clips might have occurred while we blocked.
2006 			 */
2007 			CLIP_CHECK_FWD(entry, save_end);
2008 			CLIP_CHECK_BACK(start_entry, start);
2009 			continue;
2010 		}
2011 
2012 		/*
2013 		 * No restart necessary even though clip_end may block, we
2014 		 * are holding the map lock.
2015 		 */
2016 		vm_map_clip_end(map, next, end, countp);
2017 		next->eflags |= MAP_ENTRY_IN_TRANSITION;
2018 		entry = next;
2019 	}
2020 	if (flags & MAP_CLIP_NO_HOLES) {
2021 		if (entry->ba.end != end) {
2022 			vm_map_unclip_range(map, start_entry,
2023 				start, entry->ba.end, countp, flags);
2024 			return(NULL);
2025 		}
2026 	}
2027 	return(start_entry);
2028 }
2029 
2030 /*
2031  * Undo the effect of vm_map_clip_range().  You should pass the same
2032  * flags and the same range that you passed to vm_map_clip_range().
2033  * This code will clear the in-transition flag on the entries and
2034  * wake up anyone waiting.  This code will also simplify the sequence
2035  * and attempt to merge it with entries before and after the sequence.
2036  *
2037  * The map must be locked on entry and will remain locked on return.
2038  *
2039  * Note that you should also pass the start_entry returned by
2040  * vm_map_clip_range().  However, if you block between the two calls
2041  * with the map unlocked, please be aware that the start_entry may
2042  * have been clipped and you may need to scan it backwards to find
2043  * the entry corresponding with the original start address.  You are
2044  * responsible for this, vm_map_unclip_range() expects the correct
2045  * start_entry to be passed to it and will KASSERT otherwise.
2046  */
2047 static
2048 void
2049 vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry,
2050 		    vm_offset_t start, vm_offset_t end,
2051 		    int *countp, int flags)
2052 {
2053 	vm_map_entry_t entry;
2054 
2055 	entry = start_entry;
2056 
2057 	KASSERT(entry->ba.start == start, ("unclip_range: illegal base entry"));
2058 	while (entry && entry->ba.start < end) {
2059 		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
2060 			("in-transition flag not set during unclip on: %p",
2061 			entry));
2062 		KASSERT(entry->ba.end <= end,
2063 			("unclip_range: tail wasn't clipped"));
2064 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2065 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2066 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2067 			wakeup(map);
2068 		}
2069 		entry = vm_map_rb_tree_RB_NEXT(entry);
2070 	}
2071 
2072 	/*
2073 	 * Simplification does not block so there is no restart case.
2074 	 */
2075 	entry = start_entry;
2076 	while (entry && entry->ba.start < end) {
2077 		vm_map_simplify_entry(map, entry, countp);
2078 		entry = vm_map_rb_tree_RB_NEXT(entry);
2079 	}
2080 }
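
/*
 * Illustrative caller sketch (condensed from vm_map_unwire()/vm_map_wire()
 * below); error handling omitted:
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	entry = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES);
 *	if (entry) {
 *		(iterate with vm_map_rb_tree_RB_NEXT() while
 *		 entry->ba.start < end, operating on each entry)
 *		vm_map_unclip_range(map, entry, start, end, &count,
 *				    MAP_CLIP_NO_HOLES);
 *	}
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */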
2081 
2082 /*
2083  * Mark the given range as handled by a subordinate map.
2084  *
2085  * This range must have been created with vm_map_find(), and no other
2086  * operations may have been performed on this range prior to calling
2087  * vm_map_submap().
2088  *
2089  * Submappings cannot be removed.
2090  *
2091  * No requirements.
2092  */
2093 int
2094 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
2095 {
2096 	vm_map_entry_t entry;
2097 	int result = KERN_INVALID_ARGUMENT;
2098 	int count;
2099 
2100 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2101 	vm_map_lock(map);
2102 
2103 	VM_MAP_RANGE_CHECK(map, start, end);
2104 
2105 	if (vm_map_lookup_entry(map, start, &entry)) {
2106 		vm_map_clip_start(map, entry, start, &count);
2107 	} else if (entry) {
2108 		entry = vm_map_rb_tree_RB_NEXT(entry);
2109 	} else {
2110 		entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
2111 	}
2112 
2113 	vm_map_clip_end(map, entry, end, &count);
2114 
2115 	if ((entry->ba.start == start) && (entry->ba.end == end) &&
2116 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
2117 	    (entry->ba.object == NULL)) {
2118 		entry->ba.sub_map = submap;
2119 		entry->maptype = VM_MAPTYPE_SUBMAP;
2120 		result = KERN_SUCCESS;
2121 	}
2122 	vm_map_unlock(map);
2123 	vm_map_entry_release(count);
2124 
2125 	return (result);
2126 }
2127 
2128 /*
2129  * Sets the protection of the specified address region in the target map.
2130  * If "set_max" is specified, the maximum protection is to be set;
2131  * otherwise, only the current protection is affected.
2132  *
2133  * The protection is not applicable to submaps, but is applicable to normal
2134  * maps and maps governed by virtual page tables.  For example, when operating
2135  * on a virtual page table our protection basically controls how COW occurs
2136  * on the backing object, whereas the virtual page table abstraction itself
2137  * is an abstraction for userland.
2138  *
2139  * No requirements.
2140  */
2141 int
2142 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
2143 	       vm_prot_t new_prot, boolean_t set_max)
2144 {
2145 	vm_map_entry_t current;
2146 	vm_map_entry_t entry;
2147 	int count;
2148 
2149 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2150 	vm_map_lock(map);
2151 
2152 	VM_MAP_RANGE_CHECK(map, start, end);
2153 
2154 	if (vm_map_lookup_entry(map, start, &entry)) {
2155 		vm_map_clip_start(map, entry, start, &count);
2156 	} else if (entry) {
2157 		entry = vm_map_rb_tree_RB_NEXT(entry);
2158 	} else {
2159 		entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
2160 	}
2161 
2162 	/*
2163 	 * Make a first pass to check for protection violations.
2164 	 */
2165 	current = entry;
2166 	while (current && current->ba.start < end) {
2167 		if (current->maptype == VM_MAPTYPE_SUBMAP) {
2168 			vm_map_unlock(map);
2169 			vm_map_entry_release(count);
2170 			return (KERN_INVALID_ARGUMENT);
2171 		}
2172 		if ((new_prot & current->max_protection) != new_prot) {
2173 			vm_map_unlock(map);
2174 			vm_map_entry_release(count);
2175 			return (KERN_PROTECTION_FAILURE);
2176 		}
2177 
2178 		/*
2179 		 * When making a SHARED+RW file mmap writable, update
2180 		 * v_lastwrite_ts.
2181 		 */
2182 		if (new_prot & PROT_WRITE &&
2183 		    (current->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
2184 		    (current->maptype == VM_MAPTYPE_NORMAL ||
2185 		     current->maptype == VM_MAPTYPE_VPAGETABLE) &&
2186 		    current->ba.object &&
2187 		    current->ba.object->type == OBJT_VNODE) {
2188 			struct vnode *vp;
2189 
2190 			vp = current->ba.object->handle;
2191 			if (vp && vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT) == 0) {
2192 				vfs_timestamp(&vp->v_lastwrite_ts);
2193 				vsetflags(vp, VLASTWRITETS);
2194 				vn_unlock(vp);
2195 			}
2196 		}
2197 		current = vm_map_rb_tree_RB_NEXT(current);
2198 	}
2199 
2200 	/*
2201 	 * Go back and fix up protections. [Note that clipping is not
2202 	 * necessary the second time.]
2203 	 */
2204 	current = entry;
2205 
2206 	while (current && current->ba.start < end) {
2207 		vm_prot_t old_prot;
2208 
2209 		vm_map_clip_end(map, current, end, &count);
2210 
2211 		old_prot = current->protection;
2212 		if (set_max) {
2213 			current->max_protection = new_prot;
2214 			current->protection = new_prot & old_prot;
2215 		} else {
2216 			current->protection = new_prot;
2217 		}
2218 
2219 		/*
2220 		 * Update physical map if necessary. Worry about copy-on-write
2221 		 * here -- CHECK THIS XXX
2222 		 */
2223 		if (current->protection != old_prot) {
2224 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2225 							VM_PROT_ALL)
2226 
2227 			pmap_protect(map->pmap, current->ba.start,
2228 			    current->ba.end,
2229 			    current->protection & MASK(current));
2230 #undef	MASK
2231 		}
2232 
2233 		vm_map_simplify_entry(map, current, &count);
2234 
2235 		current = vm_map_rb_tree_RB_NEXT(current);
2236 	}
2237 	vm_map_unlock(map);
2238 	vm_map_entry_release(count);
2239 	return (KERN_SUCCESS);
2240 }
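
/*
 * Worked example (illustrative): with set_max == TRUE and
 * new_prot == VM_PROT_READ, an entry that previously allowed
 * VM_PROT_READ|VM_PROT_WRITE ends up with max_protection == VM_PROT_READ
 * and protection == VM_PROT_READ (new_prot & old_prot); pmap_protect()
 * then strips the no-longer-permitted write access from the hardware
 * page tables, subject to the COW MASK() above.
 */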
2241 
2242 /*
2243  * This routine traverses a process's map handling the madvise
2244  * system call.  Advisories are classified as either those affecting
2245  * the vm_map_entry structure, or those affecting the underlying
2246  * objects.
2247  *
2248  * The <value> argument is used for extended madvise calls.
2249  *
2250  * No requirements.
2251  */
2252 int
2253 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end,
2254 	       int behav, off_t value)
2255 {
2256 	vm_map_entry_t current, entry;
2257 	int modify_map = 0;
2258 	int error = 0;
2259 	int count;
2260 
2261 	/*
2262 	 * Some madvise calls directly modify the vm_map_entry, in which case
2263 	 * we need to use an exclusive lock on the map and we need to perform
2264 	 * various clipping operations.  Otherwise we only need a read-lock
2265 	 * on the map.
2266 	 */
2267 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2268 
2269 	switch(behav) {
2270 	case MADV_NORMAL:
2271 	case MADV_SEQUENTIAL:
2272 	case MADV_RANDOM:
2273 	case MADV_NOSYNC:
2274 	case MADV_AUTOSYNC:
2275 	case MADV_NOCORE:
2276 	case MADV_CORE:
2277 	case MADV_SETMAP:
2278 		modify_map = 1;
2279 		vm_map_lock(map);
2280 		break;
2281 	case MADV_INVAL:
2282 	case MADV_WILLNEED:
2283 	case MADV_DONTNEED:
2284 	case MADV_FREE:
2285 		vm_map_lock_read(map);
2286 		break;
2287 	default:
2288 		vm_map_entry_release(count);
2289 		return (EINVAL);
2290 	}
2291 
2292 	/*
2293 	 * Locate starting entry and clip if necessary.
2294 	 */
2295 
2296 	VM_MAP_RANGE_CHECK(map, start, end);
2297 
2298 	if (vm_map_lookup_entry(map, start, &entry)) {
2299 		if (modify_map)
2300 			vm_map_clip_start(map, entry, start, &count);
2301 	} else if (entry) {
2302 		entry = vm_map_rb_tree_RB_NEXT(entry);
2303 	} else {
2304 		entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
2305 	}
2306 
2307 	if (modify_map) {
2308 		/*
2309 		 * madvise behaviors that are implemented in the vm_map_entry.
2310 		 *
2311 		 * We clip the vm_map_entry so that behavioral changes are
2312 		 * limited to the specified address range.
2313 		 */
2314 		for (current = entry;
2315 		     current && current->ba.start < end;
2316 		     current = vm_map_rb_tree_RB_NEXT(current)) {
2317 			/*
2318 			 * Ignore submaps
2319 			 */
2320 			if (current->maptype == VM_MAPTYPE_SUBMAP)
2321 				continue;
2322 
2323 			vm_map_clip_end(map, current, end, &count);
2324 
2325 			switch (behav) {
2326 			case MADV_NORMAL:
2327 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2328 				break;
2329 			case MADV_SEQUENTIAL:
2330 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2331 				break;
2332 			case MADV_RANDOM:
2333 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2334 				break;
2335 			case MADV_NOSYNC:
2336 				current->eflags |= MAP_ENTRY_NOSYNC;
2337 				break;
2338 			case MADV_AUTOSYNC:
2339 				current->eflags &= ~MAP_ENTRY_NOSYNC;
2340 				break;
2341 			case MADV_NOCORE:
2342 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
2343 				break;
2344 			case MADV_CORE:
2345 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2346 				break;
2347 			case MADV_SETMAP:
2348 				/*
2349 				 * Set the page directory page for a map
2350 				 * governed by a virtual page table.  Mark
2351 				 * the entry as being governed by a virtual
2352 				 * page table if it is not.
2353 				 *
2354 				 * XXX the page directory page is stored
2355 				 * in the avail_ssize field of the map_entry.
2356 				 *
2357 				 * XXX the map simplification code does not
2358 				 * compare this field so weird things may
2359 				 * happen if you do not apply this function
2360 				 * to the entire mapping governed by the
2361 				 * virtual page table.
2362 				 */
2363 				if (current->maptype != VM_MAPTYPE_VPAGETABLE) {
2364 					error = EINVAL;
2365 					break;
2366 				}
2367 				current->aux.master_pde = value;
2368 				pmap_remove(map->pmap,
2369 					    current->ba.start, current->ba.end);
2370 				break;
2371 			case MADV_INVAL:
2372 				/*
2373 				 * Invalidate the related pmap entries, used
2374 				 * to flush portions of the real kernel's
2375 				 * pmap when the caller has removed or
2376 				 * modified existing mappings in a virtual
2377 				 * page table.
2378 				 *
2379 				 * (exclusive locked map version does not
2380 				 * need the range interlock).
2381 				 */
2382 				pmap_remove(map->pmap,
2383 					    current->ba.start, current->ba.end);
2384 				break;
2385 			default:
2386 				error = EINVAL;
2387 				break;
2388 			}
2389 			vm_map_simplify_entry(map, current, &count);
2390 		}
2391 		vm_map_unlock(map);
2392 	} else {
2393 		vm_pindex_t pindex;
2394 		vm_pindex_t delta;
2395 
2396 		/*
2397 		 * madvise behaviors that are implemented in the underlying
2398 		 * vm_object.
2399 		 *
2400 		 * Since we don't clip the vm_map_entry, we have to clip
2401 		 * the vm_object pindex and count.
2402 		 *
2403 		 * NOTE!  These functions are only supported on normal maps,
2404 		 *	  except MADV_INVAL which is also supported on
2405 		 *	  virtual page tables.
2406 		 *
2407 		 * NOTE!  These functions only apply to the top-most object.
2408 		 *	  It is not applicable to backing objects.
2409 		 */
2410 		for (current = entry;
2411 		     current && current->ba.start < end;
2412 		     current = vm_map_rb_tree_RB_NEXT(current)) {
2413 			vm_offset_t useStart;
2414 
2415 			if (current->maptype != VM_MAPTYPE_NORMAL &&
2416 			    (current->maptype != VM_MAPTYPE_VPAGETABLE ||
2417 			     behav != MADV_INVAL)) {
2418 				continue;
2419 			}
2420 
2421 			pindex = OFF_TO_IDX(current->ba.offset);
2422 			delta = atop(current->ba.end - current->ba.start);
2423 			useStart = current->ba.start;
2424 
2425 			if (current->ba.start < start) {
2426 				pindex += atop(start - current->ba.start);
2427 				delta -= atop(start - current->ba.start);
2428 				useStart = start;
2429 			}
2430 			if (current->ba.end > end)
2431 				delta -= atop(current->ba.end - end);
2432 
2433 			if ((vm_spindex_t)delta <= 0)
2434 				continue;
2435 
2436 			if (behav == MADV_INVAL) {
2437 				/*
2438 				 * Invalidate the related pmap entries, used
2439 				 * to flush portions of the real kernel's
2440 				 * pmap when the caller has removed or
2441 				 * modified existing mappings in a virtual
2442 				 * page table.
2443 				 *
2444 				 * (shared locked map version needs the
2445 				 * interlock, see vm_fault()).
2446 				 */
2447 				struct vm_map_ilock ilock;
2448 
2449 				KASSERT(useStart >= VM_MIN_USER_ADDRESS &&
2450 					    useStart + ptoa(delta) <=
2451 					    VM_MAX_USER_ADDRESS,
2452 					 ("Bad range %016jx-%016jx (%016jx)",
2453 					 useStart, useStart + ptoa(delta),
2454 					 delta));
2455 				vm_map_interlock(map, &ilock,
2456 						 useStart,
2457 						 useStart + ptoa(delta));
2458 				pmap_remove(map->pmap,
2459 					    useStart,
2460 					    useStart + ptoa(delta));
2461 				vm_map_deinterlock(map, &ilock);
2462 			} else {
2463 				vm_object_madvise(current->ba.object,
2464 						  pindex, delta, behav);
2465 			}
2466 
2467 			/*
2468 			 * Try to populate the page table.  Mappings governed
2469 			 * by virtual page tables cannot be pre-populated
2470 			 * without a lot of work so don't try.
2471 			 */
2472 			if (behav == MADV_WILLNEED &&
2473 			    current->maptype != VM_MAPTYPE_VPAGETABLE) {
2474 				pmap_object_init_pt(
2475 				    map->pmap, current,
2476 				    useStart,
2477 				    (delta << PAGE_SHIFT),
2478 				    MAP_PREFAULT_MADVISE
2479 				);
2480 			}
2481 		}
2482 		vm_map_unlock_read(map);
2483 	}
2484 	vm_map_entry_release(count);
2485 	return(error);
2486 }
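
/*
 * Worked example for the object-level clipping in the read-locked path of
 * vm_map_madvise() above (illustrative numbers, assuming 4K pages): an
 * entry mapping [0x10000, 0x20000) with ba.offset 0, advised over
 * [0x14000, 0x18000), yields
 *
 *	pindex = 0 + atop(0x14000 - 0x10000) = 4
 *	delta  = atop(0x20000 - 0x10000)
 *		 - atop(0x14000 - 0x10000)
 *		 - atop(0x20000 - 0x18000) = 16 - 4 - 8 = 4 pages
 *
 * so only the advised sub-range of the object is handed to
 * vm_object_madvise().
 */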
2487 
2488 
2489 /*
2490  * Sets the inheritance of the specified address range in the target map.
2491  * Inheritance affects how the map will be shared with child maps at the
2492  * time of vm_map_fork.
2493  */
2494 int
2495 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2496 	       vm_inherit_t new_inheritance)
2497 {
2498 	vm_map_entry_t entry;
2499 	vm_map_entry_t temp_entry;
2500 	int count;
2501 
2502 	switch (new_inheritance) {
2503 	case VM_INHERIT_NONE:
2504 	case VM_INHERIT_COPY:
2505 	case VM_INHERIT_SHARE:
2506 		break;
2507 	default:
2508 		return (KERN_INVALID_ARGUMENT);
2509 	}
2510 
2511 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2512 	vm_map_lock(map);
2513 
2514 	VM_MAP_RANGE_CHECK(map, start, end);
2515 
2516 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2517 		entry = temp_entry;
2518 		vm_map_clip_start(map, entry, start, &count);
2519 	} else if (temp_entry) {
2520 		entry = vm_map_rb_tree_RB_NEXT(temp_entry);
2521 	} else {
2522 		entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
2523 	}
2524 
2525 	while (entry && entry->ba.start < end) {
2526 		vm_map_clip_end(map, entry, end, &count);
2527 
2528 		entry->inheritance = new_inheritance;
2529 
2530 		vm_map_simplify_entry(map, entry, &count);
2531 
2532 		entry = vm_map_rb_tree_RB_NEXT(entry);
2533 	}
2534 	vm_map_unlock(map);
2535 	vm_map_entry_release(count);
2536 	return (KERN_SUCCESS);
2537 }
2538 
2539 /*
2540  * Implement the semantics of mlock and munlock
2541  */
2542 int
2543 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
2544 	      boolean_t new_pageable)
2545 {
2546 	vm_map_entry_t entry;
2547 	vm_map_entry_t start_entry;
2548 	vm_offset_t end;
2549 	int rv = KERN_SUCCESS;
2550 	int count;
2551 
2552 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2553 	vm_map_lock(map);
2554 	VM_MAP_RANGE_CHECK(map, start, real_end);
2555 	end = real_end;
2556 
2557 	start_entry = vm_map_clip_range(map, start, end, &count,
2558 					MAP_CLIP_NO_HOLES);
2559 	if (start_entry == NULL) {
2560 		vm_map_unlock(map);
2561 		vm_map_entry_release(count);
2562 		return (KERN_INVALID_ADDRESS);
2563 	}
2564 
2565 	if (new_pageable == 0) {
2566 		entry = start_entry;
2567 		while (entry && entry->ba.start < end) {
2568 			vm_offset_t save_start;
2569 			vm_offset_t save_end;
2570 
2571 			/*
2572 			 * Already user wired or hard wired (trivial cases)
2573 			 */
2574 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
2575 				entry = vm_map_rb_tree_RB_NEXT(entry);
2576 				continue;
2577 			}
2578 			if (entry->wired_count != 0) {
2579 				entry->wired_count++;
2580 				entry->eflags |= MAP_ENTRY_USER_WIRED;
2581 				entry = vm_map_rb_tree_RB_NEXT(entry);
2582 				continue;
2583 			}
2584 
2585 			/*
2586 			 * A new wiring requires instantiation of appropriate
2587 			 * management structures and the faulting in of the
2588 			 * page.
2589 			 */
2590 			if (entry->maptype == VM_MAPTYPE_NORMAL ||
2591 			    entry->maptype == VM_MAPTYPE_VPAGETABLE) {
2592 				int copyflag = entry->eflags &
2593 					       MAP_ENTRY_NEEDS_COPY;
2594 				if (copyflag && ((entry->protection &
2595 						  VM_PROT_WRITE) != 0)) {
2596 					vm_map_entry_shadow(entry);
2597 				} else if (entry->ba.object == NULL &&
2598 					   !map->system_map) {
2599 					vm_map_entry_allocate_object(entry);
2600 				}
2601 			}
2602 			entry->wired_count++;
2603 			entry->eflags |= MAP_ENTRY_USER_WIRED;
2604 
2605 			/*
2606 			 * Now fault in the area.  Note that vm_fault_wire()
2607 			 * may release the map lock temporarily, it will be
2608 			 * relocked on return.  The in-transition
2609 			 * flag protects the entries.
2610 			 */
2611 			save_start = entry->ba.start;
2612 			save_end = entry->ba.end;
2613 			rv = vm_fault_wire(map, entry, TRUE, 0);
2614 			if (rv) {
2615 				CLIP_CHECK_BACK(entry, save_start);
2616 				for (;;) {
2617 					KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
2618 					entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2619 					entry->wired_count = 0;
2620 					if (entry->ba.end == save_end)
2621 						break;
2622 					entry = vm_map_rb_tree_RB_NEXT(entry);
2623 					KASSERT(entry,
2624 					     ("bad entry clip during backout"));
2625 				}
2626 				end = save_start;	/* unwire the rest */
2627 				break;
2628 			}
2629 			/*
2630 			 * note that even though the entry might have been
2631 			 * clipped, the USER_WIRED flag we set prevents
2632 			 * duplication so we do not have to do a
2633 			 * clip check.
2634 			 */
2635 			entry = vm_map_rb_tree_RB_NEXT(entry);
2636 		}
2637 
2638 		/*
2639 		 * If we failed fall through to the unwiring section to
2640 		 * unwire what we had wired so far.  'end' has already
2641 		 * been adjusted.
2642 		 */
2643 		if (rv)
2644 			new_pageable = 1;
2645 
2646 		/*
2647 		 * start_entry might have been clipped if we unlocked the
2648 		 * map and blocked.  No matter how clipped it has gotten
2649 		 * there should be a fragment that is on our start boundary.
2650 		 */
2651 		CLIP_CHECK_BACK(start_entry, start);
2652 	}
2653 
2654 	/*
2655 	 * Deal with the unwiring case.
2656 	 */
2657 	if (new_pageable) {
2658 		/*
2659 		 * This is the unwiring case.  We must first ensure that the
2660 		 * range to be unwired is really wired down.  We know there
2661 		 * are no holes.
2662 		 */
2663 		entry = start_entry;
2664 		while (entry && entry->ba.start < end) {
2665 			if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2666 				rv = KERN_INVALID_ARGUMENT;
2667 				goto done;
2668 			}
2669 			KASSERT(entry->wired_count != 0,
2670 				("wired count was 0 with USER_WIRED set! %p",
2671 				 entry));
2672 			entry = vm_map_rb_tree_RB_NEXT(entry);
2673 		}
2674 
2675 		/*
2676 		 * Now decrement the wiring count for each region. If a region
2677 		 * becomes completely unwired, unwire its physical pages and
2678 		 * mappings.
2679 		 */
2680 		/*
2681 		 * The map entries are processed in a loop, checking to
2682 		 * make sure the entry is wired and asserting it has a wired
2683 		 * count.  Note that "entry" is reset to start_entry below;
2684 		 * if this second loop instead picked up the "entry" loop
2685 		 * variable left over from the first loop without first
2686 		 * setting it to start_entry, the second loop would never
2687 		 * be entered and the pages backing the entries would never
2688 		 * be unwired, leaking wired pages.
2689 		 */
2690 		entry = start_entry;
2691 		while (entry && entry->ba.start < end) {
2692 			KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED,
2693 				("expected USER_WIRED on entry %p", entry));
2694 			entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2695 			entry->wired_count--;
2696 			if (entry->wired_count == 0)
2697 				vm_fault_unwire(map, entry);
2698 			entry = vm_map_rb_tree_RB_NEXT(entry);
2699 		}
2700 	}
2701 done:
2702 	vm_map_unclip_range(map, start_entry, start, real_end, &count,
2703 		MAP_CLIP_NO_HOLES);
2704 	vm_map_unlock(map);
2705 	vm_map_entry_release(count);
2706 
2707 	return (rv);
2708 }
2709 
2710 /*
2711  * Sets the pageability of the specified address range in the target map.
2712  * Regions specified as not pageable require locked-down physical
2713  * memory and physical page maps.
2714  *
2715  * The map must not be locked, but a reference must remain to the map
2716  * throughout the call.
2717  *
2718  * This function may be called via the zalloc path and must properly
2719  * reserve map entries for kernel_map.
2720  *
2721  * No requirements.
2722  */
2723 int
2724 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
2725 {
2726 	vm_map_entry_t entry;
2727 	vm_map_entry_t start_entry;
2728 	vm_offset_t end;
2729 	int rv = KERN_SUCCESS;
2730 	int count;
2731 
2732 	if (kmflags & KM_KRESERVE)
2733 		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
2734 	else
2735 		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2736 	vm_map_lock(map);
2737 	VM_MAP_RANGE_CHECK(map, start, real_end);
2738 	end = real_end;
2739 
2740 	start_entry = vm_map_clip_range(map, start, end, &count,
2741 					MAP_CLIP_NO_HOLES);
2742 	if (start_entry == NULL) {
2743 		vm_map_unlock(map);
2744 		rv = KERN_INVALID_ADDRESS;
2745 		goto failure;
2746 	}
2747 	if ((kmflags & KM_PAGEABLE) == 0) {
2748 		/*
2749 		 * Wiring.
2750 		 *
2751 		 * 1.  Holding the write lock, we create any shadow or zero-fill
2752 		 * objects that need to be created. Then we clip each map
2753 		 * entry to the region to be wired and increment its wiring
2754 		 * count.  We create objects before clipping the map entries
2755 		 * to avoid object proliferation.
2756 		 *
2757 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
2758 		 * fault in the pages for any newly wired area (wired_count is
2759 		 * 1).
2760 		 *
2761 		 * Downgrading to a read lock for vm_fault_wire avoids a
2762 		 * possible deadlock with another process that may have faulted
2763 		 * on one of the pages to be wired (it would mark the page busy,
2764 		 * blocking us, then in turn block on the map lock that we
2765 		 * hold).  Because of problems in the recursive lock package,
2766 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
2767 		 * any actions that require the write lock must be done
2768 		 * beforehand.  Because we keep the read lock on the map, the
2769 		 * copy-on-write status of the entries we modify here cannot
2770 		 * change.
2771 		 */
2772 		entry = start_entry;
2773 		while (entry && entry->ba.start < end) {
2774 			/*
2775 			 * Trivial case if the entry is already wired
2776 			 */
2777 			if (entry->wired_count) {
2778 				entry->wired_count++;
2779 				entry = vm_map_rb_tree_RB_NEXT(entry);
2780 				continue;
2781 			}
2782 
2783 			/*
2784 			 * The entry is being newly wired, we have to setup
2785 			 * appropriate management structures.  A shadow
2786 			 * object is required for a copy-on-write region,
2787 			 * or a normal object for a zero-fill region.  We
2788 			 * do not have to do this for entries that point to sub
2789 			 * maps because we won't hold the lock on the sub map.
2790 			 */
2791 			if (entry->maptype == VM_MAPTYPE_NORMAL ||
2792 			    entry->maptype == VM_MAPTYPE_VPAGETABLE) {
2793 				int copyflag = entry->eflags &
2794 					       MAP_ENTRY_NEEDS_COPY;
2795 				if (copyflag && ((entry->protection &
2796 						  VM_PROT_WRITE) != 0)) {
2797 					vm_map_entry_shadow(entry);
2798 				} else if (entry->ba.object == NULL &&
2799 					   !map->system_map) {
2800 					vm_map_entry_allocate_object(entry);
2801 				}
2802 			}
2803 			entry->wired_count++;
2804 			entry = vm_map_rb_tree_RB_NEXT(entry);
2805 		}
2806 
2807 		/*
2808 		 * Pass 2.
2809 		 */
2810 
2811 		/*
2812 		 * HACK HACK HACK HACK
2813 		 *
2814 		 * vm_fault_wire() temporarily unlocks the map to avoid
2815 		 * deadlocks.  The in-transition flag from vm_map_clip_range
2816 		 * call should protect us from changes while the map is
2817 		 * unlocked.  T
2818 		 *
2819 		 * NOTE: Previously this comment stated that clipping might
2820 		 *	 still occur while the entry is unlocked, but from
2821 		 *	 what I can tell it actually cannot.
2822 		 *
2823 		 *	 It is unclear whether the CLIP_CHECK_*() calls
2824 		 *	 are still needed but we keep them anyway.
2825 		 *
2826 		 * HACK HACK HACK HACK
2827 		 */
2828 
2829 		entry = start_entry;
2830 		while (entry && entry->ba.start < end) {
2831 			/*
2832 			 * If vm_fault_wire fails for any page we need to undo
2833 			 * what has been done.  We decrement the wiring count
2834 			 * for those pages which have not yet been wired (now)
2835 			 * and unwire those that have (later).
2836 			 */
2837 			vm_offset_t save_start = entry->ba.start;
2838 			vm_offset_t save_end = entry->ba.end;
2839 
2840 			if (entry->wired_count == 1)
2841 				rv = vm_fault_wire(map, entry, FALSE, kmflags);
2842 			if (rv) {
2843 				CLIP_CHECK_BACK(entry, save_start);
2844 				for (;;) {
2845 					KASSERT(entry->wired_count == 1,
2846 					  ("wired_count changed unexpectedly"));
2847 					entry->wired_count = 0;
2848 					if (entry->ba.end == save_end)
2849 						break;
2850 					entry = vm_map_rb_tree_RB_NEXT(entry);
2851 					KASSERT(entry,
2852 					  ("bad entry clip during backout"));
2853 				}
2854 				end = save_start;
2855 				break;
2856 			}
2857 			CLIP_CHECK_FWD(entry, save_end);
2858 			entry = vm_map_rb_tree_RB_NEXT(entry);
2859 		}
2860 
2861 		/*
2862 		 * If a failure occurred, undo everything by falling through
2863 		 * to the unwiring code.  'end' has already been adjusted
2864 		 * appropriately.
2865 		 */
2866 		if (rv)
2867 			kmflags |= KM_PAGEABLE;
2868 
2869 		/*
2870 		 * start_entry is still IN_TRANSITION but may have been
2871 		 * clipped since vm_fault_wire() unlocks and relocks the
2872 		 * map.  No matter how clipped it has gotten there should
2873 		 * be a fragment that is on our start boundary.
2874 		 */
2875 		CLIP_CHECK_BACK(start_entry, start);
2876 	}
2877 
2878 	if (kmflags & KM_PAGEABLE) {
2879 		/*
2880 		 * This is the unwiring case.  We must first ensure that the
2881 		 * range to be unwired is really wired down.  We know there
2882 		 * are no holes.
2883 		 */
2884 		entry = start_entry;
2885 		while (entry && entry->ba.start < end) {
2886 			if (entry->wired_count == 0) {
2887 				rv = KERN_INVALID_ARGUMENT;
2888 				goto done;
2889 			}
2890 			entry = vm_map_rb_tree_RB_NEXT(entry);
2891 		}
2892 
2893 		/*
2894 		 * Now decrement the wiring count for each region. If a region
2895 		 * becomes completely unwired, unwire its physical pages and
2896 		 * mappings.
2897 		 */
2898 		entry = start_entry;
2899 		while (entry && entry->ba.start < end) {
2900 			entry->wired_count--;
2901 			if (entry->wired_count == 0)
2902 				vm_fault_unwire(map, entry);
2903 			entry = vm_map_rb_tree_RB_NEXT(entry);
2904 		}
2905 	}
2906 done:
2907 	vm_map_unclip_range(map, start_entry, start, real_end,
2908 			    &count, MAP_CLIP_NO_HOLES);
2909 	vm_map_unlock(map);
2910 failure:
2911 	if (kmflags & KM_KRESERVE)
2912 		vm_map_entry_krelease(count);
2913 	else
2914 		vm_map_entry_release(count);
2915 	return (rv);
2916 }
2917 
2918 /*
2919  * Mark a newly allocated address range as wired but do not fault in
2920  * the pages.  The caller is expected to load the pages into the object.
2921  *
2922  * The map must be locked on entry and will remain locked on return.
2923  * No other requirements.
2924  */
2925 void
2926 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size,
2927 		       int *countp)
2928 {
2929 	vm_map_entry_t scan;
2930 	vm_map_entry_t entry;
2931 
2932 	entry = vm_map_clip_range(map, addr, addr + size,
2933 				  countp, MAP_CLIP_NO_HOLES);
2934 	scan = entry;
2935 	while (scan && scan->ba.start < addr + size) {
2936 		KKASSERT(scan->wired_count == 0);
2937 		scan->wired_count = 1;
2938 		scan = vm_map_rb_tree_RB_NEXT(scan);
2939 	}
2940 	vm_map_unclip_range(map, entry, addr, addr + size,
2941 			    countp, MAP_CLIP_NO_HOLES);
2942 }
2943 
2944 /*
2945  * Push any dirty cached pages in the address range to their pager.
2946  * If syncio is TRUE, dirty pages are written synchronously.
2947  * If invalidate is TRUE, any cached pages are freed as well.
2948  *
2949  * This routine is called by sys_msync()
2950  *
2951  * Returns an error if any part of the specified range is not mapped.
2952  *
2953  * No requirements.
2954  */
2955 int
2956 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end,
2957 	     boolean_t syncio, boolean_t invalidate)
2958 {
2959 	vm_map_entry_t current;
2960 	vm_map_entry_t next;
2961 	vm_map_entry_t entry;
2962 	vm_map_backing_t ba;
2963 	vm_size_t size;
2964 	vm_object_t object;
2965 	vm_ooffset_t offset;
2966 
2967 	vm_map_lock_read(map);
2968 	VM_MAP_RANGE_CHECK(map, start, end);
2969 	if (!vm_map_lookup_entry(map, start, &entry)) {
2970 		vm_map_unlock_read(map);
2971 		return (KERN_INVALID_ADDRESS);
2972 	}
2973 	lwkt_gettoken(&map->token);
2974 
2975 	/*
2976 	 * Make a first pass to check for holes.
2977 	 */
2978 	current = entry;
2979 	while (current && current->ba.start < end) {
2980 		if (current->maptype == VM_MAPTYPE_SUBMAP) {
2981 			lwkt_reltoken(&map->token);
2982 			vm_map_unlock_read(map);
2983 			return (KERN_INVALID_ARGUMENT);
2984 		}
2985 		next = vm_map_rb_tree_RB_NEXT(current);
2986 		if (end > current->ba.end &&
2987 		    (next == NULL ||
2988 		     current->ba.end != next->ba.start)) {
2989 			lwkt_reltoken(&map->token);
2990 			vm_map_unlock_read(map);
2991 			return (KERN_INVALID_ADDRESS);
2992 		}
2993 		current = next;
2994 	}
2995 
2996 	if (invalidate)
2997 		pmap_remove(vm_map_pmap(map), start, end);
2998 
2999 	/*
3000 	 * Make a second pass, cleaning/uncaching pages from the indicated
3001 	 * objects as we go.
3002 	 */
3003 	current = entry;
3004 	while (current && current->ba.start < end) {
3005 		offset = current->ba.offset + (start - current->ba.start);
3006 		size = (end <= current->ba.end ? end : current->ba.end) - start;
3007 
3008 		switch(current->maptype) {
3009 		case VM_MAPTYPE_SUBMAP:
3010 		{
3011 			vm_map_t smap;
3012 			vm_map_entry_t tentry;
3013 			vm_size_t tsize;
3014 
3015 			smap = current->ba.sub_map;
3016 			vm_map_lock_read(smap);
3017 			vm_map_lookup_entry(smap, offset, &tentry);
3018 			if (tentry == NULL) {
3019 				tsize = vm_map_max(smap) - offset;
3020 				ba = NULL;
3021 				offset = 0 + (offset - vm_map_min(smap));
3022 			} else {
3023 				tsize = tentry->ba.end - offset;
3024 				ba = &tentry->ba;
3025 				offset = tentry->ba.offset +
3026 					 (offset - tentry->ba.start);
3027 			}
3028 			vm_map_unlock_read(smap);
3029 			if (tsize < size)
3030 				size = tsize;
3031 			break;
3032 		}
3033 		case VM_MAPTYPE_NORMAL:
3034 		case VM_MAPTYPE_VPAGETABLE:
3035 			ba = &current->ba;
3036 			break;
3037 		default:
3038 			ba = NULL;
3039 			break;
3040 		}
3041 		if (ba) {
3042 			object = ba->object;
3043 			if (object)
3044 				vm_object_hold(object);
3045 		} else {
3046 			object = NULL;
3047 		}
3048 
3049 		/*
3050 		 * Note that there is absolutely no sense in writing out
3051 		 * anonymous objects, so we track down the vnode object
3052 		 * to write out.
3053 		 * We invalidate (remove) all pages from the address space
3054 		 * anyway, for semantic correctness.
3055 		 *
3056 		 * note: certain anonymous maps, such as MAP_NOSYNC maps,
3057 		 * may start out with a NULL object.
3058 		 *
3059 		 * XXX do we really want to stop at the first backing store
3060 		 * here if there are more? XXX
3061 		 */
3062 		if (ba) {
3063 			vm_object_t tobj;
3064 
3065 			tobj = object;
3066 			while (ba->backing_ba != NULL) {
3067 				offset -= ba->offset;
3068 				ba = ba->backing_ba;
3069 				offset += ba->offset;
3070 				tobj = ba->object;
3071 				if (tobj->size < OFF_TO_IDX(offset + size))
3072 					size = IDX_TO_OFF(tobj->size) - offset;
3073 				break; /* XXX this break is not correct */
3074 			}
3075 			if (object != tobj) {
3076 				if (object)
3077 					vm_object_drop(object);
3078 				object = tobj;
3079 				vm_object_hold(object);
3080 			}
3081 		}
3082 
3083 		if (object && (object->type == OBJT_VNODE) &&
3084 		    (current->protection & VM_PROT_WRITE) &&
3085 		    (object->flags & OBJ_NOMSYNC) == 0) {
3086 			/*
3087 			 * Flush pages if writing is allowed, invalidate them
3088 			 * if invalidation requested.  Pages undergoing I/O
3089 			 * will be ignored by vm_object_page_remove().
3090 			 *
3091 			 * We cannot lock the vnode and then wait for paging
3092 			 * to complete without deadlocking against vm_fault.
3093 			 * Instead we simply call vm_object_page_remove() and
3094 			 * allow it to block internally on a page-by-page
3095 			 * basis when it encounters pages undergoing async
3096 			 * I/O.
3097 			 */
3098 			int flags;
3099 
3100 			/* no chain wait needed for vnode objects */
3101 			vm_object_reference_locked(object);
3102 			vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY);
3103 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
3104 			flags |= invalidate ? OBJPC_INVAL : 0;
3105 
3106 			/*
3107 			 * When operating on a virtual page table just
3108 			 * flush the whole object.  XXX we probably ought
3109 			 * to
3110 			 */
3111 			switch(current->maptype) {
3112 			case VM_MAPTYPE_NORMAL:
3113 				vm_object_page_clean(object,
3114 				    OFF_TO_IDX(offset),
3115 				    OFF_TO_IDX(offset + size + PAGE_MASK),
3116 				    flags);
3117 				break;
3118 			case VM_MAPTYPE_VPAGETABLE:
3119 				vm_object_page_clean(object, 0, 0, flags);
3120 				break;
3121 			}
3122 			vn_unlock(((struct vnode *)object->handle));
3123 			vm_object_deallocate_locked(object);
3124 		}
3125 		if (object && invalidate &&
3126 		   ((object->type == OBJT_VNODE) ||
3127 		    (object->type == OBJT_DEVICE) ||
3128 		    (object->type == OBJT_MGTDEVICE))) {
3129 			int clean_only =
3130 				((object->type == OBJT_DEVICE) ||
3131 				(object->type == OBJT_MGTDEVICE)) ? FALSE : TRUE;
3132 			/* no chain wait needed for vnode/device objects */
3133 			vm_object_reference_locked(object);
3134 			switch(current->maptype) {
3135 			case VM_MAPTYPE_NORMAL:
3136 				vm_object_page_remove(object,
3137 				    OFF_TO_IDX(offset),
3138 				    OFF_TO_IDX(offset + size + PAGE_MASK),
3139 				    clean_only);
3140 				break;
3141 			case VM_MAPTYPE_VPAGETABLE:
3142 				vm_object_page_remove(object, 0, 0, clean_only);
3143 				break;
3144 			}
3145 			vm_object_deallocate_locked(object);
3146 		}
3147 		start += size;
3148 		if (object)
3149 			vm_object_drop(object);
3150 		current = vm_map_rb_tree_RB_NEXT(current);
3151 	}
3152 
3153 	lwkt_reltoken(&map->token);
3154 	vm_map_unlock_read(map);
3155 
3156 	return (KERN_SUCCESS);
3157 }
3158 
3159 /*
3160  * Make the region specified by this entry pageable.
3161  *
3162  * The vm_map must be exclusively locked.
3163  */
3164 static void
3165 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
3166 {
3167 	entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3168 	entry->wired_count = 0;
3169 	vm_fault_unwire(map, entry);
3170 }
3171 
3172 /*
3173  * Deallocate the given entry from the target map.
3174  *
3175  * The vm_map must be exclusively locked.
3176  */
3177 static void
3178 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
3179 {
3180 	vm_map_entry_unlink(map, entry);
3181 	map->size -= entry->ba.end - entry->ba.start;
3182 	vm_map_entry_dispose(map, entry, countp);
3183 }
3184 
3185 /*
3186  * Deallocates the given address range from the target map.
3187  *
3188  * The vm_map must be exclusively locked.
3189  */
3190 int
3191 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
3192 {
3193 	vm_object_t object;
3194 	vm_map_entry_t entry;
3195 	vm_map_entry_t first_entry;
3196 	vm_offset_t hole_start;
3197 
3198 	ASSERT_VM_MAP_LOCKED(map);
3199 	lwkt_gettoken(&map->token);
3200 again:
3201 	/*
3202 	 * Find the start of the region, and clip it.  Set entry to point
3203 	 * at the first record containing the requested address or, if no
3204 	 * such record exists, the next record with a greater address.  The
3205 	 * loop will run from this point until a record beyond the termination
3206 	 * address is encountered.
3207 	 *
3208 	 * Adjust freehint[] for either the clip case or the extension case.
3209 	 *
3210 	 * GGG see other GGG comment.
3211 	 */
3212 	if (vm_map_lookup_entry(map, start, &first_entry)) {
3213 		entry = first_entry;
3214 		vm_map_clip_start(map, entry, start, countp);
3215 		hole_start = start;
3216 	} else {
3217 		if (first_entry) {
3218 			entry = vm_map_rb_tree_RB_NEXT(first_entry);
3219 			if (entry == NULL)
3220 				hole_start = first_entry->ba.start;
3221 			else
3222 				hole_start = first_entry->ba.end;
3223 		} else {
3224 			entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
3225 			if (entry == NULL)
3226 				hole_start = vm_map_min(map);
3227 			else
3228 				hole_start = vm_map_max(map);
3229 		}
3230 	}
3231 
3232 	/*
3233 	 * Step through all entries in this region
3234 	 */
3235 	while (entry && entry->ba.start < end) {
3236 		vm_map_entry_t next;
3237 		vm_offset_t s, e;
3238 		vm_pindex_t offidxstart, offidxend, count;
3239 
3240 		/*
3241 		 * If we hit an in-transition entry we have to sleep and
3242 		 * retry.  It's easier (and not really slower) to just retry
3243 		 * since this case occurs so rarely and the hint is already
3244 		 * pointing at the right place.  We have to reset the
3245 		 * start offset so as not to accidentally delete an entry
3246 		 * another process just created in vacated space.
3247 		 */
3248 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3249 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3250 			start = entry->ba.start;
3251 			++mycpu->gd_cnt.v_intrans_coll;
3252 			++mycpu->gd_cnt.v_intrans_wait;
3253 			vm_map_transition_wait(map, 1);
3254 			goto again;
3255 		}
3256 		vm_map_clip_end(map, entry, end, countp);
3257 
3258 		s = entry->ba.start;
3259 		e = entry->ba.end;
3260 		next = vm_map_rb_tree_RB_NEXT(entry);
3261 
3262 		offidxstart = OFF_TO_IDX(entry->ba.offset);
3263 		count = OFF_TO_IDX(e - s);
3264 
3265 		switch(entry->maptype) {
3266 		case VM_MAPTYPE_NORMAL:
3267 		case VM_MAPTYPE_VPAGETABLE:
3268 		case VM_MAPTYPE_SUBMAP:
3269 			object = entry->ba.object;
3270 			break;
3271 		default:
3272 			object = NULL;
3273 			break;
3274 		}
3275 
3276 		/*
3277 		 * Unwire before removing addresses from the pmap; otherwise,
3278 		 * unwiring will put the entries back in the pmap.
3279 		 *
3280 		 * Generally speaking, doing a bulk pmap_remove() before
3281 		 * removing the pages from the VM object is better at
3282 		 * reducing unnecessary IPIs.  The pmap code is now optimized
3283 		 * to not blindly iterate the range when pt and pd pages
3284 		 * are missing.
3285 		 */
3286 		if (entry->wired_count != 0)
3287 			vm_map_entry_unwire(map, entry);
3288 
3289 		offidxend = offidxstart + count;
3290 
3291 		if (object == &kernel_object) {
3292 			pmap_remove(map->pmap, s, e);
3293 			vm_object_hold(object);
3294 			vm_object_page_remove(object, offidxstart,
3295 					      offidxend, FALSE);
3296 			vm_object_drop(object);
3297 		} else if (object && object->type != OBJT_DEFAULT &&
3298 			   object->type != OBJT_SWAP) {
3299 			/*
3300 			 * vnode object routines cannot be chain-locked,
3301 			 * but since we aren't removing pages from the
3302 			 * object here we can use a shared hold.
3303 			 */
3304 			vm_object_hold_shared(object);
3305 			pmap_remove(map->pmap, s, e);
3306 			vm_object_drop(object);
3307 		} else if (object) {
3308 			vm_object_hold(object);
3309 			pmap_remove(map->pmap, s, e);
3310 
3311 			if (object != NULL &&
3312 			    object->ref_count != 1 &&
3313 			    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) ==
3314 			     OBJ_ONEMAPPING &&
3315 			    (object->type == OBJT_DEFAULT ||
3316 			     object->type == OBJT_SWAP)) {
3317 				/*
3318 				 * When ONEMAPPING is set we can destroy the
3319 				 * pages underlying the entry's range.
3320 				 */
3321 				vm_object_page_remove(object, offidxstart,
3322 						      offidxend, FALSE);
3323 				if (object->type == OBJT_SWAP) {
3324 					swap_pager_freespace(object,
3325 							     offidxstart,
3326 							     count);
3327 				}
3328 				if (offidxend >= object->size &&
3329 				    offidxstart < object->size) {
3330 					object->size = offidxstart;
3331 				}
3332 			}
3333 			vm_object_drop(object);
3334 		} else if (entry->maptype == VM_MAPTYPE_UKSMAP) {
3335 			pmap_remove(map->pmap, s, e);
3336 		}
3337 
3338 		/*
3339 		 * Delete the entry (which may delete the object) only after
3340 		 * removing all pmap entries pointing to its pages.
3341 		 * (Otherwise, its page frames may be reallocated, and any
3342 		 * modify bits will be set in the wrong object!)
3343 		 */
3344 		vm_map_entry_delete(map, entry, countp);
3345 		entry = next;
3346 	}
3347 
3348 	/*
3349 	 * If we reached the end of the map the hole extends to
3350 	 * vm_map_max(map); otherwise it extends only to the start of
3351 	 * the next entry.
3352 	 */
3353 	if (entry == NULL) {
3354 		vm_map_freehint_hole(map, hole_start,
3355 				     vm_map_max(map) - hole_start);
3356 	} else {
3357 		vm_map_freehint_hole(map, hole_start,
3358 				     entry->ba.start - hole_start);
3359 	}
3360 
3361 	lwkt_reltoken(&map->token);
3362 
3363 	return (KERN_SUCCESS);
3364 }
3365 
3366 /*
3367  * Remove the given address range from the target map.
3368  * This is the exported form of vm_map_delete.
3369  *
3370  * No requirements.
3371  */
3372 int
3373 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3374 {
3375 	int result;
3376 	int count;
3377 
3378 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3379 	vm_map_lock(map);
3380 	VM_MAP_RANGE_CHECK(map, start, end);
3381 	result = vm_map_delete(map, start, end, &count);
3382 	vm_map_unlock(map);
3383 	vm_map_entry_release(count);
3384 
3385 	return (result);
3386 }
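
/*
 * Illustrative use (hypothetical addr/size variables): releasing a mapped
 * range amounts to
 *
 *	vm_map_remove(map, addr, addr + size);
 *
 * which reserves entries, takes the exclusive map lock, clamps the range
 * and runs vm_map_delete() as above.
 */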
3387 
3388 /*
3389  * Assert that the target map allows the specified privilege on the
3390  * entire address region given.  The entire region must be allocated.
3391  *
3392  * The caller must specify whether the vm_map is already locked or not.
3393  */
3394 boolean_t
3395 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3396 			vm_prot_t protection, boolean_t have_lock)
3397 {
3398 	vm_map_entry_t entry;
3399 	vm_map_entry_t tmp_entry;
3400 	boolean_t result;
3401 
3402 	if (have_lock == FALSE)
3403 		vm_map_lock_read(map);
3404 
3405 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
3406 		if (have_lock == FALSE)
3407 			vm_map_unlock_read(map);
3408 		return (FALSE);
3409 	}
3410 	entry = tmp_entry;
3411 
3412 	result = TRUE;
3413 	while (start < end) {
3414 		if (entry == NULL) {
3415 			result = FALSE;
3416 			break;
3417 		}
3418 
3419 		/*
3420 		 * No holes allowed!
3421 		 */
3422 
3423 		if (start < entry->ba.start) {
3424 			result = FALSE;
3425 			break;
3426 		}
3427 		/*
3428 		 * Check protection associated with entry.
3429 		 */
3430 
3431 		if ((entry->protection & protection) != protection) {
3432 			result = FALSE;
3433 			break;
3434 		}
3435 		/* go to next entry */
3436 		start = entry->ba.end;
3437 		entry = vm_map_rb_tree_RB_NEXT(entry);
3438 	}
3439 	if (have_lock == FALSE)
3440 		vm_map_unlock_read(map);
3441 	return (result);
3442 }
3443 
3444 /*
3445  * vm_map_backing structures are not shared across forks and must be
3446  * replicated.
3447  *
3448  * Generally speaking we must reallocate the backing_ba sequence and
3449  * also adjust it for any changes made to the base entry->ba.start and
3450  * entry->ba.end.  The first ba in the chain is of course &entry->ba,
3451  * so we only need to adjust subsequent ba's start, end, and offset.
3452  *
3453  * MAP_BACK_CLIPPED	- Called as part of a clipping replication.
3454  *			  Do not clear OBJ_ONEMAPPING.
3455  *
3456  * MAP_BACK_BASEOBJREFD - Called from vm_map_insert().  The base object
3457  *			  has already been referenced.
3458  */
3459 static
3460 void
3461 vm_map_backing_replicated(vm_map_t map, vm_map_entry_t entry, int flags)
3462 {
3463 	vm_map_backing_t ba;
3464 	vm_map_backing_t nba;
3465 	vm_object_t object;
3466 
3467 	ba = &entry->ba;
3468 	for (;;) {
3469 		ba->pmap = map->pmap;
3470 
3471 		if (ba->map_object) {
3472 			switch(entry->maptype) {
3473 			case VM_MAPTYPE_VPAGETABLE:
3474 			case VM_MAPTYPE_NORMAL:
3475 				object = ba->object;
3476 				if (ba != &entry->ba ||
3477 				    (flags & MAP_BACK_BASEOBJREFD) == 0) {
3478 					vm_object_reference_quick(object);
3479 				}
3480 				vm_map_backing_attach(entry, ba);
3481 				if ((flags & MAP_BACK_CLIPPED) == 0 &&
3482 				    object->ref_count > 1) {
3483 					vm_object_clear_flag(object,
3484 							     OBJ_ONEMAPPING);
3485 				}
3486 				break;
3487 			case VM_MAPTYPE_UKSMAP:
3488 				vm_map_backing_attach(entry, ba);
3489 				break;
3490 			default:
3491 				break;
3492 			}
3493 		}
3494 		if (ba->backing_ba == NULL)
3495 			break;
3496 
3497 		/*
3498 		 * NOTE: The aux_info field is retained.
3499 		 */
3500 		nba = kmalloc(sizeof(*nba), M_MAP_BACKING, M_INTWAIT);
3501 		*nba = *ba->backing_ba;
3502 		nba->offset += (ba->start - nba->start);  /* += (new - old) */
3503 		nba->start = ba->start;
3504 		nba->end = ba->end;
3505 		ba->backing_ba = nba;
3506 		ba = nba;
3507 		/* pmap is replaced at the top of the loop */
3508 	}
3509 }
3510 
3511 static
3512 void
3513 vm_map_backing_adjust_start(vm_map_entry_t entry, vm_ooffset_t start)
3514 {
3515 	vm_map_backing_t ba;
3516 
3517 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE ||
3518 	    entry->maptype == VM_MAPTYPE_NORMAL) {
3519 		for (ba = &entry->ba; ba; ba = ba->backing_ba) {
3520 			if (ba->object) {
3521 				lockmgr(&ba->object->backing_lk, LK_EXCLUSIVE);
3522 				ba->offset += (start - ba->start);
3523 				ba->start = start;
3524 				lockmgr(&ba->object->backing_lk, LK_RELEASE);
3525 			} else {
3526 				ba->offset += (start - ba->start);
3527 				ba->start = start;
3528 			}
3529 		}
3530 	} else {
3531 		/* not an object and can't be shadowed */
3532 	}
3533 }
3534 
3535 static
3536 void
3537 vm_map_backing_adjust_end(vm_map_entry_t entry, vm_ooffset_t end)
3538 {
3539 	vm_map_backing_t ba;
3540 
3541 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE ||
3542 	    entry->maptype == VM_MAPTYPE_NORMAL) {
3543 		for (ba = &entry->ba; ba; ba = ba->backing_ba) {
3544 			if (ba->object) {
3545 				lockmgr(&ba->object->backing_lk, LK_EXCLUSIVE);
3546 				ba->end = end;
3547 				lockmgr(&ba->object->backing_lk, LK_RELEASE);
3548 			} else {
3549 				ba->end = end;
3550 			}
3551 		}
3552 	} else {
3553 		/* not an object and can't be shadowed */
3554 	}
3555 }
3556 
3557 /*
3558  * Handles the dirty work of making src_entry and dst_entry copy-on-write
3559  * after src_entry has been cloned to dst_entry.  For normal entries only.
3560  *
3561  * The vm_maps must be exclusively locked.
3562  * The vm_map's token must be held.
3563  *
3564  * Because the maps are locked no faults can be in progress during the
3565  * operation.
3566  */
3567 static void
3568 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
3569 		  vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
3570 {
3571 	vm_object_t obj;
3572 
3573 	KKASSERT(dst_entry->maptype == VM_MAPTYPE_NORMAL ||
3574 		 dst_entry->maptype == VM_MAPTYPE_VPAGETABLE);
3575 
3576 	if (src_entry->wired_count &&
3577 	    src_entry->maptype != VM_MAPTYPE_VPAGETABLE) {
3578 		/*
3579 		 * Of course, wired down pages can't be set copy-on-write.
3580 		 * Cause wired pages to be copied into the new map by
3581 		 * simulating faults (the new pages are pageable)
3582 		 *
3583 		 * Scrap ba.object (its ref-count has not yet been adjusted
3584 		 * so we can just NULL out the field).  Remove the backing
3585 		 * store.
3586 		 *
3587 		 * Then call vm_fault_copy_entry() to create a new object
3588 		 * in dst_entry and copy the wired pages from src to dst.
3589 		 *
3590 		 * The fault-copy code doesn't work with virtual page
3591 		 * tables.
3592 		 *
3593 		 * NOTE: obj is not actually an object for all MAPTYPEs,
3594 		 *	 just test against NULL.
3595 		 */
3596 		if (dst_entry->ba.map_object != NULL) {
3597 			vm_map_backing_detach(dst_entry, &dst_entry->ba);
3598 			dst_entry->ba.map_object = NULL;
3599 			vm_map_entry_dispose_ba(dst_entry,
3600 						dst_entry->ba.backing_ba);
3601 			dst_entry->ba.backing_ba = NULL;
3602 			dst_entry->ba.backing_count = 0;
3603 		}
3604 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
3605 	} else {
3606 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
3607 			/*
3608 			 * If the source entry is not already marked NEEDS_COPY
3609 			 * we need to write-protect the PTEs.
3610 			 */
3611 			pmap_protect(src_map->pmap,
3612 				     src_entry->ba.start,
3613 				     src_entry->ba.end,
3614 				     src_entry->protection & ~VM_PROT_WRITE);
3615 		}
3616 
3617 		/*
3618 		 * dst_entry->ba.object might be stale.  Update it (its
3619 		 * ref-count has not yet been updated so just overwrite
3620 		 * the field).
3621 		 *
3622 		 * If there is no object then we are golden.  Also, in
3623 		 * this situation if there are no backing_ba linkages then
3624 		 * we can set ba.offset to whatever we want.  For now we
3625 		 * set the offset to 0 to make debugging object sizes
3626 		 * easier.
3627 		 */
3628 		obj = src_entry->ba.object;
3629 
3630 		if (obj) {
3631 			src_entry->eflags |= (MAP_ENTRY_COW |
3632 					      MAP_ENTRY_NEEDS_COPY);
3633 			dst_entry->eflags |= (MAP_ENTRY_COW |
3634 					      MAP_ENTRY_NEEDS_COPY);
3635 			KKASSERT(dst_entry->ba.offset == src_entry->ba.offset);
3636 		} else {
3637 			dst_entry->ba.offset = 0;
3638 		}
3639 
3640 		/*
3641 		 * Normal, allow the backing_ba link depth to
3642 		 * increase.
3643 		 */
3644 		pmap_copy(dst_map->pmap, src_map->pmap,
3645 			  dst_entry->ba.start,
3646 			  dst_entry->ba.end - dst_entry->ba.start,
3647 			  src_entry->ba.start);
3648 	}
3649 }
3650 
3651 /*
3652  * Create a vmspace for a new process and its related vm_map based on an
3653  * existing vmspace.  The new map inherits information from the old map
3654  * according to inheritance settings.
3655  *
3656  * The source map must not be locked.
3657  * No requirements.
3658  */
3659 static void vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map,
3660 			  vm_map_entry_t old_entry, int *countp);
3661 static void vmspace_fork_uksmap_entry(struct proc *p2, struct lwp *lp2,
3662 			  vm_map_t old_map, vm_map_t new_map,
3663 			  vm_map_entry_t old_entry, int *countp);
3664 
3665 struct vmspace *
3666 vmspace_fork(struct vmspace *vm1, struct proc *p2, struct lwp *lp2)
3667 {
3668 	struct vmspace *vm2;
3669 	vm_map_t old_map = &vm1->vm_map;
3670 	vm_map_t new_map;
3671 	vm_map_entry_t old_entry;
3672 	int count;
3673 
3674 	lwkt_gettoken(&vm1->vm_map.token);
3675 	vm_map_lock(old_map);
3676 
3677 	vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
3678 	lwkt_gettoken(&vm2->vm_map.token);
3679 
3680 	/*
3681 	 * We must bump the timestamp to force any concurrent fault
3682 	 * to retry.
3683 	 */
3684 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
3685 	      (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
3686 	new_map = &vm2->vm_map;	/* XXX */
3687 	new_map->timestamp = 1;
3688 
3689 	vm_map_lock(new_map);
3690 
3691 	count = old_map->nentries;
3692 	count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);
3693 
3694 	RB_FOREACH(old_entry, vm_map_rb_tree, &old_map->rb_root) {
3695 		switch(old_entry->maptype) {
3696 		case VM_MAPTYPE_SUBMAP:
3697 			panic("vm_map_fork: encountered a submap");
3698 			break;
3699 		case VM_MAPTYPE_UKSMAP:
3700 			vmspace_fork_uksmap_entry(p2, lp2,
3701 						  old_map, new_map,
3702 						  old_entry, &count);
3703 			break;
3704 		case VM_MAPTYPE_NORMAL:
3705 		case VM_MAPTYPE_VPAGETABLE:
3706 			vmspace_fork_normal_entry(old_map, new_map,
3707 						  old_entry, &count);
3708 			break;
3709 		}
3710 	}
3711 
3712 	new_map->size = old_map->size;
3713 	vm_map_unlock(new_map);
3714 	vm_map_unlock(old_map);
3715 	vm_map_entry_release(count);
3716 
3717 	lwkt_reltoken(&vm2->vm_map.token);
3718 	lwkt_reltoken(&vm1->vm_map.token);
3719 
3720 	return (vm2);
3721 }
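
/*
 * Illustrative sketch (hypothetical caller, not the kernel's actual fork
 * path): a fork-style consumer would call vmspace_fork() with the source
 * map unlocked, passing the new proc and lwp so UKSMAP entries can be
 * rebound to the child.  p1, p2 and lp2 are stand-ins for whatever the
 * real caller has in hand.
 */
#if 0
static void
example_fork_vmspace(struct proc *p1, struct proc *p2, struct lwp *lp2)
{
	/* the source map must not be locked; see vmspace_fork() above */
	p2->p_vmspace = vmspace_fork(p1->p_vmspace, p2, lp2);
}
#endif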
3722 
3723 static
3724 void
3725 vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map,
3726 			  vm_map_entry_t old_entry, int *countp)
3727 {
3728 	vm_map_entry_t new_entry;
3729 	vm_map_backing_t ba;
3730 	vm_object_t object;
3731 
3732 	/*
3733 	 * If the backing_ba link list gets too long then fault it
3734 	 * all into the head object and dispose of the list.  We do
3735 	 * this in old_entry prior to cloning in order to benefit both
3736 	 * parent and child.
3737 	 *
3738 	 * We can test our fronting object's size against its
3739 	 * resident_page_count for a really cheap (but probably not perfect)
3740 	 * all-shadowed test, allowing us to disconnect the backing_ba
3741 	 * link list early.
3742 	 *
3743 	 * XXX Currently doesn't work for VPAGETABLEs (the entire object
3744 	 *     would have to be copied).
3745 	 */
3746 	object = old_entry->ba.object;
3747 	if (old_entry->ba.backing_ba &&
3748 	    old_entry->maptype != VM_MAPTYPE_VPAGETABLE &&
3749 	    (old_entry->ba.backing_count >= vm_map_backing_limit ||
3750 	     (vm_map_backing_shadow_test && object &&
3751 	      object->size == object->resident_page_count))) {
3752 		/*
3753 		 * If there are too many backing_ba linkages we
3754 		 * collapse everything into the head
3755 		 *
3756 		 * This will also remove all the pte's.
3757 		 */
3758 		if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY)
3759 			vm_map_entry_shadow(old_entry);
3760 		if (object == NULL)
3761 			vm_map_entry_allocate_object(old_entry);
3762 		if (vm_fault_collapse(old_map, old_entry) == KERN_SUCCESS) {
3763 			ba = old_entry->ba.backing_ba;
3764 			old_entry->ba.backing_ba = NULL;
3765 			old_entry->ba.backing_count = 0;
3766 			vm_map_entry_dispose_ba(old_entry, ba);
3767 		}
3768 	}
3769 	object = NULL;	/* object variable is now invalid */
3770 
3771 	/*
3772 	 * Fork the entry
3773 	 */
3774 	switch (old_entry->inheritance) {
3775 	case VM_INHERIT_NONE:
3776 		break;
3777 	case VM_INHERIT_SHARE:
3778 		/*
3779 		 * Clone the entry as a shared entry.  This will look like
3780 		 * shared memory across the old and the new process.  We must
3781 		 * ensure that the object is allocated.
3782 		 */
3783 		if (old_entry->ba.object == NULL)
3784 			vm_map_entry_allocate_object(old_entry);
3785 
3786 		if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3787 			/*
3788 			 * Create the fronting vm_map_backing for
3789 			 * an entry which needs a copy, plus an extra
3790 			 * ref because we are going to duplicate it
3791 			 * in the fork.
3792 			 *
3793 			 * The call to vm_map_entry_shadow() will also clear
3794 			 * OBJ_ONEMAPPING.
3795 			 *
3796 			 * XXX no more collapse.  Still need extra ref
3797 			 * for the fork.
3798 			 */
3799 			vm_map_entry_shadow(old_entry);
3800 		} else if (old_entry->ba.object) {
3801 			object = old_entry->ba.object;
3802 		}
3803 
3804 		/*
3805 		 * Clone the entry.  We've already bumped the ref on
3806 		 * the vm_object for our new entry.
3807 		 */
3808 		new_entry = vm_map_entry_create(countp);
3809 		*new_entry = *old_entry;
3810 
3811 		new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3812 		new_entry->wired_count = 0;
3813 
3814 		/*
3815 		 * Replicate and index the vm_map_backing.  Don't share
3816 		 * the vm_map_backing across vm_map's (only across clips).
3817 		 *
3818 		 * Insert the entry into the new map -- we know we're
3819 		 * inserting at the end of the new map.
3820 		 */
3821 		vm_map_backing_replicated(new_map, new_entry, 0);
3822 		vm_map_entry_link(new_map, new_entry);
3823 
3824 		/*
3825 		 * Update the physical map
3826 		 */
3827 		pmap_copy(new_map->pmap, old_map->pmap,
3828 			  new_entry->ba.start,
3829 			  (old_entry->ba.end - old_entry->ba.start),
3830 			  old_entry->ba.start);
3831 		break;
3832 	case VM_INHERIT_COPY:
3833 		/*
3834 		 * Clone the entry and link the copy into the new map.
3835 		 *
3836 		 * Note that ref-counting adjustment for old_entry->ba.object
3837 		 * (if it isn't a special map that is) is handled by
3838 		 * vm_map_copy_entry().
3839 		 */
3840 		new_entry = vm_map_entry_create(countp);
3841 		*new_entry = *old_entry;
3842 
3843 		new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3844 		new_entry->wired_count = 0;
3845 
3846 		vm_map_backing_replicated(new_map, new_entry, 0);
3847 		vm_map_entry_link(new_map, new_entry);
3848 
3849 		/*
3850 		 * This does the actual dirty work of making both entries
3851 		 * copy-on-write, and will also handle the fronting object.
3852 		 */
3853 		vm_map_copy_entry(old_map, new_map, old_entry, new_entry);
3854 		break;
3855 	}
3856 }
3857 
3858 /*
3859  * When forking user-kernel shared maps, the map might change in the
3860  * child so do not try to copy the underlying pmap entries.
3861  */
3862 static
3863 void
3864 vmspace_fork_uksmap_entry(struct proc *p2, struct lwp *lp2,
3865 			  vm_map_t old_map, vm_map_t new_map,
3866 			  vm_map_entry_t old_entry, int *countp)
3867 {
3868 	vm_map_entry_t new_entry;
3869 
3870 	/*
3871 	 * Do not fork lpmap entries whose TIDs do not match lp2's tid.
3872 	 *
3873 	 * XXX if p2 is NULL and lp2 is non-NULL, we retain the lpmap entry
3874 	 * (this is for e.g. resident'ing vmspaces) but set the field
3875 	 * to NULL.  It should be re-established upon restore. XXX NOT IMPL YET
3876 	 */
3877 	if (old_entry->aux.dev) {
3878 		switch(minor(old_entry->aux.dev)) {
3879 		case 5:
3880 			break;
3881 		case 6:
3882 			break;
3883 		case 7:
3884 			if (lp2 == NULL)
3885 				return;
3886 			if (old_entry->ba.aux_info == NULL)
3887 				return;
3888 			if (((struct lwp *)old_entry->ba.aux_info)->lwp_tid !=
3889 			    lp2->lwp_tid)
3890 				return;
3891 			break;
3892 		}
3893 	}
3894 
3895 	new_entry = vm_map_entry_create(countp);
3896 	*new_entry = *old_entry;
3897 
3898 	new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3899 	new_entry->wired_count = 0;
3900 	KKASSERT(new_entry->ba.backing_ba == NULL);
3901 
3902 	if (new_entry->aux.dev) {
3903 		switch(minor(new_entry->aux.dev)) {
3904 		case 5:
3905 			/*
3906 			 * upmap
3907 			 */
3908 			new_entry->ba.aux_info = p2;
3909 			break;
3910 		case 6:
3911 			/*
3912 			 * kpmap
3913 			 */
3914 			new_entry->ba.aux_info = NULL;
3915 			break;
3916 		case 7:
3917 			/*
3918 			 * lpmap
3919 			 */
3920 			new_entry->ba.aux_info = lp2;
3921 			break;
3922 		}
3923 	} else {
3924 		new_entry->ba.aux_info = NULL;
3925 	}
3926 
3927 	vm_map_backing_replicated(new_map, new_entry, 0);
3928 
3929 	vm_map_entry_link(new_map, new_entry);
3930 }
3931 
3932 /*
3933  * Create an auto-grow stack entry
3934  *
3935  * No requirements.
3936  */
3937 int
3938 vm_map_stack (vm_map_t map, vm_offset_t *addrbos, vm_size_t max_ssize,
3939 	      int flags, vm_prot_t prot, vm_prot_t max, int cow)
3940 {
3941 	vm_map_entry_t	prev_entry;
3942 	vm_map_entry_t	next;
3943 	vm_size_t	init_ssize;
3944 	int		rv;
3945 	int		count;
3946 	vm_offset_t	tmpaddr;
3947 
3948 	cow |= MAP_IS_STACK;
3949 
3950 	if (max_ssize < sgrowsiz)
3951 		init_ssize = max_ssize;
3952 	else
3953 		init_ssize = sgrowsiz;
3954 
3955 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3956 	vm_map_lock(map);
3957 
3958 	/*
3959 	 * Find space for the mapping
3960 	 */
3961 	if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
3962 		if (vm_map_findspace(map, *addrbos, max_ssize, 1,
3963 				     flags, &tmpaddr)) {
3964 			vm_map_unlock(map);
3965 			vm_map_entry_release(count);
3966 			return (KERN_NO_SPACE);
3967 		}
3968 		*addrbos = tmpaddr;
3969 	}
3970 
3971 	/* If addr is already mapped, no go */
3972 	if (vm_map_lookup_entry(map, *addrbos, &prev_entry)) {
3973 		vm_map_unlock(map);
3974 		vm_map_entry_release(count);
3975 		return (KERN_NO_SPACE);
3976 	}
3977 
3978 #if 0
3979 	/* XXX already handled by kern_mmap() */
3980 	/* If we would blow our VMEM resource limit, no go */
3981 	if (map->size + init_ssize >
3982 	    curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3983 		vm_map_unlock(map);
3984 		vm_map_entry_release(count);
3985 		return (KERN_NO_SPACE);
3986 	}
3987 #endif
3988 
3989 	/*
3990 	 * If we can't accommodate max_ssize in the current mapping,
3991 	 * no go.  However, we need to be aware that subsequent user
3992 	 * mappings might map into the space we have reserved for
3993 	 * stack, and currently this space is not protected.
3994 	 *
3995 	 * Hopefully we will at least detect this condition
3996 	 * when we try to grow the stack.
3997 	 */
3998 	if (prev_entry)
3999 		next = vm_map_rb_tree_RB_NEXT(prev_entry);
4000 	else
4001 		next = RB_MIN(vm_map_rb_tree, &map->rb_root);
4002 
4003 	if (next && next->ba.start < *addrbos + max_ssize) {
4004 		vm_map_unlock(map);
4005 		vm_map_entry_release(count);
4006 		return (KERN_NO_SPACE);
4007 	}
4008 
4009 	/*
4010 	 * We initially map a stack of only init_ssize.  We will
4011 	 * grow as needed later.  Since this is to be a grow
4012 	 * down stack, we map at the top of the range.
4013 	 *
4014 	 * Note: we would normally expect prot and max to be
4015 	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
4016 	 * eliminate these as input parameters, and just
4017 	 * pass these values here in the insert call.
4018 	 */
4019 	rv = vm_map_insert(map, &count,
4020 			   NULL, NULL,
4021 			   0, NULL,
4022 			   *addrbos + max_ssize - init_ssize,
4023 	                   *addrbos + max_ssize,
4024 			   VM_MAPTYPE_NORMAL,
4025 			   VM_SUBSYS_STACK, prot, max, cow);
4026 
4027 	/* Now set the avail_ssize amount */
4028 	if (rv == KERN_SUCCESS) {
4029 		if (prev_entry)
4030 			next = vm_map_rb_tree_RB_NEXT(prev_entry);
4031 		else
4032 			next = RB_MIN(vm_map_rb_tree, &map->rb_root);
4033 		if (prev_entry != NULL) {
4034 			vm_map_clip_end(map,
4035 					prev_entry,
4036 					*addrbos + max_ssize - init_ssize,
4037 					&count);
4038 		}
4039 		if (next->ba.end   != *addrbos + max_ssize ||
4040 		    next->ba.start != *addrbos + max_ssize - init_ssize){
4041 			panic ("Bad entry start/end for new stack entry");
4042 		} else {
4043 			next->aux.avail_ssize = max_ssize - init_ssize;
4044 		}
4045 	}
4046 
4047 	vm_map_unlock(map);
4048 	vm_map_entry_release(count);
4049 	return (rv);
4050 }
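
/*
 * Illustrative sketch (hypothetical caller): creating an auto-grow stack
 * of at most 'maxsize' bytes at a kernel-chosen address.  As noted in
 * vm_map_stack(), prot/max are normally VM_PROT_ALL and cow is normally 0.
 * The names below are assumptions for illustration only.
 */
#if 0
static int
example_create_stack(vm_map_t map, vm_size_t maxsize, vm_offset_t *basep)
{
	*basep = 0;	/* no MAP_FIXED/MAP_TRYFIXED => findspace chooses */
	return (vm_map_stack(map, basep, maxsize, 0,
			     VM_PROT_ALL, VM_PROT_ALL, 0));
}
#endif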
4051 
4052 /*
4053  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
4054  * desired address is already mapped, or if we successfully grow
4055  * the stack.  Also returns KERN_SUCCESS if addr is outside the
4056  * stack range (this is strange, but preserves compatibility with
4057  * the grow function in vm_machdep.c).
4058  *
4059  * No requirements.
4060  */
4061 int
4062 vm_map_growstack (vm_map_t map, vm_offset_t addr)
4063 {
4064 	vm_map_entry_t prev_entry;
4065 	vm_map_entry_t stack_entry;
4066 	vm_map_entry_t next;
4067 	struct vmspace *vm;
4068 	struct lwp *lp;
4069 	struct proc *p;
4070 	vm_offset_t    end;
4071 	int grow_amount;
4072 	int rv = KERN_SUCCESS;
4073 	int is_procstack;
4074 	int use_read_lock = 1;
4075 	int count;
4076 
4077 	/*
4078 	 * Find the vm
4079 	 */
4080 	lp = curthread->td_lwp;
4081 	p = curthread->td_proc;
4082 	KKASSERT(lp != NULL);
4083 	vm = lp->lwp_vmspace;
4084 
4085 	/*
4086 	 * Growstack is only allowed on the current process.  We disallow
4087 	 * other use cases, e.g. trying to access memory via procfs that
4088 	 * the stack hasn't grown into.
4089 	 */
4090 	if (map != &vm->vm_map) {
4091 		return KERN_FAILURE;
4092 	}
4093 
4094 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
4095 Retry:
4096 	if (use_read_lock)
4097 		vm_map_lock_read(map);
4098 	else
4099 		vm_map_lock(map);
4100 
4101 	/*
4102 	 * If addr is already in the entry range, no need to grow.
4103 	 * prev_entry returns NULL if addr is at the head.
4104 	 */
4105 	if (vm_map_lookup_entry(map, addr, &prev_entry))
4106 		goto done;
4107 	if (prev_entry)
4108 		stack_entry = vm_map_rb_tree_RB_NEXT(prev_entry);
4109 	else
4110 		stack_entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
4111 
4112 	if (stack_entry == NULL)
4113 		goto done;
4114 	if (prev_entry == NULL)
4115 		end = stack_entry->ba.start - stack_entry->aux.avail_ssize;
4116 	else
4117 		end = prev_entry->ba.end;
4118 
4119 	/*
4120 	 * This next test mimics the old grow function in vm_machdep.c.
4121 	 * It really doesn't quite make sense, but we do it anyway
4122 	 * for compatibility.
4123 	 *
4124 	 * If not a growable stack, return success.  This signals the
4125 	 * caller to proceed as it normally would with normal vm.
4126 	 */
4127 	if (stack_entry->aux.avail_ssize < 1 ||
4128 	    addr >= stack_entry->ba.start ||
4129 	    addr <  stack_entry->ba.start - stack_entry->aux.avail_ssize) {
4130 		goto done;
4131 	}
4132 
4133 	/* Find the minimum grow amount */
4134 	grow_amount = roundup (stack_entry->ba.start - addr, PAGE_SIZE);
4135 	if (grow_amount > stack_entry->aux.avail_ssize) {
4136 		rv = KERN_NO_SPACE;
4137 		goto done;
4138 	}
4139 
4140 	/*
4141 	 * If there is no longer enough space between the entries, it is a
4142 	 * nogo; adjust the available space.  Note: this
4143 	 * should only happen if the user has mapped into the
4144 	 * stack area after the stack was created, and is
4145 	 * probably an error.
4146 	 *
4147 	 * This also effectively destroys any guard page the user
4148 	 * might have intended by limiting the stack size.
4149 	 */
4150 	if (grow_amount > stack_entry->ba.start - end) {
4151 		if (use_read_lock && vm_map_lock_upgrade(map)) {
4152 			/* lost lock */
4153 			use_read_lock = 0;
4154 			goto Retry;
4155 		}
4156 		use_read_lock = 0;
4157 		stack_entry->aux.avail_ssize = stack_entry->ba.start - end;
4158 		rv = KERN_NO_SPACE;
4159 		goto done;
4160 	}
4161 
4162 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
4163 
4164 	/* If this is the main process stack, see if we're over the
4165 	 * stack limit.
4166 	 */
4167 	if (is_procstack && (vm->vm_ssize + grow_amount >
4168 			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
4169 		rv = KERN_NO_SPACE;
4170 		goto done;
4171 	}
4172 
4173 	/* Round up the grow amount modulo SGROWSIZ */
4174 	grow_amount = roundup (grow_amount, sgrowsiz);
4175 	if (grow_amount > stack_entry->aux.avail_ssize) {
4176 		grow_amount = stack_entry->aux.avail_ssize;
4177 	}
4178 	if (is_procstack && (vm->vm_ssize + grow_amount >
4179 	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
4180 		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur - vm->vm_ssize;
4181 	}
4182 
4183 	/* If we would blow our VMEM resource limit, no go */
4184 	if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
4185 		rv = KERN_NO_SPACE;
4186 		goto done;
4187 	}
4188 
4189 	if (use_read_lock && vm_map_lock_upgrade(map)) {
4190 		/* lost lock */
4191 		use_read_lock = 0;
4192 		goto Retry;
4193 	}
4194 	use_read_lock = 0;
4195 
4196 	/* Get the preliminary new entry start value */
4197 	addr = stack_entry->ba.start - grow_amount;
4198 
4199 	/* If this puts us into the previous entry, cut back our growth
4200 	 * to the available space.  Also, see the note above.
4201 	 */
4202 	if (addr < end) {
4203 		stack_entry->aux.avail_ssize = stack_entry->ba.start - end;
4204 		addr = end;
4205 	}
4206 
4207 	rv = vm_map_insert(map, &count,
4208 			   NULL, NULL,
4209 			   0, NULL,
4210 			   addr, stack_entry->ba.start,
4211 			   VM_MAPTYPE_NORMAL,
4212 			   VM_SUBSYS_STACK, VM_PROT_ALL, VM_PROT_ALL, 0);
4213 
4214 	/* Adjust the available stack space by the amount we grew. */
4215 	if (rv == KERN_SUCCESS) {
4216 		if (prev_entry) {
4217 			vm_map_clip_end(map, prev_entry, addr, &count);
4218 			next = vm_map_rb_tree_RB_NEXT(prev_entry);
4219 		} else {
4220 			next = RB_MIN(vm_map_rb_tree, &map->rb_root);
4221 		}
4222 		if (next->ba.end != stack_entry->ba.start  ||
4223 		    next->ba.start != addr) {
4224 			panic ("Bad stack grow start/end in new stack entry");
4225 		} else {
4226 			next->aux.avail_ssize =
4227 				stack_entry->aux.avail_ssize -
4228 				(next->ba.end - next->ba.start);
4229 			if (is_procstack) {
4230 				vm->vm_ssize += next->ba.end -
4231 						next->ba.start;
4232 			}
4233 		}
4234 
4235 		if (map->flags & MAP_WIREFUTURE)
4236 			vm_map_unwire(map, next->ba.start, next->ba.end, FALSE);
4237 	}
4238 
4239 done:
4240 	if (use_read_lock)
4241 		vm_map_unlock_read(map);
4242 	else
4243 		vm_map_unlock(map);
4244 	vm_map_entry_release(count);
4245 	return (rv);
4246 }
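
/*
 * Illustrative sketch (hypothetical caller, not the actual trap path):
 * a fault handler could give the stack a chance to grow before treating
 * the address as invalid.  Note that vm_map_growstack() only operates on
 * the current process's map, and KERN_SUCCESS merely means "proceed with
 * the normal fault"; it does not guarantee that any growth occurred.
 */
#if 0
static int
example_fault_with_stack_growth(vm_map_t map, vm_offset_t fault_addr)
{
	if (vm_map_growstack(map, fault_addr) != KERN_SUCCESS)
		return (KERN_FAILURE);	/* e.g. rlimit or space exhausted */
	/* fall through to normal fault handling of fault_addr */
	return (KERN_SUCCESS);
}
#endif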
4247 
4248 /*
4249  * Unshare the specified VM space for exec.  If other processes are
4250  * mapped to it, then create a new one.  The new vmspace is null.
4251  *
4252  * No requirements.
4253  */
4254 void
4255 vmspace_exec(struct proc *p, struct vmspace *vmcopy)
4256 {
4257 	struct vmspace *oldvmspace = p->p_vmspace;
4258 	struct vmspace *newvmspace;
4259 	vm_map_t map = &p->p_vmspace->vm_map;
4260 
4261 	/*
4262 	 * If we are execing a resident vmspace we fork it, otherwise
4263 	 * we create a new vmspace.  Note that exitingcnt is not
4264 	 * copied to the new vmspace.
4265 	 */
4266 	lwkt_gettoken(&oldvmspace->vm_map.token);
4267 	if (vmcopy)  {
4268 		newvmspace = vmspace_fork(vmcopy, NULL, NULL);
4269 		lwkt_gettoken(&newvmspace->vm_map.token);
4270 	} else {
4271 		newvmspace = vmspace_alloc(vm_map_min(map), vm_map_max(map));
4272 		lwkt_gettoken(&newvmspace->vm_map.token);
4273 		bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
4274 		      (caddr_t)&oldvmspace->vm_endcopy -
4275 		       (caddr_t)&oldvmspace->vm_startcopy);
4276 	}
4277 
4278 	/*
4279 	 * Finish initializing the vmspace before assigning it
4280 	 * to the process.  The vmspace will become the current vmspace
4281 	 * if p == curproc.
4282 	 */
4283 	pmap_pinit2(vmspace_pmap(newvmspace));
4284 	pmap_replacevm(p, newvmspace, 0);
4285 	lwkt_reltoken(&newvmspace->vm_map.token);
4286 	lwkt_reltoken(&oldvmspace->vm_map.token);
4287 	vmspace_rel(oldvmspace);
4288 }
4289 
4290 /*
4291  * Unshare the specified VM space for forcing COW.  This
4292  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
4293  */
4294 void
4295 vmspace_unshare(struct proc *p)
4296 {
4297 	struct vmspace *oldvmspace = p->p_vmspace;
4298 	struct vmspace *newvmspace;
4299 
4300 	lwkt_gettoken(&oldvmspace->vm_map.token);
4301 	if (vmspace_getrefs(oldvmspace) == 1) {
4302 		lwkt_reltoken(&oldvmspace->vm_map.token);
4303 		return;
4304 	}
4305 	newvmspace = vmspace_fork(oldvmspace, NULL, NULL);
4306 	lwkt_gettoken(&newvmspace->vm_map.token);
4307 	pmap_pinit2(vmspace_pmap(newvmspace));
4308 	pmap_replacevm(p, newvmspace, 0);
4309 	lwkt_reltoken(&newvmspace->vm_map.token);
4310 	lwkt_reltoken(&oldvmspace->vm_map.token);
4311 	vmspace_rel(oldvmspace);
4312 }
4313 
4314 /*
4315  * vm_map_hint: return the beginning of the best area suitable for
4316  * creating a new mapping with "prot" protection.
4317  *
4318  * No requirements.
4319  */
4320 vm_offset_t
4321 vm_map_hint(struct proc *p, vm_offset_t addr, vm_prot_t prot)
4322 {
4323 	struct vmspace *vms = p->p_vmspace;
4324 	struct rlimit limit;
4325 	rlim_t dsiz;
4326 
4327 	/*
4328 	 * Acquire datasize limit for mmap() operation,
4329 	 * calculate nearest power of 2.
4330 	 */
4331 	if (kern_getrlimit(RLIMIT_DATA, &limit))
4332 		limit.rlim_cur = maxdsiz;
4333 	dsiz = limit.rlim_cur;
4334 
4335 	if (!randomize_mmap || addr != 0) {
4336 		/*
4337 		 * Set a reasonable start point for the hint if it was
4338 		 * not specified or if it falls within the heap space.
4339 		 * Hinted mmap()s do not allocate out of the heap space.
4340 		 */
4341 		if (addr == 0 ||
4342 		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
4343 		     addr < round_page((vm_offset_t)vms->vm_daddr + dsiz))) {
4344 			addr = round_page((vm_offset_t)vms->vm_daddr + dsiz);
4345 		}
4346 
4347 		return addr;
4348 	}
4349 
4350 	/*
4351 	 * randomize_mmap && addr == 0.  For now randomize the
4352 	 * address within a dsiz range beyond the data limit.
4353 	 */
4354 	addr = (vm_offset_t)vms->vm_daddr + dsiz;
4355 	if (dsiz)
4356 		addr += (karc4random64() & 0x7FFFFFFFFFFFFFFFLU) % dsiz;
4357 	return (round_page(addr));
4358 }
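
/*
 * Worked example of vm_map_hint() (numbers are for illustration only):
 * with vm_daddr at 0x400000 and an effective RLIMIT_DATA of 0x20000000,
 * a hint of 0 (or any hint inside the heap window) is pushed up to
 * round_page(0x400000 + 0x20000000) = 0x20400000, keeping hinted mmap()s
 * clear of the data segment.  The randomized path instead returns a
 * page-aligned address in [vm_daddr + dsiz, vm_daddr + 2 * dsiz).
 */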
4359 
4360 /*
4361  * Finds the VM object, offset, and protection for a given virtual address
4362  * in the specified map, assuming a page fault of the type specified.
4363  *
4364  * Leaves the map in question locked for read; return values are guaranteed
4365  * until a vm_map_lookup_done call is performed.  Note that the map argument
4366  * is in/out; the returned map must be used in the call to vm_map_lookup_done.
4367  *
4368  * A handle (out_entry) is returned for use in vm_map_lookup_done, to make
4369  * that fast.
4370  *
4371  * If a lookup is requested with "write protection" specified, the map may
4372  * be changed to perform virtual copying operations, although the data
4373  * referenced will remain the same.
4374  *
4375  * No requirements.
4376  */
4377 int
4378 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
4379 	      vm_offset_t vaddr,
4380 	      vm_prot_t fault_typea,
4381 	      vm_map_entry_t *out_entry,	/* OUT */
4382 	      struct vm_map_backing **bap,	/* OUT */
4383 	      vm_pindex_t *pindex,		/* OUT */
4384 	      vm_pindex_t *pcount,		/* OUT */
4385 	      vm_prot_t *out_prot,		/* OUT */
4386 	      int *wflags)			/* OUT */
4387 {
4388 	vm_map_entry_t entry;
4389 	vm_map_t map = *var_map;
4390 	vm_prot_t prot;
4391 	vm_prot_t fault_type = fault_typea;
4392 	int use_read_lock = 1;
4393 	int rv = KERN_SUCCESS;
4394 	int count;
4395 	thread_t td = curthread;
4396 
4397 	/*
4398 	 * vm_map_entry_reserve() implements an important mitigation
4399 	 * against mmap() spam running the kernel out of vm_map_entry
4400 	 * structures, but it can also cause an infinite call recursion.
4401 	 * Use td_nest_count to prevent an infinite recursion (allows
4402 	 * the vm_map code to dig into the pcpu vm_map_entry reserve).
4403 	 */
4404 	count = 0;
4405 	if (td->td_nest_count == 0) {
4406 		++td->td_nest_count;
4407 		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
4408 		--td->td_nest_count;
4409 	}
4410 RetryLookup:
4411 	if (use_read_lock)
4412 		vm_map_lock_read(map);
4413 	else
4414 		vm_map_lock(map);
4415 
4416 	/*
4417 	 * Always do a full lookup.  The hint doesn't get us much anymore
4418 	 * now that the map is RB'd.
4419 	 */
4420 	cpu_ccfence();
4421 	*out_entry = NULL;
4422 	*bap = NULL;
4423 
4424 	{
4425 		vm_map_entry_t tmp_entry;
4426 
4427 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
4428 			rv = KERN_INVALID_ADDRESS;
4429 			goto done;
4430 		}
4431 		entry = tmp_entry;
4432 		*out_entry = entry;
4433 	}
4434 
4435 	/*
4436 	 * Handle submaps.
4437 	 */
4438 	if (entry->maptype == VM_MAPTYPE_SUBMAP) {
4439 		vm_map_t old_map = map;
4440 
4441 		*var_map = map = entry->ba.sub_map;
4442 		if (use_read_lock)
4443 			vm_map_unlock_read(old_map);
4444 		else
4445 			vm_map_unlock(old_map);
4446 		use_read_lock = 1;
4447 		goto RetryLookup;
4448 	}
4449 
4450 	/*
4451 	 * Check whether this task is allowed to have this page.
4452 	 * Note the special case for MAP_ENTRY_COW pages with an override.
4453 	 * This is to implement a forced COW for debuggers.
4454 	 */
4455 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
4456 		prot = entry->max_protection;
4457 	else
4458 		prot = entry->protection;
4459 
4460 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
4461 	if ((fault_type & prot) != fault_type) {
4462 		rv = KERN_PROTECTION_FAILURE;
4463 		goto done;
4464 	}
4465 
4466 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
4467 	    (entry->eflags & MAP_ENTRY_COW) &&
4468 	    (fault_type & VM_PROT_WRITE) &&
4469 	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
4470 		rv = KERN_PROTECTION_FAILURE;
4471 		goto done;
4472 	}
4473 
4474 	/*
4475 	 * If this page is not pageable, we have to get it for all possible
4476 	 * accesses.
4477 	 */
4478 	*wflags = 0;
4479 	if (entry->wired_count) {
4480 		*wflags |= FW_WIRED;
4481 		prot = fault_type = entry->protection;
4482 	}
4483 
4484 	/*
4485 	 * Virtual page tables may need to update the accessed (A) bit
4486 	 * in a page table entry.  Upgrade the fault to a write fault for
4487 	 * that case if the map will support it.  If the map does not support
4488 	 * it the page table entry simply will not be updated.
4489 	 */
4490 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
4491 		if (prot & VM_PROT_WRITE)
4492 			fault_type |= VM_PROT_WRITE;
4493 	}
4494 
4495 	if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
4496 	    pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
4497 		if ((prot & VM_PROT_WRITE) == 0)
4498 			fault_type |= VM_PROT_WRITE;
4499 	}
4500 
4501 	/*
4502 	 * Only NORMAL and VPAGETABLE maps are object-based.  UKSMAPs are not.
4503 	 */
4504 	if (entry->maptype != VM_MAPTYPE_NORMAL &&
4505 	    entry->maptype != VM_MAPTYPE_VPAGETABLE) {
4506 		*bap = NULL;
4507 		goto skip;
4508 	}
4509 
4510 	/*
4511 	 * If the entry was copy-on-write, we either copy now or demote access.
4512 	 */
4513 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4514 		/*
4515 		 * If we want to write the page, we may as well handle that
4516 		 * now since we've got the map locked.
4517 		 *
4518 		 * If we don't need to write the page, we just demote the
4519 		 * permissions allowed.
4520 		 */
4521 		if (fault_type & VM_PROT_WRITE) {
4522 			/*
4523 			 * Not allowed if TDF_NOFAULT is set as the shadowing
4524 			 * operation can deadlock against the faulting
4525 			 * function due to the copy-on-write.
4526 			 */
4527 			if (curthread->td_flags & TDF_NOFAULT) {
4528 				rv = KERN_FAILURE_NOFAULT;
4529 				goto done;
4530 			}
4531 
4532 			/*
4533 			 * Make a new vm_map_backing + object, and place it
4534 			 * in the object chain.  Note that no new references
4535 			 * have appeared -- one just moved from the map to
4536 			 * the new object.
4537 			 */
4538 			if (use_read_lock && vm_map_lock_upgrade(map)) {
4539 				/* lost lock */
4540 				use_read_lock = 0;
4541 				goto RetryLookup;
4542 			}
4543 			use_read_lock = 0;
4544 			vm_map_entry_shadow(entry);
4545 			*wflags |= FW_DIDCOW;
4546 		} else {
4547 			/*
4548 			 * We're attempting to read a copy-on-write page --
4549 			 * don't allow writes.
4550 			 */
4551 			prot &= ~VM_PROT_WRITE;
4552 		}
4553 	}
4554 
4555 	/*
4556 	 * Create an object if necessary.  This code also handles
4557 	 * partitioning large entries to improve vm_fault performance.
4558 	 */
4559 	if (entry->ba.object == NULL && !map->system_map) {
4560 		if (use_read_lock && vm_map_lock_upgrade(map))  {
4561 			/* lost lock */
4562 			use_read_lock = 0;
4563 			goto RetryLookup;
4564 		}
4565 		use_read_lock = 0;
4566 
4567 		/*
4568 		 * Partition large entries, giving each its own VM object,
4569 		 * to improve concurrent fault performance.  This is only
4570 		 * applicable to userspace.
4571 		 */
4572 		if (map != &kernel_map &&
4573 		    entry->maptype == VM_MAPTYPE_NORMAL &&
4574 		    ((entry->ba.start ^ entry->ba.end) &
4575 		     ~MAP_ENTRY_PARTITION_MASK) &&
4576 		    vm_map_partition_enable) {
4577 			if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
4578 				entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
4579 				++mycpu->gd_cnt.v_intrans_coll;
4580 				++mycpu->gd_cnt.v_intrans_wait;
4581 				vm_map_transition_wait(map, 0);
4582 				goto RetryLookup;
4583 			}
4584 			vm_map_entry_partition(map, entry, vaddr, &count);
4585 		}
4586 		vm_map_entry_allocate_object(entry);
4587 	}
4588 
4589 	/*
4590 	 * Return the object/offset from this entry.  If the entry was
4591 	 * copy-on-write or empty, it has been fixed up.
4592 	 */
4593 	*bap = &entry->ba;
4594 
4595 skip:
4596 	*pindex = OFF_TO_IDX((vaddr - entry->ba.start) + entry->ba.offset);
4597 	*pcount = OFF_TO_IDX(entry->ba.end - trunc_page(vaddr));
4598 
4599 	/*
4600 	 * Return whether this is the only map sharing this data.  On
4601 	 * success we return with a read lock held on the map.  On failure
4602 	 * we return with the map unlocked.
4603 	 */
4604 	*out_prot = prot;
4605 done:
4606 	if (rv == KERN_SUCCESS) {
4607 		if (use_read_lock == 0)
4608 			vm_map_lock_downgrade(map);
4609 	} else if (use_read_lock) {
4610 		vm_map_unlock_read(map);
4611 	} else {
4612 		vm_map_unlock(map);
4613 	}
4614 	if (count > 0)
4615 		vm_map_entry_release(count);
4616 
4617 	return (rv);
4618 }
4619 
4620 /*
4621  * Releases locks acquired by a vm_map_lookup()
4622  * (according to the handle returned by that lookup).
4623  *
4624  * No other requirements.
4625  */
4626 void
4627 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
4628 {
4629 	/*
4630 	 * Unlock the main-level map
4631 	 */
4632 	vm_map_unlock_read(map);
4633 	if (count)
4634 		vm_map_entry_release(count);
4635 }
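
/*
 * Illustrative sketch of the vm_map_lookup()/vm_map_lookup_done()
 * protocol (hypothetical caller that has no map entries of its own
 * reserved, hence the 0 count).  On success the map is returned
 * read-locked and must be released with vm_map_lookup_done().
 */
#if 0
static int
example_translate(vm_map_t map, vm_offset_t va)
{
	vm_map_entry_t entry;
	struct vm_map_backing *ba;
	vm_pindex_t pindex;
	vm_pindex_t pcount;
	vm_prot_t prot;
	int wflags;
	int rv;

	rv = vm_map_lookup(&map, va, VM_PROT_READ, &entry, &ba,
			   &pindex, &pcount, &prot, &wflags);
	if (rv != KERN_SUCCESS)
		return (rv);
	/* ... use ba (may be NULL for non-object maps), pindex, prot ... */
	vm_map_lookup_done(map, entry, 0);
	return (KERN_SUCCESS);
}
#endif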
4636 
4637 static void
4638 vm_map_entry_partition(vm_map_t map, vm_map_entry_t entry,
4639 		       vm_offset_t vaddr, int *countp)
4640 {
4641 	vaddr &= ~MAP_ENTRY_PARTITION_MASK;
4642 	vm_map_clip_start(map, entry, vaddr, countp);
4643 	vaddr += MAP_ENTRY_PARTITION_SIZE;
4644 	vm_map_clip_end(map, entry, vaddr, countp);
4645 }
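
/*
 * Example (illustration only): for a vaddr somewhere inside a large
 * entry, the two clips above carve out the single partition-sized piece
 * [vaddr & ~MAP_ENTRY_PARTITION_MASK,
 *  (vaddr & ~MAP_ENTRY_PARTITION_MASK) + MAP_ENTRY_PARTITION_SIZE)
 * so that faults landing in different partitions hit different entries
 * (and thus different VM objects) and do not contend on a single object.
 */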
4646 
4647 /*
4648  * Quick hack, needs some help to make it more SMP friendly.
4649  */
4650 void
4651 vm_map_interlock(vm_map_t map, struct vm_map_ilock *ilock,
4652 		 vm_offset_t ran_beg, vm_offset_t ran_end)
4653 {
4654 	struct vm_map_ilock *scan;
4655 
4656 	ilock->ran_beg = ran_beg;
4657 	ilock->ran_end = ran_end;
4658 	ilock->flags = 0;
4659 
4660 	spin_lock(&map->ilock_spin);
4661 restart:
4662 	for (scan = map->ilock_base; scan; scan = scan->next) {
4663 		if (ran_end > scan->ran_beg && ran_beg < scan->ran_end) {
4664 			scan->flags |= ILOCK_WAITING;
4665 			ssleep(scan, &map->ilock_spin, 0, "ilock", 0);
4666 			goto restart;
4667 		}
4668 	}
4669 	ilock->next = map->ilock_base;
4670 	map->ilock_base = ilock;
4671 	spin_unlock(&map->ilock_spin);
4672 }
4673 
4674 void
4675 vm_map_deinterlock(vm_map_t map, struct  vm_map_ilock *ilock)
4676 {
4677 	struct vm_map_ilock *scan;
4678 	struct vm_map_ilock **scanp;
4679 
4680 	spin_lock(&map->ilock_spin);
4681 	scanp = &map->ilock_base;
4682 	while ((scan = *scanp) != NULL) {
4683 		if (scan == ilock) {
4684 			*scanp = ilock->next;
4685 			spin_unlock(&map->ilock_spin);
4686 			if (ilock->flags & ILOCK_WAITING)
4687 				wakeup(ilock);
4688 			return;
4689 		}
4690 		scanp = &scan->next;
4691 	}
4692 	spin_unlock(&map->ilock_spin);
4693 	panic("vm_map_deinterlock: missing ilock!");
4694 }
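
/*
 * Illustrative sketch (hypothetical caller): the interlock serializes
 * operations over an address range while the map lock itself may be
 * dropped.  The ilock structure lives on the caller's stack and every
 * vm_map_interlock() must be paired with a vm_map_deinterlock().
 */
#if 0
static void
example_range_interlock(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	struct vm_map_ilock ilock;

	vm_map_interlock(map, &ilock, start, end);
	/* ... operate on [start, end) exclusive of overlapping ilocks ... */
	vm_map_deinterlock(map, &ilock);
}
#endif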
4695 
4696 #include "opt_ddb.h"
4697 #ifdef DDB
4698 #include <ddb/ddb.h>
4699 
4700 /*
4701  * Debugging only
4702  */
4703 DB_SHOW_COMMAND(map, vm_map_print)
4704 {
4705 	static int nlines;
4706 	/* XXX convert args. */
4707 	vm_map_t map = (vm_map_t)addr;
4708 	boolean_t full = have_addr;
4709 
4710 	vm_map_entry_t entry;
4711 
4712 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4713 	    (void *)map,
4714 	    (void *)map->pmap, map->nentries, map->timestamp);
4715 	nlines++;
4716 
4717 	if (!full && db_indent)
4718 		return;
4719 
4720 	db_indent += 2;
4721 	RB_FOREACH(entry, vm_map_rb_tree, &map->rb_root) {
4722 		db_iprintf("map entry %p: start=%p, end=%p\n",
4723 		    (void *)entry,
4724 		    (void *)entry->ba.start, (void *)entry->ba.end);
4725 		nlines++;
4726 		{
4727 			static char *inheritance_name[4] =
4728 			{"share", "copy", "none", "donate_copy"};
4729 
4730 			db_iprintf(" prot=%x/%x/%s",
4731 			    entry->protection,
4732 			    entry->max_protection,
4733 			    inheritance_name[(int)(unsigned char)
4734 						entry->inheritance]);
4735 			if (entry->wired_count != 0)
4736 				db_printf(", wired");
4737 		}
4738 		switch(entry->maptype) {
4739 		case VM_MAPTYPE_SUBMAP:
4740 			/* XXX no %qd in kernel.  Truncate entry->ba.offset. */
4741 			db_printf(", share=%p, offset=0x%lx\n",
4742 			    (void *)entry->ba.sub_map,
4743 			    (long)entry->ba.offset);
4744 			nlines++;
4745 
4746 			db_indent += 2;
4747 			vm_map_print((db_expr_t)(intptr_t)entry->ba.sub_map,
4748 				     full, 0, NULL);
4749 			db_indent -= 2;
4750 			break;
4751 		case VM_MAPTYPE_NORMAL:
4752 		case VM_MAPTYPE_VPAGETABLE:
4753 			/* XXX no %qd in kernel.  Truncate entry->ba.offset. */
4754 			db_printf(", object=%p, offset=0x%lx",
4755 			    (void *)entry->ba.object,
4756 			    (long)entry->ba.offset);
4757 			if (entry->eflags & MAP_ENTRY_COW)
4758 				db_printf(", copy (%s)",
4759 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4760 			db_printf("\n");
4761 			nlines++;
4762 
4763 			if (entry->ba.object) {
4764 				db_indent += 2;
4765 				vm_object_print((db_expr_t)(intptr_t)
4766 						entry->ba.object,
4767 						full, 0, NULL);
4768 				nlines += 4;
4769 				db_indent -= 2;
4770 			}
4771 			break;
4772 		case VM_MAPTYPE_UKSMAP:
4773 			db_printf(", uksmap=%p, offset=0x%lx",
4774 			    (void *)entry->ba.uksmap,
4775 			    (long)entry->ba.offset);
4776 			if (entry->eflags & MAP_ENTRY_COW)
4777 				db_printf(", copy (%s)",
4778 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4779 			db_printf("\n");
4780 			nlines++;
4781 			break;
4782 		default:
4783 			break;
4784 		}
4785 	}
4786 	db_indent -= 2;
4787 	if (db_indent == 0)
4788 		nlines = 0;
4789 }
4790 
4791 /*
4792  * Debugging only
4793  */
4794 DB_SHOW_COMMAND(procvm, procvm)
4795 {
4796 	struct proc *p;
4797 
4798 	if (have_addr) {
4799 		p = (struct proc *) addr;
4800 	} else {
4801 		p = curproc;
4802 	}
4803 
4804 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4805 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4806 	    (void *)vmspace_pmap(p->p_vmspace));
4807 
4808 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
4809 }
4810 
4811 #endif /* DDB */
4812