/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
 * $DragonFly: src/sys/vm/vm_map.c,v 1.56 2007/04/29 18:25:41 dillon Exp $
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/serialize.h>
#include <sys/lock.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/tree.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>

#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/random.h>
#include <sys/sysctl.h>

/*
 * Virtual memory maps provide for the mapping, protection, and sharing
 * of virtual memory objects.  In addition, this module provides for an
 * efficient virtual copy of memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple entries.
 * A hint and an RB tree are used to speed up lookups.
 *
 * Callers looking to modify maps specify start/end addresses which cause
 * the related map entry to be clipped if necessary, and then later
 * recombined if the pieces remain compatible.
 *
 * Virtual copy operations are performed by copying VM object references
 * from one map to another, and then marking both regions as copy-on-write.
 */
static void vmspace_terminate(struct vmspace *vm);
static void vmspace_lock(struct vmspace *vm);
static void vmspace_unlock(struct vmspace *vm);
static void vmspace_dtor(void *obj, void *private);

MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore");

struct sysref_class vmspace_sysref_class = {
	.name =		"vmspace",
	.mtype =	M_VMSPACE,
	.proto =	SYSREF_PROTO_VMSPACE,
	.offset =	offsetof(struct vmspace, vm_sysref),
	.objsize =	sizeof(struct vmspace),
	.nom_cache =	32,
	.flags =	SRC_MANAGEDINIT,
	.dtor =		vmspace_dtor,
	.ops = {
		.terminate = (sysref_terminate_func_t)vmspace_terminate,
		.lock = (sysref_lock_func_t)vmspace_lock,
		.unlock = (sysref_lock_func_t)vmspace_unlock
	}
};

/*
 * per-cpu page table cross mappings are initialized in early boot
 * and might require a considerable number of vm_map_entry structures.
 */
#define VMEPERCPU	(MAXCPU+1)

static struct vm_zone mapentzone_store, mapzone_store;
static vm_zone_t mapentzone, mapzone;
static struct vm_object mapentobj, mapobj;

static struct vm_map_entry map_entry_init[MAX_MAPENT];
static struct vm_map_entry cpu_map_entry_init[MAXCPU][VMEPERCPU];
static struct vm_map map_init[MAX_KMAP];

static int randomize_mmap;
SYSCTL_INT(_vm, OID_AUTO, randomize_mmap, CTLFLAG_RW, &randomize_mmap, 0,
    "Randomize mmap offsets");

static void vm_map_entry_shadow(vm_map_entry_t entry, int addref);
static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *);
static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
		vm_map_entry_t);
static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry,
		vm_offset_t start, vm_offset_t end, int *count, int flags);

/*
 * Initialize the vm_map module.  Must be called before any other vm_map
 * routines.
 *
 * Map and entry structures are allocated from the general purpose
 * memory pool with some exceptions:
 *
 *	- The kernel map is allocated statically.
 *	- Initial kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 *
 * Called from the low level boot code only.
 */
void
vm_map_startup(void)
{
	mapzone = &mapzone_store;
	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
		map_init, MAX_KMAP);
	mapentzone = &mapentzone_store;
	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
		map_entry_init, MAX_MAPENT);
}

/*
 * Called prior to any vmspace allocations.
 *
 * Called from the low level boot code only.
 */
void
vm_init2(void)
{
	zinitna(mapentzone, &mapentobj, NULL, 0, 0,
		ZONE_USE_RESERVE | ZONE_SPECIAL, 1);
	zinitna(mapzone, &mapobj, NULL, 0, 0, 0, 1);
	pmap_init2();
	vm_object_init2();
}

/*
 * Red black tree functions
 *
 * The caller must hold the related map lock.
 */
static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

/* a->start is the comparison key and is the only field that must be initialized */
static int
rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
{
	if (a->start < b->start)
		return(-1);
	else if (a->start > b->start)
		return(1);
	return(0);
}

/*
 * Allocate a vmspace structure, including a vm_map and pmap.
 * Initialize numerous fields.  While the initial allocation is zeroed,
 * subsequent reuse from the objcache leaves elements of the structure
 * intact (particularly the pmap), so portions must be zeroed.
 *
 * The structure is not considered activated until we call sysref_activate().
 *
 * No requirements.
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
	struct vmspace *vm;

	vm = sysref_alloc(&vmspace_sysref_class);
	bzero(&vm->vm_startcopy,
	      (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
	vm_map_init(&vm->vm_map, min, max, NULL);
	pmap_pinit(vmspace_pmap(vm));		/* (some fields reused) */
	lwkt_gettoken(&vm->vm_map.token);
	vm->vm_map.pmap = vmspace_pmap(vm);		/* XXX */
	vm->vm_shm = NULL;
	vm->vm_exitingcnt = 0;
	cpu_vmspace_alloc(vm);
	sysref_activate(&vm->vm_sysref);
	lwkt_reltoken(&vm->vm_map.token);

	return (vm);
}

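/*
 * (Lifecycle note, a sketch summarizing the functions below: the returned
 * vmspace is reference-counted via sysref.  When the last reference is
 * dropped vmspace_terminate() runs, and vmspace_dtor() cleans up the pmap
 * remnants when the structure is finally recycled from the objcache.  See
 * the comments on those functions.)
 */
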
/*
 * dtor function - Some elements of the pmap are retained in the
 * free-cached vmspaces to improve performance.  We have to clean them up
 * here before returning the vmspace to the memory pool.
 *
 * No requirements.
 */
static void
vmspace_dtor(void *obj, void *private)
{
	struct vmspace *vm = obj;

	pmap_puninit(vmspace_pmap(vm));
}

/*
 * Called in two cases:
 *
 * (1) When the last sysref is dropped, but exitingcnt might still be
 *     non-zero.
 *
 * (2) When there are no sysrefs (i.e. refcnt is negative) left and the
 *     exitingcnt becomes zero.
 *
 * sysref will not scrap the object until we call sysref_put() once more
 * after the last ref has been dropped.
 *
 * Interlocked by the sysref API.
 */
static void
vmspace_terminate(struct vmspace *vm)
{
	int count;

	/*
	 * If exitingcnt is non-zero we can't get rid of the entire vmspace
	 * yet, but we can scrap user memory.
	 */
	lwkt_gettoken(&vm->vm_map.token);
	if (vm->vm_exitingcnt) {
		shmexit(vm);
		pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
				  VM_MAX_USER_ADDRESS);
		vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
			      VM_MAX_USER_ADDRESS);
		lwkt_reltoken(&vm->vm_map.token);
		return;
	}
	cpu_vmspace_free(vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been freed
	 * in exit1().
	 */
	shmexit(vm);

	KKASSERT(vm->vm_upcalls == NULL);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(&vm->vm_map);
	vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
		vm->vm_map.max_offset, &count);
	vm_map_unlock(&vm->vm_map);
	vm_map_entry_release(count);

	lwkt_gettoken(&vmspace_pmap(vm)->pm_token);
	pmap_release(vmspace_pmap(vm));
	lwkt_reltoken(&vmspace_pmap(vm)->pm_token);
	lwkt_reltoken(&vm->vm_map.token);
	sysref_put(&vm->vm_sysref);
}

/*
 * vmspaces are not currently locked.
 */
static void
vmspace_lock(struct vmspace *vm __unused)
{
}

static void
vmspace_unlock(struct vmspace *vm __unused)
{
}

/*
 * This is called during exit indicating that the vmspace is no
 * longer in use by an exiting process, but the process has not yet
 * been cleaned up.
 *
 * No requirements.
 */
void
vmspace_exitbump(struct vmspace *vm)
{
	lwkt_gettoken(&vm->vm_map.token);
	++vm->vm_exitingcnt;
	lwkt_reltoken(&vm->vm_map.token);
}

/*
 * This is called in the wait*() handling code.  The vmspace can be terminated
 * after the last wait is finished using it.
 *
 * No requirements.
 */
void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	vm = p->p_vmspace;
	lwkt_gettoken(&vm->vm_map.token);
	p->p_vmspace = NULL;

	if (--vm->vm_exitingcnt == 0 && sysref_isinactive(&vm->vm_sysref)) {
		lwkt_reltoken(&vm->vm_map.token);
		vmspace_terminate(vm);
	} else {
		lwkt_reltoken(&vm->vm_map.token);
	}
}

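/*
 * (Teardown ordering, a sketch summarizing the comments above: exit1()
 * bumps exitingcnt via vmspace_exitbump(); dropping the last sysref runs
 * vmspace_terminate() a first time to scrap user memory while exitingcnt
 * is still non-zero; the final vmspace_exitfree() from the wait*() path
 * drops exitingcnt to zero and runs vmspace_terminate() again to destroy
 * the vmspace for good.)
 */
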
/*
 * Swap usage is determined by taking the proportional swap used by
 * VM objects backing the VM map.  To make up for fractional losses,
 * if the VM object has any swap use at all the associated map entries
 * count for at least 1 swap page.
 *
 * No requirements.
 */
int
vmspace_swap_count(struct vmspace *vm)
{
	vm_map_t map = &vm->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	int count = 0;
	int n;

	lwkt_gettoken(&vm->vm_map.token);
	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		switch(cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->object.vm_object) == NULL)
				break;
			if (object->swblock_count) {
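				/*
				 * The object keeps roughly swblock_count *
				 * SWAP_META_PAGES pages on swap.  This
				 * entry maps n of the object's object->size
				 * pages, so charge it the proportional
				 * share; the +1 rounds fractional use up
				 * to at least one page.
				 */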
				n = (cur->end - cur->start) / PAGE_SIZE;
				count += object->swblock_count *
				    SWAP_META_PAGES * n / object->size + 1;
			}
			break;
		default:
			break;
		}
	}
	lwkt_reltoken(&vm->vm_map.token);
	return(count);
}

/*
 * Calculate the approximate number of anonymous pages in use by
 * this vmspace.  To make up for fractional losses, we count each
 * VM object as having at least 1 anonymous page.
 *
 * No requirements.
 */
int
vmspace_anonymous_count(struct vmspace *vm)
{
	vm_map_t map = &vm->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	int count = 0;

	lwkt_gettoken(&vm->vm_map.token);
	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		switch(cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->object.vm_object) == NULL)
				break;
			if (object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP) {
				break;
			}
			count += object->resident_page_count;
			break;
		default:
			break;
		}
	}
	lwkt_reltoken(&vm->vm_map.token);
	return(count);
}

/*
 * Creates and returns a new empty VM map with the given physical map
 * structure, and having the given lower and upper address bounds.
 *
 * No requirements.
 */
vm_map_t
vm_map_create(vm_map_t result, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	if (result == NULL)
		result = zalloc(mapzone);
	vm_map_init(result, min, max, pmap);
	return (result);
}

/*
 * Initialize an existing vm_map structure such as that in the vmspace
 * structure.  The pmap is initialized elsewhere.
 *
 * No requirements.
 */
void
vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max, pmap_t pmap)
{
	map->header.next = map->header.prev = &map->header;
	RB_INIT(&map->rb_root);
	map->nentries = 0;
	map->size = 0;
	map->system_map = 0;
	map->min_offset = min;
	map->max_offset = max;
	map->pmap = pmap;
	map->first_free = &map->header;
	map->hint = &map->header;
	map->timestamp = 0;
	map->flags = 0;
	lwkt_token_init(&map->token, "vm_map");
	lockinit(&map->lock, "thrd_sleep", (hz + 9) / 10, 0);
	TUNABLE_INT("vm.cache_vmspaces", &vmspace_sysref_class.nom_cache);
}

/*
 * Shadow the vm_map_entry's object.  This typically needs to be done when
 * a write fault is taken on an entry which had previously been cloned by
 * fork().  The shared object (which might be NULL) must become private so
 * we add a shadow layer above it.
 *
 * Object allocation for anonymous mappings is deferred as long as possible.
 * When creating a shadow, however, the underlying object must be instantiated
 * so it can be shared.
 *
 * If the map segment is governed by a virtual page table then it is
 * possible to address offsets beyond the mapped area.  Just allocate
 * a maximally sized object for this case.
 *
 * The vm_map must be exclusively locked.
 * No other requirements.
 */
static
void
vm_map_entry_shadow(vm_map_entry_t entry, int addref)
{
	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		vm_object_shadow(&entry->object.vm_object, &entry->offset,
				 0x7FFFFFFF, addref);	/* XXX */
	} else {
		vm_object_shadow(&entry->object.vm_object, &entry->offset,
				 atop(entry->end - entry->start), addref);
	}
	entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
}

/*
 * Allocate an object for a vm_map_entry.
 *
 * Object allocation for anonymous mappings is deferred as long as possible.
 * This function is called when we can defer no longer, generally when a map
 * entry might be split or forked or takes a page fault.
 *
 * If the map segment is governed by a virtual page table then it is
 * possible to address offsets beyond the mapped area.  Just allocate
 * a maximally sized object for this case.
 *
 * The vm_map must be exclusively locked.
 * No other requirements.
 */
void
vm_map_entry_allocate_object(vm_map_entry_t entry)
{
	vm_object_t obj;

	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF); /* XXX */
	} else {
		obj = vm_object_allocate(OBJT_DEFAULT,
					 atop(entry->end - entry->start));
	}
	entry->object.vm_object = obj;
	entry->offset = 0;
}

/*
 * Set an initial negative count so the first attempt to reserve
 * space preloads a bunch of vm_map_entry's for this cpu.  Also
 * pre-allocate 2 vm_map_entries which will be needed by zalloc() to
 * map a new page for vm_map_entry structures.  SMP systems are
 * particularly sensitive.
 *
 * This routine is called in early boot so we cannot just call
 * vm_map_entry_reserve().
 *
 * Called from the low level boot code only (for each cpu)
 */
void
vm_map_entry_reserve_cpu_init(globaldata_t gd)
{
	vm_map_entry_t entry;
	int i;

	gd->gd_vme_avail -= MAP_RESERVE_COUNT * 2;
	entry = &cpu_map_entry_init[gd->gd_cpuid][0];
	for (i = 0; i < VMEPERCPU; ++i, ++entry) {
		entry->next = gd->gd_vme_base;
		gd->gd_vme_base = entry;
	}
}

/*
 * Reserves vm_map_entry structures so code later on can manipulate
 * map_entry structures within a locked map without blocking while
 * trying to allocate a new vm_map_entry.
 *
 * No requirements.
 */
int
vm_map_entry_reserve(int count)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;

	/*
	 * Make sure we have enough structures in gd_vme_base to handle
	 * the reservation request.
	 *
	 * The critical section protects access to the per-cpu gd.
	 */
	crit_enter();
	while (gd->gd_vme_avail < count) {
		entry = zalloc(mapentzone);
		entry->next = gd->gd_vme_base;
		gd->gd_vme_base = entry;
		++gd->gd_vme_avail;
	}
	gd->gd_vme_avail -= count;
	crit_exit();

	return(count);
}

/*
 * Releases previously reserved vm_map_entry structures that were not
 * used.  If we have too much junk in our per-cpu cache clean some of
 * it out.
 *
 * No requirements.
 */
void
vm_map_entry_release(int count)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;

	crit_enter();
	gd->gd_vme_avail += count;
	while (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
		entry = gd->gd_vme_base;
		KKASSERT(entry != NULL);
		gd->gd_vme_base = entry->next;
		--gd->gd_vme_avail;
		crit_exit();
		zfree(mapentzone, entry);
		crit_enter();
	}
	crit_exit();
}

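/*
 * Typical usage, a sketch mirroring vm_map_find() and vmspace_terminate()
 * in this file: reserve entries before taking the map lock so entry
 * allocation cannot block while the map is held, then release afterwards:
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	... clip / insert / delete entries, passing &count through ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */
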
/*
 * Reserve map entry structures for use in kernel_map itself.  These
 * entries have *ALREADY* been reserved on a per-cpu basis when the map
 * was inited.  This function is used by zalloc() to avoid a recursion
 * when zalloc() itself needs to allocate additional kernel memory.
 *
 * This function works like the normal reserve but does not load the
 * vm_map_entry cache (because that would result in an infinite
 * recursion).  Note that gd_vme_avail may go negative.  This is expected.
 *
 * Any caller of this function must be sure to renormalize after
 * potentially eating entries to ensure that the reserve supply
 * remains intact.
 *
 * No requirements.
 */
int
vm_map_entry_kreserve(int count)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	gd->gd_vme_avail -= count;
	crit_exit();
	KASSERT(gd->gd_vme_base != NULL,
		("no reserved entries left, gd_vme_avail = %d\n",
		gd->gd_vme_avail));
	return(count);
}

/*
 * Release previously reserved map entries for kernel_map.  We do not
 * attempt to clean up like the normal release function as this would
 * cause an unnecessary (but probably not fatal) deep procedure call.
 *
 * No requirements.
 */
void
vm_map_entry_krelease(int count)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	gd->gd_vme_avail += count;
	crit_exit();
}

/*
 * Allocates a VM map entry for insertion.  No entry fields are filled in.
 *
 * The entries should have previously been reserved.  The reservation count
 * is tracked in (*countp).
 *
 * No requirements.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map, int *countp)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;

	KKASSERT(*countp > 0);
	--*countp;
	crit_enter();
	entry = gd->gd_vme_base;
	KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
	gd->gd_vme_base = entry->next;
	crit_exit();

	return(entry);
}

/*
 * Dispose of a vm_map_entry that is no longer being referenced.
 *
 * No requirements.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
{
	struct globaldata *gd = mycpu;

	KKASSERT(map->hint != entry);
	KKASSERT(map->first_free != entry);

	++*countp;
	crit_enter();
	entry->next = gd->gd_vme_base;
	gd->gd_vme_base = entry;
	crit_exit();
}

/*
 * Insert/remove entries from maps.
 *
 * The related map must be exclusively locked.
 * The caller must hold map->token.
 * No other requirements.
 */
static __inline void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{
	ASSERT_VM_MAP_LOCKED(map);

	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;
	if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry))
		panic("vm_map_entry_link: dup addr map %p ent %p", map, entry);
}

static __inline void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t prev;
	vm_map_entry_t next;

	ASSERT_VM_MAP_LOCKED(map);

	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
		panic("vm_map_entry_unlink: attempt to mess with "
		      "locked entry! %p", entry);
	}
	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry);
	map->nentries--;
}

/*
 * Finds the map entry containing (or immediately preceding) the specified
 * address in the given map.  The entry is returned in (*entry).
 *
 * The boolean result indicates whether the address is actually contained
 * in the map.
 *
 * The related map must be locked.
 * No other requirements.
 */
boolean_t
vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry)
{
	vm_map_entry_t tmp;
	vm_map_entry_t last;

	ASSERT_VM_MAP_LOCKED(map);
#if 0
	/*
	 * XXX TEMPORARILY DISABLED.  For some reason our attempt to revive
	 * the hint code with the red-black lookup meets with system crashes
	 * and lockups.  We do not yet know why.
	 *
	 * It is possible that the problem is related to the setting
	 * of the hint during map_entry deletion, in the code specified
	 * at the GGG comment later on in this file.
	 */
	/*
	 * Quickly check the cached hint, there's a good chance of a match.
	 */
	if (map->hint != &map->header) {
		tmp = map->hint;
		if (address >= tmp->start && address < tmp->end) {
			*entry = tmp;
			return(TRUE);
		}
	}
#endif

	/*
	 * Locate the record from the top of the tree.  'last' tracks the
	 * closest prior record and is returned if no match is found, which
	 * in binary tree terms means tracking the most recent right-branch
	 * taken.  If there is no prior record, &map->header is returned.
	 */
	last = &map->header;
	tmp = RB_ROOT(&map->rb_root);

	while (tmp) {
		if (address >= tmp->start) {
			if (address < tmp->end) {
				*entry = tmp;
				map->hint = tmp;
				return(TRUE);
			}
			last = tmp;
			tmp = RB_RIGHT(tmp, rb_entry);
		} else {
			tmp = RB_LEFT(tmp, rb_entry);
		}
	}
	*entry = last;
	return (FALSE);
}

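/*
 * Example, a sketch of the lookup contract described above:
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... addr lies within [entry->start, entry->end) ...
 *	} else {
 *		... addr is unmapped; entry is the immediately
 *		    preceding entry, or &map->header if none ...
 *	}
 */
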
/*
 * Inserts the given whole VM object into the target map at the specified
 * address range.  The object's size should match that of the address range.
 *
 * The map must be exclusively locked.
 * The object must be held.
 * The caller must have reserved sufficient vm_map_entry structures.
 *
 * If object is non-NULL, ref count must be bumped by caller prior to
 * making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, int *countp,
	      vm_object_t object, vm_ooffset_t offset,
	      vm_offset_t start, vm_offset_t end,
	      vm_maptype_t maptype,
	      vm_prot_t prot, vm_prot_t max,
	      int cow)
{
	vm_map_entry_t new_entry;
	vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
	vm_eflags_t protoeflags;
	int must_drop = 0;

	ASSERT_VM_MAP_LOCKED(map);
	if (object)
		ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Check that the start and end points are not bogus.
	 */
	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */

	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_NOFAULT) {
		protoeflags |= MAP_ENTRY_NOFAULT;

		KASSERT(object == NULL,
			("vm_map_insert: paradoxical MAP_NOFAULT request"));
	}
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_IS_STACK)
		protoeflags |= MAP_ENTRY_STACK;
	if (cow & MAP_IS_KSTACK)
		protoeflags |= MAP_ENTRY_KSTACK;

	lwkt_gettoken(&map->token);

	if (object) {
		/*
		 * When object is non-NULL, it could be shared with another
		 * process.  We have to set or clear OBJ_ONEMAPPING
		 * appropriately.
		 */
		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		}
	}
	else if ((prev_entry != &map->header) &&
		 (prev_entry->eflags == protoeflags) &&
		 (prev_entry->end == start) &&
		 (prev_entry->wired_count == 0) &&
		 prev_entry->maptype == maptype &&
		 ((prev_entry->object.vm_object == NULL) ||
		  vm_object_coalesce(prev_entry->object.vm_object,
				     OFF_TO_IDX(prev_entry->offset),
				     (vm_size_t)(prev_entry->end - prev_entry->start),
				     (vm_size_t)(end - prev_entry->end)))) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->end);
			prev_entry->end = end;
			vm_map_simplify_entry(map, prev_entry, countp);
			lwkt_reltoken(&map->token);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
			(prev_entry->end - prev_entry->start);
		if (object) {
			vm_object_hold(object);
			vm_object_chain_wait(object);
			vm_object_reference_locked(object);
			must_drop = 1;
		}
	}

	/*
	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
	 * in things like the buffer map where we manage kva but do not manage
	 * backing objects.
	 */

	/*
	 * Create a new entry
	 */

	new_entry = vm_map_entry_create(map, countp);
	new_entry->start = start;
	new_entry->end = end;

	new_entry->maptype = maptype;
	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->aux.master_pde = 0;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;

	/*
	 * Insert the new entry into the list
	 */

	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Update the free space hint.  Entries cannot overlap.
	 * An exact comparison is needed to avoid matching
	 * against the map->header.
	 */
	if ((map->first_free == prev_entry) &&
	    (prev_entry->end == new_entry->start)) {
		map->first_free = new_entry;
	}

#if 0
	/*
	 * Temporarily removed to avoid MAP_STACK panic, due to
	 * MAP_STACK being a huge hack.  Will be added back in
	 * when MAP_STACK (and the user stack mapping) is fixed.
	 */
	/*
	 * It may be possible to simplify the entry
	 */
	vm_map_simplify_entry(map, new_entry, countp);
#endif

	/*
	 * Try to pre-populate the page table.  Mappings governed by virtual
	 * page tables cannot be prepopulated without a lot of work, so
	 * don't try.
	 */
	if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
	    maptype != VM_MAPTYPE_VPAGETABLE) {
		pmap_object_init_pt(map->pmap, start, prot,
				    object, OFF_TO_IDX(offset), end - start,
				    cow & MAP_PREFAULT_PARTIAL);
	}
	if (must_drop)
		vm_object_drop(object);

	lwkt_reltoken(&map->token);
	return (KERN_SUCCESS);
}

/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  Returns 0 on success, 1 on no space.
 *
 * This function will return an arbitrarily aligned pointer.  If no
 * particular alignment is required you should pass align as 1.  Note that
 * the map may return PAGE_SIZE aligned pointers if all the lengths used in
 * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
 * argument.
 *
 * 'align' should be a power of 2 but is not required to be.
 *
 * The map must be exclusively locked.
 * No other requirements.
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
		 vm_size_t align, int flags, vm_offset_t *addr)
{
	vm_map_entry_t entry, next;
	vm_offset_t end;
	vm_offset_t align_mask;

	if (start < map->min_offset)
		start = map->min_offset;
	if (start > map->max_offset)
		return (1);

	/*
	 * If the alignment is not a power of 2 we will have to use
	 * a mod/division, set align_mask to a special value.
	 */
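	/*
	 * (Example: align = 8 gives (8 | 7) + 1 == 16 == 8 << 1, a power
	 * of 2, so the cheap mask path below is taken; align = 12 gives
	 * (12 | 11) + 1 == 16 != 24, forcing the div/mod path.)
	 */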
	if ((align | (align - 1)) + 1 != (align << 1))
		align_mask = (vm_offset_t)-1;
	else
		align_mask = align - 1;

	/*
	 * Look for the first possible address; if there's already something
	 * at this address, we have to start after it.
	 */
	if (start == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			start = entry->end;
	} else {
		vm_map_entry_t tmp;

		if (vm_map_lookup_entry(map, start, &tmp))
			start = tmp->end;
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in the
	 * gap between existing regions, or after the very last region.
	 */
	for (;; start = (entry = next)->end) {
		/*
		 * Adjust the proposed start by the requested alignment,
		 * being sure that we didn't wrap the address.
		 */
		if (align_mask == (vm_offset_t)-1)
			end = ((start + align - 1) / align) * align;
		else
			end = (start + align_mask) & ~align_mask;
		if (end < start)
			return (1);
		start = end;
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address.
		 * Then check to see if this is the last entry or if the
		 * proposed end fits in the gap between this and the next
		 * entry.
		 */
		end = start + length;
		if (end > map->max_offset || end < start)
			return (1);
		next = entry->next;

		/*
		 * If the next entry's start address is beyond the desired
		 * end address we may have found a good entry.
		 *
		 * If the next entry is a stack mapping we do not map into
		 * the stack's reserved space.
		 *
		 * XXX continue to allow mapping into the stack's reserved
		 * space if doing a MAP_STACK mapping inside a MAP_STACK
		 * mapping, for backwards compatibility.  But the caller
		 * really should use MAP_STACK | MAP_TRYFIXED if they
		 * want to do that.
		 */
		if (next == &map->header)
			break;
		if (next->start >= end) {
			if ((next->eflags & MAP_ENTRY_STACK) == 0)
				break;
			if (flags & MAP_STACK)
				break;
			if (next->start - next->aux.avail_ssize >= end)
				break;
		}
	}
	map->hint = entry;

	/*
	 * Grow the kernel_map if necessary.  pmap_growkernel() will panic
	 * if it fails.  The kernel_map is locked and nothing can steal
	 * our address space if pmap_growkernel() blocks.
	 *
	 * NOTE: This may be unconditionally called for kldload areas on
	 *	 x86_64 because these do not bump kernel_vm_end (which would
	 *	 fill 128G worth of page tables!).  Therefore we must not
	 *	 retry.
	 */
	if (map == &kernel_map) {
		vm_offset_t kstop;

		kstop = round_page(start + length);
		if (kstop > kernel_vm_end)
			pmap_growkernel(start, kstop);
	}
	*addr = start;
	return (0);
}

/*
 * vm_map_find finds an unallocated region in the target address map with
 * the given length and allocates it.  The search is defined to be first-fit
 * from the specified address; the region found is returned in the same
 * parameter.
 *
 * If object is non-NULL, ref count must be bumped by caller
 * prior to making call to account for the new entry.
 *
 * No requirements.  This function will lock the map temporarily.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	    vm_offset_t *addr,	vm_size_t length, vm_size_t align,
	    boolean_t fitit,
	    vm_maptype_t maptype,
	    vm_prot_t prot, vm_prot_t max,
	    int cow)
{
	vm_offset_t start;
	int result;
	int count;

	start = *addr;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	if (object)
		vm_object_hold(object);
	if (fitit) {
		if (vm_map_findspace(map, start, length, align, 0, addr)) {
			vm_map_unlock(map);
			vm_map_entry_release(count);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, &count, object, offset,
			       start, start + length,
			       maptype,
			       prot, max,
			       cow);
	if (object)
		vm_object_drop(object);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (result);
}

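/*
 * Example (a sketch only; kva and size are hypothetical): allocating
 * pageable kernel virtual memory with no backing object, first-fit:
 *
 *	vm_offset_t kva = vm_map_min(&kernel_map);
 *
 *	result = vm_map_find(&kernel_map, NULL, 0, &kva, size, PAGE_SIZE,
 *			     TRUE, VM_MAPTYPE_NORMAL,
 *			     VM_PROT_ALL, VM_PROT_ALL, 0);
 */
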
/*
 * Simplify the given map entry by merging with either neighbor.  This
 * routine also has the ability to merge with both neighbors.
 *
 * This routine guarantees that the passed entry remains valid (though
 * possibly extended).  When merging, this routine may delete one or
 * both neighbors.  No action is taken on entries which have their
 * in-transition flag set.
 *
 * The map must be exclusively locked.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
		++mycpu->gd_cnt.v_intrans_coll;
		return;
	}

	if (entry->maptype == VM_MAPTYPE_SUBMAP)
		return;

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ( (prev->end == entry->start) &&
		     (prev->maptype == entry->maptype) &&
		     (prev->object.vm_object == entry->object.vm_object) &&
		     (!prev->object.vm_object ||
			(prev->offset + prevsize == entry->offset)) &&
		     (prev->eflags == entry->eflags) &&
		     (prev->protection == entry->protection) &&
		     (prev->max_protection == entry->max_protection) &&
		     (prev->inheritance == entry->inheritance) &&
		     (prev->wired_count == entry->wired_count)) {
			if (map->first_free == prev)
				map->first_free = entry;
			if (map->hint == prev)
				map->hint = entry;
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			vm_map_entry_dispose(map, prev, countp);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->maptype == entry->maptype) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		     (!entry->object.vm_object ||
			(entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count)) {
			if (map->first_free == next)
				map->first_free = entry;
			if (map->hint == next)
				map->hint = entry;
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			vm_map_entry_dispose(map, next, countp);
		}
	}
}

/*
 * Asserts that the given entry begins at or after the specified address.
 * If necessary, it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr, countp)		\
{									\
	if (startaddr > entry->start)					\
		_vm_map_clip_start(map, entry, startaddr, countp);	\
}

/*
 * This routine is called only when it is known that the entry must be split.
 *
 * The map must be exclusively locked.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start,
		   int *countp)
{
	vm_map_entry_t new_entry;

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */

	vm_map_simplify_entry(map, entry, countp);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_map_entry_allocate_object(entry);
	}

	new_entry = vm_map_entry_create(map, countp);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	switch(entry->maptype) {
	case VM_MAPTYPE_NORMAL:
	case VM_MAPTYPE_VPAGETABLE:
		if (new_entry->object.vm_object) {
			vm_object_hold(new_entry->object.vm_object);
			vm_object_chain_wait(new_entry->object.vm_object);
			vm_object_reference_locked(new_entry->object.vm_object);
			vm_object_drop(new_entry->object.vm_object);
		}
		break;
	default:
		break;
	}
}

/*
 * Asserts that the given entry ends at or before the specified address.
 * If necessary, it splits the entry into two.
 *
 * The map must be exclusively locked.
 */
#define vm_map_clip_end(map, entry, endaddr, countp)		\
{								\
	if (endaddr < entry->end)				\
		_vm_map_clip_end(map, entry, endaddr, countp);	\
}

/*
 * This routine is called only when it is known that the entry must be split.
 *
 * The map must be exclusively locked.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end,
		 int *countp)
{
	vm_map_entry_t new_entry;

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */

	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_map_entry_allocate_object(entry);
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */

	new_entry = vm_map_entry_create(map, countp);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	switch(entry->maptype) {
	case VM_MAPTYPE_NORMAL:
	case VM_MAPTYPE_VPAGETABLE:
		if (new_entry->object.vm_object) {
			vm_object_hold(new_entry->object.vm_object);
			vm_object_chain_wait(new_entry->object.vm_object);
			vm_object_reference_locked(new_entry->object.vm_object);
			vm_object_drop(new_entry->object.vm_object);
		}
		break;
	default:
		break;
	}
}

/*
 * Asserts that the starting and ending region addresses fall within the
 * valid range for the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)	\
{						\
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
	if (start > end)			\
		start = end;			\
}

/*
 * Used to block when an in-transition collision occurs.  The map
 * is unlocked for the sleep and relocked before the return.
 */
void
vm_map_transition_wait(vm_map_t map)
{
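	/*
	 * tsleep_interlock() puts the thread on the sleep queue before
	 * the map is unlocked, so a wakeup(map) racing the unlock is
	 * not lost.
	 */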
	tsleep_interlock(map, 0);
	vm_map_unlock(map);
	tsleep(map, PINTERLOCKED, "vment", 0);
	vm_map_lock(map);
}

/*
 * When we do blocking operations with the map lock held it is
 * possible that a clip might have occurred on our in-transition entry,
 * requiring an adjustment to the entry in our loop.  These macros
 * help the pageable and clip_range code deal with the case.  The
 * conditional costs virtually nothing if no clipping has occurred.
 */

#define CLIP_CHECK_BACK(entry, save_start)		\
    do {						\
	    while (entry->start != save_start) {	\
		    entry = entry->prev;		\
		    KASSERT(entry != &map->header, ("bad entry clip")); \
	    }						\
    } while(0)

#define CLIP_CHECK_FWD(entry, save_end)			\
    do {						\
	    while (entry->end != save_end) {		\
		    entry = entry->next;		\
		    KASSERT(entry != &map->header, ("bad entry clip")); \
	    }						\
    } while(0)

/*
 * Clip the specified range and return the base entry.  The
 * range may cover several entries starting at the returned base
 * and the first and last entry in the covering sequence will be
 * properly clipped to the requested start and end address.
 *
 * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
 * flag.
 *
 * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
 * covered by the requested range.
 *
 * The map must be exclusively locked on entry and will remain locked
 * on return. If no range exists or the range contains holes and you
 * specified that no holes were allowed, NULL will be returned.  This
 * routine may temporarily unlock the map in order to avoid a deadlock
 * when sleeping.
 */
static
vm_map_entry_t
vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end,
		  int *countp, int flags)
{
	vm_map_entry_t start_entry;
	vm_map_entry_t entry;

	/*
	 * Locate the entry and effect initial clipping.  The in-transition
	 * case does not occur very often so do not try to optimize it.
	 */
again:
	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
		return (NULL);
	entry = start_entry;
	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
		++mycpu->gd_cnt.v_intrans_coll;
		++mycpu->gd_cnt.v_intrans_wait;
		vm_map_transition_wait(map);
		/*
		 * entry and/or start_entry may have been clipped while
		 * we slept, or may have gone away entirely.  We have
		 * to restart from the lookup.
		 */
		goto again;
	}

	/*
	 * Since we hold an exclusive map lock we do not have to restart
	 * after clipping, even though clipping may block in zalloc.
	 */
	vm_map_clip_start(map, entry, start, countp);
	vm_map_clip_end(map, entry, end, countp);
	entry->eflags |= MAP_ENTRY_IN_TRANSITION;

	/*
	 * Scan entries covered by the range.  When working on the next
	 * entry a restart need only re-loop on the current entry which
	 * we have already locked, since 'next' may have changed.  Also,
	 * even though entry is safe, it may have been clipped so we
	 * have to iterate forwards through the clip after sleeping.
	 */
	while (entry->next != &map->header && entry->next->start < end) {
		vm_map_entry_t next = entry->next;

		if (flags & MAP_CLIP_NO_HOLES) {
			if (next->start > entry->end) {
				vm_map_unclip_range(map, start_entry,
					start, entry->end, countp, flags);
				return(NULL);
			}
		}

		if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
			vm_offset_t save_end = entry->end;
			next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			++mycpu->gd_cnt.v_intrans_coll;
			++mycpu->gd_cnt.v_intrans_wait;
			vm_map_transition_wait(map);

			/*
			 * clips might have occurred while we blocked.
			 */
			CLIP_CHECK_FWD(entry, save_end);
			CLIP_CHECK_BACK(start_entry, start);
			continue;
		}
		/*
		 * No restart necessary even though clip_end may block, we
		 * are holding the map lock.
		 */
		vm_map_clip_end(map, next, end, countp);
		next->eflags |= MAP_ENTRY_IN_TRANSITION;
		entry = next;
	}
	if (flags & MAP_CLIP_NO_HOLES) {
		if (entry->end != end) {
			vm_map_unclip_range(map, start_entry,
				start, entry->end, countp, flags);
			return(NULL);
		}
	}
	return(start_entry);
}

/*
 * Undo the effect of vm_map_clip_range().  You should pass the same
 * flags and the same range that you passed to vm_map_clip_range().
 * This code will clear the in-transition flag on the entries and
 * wake up anyone waiting.  This code will also simplify the sequence
 * and attempt to merge it with entries before and after the sequence.
 *
 * The map must be locked on entry and will remain locked on return.
 *
 * Note that you should also pass the start_entry returned by
 * vm_map_clip_range().  However, if you block between the two calls
 * with the map unlocked please be aware that the start_entry may
 * have been clipped and you may need to scan it backwards to find
 * the entry corresponding with the original start address.  You are
 * responsible for this, vm_map_unclip_range() expects the correct
 * start_entry to be passed to it and will KASSERT otherwise.
 */
static
void
vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry,
		    vm_offset_t start, vm_offset_t end,
		    int *countp, int flags)
{
	vm_map_entry_t entry;

	entry = start_entry;

	KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
	while (entry != &map->header && entry->start < end) {
		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
			("in-transition flag not set during unclip on: %p",
			entry));
		KASSERT(entry->end <= end,
			("unclip_range: tail wasn't clipped"));
		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			wakeup(map);
		}
		entry = entry->next;
	}

	/*
	 * Simplification does not block so there is no restart case.
	 */
	entry = start_entry;
	while (entry != &map->header && entry->start < end) {
		vm_map_simplify_entry(map, entry, countp);
		entry = entry->next;
	}
}

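/*
 * Typical pairing (a sketch of the usage described in the two comments
 * above):
 *
 *	entry = vm_map_clip_range(map, start, end, &count,
 *				  MAP_CLIP_NO_HOLES);
 *	if (entry == NULL)
 *		... fail, range missing or contains holes ...
 *	... operate on the in-transition entries ...
 *	vm_map_unclip_range(map, entry, start, end, &count,
 *			    MAP_CLIP_NO_HOLES);
 */
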
/*
 * Mark the given range as handled by a subordinate map.
 *
 * This range must have been created with vm_map_find(), and no other
 * operations may have been performed on this range prior to calling
 * vm_map_submap().
 *
 * Submappings cannot be removed.
 *
 * No requirements.
 */
int
vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start, &count);
	} else {
		entry = entry->next;
	}

	vm_map_clip_end(map, entry, end, &count);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->object.sub_map = submap;
		entry->maptype = VM_MAPTYPE_SUBMAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (result);
}

1688 /*
1689  * Sets the protection of the specified address region in the target map.
1690  * If "set_max" is specified, the maximum protection is to be set;
1691  * otherwise, only the current protection is affected.
1692  *
1693  * The protection is not applicable to submaps, but is applicable to normal
1694  * maps and maps governed by virtual page tables.  For example, when operating
1695  * on a virtual page table our protection basically controls how COW occurs
1696  * on the backing object, whereas the virtual page table abstraction itself
1697  * is an abstraction for userland.
1698  *
1699  * No requirements.
1700  */
1701 int
1702 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1703 	       vm_prot_t new_prot, boolean_t set_max)
1704 {
1705 	vm_map_entry_t current;
1706 	vm_map_entry_t entry;
1707 	int count;
1708 
1709 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1710 	vm_map_lock(map);
1711 
1712 	VM_MAP_RANGE_CHECK(map, start, end);
1713 
1714 	if (vm_map_lookup_entry(map, start, &entry)) {
1715 		vm_map_clip_start(map, entry, start, &count);
1716 	} else {
1717 		entry = entry->next;
1718 	}
1719 
1720 	/*
1721 	 * Make a first pass to check for protection violations.
1722 	 */
1723 	current = entry;
1724 	while ((current != &map->header) && (current->start < end)) {
1725 		if (current->maptype == VM_MAPTYPE_SUBMAP) {
1726 			vm_map_unlock(map);
1727 			vm_map_entry_release(count);
1728 			return (KERN_INVALID_ARGUMENT);
1729 		}
1730 		if ((new_prot & current->max_protection) != new_prot) {
1731 			vm_map_unlock(map);
1732 			vm_map_entry_release(count);
1733 			return (KERN_PROTECTION_FAILURE);
1734 		}
1735 		current = current->next;
1736 	}
1737 
1738 	/*
1739 	 * Go back and fix up protections. [Note that clipping is not
1740 	 * necessary the second time.]
1741 	 */
1742 	current = entry;
1743 
1744 	while ((current != &map->header) && (current->start < end)) {
1745 		vm_prot_t old_prot;
1746 
1747 		vm_map_clip_end(map, current, end, &count);
1748 
1749 		old_prot = current->protection;
1750 		if (set_max) {
1751 			current->protection =
1752 			    (current->max_protection = new_prot) &
1753 			    old_prot;
1754 		} else {
1755 			current->protection = new_prot;
1756 		}
1757 
1758 		/*
1759 		 * Update physical map if necessary. Worry about copy-on-write
1760 		 * here -- CHECK THIS XXX
1761 		 */
1762 
1763 		if (current->protection != old_prot) {
1764 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1765 							VM_PROT_ALL)
1766 
1767 			pmap_protect(map->pmap, current->start,
1768 			    current->end,
1769 			    current->protection & MASK(current));
1770 #undef	MASK
1771 		}
1772 
1773 		vm_map_simplify_entry(map, current, &count);
1774 
1775 		current = current->next;
1776 	}
1777 
1778 	vm_map_unlock(map);
1779 	vm_map_entry_release(count);
1780 	return (KERN_SUCCESS);
1781 }
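
/*
 * Illustrative sketch (hypothetical caller): write-protect a range of a
 * process vmspace without touching the maximum protection.  "p",
 * "start" and "end" are assumed to be supplied by the caller:
 *
 *	rv = vm_map_protect(&p->p_vmspace->vm_map, start, end,
 *			    VM_PROT_READ, FALSE);
 *
 * KERN_PROTECTION_FAILURE is returned if the requested protection
 * exceeds max_protection anywhere in the range.
 */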
1782 
1783 /*
1784  * This routine traverses a process's map handling the madvise
1785  * system call.  Advisories are classified as either those affecting
1786  * the vm_map_entry structure, or those affecting the underlying
1787  * objects.
1788  *
1789  * The <value> argument is used for extended madvise calls.
1790  *
1791  * No requirements.
1792  */
1793 int
1794 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end,
1795 	       int behav, off_t value)
1796 {
1797 	vm_map_entry_t current, entry;
1798 	int modify_map = 0;
1799 	int error = 0;
1800 	int count;
1801 
1802 	/*
1803 	 * Some madvise calls directly modify the vm_map_entry, in which case
1804 	 * we need to use an exclusive lock on the map and we need to perform
1805 	 * various clipping operations.  Otherwise we only need a read-lock
1806 	 * on the map.
1807 	 */
1808 
1809 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1810 
1811 	switch(behav) {
1812 	case MADV_NORMAL:
1813 	case MADV_SEQUENTIAL:
1814 	case MADV_RANDOM:
1815 	case MADV_NOSYNC:
1816 	case MADV_AUTOSYNC:
1817 	case MADV_NOCORE:
1818 	case MADV_CORE:
1819 	case MADV_SETMAP:
1820 	case MADV_INVAL:
1821 		modify_map = 1;
1822 		vm_map_lock(map);
1823 		break;
1824 	case MADV_WILLNEED:
1825 	case MADV_DONTNEED:
1826 	case MADV_FREE:
1827 		vm_map_lock_read(map);
1828 		break;
1829 	default:
1830 		vm_map_entry_release(count);
1831 		return (EINVAL);
1832 	}
1833 
1834 	/*
1835 	 * Locate starting entry and clip if necessary.
1836 	 */
1837 
1838 	VM_MAP_RANGE_CHECK(map, start, end);
1839 
1840 	if (vm_map_lookup_entry(map, start, &entry)) {
1841 		if (modify_map)
1842 			vm_map_clip_start(map, entry, start, &count);
1843 	} else {
1844 		entry = entry->next;
1845 	}
1846 
1847 	if (modify_map) {
1848 		/*
1849 		 * madvise behaviors that are implemented in the vm_map_entry.
1850 		 *
1851 		 * We clip the vm_map_entry so that behavioral changes are
1852 		 * limited to the specified address range.
1853 		 */
1854 		for (current = entry;
1855 		     (current != &map->header) && (current->start < end);
1856 		     current = current->next
1857 		) {
1858 			if (current->maptype == VM_MAPTYPE_SUBMAP)
1859 				continue;
1860 
1861 			vm_map_clip_end(map, current, end, &count);
1862 
1863 			switch (behav) {
1864 			case MADV_NORMAL:
1865 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1866 				break;
1867 			case MADV_SEQUENTIAL:
1868 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1869 				break;
1870 			case MADV_RANDOM:
1871 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1872 				break;
1873 			case MADV_NOSYNC:
1874 				current->eflags |= MAP_ENTRY_NOSYNC;
1875 				break;
1876 			case MADV_AUTOSYNC:
1877 				current->eflags &= ~MAP_ENTRY_NOSYNC;
1878 				break;
1879 			case MADV_NOCORE:
1880 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
1881 				break;
1882 			case MADV_CORE:
1883 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
1884 				break;
1885 			case MADV_INVAL:
1886 				/*
1887 				 * Invalidate the related pmap entries, used
1888 				 * to flush portions of the real kernel's
1889 				 * pmap when the caller has removed or
1890 				 * modified existing mappings in a virtual
1891 				 * page table.
1892 				 */
1893 				pmap_remove(map->pmap,
1894 					    current->start, current->end);
1895 				break;
1896 			case MADV_SETMAP:
1897 				/*
1898 				 * Set the page directory page for a map
1899 				 * governed by a virtual page table.  Mark
1900 				 * the entry as being governed by a virtual
1901 				 * page table if it is not.
1902 				 *
1903 				 * XXX the page directory page is stored
1904 				 * in the avail_ssize field if the map_entry.
1905 				 *
1906 				 * XXX the map simplification code does not
1907 				 * compare this field so weird things may
1908 				 * happen if you do not apply this function
1909 				 * to the entire mapping governed by the
1910 				 * virtual page table.
1911 				 */
1912 				if (current->maptype != VM_MAPTYPE_VPAGETABLE) {
1913 					error = EINVAL;
1914 					break;
1915 				}
1916 				current->aux.master_pde = value;
1917 				pmap_remove(map->pmap,
1918 					    current->start, current->end);
1919 				break;
1920 			default:
1921 				error = EINVAL;
1922 				break;
1923 			}
1924 			vm_map_simplify_entry(map, current, &count);
1925 		}
1926 		vm_map_unlock(map);
1927 	} else {
1928 		vm_pindex_t pindex;
1929 		int count;
1930 
1931 		/*
1932 		 * madvise behaviors that are implemented in the underlying
1933 		 * vm_object.
1934 		 *
1935 		 * Since we don't clip the vm_map_entry, we have to clip
1936 		 * the vm_object pindex and count.
1937 		 *
1938 		 * NOTE!  We currently do not support these functions on
1939 		 * virtual page tables.
1940 		 */
1941 		for (current = entry;
1942 		     (current != &map->header) && (current->start < end);
1943 		     current = current->next
1944 		) {
1945 			vm_offset_t useStart;
1946 
1947 			if (current->maptype != VM_MAPTYPE_NORMAL)
1948 				continue;
1949 
1950 			pindex = OFF_TO_IDX(current->offset);
1951 			count = atop(current->end - current->start);
1952 			useStart = current->start;
1953 
1954 			if (current->start < start) {
1955 				pindex += atop(start - current->start);
1956 				count -= atop(start - current->start);
1957 				useStart = start;
1958 			}
1959 			if (current->end > end)
1960 				count -= atop(current->end - end);
1961 
1962 			if (count <= 0)
1963 				continue;
1964 
1965 			vm_object_madvise(current->object.vm_object,
1966 					  pindex, count, behav);
1967 
1968 			/*
1969 			 * Try to populate the page table.  Mappings governed
1970 			 * by virtual page tables cannot be pre-populated
1971 			 * without a lot of work so don't try.
1972 			 */
1973 			if (behav == MADV_WILLNEED &&
1974 			    current->maptype != VM_MAPTYPE_VPAGETABLE) {
1975 				pmap_object_init_pt(
1976 				    map->pmap,
1977 				    useStart,
1978 				    current->protection,
1979 				    current->object.vm_object,
1980 				    pindex,
1981 				    (count << PAGE_SHIFT),
1982 				    MAP_PREFAULT_MADVISE
1983 				);
1984 			}
1985 		}
1986 		vm_map_unlock_read(map);
1987 	}
1988 	vm_map_entry_release(count);
1989 	return (error);
1990 }
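
/*
 * Illustrative sketch (hypothetical caller): a simple madvise() style
 * invocation, with "map", "addr" and "len" assumed page-aligned by the
 * caller.  The final argument only matters for extended advisories
 * such as MADV_SETMAP:
 *
 *	error = vm_map_madvise(map, addr, addr + len, MADV_WILLNEED, 0);
 */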
1991 
1992 
1993 /*
1994  * Sets the inheritance of the specified address range in the target map.
1995  * Inheritance affects how the map will be shared with child maps at the
1996  * time of vm_map_fork.
1997  */
1998 int
1999 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2000 	       vm_inherit_t new_inheritance)
2001 {
2002 	vm_map_entry_t entry;
2003 	vm_map_entry_t temp_entry;
2004 	int count;
2005 
2006 	switch (new_inheritance) {
2007 	case VM_INHERIT_NONE:
2008 	case VM_INHERIT_COPY:
2009 	case VM_INHERIT_SHARE:
2010 		break;
2011 	default:
2012 		return (KERN_INVALID_ARGUMENT);
2013 	}
2014 
2015 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2016 	vm_map_lock(map);
2017 
2018 	VM_MAP_RANGE_CHECK(map, start, end);
2019 
2020 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2021 		entry = temp_entry;
2022 		vm_map_clip_start(map, entry, start, &count);
2023 	} else
2024 		entry = temp_entry->next;
2025 
2026 	while ((entry != &map->header) && (entry->start < end)) {
2027 		vm_map_clip_end(map, entry, end, &count);
2028 
2029 		entry->inheritance = new_inheritance;
2030 
2031 		vm_map_simplify_entry(map, entry, &count);
2032 
2033 		entry = entry->next;
2034 	}
2035 	vm_map_unlock(map);
2036 	vm_map_entry_release(count);
2037 	return (KERN_SUCCESS);
2038 }
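
/*
 * Illustrative sketch (hypothetical caller): prevent a region from
 * appearing in children across fork, as minherit(2) would:
 *
 *	rv = vm_map_inherit(&p->p_vmspace->vm_map, start, end,
 *			    VM_INHERIT_NONE);
 */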
2039 
2040 /*
2041  * Implement the semantics of mlock and munlock (based on new_pageable).
2042  */
2043 int
2044 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
2045 	      boolean_t new_pageable)
2046 {
2047 	vm_map_entry_t entry;
2048 	vm_map_entry_t start_entry;
2049 	vm_offset_t end;
2050 	int rv = KERN_SUCCESS;
2051 	int count;
2052 
2053 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2054 	vm_map_lock(map);
2055 	VM_MAP_RANGE_CHECK(map, start, real_end);
2056 	end = real_end;
2057 
2058 	start_entry = vm_map_clip_range(map, start, end, &count,
2059 					MAP_CLIP_NO_HOLES);
2060 	if (start_entry == NULL) {
2061 		vm_map_unlock(map);
2062 		vm_map_entry_release(count);
2063 		return (KERN_INVALID_ADDRESS);
2064 	}
2065 
2066 	if (new_pageable == 0) {
2067 		entry = start_entry;
2068 		while ((entry != &map->header) && (entry->start < end)) {
2069 			vm_offset_t save_start;
2070 			vm_offset_t save_end;
2071 
2072 			/*
2073 			 * Already user wired or hard wired (trivial cases)
2074 			 */
2075 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
2076 				entry = entry->next;
2077 				continue;
2078 			}
2079 			if (entry->wired_count != 0) {
2080 				entry->wired_count++;
2081 				entry->eflags |= MAP_ENTRY_USER_WIRED;
2082 				entry = entry->next;
2083 				continue;
2084 			}
2085 
2086 			/*
2087 			 * A new wiring requires instantiation of appropriate
2088 			 * management structures and the faulting in of the
2089 			 * page.
2090 			 */
2091 			if (entry->maptype != VM_MAPTYPE_SUBMAP) {
2092 				int copyflag = entry->eflags &
2093 					       MAP_ENTRY_NEEDS_COPY;
2094 				if (copyflag && ((entry->protection &
2095 						  VM_PROT_WRITE) != 0)) {
2096 					vm_map_entry_shadow(entry, 0);
2097 				} else if (entry->object.vm_object == NULL &&
2098 					   !map->system_map) {
2099 					vm_map_entry_allocate_object(entry);
2100 				}
2101 			}
2102 			entry->wired_count++;
2103 			entry->eflags |= MAP_ENTRY_USER_WIRED;
2104 
2105 			/*
2106 			 * Now fault in the area.  Note that vm_fault_wire()
2107 			 * may release the map lock temporarily; it will be
2108 			 * relocked on return.  The in-transition
2109 			 * flag protects the entries.
2110 			 */
2111 			save_start = entry->start;
2112 			save_end = entry->end;
2113 			rv = vm_fault_wire(map, entry, TRUE);
2114 			if (rv) {
2115 				CLIP_CHECK_BACK(entry, save_start);
2116 				for (;;) {
2117 					KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
2118 					entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2119 					entry->wired_count = 0;
2120 					if (entry->end == save_end)
2121 						break;
2122 					entry = entry->next;
2123 					KASSERT(entry != &map->header, ("bad entry clip during backout"));
2124 				}
2125 				end = save_start;	/* unwire the rest */
2126 				break;
2127 			}
2128 			/*
2129 			 * note that even though the entry might have been
2130 			 * clipped, the USER_WIRED flag we set prevents
2131 			 * duplication so we do not have to do a
2132 			 * clip check.
2133 			 */
2134 			entry = entry->next;
2135 		}
2136 
2137 		/*
2138 		 * If we failed fall through to the unwiring section to
2139 		 * unwire what we had wired so far.  'end' has already
2140 		 * been adjusted.
2141 		 */
2142 		if (rv)
2143 			new_pageable = 1;
2144 
2145 		/*
2146 		 * start_entry might have been clipped if we unlocked the
2147 		 * map and blocked.  No matter how clipped it has gotten
2148 		 * there should be a fragment that is on our start boundary.
2149 		 */
2150 		CLIP_CHECK_BACK(start_entry, start);
2151 	}
2152 
2153 	/*
2154 	 * Deal with the unwiring case.
2155 	 */
2156 	if (new_pageable) {
2157 		/*
2158 		 * This is the unwiring case.  We must first ensure that the
2159 		 * range to be unwired is really wired down.  We know there
2160 		 * are no holes.
2161 		 */
2162 		entry = start_entry;
2163 		while ((entry != &map->header) && (entry->start < end)) {
2164 			if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2165 				rv = KERN_INVALID_ARGUMENT;
2166 				goto done;
2167 			}
2168 			KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
2169 			entry = entry->next;
2170 		}
2171 
2172 		/*
2173 		 * Now decrement the wiring count for each region. If a region
2174 		 * becomes completely unwired, unwire its physical pages and
2175 		 * mappings.
2176 		 */
2177 		/*
2178 		 * The first pass above walked "entry" past the end of the
2179 		 * range while validating that every entry was user-wired.
2180 		 * The loop variable must therefore be reset to start_entry
2181 		 * before the unwiring pass below picks it up.  If the
2182 		 * reset were omitted the second loop would never be
2183 		 * entered, the pages backing the entries would never be
2184 		 * unwired, and the wired pages would leak.  The reset
2185 		 * keeps both passes working over the same clipped range.
2186 		 */
2187 		entry = start_entry;
2188 		while ((entry != &map->header) && (entry->start < end)) {
2189 			KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED,
2190 				("expected USER_WIRED on entry %p", entry));
2191 			entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2192 			entry->wired_count--;
2193 			if (entry->wired_count == 0)
2194 				vm_fault_unwire(map, entry);
2195 			entry = entry->next;
2196 		}
2197 	}
2198 done:
2199 	vm_map_unclip_range(map, start_entry, start, real_end, &count,
2200 		MAP_CLIP_NO_HOLES);
2201 	map->timestamp++;
2202 	vm_map_unlock(map);
2203 	vm_map_entry_release(count);
2204 	return (rv);
2205 }
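
/*
 * Illustrative sketch: mlock(2) and munlock(2) both come through here,
 * differing only in new_pageable ("map", "addr" and "size" assumed):
 *
 *	vm_map_unwire(map, addr, addr + size, FALSE);	(wire: mlock)
 *	vm_map_unwire(map, addr, addr + size, TRUE);	(unwire: munlock)
 */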
2206 
2207 /*
2208  * Sets the pageability of the specified address range in the target map.
2209  * Regions specified as not pageable require locked-down physical
2210  * memory and physical page maps.
2211  *
2212  * The map must not be locked, but a reference must remain to the map
2213  * throughout the call.
2214  *
2215  * This function may be called via the zalloc path and must properly
2216  * reserve map entries for kernel_map.
2217  *
2218  * No requirements.
2219  */
2220 int
2221 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
2222 {
2223 	vm_map_entry_t entry;
2224 	vm_map_entry_t start_entry;
2225 	vm_offset_t end;
2226 	int rv = KERN_SUCCESS;
2227 	int count;
2228 
2229 	if (kmflags & KM_KRESERVE)
2230 		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
2231 	else
2232 		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2233 	vm_map_lock(map);
2234 	VM_MAP_RANGE_CHECK(map, start, real_end);
2235 	end = real_end;
2236 
2237 	start_entry = vm_map_clip_range(map, start, end, &count,
2238 					MAP_CLIP_NO_HOLES);
2239 	if (start_entry == NULL) {
2240 		vm_map_unlock(map);
2241 		rv = KERN_INVALID_ADDRESS;
2242 		goto failure;
2243 	}
2244 	if ((kmflags & KM_PAGEABLE) == 0) {
2245 		/*
2246 		 * Wiring.
2247 		 *
2248 		 * 1.  Holding the write lock, we create any shadow or zero-fill
2249 		 * objects that need to be created. Then we clip each map
2250 		 * entry to the region to be wired and increment its wiring
2251 		 * count.  We create objects before clipping the map entries
2252 		 * to avoid object proliferation.
2253 		 *
2254 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
2255 		 * fault in the pages for any newly wired area (wired_count is
2256 		 * 1).
2257 		 *
2258 		 * Downgrading to a read lock for vm_fault_wire avoids a
2259 		 * possible deadlock with another process that may have faulted
2260 		 * on one of the pages to be wired (it would mark the page busy,
2261 		 * blocking us, then in turn block on the map lock that we
2262 		 * hold).  Because of problems in the recursive lock package,
2263 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
2264 		 * any actions that require the write lock must be done
2265 		 * beforehand.  Because we keep the read lock on the map, the
2266 		 * copy-on-write status of the entries we modify here cannot
2267 		 * change.
2268 		 */
2269 		entry = start_entry;
2270 		while ((entry != &map->header) && (entry->start < end)) {
2271 			/*
2272 			 * Trivial case if the entry is already wired
2273 			 */
2274 			if (entry->wired_count) {
2275 				entry->wired_count++;
2276 				entry = entry->next;
2277 				continue;
2278 			}
2279 
2280 			/*
2281 			 * The entry is being newly wired, we have to setup
2282 			 * appropriate management structures.  A shadow
2283 			 * object is required for a copy-on-write region,
2284 			 * or a normal object for a zero-fill region.  We
2285 			 * do not have to do this for entries that point to sub
2286 			 * maps because we won't hold the lock on the sub map.
2287 			 */
2288 			if (entry->maptype != VM_MAPTYPE_SUBMAP) {
2289 				int copyflag = entry->eflags &
2290 					       MAP_ENTRY_NEEDS_COPY;
2291 				if (copyflag && ((entry->protection &
2292 						  VM_PROT_WRITE) != 0)) {
2293 					vm_map_entry_shadow(entry, 0);
2294 				} else if (entry->object.vm_object == NULL &&
2295 					   !map->system_map) {
2296 					vm_map_entry_allocate_object(entry);
2297 				}
2298 			}
2299 
2300 			entry->wired_count++;
2301 			entry = entry->next;
2302 		}
2303 
2304 		/*
2305 		 * Pass 2.
2306 		 */
2307 
2308 		/*
2309 		 * HACK HACK HACK HACK
2310 		 *
2311 		 * vm_fault_wire() temporarily unlocks the map to avoid
2312 		 * deadlocks.  The in-transition flag from vm_map_clip_range
2313 		 * call should protect us from changes while the map is
2314 		 * unlocked.  T
2315 		 *
2316 		 * NOTE: Previously this comment stated that clipping might
2317 		 *	 still occur while the entry is unlocked, but from
2318 		 *	 what I can tell it actually cannot.
2319 		 *
2320 		 *	 It is unclear whether the CLIP_CHECK_*() calls
2321 		 *	 are still needed but we keep them in anyway.
2322 		 *
2323 		 * HACK HACK HACK HACK
2324 		 */
2325 
2326 		entry = start_entry;
2327 		while (entry != &map->header && entry->start < end) {
2328 			/*
2329 			 * If vm_fault_wire fails for any page we need to undo
2330 			 * what has been done.  We decrement the wiring count
2331 			 * for those pages which have not yet been wired (now)
2332 			 * and unwire those that have (later).
2333 			 */
2334 			vm_offset_t save_start = entry->start;
2335 			vm_offset_t save_end = entry->end;
2336 
2337 			if (entry->wired_count == 1)
2338 				rv = vm_fault_wire(map, entry, FALSE);
2339 			if (rv) {
2340 				CLIP_CHECK_BACK(entry, save_start);
2341 				for (;;) {
2342 					KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
2343 					entry->wired_count = 0;
2344 					if (entry->end == save_end)
2345 						break;
2346 					entry = entry->next;
2347 					KASSERT(entry != &map->header, ("bad entry clip during backout"));
2348 				}
2349 				end = save_start;
2350 				break;
2351 			}
2352 			CLIP_CHECK_FWD(entry, save_end);
2353 			entry = entry->next;
2354 		}
2355 
2356 		/*
2357 		 * If a failure occurred undo everything by falling through
2358 		 * to the unwiring code.  'end' has already been adjusted
2359 		 * appropriately.
2360 		 */
2361 		if (rv)
2362 			kmflags |= KM_PAGEABLE;
2363 
2364 		/*
2365 		 * start_entry is still IN_TRANSITION but may have been
2366 		 * clipped since vm_fault_wire() unlocks and relocks the
2367 		 * map.  No matter how clipped it has gotten there should
2368 		 * be a fragment that is on our start boundary.
2369 		 */
2370 		CLIP_CHECK_BACK(start_entry, start);
2371 	}
2372 
2373 	if (kmflags & KM_PAGEABLE) {
2374 		/*
2375 		 * This is the unwiring case.  We must first ensure that the
2376 		 * range to be unwired is really wired down.  We know there
2377 		 * are no holes.
2378 		 */
2379 		entry = start_entry;
2380 		while ((entry != &map->header) && (entry->start < end)) {
2381 			if (entry->wired_count == 0) {
2382 				rv = KERN_INVALID_ARGUMENT;
2383 				goto done;
2384 			}
2385 			entry = entry->next;
2386 		}
2387 
2388 		/*
2389 		 * Now decrement the wiring count for each region. If a region
2390 		 * becomes completely unwired, unwire its physical pages and
2391 		 * mappings.
2392 		 */
2393 		entry = start_entry;
2394 		while ((entry != &map->header) && (entry->start < end)) {
2395 			entry->wired_count--;
2396 			if (entry->wired_count == 0)
2397 				vm_fault_unwire(map, entry);
2398 			entry = entry->next;
2399 		}
2400 	}
2401 done:
2402 	vm_map_unclip_range(map, start_entry, start, real_end,
2403 			    &count, MAP_CLIP_NO_HOLES);
2404 	map->timestamp++;
2405 	vm_map_unlock(map);
2406 failure:
2407 	if (kmflags & KM_KRESERVE)
2408 		vm_map_entry_krelease(count);
2409 	else
2410 		vm_map_entry_release(count);
2411 	return (rv);
2412 }
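
/*
 * Illustrative sketch (hypothetical caller): wiring and later unwiring
 * a kernel virtual range.  A caller on the zalloc path would pass
 * KM_KRESERVE so the kernel reserve pool is used:
 *
 *	rv = vm_map_wire(&kernel_map, addr, addr + size, KM_KRESERVE);
 *	...
 *	rv = vm_map_wire(&kernel_map, addr, addr + size,
 *			 KM_PAGEABLE | KM_KRESERVE);
 */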
2413 
2414 /*
2415  * Mark a newly allocated address range as wired but do not fault in
2416  * the pages.  The caller is expected to load the pages into the object.
2417  *
2418  * The map must be locked on entry and will remain locked on return.
2419  * No other requirements.
2420  */
2421 void
2422 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size,
2423 		       int *countp)
2424 {
2425 	vm_map_entry_t scan;
2426 	vm_map_entry_t entry;
2427 
2428 	entry = vm_map_clip_range(map, addr, addr + size,
2429 				  countp, MAP_CLIP_NO_HOLES);
2430 	for (scan = entry;
2431 	     scan != &map->header && scan->start < addr + size;
2432 	     scan = scan->next) {
2433 	    KKASSERT(scan->wired_count == 0);
2434 	    scan->wired_count = 1;
2435 	}
2436 	vm_map_unclip_range(map, entry, addr, addr + size,
2437 			    countp, MAP_CLIP_NO_HOLES);
2438 }
2439 
2440 /*
2441  * Push any dirty cached pages in the address range to their pager.
2442  * If syncio is TRUE, dirty pages are written synchronously.
2443  * If invalidate is TRUE, any cached pages are freed as well.
2444  *
2445  * This routine is called by sys_msync()
2446  *
2447  * Returns an error if any part of the specified range is not mapped.
2448  *
2449  * No requirements.
2450  */
2451 int
2452 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end,
2453 	     boolean_t syncio, boolean_t invalidate)
2454 {
2455 	vm_map_entry_t current;
2456 	vm_map_entry_t entry;
2457 	vm_size_t size;
2458 	vm_object_t object;
2459 	vm_object_t tobj;
2460 	vm_ooffset_t offset;
2461 
2462 	vm_map_lock_read(map);
2463 	VM_MAP_RANGE_CHECK(map, start, end);
2464 	if (!vm_map_lookup_entry(map, start, &entry)) {
2465 		vm_map_unlock_read(map);
2466 		return (KERN_INVALID_ADDRESS);
2467 	}
2468 	lwkt_gettoken(&map->token);
2469 
2470 	/*
2471 	 * Make a first pass to check for holes.
2472 	 */
2473 	for (current = entry; current->start < end; current = current->next) {
2474 		if (current->maptype == VM_MAPTYPE_SUBMAP) {
2475 			lwkt_reltoken(&map->token);
2476 			vm_map_unlock_read(map);
2477 			return (KERN_INVALID_ARGUMENT);
2478 		}
2479 		if (end > current->end &&
2480 		    (current->next == &map->header ||
2481 			current->end != current->next->start)) {
2482 			lwkt_reltoken(&map->token);
2483 			vm_map_unlock_read(map);
2484 			return (KERN_INVALID_ADDRESS);
2485 		}
2486 	}
2487 
2488 	if (invalidate)
2489 		pmap_remove(vm_map_pmap(map), start, end);
2490 
2491 	/*
2492 	 * Make a second pass, cleaning/uncaching pages from the indicated
2493 	 * objects as we go.
2494 	 */
2495 	for (current = entry; current->start < end; current = current->next) {
2496 		offset = current->offset + (start - current->start);
2497 		size = (end <= current->end ? end : current->end) - start;
2498 		if (current->maptype == VM_MAPTYPE_SUBMAP) {
2499 			vm_map_t smap;
2500 			vm_map_entry_t tentry;
2501 			vm_size_t tsize;
2502 
2503 			smap = current->object.sub_map;
2504 			vm_map_lock_read(smap);
2505 			vm_map_lookup_entry(smap, offset, &tentry);
2506 			tsize = tentry->end - offset;
2507 			if (tsize < size)
2508 				size = tsize;
2509 			object = tentry->object.vm_object;
2510 			offset = tentry->offset + (offset - tentry->start);
2511 			vm_map_unlock_read(smap);
2512 		} else {
2513 			object = current->object.vm_object;
2514 		}
2515 
2516 		if (object)
2517 			vm_object_hold(object);
2518 
2519 		/*
2520 		 * Note that there is absolutely no sense in writing out
2521 		 * anonymous objects, so we track down the vnode object
2522 		 * to write out.
2523 		 * We invalidate (remove) all pages from the address space
2524 		 * anyway, for semantic correctness.
2525 		 *
2526 		 * note: certain anonymous maps, such as MAP_NOSYNC maps,
2527 		 * may start out with a NULL object.
2528 		 */
2529 		while (object && (tobj = object->backing_object) != NULL) {
2530 			vm_object_hold(tobj);
2531 			if (tobj == object->backing_object) {
2532 				vm_object_lock_swap();
2533 				offset += object->backing_object_offset;
2534 				vm_object_drop(object);
2535 				object = tobj;
2536 				if (object->size < OFF_TO_IDX(offset + size))
2537 					size = IDX_TO_OFF(object->size) -
2538 					       offset;
2539 				break;
2540 			}
2541 			vm_object_drop(tobj);
2542 		}
2543 		if (object && (object->type == OBJT_VNODE) &&
2544 		    (current->protection & VM_PROT_WRITE) &&
2545 		    (object->flags & OBJ_NOMSYNC) == 0) {
2546 			/*
2547 			 * Flush pages if writing is allowed, invalidate them
2548 			 * if invalidation requested.  Pages undergoing I/O
2549 			 * will be ignored by vm_object_page_remove().
2550 			 *
2551 			 * We cannot lock the vnode and then wait for paging
2552 			 * to complete without deadlocking against vm_fault.
2553 			 * Instead we simply call vm_object_page_remove() and
2554 			 * allow it to block internally on a page-by-page
2555 			 * basis when it encounters pages undergoing async
2556 			 * I/O.
2557 			 */
2558 			int flags;
2559 
2560 			/* no chain wait needed for vnode objects */
2561 			vm_object_reference_locked(object);
2562 			vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY);
2563 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2564 			flags |= invalidate ? OBJPC_INVAL : 0;
2565 
2566 			/*
2567 			 * When operating on a virtual page table just
2568 			 * flush the whole object.  XXX we probably ought
2569 			 * to be smarter about this.
2570 			 */
2571 			switch(current->maptype) {
2572 			case VM_MAPTYPE_NORMAL:
2573 				vm_object_page_clean(object,
2574 				    OFF_TO_IDX(offset),
2575 				    OFF_TO_IDX(offset + size + PAGE_MASK),
2576 				    flags);
2577 				break;
2578 			case VM_MAPTYPE_VPAGETABLE:
2579 				vm_object_page_clean(object, 0, 0, flags);
2580 				break;
2581 			}
2582 			vn_unlock(((struct vnode *)object->handle));
2583 			vm_object_deallocate_locked(object);
2584 		}
2585 		if (object && invalidate &&
2586 		   ((object->type == OBJT_VNODE) ||
2587 		    (object->type == OBJT_DEVICE))) {
2588 			int clean_only =
2589 				(object->type == OBJT_DEVICE) ? FALSE : TRUE;
2590 			/* no chain wait needed for vnode/device objects */
2591 			vm_object_reference_locked(object);
2592 			switch(current->maptype) {
2593 			case VM_MAPTYPE_NORMAL:
2594 				vm_object_page_remove(object,
2595 				    OFF_TO_IDX(offset),
2596 				    OFF_TO_IDX(offset + size + PAGE_MASK),
2597 				    clean_only);
2598 				break;
2599 			case VM_MAPTYPE_VPAGETABLE:
2600 				vm_object_page_remove(object, 0, 0, clean_only);
2601 				break;
2602 			}
2603 			vm_object_deallocate_locked(object);
2604 		}
2605 		start += size;
2606 		if (object)
2607 			vm_object_drop(object);
2608 	}
2609 
2610 	lwkt_reltoken(&map->token);
2611 	vm_map_unlock_read(map);
2612 
2613 	return (KERN_SUCCESS);
2614 }
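
/*
 * Illustrative sketch: an msync(2) style caller might map its flags as
 * follows ("map", "addr" and "len" assumed page-aligned):
 *
 *	rv = vm_map_clean(map, addr, addr + len,
 *			  (flags & MS_SYNC) ? TRUE : FALSE,
 *			  (flags & MS_INVALIDATE) ? TRUE : FALSE);
 */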
2615 
2616 /*
2617  * Make the region specified by this entry pageable.
2618  *
2619  * The vm_map must be exclusively locked.
2620  */
2621 static void
2622 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2623 {
2624 	entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2625 	entry->wired_count = 0;
2626 	vm_fault_unwire(map, entry);
2627 }
2628 
2629 /*
2630  * Deallocate the given entry from the target map.
2631  *
2632  * The vm_map must be exclusively locked.
2633  */
2634 static void
2635 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
2636 {
2637 	vm_map_entry_unlink(map, entry);
2638 	map->size -= entry->end - entry->start;
2639 
2640 	switch(entry->maptype) {
2641 	case VM_MAPTYPE_NORMAL:
2642 	case VM_MAPTYPE_VPAGETABLE:
2643 		vm_object_deallocate(entry->object.vm_object);
2644 		break;
2645 	default:
2646 		break;
2647 	}
2648 
2649 	vm_map_entry_dispose(map, entry, countp);
2650 }
2651 
2652 /*
2653  * Deallocates the given address range from the target map.
2654  *
2655  * The vm_map must be exclusively locked.
2656  */
2657 int
2658 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
2659 {
2660 	vm_object_t object;
2661 	vm_map_entry_t entry;
2662 	vm_map_entry_t first_entry;
2663 
2664 	ASSERT_VM_MAP_LOCKED(map);
2665 	lwkt_gettoken(&map->token);
2666 again:
2667 	/*
2668 	 * Find the start of the region, and clip it.  Set entry to point
2669 	 * at the first record containing the requested address or, if no
2670 	 * such record exists, the next record with a greater address.  The
2671 	 * loop will run from this point until a record beyond the termination
2672 	 * address is encountered.
2673 	 *
2674 	 * map->hint must be adjusted to not point to anything we delete,
2675 	 * so set it to the entry prior to the one being deleted.
2676 	 *
2677 	 * GGG see other GGG comment.
2678 	 */
2679 	if (vm_map_lookup_entry(map, start, &first_entry)) {
2680 		entry = first_entry;
2681 		vm_map_clip_start(map, entry, start, countp);
2682 		map->hint = entry->prev;	/* possible problem XXX */
2683 	} else {
2684 		map->hint = first_entry;	/* possible problem XXX */
2685 		entry = first_entry->next;
2686 	}
2687 
2688 	/*
2689 	 * If a hole opens up prior to the current first_free then
2690 	 * adjust first_free.  As with map->hint, map->first_free
2691 	 * cannot be left set to anything we might delete.
2692 	 */
2693 	if (entry == &map->header) {
2694 		map->first_free = &map->header;
2695 	} else if (map->first_free->start >= start) {
2696 		map->first_free = entry->prev;
2697 	}
2698 
2699 	/*
2700 	 * Step through all entries in this region
2701 	 */
2702 	while ((entry != &map->header) && (entry->start < end)) {
2703 		vm_map_entry_t next;
2704 		vm_offset_t s, e;
2705 		vm_pindex_t offidxstart, offidxend, count;
2706 
2707 		/*
2708 		 * If we hit an in-transition entry we have to sleep and
2709 		 * retry.  It's easier (and not really slower) to just retry
2710 		 * since this case occurs so rarely and the hint is already
2711 		 * pointing at the right place.  We have to reset the
2712 		 * start offset so as not to accidently delete an entry
2713 		 * another process just created in vacated space.
2714 		 */
2715 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2716 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2717 			start = entry->start;
2718 			++mycpu->gd_cnt.v_intrans_coll;
2719 			++mycpu->gd_cnt.v_intrans_wait;
2720 			vm_map_transition_wait(map);
2721 			goto again;
2722 		}
2723 		vm_map_clip_end(map, entry, end, countp);
2724 
2725 		s = entry->start;
2726 		e = entry->end;
2727 		next = entry->next;
2728 
2729 		offidxstart = OFF_TO_IDX(entry->offset);
2730 		count = OFF_TO_IDX(e - s);
2731 		object = entry->object.vm_object;
2732 
2733 		/*
2734 		 * Unwire before removing addresses from the pmap; otherwise,
2735 		 * unwiring will put the entries back in the pmap.
2736 		 */
2737 		if (entry->wired_count != 0)
2738 			vm_map_entry_unwire(map, entry);
2739 
2740 		offidxend = offidxstart + count;
2741 
2742 		if (object == &kernel_object) {
2743 			vm_object_hold(object);
2744 			vm_object_page_remove(object, offidxstart,
2745 					      offidxend, FALSE);
2746 			vm_object_drop(object);
2747 		} else if (object && object->type != OBJT_DEFAULT &&
2748 			   object->type != OBJT_SWAP) {
2749 			/*
2750 			 * vnode object routines cannot be chain-locked
2751 			 */
2752 			vm_object_hold(object);
2753 			pmap_remove(map->pmap, s, e);
2754 			vm_object_drop(object);
2755 		} else if (object) {
2756 			vm_object_hold(object);
2757 			vm_object_chain_acquire(object);
2758 			pmap_remove(map->pmap, s, e);
2759 
2760 			if (object != NULL &&
2761 			    object->ref_count != 1 &&
2762 			    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) ==
2763 			     OBJ_ONEMAPPING &&
2764 			    (object->type == OBJT_DEFAULT ||
2765 			     object->type == OBJT_SWAP)) {
2766 				vm_object_collapse(object, NULL);
2767 				vm_object_page_remove(object, offidxstart,
2768 						      offidxend, FALSE);
2769 				if (object->type == OBJT_SWAP) {
2770 					swap_pager_freespace(object,
2771 							     offidxstart,
2772 							     count);
2773 				}
2774 				if (offidxend >= object->size &&
2775 				    offidxstart < object->size) {
2776 					object->size = offidxstart;
2777 				}
2778 			}
2779 			vm_object_chain_release(object);
2780 			vm_object_drop(object);
2781 		}
2782 
2783 		/*
2784 		 * Delete the entry (which may delete the object) only after
2785 		 * removing all pmap entries pointing to its pages.
2786 		 * (Otherwise, its page frames may be reallocated, and any
2787 		 * modify bits will be set in the wrong object!)
2788 		 */
2789 		vm_map_entry_delete(map, entry, countp);
2790 		entry = next;
2791 	}
2792 	lwkt_reltoken(&map->token);
2793 	return (KERN_SUCCESS);
2794 }
2795 
2796 /*
2797  * Remove the given address range from the target map.
2798  * This is the exported form of vm_map_delete.
2799  *
2800  * No requirements.
2801  */
2802 int
2803 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2804 {
2805 	int result;
2806 	int count;
2807 
2808 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2809 	vm_map_lock(map);
2810 	VM_MAP_RANGE_CHECK(map, start, end);
2811 	result = vm_map_delete(map, start, end, &count);
2812 	vm_map_unlock(map);
2813 	vm_map_entry_release(count);
2814 
2815 	return (result);
2816 }
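
/*
 * Illustrative sketch: releasing a kernel virtual allocation reduces
 * to a single call ("va" and "size" assumed):
 *
 *	vm_map_remove(&kernel_map, va, va + round_page(size));
 */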
2817 
2818 /*
2819  * Assert that the target map allows the specified privilege on the
2820  * entire address region given.  The entire region must be allocated.
2821  *
2822  * The caller must specify whether the vm_map is already locked or not.
2823  */
2824 boolean_t
2825 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2826 			vm_prot_t protection, boolean_t have_lock)
2827 {
2828 	vm_map_entry_t entry;
2829 	vm_map_entry_t tmp_entry;
2830 	boolean_t result;
2831 
2832 	if (have_lock == FALSE)
2833 		vm_map_lock_read(map);
2834 
2835 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2836 		if (have_lock == FALSE)
2837 			vm_map_unlock_read(map);
2838 		return (FALSE);
2839 	}
2840 	entry = tmp_entry;
2841 
2842 	result = TRUE;
2843 	while (start < end) {
2844 		if (entry == &map->header) {
2845 			result = FALSE;
2846 			break;
2847 		}
2848 		/*
2849 		 * No holes allowed!
2850 		 */
2851 
2852 		if (start < entry->start) {
2853 			result = FALSE;
2854 			break;
2855 		}
2856 		/*
2857 		 * Check protection associated with entry.
2858 		 */
2859 
2860 		if ((entry->protection & protection) != protection) {
2861 			result = FALSE;
2862 			break;
2863 		}
2864 		/* go to next entry */
2865 
2866 		start = entry->end;
2867 		entry = entry->next;
2868 	}
2869 	if (have_lock == FALSE)
2870 		vm_map_unlock_read(map);
2871 	return (result);
2872 }
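
/*
 * Illustrative sketch (hypothetical caller): verify readability of a
 * user range before operating on it directly:
 *
 *	if (!vm_map_check_protection(&p->p_vmspace->vm_map, start, end,
 *				     VM_PROT_READ, FALSE))
 *		return (EFAULT);
 */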
2873 
2874 /*
2875  * If appropriate this function shadows the original object with a new object
2876  * and moves the VM pages from the original object to the new object.
2877  * The original object will also be collapsed, if possible.
2878  *
2879  * We can only do this for normal memory objects with a single mapping, and
2880  * it only makes sense to do it if there are 2 or more refs on the original
2881  * object.  i.e. typically a memory object that has been extended into
2882  * multiple vm_map_entry's with non-overlapping ranges.
2883  *
2884  * This makes it easier to remove unused pages and keeps object inheritance
2885  * from being a negative impact on memory usage.
2886  *
2887  * On return the (possibly new) entry->object.vm_object will have an
2888  * additional ref on it for the caller to dispose of (usually by cloning
2889  * the vm_map_entry).  The additional ref had to be done in this routine
2890  * to avoid racing a collapse.  The object's ONEMAPPING flag will also be
2891  * cleared.
2892  *
2893  * The vm_map must be locked and its token held.
2894  */
2895 static void
2896 vm_map_split(vm_map_entry_t entry)
2897 {
2898 #if 0
2899 	/* UNOPTIMIZED */
2900 	vm_object_t oobject;
2901 
2902 	oobject = entry->object.vm_object;
2903 	vm_object_hold(oobject);
2904 	vm_object_chain_wait(oobject);
2905 	vm_object_reference_locked(oobject);
2906 	vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
2907 	vm_object_drop(oobject);
2908 #else
2909 	/* OPTIMIZED */
2910 	vm_object_t oobject, nobject, bobject;
2911 	vm_offset_t s, e;
2912 	vm_page_t m;
2913 	vm_pindex_t offidxstart, offidxend, idx;
2914 	vm_size_t size;
2915 	vm_ooffset_t offset;
2916 
2917 	/*
2918 	 * Setup.  Chain lock the original object throughout the entire
2919 	 * routine to prevent new page faults from occuring.
2920 	 *
2921 	 * XXX can madvise WILLNEED interfere with us too?
2922 	 */
2923 	oobject = entry->object.vm_object;
2924 	vm_object_hold(oobject);
2925 	vm_object_chain_acquire(oobject);
2926 
2927 	/*
2928 	 * Original object cannot be split?
2929 	 */
2930 	if (oobject->handle == NULL || (oobject->type != OBJT_DEFAULT &&
2931 					oobject->type != OBJT_SWAP)) {
2932 		vm_object_chain_release(oobject);
2933 		vm_object_reference_locked(oobject);
2934 		vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
2935 		vm_object_drop(oobject);
2936 		return;
2937 	}
2938 
2939 	/*
2940 	 * Collapse original object with its backing store as an
2941 	 * optimization to reduce chain lengths when possible.
2942 	 *
2943 	 * If ref_count <= 1 there aren't other non-overlapping vm_map_entry's
2944 	 * for oobject, so there's no point collapsing it.
2945 	 *
2946 	 * Then re-check whether the object can be split.
2947 	 */
2948 	vm_object_collapse(oobject, NULL);
2949 
2950 	if (oobject->ref_count <= 1 ||
2951 	    (oobject->type != OBJT_DEFAULT && oobject->type != OBJT_SWAP) ||
2952 	    (oobject->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) != OBJ_ONEMAPPING) {
2953 		vm_object_chain_release(oobject);
2954 		vm_object_reference_locked(oobject);
2955 		vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
2956 		vm_object_drop(oobject);
2957 		return;
2958 	}
2959 
2960 	/*
2961 	 * Acquire the chain lock on the backing object.
2962 	 *
2963 	 * Give bobject an additional ref count for when it will be shadowed
2964 	 * by nobject.
2965 	 */
2966 	if ((bobject = oobject->backing_object) != NULL) {
2967 		vm_object_hold(bobject);
2968 		vm_object_chain_wait(bobject);
2969 		vm_object_reference_locked(bobject);
2970 		vm_object_chain_acquire(bobject);
2971 		KKASSERT(oobject->backing_object == bobject);
2972 		KKASSERT((bobject->flags & OBJ_DEAD) == 0);
2973 	}
2974 
2975 	/*
2976 	 * Calculate the object page range and allocate the new object.
2977 	 */
2978 	offset = entry->offset;
2979 	s = entry->start;
2980 	e = entry->end;
2981 
2982 	offidxstart = OFF_TO_IDX(offset);
2983 	offidxend = offidxstart + OFF_TO_IDX(e - s);
2984 	size = offidxend - offidxstart;
2985 
2986 	switch(oobject->type) {
2987 	case OBJT_DEFAULT:
2988 		nobject = default_pager_alloc(NULL, IDX_TO_OFF(size),
2989 					      VM_PROT_ALL, 0);
2990 		break;
2991 	case OBJT_SWAP:
2992 		nobject = swap_pager_alloc(NULL, IDX_TO_OFF(size),
2993 					   VM_PROT_ALL, 0);
2994 		break;
2995 	default:
2996 		/* not reached */
2997 		nobject = NULL;
2998 		KKASSERT(0);
2999 	}
3000 
3001 	if (nobject == NULL) {
3002 		if (bobject) {
3003 			vm_object_chain_release(bobject);
3004 			vm_object_deallocate(bobject);
3005 			vm_object_drop(bobject);
3006 		}
3007 		vm_object_chain_release(oobject);
3008 		vm_object_reference_locked(oobject);
3009 		vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3010 		vm_object_drop(oobject);
3011 		return;
3012 	}
3013 
3014 	/*
3015 	 * The new object will replace entry->object.vm_object so it needs
3016 	 * a second reference (the caller expects an additional ref).
3017 	 */
3018 	vm_object_hold(nobject);
3019 	vm_object_reference_locked(nobject);
3020 	vm_object_chain_acquire(nobject);
3021 
3022 	/*
3023 	 * nobject shadows bobject (oobject already shadows bobject).
3024 	 */
3025 	if (bobject) {
3026 		nobject->backing_object_offset =
3027 		    oobject->backing_object_offset + IDX_TO_OFF(offidxstart);
3028 		nobject->backing_object = bobject;
3029 		bobject->shadow_count++;
3030 		bobject->generation++;
3031 		LIST_INSERT_HEAD(&bobject->shadow_head, nobject, shadow_list);
3032 		vm_object_clear_flag(bobject, OBJ_ONEMAPPING); /* XXX? */
3033 		vm_object_chain_release(bobject);
3034 		vm_object_drop(bobject);
3035 	}
3036 
3037 	/*
3038 	 * Move the VM pages from oobject to nobject
3039 	 */
3040 	for (idx = 0; idx < size; idx++) {
3041 		vm_page_t m;
3042 
3043 		m = vm_page_lookup_busy_wait(oobject, offidxstart + idx,
3044 					     TRUE, "vmpg");
3045 		if (m == NULL)
3046 			continue;
3047 
3048 		/*
3049 		 * We must wait for pending I/O to complete before we can
3050 		 * rename the page.
3051 		 *
3052 		 * We do not have to VM_PROT_NONE the page as mappings should
3053 		 * not be changed by this operation.
3054 		 *
3055 		 * NOTE: The act of renaming a page updates chaingen for both
3056 		 *	 objects.
3057 		 */
3058 		vm_page_rename(m, nobject, idx);
3059 		/* page automatically made dirty by rename and cache handled */
3060 		/* page remains busy */
3061 	}
3062 
3063 	if (oobject->type == OBJT_SWAP) {
3064 		vm_object_pip_add(oobject, 1);
3065 		/*
3066 		 * copy oobject pages into nobject and destroy unneeded
3067 		 * pages in shadow object.
3068 		 */
3069 		swap_pager_copy(oobject, nobject, offidxstart, 0);
3070 		vm_object_pip_wakeup(oobject);
3071 	}
3072 
3073 	/*
3074 	 * Wakeup the pages we played with.  No spl protection is needed
3075 	 * for a simple wakeup.
3076 	 */
3077 	for (idx = 0; idx < size; idx++) {
3078 		m = vm_page_lookup(nobject, idx);
3079 		if (m) {
3080 			KKASSERT(m->flags & PG_BUSY);
3081 			vm_page_wakeup(m);
3082 		}
3083 	}
3084 	entry->object.vm_object = nobject;
3085 	entry->offset = 0LL;
3086 
3087 	/*
3088 	 * Cleanup
3089 	 *
3090 	 * NOTE: There is no need to remove OBJ_ONEMAPPING from oobject, the
3091 	 *	 related pages were moved and are no longer applicable to the
3092 	 *	 original object.
3093 	 *
3094 	 * NOTE: Deallocate oobject (due to its entry->object.vm_object being
3095 	 *	 replaced by nobject).
3096 	 */
3097 	vm_object_chain_release(nobject);
3098 	vm_object_drop(nobject);
3099 	if (bobject) {
3100 		vm_object_chain_release(bobject);
3101 		vm_object_drop(bobject);
3102 	}
3103 	vm_object_chain_release(oobject);
3104 	/*vm_object_clear_flag(oobject, OBJ_ONEMAPPING);*/
3105 	vm_object_deallocate_locked(oobject);
3106 	vm_object_drop(oobject);
3107 #endif
3108 }
3109 
3110 /*
3111  * Copies the contents of the source entry to the destination
3112  * entry.  The entries *must* be aligned properly.
3113  *
3114  * The vm_maps must be exclusively locked.
3115  * The vm_map's token must be held.
3116  *
3117  * Because the maps are locked no faults can be in progress during the
3118  * operation.
3119  */
3120 static void
3121 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
3122 		  vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
3123 {
3124 	vm_object_t src_object;
3125 
3126 	if (dst_entry->maptype == VM_MAPTYPE_SUBMAP)
3127 		return;
3128 	if (src_entry->maptype == VM_MAPTYPE_SUBMAP)
3129 		return;
3130 
3131 	if (src_entry->wired_count == 0) {
3132 		/*
3133 		 * If the source entry is marked needs_copy, it is already
3134 		 * write-protected.
3135 		 */
3136 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
3137 			pmap_protect(src_map->pmap,
3138 			    src_entry->start,
3139 			    src_entry->end,
3140 			    src_entry->protection & ~VM_PROT_WRITE);
3141 		}
3142 
3143 		/*
3144 		 * Make a copy of the object.
3145 		 *
3146 		 * The object must be locked prior to checking the object type
3147 		 * and for the call to vm_object_collapse() and vm_map_split().
3148 		 * We cannot use *_hold() here because the split code will
3149 		 * probably try to destroy the object.  The lock is a pool
3150 		 * token and doesn't care.
3151 		 */
3152 		if (src_entry->object.vm_object != NULL) {
3153 			vm_map_split(src_entry);
3154 			src_object = src_entry->object.vm_object;
3155 			dst_entry->object.vm_object = src_object;
3156 			src_entry->eflags |= (MAP_ENTRY_COW |
3157 					      MAP_ENTRY_NEEDS_COPY);
3158 			dst_entry->eflags |= (MAP_ENTRY_COW |
3159 					      MAP_ENTRY_NEEDS_COPY);
3160 			dst_entry->offset = src_entry->offset;
3161 		} else {
3162 			dst_entry->object.vm_object = NULL;
3163 			dst_entry->offset = 0;
3164 		}
3165 
3166 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3167 		    dst_entry->end - dst_entry->start, src_entry->start);
3168 	} else {
3169 		/*
3170 		 * Of course, wired down pages can't be set copy-on-write.
3171 		 * Cause wired pages to be copied into the new map by
3172 		 * simulating faults (the new pages are pageable)
3173 		 */
3174 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
3175 	}
3176 }
3177 
3178 /*
3179  * vmspace_fork:
3180  * Create a new process vmspace structure and vm_map
3181  * based on those of an existing process.  The new map
3182  * is based on the old map, according to the inheritance
3183  * values on the regions in that map.
3184  *
3185  * The source map must not be locked.
3186  * No requirements.
3187  */
3188 struct vmspace *
3189 vmspace_fork(struct vmspace *vm1)
3190 {
3191 	struct vmspace *vm2;
3192 	vm_map_t old_map = &vm1->vm_map;
3193 	vm_map_t new_map;
3194 	vm_map_entry_t old_entry;
3195 	vm_map_entry_t new_entry;
3196 	vm_object_t object;
3197 	int count;
3198 
3199 	lwkt_gettoken(&vm1->vm_map.token);
3200 	vm_map_lock(old_map);
3201 
3202 	/*
3203 	 * XXX Note: upcalls are not copied.
3204 	 */
3205 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
3206 	lwkt_gettoken(&vm2->vm_map.token);
3207 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
3208 	    (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
3209 	new_map = &vm2->vm_map;	/* XXX */
3210 	new_map->timestamp = 1;
3211 
3212 	vm_map_lock(new_map);
3213 
3214 	count = 0;
3215 	old_entry = old_map->header.next;
3216 	while (old_entry != &old_map->header) {
3217 		++count;
3218 		old_entry = old_entry->next;
3219 	}
3220 
3221 	count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);
3222 
3223 	old_entry = old_map->header.next;
3224 	while (old_entry != &old_map->header) {
3225 		if (old_entry->maptype == VM_MAPTYPE_SUBMAP)
3226 			panic("vm_map_fork: encountered a submap");
3227 
3228 		switch (old_entry->inheritance) {
3229 		case VM_INHERIT_NONE:
3230 			break;
3231 		case VM_INHERIT_SHARE:
3232 			/*
3233 			 * Clone the entry, creating the shared object if
3234 			 * necessary.
3235 			 */
3236 			if (old_entry->object.vm_object == NULL)
3237 				vm_map_entry_allocate_object(old_entry);
3238 
3239 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3240 				/*
3241 				 * Shadow a map_entry which needs a copy,
3242 				 * replacing its object with a new object
3243 				 * that points to the old one.  Ask the
3244 				 * shadow code to automatically add an
3245 				 * additional ref.  We can't do it afterwards
3246 				 * because we might race a collapse.  The call
3247 				 * to vm_map_entry_shadow() will also clear
3248 				 * OBJ_ONEMAPPING.
3249 				 */
3250 				vm_map_entry_shadow(old_entry, 1);
3251 			} else {
3252 				/*
3253 				 * We will make a shared copy of the object,
3254 				 * and must clear OBJ_ONEMAPPING.
3255 				 *
3256 				 * XXX assert that object.vm_object != NULL
3257 				 *     since we allocate it above.
3258 				 */
3259 				if (old_entry->object.vm_object) {
3260 					object = old_entry->object.vm_object;
3261 					vm_object_hold(object);
3262 					vm_object_chain_wait(object);
3263 					vm_object_reference_locked(object);
3264 					vm_object_clear_flag(object,
3265 							     OBJ_ONEMAPPING);
3266 					vm_object_drop(object);
3267 				}
3268 			}
3269 
3270 			/*
3271 			 * Clone the entry.  We've already bumped the ref on
3272 			 * any vm_object.
3273 			 */
3274 			new_entry = vm_map_entry_create(new_map, &count);
3275 			*new_entry = *old_entry;
3276 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3277 			new_entry->wired_count = 0;
3278 
3279 			/*
3280 			 * Insert the entry into the new map -- we know we're
3281 			 * inserting at the end of the new map.
3282 			 */
3283 
3284 			vm_map_entry_link(new_map, new_map->header.prev,
3285 					  new_entry);
3286 
3287 			/*
3288 			 * Update the physical map
3289 			 */
3290 			pmap_copy(new_map->pmap, old_map->pmap,
3291 			    new_entry->start,
3292 			    (old_entry->end - old_entry->start),
3293 			    old_entry->start);
3294 			break;
3295 		case VM_INHERIT_COPY:
3296 			/*
3297 			 * Clone the entry and link into the map.
3298 			 */
3299 			new_entry = vm_map_entry_create(new_map, &count);
3300 			*new_entry = *old_entry;
3301 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3302 			new_entry->wired_count = 0;
3303 			new_entry->object.vm_object = NULL;
3304 			vm_map_entry_link(new_map, new_map->header.prev,
3305 					  new_entry);
3306 			vm_map_copy_entry(old_map, new_map, old_entry,
3307 					  new_entry);
3308 			break;
3309 		}
3310 		old_entry = old_entry->next;
3311 	}
3312 
3313 	new_map->size = old_map->size;
3314 	vm_map_unlock(old_map);
3315 	vm_map_unlock(new_map);
3316 	vm_map_entry_release(count);
3317 
3318 	lwkt_reltoken(&vm2->vm_map.token);
3319 	lwkt_reltoken(&vm1->vm_map.token);
3320 
3321 	return (vm2);
3322 }
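
/*
 * Illustrative sketch: the fork path conceptually reduces to the
 * following, with each region shared, copied, or omitted in the child
 * according to its VM_INHERIT_* setting ("p1" and "p2" assumed):
 *
 *	p2->p_vmspace = vmspace_fork(p1->p_vmspace);
 */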
3323 
3324 /*
3325  * Create an auto-grow stack entry
3326  *
3327  * No requirements.
3328  */
3329 int
3330 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3331 	      int flags, vm_prot_t prot, vm_prot_t max, int cow)
3332 {
3333 	vm_map_entry_t	prev_entry;
3334 	vm_map_entry_t	new_stack_entry;
3335 	vm_size_t	init_ssize;
3336 	int		rv;
3337 	int		count;
3338 	vm_offset_t	tmpaddr;
3339 
3340 	cow |= MAP_IS_STACK;
3341 
3342 	if (max_ssize < sgrowsiz)
3343 		init_ssize = max_ssize;
3344 	else
3345 		init_ssize = sgrowsiz;
3346 
3347 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3348 	vm_map_lock(map);
3349 
3350 	/*
3351 	 * Find space for the mapping
3352 	 */
3353 	if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
3354 		if (vm_map_findspace(map, addrbos, max_ssize, 1,
3355 				     flags, &tmpaddr)) {
3356 			vm_map_unlock(map);
3357 			vm_map_entry_release(count);
3358 			return (KERN_NO_SPACE);
3359 		}
3360 		addrbos = tmpaddr;
3361 	}
3362 
3363 	/* If addr is already mapped, no go */
3364 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
3365 		vm_map_unlock(map);
3366 		vm_map_entry_release(count);
3367 		return (KERN_NO_SPACE);
3368 	}
3369 
3370 #if 0
3371 	/* XXX already handled by kern_mmap() */
3372 	/* If we would blow our VMEM resource limit, no go */
3373 	if (map->size + init_ssize >
3374 	    curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3375 		vm_map_unlock(map);
3376 		vm_map_entry_release(count);
3377 		return (KERN_NO_SPACE);
3378 	}
3379 #endif
3380 
3381 	/*
3382 	 * If we can't accommodate max_ssize in the current mapping,
3383 	 * no go.  However, we need to be aware that subsequent user
3384 	 * mappings might map into the space we have reserved for
3385 	 * stack, and currently this space is not protected.
3386 	 *
3387 	 * Hopefully we will at least detect this condition
3388 	 * when we try to grow the stack.
3389 	 */
3390 	if ((prev_entry->next != &map->header) &&
3391 	    (prev_entry->next->start < addrbos + max_ssize)) {
3392 		vm_map_unlock(map);
3393 		vm_map_entry_release(count);
3394 		return (KERN_NO_SPACE);
3395 	}
3396 
3397 	/*
3398 	 * We initially map a stack of only init_ssize.  We will
3399 	 * grow as needed later.  Since this is to be a grow
3400 	 * down stack, we map at the top of the range.
3401 	 *
3402 	 * Note: we would normally expect prot and max to be
3403 	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
3404 	 * eliminate these as input parameters, and just
3405 	 * pass these values here in the insert call.
3406 	 */
3407 	rv = vm_map_insert(map, &count,
3408 			   NULL, 0, addrbos + max_ssize - init_ssize,
3409 	                   addrbos + max_ssize,
3410 			   VM_MAPTYPE_NORMAL,
3411 			   prot, max,
3412 			   cow);
3413 
3414 	/* Now set the avail_ssize amount */
3415 	if (rv == KERN_SUCCESS) {
3416 		if (prev_entry != &map->header)
3417 			vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize, &count);
3418 		new_stack_entry = prev_entry->next;
3419 		if (new_stack_entry->end   != addrbos + max_ssize ||
3420 		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
3421 			panic ("Bad entry start/end for new stack entry");
3422 		else
3423 			new_stack_entry->aux.avail_ssize = max_ssize - init_ssize;
3424 	}
3425 
3426 	vm_map_unlock(map);
3427 	vm_map_entry_release(count);
3428 	return (rv);
3429 }
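
/*
 * Illustrative sketch (hypothetical caller): creating a grow-down user
 * stack of at most "maxssiz" bytes at "addr":
 *
 *	rv = vm_map_stack(&vm->vm_map, addr, maxssiz, 0,
 *			  VM_PROT_ALL, VM_PROT_ALL, 0);
 */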
3430 
3431 /*
3432  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3433  * desired address is already mapped, or if we successfully grow
3434  * the stack.  Also returns KERN_SUCCESS if addr is outside the
3435  * stack range (this is strange, but preserves compatibility with
3436  * the grow function in vm_machdep.c).
3437  *
3438  * No requirements.
3439  */
3440 int
3441 vm_map_growstack (struct proc *p, vm_offset_t addr)
3442 {
3443 	vm_map_entry_t prev_entry;
3444 	vm_map_entry_t stack_entry;
3445 	vm_map_entry_t new_stack_entry;
3446 	struct vmspace *vm = p->p_vmspace;
3447 	vm_map_t map = &vm->vm_map;
3448 	vm_offset_t    end;
3449 	int grow_amount;
3450 	int rv = KERN_SUCCESS;
3451 	int is_procstack;
3452 	int use_read_lock = 1;
3453 	int count;
3454 
3455 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3456 Retry:
3457 	if (use_read_lock)
3458 		vm_map_lock_read(map);
3459 	else
3460 		vm_map_lock(map);
3461 
3462 	/* If addr is already in the entry range, no need to grow. */
3463 	if (vm_map_lookup_entry(map, addr, &prev_entry))
3464 		goto done;
3465 
3466 	if ((stack_entry = prev_entry->next) == &map->header)
3467 		goto done;
3468 	if (prev_entry == &map->header)
3469 		end = stack_entry->start - stack_entry->aux.avail_ssize;
3470 	else
3471 		end = prev_entry->end;
3472 
3473 	/*
3474 	 * This next test mimics the old grow function in vm_machdep.c.
3475 	 * It really doesn't quite make sense, but we do it anyway
3476 	 * for compatibility.
3477 	 *
3478 	 * If not growable stack, return success.  This signals the
3479 	 * caller to proceed as he would normally with normal vm.
3480 	 */
3481 	if (stack_entry->aux.avail_ssize < 1 ||
3482 	    addr >= stack_entry->start ||
3483 	    addr <  stack_entry->start - stack_entry->aux.avail_ssize) {
3484 		goto done;
3485 	}
3486 
3487 	/* Find the minimum grow amount */
3488 	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
3489 	if (grow_amount > stack_entry->aux.avail_ssize) {
3490 		rv = KERN_NO_SPACE;
3491 		goto done;
3492 	}
3493 
3494 	/*
3495 	 * If there is no longer enough space between the entries, fail
3496 	 * and adjust the available space.  Note: this
3497 	 * should only happen if the user has mapped into the
3498 	 * stack area after the stack was created, and is
3499 	 * probably an error.
3500 	 *
3501 	 * This also effectively destroys any guard page the user
3502 	 * might have intended by limiting the stack size.
3503 	 */
3504 	if (grow_amount > stack_entry->start - end) {
3505 		if (use_read_lock && vm_map_lock_upgrade(map)) {
3506 			use_read_lock = 0;
3507 			goto Retry;
3508 		}
3509 		use_read_lock = 0;
3510 		stack_entry->aux.avail_ssize = stack_entry->start - end;
3511 		rv = KERN_NO_SPACE;
3512 		goto done;
3513 	}
3514 
3515 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
3516 
3517 	/* If this is the main process stack, see if we're over the
3518 	 * stack limit.
3519 	 */
3520 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3521 			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3522 		rv = KERN_NO_SPACE;
3523 		goto done;
3524 	}
3525 
3526 	/* Round up the grow amount modulo SGROWSIZ */
3527 	grow_amount = roundup (grow_amount, sgrowsiz);
3528 	if (grow_amount > stack_entry->aux.avail_ssize) {
3529 		grow_amount = stack_entry->aux.avail_ssize;
3530 	}
3531 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3532 	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3533 		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
3534 		              ctob(vm->vm_ssize);
3535 	}
3536 
3537 	/* If we would blow our VMEM resource limit, no go */
3538 	if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3539 		rv = KERN_NO_SPACE;
3540 		goto done;
3541 	}
3542 
3543 	if (use_read_lock && vm_map_lock_upgrade(map)) {
3544 		use_read_lock = 0;
3545 		goto Retry;
3546 	}
3547 	use_read_lock = 0;
3548 
3549 	/* Get the preliminary new entry start value */
3550 	addr = stack_entry->start - grow_amount;
3551 
3552 	/* If this puts us into the previous entry, cut back our growth
3553 	 * to the available space.  Also, see the note above.
3554 	 */
3555 	if (addr < end) {
3556 		stack_entry->aux.avail_ssize = stack_entry->start - end;
3557 		addr = end;
3558 	}
3559 
3560 	rv = vm_map_insert(map, &count,
3561 			   NULL, 0, addr, stack_entry->start,
3562 			   VM_MAPTYPE_NORMAL,
3563 			   VM_PROT_ALL, VM_PROT_ALL,
3564 			   0);
3565 
3566 	/* Adjust the available stack space by the amount we grew. */
3567 	if (rv == KERN_SUCCESS) {
3568 		if (prev_entry != &map->header)
3569 			vm_map_clip_end(map, prev_entry, addr, &count);
3570 		new_stack_entry = prev_entry->next;
3571 		if (new_stack_entry->end   != stack_entry->start  ||
3572 		    new_stack_entry->start != addr)
3573 			panic ("Bad stack grow start/end in new stack entry");
3574 		else {
3575 			new_stack_entry->aux.avail_ssize =
3576 				stack_entry->aux.avail_ssize -
3577 				(new_stack_entry->end - new_stack_entry->start);
3578 			if (is_procstack)
3579 				vm->vm_ssize += btoc(new_stack_entry->end -
3580 						     new_stack_entry->start);
3581 		}
3582 
3583 		if (map->flags & MAP_WIREFUTURE)
3584 			vm_map_unwire(map, new_stack_entry->start,
3585 				      new_stack_entry->end, FALSE);
3586 	}
3587 
3588 done:
3589 	if (use_read_lock)
3590 		vm_map_unlock_read(map);
3591 	else
3592 		vm_map_unlock(map);
3593 	vm_map_entry_release(count);
3594 	return (rv);
3595 }
3596 
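/*
 * Example (an illustrative sketch only; the helper below is hypothetical,
 * the real callers of vm_map_growstack() live in the fault path):
 */
#if 0
static int
example_stack_fault(struct proc *p, vm_offset_t fault_addr)
{
	/*
	 * KERN_SUCCESS means "proceed with normal vm": the address is
	 * already mapped, the stack grew, or the address lies outside
	 * the stack range entirely.
	 */
	if (vm_map_growstack(p, fault_addr) != KERN_SUCCESS)
		return (KERN_FAILURE);
	return (KERN_SUCCESS);
}
#endif
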
3597 /*
3598  * Unshare the specified VM space for exec.  If other processes are
3599  * mapped to it, then create a new one.  The new vmspace starts empty.
3600  *
3601  * No requirements.
3602  */
3603 void
3604 vmspace_exec(struct proc *p, struct vmspace *vmcopy)
3605 {
3606 	struct vmspace *oldvmspace = p->p_vmspace;
3607 	struct vmspace *newvmspace;
3608 	vm_map_t map = &p->p_vmspace->vm_map;
3609 
3610 	/*
3611 	 * If we are execing a resident vmspace we fork it, otherwise
3612 	 * we create a new vmspace.  Note that exitingcnt and upcalls
3613 	 * are not copied to the new vmspace.
3614 	 */
3615 	lwkt_gettoken(&oldvmspace->vm_map.token);
3616 	if (vmcopy)  {
3617 		newvmspace = vmspace_fork(vmcopy);
3618 		lwkt_gettoken(&newvmspace->vm_map.token);
3619 	} else {
3620 		newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
3621 		lwkt_gettoken(&newvmspace->vm_map.token);
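		/*
		 * Copy only the fields bracketed by the vm_startcopy and
		 * vm_endcopy markers in struct vmspace.
		 */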
3622 		bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
3623 		      (caddr_t)&oldvmspace->vm_endcopy -
3624 		       (caddr_t)&oldvmspace->vm_startcopy);
3625 	}
3626 
3627 	/*
3628 	 * Finish initializing the vmspace before assigning it
3629 	 * to the process.  The vmspace will become the current vmspace
3630 	 * if p == curproc.
3631 	 */
3632 	pmap_pinit2(vmspace_pmap(newvmspace));
3633 	pmap_replacevm(p, newvmspace, 0);
3634 	lwkt_reltoken(&newvmspace->vm_map.token);
3635 	lwkt_reltoken(&oldvmspace->vm_map.token);
3636 	sysref_put(&oldvmspace->vm_sysref);
3637 }
3638 
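/*
 * Example (an illustrative sketch, not a quote of the actual exec code):
 * exec normally passes a NULL vmcopy so that a brand new vmspace is built;
 * a resident vmspace would be passed as vmcopy to be forked instead.
 */
#if 0
	vmspace_exec(p, NULL);
#endif
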
3639 /*
3640  * Unshare the specified VM space for forcing COW.  This
3641  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3642  *
3643  * The exitingcnt test is not strictly necessary but has been
3644  * included for code sanity (to make the code a bit more deterministic).
3645  */
3646 void
3647 vmspace_unshare(struct proc *p)
3648 {
3649 	struct vmspace *oldvmspace = p->p_vmspace;
3650 	struct vmspace *newvmspace;
3651 
3652 	lwkt_gettoken(&oldvmspace->vm_map.token);
3653 	if (oldvmspace->vm_sysref.refcnt == 1 && oldvmspace->vm_exitingcnt == 0) {
3654 		lwkt_reltoken(&oldvmspace->vm_map.token);
3655 		return;
3656 	}
3657 	newvmspace = vmspace_fork(oldvmspace);
3658 	lwkt_gettoken(&newvmspace->vm_map.token);
3659 	pmap_pinit2(vmspace_pmap(newvmspace));
3660 	pmap_replacevm(p, newvmspace, 0);
3661 	lwkt_reltoken(&newvmspace->vm_map.token);
3662 	lwkt_reltoken(&oldvmspace->vm_map.token);
3663 	sysref_put(&oldvmspace->vm_sysref);
3664 }
3665 
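/*
 * Example (an illustrative sketch; the flag test is an assumption based
 * on the comment above, not a quote of the actual rfork code):
 */
#if 0
	if ((flags & (RFMEM | RFPROC)) == 0)
		vmspace_unshare(p);
#endif
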
3666 /*
3667  * vm_map_hint: return the beginning of the best area suitable for
3668  * creating a new mapping with "prot" protection.
3669  *
3670  * No requirements.
3671  */
3672 vm_offset_t
3673 vm_map_hint(struct proc *p, vm_offset_t addr, vm_prot_t prot)
3674 {
3675 	struct vmspace *vms = p->p_vmspace;
3676 
3677 	if (!randomize_mmap) {
3678 		/*
3679 		 * Set a reasonable start point for the hint if it was
3680 		 * not specified or if it falls within the heap space.
3681 		 * Hinted mmap()s do not allocate out of the heap space.
3682 		 */
3683 		if (addr == 0 ||
3684 		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
3685 		     addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))) {
3686 			addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
3687 		}
3688 
3689 		return addr;
3690 	}
3691 
3692 	if (addr != 0 && addr >= (vm_offset_t)vms->vm_daddr)
3693 		return addr;
3694 
3695 #ifdef notyet
3696 #ifdef __i386__
3697 	/*
3698 	 * If the mapping is executable, skip the first two pages;
3699 	 * otherwise start after the data + heap region.
3700 	 */
3701 	if ((prot & VM_PROT_EXECUTE) &&
3702 	    ((vm_offset_t)vms->vm_daddr >= I386_MAX_EXE_ADDR)) {
3703 		addr = (PAGE_SIZE * 2) +
3704 		    (karc4random() & (I386_MAX_EXE_ADDR / 2 - 1));
3705 		return (round_page(addr));
3706 	}
3707 #endif /* __i386__ */
3708 #endif /* notyet */
3709 
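	/*
	 * Randomized case: start above the data segment, add a random
	 * offset within a window of min(256MB, MAXDSIZ), and round the
	 * result up to a page boundary.
	 */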
3710 	addr = (vm_offset_t)vms->vm_daddr + MAXDSIZ;
3711 	addr += karc4random() & (MIN((256 * 1024 * 1024), MAXDSIZ) - 1);
3712 
3713 	return (round_page(addr));
3714 }
3715 
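/*
 * Example (an illustrative sketch; the surrounding mmap-path variables
 * are hypothetical): mapping code consults vm_map_hint() for a starting
 * address before searching the map for free space.
 */
#if 0
	addr = vm_map_hint(p, addr, prot);
#endif
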
3716 /*
3717  * Finds the VM object, offset, and protection for a given virtual address
3718  * in the specified map, assuming a page fault of the type specified.
3719  *
3720  * Leaves the map in question locked for read; return values are guaranteed
3721  * until a vm_map_lookup_done call is performed.  Note that the map argument
3722  * is in/out; the returned map must be used in the call to vm_map_lookup_done.
3723  *
3724  * A handle (out_entry) is returned for use in vm_map_lookup_done, to make
3725  * that call fast.
3726  *
3727  * If a lookup is requested with "write protection" specified, the map may
3728  * be changed to perform virtual copying operations, although the data
3729  * referenced will remain the same.
3730  *
3731  * No requirements.
3732  */
3733 int
3734 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
3735 	      vm_offset_t vaddr,
3736 	      vm_prot_t fault_typea,
3737 	      vm_map_entry_t *out_entry,	/* OUT */
3738 	      vm_object_t *object,		/* OUT */
3739 	      vm_pindex_t *pindex,		/* OUT */
3740 	      vm_prot_t *out_prot,		/* OUT */
3741 	      boolean_t *wired)			/* OUT */
3742 {
3743 	vm_map_entry_t entry;
3744 	vm_map_t map = *var_map;
3745 	vm_prot_t prot;
3746 	vm_prot_t fault_type = fault_typea;
3747 	int use_read_lock = 1;
3748 	int rv = KERN_SUCCESS;
3749 
3750 RetryLookup:
3751 	if (use_read_lock)
3752 		vm_map_lock_read(map);
3753 	else
3754 		vm_map_lock(map);
3755 
3756 	/*
3757 	 * If the map has an interesting hint, try it before calling the
3758 	 * full-blown lookup routine.
3759 	 */
3760 	entry = map->hint;
3761 	*out_entry = entry;
3762 	*object = NULL;
3763 
3764 	if ((entry == &map->header) ||
3765 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
3766 		vm_map_entry_t tmp_entry;
3767 
3768 		/*
3769 		 * Entry was either not a valid hint, or the vaddr was not
3770 		 * contained in the entry, so do a full lookup.
3771 		 */
3772 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
3773 			rv = KERN_INVALID_ADDRESS;
3774 			goto done;
3775 		}
3776 
3777 		entry = tmp_entry;
3778 		*out_entry = entry;
3779 	}
3780 
3781 	/*
3782 	 * Handle submaps.
3783 	 */
3784 	if (entry->maptype == VM_MAPTYPE_SUBMAP) {
3785 		vm_map_t old_map = map;
3786 
3787 		*var_map = map = entry->object.sub_map;
3788 		if (use_read_lock)
3789 			vm_map_unlock_read(old_map);
3790 		else
3791 			vm_map_unlock(old_map);
3792 		use_read_lock = 1;
3793 		goto RetryLookup;
3794 	}
3795 
3796 	/*
3797 	 * Check whether this task is allowed to have this page.
3798 	 * Note the special case for MAP_ENTRY_COW
3799 	 * pages with an override.  This is to implement a forced
3800 	 * COW for debuggers.
3801 	 */
3802 
3803 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
3804 		prot = entry->max_protection;
3805 	else
3806 		prot = entry->protection;
3807 
3808 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3809 	if ((fault_type & prot) != fault_type) {
3810 		rv = KERN_PROTECTION_FAILURE;
3811 		goto done;
3812 	}
3813 
3814 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3815 	    (entry->eflags & MAP_ENTRY_COW) &&
3816 	    (fault_type & VM_PROT_WRITE) &&
3817 	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
3818 		rv = KERN_PROTECTION_FAILURE;
3819 		goto done;
3820 	}
3821 
3822 	/*
3823 	 * If this page is not pageable, we have to get it for all possible
3824 	 * accesses.
3825 	 */
3826 	*wired = (entry->wired_count != 0);
3827 	if (*wired)
3828 		prot = fault_type = entry->protection;
3829 
3830 	/*
3831 	 * Virtual page tables may need to update the accessed (A) bit
3832 	 * in a page table entry.  Upgrade the fault to a write fault for
3833 	 * that case if the map will support it.  If the map does not support
3834 	 * it the page table entry simply will not be updated.
3835 	 */
3836 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
3837 		if (prot & VM_PROT_WRITE)
3838 			fault_type |= VM_PROT_WRITE;
3839 	}
3840 
3841 	/*
3842 	 * If the entry was copy-on-write, we shadow it or demote the permissions:
3843 	 */
3844 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3845 		/*
3846 		 * If we want to write the page, we may as well handle that
3847 		 * now since we've got the map locked.
3848 		 *
3849 		 * If we don't need to write the page, we just demote the
3850 		 * permissions allowed.
3851 		 */
3852 
3853 		if (fault_type & VM_PROT_WRITE) {
3854 			/*
3855 			 * Make a new object, and place it in the object
3856 			 * chain.  Note that no new references have appeared
3857 			 * -- one just moved from the map to the new
3858 			 * object.
3859 			 */
3860 
3861 			if (use_read_lock && vm_map_lock_upgrade(map)) {
3862 				use_read_lock = 0;
3863 				goto RetryLookup;
3864 			}
3865 			use_read_lock = 0;
3866 
3867 			vm_map_entry_shadow(entry, 0);
3868 		} else {
3869 			/*
3870 			 * We're attempting to read a copy-on-write page --
3871 			 * don't allow writes.
3872 			 */
3873 
3874 			prot &= ~VM_PROT_WRITE;
3875 		}
3876 	}
3877 
3878 	/*
3879 	 * Create an object if necessary.
3880 	 */
3881 	if (entry->object.vm_object == NULL &&
3882 	    !map->system_map) {
3883 		if (use_read_lock && vm_map_lock_upgrade(map)) {
3884 			use_read_lock = 0;
3885 			goto RetryLookup;
3886 		}
3887 		use_read_lock = 0;
3888 		vm_map_entry_allocate_object(entry);
3889 	}
3890 
3891 	/*
3892 	 * Return the object/offset from this entry.  If the entry was
3893 	 * copy-on-write or empty, it has been fixed up.
3894 	 */
3895 
3896 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3897 	*object = entry->object.vm_object;
3898 
3899 	/*
3900 	 * Return the computed protection.  On success we return with a
3901 	 * read lock held on the map; on failure we return with the map
3902 	 * unlocked.
3903 	 */
3904 	*out_prot = prot;
3905 done:
3906 	if (rv == KERN_SUCCESS) {
3907 		if (use_read_lock == 0)
3908 			vm_map_lock_downgrade(map);
3909 	} else if (use_read_lock) {
3910 		vm_map_unlock_read(map);
3911 	} else {
3912 		vm_map_unlock(map);
3913 	}
3914 	return (rv);
3915 }
3916 
3917 /*
3918  * Releases locks acquired by a vm_map_lookup()
3919  * (according to the handle returned by that lookup).
3920  *
3921  * No other requirements.
3922  */
3923 void
3924 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
3925 {
3926 	/*
3927 	 * Unlock the main-level map
3928 	 */
3929 	vm_map_unlock_read(map);
3930 	if (count)
3931 		vm_map_entry_release(count);
3932 }
3933 
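/*
 * Example (an illustrative sketch; the helper below is hypothetical):
 * the typical pairing of vm_map_lookup() and vm_map_lookup_done().  On
 * success the map comes back read-locked and must be released with
 * vm_map_lookup_done(); on failure it is already unlocked.
 */
#if 0
static int
example_resolve_page(vm_map_t map, vm_offset_t vaddr)
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	int rv;

	rv = vm_map_lookup(&map, vaddr, VM_PROT_READ,
			   &entry, &object, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (rv);
	/* ... look up or fault in the page at (object, pindex) ... */
	vm_map_lookup_done(map, entry, 0);
	return (KERN_SUCCESS);
}
#endif
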
3934 #include "opt_ddb.h"
3935 #ifdef DDB
3936 #include <sys/kernel.h>
3937 
3938 #include <ddb/ddb.h>
3939 
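/*
 * Example usage from the ddb prompt (the address is illustrative):
 *
 *	db> show map <address-of-vm_map>
 *	db> show procvm
 */
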
3940 /*
3941  * Debugging only
3942  */
3943 DB_SHOW_COMMAND(map, vm_map_print)
3944 {
3945 	static int nlines;
3946 	/* XXX convert args. */
3947 	vm_map_t map = (vm_map_t)addr;
3948 	boolean_t full = have_addr;
3949 
3950 	vm_map_entry_t entry;
3951 
3952 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3953 	    (void *)map,
3954 	    (void *)map->pmap, map->nentries, map->timestamp);
3955 	nlines++;
3956 
3957 	if (!full && db_indent)
3958 		return;
3959 
3960 	db_indent += 2;
3961 	for (entry = map->header.next; entry != &map->header;
3962 	    entry = entry->next) {
3963 		db_iprintf("map entry %p: start=%p, end=%p\n",
3964 		    (void *)entry, (void *)entry->start, (void *)entry->end);
3965 		nlines++;
3966 		{
3967 			static char *inheritance_name[4] =
3968 			{"share", "copy", "none", "donate_copy"};
3969 
3970 			db_iprintf(" prot=%x/%x/%s",
3971 			    entry->protection,
3972 			    entry->max_protection,
3973 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
3974 			if (entry->wired_count != 0)
3975 				db_printf(", wired");
3976 		}
3977 		if (entry->maptype == VM_MAPTYPE_SUBMAP) {
3978 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3979 			db_printf(", share=%p, offset=0x%lx\n",
3980 			    (void *)entry->object.sub_map,
3981 			    (long)entry->offset);
3982 			nlines++;
3983 			if ((entry->prev == &map->header) ||
3984 			    (entry->prev->object.sub_map !=
3985 				entry->object.sub_map)) {
3986 				db_indent += 2;
3987 				vm_map_print((db_expr_t)(intptr_t)
3988 					     entry->object.sub_map,
3989 					     full, 0, NULL);
3990 				db_indent -= 2;
3991 			}
3992 		} else {
3993 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3994 			db_printf(", object=%p, offset=0x%lx",
3995 			    (void *)entry->object.vm_object,
3996 			    (long)entry->offset);
3997 			if (entry->eflags & MAP_ENTRY_COW)
3998 				db_printf(", copy (%s)",
3999 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4000 			db_printf("\n");
4001 			nlines++;
4002 
4003 			if ((entry->prev == &map->header) ||
4004 			    (entry->prev->object.vm_object !=
4005 				entry->object.vm_object)) {
4006 				db_indent += 2;
4007 				vm_object_print((db_expr_t)(intptr_t)
4008 						entry->object.vm_object,
4009 						full, 0, NULL);
4010 				nlines += 4;
4011 				db_indent -= 2;
4012 			}
4013 		}
4014 	}
4015 	db_indent -= 2;
4016 	if (db_indent == 0)
4017 		nlines = 0;
4018 }
4019 
4020 /*
4021  * Debugging only
4022  */
4023 DB_SHOW_COMMAND(procvm, procvm)
4024 {
4025 	struct proc *p;
4026 
4027 	if (have_addr) {
4028 		p = (struct proc *) addr;
4029 	} else {
4030 		p = curproc;
4031 	}
4032 
4033 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4034 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4035 	    (void *)vmspace_pmap(p->p_vmspace));
4036 
4037 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
4038 }
4039 
4040 #endif /* DDB */
4041