xref: /dragonfly/sys/vm/vm_map.c (revision 896f2e3a)
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
35  *
36  *
37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38  * All rights reserved.
39  *
40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  *
62  * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
63  */
64 
65 /*
66  *	Virtual memory mapping module.
67  */
68 
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kernel.h>
72 #include <sys/proc.h>
73 #include <sys/serialize.h>
74 #include <sys/lock.h>
75 #include <sys/vmmeter.h>
76 #include <sys/mman.h>
77 #include <sys/vnode.h>
78 #include <sys/resourcevar.h>
79 #include <sys/shm.h>
80 #include <sys/tree.h>
81 #include <sys/malloc.h>
82 #include <sys/objcache.h>
83 
84 #include <vm/vm.h>
85 #include <vm/vm_param.h>
86 #include <vm/pmap.h>
87 #include <vm/vm_map.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_object.h>
90 #include <vm/vm_pager.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/swap_pager.h>
94 #include <vm/vm_zone.h>
95 
96 #include <sys/thread2.h>
97 #include <sys/random.h>
98 #include <sys/sysctl.h>
99 
100 /*
101  * Virtual memory maps provide for the mapping, protection, and sharing
102  * of virtual memory objects.  In addition, this module provides for an
103  * efficient virtual copy of memory from one map to another.
104  *
105  * Synchronization is required prior to most operations.
106  *
107  * Maps consist of an ordered doubly-linked list of simple entries.
108  * A hint and an RB tree are used to speed up lookups.
109  *
110  * Callers looking to modify maps specify start/end addresses which cause
111  * the related map entry to be clipped if necessary, and then later
112  * recombined if the pieces remain compatible.
113  *
114  * Virtual copy operations are performed by copying VM object references
115  * from one map to another, and then marking both regions as copy-on-write.
116  */
117 static __boolean_t vmspace_ctor(void *obj, void *privdata, int ocflags);
118 static void vmspace_dtor(void *obj, void *privdata);
119 static void vmspace_terminate(struct vmspace *vm, int final);
120 
121 MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore");
122 static struct objcache *vmspace_cache;
123 
124 /*
125  * per-cpu page table cross mappings are initialized in early boot
126  * and might require a considerable number of vm_map_entry structures.
127  */
128 #define MAPENTRYBSP_CACHE	(MAXCPU+1)
129 #define MAPENTRYAP_CACHE	8
130 
131 static struct vm_zone mapentzone_store, mapzone_store;
132 static vm_zone_t mapentzone, mapzone;
133 static struct vm_object mapentobj, mapobj;
134 
135 static struct vm_map_entry map_entry_init[MAX_MAPENT];
136 static struct vm_map_entry cpu_map_entry_init_bsp[MAPENTRYBSP_CACHE];
137 static struct vm_map_entry cpu_map_entry_init_ap[MAXCPU][MAPENTRYAP_CACHE];
138 static struct vm_map map_init[MAX_KMAP];
139 
140 static int randomize_mmap;
141 SYSCTL_INT(_vm, OID_AUTO, randomize_mmap, CTLFLAG_RW, &randomize_mmap, 0,
142     "Randomize mmap offsets");
143 static int vm_map_relock_enable = 1;
144 SYSCTL_INT(_vm, OID_AUTO, map_relock_enable, CTLFLAG_RW,
145 	   &vm_map_relock_enable, 0, "Enable relocking during prefault");
146 
147 static void vm_map_entry_shadow(vm_map_entry_t entry, int addref);
148 static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *);
149 static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
150 static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
151 static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
152 static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
153 static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
154 static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
155 		vm_map_entry_t);
156 static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry,
		vm_offset_t start, vm_offset_t end, int *count, int flags);
157 
158 /*
159  * Initialize the vm_map module.  Must be called before any other vm_map
160  * routines.
161  *
162  * Map and entry structures are allocated from the general purpose
163  * memory pool with some exceptions:
164  *
165  *	- The kernel map is allocated statically.
166  *	- Initial kernel map entries are allocated out of a static pool.
167  *	- We must set ZONE_SPECIAL here or the early boot code can get
168  *	  stuck if there are >63 cores.
169  *
170  *	These restrictions are necessary since malloc() uses the
171  *	maps and requires map entries.
172  *
173  * Called from the low level boot code only.
174  */
175 void
176 vm_map_startup(void)
177 {
178 	mapzone = &mapzone_store;
179 	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
180 		map_init, MAX_KMAP);
181 	mapentzone = &mapentzone_store;
182 	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
183 		  map_entry_init, MAX_MAPENT);
184 	mapentzone_store.zflags |= ZONE_SPECIAL;
185 }
186 
187 /*
188  * Called prior to any vmspace allocations.
189  *
190  * Called from the low level boot code only.
191  */
192 void
193 vm_init2(void)
194 {
195 	vmspace_cache = objcache_create_mbacked(M_VMSPACE,
196 						sizeof(struct vmspace),
197 						0, ncpus * 4,
198 						vmspace_ctor, vmspace_dtor,
199 						NULL);
200 	zinitna(mapentzone, &mapentobj, NULL, 0, 0,
201 		ZONE_USE_RESERVE | ZONE_SPECIAL, 1);
202 	zinitna(mapzone, &mapobj, NULL, 0, 0, 0, 1);
203 	pmap_init2();
204 	vm_object_init2();
205 }
206 
207 /*
208  * objcache support.  We leave the pmap root cached as long as possible
209  * for performance reasons.
210  */
211 static
212 __boolean_t
213 vmspace_ctor(void *obj, void *privdata, int ocflags)
214 {
215 	struct vmspace *vm = obj;
216 
217 	bzero(vm, sizeof(*vm));
218 	vm->vm_refcnt = (u_int)-1;
219 
220 	return 1;
221 }
222 
223 static
224 void
225 vmspace_dtor(void *obj, void *privdata)
226 {
227 	struct vmspace *vm = obj;
228 
229 	KKASSERT(vm->vm_refcnt == (u_int)-1);
230 	pmap_puninit(vmspace_pmap(vm));
231 }
232 
233 /*
234  * Red black tree functions
235  *
236  * The caller must hold the related map lock.
237  */
238 static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
239 RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);
240 
241 /* a->start is the address; it is the only field that must be initialized */
242 static int
243 rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
244 {
245 	if (a->start < b->start)
246 		return(-1);
247 	else if (a->start > b->start)
248 		return(1);
249 	return(0);
250 }
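
/*
 * Example (illustrative): since only ->start participates in the
 * comparison, an exact-address tree lookup can be performed with a
 * dummy entry that has nothing but ->start filled in:
 *
 *	struct vm_map_entry dummy;
 *
 *	dummy.start = address;
 *	entry = RB_FIND(vm_map_rb_tree, &map->rb_root, &dummy);
 *
 * vm_map_lookup_entry() below implements the more general
 * containing-or-preceding search by walking the tree manually.
 */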
251 
252 /*
253  * Initialize a vmspace's ref and hold counts (also used for vmspace0).
254  * There is a holdcnt for every refcnt.
255  */
256 void
257 vmspace_initrefs(struct vmspace *vm)
258 {
259 	vm->vm_refcnt = 1;
260 	vm->vm_holdcnt = 1;
261 }
262 
263 /*
264  * Allocate a vmspace structure, including a vm_map and pmap.
265  * Initialize numerous fields.  While the initial allocation is zeroed,
266  * subsequent reuse from the objcache leaves elements of the structure
267  * intact (particularly the pmap), so portions must be zeroed.
268  *
269  * Returns a referenced vmspace.
270  *
271  * No requirements.
272  */
273 struct vmspace *
274 vmspace_alloc(vm_offset_t min, vm_offset_t max)
275 {
276 	struct vmspace *vm;
277 
278 	vm = objcache_get(vmspace_cache, M_WAITOK);
279 
280 	bzero(&vm->vm_startcopy,
281 	      (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
282 	vm_map_init(&vm->vm_map, min, max, NULL);	/* initializes token */
283 
284 	/*
285 	 * NOTE: the hold also acquires the map token for safety.
286 	 *
287 	 * On return vmspace is referenced (refs=1, hold=1).  That is,
288 	 * each refcnt also has a holdcnt.  There can be additional holds
289 	 * (holdcnt) above and beyond the refcnt.  Finalization is handled in
290 	 * two stages, one on refs 1->0, and the second on hold 1->0.
291 	 */
292 	KKASSERT(vm->vm_holdcnt == 0);
293 	KKASSERT(vm->vm_refcnt == (u_int)-1);
294 	vmspace_initrefs(vm);
295 	vmspace_hold(vm);
296 	pmap_pinit(vmspace_pmap(vm));		/* (some fields reused) */
297 	vm->vm_map.pmap = vmspace_pmap(vm);	/* XXX */
298 	vm->vm_shm = NULL;
299 	vm->vm_flags = 0;
300 	cpu_vmspace_alloc(vm);
301 	vmspace_drop(vm);
302 
303 	return (vm);
304 }
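
/*
 * Example (illustrative sketch): the typical lifecycle of a vmspace
 * returned by vmspace_alloc().  Additional users take references via
 * vmspace_ref(); the final vmspace_rel() triggers stage-1 termination
 * and, when the hold count drains, stage-2 final termination:
 *
 *	struct vmspace *vm;
 *
 *	vm = vmspace_alloc(VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
 *	... use vm->vm_map and vmspace_pmap(vm) ...
 *	vmspace_rel(vm);
 */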
305 
306 /*
307  * NOTE: Can return -1 if the vmspace is exiting.
308  */
309 int
310 vmspace_getrefs(struct vmspace *vm)
311 {
312 	return ((int)vm->vm_refcnt);
313 }
314 
315 /*
316  * A vmspace object must already have a non-zero hold to be able to gain
317  * further holds on it.
318  */
319 static void
320 vmspace_hold_notoken(struct vmspace *vm)
321 {
322 	KKASSERT(vm->vm_holdcnt != 0);
323 	refcount_acquire(&vm->vm_holdcnt);
324 }
325 
326 static void
327 vmspace_drop_notoken(struct vmspace *vm)
328 {
329 	if (refcount_release(&vm->vm_holdcnt)) {
330 		if (vm->vm_refcnt == (u_int)-1) {
331 			vmspace_terminate(vm, 1);
332 		}
333 	}
334 }
335 
336 void
337 vmspace_hold(struct vmspace *vm)
338 {
339 	vmspace_hold_notoken(vm);
340 	lwkt_gettoken(&vm->vm_map.token);
341 }
342 
343 void
344 vmspace_drop(struct vmspace *vm)
345 {
346 	lwkt_reltoken(&vm->vm_map.token);
347 	vmspace_drop_notoken(vm);
348 }
349 
350 /*
351  * A vmspace object must not be in a terminated state to be able to obtain
352  * additional refs on it.
353  *
354  * Ref'ing a vmspace object also increments its hold count.
355  */
356 void
357 vmspace_ref(struct vmspace *vm)
358 {
359 	KKASSERT((int)vm->vm_refcnt >= 0);
360 	vmspace_hold_notoken(vm);
361 	refcount_acquire(&vm->vm_refcnt);
362 }
363 
364 /*
365  * Release a ref on the vmspace.  On the 1->0 transition we do stage-1
366  * termination of the vmspace.  Then, on the final drop of the hold we
367  * will do stage-2 final termination.
368  */
369 void
370 vmspace_rel(struct vmspace *vm)
371 {
372 	if (refcount_release(&vm->vm_refcnt)) {
373 		vm->vm_refcnt = (u_int)-1;	/* no other refs possible */
374 		vmspace_terminate(vm, 0);
375 	}
376 	vmspace_drop_notoken(vm);
377 }
378 
379 /*
380  * This is called during exit indicating that the vmspace is no
381  * longer in use by an exiting process, but the process has not yet
382  * been reaped.
383  *
384  * We release the refcnt but not the associated holdcnt.
385  *
386  * No requirements.
387  */
388 void
389 vmspace_relexit(struct vmspace *vm)
390 {
391 	if (refcount_release(&vm->vm_refcnt)) {
392 		vm->vm_refcnt = (u_int)-1;	/* no other refs possible */
393 		vmspace_terminate(vm, 0);
394 	}
395 }
396 
397 /*
398  * Called during reap to disconnect the remainder of the vmspace from
399  * the process.  On the hold drop the vmspace termination is finalized.
400  *
401  * No requirements.
402  */
403 void
404 vmspace_exitfree(struct proc *p)
405 {
406 	struct vmspace *vm;
407 
408 	vm = p->p_vmspace;
409 	p->p_vmspace = NULL;
410 	vmspace_drop_notoken(vm);
411 }
412 
413 /*
414  * Called in two cases:
415  *
416  * (1) When the last refcnt is dropped and the vmspace becomes inactive,
417  *     called with final == 0.  refcnt will be (u_int)-1 at this point,
418  *     and holdcnt will still be non-zero.
419  *
420  * (2) When holdcnt becomes 0, called with final == 1.  There should no
421  *     longer be anyone with access to the vmspace.
422  *
423  * VMSPACE_EXIT1 flags the primary deactivation
424  * VMSPACE_EXIT2 flags the last reap
425  */
426 static void
427 vmspace_terminate(struct vmspace *vm, int final)
428 {
429 	int count;
430 
431 	lwkt_gettoken(&vm->vm_map.token);
432 	if (final == 0) {
433 		KKASSERT((vm->vm_flags & VMSPACE_EXIT1) == 0);
434 
435 		/*
436 		 * Get rid of most of the resources.  Leave the kernel pmap
437 		 * intact.
438 		 */
439 		vm->vm_flags |= VMSPACE_EXIT1;
440 		shmexit(vm);
441 		pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
442 				  VM_MAX_USER_ADDRESS);
443 		vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
444 			      VM_MAX_USER_ADDRESS);
445 		lwkt_reltoken(&vm->vm_map.token);
446 	} else {
447 		KKASSERT((vm->vm_flags & VMSPACE_EXIT1) != 0);
448 		KKASSERT((vm->vm_flags & VMSPACE_EXIT2) == 0);
449 
450 		/*
451 		 * Get rid of remaining basic resources.
452 		 */
453 		vm->vm_flags |= VMSPACE_EXIT2;
454 		cpu_vmspace_free(vm);
455 		shmexit(vm);
456 
457 		/*
458 		 * Lock the map, to wait out all other references to it.
459 		 * Delete all of the mappings and pages they hold, then call
460 		 * the pmap module to reclaim anything left.
461 		 */
462 		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
463 		vm_map_lock(&vm->vm_map);
464 		vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
465 			      vm->vm_map.max_offset, &count);
466 		vm_map_unlock(&vm->vm_map);
467 		vm_map_entry_release(count);
468 
469 		lwkt_gettoken(&vmspace_pmap(vm)->pm_token);
470 		pmap_release(vmspace_pmap(vm));
471 		lwkt_reltoken(&vmspace_pmap(vm)->pm_token);
472 		lwkt_reltoken(&vm->vm_map.token);
473 		objcache_put(vmspace_cache, vm);
474 	}
475 }
476 
477 /*
478  * Swap usage is determined by taking the proportional swap used by
479  * the VM objects backing the VM map.  To make up for fractional losses,
480  * if a VM object has any swap use at all its associated map entries
481  * count for at least 1 swap page each.
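 *
 * Illustratively, an entry mapping n pages of an object 'size' pages
 * long, where the object has swblock_count swap blocks (each covering
 * up to SWAP_META_PAGES pages), is charged approximately
 *
 *	swblock_count * SWAP_META_PAGES * n / size + 1
 *
 * swap pages: the object's worst-case swap footprint scaled by the
 * fraction of the object the entry maps, plus one page of round-off.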
482  *
483  * No requirements.
484  */
485 int
486 vmspace_swap_count(struct vmspace *vm)
487 {
488 	vm_map_t map = &vm->vm_map;
489 	vm_map_entry_t cur;
490 	vm_object_t object;
491 	int count = 0;
492 	int n;
493 
494 	vmspace_hold(vm);
495 	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
496 		switch(cur->maptype) {
497 		case VM_MAPTYPE_NORMAL:
498 		case VM_MAPTYPE_VPAGETABLE:
499 			if ((object = cur->object.vm_object) == NULL)
500 				break;
501 			if (object->swblock_count) {
502 				n = (cur->end - cur->start) / PAGE_SIZE;
503 				count += object->swblock_count *
504 				    SWAP_META_PAGES * n / object->size + 1;
505 			}
506 			break;
507 		default:
508 			break;
509 		}
510 	}
511 	vmspace_drop(vm);
512 
513 	return(count);
514 }
515 
516 /*
517  * Calculate the approximate number of anonymous pages in use by
518  * this vmspace.  Only anonymous (default and swap-backed) VM objects
519  * are counted, using their resident page counts.
520  *
521  * No requirements.
522  */
523 int
524 vmspace_anonymous_count(struct vmspace *vm)
525 {
526 	vm_map_t map = &vm->vm_map;
527 	vm_map_entry_t cur;
528 	vm_object_t object;
529 	int count = 0;
530 
531 	vmspace_hold(vm);
532 	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
533 		switch(cur->maptype) {
534 		case VM_MAPTYPE_NORMAL:
535 		case VM_MAPTYPE_VPAGETABLE:
536 			if ((object = cur->object.vm_object) == NULL)
537 				break;
538 			if (object->type != OBJT_DEFAULT &&
539 			    object->type != OBJT_SWAP) {
540 				break;
541 			}
542 			count += object->resident_page_count;
543 			break;
544 		default:
545 			break;
546 		}
547 	}
548 	vmspace_drop(vm);
549 
550 	return(count);
551 }
552 
553 /*
554  * Creates and returns a new empty VM map with the given physical map
555  * structure, and having the given lower and upper address bounds.
556  *
557  * No requirements.
558  */
559 vm_map_t
560 vm_map_create(vm_map_t result, pmap_t pmap, vm_offset_t min, vm_offset_t max)
561 {
562 	if (result == NULL)
563 		result = zalloc(mapzone);
564 	vm_map_init(result, min, max, pmap);
565 	return (result);
566 }
567 
568 /*
569  * Initialize an existing vm_map structure such as that in the vmspace
570  * structure.  The pmap is initialized elsewhere.
571  *
572  * No requirements.
573  */
574 void
575 vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max, pmap_t pmap)
576 {
577 	map->header.next = map->header.prev = &map->header;
578 	RB_INIT(&map->rb_root);
579 	map->nentries = 0;
580 	map->size = 0;
581 	map->system_map = 0;
582 	map->min_offset = min;
583 	map->max_offset = max;
584 	map->pmap = pmap;
585 	map->first_free = &map->header;
586 	map->hint = &map->header;
587 	map->timestamp = 0;
588 	map->flags = 0;
589 	lwkt_token_init(&map->token, "vm_map");
590 	lockinit(&map->lock, "vm_maplk", (hz + 9) / 10, 0);
591 }
592 
593 /*
594  * Shadow the vm_map_entry's object.  This typically needs to be done when
595  * a write fault is taken on an entry which had previously been cloned by
596  * fork().  The shared object (which might be NULL) must become private so
597  * we add a shadow layer above it.
598  *
599  * Object allocation for anonymous mappings is deferred as long as possible.
600  * When creating a shadow, however, the underlying object must be instantiated
601  * so it can be shared.
602  *
603  * If the map segment is governed by a virtual page table then it is
604  * possible to address offsets beyond the mapped area.  Just allocate
605  * a maximally sized object for this case.
606  *
607  * If addref is non-zero an additional reference is added to the returned
608  * entry.  This mechanic exists because the additional reference might have
609  * to be added atomically, rather than after return, to prevent a premature
610  * collapse.
611  *
612  * The vm_map must be exclusively locked.
613  * No other requirements.
614  */
615 static
616 void
617 vm_map_entry_shadow(vm_map_entry_t entry, int addref)
618 {
619 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
620 		vm_object_shadow(&entry->object.vm_object, &entry->offset,
621 				 0x7FFFFFFF, addref);	/* XXX */
622 	} else {
623 		vm_object_shadow(&entry->object.vm_object, &entry->offset,
624 				 atop(entry->end - entry->start), addref);
625 	}
626 	entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
627 }
628 
629 /*
630  * Allocate an object for a vm_map_entry.
631  *
632  * Object allocation for anonymous mappings is deferred as long as possible.
633  * This function is called when we can defer no longer, generally when a map
634  * entry might be split or forked or takes a page fault.
635  *
636  * If the map segment is governed by a virtual page table then it is
637  * possible to address offsets beyond the mapped area.  Just allocate
638  * a maximally sized object for this case.
639  *
640  * The vm_map must be exclusively locked.
641  * No other requirements.
642  */
643 void
644 vm_map_entry_allocate_object(vm_map_entry_t entry)
645 {
646 	vm_object_t obj;
647 
648 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
649 		obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF); /* XXX */
650 	} else {
651 		obj = vm_object_allocate(OBJT_DEFAULT,
652 					 atop(entry->end - entry->start));
653 	}
654 	entry->object.vm_object = obj;
655 	entry->offset = 0;
656 }
657 
658 /*
659  * Set an initial negative count so the first attempt to reserve
660  * space preloads a bunch of vm_map_entry's for this cpu.  Also
661  * pre-allocate 2 vm_map_entries which will be needed by zalloc() to
662  * map a new page for vm_map_entry structures.  SMP systems are
663  * particularly sensitive.
664  *
665  * This routine is called in early boot so we cannot just call
666  * vm_map_entry_reserve().
667  *
668  * Called from the low level boot code only (for each cpu)
669  *
670  * WARNING! Take care not to have too-big a static/BSS structure here
671  *	    as MAXCPU can be 256+, otherwise the loader's 64MB heap
672  *	    can get blown out by the kernel plus the initrd image.
673  */
674 void
675 vm_map_entry_reserve_cpu_init(globaldata_t gd)
676 {
677 	vm_map_entry_t entry;
678 	int count;
679 	int i;
680 
681 	gd->gd_vme_avail -= MAP_RESERVE_COUNT * 2;
682 	if (gd->gd_cpuid == 0) {
683 		entry = &cpu_map_entry_init_bsp[0];
684 		count = MAPENTRYBSP_CACHE;
685 	} else {
686 		entry = &cpu_map_entry_init_ap[gd->gd_cpuid][0];
687 		count = MAPENTRYAP_CACHE;
688 	}
689 	for (i = 0; i < count; ++i, ++entry) {
690 		entry->next = gd->gd_vme_base;
691 		gd->gd_vme_base = entry;
692 	}
693 }
694 
695 /*
696  * Reserves vm_map_entry structures so code later on can manipulate
697  * map_entry structures within a locked map without blocking on the
698  * allocation of a new vm_map_entry.
699  *
700  * No requirements.
701  */
702 int
703 vm_map_entry_reserve(int count)
704 {
705 	struct globaldata *gd = mycpu;
706 	vm_map_entry_t entry;
707 
708 	/*
709 	 * Make sure we have enough structures in gd_vme_base to handle
710 	 * the reservation request.
711 	 *
712 	 * The critical section protects access to the per-cpu gd.
713 	 */
714 	crit_enter();
715 	while (gd->gd_vme_avail < count) {
716 		entry = zalloc(mapentzone);
717 		entry->next = gd->gd_vme_base;
718 		gd->gd_vme_base = entry;
719 		++gd->gd_vme_avail;
720 	}
721 	gd->gd_vme_avail -= count;
722 	crit_exit();
723 
724 	return(count);
725 }
726 
727 /*
728  * Releases previously reserved vm_map_entry structures that were not
729  * used.  If we have too much junk in our per-cpu cache, clean some of
730  * it out.
731  *
732  * No requirements.
733  */
734 void
735 vm_map_entry_release(int count)
736 {
737 	struct globaldata *gd = mycpu;
738 	vm_map_entry_t entry;
739 
740 	crit_enter();
741 	gd->gd_vme_avail += count;
742 	while (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
743 		entry = gd->gd_vme_base;
744 		KKASSERT(entry != NULL);
745 		gd->gd_vme_base = entry->next;
746 		--gd->gd_vme_avail;
747 		crit_exit();
748 		zfree(mapentzone, entry);
749 		crit_enter();
750 	}
751 	crit_exit();
752 }
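
/*
 * Example (illustrative): the canonical pattern, used throughout this
 * file, that guarantees map manipulation cannot block on map-entry
 * allocation while the map is locked:
 *
 *	int count;
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	... clip, insert, or delete entries, passing &count ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */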
753 
754 /*
755  * Reserve map entry structures for use in kernel_map itself.  These
756  * entries have *ALREADY* been reserved on a per-cpu basis when the map
757  * was inited.  This function is used by zalloc() to avoid a recursion
758  * when zalloc() itself needs to allocate additional kernel memory.
759  *
760  * This function works like the normal reserve but does not load the
761  * vm_map_entry cache (because that would result in an infinite
762  * recursion).  Note that gd_vme_avail may go negative.  This is expected.
763  *
764  * Any caller of this function must be sure to renormalize after
765  * potentially eating entries to ensure that the reserve supply
766  * remains intact.
767  *
768  * No requirements.
769  */
770 int
771 vm_map_entry_kreserve(int count)
772 {
773 	struct globaldata *gd = mycpu;
774 
775 	crit_enter();
776 	gd->gd_vme_avail -= count;
777 	crit_exit();
778 	KASSERT(gd->gd_vme_base != NULL,
779 		("no reserved entries left, gd_vme_avail = %d",
780 		gd->gd_vme_avail));
781 	return(count);
782 }
783 
784 /*
785  * Release previously reserved map entries for kernel_map.  We do not
786  * attempt to clean up like the normal release function as this would
787  * cause an unnecessary (but probably not fatal) deep procedure call.
788  *
789  * No requirements.
790  */
791 void
792 vm_map_entry_krelease(int count)
793 {
794 	struct globaldata *gd = mycpu;
795 
796 	crit_enter();
797 	gd->gd_vme_avail += count;
798 	crit_exit();
799 }
800 
801 /*
802  * Allocates a VM map entry for insertion.  No entry fields are filled in.
803  *
804  * The entries should have previously been reserved.  The reservation count
805  * is tracked in (*countp).
806  *
807  * No requirements.
808  */
809 static vm_map_entry_t
810 vm_map_entry_create(vm_map_t map, int *countp)
811 {
812 	struct globaldata *gd = mycpu;
813 	vm_map_entry_t entry;
814 
815 	KKASSERT(*countp > 0);
816 	--*countp;
817 	crit_enter();
818 	entry = gd->gd_vme_base;
819 	KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
820 	gd->gd_vme_base = entry->next;
821 	crit_exit();
822 
823 	return(entry);
824 }
825 
826 /*
827  * Dispose of a vm_map_entry that is no longer being referenced.
828  *
829  * No requirements.
830  */
831 static void
832 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
833 {
834 	struct globaldata *gd = mycpu;
835 
836 	KKASSERT(map->hint != entry);
837 	KKASSERT(map->first_free != entry);
838 
839 	++*countp;
840 	crit_enter();
841 	entry->next = gd->gd_vme_base;
842 	gd->gd_vme_base = entry;
843 	crit_exit();
844 }
845 
846 
847 /*
848  * Insert/remove entries from maps.
849  *
850  * The related map must be exclusively locked.
851  * The caller must hold map->token
852  * No other requirements.
853  */
854 static __inline void
855 vm_map_entry_link(vm_map_t map,
856 		  vm_map_entry_t after_where,
857 		  vm_map_entry_t entry)
858 {
859 	ASSERT_VM_MAP_LOCKED(map);
860 
861 	map->nentries++;
862 	entry->prev = after_where;
863 	entry->next = after_where->next;
864 	entry->next->prev = entry;
865 	after_where->next = entry;
866 	if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry))
867 		panic("vm_map_entry_link: dup addr map %p ent %p", map, entry);
868 }
869 
870 static __inline void
871 vm_map_entry_unlink(vm_map_t map,
872 		    vm_map_entry_t entry)
873 {
874 	vm_map_entry_t prev;
875 	vm_map_entry_t next;
876 
877 	ASSERT_VM_MAP_LOCKED(map);
878 
879 	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
880 		panic("vm_map_entry_unlink: attempt to mess with "
881 		      "locked entry! %p", entry);
882 	}
883 	prev = entry->prev;
884 	next = entry->next;
885 	next->prev = prev;
886 	prev->next = next;
887 	vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry);
888 	map->nentries--;
889 }
890 
891 /*
892  * Finds the map entry containing (or immediately preceding) the specified
893  * address in the given map.  The entry is returned in (*entry).
894  *
895  * The boolean result indicates whether the address is actually contained
896  * in the map.
897  *
898  * The related map must be locked.
899  * No other requirements.
900  */
901 boolean_t
902 vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry)
903 {
904 	vm_map_entry_t tmp;
905 	vm_map_entry_t last;
906 
907 	ASSERT_VM_MAP_LOCKED(map);
908 #if 0
909 	/*
910 	 * XXX TEMPORARILY DISABLED.  For some reason our attempt to revive
911 	 * the hint code with the red-black lookup meets with system crashes
912 	 * and lockups.  We do not yet know why.
913 	 *
914 	 * It is possible that the problem is related to the setting
915 	 * of the hint during map_entry deletion, in the code specified
916 	 * at the GGG comment later on in this file.
917 	 *
918 	 * YYY More likely it's because this function can be called with
919 	 * a shared lock on the map, resulting in map->hint updates possibly
920 	 * racing.  Fixed now but untested.
921 	 */
922 	/*
923 	 * Quickly check the cached hint, there's a good chance of a match.
924 	 */
925 	tmp = map->hint;
926 	cpu_ccfence();
927 	if (tmp != &map->header) {
928 		if (address >= tmp->start && address < tmp->end) {
929 			*entry = tmp;
930 			return(TRUE);
931 		}
932 	}
933 #endif
934 
935 	/*
936 	 * Locate the record from the top of the tree.  'last' tracks the
937 	 * closest prior record and is returned if no match is found, which
938 	 * in binary tree terms means tracking the most recent right-branch
939 	 * taken.  If there is no prior record, &map->header is returned.
940 	 */
941 	last = &map->header;
942 	tmp = RB_ROOT(&map->rb_root);
943 
944 	while (tmp) {
945 		if (address >= tmp->start) {
946 			if (address < tmp->end) {
947 				*entry = tmp;
948 				map->hint = tmp;
949 				return(TRUE);
950 			}
951 			last = tmp;
952 			tmp = RB_RIGHT(tmp, rb_entry);
953 		} else {
954 			tmp = RB_LEFT(tmp, rb_entry);
955 		}
956 	}
957 	*entry = last;
958 	return (FALSE);
959 }
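
/*
 * Example (illustrative): a typical caller, holding the map lock,
 * distinguishing the contained and preceding-entry cases:
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, address, &entry)) {
 *		... address lies within [entry->start, entry->end) ...
 *	} else {
 *		... entry precedes address, or is &map->header ...
 *	}
 */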
960 
961 /*
962  * Inserts the given whole VM object into the target map at the specified
963  * address range.  The object's size should match that of the address range.
964  *
965  * The map must be exclusively locked.
966  * The object must be held.
967  * The caller must have reserved sufficient vm_map_entry structures.
968  *
969  * If object is non-NULL, ref count must be bumped by caller prior to
970  * making call to account for the new entry.
971  */
972 int
973 vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux,
974 	      vm_ooffset_t offset, vm_offset_t start, vm_offset_t end,
975 	      vm_maptype_t maptype,
976 	      vm_prot_t prot, vm_prot_t max, int cow)
977 {
978 	vm_map_entry_t new_entry;
979 	vm_map_entry_t prev_entry;
980 	vm_map_entry_t temp_entry;
981 	vm_eflags_t protoeflags;
982 	int must_drop = 0;
983 	vm_object_t object;
984 
985 	if (maptype == VM_MAPTYPE_UKSMAP)
986 		object = NULL;
987 	else
988 		object = map_object;
989 
990 	ASSERT_VM_MAP_LOCKED(map);
991 	if (object)
992 		ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
993 
994 	/*
995 	 * Check that the start and end points are not bogus.
996 	 */
997 	if ((start < map->min_offset) || (end > map->max_offset) ||
998 	    (start >= end))
999 		return (KERN_INVALID_ADDRESS);
1000 
1001 	/*
1002 	 * Find the entry prior to the proposed starting address; if it's part
1003 	 * of an existing entry, this range is bogus.
1004 	 */
1005 	if (vm_map_lookup_entry(map, start, &temp_entry))
1006 		return (KERN_NO_SPACE);
1007 
1008 	prev_entry = temp_entry;
1009 
1010 	/*
1011 	 * Assert that the next entry doesn't overlap the end point.
1012 	 */
1013 
1014 	if ((prev_entry->next != &map->header) &&
1015 	    (prev_entry->next->start < end))
1016 		return (KERN_NO_SPACE);
1017 
1018 	protoeflags = 0;
1019 
1020 	if (cow & MAP_COPY_ON_WRITE)
1021 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
1022 
1023 	if (cow & MAP_NOFAULT) {
1024 		protoeflags |= MAP_ENTRY_NOFAULT;
1025 
1026 		KASSERT(object == NULL,
1027 			("vm_map_insert: paradoxical MAP_NOFAULT request"));
1028 	}
1029 	if (cow & MAP_DISABLE_SYNCER)
1030 		protoeflags |= MAP_ENTRY_NOSYNC;
1031 	if (cow & MAP_DISABLE_COREDUMP)
1032 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
1033 	if (cow & MAP_IS_STACK)
1034 		protoeflags |= MAP_ENTRY_STACK;
1035 	if (cow & MAP_IS_KSTACK)
1036 		protoeflags |= MAP_ENTRY_KSTACK;
1037 
1038 	lwkt_gettoken(&map->token);
1039 
1040 	if (object) {
1041 		/*
1042 		 * When object is non-NULL, it could be shared with another
1043 		 * process.  We have to set or clear OBJ_ONEMAPPING
1044 		 * appropriately.
1045 		 *
1046 		 * NOTE: This flag is only applicable to DEFAULT and SWAP
1047 		 *	 objects and will already be clear in other types
1048 		 *	 of objects, so a shared object lock is ok for
1049 		 *	 VNODE objects.
1050 		 */
1051 		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
1052 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
1053 		}
1054 	}
1055 	else if ((prev_entry != &map->header) &&
1056 		 (prev_entry->eflags == protoeflags) &&
1057 		 (prev_entry->end == start) &&
1058 		 (prev_entry->wired_count == 0) &&
1059 		 prev_entry->maptype == maptype &&
1060 		 maptype == VM_MAPTYPE_NORMAL &&
1061 		 ((prev_entry->object.vm_object == NULL) ||
1062 		  vm_object_coalesce(prev_entry->object.vm_object,
1063 				     OFF_TO_IDX(prev_entry->offset),
1064 				     (vm_size_t)(prev_entry->end - prev_entry->start),
1065 				     (vm_size_t)(end - prev_entry->end)))) {
1066 		/*
1067 		 * We were able to extend the object.  Determine if we
1068 		 * can extend the previous map entry to include the
1069 		 * new range as well.
1070 		 */
1071 		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
1072 		    (prev_entry->protection == prot) &&
1073 		    (prev_entry->max_protection == max)) {
1074 			map->size += (end - prev_entry->end);
1075 			prev_entry->end = end;
1076 			vm_map_simplify_entry(map, prev_entry, countp);
1077 			lwkt_reltoken(&map->token);
1078 			return (KERN_SUCCESS);
1079 		}
1080 
1081 		/*
1082 		 * If we can extend the object but cannot extend the
1083 		 * map entry, we have to create a new map entry.  We
1084 		 * must bump the ref count on the extended object to
1085 		 * account for it.  object may be NULL.
1086 		 *
1087 		 * XXX if object is NULL should we set offset to 0 here ?
1088 		 */
1089 		object = prev_entry->object.vm_object;
1090 		offset = prev_entry->offset +
1091 			(prev_entry->end - prev_entry->start);
1092 		if (object) {
1093 			vm_object_hold(object);
1094 			vm_object_chain_wait(object, 0);
1095 			vm_object_reference_locked(object);
1096 			must_drop = 1;
1097 			map_object = object;
1098 		}
1099 	}
1100 
1101 	/*
1102 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
1103 	 * in things like the buffer map where we manage kva but do not manage
1104 	 * backing objects.
1105 	 */
1106 
1107 	/*
1108 	 * Create a new entry
1109 	 */
1110 
1111 	new_entry = vm_map_entry_create(map, countp);
1112 	new_entry->start = start;
1113 	new_entry->end = end;
1114 
1115 	new_entry->maptype = maptype;
1116 	new_entry->eflags = protoeflags;
1117 	new_entry->object.map_object = map_object;
1118 	new_entry->aux.master_pde = 0;		/* in case size is different */
1119 	new_entry->aux.map_aux = map_aux;
1120 	new_entry->offset = offset;
1121 
1122 	new_entry->inheritance = VM_INHERIT_DEFAULT;
1123 	new_entry->protection = prot;
1124 	new_entry->max_protection = max;
1125 	new_entry->wired_count = 0;
1126 
1127 	/*
1128 	 * Insert the new entry into the list
1129 	 */
1130 
1131 	vm_map_entry_link(map, prev_entry, new_entry);
1132 	map->size += new_entry->end - new_entry->start;
1133 
1134 	/*
1135 	 * Update the free space hint.  Entries cannot overlap.
1136 	 * An exact comparison is needed to avoid matching
1137 	 * against the map->header.
1138 	 */
1139 	if ((map->first_free == prev_entry) &&
1140 	    (prev_entry->end == new_entry->start)) {
1141 		map->first_free = new_entry;
1142 	}
1143 
1144 #if 0
1145 	/*
1146 	 * Temporarily removed to avoid MAP_STACK panic, due to
1147 	 * MAP_STACK being a huge hack.  Will be added back in
1148 	 * when MAP_STACK (and the user stack mapping) is fixed.
1149 	 */
1150 	/*
1151 	 * It may be possible to simplify the entry
1152 	 */
1153 	vm_map_simplify_entry(map, new_entry, countp);
1154 #endif
1155 
1156 	/*
1157 	 * Try to pre-populate the page table.  Mappings governed by virtual
1158 	 * page tables cannot be prepopulated without a lot of work, so
1159 	 * don't try.
1160 	 */
1161 	if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
1162 	    maptype != VM_MAPTYPE_VPAGETABLE &&
1163 	    maptype != VM_MAPTYPE_UKSMAP) {
1164 		int dorelock = 0;
1165 		if (vm_map_relock_enable && (cow & MAP_PREFAULT_RELOCK)) {
1166 			dorelock = 1;
1167 			vm_object_lock_swap();
1168 			vm_object_drop(object);
1169 		}
1170 		pmap_object_init_pt(map->pmap, start, prot,
1171 				    object, OFF_TO_IDX(offset), end - start,
1172 				    cow & MAP_PREFAULT_PARTIAL);
1173 		if (dorelock) {
1174 			vm_object_hold(object);
1175 			vm_object_lock_swap();
1176 		}
1177 	}
1178 	if (must_drop)
1179 		vm_object_drop(object);
1180 
1181 	lwkt_reltoken(&map->token);
1182 	return (KERN_SUCCESS);
1183 }
1184 
1185 /*
1186  * Find sufficient space for `length' bytes in the given map, starting at
1187  * `start'.  Returns 0 on success, 1 on no space.
1188  *
1189  * This function will return an arbitrarily aligned pointer.  If no
1190  * particular alignment is required you should pass align as 1.  Note that
1191  * the map may return PAGE_SIZE aligned pointers if all the lengths used in
1192  * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
1193  * argument.
1194  *
1195  * 'align' should be a power of 2 but is not required to be.
1196  *
1197  * The map must be exclusively locked.
1198  * No other requirements.
1199  */
1200 int
1201 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1202 		 vm_size_t align, int flags, vm_offset_t *addr)
1203 {
1204 	vm_map_entry_t entry, next;
1205 	vm_offset_t end;
1206 	vm_offset_t align_mask;
1207 
1208 	if (start < map->min_offset)
1209 		start = map->min_offset;
1210 	if (start > map->max_offset)
1211 		return (1);
1212 
1213 	/*
1214 	 * If the alignment is not a power of 2 we will have to use
1215 	 * mod/division, so set align_mask to a special value.
1216 	 */
1217 	if ((align | (align - 1)) + 1 != (align << 1))
1218 		align_mask = (vm_offset_t)-1;
1219 	else
1220 		align_mask = align - 1;
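
	/*
	 * (Illustrative: for align = 0x1000 the test above computes
	 * (0x1000 | 0x0fff) + 1 == 0x2000 == align << 1, so the cheap
	 * mask path is taken with align_mask = 0x0fff.  For a
	 * non-power-of-2 value such as align = 0x3000 the two sides
	 * differ and the mod/division path is used.)
	 */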
1221 
1222 	/*
1223 	 * Look for the first possible address; if there's already something
1224 	 * at this address, we have to start after it.
1225 	 */
1226 	if (start == map->min_offset) {
1227 		if ((entry = map->first_free) != &map->header)
1228 			start = entry->end;
1229 	} else {
1230 		vm_map_entry_t tmp;
1231 
1232 		if (vm_map_lookup_entry(map, start, &tmp))
1233 			start = tmp->end;
1234 		entry = tmp;
1235 	}
1236 
1237 	/*
1238 	 * Look through the rest of the map, trying to fit a new region in the
1239 	 * gap between existing regions, or after the very last region.
1240 	 */
1241 	for (;; start = (entry = next)->end) {
1242 		/*
1243 		 * Adjust the proposed start by the requested alignment and
1244 		 * be sure that we didn't wrap the address.
1245 		 */
1246 		if (align_mask == (vm_offset_t)-1)
1247 			end = ((start + align - 1) / align) * align;
1248 		else
1249 			end = (start + align_mask) & ~align_mask;
1250 		if (end < start)
1251 			return (1);
1252 		start = end;
1253 		/*
1254 		 * Find the end of the proposed new region.  Be sure we didn't
1255 		 * go beyond the end of the map, or wrap around the address.
1256 		 * Then check to see if this is the last entry or if the
1257 		 * proposed end fits in the gap between this and the next
1258 		 * entry.
1259 		 */
1260 		end = start + length;
1261 		if (end > map->max_offset || end < start)
1262 			return (1);
1263 		next = entry->next;
1264 
1265 		/*
1266 		 * If the next entry's start address is beyond the desired
1267 		 * end address we may have found a good entry.
1268 		 *
1269 		 * If the next entry is a stack mapping we do not map into
1270 		 * the stack's reserved space.
1271 		 *
1272 		 * XXX continue to allow mapping into the stack's reserved
1273 		 * space if doing a MAP_STACK mapping inside a MAP_STACK
1274 		 * mapping, for backwards compatibility.  But the caller
1275 		 * really should use MAP_STACK | MAP_TRYFIXED if they
1276 		 * want to do that.
1277 		 */
1278 		if (next == &map->header)
1279 			break;
1280 		if (next->start >= end) {
1281 			if ((next->eflags & MAP_ENTRY_STACK) == 0)
1282 				break;
1283 			if (flags & MAP_STACK)
1284 				break;
1285 			if (next->start - next->aux.avail_ssize >= end)
1286 				break;
1287 		}
1288 	}
1289 	map->hint = entry;
1290 
1291 	/*
1292 	 * Grow the kernel_map if necessary.  pmap_growkernel() will panic
1293 	 * if it fails.  The kernel_map is locked and nothing can steal
1294 	 * our address space if pmap_growkernel() blocks.
1295 	 *
1296 	 * NOTE: This may be unconditionally called for kldload areas on
1297 	 *	 x86_64 because these do not bump kernel_vm_end (which would
1298 	 *	 fill 128G worth of page tables!).  Therefore we must not
1299 	 *	 retry.
1300 	 */
1301 	if (map == &kernel_map) {
1302 		vm_offset_t kstop;
1303 
1304 		kstop = round_page(start + length);
1305 		if (kstop > kernel_vm_end)
1306 			pmap_growkernel(start, kstop);
1307 	}
1308 	*addr = start;
1309 	return (0);
1310 }
1311 
1312 /*
1313  * vm_map_find finds an unallocated region in the target address map with
1314  * the given length and allocates it.  The search is defined to be first-fit
1315  * from the specified address; the region found is returned in the same
1316  * parameter.
1317  *
1318  * If object is non-NULL, ref count must be bumped by caller
1319  * prior to making call to account for the new entry.
1320  *
1321  * No requirements.  This function will lock the map temporarily.
1322  */
1323 int
1324 vm_map_find(vm_map_t map, void *map_object, void *map_aux,
1325 	    vm_ooffset_t offset, vm_offset_t *addr,
1326 	    vm_size_t length, vm_size_t align,
1327 	    boolean_t fitit,
1328 	    vm_maptype_t maptype,
1329 	    vm_prot_t prot, vm_prot_t max,
1330 	    int cow)
1331 {
1332 	vm_offset_t start;
1333 	vm_object_t object;
1334 	int result;
1335 	int count;
1336 
1337 	if (maptype == VM_MAPTYPE_UKSMAP)
1338 		object = NULL;
1339 	else
1340 		object = map_object;
1341 
1342 	start = *addr;
1343 
1344 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1345 	vm_map_lock(map);
1346 	if (object)
1347 		vm_object_hold_shared(object);
1348 	if (fitit) {
1349 		if (vm_map_findspace(map, start, length, align, 0, addr)) {
1350 			if (object)
1351 				vm_object_drop(object);
1352 			vm_map_unlock(map);
1353 			vm_map_entry_release(count);
1354 			return (KERN_NO_SPACE);
1355 		}
1356 		start = *addr;
1357 	}
1358 	result = vm_map_insert(map, &count, map_object, map_aux,
1359 			       offset, start, start + length,
1360 			       maptype, prot, max, cow);
1361 	if (object)
1362 		vm_object_drop(object);
1363 	vm_map_unlock(map);
1364 	vm_map_entry_release(count);
1365 
1366 	return (result);
1367 }
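
/*
 * Example (illustrative sketch): finding space for and mapping an
 * object anywhere in a map.  As noted above, a non-NULL object must
 * have had its ref count bumped by the caller beforehand:
 *
 *	vm_offset_t addr = vm_map_min(map);
 *
 *	if (vm_map_find(map, object, NULL, 0, &addr, size, PAGE_SIZE,
 *			TRUE, VM_MAPTYPE_NORMAL, VM_PROT_ALL, VM_PROT_ALL,
 *			0) != KERN_SUCCESS) {
 *		... failed; the caller must drop its object reference ...
 *	}
 */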
1368 
1369 /*
1370  * Simplify the given map entry by merging with either neighbor.  This
1371  * routine also has the ability to merge with both neighbors.
1372  *
1373  * This routine guarantees that the passed entry remains valid (though
1374  * possibly extended).  When merging, this routine may delete one or
1375  * both neighbors.  No action is taken on entries which have their
1376  * in-transition flag set.
1377  *
1378  * The map must be exclusively locked.
1379  */
1380 void
1381 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
1382 {
1383 	vm_map_entry_t next, prev;
1384 	vm_size_t prevsize, esize;
1385 
1386 	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1387 		++mycpu->gd_cnt.v_intrans_coll;
1388 		return;
1389 	}
1390 
1391 	if (entry->maptype == VM_MAPTYPE_SUBMAP)
1392 		return;
1393 	if (entry->maptype == VM_MAPTYPE_UKSMAP)
1394 		return;
1395 
1396 	prev = entry->prev;
1397 	if (prev != &map->header) {
1398 		prevsize = prev->end - prev->start;
1399 		if ( (prev->end == entry->start) &&
1400 		     (prev->maptype == entry->maptype) &&
1401 		     (prev->object.vm_object == entry->object.vm_object) &&
1402 		     (!prev->object.vm_object ||
1403 			(prev->offset + prevsize == entry->offset)) &&
1404 		     (prev->eflags == entry->eflags) &&
1405 		     (prev->protection == entry->protection) &&
1406 		     (prev->max_protection == entry->max_protection) &&
1407 		     (prev->inheritance == entry->inheritance) &&
1408 		     (prev->wired_count == entry->wired_count)) {
1409 			if (map->first_free == prev)
1410 				map->first_free = entry;
1411 			if (map->hint == prev)
1412 				map->hint = entry;
1413 			vm_map_entry_unlink(map, prev);
1414 			entry->start = prev->start;
1415 			entry->offset = prev->offset;
1416 			if (prev->object.vm_object)
1417 				vm_object_deallocate(prev->object.vm_object);
1418 			vm_map_entry_dispose(map, prev, countp);
1419 		}
1420 	}
1421 
1422 	next = entry->next;
1423 	if (next != &map->header) {
1424 		esize = entry->end - entry->start;
1425 		if ((entry->end == next->start) &&
1426 		    (next->maptype == entry->maptype) &&
1427 		    (next->object.vm_object == entry->object.vm_object) &&
1428 		     (!entry->object.vm_object ||
1429 			(entry->offset + esize == next->offset)) &&
1430 		    (next->eflags == entry->eflags) &&
1431 		    (next->protection == entry->protection) &&
1432 		    (next->max_protection == entry->max_protection) &&
1433 		    (next->inheritance == entry->inheritance) &&
1434 		    (next->wired_count == entry->wired_count)) {
1435 			if (map->first_free == next)
1436 				map->first_free = entry;
1437 			if (map->hint == next)
1438 				map->hint = entry;
1439 			vm_map_entry_unlink(map, next);
1440 			entry->end = next->end;
1441 			if (next->object.vm_object)
1442 				vm_object_deallocate(next->object.vm_object);
1443 			vm_map_entry_dispose(map, next, countp);
1444 	        }
1445 	}
1446 }
1447 
1448 /*
1449  * Asserts that the given entry begins at or after the specified address.
1450  * If necessary, it splits the entry into two.
1451  */
1452 #define vm_map_clip_start(map, entry, startaddr, countp)		\
1453 {									\
1454 	if (startaddr > entry->start)					\
1455 		_vm_map_clip_start(map, entry, startaddr, countp);	\
1456 }
1457 
1458 /*
1459  * This routine is called only when it is known that the entry must be split.
1460  *
1461  * The map must be exclusively locked.
1462  */
1463 static void
1464 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start,
1465 		   int *countp)
1466 {
1467 	vm_map_entry_t new_entry;
1468 
1469 	/*
1470 	 * Split off the front portion -- note that we must insert the new
1471 	 * entry BEFORE this one, so that this entry has the specified
1472 	 * starting address.
1473 	 */
1474 
1475 	vm_map_simplify_entry(map, entry, countp);
1476 
1477 	/*
1478 	 * If there is no object backing this entry, we might as well create
1479 	 * one now.  If we defer it, an object can get created after the map
1480 	 * is clipped, and individual objects will be created for the split-up
1481 	 * map.  This is a bit of a hack, but is also about the best place to
1482 	 * put this improvement.
1483 	 */
1484 	if (entry->object.vm_object == NULL && !map->system_map) {
1485 		vm_map_entry_allocate_object(entry);
1486 	}
1487 
1488 	new_entry = vm_map_entry_create(map, countp);
1489 	*new_entry = *entry;
1490 
1491 	new_entry->end = start;
1492 	entry->offset += (start - entry->start);
1493 	entry->start = start;
1494 
1495 	vm_map_entry_link(map, entry->prev, new_entry);
1496 
1497 	switch(entry->maptype) {
1498 	case VM_MAPTYPE_NORMAL:
1499 	case VM_MAPTYPE_VPAGETABLE:
1500 		if (new_entry->object.vm_object) {
1501 			vm_object_hold(new_entry->object.vm_object);
1502 			vm_object_chain_wait(new_entry->object.vm_object, 0);
1503 			vm_object_reference_locked(new_entry->object.vm_object);
1504 			vm_object_drop(new_entry->object.vm_object);
1505 		}
1506 		break;
1507 	default:
1508 		break;
1509 	}
1510 }
1511 
1512 /*
1513  * Asserts that the given entry ends at or before the specified address.
1514  * If necessary, it splits the entry into two.
1515  *
1516  * The map must be exclusively locked.
1517  */
1518 #define vm_map_clip_end(map, entry, endaddr, countp)		\
1519 {								\
1520 	if (endaddr < entry->end)				\
1521 		_vm_map_clip_end(map, entry, endaddr, countp);	\
1522 }
1523 
1524 /*
1525  * This routine is called only when it is known that the entry must be split.
1526  *
1527  * The map must be exclusively locked.
1528  */
1529 static void
1530 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end,
1531 		 int *countp)
1532 {
1533 	vm_map_entry_t new_entry;
1534 
1535 	/*
1536 	 * If there is no object backing this entry, we might as well create
1537 	 * one now.  If we defer it, an object can get created after the map
1538 	 * is clipped, and individual objects will be created for the split-up
1539 	 * map.  This is a bit of a hack, but is also about the best place to
1540 	 * put this improvement.
1541 	 */
1542 
1543 	if (entry->object.vm_object == NULL && !map->system_map) {
1544 		vm_map_entry_allocate_object(entry);
1545 	}
1546 
1547 	/*
1548 	 * Create a new entry and insert it AFTER the specified entry
1549 	 */
1550 
1551 	new_entry = vm_map_entry_create(map, countp);
1552 	*new_entry = *entry;
1553 
1554 	new_entry->start = entry->end = end;
1555 	new_entry->offset += (end - entry->start);
1556 
1557 	vm_map_entry_link(map, entry, new_entry);
1558 
1559 	switch(entry->maptype) {
1560 	case VM_MAPTYPE_NORMAL:
1561 	case VM_MAPTYPE_VPAGETABLE:
1562 		if (new_entry->object.vm_object) {
1563 			vm_object_hold(new_entry->object.vm_object);
1564 			vm_object_chain_wait(new_entry->object.vm_object, 0);
1565 			vm_object_reference_locked(new_entry->object.vm_object);
1566 			vm_object_drop(new_entry->object.vm_object);
1567 		}
1568 		break;
1569 	default:
1570 		break;
1571 	}
1572 }
1573 
1574 /*
1575  * Asserts that the starting and ending region addresses fall within the
1576  * valid range for the map.
1577  */
1578 #define	VM_MAP_RANGE_CHECK(map, start, end)	\
1579 {						\
1580 	if (start < vm_map_min(map))		\
1581 		start = vm_map_min(map);	\
1582 	if (end > vm_map_max(map))		\
1583 		end = vm_map_max(map);		\
1584 	if (start > end)			\
1585 		start = end;			\
1586 }
1587 
1588 /*
1589  * Used to block when an in-transition collision occurs.  The map
1590  * is unlocked for the sleep and relocked before the return.
1591  */
1592 void
1593 vm_map_transition_wait(vm_map_t map)
1594 {
1595 	tsleep_interlock(map, 0);
1596 	vm_map_unlock(map);
1597 	tsleep(map, PINTERLOCKED, "vment", 0);
1598 	vm_map_lock(map);
1599 }
1600 
1601 /*
1602  * When we do blocking operations with the map lock held it is
1603  * possible that a clip might have occurred on our in-transition entry,
1604  * requiring an adjustment to the entry in our loop.  These macros
1605  * help the pageable and clip_range code deal with the case.  The
1606  * conditional costs virtually nothing if no clipping has occurred.
1607  */
1608 
1609 #define CLIP_CHECK_BACK(entry, save_start)		\
1610     do {						\
1611 	    while (entry->start != save_start) {	\
1612 		    entry = entry->prev;		\
1613 		    KASSERT(entry != &map->header, ("bad entry clip")); \
1614 	    }						\
1615     } while(0)
1616 
1617 #define CLIP_CHECK_FWD(entry, save_end)			\
1618     do {						\
1619 	    while (entry->end != save_end) {		\
1620 		    entry = entry->next;		\
1621 		    KASSERT(entry != &map->header, ("bad entry clip")); \
1622 	    }						\
1623     } while(0)
1624 
1625 
1626 /*
1627  * Clip the specified range and return the base entry.  The
1628  * range may cover several entries starting at the returned base
1629  * and the first and last entry in the covering sequence will be
1630  * properly clipped to the requested start and end address.
1631  *
1632  * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
1633  * flag.
1634  *
1635  * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
1636  * covered by the requested range.
1637  *
1638  * The map must be exclusively locked on entry and will remain locked
1639  * on return. If no range exists or the range contains holes and you
1640  * specified that no holes were allowed, NULL will be returned.  This
1641  * routine may temporarily unlock the map in order to avoid a deadlock when
1642  * sleeping.
1643  */
1644 static
1645 vm_map_entry_t
1646 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end,
1647 		  int *countp, int flags)
1648 {
1649 	vm_map_entry_t start_entry;
1650 	vm_map_entry_t entry;
1651 
1652 	/*
1653 	 * Locate the entry and effect initial clipping.  The in-transition
1654 	 * case does not occur very often so do not try to optimize it.
1655 	 */
1656 again:
1657 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
1658 		return (NULL);
1659 	entry = start_entry;
1660 	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1661 		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1662 		++mycpu->gd_cnt.v_intrans_coll;
1663 		++mycpu->gd_cnt.v_intrans_wait;
1664 		vm_map_transition_wait(map);
1665 		/*
1666 		 * entry and/or start_entry may have been clipped while
1667 		 * we slept, or may have gone away entirely.  We have
1668 		 * to restart from the lookup.
1669 		 */
1670 		goto again;
1671 	}
1672 
1673 	/*
1674 	 * Since we hold an exclusive map lock we do not have to restart
1675 	 * after clipping, even though clipping may block in zalloc.
1676 	 */
1677 	vm_map_clip_start(map, entry, start, countp);
1678 	vm_map_clip_end(map, entry, end, countp);
1679 	entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1680 
1681 	/*
1682 	 * Scan entries covered by the range.  When working on the next
1683 	 * entry a restart need only re-loop on the current entry which
1684 	 * we have already locked, since 'next' may have changed.  Also,
1685 	 * even though entry is safe, it may have been clipped so we
1686 	 * have to iterate forwards through the clip after sleeping.
1687 	 */
1688 	while (entry->next != &map->header && entry->next->start < end) {
1689 		vm_map_entry_t next = entry->next;
1690 
1691 		if (flags & MAP_CLIP_NO_HOLES) {
1692 			if (next->start > entry->end) {
1693 				vm_map_unclip_range(map, start_entry,
1694 					start, entry->end, countp, flags);
1695 				return(NULL);
1696 			}
1697 		}
1698 
1699 		if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
1700 			vm_offset_t save_end = entry->end;
1701 			next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1702 			++mycpu->gd_cnt.v_intrans_coll;
1703 			++mycpu->gd_cnt.v_intrans_wait;
1704 			vm_map_transition_wait(map);
1705 
1706 			/*
1707 			 * clips might have occurred while we blocked.
1708 			 */
1709 			CLIP_CHECK_FWD(entry, save_end);
1710 			CLIP_CHECK_BACK(start_entry, start);
1711 			continue;
1712 		}
1713 		/*
1714 		 * No restart necessary even though clip_end may block, we
1715 		 * are holding the map lock.
1716 		 */
1717 		vm_map_clip_end(map, next, end, countp);
1718 		next->eflags |= MAP_ENTRY_IN_TRANSITION;
1719 		entry = next;
1720 	}
1721 	if (flags & MAP_CLIP_NO_HOLES) {
1722 		if (entry->end != end) {
1723 			vm_map_unclip_range(map, start_entry,
1724 				start, entry->end, countp, flags);
1725 			return(NULL);
1726 		}
1727 	}
1728 	return(start_entry);
1729 }
1730 
1731 /*
1732  * Undo the effect of vm_map_clip_range().  You should pass the same
1733  * flags and the same range that you passed to vm_map_clip_range().
1734  * This code will clear the in-transition flag on the entries and
1735  * wake up anyone waiting.  This code will also simplify the sequence
1736  * and attempt to merge it with entries before and after the sequence.
1737  *
1738  * The map must be locked on entry and will remain locked on return.
1739  *
1740  * Note that you should also pass the start_entry returned by
1741  * vm_map_clip_range().  However, if you block between the two calls
1742  * with the map unlocked please be aware that the start_entry may
1743  * have been clipped and you may need to scan it backwards to find
1744  * the entry corresponding with the original start address.  You are
1745  * responsible for this, vm_map_unclip_range() expects the correct
1746  * start_entry to be passed to it and will KASSERT otherwise.
1747  */
1748 static
1749 void
1750 vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry,
1751 		    vm_offset_t start, vm_offset_t end,
1752 		    int *countp, int flags)
1753 {
1754 	vm_map_entry_t entry;
1755 
1756 	entry = start_entry;
1757 
1758 	KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
1759 	while (entry != &map->header && entry->start < end) {
1760 		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
1761 			("in-transition flag not set during unclip on: %p",
1762 			entry));
1763 		KASSERT(entry->end <= end,
1764 			("unclip_range: tail wasn't clipped"));
1765 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1766 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1767 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1768 			wakeup(map);
1769 		}
1770 		entry = entry->next;
1771 	}
1772 
1773 	/*
1774 	 * Simplification does not block so there is no restart case.
1775 	 */
1776 	entry = start_entry;
1777 	while (entry != &map->header && entry->start < end) {
1778 		vm_map_simplify_entry(map, entry, countp);
1779 		entry = entry->next;
1780 	}
1781 }
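
/*
 * Typical pairing of the two routines above, distilled from the
 * wiring code later in this file (a sketch; real callers must also
 * handle a NULL return from vm_map_clip_range()):
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	start_entry = vm_map_clip_range(map, start, end, &count,
 *					MAP_CLIP_NO_HOLES);
 *	... operate on the in-transition entries ...
 *	vm_map_unclip_range(map, start_entry, start, end, &count,
 *			    MAP_CLIP_NO_HOLES);
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */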
1782 
1783 /*
1784  * Mark the given range as handled by a subordinate map.
1785  *
1786  * This range must have been created with vm_map_find(), and no other
1787  * operations may have been performed on this range prior to calling
1788  * vm_map_submap().
1789  *
1790  * Submappings cannot be removed.
1791  *
1792  * No requirements.
1793  */
1794 int
1795 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
1796 {
1797 	vm_map_entry_t entry;
1798 	int result = KERN_INVALID_ARGUMENT;
1799 	int count;
1800 
1801 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1802 	vm_map_lock(map);
1803 
1804 	VM_MAP_RANGE_CHECK(map, start, end);
1805 
1806 	if (vm_map_lookup_entry(map, start, &entry)) {
1807 		vm_map_clip_start(map, entry, start, &count);
1808 	} else {
1809 		entry = entry->next;
1810 	}
1811 
1812 	vm_map_clip_end(map, entry, end, &count);
1813 
1814 	if ((entry->start == start) && (entry->end == end) &&
1815 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1816 	    (entry->object.vm_object == NULL)) {
1817 		entry->object.sub_map = submap;
1818 		entry->maptype = VM_MAPTYPE_SUBMAP;
1819 		result = KERN_SUCCESS;
1820 	}
1821 	vm_map_unlock(map);
1822 	vm_map_entry_release(count);
1823 
1824 	return (result);
1825 }
1826 
1827 /*
1828  * Sets the protection of the specified address region in the target map.
1829  * If "set_max" is specified, the maximum protection is to be set;
1830  * otherwise, only the current protection is affected.
1831  *
1832  * The protection is not applicable to submaps, but is applicable to normal
1833  * maps and maps governed by virtual page tables.  For example, when operating
1834  * on a virtual page table our protection basically controls how COW occurs
1835  * on the backing object, whereas the virtual page table itself is an
1836  * abstraction maintained for userland.
1837  *
1838  * No requirements.
1839  */
1840 int
1841 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1842 	       vm_prot_t new_prot, boolean_t set_max)
1843 {
1844 	vm_map_entry_t current;
1845 	vm_map_entry_t entry;
1846 	int count;
1847 
1848 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1849 	vm_map_lock(map);
1850 
1851 	VM_MAP_RANGE_CHECK(map, start, end);
1852 
1853 	if (vm_map_lookup_entry(map, start, &entry)) {
1854 		vm_map_clip_start(map, entry, start, &count);
1855 	} else {
1856 		entry = entry->next;
1857 	}
1858 
1859 	/*
1860 	 * Make a first pass to check for protection violations.
1861 	 */
1862 	current = entry;
1863 	while ((current != &map->header) && (current->start < end)) {
1864 		if (current->maptype == VM_MAPTYPE_SUBMAP) {
1865 			vm_map_unlock(map);
1866 			vm_map_entry_release(count);
1867 			return (KERN_INVALID_ARGUMENT);
1868 		}
1869 		if ((new_prot & current->max_protection) != new_prot) {
1870 			vm_map_unlock(map);
1871 			vm_map_entry_release(count);
1872 			return (KERN_PROTECTION_FAILURE);
1873 		}
1874 		current = current->next;
1875 	}
1876 
1877 	/*
1878 	 * Go back and fix up protections. [Note that clipping is not
1879 	 * necessary the second time.]
1880 	 */
1881 	current = entry;
1882 
1883 	while ((current != &map->header) && (current->start < end)) {
1884 		vm_prot_t old_prot;
1885 
1886 		vm_map_clip_end(map, current, end, &count);
1887 
1888 		old_prot = current->protection;
1889 		if (set_max) {
1890 			current->protection =
1891 			    (current->max_protection = new_prot) &
1892 			    old_prot;
1893 		} else {
1894 			current->protection = new_prot;
1895 		}
1896 
1897 		/*
1898 		 * Update physical map if necessary. Worry about copy-on-write
1899 		 * here -- CHECK THIS XXX
1900 		 */
1901 
1902 		if (current->protection != old_prot) {
1903 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1904 							VM_PROT_ALL)
1905 
1906 			pmap_protect(map->pmap, current->start,
1907 			    current->end,
1908 			    current->protection & MASK(current));
1909 #undef	MASK
1910 		}
1911 
1912 		vm_map_simplify_entry(map, current, &count);
1913 
1914 		current = current->next;
1915 	}
1916 
1917 	vm_map_unlock(map);
1918 	vm_map_entry_release(count);
1919 	return (KERN_SUCCESS);
1920 }
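
/*
 * For example (a sketch): an mprotect()-style call that lowers the
 * current protection on a range reduces to
 *
 *	vm_map_protect(map, addr, addr + size, VM_PROT_READ, FALSE);
 *
 * while passing TRUE for set_max instead replaces max_protection and,
 * per the set_max branch above, leaves the current protection as
 * (new_prot & old current protection).
 */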
1921 
1922 /*
1923  * This routine traverses a process's map handling the madvise
1924  * system call.  Advisories are classified as either those affecting
1925  * the vm_map_entry structure or those affecting the underlying
1926  * objects.
1927  *
1928  * The <value> argument is used for extended madvise calls.
1929  *
1930  * No requirements.
1931  */
1932 int
1933 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end,
1934 	       int behav, off_t value)
1935 {
1936 	vm_map_entry_t current, entry;
1937 	int modify_map = 0;
1938 	int error = 0;
1939 	int count;
1940 
1941 	/*
1942 	 * Some madvise calls directly modify the vm_map_entry, in which case
1943 	 * we need to use an exclusive lock on the map and we need to perform
1944 	 * various clipping operations.  Otherwise we only need a read-lock
1945 	 * on the map.
1946 	 */
1947 
1948 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1949 
1950 	switch(behav) {
1951 	case MADV_NORMAL:
1952 	case MADV_SEQUENTIAL:
1953 	case MADV_RANDOM:
1954 	case MADV_NOSYNC:
1955 	case MADV_AUTOSYNC:
1956 	case MADV_NOCORE:
1957 	case MADV_CORE:
1958 	case MADV_SETMAP:
1959 	case MADV_INVAL:
1960 		modify_map = 1;
1961 		vm_map_lock(map);
1962 		break;
1963 	case MADV_WILLNEED:
1964 	case MADV_DONTNEED:
1965 	case MADV_FREE:
1966 		vm_map_lock_read(map);
1967 		break;
1968 	default:
1969 		vm_map_entry_release(count);
1970 		return (EINVAL);
1971 	}
1972 
1973 	/*
1974 	 * Locate starting entry and clip if necessary.
1975 	 */
1976 
1977 	VM_MAP_RANGE_CHECK(map, start, end);
1978 
1979 	if (vm_map_lookup_entry(map, start, &entry)) {
1980 		if (modify_map)
1981 			vm_map_clip_start(map, entry, start, &count);
1982 	} else {
1983 		entry = entry->next;
1984 	}
1985 
1986 	if (modify_map) {
1987 		/*
1988 		 * madvise behaviors that are implemented in the vm_map_entry.
1989 		 *
1990 		 * We clip the vm_map_entry so that behavioral changes are
1991 		 * limited to the specified address range.
1992 		 */
1993 		for (current = entry;
1994 		     (current != &map->header) && (current->start < end);
1995 		     current = current->next
1996 		) {
1997 			if (current->maptype == VM_MAPTYPE_SUBMAP)
1998 				continue;
1999 
2000 			vm_map_clip_end(map, current, end, &count);
2001 
2002 			switch (behav) {
2003 			case MADV_NORMAL:
2004 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2005 				break;
2006 			case MADV_SEQUENTIAL:
2007 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2008 				break;
2009 			case MADV_RANDOM:
2010 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2011 				break;
2012 			case MADV_NOSYNC:
2013 				current->eflags |= MAP_ENTRY_NOSYNC;
2014 				break;
2015 			case MADV_AUTOSYNC:
2016 				current->eflags &= ~MAP_ENTRY_NOSYNC;
2017 				break;
2018 			case MADV_NOCORE:
2019 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
2020 				break;
2021 			case MADV_CORE:
2022 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2023 				break;
2024 			case MADV_INVAL:
2025 				/*
2026 				 * Invalidate the related pmap entries, used
2027 				 * to flush portions of the real kernel's
2028 				 * pmap when the caller has removed or
2029 				 * modified existing mappings in a virtual
2030 				 * page table.
2031 				 */
2032 				pmap_remove(map->pmap,
2033 					    current->start, current->end);
2034 				break;
2035 			case MADV_SETMAP:
2036 				/*
2037 				 * Set the page directory page for a map
2038 				 * governed by a virtual page table.  Mark
2039 				 * the entry as being governed by a virtual
2040 				 * page table if it is not.
2041 				 *
2042 				 * XXX the page directory page is stored
2043 				 * in the avail_ssize field of the map_entry.
2044 				 *
2045 				 * XXX the map simplification code does not
2046 				 * compare this field so weird things may
2047 				 * happen if you do not apply this function
2048 				 * to the entire mapping governed by the
2049 				 * virtual page table.
2050 				 */
2051 				if (current->maptype != VM_MAPTYPE_VPAGETABLE) {
2052 					error = EINVAL;
2053 					break;
2054 				}
2055 				current->aux.master_pde = value;
2056 				pmap_remove(map->pmap,
2057 					    current->start, current->end);
2058 				break;
2059 			default:
2060 				error = EINVAL;
2061 				break;
2062 			}
2063 			vm_map_simplify_entry(map, current, &count);
2064 		}
2065 		vm_map_unlock(map);
2066 	} else {
2067 		vm_pindex_t pindex;
2068 		int count;
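		/* note: shadows the entry-reserve "count" at function top */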
2069 
2070 		/*
2071 		 * madvise behaviors that are implemented in the underlying
2072 		 * vm_object.
2073 		 *
2074 		 * Since we don't clip the vm_map_entry, we have to clip
2075 		 * the vm_object pindex and count.
2076 		 *
2077 		 * NOTE!  We currently do not support these functions on
2078 		 * virtual page tables.
2079 		 */
2080 		for (current = entry;
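		/*
		 * For example (a sketch): if the advised range starts
		 * three pages into an entry, pindex is advanced by
		 * atop(start - current->start) == 3 and count is reduced
		 * by the same amount, so vm_object_madvise() sees only
		 * the pages the caller actually named.
		 */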
2081 		     (current != &map->header) && (current->start < end);
2082 		     current = current->next
2083 		) {
2084 			vm_offset_t useStart;
2085 
2086 			if (current->maptype != VM_MAPTYPE_NORMAL)
2087 				continue;
2088 
2089 			pindex = OFF_TO_IDX(current->offset);
2090 			count = atop(current->end - current->start);
2091 			useStart = current->start;
2092 
2093 			if (current->start < start) {
2094 				pindex += atop(start - current->start);
2095 				count -= atop(start - current->start);
2096 				useStart = start;
2097 			}
2098 			if (current->end > end)
2099 				count -= atop(current->end - end);
2100 
2101 			if (count <= 0)
2102 				continue;
2103 
2104 			vm_object_madvise(current->object.vm_object,
2105 					  pindex, count, behav);
2106 
2107 			/*
2108 			 * Try to populate the page table.  Mappings governed
2109 			 * by virtual page tables cannot be pre-populated
2110 			 * without a lot of work so don't try.
2111 			 */
2112 			if (behav == MADV_WILLNEED &&
2113 			    current->maptype != VM_MAPTYPE_VPAGETABLE) {
2114 				pmap_object_init_pt(
2115 				    map->pmap,
2116 				    useStart,
2117 				    current->protection,
2118 				    current->object.vm_object,
2119 				    pindex,
2120 				    (count << PAGE_SHIFT),
2121 				    MAP_PREFAULT_MADVISE
2122 				);
2123 			}
2124 		}
2125 		vm_map_unlock_read(map);
2126 	}
2127 	vm_map_entry_release(count);
2128 	return(error);
2129 }
2130 
2131 
2132 /*
2133  * Sets the inheritance of the specified address range in the target map.
2134  * Inheritance affects how the map will be shared with child maps at the
2135  * time of vm_map_fork.
2136  */
2137 int
2138 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2139 	       vm_inherit_t new_inheritance)
2140 {
2141 	vm_map_entry_t entry;
2142 	vm_map_entry_t temp_entry;
2143 	int count;
2144 
2145 	switch (new_inheritance) {
2146 	case VM_INHERIT_NONE:
2147 	case VM_INHERIT_COPY:
2148 	case VM_INHERIT_SHARE:
2149 		break;
2150 	default:
2151 		return (KERN_INVALID_ARGUMENT);
2152 	}
2153 
2154 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2155 	vm_map_lock(map);
2156 
2157 	VM_MAP_RANGE_CHECK(map, start, end);
2158 
2159 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2160 		entry = temp_entry;
2161 		vm_map_clip_start(map, entry, start, &count);
2162 	} else
2163 		entry = temp_entry->next;
2164 
2165 	while ((entry != &map->header) && (entry->start < end)) {
2166 		vm_map_clip_end(map, entry, end, &count);
2167 
2168 		entry->inheritance = new_inheritance;
2169 
2170 		vm_map_simplify_entry(map, entry, &count);
2171 
2172 		entry = entry->next;
2173 	}
2174 	vm_map_unlock(map);
2175 	vm_map_entry_release(count);
2176 	return (KERN_SUCCESS);
2177 }
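
/*
 * E.g. the minherit(2) path reduces to roughly (a sketch):
 *
 *	vm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
 *		       VM_INHERIT_SHARE);
 */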
2178 
2179 /*
2180  * Implement the semantics of mlock and munlock (new_pageable != 0 unwires)
2181  */
2182 int
2183 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
2184 	      boolean_t new_pageable)
2185 {
2186 	vm_map_entry_t entry;
2187 	vm_map_entry_t start_entry;
2188 	vm_offset_t end;
2189 	int rv = KERN_SUCCESS;
2190 	int count;
2191 
2192 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2193 	vm_map_lock(map);
2194 	VM_MAP_RANGE_CHECK(map, start, real_end);
2195 	end = real_end;
2196 
2197 	start_entry = vm_map_clip_range(map, start, end, &count,
2198 					MAP_CLIP_NO_HOLES);
2199 	if (start_entry == NULL) {
2200 		vm_map_unlock(map);
2201 		vm_map_entry_release(count);
2202 		return (KERN_INVALID_ADDRESS);
2203 	}
2204 
2205 	if (new_pageable == 0) {
2206 		entry = start_entry;
2207 		while ((entry != &map->header) && (entry->start < end)) {
2208 			vm_offset_t save_start;
2209 			vm_offset_t save_end;
2210 
2211 			/*
2212 			 * Already user wired or hard wired (trivial cases)
2213 			 */
2214 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
2215 				entry = entry->next;
2216 				continue;
2217 			}
2218 			if (entry->wired_count != 0) {
2219 				entry->wired_count++;
2220 				entry->eflags |= MAP_ENTRY_USER_WIRED;
2221 				entry = entry->next;
2222 				continue;
2223 			}
2224 
2225 			/*
2226 			 * A new wiring requires instantiation of appropriate
2227 			 * management structures and the faulting in of the
2228 			 * page.
2229 			 */
2230 			if (entry->maptype == VM_MAPTYPE_NORMAL ||
2231 			    entry->maptype == VM_MAPTYPE_VPAGETABLE) {
2232 				int copyflag = entry->eflags &
2233 					       MAP_ENTRY_NEEDS_COPY;
2234 				if (copyflag && ((entry->protection &
2235 						  VM_PROT_WRITE) != 0)) {
2236 					vm_map_entry_shadow(entry, 0);
2237 				} else if (entry->object.vm_object == NULL &&
2238 					   !map->system_map) {
2239 					vm_map_entry_allocate_object(entry);
2240 				}
2241 			}
2242 			entry->wired_count++;
2243 			entry->eflags |= MAP_ENTRY_USER_WIRED;
2244 
2245 			/*
2246 			 * Now fault in the area.  Note that vm_fault_wire()
2247 			 * may release the map lock temporarily, it will be
2248 			 * relocked on return.  The in-transition
2249 			 * flag protects the entries.
2250 			 */
2251 			save_start = entry->start;
2252 			save_end = entry->end;
2253 			rv = vm_fault_wire(map, entry, TRUE, 0);
2254 			if (rv) {
2255 				CLIP_CHECK_BACK(entry, save_start);
2256 				for (;;) {
2257 					KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
2258 					entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2259 					entry->wired_count = 0;
2260 					if (entry->end == save_end)
2261 						break;
2262 					entry = entry->next;
2263 					KASSERT(entry != &map->header, ("bad entry clip during backout"));
2264 				}
2265 				end = save_start;	/* unwire the rest */
2266 				break;
2267 			}
2268 			/*
2269 			 * note that even though the entry might have been
2270 			 * clipped, the USER_WIRED flag we set prevents
2271 			 * duplication so we do not have to do a
2272 			 * clip check.
2273 			 */
2274 			entry = entry->next;
2275 		}
2276 
2277 		/*
2278 		 * If we failed fall through to the unwiring section to
2279 		 * unwire what we had wired so far.  'end' has already
2280 		 * been adjusted.
2281 		 */
2282 		if (rv)
2283 			new_pageable = 1;
2284 
2285 		/*
2286 		 * start_entry might have been clipped if we unlocked the
2287 		 * map and blocked.  No matter how clipped it has gotten
2288 		 * there should be a fragment that is on our start boundary.
2289 		 */
2290 		CLIP_CHECK_BACK(start_entry, start);
2291 	}
2292 
2293 	/*
2294 	 * Deal with the unwiring case.
2295 	 */
2296 	if (new_pageable) {
2297 		/*
2298 		 * This is the unwiring case.  We must first ensure that the
2299 		 * range to be unwired is really wired down.  We know there
2300 		 * are no holes.
2301 		 */
2302 		entry = start_entry;
2303 		while ((entry != &map->header) && (entry->start < end)) {
2304 			if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2305 				rv = KERN_INVALID_ARGUMENT;
2306 				goto done;
2307 			}
2308 			KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
2309 			entry = entry->next;
2310 		}
2311 
2312 		/*
2313 		 * Now decrement the wiring count for each region. If a region
2314 		 * becomes completely unwired, unwire its physical pages and
2315 		 * mappings.
2316 		 */
2317 		/*
2318 		 * The map entries are processed in a loop, checking to
2319 		 * make sure the entry is wired and asserting it has a wired
2320 		 * count.  However, another loop was once inserted more-or-less
2321 		 * in the middle of the unwiring path, picking up the "entry"
2322 		 * loop variable from the first loop without first resetting it
2323 		 * to start_entry.  That second loop was never entered and the
2324 		 * pages backing the entries were never unwired, leaking wired
2325 		 * pages; hence the explicit reset of "entry" below.
2326 		 */
2327 		entry = start_entry;
2328 		while ((entry != &map->header) && (entry->start < end)) {
2329 			KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED,
2330 				("expected USER_WIRED on entry %p", entry));
2331 			entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2332 			entry->wired_count--;
2333 			if (entry->wired_count == 0)
2334 				vm_fault_unwire(map, entry);
2335 			entry = entry->next;
2336 		}
2337 	}
2338 done:
2339 	vm_map_unclip_range(map, start_entry, start, real_end, &count,
2340 		MAP_CLIP_NO_HOLES);
2341 	map->timestamp++;
2342 	vm_map_unlock(map);
2343 	vm_map_entry_release(count);
2344 	return (rv);
2345 }
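
/*
 * E.g. mlock(2) and munlock(2) reduce to roughly (a sketch):
 *
 *	vm_map_unwire(&p->p_vmspace->vm_map, addr, addr + size, FALSE);
 *	vm_map_unwire(&p->p_vmspace->vm_map, addr, addr + size, TRUE);
 *
 * respectively, with new_pageable selecting between the wiring and
 * unwiring paths as described above.
 */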
2346 
2347 /*
2348  * Sets the pageability of the specified address range in the target map.
2349  * Regions specified as not pageable require locked-down physical
2350  * memory and physical page maps.
2351  *
2352  * The map must not be locked, but a reference must remain to the map
2353  * throughout the call.
2354  *
2355  * This function may be called via the zalloc path and must properly
2356  * reserve map entries for kernel_map.
2357  *
2358  * No requirements.
2359  */
2360 int
2361 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
2362 {
2363 	vm_map_entry_t entry;
2364 	vm_map_entry_t start_entry;
2365 	vm_offset_t end;
2366 	int rv = KERN_SUCCESS;
2367 	int count;
2368 
2369 	if (kmflags & KM_KRESERVE)
2370 		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
2371 	else
2372 		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2373 	vm_map_lock(map);
2374 	VM_MAP_RANGE_CHECK(map, start, real_end);
2375 	end = real_end;
2376 
2377 	start_entry = vm_map_clip_range(map, start, end, &count,
2378 					MAP_CLIP_NO_HOLES);
2379 	if (start_entry == NULL) {
2380 		vm_map_unlock(map);
2381 		rv = KERN_INVALID_ADDRESS;
2382 		goto failure;
2383 	}
2384 	if ((kmflags & KM_PAGEABLE) == 0) {
2385 		/*
2386 		 * Wiring.
2387 		 *
2388 		 * 1.  Holding the write lock, we create any shadow or zero-fill
2389 		 * objects that need to be created. Then we clip each map
2390 		 * entry to the region to be wired and increment its wiring
2391 		 * count.  We create objects before clipping the map entries
2392 		 * to avoid object proliferation.
2393 		 *
2394 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
2395 		 * fault in the pages for any newly wired area (wired_count is
2396 		 * 1).
2397 		 *
2398 		 * Downgrading to a read lock for vm_fault_wire avoids a
2399 		 * possible deadlock with another process that may have faulted
2400 		 * on one of the pages to be wired (it would mark the page busy,
2401 		 * blocking us, then in turn block on the map lock that we
2402 		 * hold).  Because of problems in the recursive lock package,
2403 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
2404 		 * any actions that require the write lock must be done
2405 		 * beforehand.  Because we keep the read lock on the map, the
2406 		 * copy-on-write status of the entries we modify here cannot
2407 		 * change.
2408 		 */
2409 		entry = start_entry;
2410 		while ((entry != &map->header) && (entry->start < end)) {
2411 			/*
2412 			 * Trivial case if the entry is already wired
2413 			 */
2414 			if (entry->wired_count) {
2415 				entry->wired_count++;
2416 				entry = entry->next;
2417 				continue;
2418 			}
2419 
2420 			/*
2421 			 * The entry is being newly wired, we have to setup
2422 			 * appropriate management structures.  A shadow
2423 			 * object is required for a copy-on-write region,
2424 			 * or a normal object for a zero-fill region.  We
2425 			 * do not have to do this for entries that point to sub
2426 			 * maps because we won't hold the lock on the sub map.
2427 			 */
2428 			if (entry->maptype == VM_MAPTYPE_NORMAL ||
2429 			    entry->maptype == VM_MAPTYPE_VPAGETABLE) {
2430 				int copyflag = entry->eflags &
2431 					       MAP_ENTRY_NEEDS_COPY;
2432 				if (copyflag && ((entry->protection &
2433 						  VM_PROT_WRITE) != 0)) {
2434 					vm_map_entry_shadow(entry, 0);
2435 				} else if (entry->object.vm_object == NULL &&
2436 					   !map->system_map) {
2437 					vm_map_entry_allocate_object(entry);
2438 				}
2439 			}
2440 
2441 			entry->wired_count++;
2442 			entry = entry->next;
2443 		}
2444 
2445 		/*
2446 		 * Pass 2.
2447 		 */
2448 
2449 		/*
2450 		 * HACK HACK HACK HACK
2451 		 *
2452 		 * vm_fault_wire() temporarily unlocks the map to avoid
2453 		 * deadlocks.  The in-transition flag set by the
2454 		 * vm_map_clip_range() call should protect us from changes
2455 		 * while the map is unlocked.
2456 		 *
2457 		 * NOTE: Previously this comment stated that clipping might
2458 		 *	 still occur while the entry is unlocked, but from
2459 		 *	 what I can tell it actually cannot.
2460 		 *
2461 		 *	 It is unclear whether the CLIP_CHECK_*() calls
2462 		 *	 are still needed but we keep them in anyway.
2463 		 *
2464 		 * HACK HACK HACK HACK
2465 		 */
2466 
2467 		entry = start_entry;
2468 		while (entry != &map->header && entry->start < end) {
2469 			/*
2470 			 * If vm_fault_wire fails for any page we need to undo
2471 			 * what has been done.  We decrement the wiring count
2472 			 * for those pages which have not yet been wired (now)
2473 			 * and unwire those that have (later).
2474 			 */
2475 			vm_offset_t save_start = entry->start;
2476 			vm_offset_t save_end = entry->end;
2477 
2478 			if (entry->wired_count == 1)
2479 				rv = vm_fault_wire(map, entry, FALSE, kmflags);
2480 			if (rv) {
2481 				CLIP_CHECK_BACK(entry, save_start);
2482 				for (;;) {
2483 					KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
2484 					entry->wired_count = 0;
2485 					if (entry->end == save_end)
2486 						break;
2487 					entry = entry->next;
2488 					KASSERT(entry != &map->header, ("bad entry clip during backout"));
2489 				}
2490 				end = save_start;
2491 				break;
2492 			}
2493 			CLIP_CHECK_FWD(entry, save_end);
2494 			entry = entry->next;
2495 		}
2496 
2497 		/*
2498 		 * If a failure occurred, undo everything by falling through
2499 		 * to the unwiring code.  'end' has already been adjusted
2500 		 * appropriately.
2501 		 */
2502 		if (rv)
2503 			kmflags |= KM_PAGEABLE;
2504 
2505 		/*
2506 		 * start_entry is still IN_TRANSITION but may have been
2507 		 * clipped since vm_fault_wire() unlocks and relocks the
2508 		 * map.  No matter how clipped it has gotten there should
2509 		 * be a fragment that is on our start boundary.
2510 		 */
2511 		CLIP_CHECK_BACK(start_entry, start);
2512 	}
2513 
2514 	if (kmflags & KM_PAGEABLE) {
2515 		/*
2516 		 * This is the unwiring case.  We must first ensure that the
2517 		 * range to be unwired is really wired down.  We know there
2518 		 * are no holes.
2519 		 */
2520 		entry = start_entry;
2521 		while ((entry != &map->header) && (entry->start < end)) {
2522 			if (entry->wired_count == 0) {
2523 				rv = KERN_INVALID_ARGUMENT;
2524 				goto done;
2525 			}
2526 			entry = entry->next;
2527 		}
2528 
2529 		/*
2530 		 * Now decrement the wiring count for each region. If a region
2531 		 * becomes completely unwired, unwire its physical pages and
2532 		 * mappings.
2533 		 */
2534 		entry = start_entry;
2535 		while ((entry != &map->header) && (entry->start < end)) {
2536 			entry->wired_count--;
2537 			if (entry->wired_count == 0)
2538 				vm_fault_unwire(map, entry);
2539 			entry = entry->next;
2540 		}
2541 	}
2542 done:
2543 	vm_map_unclip_range(map, start_entry, start, real_end,
2544 			    &count, MAP_CLIP_NO_HOLES);
2545 	map->timestamp++;
2546 	vm_map_unlock(map);
2547 failure:
2548 	if (kmflags & KM_KRESERVE)
2549 		vm_map_entry_krelease(count);
2550 	else
2551 		vm_map_entry_release(count);
2552 	return (rv);
2553 }
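
/*
 * E.g. a kernel caller wires a newly allocated range with roughly
 * (a sketch):
 *
 *	rv = vm_map_wire(map, addr, addr + size, 0);
 *
 * passing KM_PAGEABLE in kmflags unwires instead, and KM_KRESERVE
 * selects the kernel reserve entry pool to avoid recursing through
 * the zalloc path noted above.
 */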
2554 
2555 /*
2556  * Mark a newly allocated address range as wired but do not fault in
2557  * the pages.  The caller is expected to load the pages into the object.
2558  *
2559  * The map must be locked on entry and will remain locked on return.
2560  * No other requirements.
2561  */
2562 void
2563 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size,
2564 		       int *countp)
2565 {
2566 	vm_map_entry_t scan;
2567 	vm_map_entry_t entry;
2568 
2569 	entry = vm_map_clip_range(map, addr, addr + size,
2570 				  countp, MAP_CLIP_NO_HOLES);
2571 	for (scan = entry;
2572 	     scan != &map->header && scan->start < addr + size;
2573 	     scan = scan->next) {
2574 	    KKASSERT(scan->wired_count == 0);
2575 	    scan->wired_count = 1;
2576 	}
2577 	vm_map_unclip_range(map, entry, addr, addr + size,
2578 			    countp, MAP_CLIP_NO_HOLES);
2579 }
2580 
2581 /*
2582  * Push any dirty cached pages in the address range to their pager.
2583  * If syncio is TRUE, dirty pages are written synchronously.
2584  * If invalidate is TRUE, any cached pages are freed as well.
2585  *
2586  * This routine is called by sys_msync()
2587  *
2588  * Returns an error if any part of the specified range is not mapped.
2589  *
2590  * No requirements.
2591  */
2592 int
2593 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end,
2594 	     boolean_t syncio, boolean_t invalidate)
2595 {
2596 	vm_map_entry_t current;
2597 	vm_map_entry_t entry;
2598 	vm_size_t size;
2599 	vm_object_t object;
2600 	vm_object_t tobj;
2601 	vm_ooffset_t offset;
2602 
2603 	vm_map_lock_read(map);
2604 	VM_MAP_RANGE_CHECK(map, start, end);
2605 	if (!vm_map_lookup_entry(map, start, &entry)) {
2606 		vm_map_unlock_read(map);
2607 		return (KERN_INVALID_ADDRESS);
2608 	}
2609 	lwkt_gettoken(&map->token);
2610 
2611 	/*
2612 	 * Make a first pass to check for holes.
2613 	 */
2614 	for (current = entry; current->start < end; current = current->next) {
2615 		if (current->maptype == VM_MAPTYPE_SUBMAP) {
2616 			lwkt_reltoken(&map->token);
2617 			vm_map_unlock_read(map);
2618 			return (KERN_INVALID_ARGUMENT);
2619 		}
2620 		if (end > current->end &&
2621 		    (current->next == &map->header ||
2622 			current->end != current->next->start)) {
2623 			lwkt_reltoken(&map->token);
2624 			vm_map_unlock_read(map);
2625 			return (KERN_INVALID_ADDRESS);
2626 		}
2627 	}
2628 
2629 	if (invalidate)
2630 		pmap_remove(vm_map_pmap(map), start, end);
2631 
2632 	/*
2633 	 * Make a second pass, cleaning/uncaching pages from the indicated
2634 	 * objects as we go.
2635 	 */
2636 	for (current = entry; current->start < end; current = current->next) {
2637 		offset = current->offset + (start - current->start);
2638 		size = (end <= current->end ? end : current->end) - start;
2639 
2640 		switch(current->maptype) {
2641 		case VM_MAPTYPE_SUBMAP:
2642 		{
2643 			vm_map_t smap;
2644 			vm_map_entry_t tentry;
2645 			vm_size_t tsize;
2646 
2647 			smap = current->object.sub_map;
2648 			vm_map_lock_read(smap);
2649 			vm_map_lookup_entry(smap, offset, &tentry);
2650 			tsize = tentry->end - offset;
2651 			if (tsize < size)
2652 				size = tsize;
2653 			object = tentry->object.vm_object;
2654 			offset = tentry->offset + (offset - tentry->start);
2655 			vm_map_unlock_read(smap);
2656 			break;
2657 		}
2658 		case VM_MAPTYPE_NORMAL:
2659 		case VM_MAPTYPE_VPAGETABLE:
2660 			object = current->object.vm_object;
2661 			break;
2662 		default:
2663 			object = NULL;
2664 			break;
2665 		}
2666 
2667 		if (object)
2668 			vm_object_hold(object);
2669 
2670 		/*
2671 		 * Note that there is absolutely no sense in writing out
2672 		 * anonymous objects, so we track down the vnode object
2673 		 * to write out.
2674 		 * We invalidate (remove) all pages from the address space
2675 		 * anyway, for semantic correctness.
2676 		 *
2677 		 * note: certain anonymous maps, such as MAP_NOSYNC maps,
2678 		 * may start out with a NULL object.
2679 		 */
2680 		while (object && (tobj = object->backing_object) != NULL) {
2681 			vm_object_hold(tobj);
2682 			if (tobj == object->backing_object) {
2683 				vm_object_lock_swap();
2684 				offset += object->backing_object_offset;
2685 				vm_object_drop(object);
2686 				object = tobj;
2687 				if (object->size < OFF_TO_IDX(offset + size))
2688 					size = IDX_TO_OFF(object->size) -
2689 					       offset;
2690 				break;
2691 			}
2692 			vm_object_drop(tobj);
2693 		}
2694 		if (object && (object->type == OBJT_VNODE) &&
2695 		    (current->protection & VM_PROT_WRITE) &&
2696 		    (object->flags & OBJ_NOMSYNC) == 0) {
2697 			/*
2698 			 * Flush pages if writing is allowed, invalidate them
2699 			 * if invalidation requested.  Pages undergoing I/O
2700 			 * will be ignored by vm_object_page_remove().
2701 			 *
2702 			 * We cannot lock the vnode and then wait for paging
2703 			 * to complete without deadlocking against vm_fault.
2704 			 * Instead we simply call vm_object_page_remove() and
2705 			 * allow it to block internally on a page-by-page
2706 			 * basis when it encounters pages undergoing async
2707 			 * I/O.
2708 			 */
2709 			int flags;
2710 
2711 			/* no chain wait needed for vnode objects */
2712 			vm_object_reference_locked(object);
2713 			vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY);
2714 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2715 			flags |= invalidate ? OBJPC_INVAL : 0;
2716 
2717 			/*
2718 			 * When operating on a virtual page table just
2719 			 * flush the whole object.  XXX we probably ought
2720 			 * to
2721 			 */
2722 			switch(current->maptype) {
2723 			case VM_MAPTYPE_NORMAL:
2724 				vm_object_page_clean(object,
2725 				    OFF_TO_IDX(offset),
2726 				    OFF_TO_IDX(offset + size + PAGE_MASK),
2727 				    flags);
2728 				break;
2729 			case VM_MAPTYPE_VPAGETABLE:
2730 				vm_object_page_clean(object, 0, 0, flags);
2731 				break;
2732 			}
2733 			vn_unlock(((struct vnode *)object->handle));
2734 			vm_object_deallocate_locked(object);
2735 		}
2736 		if (object && invalidate &&
2737 		   ((object->type == OBJT_VNODE) ||
2738 		    (object->type == OBJT_DEVICE) ||
2739 		    (object->type == OBJT_MGTDEVICE))) {
2740 			int clean_only =
2741 				((object->type == OBJT_DEVICE) ||
2742 				(object->type == OBJT_MGTDEVICE)) ? FALSE : TRUE;
2743 			/* no chain wait needed for vnode/device objects */
2744 			vm_object_reference_locked(object);
2745 			switch(current->maptype) {
2746 			case VM_MAPTYPE_NORMAL:
2747 				vm_object_page_remove(object,
2748 				    OFF_TO_IDX(offset),
2749 				    OFF_TO_IDX(offset + size + PAGE_MASK),
2750 				    clean_only);
2751 				break;
2752 			case VM_MAPTYPE_VPAGETABLE:
2753 				vm_object_page_remove(object, 0, 0, clean_only);
2754 				break;
2755 			}
2756 			vm_object_deallocate_locked(object);
2757 		}
2758 		start += size;
2759 		if (object)
2760 			vm_object_drop(object);
2761 	}
2762 
2763 	lwkt_reltoken(&map->token);
2764 	vm_map_unlock_read(map);
2765 
2766 	return (KERN_SUCCESS);
2767 }
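
/*
 * E.g. sys_msync() with MS_SYNC | MS_INVALIDATE reduces to roughly
 * (a sketch):
 *
 *	vm_map_clean(map, addr, addr + size, TRUE, TRUE);
 */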
2768 
2769 /*
2770  * Make the region specified by this entry pageable.
2771  *
2772  * The vm_map must be exclusively locked.
2773  */
2774 static void
2775 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2776 {
2777 	entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2778 	entry->wired_count = 0;
2779 	vm_fault_unwire(map, entry);
2780 }
2781 
2782 /*
2783  * Deallocate the given entry from the target map.
2784  *
2785  * The vm_map must be exclusively locked.
2786  */
2787 static void
2788 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
2789 {
2790 	vm_map_entry_unlink(map, entry);
2791 	map->size -= entry->end - entry->start;
2792 
2793 	switch(entry->maptype) {
2794 	case VM_MAPTYPE_NORMAL:
2795 	case VM_MAPTYPE_VPAGETABLE:
2796 	case VM_MAPTYPE_SUBMAP:
2797 		vm_object_deallocate(entry->object.vm_object);
2798 		break;
2799 	case VM_MAPTYPE_UKSMAP:
2800 		/* XXX TODO */
2801 		break;
2802 	default:
2803 		break;
2804 	}
2805 
2806 	vm_map_entry_dispose(map, entry, countp);
2807 }
2808 
2809 /*
2810  * Deallocates the given address range from the target map.
2811  *
2812  * The vm_map must be exclusively locked.
2813  */
2814 int
2815 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
2816 {
2817 	vm_object_t object;
2818 	vm_map_entry_t entry;
2819 	vm_map_entry_t first_entry;
2820 
2821 	ASSERT_VM_MAP_LOCKED(map);
2822 	lwkt_gettoken(&map->token);
2823 again:
2824 	/*
2825 	 * Find the start of the region, and clip it.  Set entry to point
2826 	 * at the first record containing the requested address or, if no
2827 	 * such record exists, the next record with a greater address.  The
2828 	 * loop will run from this point until a record beyond the termination
2829 	 * address is encountered.
2830 	 *
2831 	 * map->hint must be adjusted to not point to anything we delete,
2832 	 * so set it to the entry prior to the one being deleted.
2833 	 *
2834 	 * GGG see other GGG comment.
2835 	 */
2836 	if (vm_map_lookup_entry(map, start, &first_entry)) {
2837 		entry = first_entry;
2838 		vm_map_clip_start(map, entry, start, countp);
2839 		map->hint = entry->prev;	/* possible problem XXX */
2840 	} else {
2841 		map->hint = first_entry;	/* possible problem XXX */
2842 		entry = first_entry->next;
2843 	}
2844 
2845 	/*
2846 	 * If a hole opens up prior to the current first_free then
2847 	 * adjust first_free.  As with map->hint, map->first_free
2848 	 * cannot be left set to anything we might delete.
2849 	 */
2850 	if (entry == &map->header) {
2851 		map->first_free = &map->header;
2852 	} else if (map->first_free->start >= start) {
2853 		map->first_free = entry->prev;
2854 	}
2855 
2856 	/*
2857 	 * Step through all entries in this region
2858 	 */
2859 	while ((entry != &map->header) && (entry->start < end)) {
2860 		vm_map_entry_t next;
2861 		vm_offset_t s, e;
2862 		vm_pindex_t offidxstart, offidxend, count;
2863 
2864 		/*
2865 		 * If we hit an in-transition entry we have to sleep and
2866 		 * retry.  It's easier (and not really slower) to just retry
2867 		 * since this case occurs so rarely and the hint is already
2868 		 * pointing at the right place.  We have to reset the
2869 		 * start offset so as not to accidentally delete an entry
2870 		 * another process just created in vacated space.
2871 		 */
2872 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2873 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2874 			start = entry->start;
2875 			++mycpu->gd_cnt.v_intrans_coll;
2876 			++mycpu->gd_cnt.v_intrans_wait;
2877 			vm_map_transition_wait(map);
2878 			goto again;
2879 		}
2880 		vm_map_clip_end(map, entry, end, countp);
2881 
2882 		s = entry->start;
2883 		e = entry->end;
2884 		next = entry->next;
2885 
2886 		offidxstart = OFF_TO_IDX(entry->offset);
2887 		count = OFF_TO_IDX(e - s);
2888 
2889 		switch(entry->maptype) {
2890 		case VM_MAPTYPE_NORMAL:
2891 		case VM_MAPTYPE_VPAGETABLE:
2892 		case VM_MAPTYPE_SUBMAP:
2893 			object = entry->object.vm_object;
2894 			break;
2895 		default:
2896 			object = NULL;
2897 			break;
2898 		}
2899 
2900 		/*
2901 		 * Unwire before removing addresses from the pmap; otherwise,
2902 		 * unwiring will put the entries back in the pmap.
2903 		 */
2904 		if (entry->wired_count != 0)
2905 			vm_map_entry_unwire(map, entry);
2906 
2907 		offidxend = offidxstart + count;
2908 
2909 		if (object == &kernel_object) {
2910 			vm_object_hold(object);
2911 			vm_object_page_remove(object, offidxstart,
2912 					      offidxend, FALSE);
2913 			vm_object_drop(object);
2914 		} else if (object && object->type != OBJT_DEFAULT &&
2915 			   object->type != OBJT_SWAP) {
2916 			/*
2917 			 * vnode object routines cannot be chain-locked,
2918 			 * but since we aren't removing pages from the
2919 			 * object here we can use a shared hold.
2920 			 */
2921 			vm_object_hold_shared(object);
2922 			pmap_remove(map->pmap, s, e);
2923 			vm_object_drop(object);
2924 		} else if (object) {
2925 			vm_object_hold(object);
2926 			vm_object_chain_acquire(object, 0);
2927 			pmap_remove(map->pmap, s, e);
2928 
2929 			if (object != NULL &&
2930 			    object->ref_count != 1 &&
2931 			    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) ==
2932 			     OBJ_ONEMAPPING &&
2933 			    (object->type == OBJT_DEFAULT ||
2934 			     object->type == OBJT_SWAP)) {
2935 				vm_object_collapse(object, NULL);
2936 				vm_object_page_remove(object, offidxstart,
2937 						      offidxend, FALSE);
2938 				if (object->type == OBJT_SWAP) {
2939 					swap_pager_freespace(object,
2940 							     offidxstart,
2941 							     count);
2942 				}
2943 				if (offidxend >= object->size &&
2944 				    offidxstart < object->size) {
2945 					object->size = offidxstart;
2946 				}
2947 			}
2948 			vm_object_chain_release(object);
2949 			vm_object_drop(object);
2950 		} else if (entry->maptype == VM_MAPTYPE_UKSMAP) {
2951 			pmap_remove(map->pmap, s, e);
2952 		}
2953 
2954 		/*
2955 		 * Delete the entry (which may delete the object) only after
2956 		 * removing all pmap entries pointing to its pages.
2957 		 * (Otherwise, its page frames may be reallocated, and any
2958 		 * modify bits will be set in the wrong object!)
2959 		 */
2960 		vm_map_entry_delete(map, entry, countp);
2961 		entry = next;
2962 	}
2963 	lwkt_reltoken(&map->token);
2964 	return (KERN_SUCCESS);
2965 }
2966 
2967 /*
2968  * Remove the given address range from the target map.
2969  * This is the exported form of vm_map_delete.
2970  *
2971  * No requirements.
2972  */
2973 int
2974 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2975 {
2976 	int result;
2977 	int count;
2978 
2979 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2980 	vm_map_lock(map);
2981 	VM_MAP_RANGE_CHECK(map, start, end);
2982 	result = vm_map_delete(map, start, end, &count);
2983 	vm_map_unlock(map);
2984 	vm_map_entry_release(count);
2985 
2986 	return (result);
2987 }
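
/*
 * E.g. munmap(2) ultimately reduces to (a sketch):
 *
 *	vm_map_remove(&p->p_vmspace->vm_map, addr, addr + size);
 */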
2988 
2989 /*
2990  * Assert that the target map allows the specified privilege on the
2991  * entire address region given.  The entire region must be allocated.
2992  *
2993  * The caller must specify whether the vm_map is already locked or not.
2994  */
2995 boolean_t
2996 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2997 			vm_prot_t protection, boolean_t have_lock)
2998 {
2999 	vm_map_entry_t entry;
3000 	vm_map_entry_t tmp_entry;
3001 	boolean_t result;
3002 
3003 	if (have_lock == FALSE)
3004 		vm_map_lock_read(map);
3005 
3006 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
3007 		if (have_lock == FALSE)
3008 			vm_map_unlock_read(map);
3009 		return (FALSE);
3010 	}
3011 	entry = tmp_entry;
3012 
3013 	result = TRUE;
3014 	while (start < end) {
3015 		if (entry == &map->header) {
3016 			result = FALSE;
3017 			break;
3018 		}
3019 		/*
3020 		 * No holes allowed!
3021 		 */
3022 
3023 		if (start < entry->start) {
3024 			result = FALSE;
3025 			break;
3026 		}
3027 		/*
3028 		 * Check protection associated with entry.
3029 		 */
3030 
3031 		if ((entry->protection & protection) != protection) {
3032 			result = FALSE;
3033 			break;
3034 		}
3035 		/* go to next entry */
3036 
3037 		start = entry->end;
3038 		entry = entry->next;
3039 	}
3040 	if (have_lock == FALSE)
3041 		vm_map_unlock_read(map);
3042 	return (result);
3043 }
3044 
3045 /*
3046  * If appropriate this function shadows the original object with a new object
3047  * and moves the VM pages from the original object to the new object.
3048  * The original object will also be collapsed, if possible.
3049  *
3050  * We can only do this for normal memory objects with a single mapping, and
3051  * it only makes sense to do it if there are 2 or more refs on the original
3052  * object, i.e. typically a memory object that has been extended into
3053  * multiple vm_map_entry's with non-overlapping ranges.
3054  *
3055  * This makes it easier to remove unused pages and keeps object inheritance
3056  * from being a negative impact on memory usage.
3057  *
3058  * On return the (possibly new) entry->object.vm_object will have an
3059  * additional ref on it for the caller to dispose of (usually by cloning
3060  * the vm_map_entry).  The additional ref had to be done in this routine
3061  * to avoid racing a collapse.  The object's ONEMAPPING flag will also be
3062  * cleared.
3063  *
3064  * The vm_map must be locked and its token held.
3065  */
3066 static void
3067 vm_map_split(vm_map_entry_t entry)
3068 {
3069 	/* OPTIMIZED */
3070 	vm_object_t oobject, nobject, bobject;
3071 	vm_offset_t s, e;
3072 	vm_page_t m;
3073 	vm_pindex_t offidxstart, offidxend, idx;
3074 	vm_size_t size;
3075 	vm_ooffset_t offset;
3076 	int useshadowlist;
3077 
3078 	/*
3079 	 * Optimize away object locks for vnode objects.  Important exit/exec
3080 	 * critical path.
3081 	 *
3082 	 * OBJ_ONEMAPPING doesn't apply to vnode objects but clear the flag
3083 	 * anyway.
3084 	 */
3085 	oobject = entry->object.vm_object;
3086 	if (oobject->type != OBJT_DEFAULT && oobject->type != OBJT_SWAP) {
3087 		vm_object_reference_quick(oobject);
3088 		vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3089 		return;
3090 	}
3091 
3092 	/*
3093 	 * Setup.  Chain lock the original object throughout the entire
3094 	 * routine to prevent new page faults from occurring.
3095 	 *
3096 	 * XXX can madvise WILLNEED interfere with us too?
3097 	 */
3098 	vm_object_hold(oobject);
3099 	vm_object_chain_acquire(oobject, 0);
3100 
3101 	/*
3102 	 * Original object cannot be split?  Might have also changed state.
3103 	 */
3104 	if (oobject->handle == NULL || (oobject->type != OBJT_DEFAULT &&
3105 					oobject->type != OBJT_SWAP)) {
3106 		vm_object_chain_release(oobject);
3107 		vm_object_reference_locked(oobject);
3108 		vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3109 		vm_object_drop(oobject);
3110 		return;
3111 	}
3112 
3113 	/*
3114 	 * Collapse original object with its backing store as an
3115 	 * optimization to reduce chain lengths when possible.
3116 	 *
3117 	 * If ref_count <= 1 there aren't other non-overlapping vm_map_entry's
3118 	 * for oobject, so there's no point collapsing it.
3119 	 *
3120 	 * Then re-check whether the object can be split.
3121 	 */
3122 	vm_object_collapse(oobject, NULL);
3123 
3124 	if (oobject->ref_count <= 1 ||
3125 	    (oobject->type != OBJT_DEFAULT && oobject->type != OBJT_SWAP) ||
3126 	    (oobject->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) != OBJ_ONEMAPPING) {
3127 		vm_object_chain_release(oobject);
3128 		vm_object_reference_locked(oobject);
3129 		vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3130 		vm_object_drop(oobject);
3131 		return;
3132 	}
3133 
3134 	/*
3135 	 * Acquire the chain lock on the backing object.
3136 	 *
3137 	 * Give bobject an additional ref count for when it will be shadowed
3138 	 * by nobject.
3139 	 */
3140 	useshadowlist = 0;
3141 	if ((bobject = oobject->backing_object) != NULL) {
3142 		if (bobject->type != OBJT_VNODE) {
3143 			useshadowlist = 1;
3144 			vm_object_hold(bobject);
3145 			vm_object_chain_wait(bobject, 0);
3146 			/* ref for shadowing below */
3147 			vm_object_reference_locked(bobject);
3148 			vm_object_chain_acquire(bobject, 0);
3149 			KKASSERT(oobject->backing_object == bobject);
3150 			KKASSERT((bobject->flags & OBJ_DEAD) == 0);
3151 		} else {
3152 			/*
3153 			 * vnodes are not placed on the shadow list but
3154 			 * they still get another ref for the backing_object
3155 			 * reference.
3156 			 */
3157 			vm_object_reference_quick(bobject);
3158 		}
3159 	}
3160 
3161 	/*
3162 	 * Calculate the object page range and allocate the new object.
3163 	 */
3164 	offset = entry->offset;
3165 	s = entry->start;
3166 	e = entry->end;
3167 
3168 	offidxstart = OFF_TO_IDX(offset);
3169 	offidxend = offidxstart + OFF_TO_IDX(e - s);
3170 	size = offidxend - offidxstart;
3171 
3172 	switch(oobject->type) {
3173 	case OBJT_DEFAULT:
3174 		nobject = default_pager_alloc(NULL, IDX_TO_OFF(size),
3175 					      VM_PROT_ALL, 0);
3176 		break;
3177 	case OBJT_SWAP:
3178 		nobject = swap_pager_alloc(NULL, IDX_TO_OFF(size),
3179 					   VM_PROT_ALL, 0);
3180 		break;
3181 	default:
3182 		/* not reached */
3183 		nobject = NULL;
3184 		KKASSERT(0);
3185 	}
3186 
3187 	if (nobject == NULL) {
3188 		if (bobject) {
3189 			if (useshadowlist) {
3190 				vm_object_chain_release(bobject);
3191 				vm_object_deallocate(bobject);
3192 				vm_object_drop(bobject);
3193 			} else {
3194 				vm_object_deallocate(bobject);
3195 			}
3196 		}
3197 		vm_object_chain_release(oobject);
3198 		vm_object_reference_locked(oobject);
3199 		vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3200 		vm_object_drop(oobject);
3201 		return;
3202 	}
3203 
3204 	/*
3205 	 * The new object will replace entry->object.vm_object so it needs
3206 	 * a second reference (the caller expects an additional ref).
3207 	 */
3208 	vm_object_hold(nobject);
3209 	vm_object_reference_locked(nobject);
3210 	vm_object_chain_acquire(nobject, 0);
3211 
3212 	/*
3213 	 * nobject shadows bobject (oobject already shadows bobject).
3214 	 *
3215 	 * Adding an object to bobject's shadow list requires refing bobject
3216 	 * which we did above in the useshadowlist case.
3217 	 */
3218 	if (bobject) {
3219 		nobject->backing_object_offset =
3220 		    oobject->backing_object_offset + IDX_TO_OFF(offidxstart);
3221 		nobject->backing_object = bobject;
3222 		if (useshadowlist) {
3223 			bobject->shadow_count++;
3224 			bobject->generation++;
3225 			LIST_INSERT_HEAD(&bobject->shadow_head,
3226 					 nobject, shadow_list);
3227 			vm_object_clear_flag(bobject, OBJ_ONEMAPPING); /*XXX*/
3228 			vm_object_chain_release(bobject);
3229 			vm_object_drop(bobject);
3230 			vm_object_set_flag(nobject, OBJ_ONSHADOW);
3231 		}
3232 	}
3233 
3234 	/*
3235 	 * Move the VM pages from oobject to nobject
3236 	 */
3237 	for (idx = 0; idx < size; idx++) {
3240 		m = vm_page_lookup_busy_wait(oobject, offidxstart + idx,
3241 					     TRUE, "vmpg");
3242 		if (m == NULL)
3243 			continue;
3244 
3245 		/*
3246 		 * We must wait for pending I/O to complete before we can
3247 		 * rename the page.
3248 		 *
3249 		 * We do not have to VM_PROT_NONE the page as mappings should
3250 		 * not be changed by this operation.
3251 		 *
3252 		 * NOTE: The act of renaming a page updates chaingen for both
3253 		 *	 objects.
3254 		 */
3255 		vm_page_rename(m, nobject, idx);
3256 		/* page automatically made dirty by rename and cache handled */
3257 		/* page remains busy */
3258 	}
3259 
3260 	if (oobject->type == OBJT_SWAP) {
3261 		vm_object_pip_add(oobject, 1);
3262 		/*
3263 		 * copy oobject pages into nobject and destroy unneeded
3264 		 * pages in shadow object.
3265 		 */
3266 		swap_pager_copy(oobject, nobject, offidxstart, 0);
3267 		vm_object_pip_wakeup(oobject);
3268 	}
3269 
3270 	/*
3271 	 * Wakeup the pages we played with.  No spl protection is needed
3272 	 * for a simple wakeup.
3273 	 */
3274 	for (idx = 0; idx < size; idx++) {
3275 		m = vm_page_lookup(nobject, idx);
3276 		if (m) {
3277 			KKASSERT(m->flags & PG_BUSY);
3278 			vm_page_wakeup(m);
3279 		}
3280 	}
3281 	entry->object.vm_object = nobject;
3282 	entry->offset = 0LL;
3283 
3284 	/*
3285 	 * Cleanup
3286 	 *
3287 	 * NOTE: There is no need to remove OBJ_ONEMAPPING from oobject, the
3288 	 *	 related pages were moved and are no longer applicable to the
3289 	 *	 original object.
3290 	 *
3291 	 * NOTE: Deallocate oobject (due to its entry->object.vm_object being
3292 	 *	 replaced by nobject).
3293 	 */
3294 	vm_object_chain_release(nobject);
3295 	vm_object_drop(nobject);
3296 	if (bobject && useshadowlist) {
3297 		vm_object_chain_release(bobject);
3298 		vm_object_drop(bobject);
3299 	}
3300 	vm_object_chain_release(oobject);
3301 	/*vm_object_clear_flag(oobject, OBJ_ONEMAPPING);*/
3302 	vm_object_deallocate_locked(oobject);
3303 	vm_object_drop(oobject);
3304 }
3305 
3306 /*
3307  * Copies the contents of the source entry to the destination
3308  * entry.  The entries *must* be aligned properly.
3309  *
3310  * The vm_maps must be exclusively locked.
3311  * The vm_map's token must be held.
3312  *
3313  * Because the maps are locked no faults can be in progress during the
3314  * operation.
3315  */
3316 static void
3317 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
3318 		  vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
3319 {
3320 	vm_object_t src_object;
3321 
3322 	if (dst_entry->maptype == VM_MAPTYPE_SUBMAP ||
3323 	    dst_entry->maptype == VM_MAPTYPE_UKSMAP)
3324 		return;
3325 	if (src_entry->maptype == VM_MAPTYPE_SUBMAP ||
3326 	    src_entry->maptype == VM_MAPTYPE_UKSMAP)
3327 		return;
3328 
3329 	if (src_entry->wired_count == 0) {
3330 		/*
3331 		 * If the source entry is marked needs_copy, it is already
3332 		 * write-protected.
3333 		 */
3334 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
3335 			pmap_protect(src_map->pmap,
3336 			    src_entry->start,
3337 			    src_entry->end,
3338 			    src_entry->protection & ~VM_PROT_WRITE);
3339 		}
3340 
3341 		/*
3342 		 * Make a copy of the object.
3343 		 *
3344 		 * The object must be locked prior to checking the object type
3345 		 * and for the call to vm_object_collapse() and vm_map_split().
3346 		 * We cannot use *_hold() here because the split code will
3347 		 * probably try to destroy the object.  The lock is a pool
3348 		 * token and doesn't care.
3349 		 *
3350 		 * We must bump src_map->timestamp when setting
3351 		 * MAP_ENTRY_NEEDS_COPY to force any concurrent fault
3352 		 * to retry, otherwise the concurrent fault might improperly
3353 		 * install a RW pte when it's supposed to be a RO(COW) pte.
3354 		 * This race can occur because a vnode-backed fault may have
3355 		 * to temporarily release the map lock.
3356 		 */
3357 		if (src_entry->object.vm_object != NULL) {
3358 			vm_map_split(src_entry);
3359 			src_object = src_entry->object.vm_object;
3360 			dst_entry->object.vm_object = src_object;
3361 			src_entry->eflags |= (MAP_ENTRY_COW |
3362 					      MAP_ENTRY_NEEDS_COPY);
3363 			dst_entry->eflags |= (MAP_ENTRY_COW |
3364 					      MAP_ENTRY_NEEDS_COPY);
3365 			dst_entry->offset = src_entry->offset;
3366 			++src_map->timestamp;
3367 		} else {
3368 			dst_entry->object.vm_object = NULL;
3369 			dst_entry->offset = 0;
3370 		}
3371 
3372 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3373 		    dst_entry->end - dst_entry->start, src_entry->start);
3374 	} else {
3375 		/*
3376 		 * Of course, wired down pages can't be set copy-on-write.
3377 		 * Cause wired pages to be copied into the new map by
3378 		 * simulating faults (the new pages are pageable)
3379 		 */
3380 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
3381 	}
3382 }
3383 
3384 /*
3385  * vmspace_fork:
3386  * Create a new process vmspace structure and vm_map
3387  * based on those of an existing process.  The new map
3388  * is based on the old map, according to the inheritance
3389  * values on the regions in that map.
3390  *
3391  * The source map must not be locked.
3392  * No requirements.
3393  */
3394 static void vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map,
3395 			  vm_map_entry_t old_entry, int *countp);
3396 static void vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map,
3397 			  vm_map_entry_t old_entry, int *countp);
3398 
3399 struct vmspace *
3400 vmspace_fork(struct vmspace *vm1)
3401 {
3402 	struct vmspace *vm2;
3403 	vm_map_t old_map = &vm1->vm_map;
3404 	vm_map_t new_map;
3405 	vm_map_entry_t old_entry;
3406 	int count;
3407 
3408 	lwkt_gettoken(&vm1->vm_map.token);
3409 	vm_map_lock(old_map);
3410 
3411 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
3412 	lwkt_gettoken(&vm2->vm_map.token);
3413 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
3414 	    (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
3415 	new_map = &vm2->vm_map;	/* XXX */
3416 	new_map->timestamp = 1;
3417 
3418 	vm_map_lock(new_map);
3419 
3420 	count = 0;
3421 	old_entry = old_map->header.next;
3422 	while (old_entry != &old_map->header) {
3423 		++count;
3424 		old_entry = old_entry->next;
3425 	}
3426 
3427 	count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);
3428 
3429 	old_entry = old_map->header.next;
3430 	while (old_entry != &old_map->header) {
3431 		switch(old_entry->maptype) {
3432 		case VM_MAPTYPE_SUBMAP:
3433 			panic("vm_map_fork: encountered a submap");
3434 			break;
3435 		case VM_MAPTYPE_UKSMAP:
3436 			vmspace_fork_uksmap_entry(old_map, new_map,
3437 						  old_entry, &count);
3438 			break;
3439 		case VM_MAPTYPE_NORMAL:
3440 		case VM_MAPTYPE_VPAGETABLE:
3441 			vmspace_fork_normal_entry(old_map, new_map,
3442 						  old_entry, &count);
3443 			break;
3444 		}
3445 		old_entry = old_entry->next;
3446 	}
3447 
3448 	new_map->size = old_map->size;
3449 	vm_map_unlock(old_map);
3450 	vm_map_unlock(new_map);
3451 	vm_map_entry_release(count);
3452 
3453 	lwkt_reltoken(&vm2->vm_map.token);
3454 	lwkt_reltoken(&vm1->vm_map.token);
3455 
3456 	return (vm2);
3457 }
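
/*
 * E.g. the fork path uses this as roughly (a sketch):
 *
 *	p2->p_vmspace = vmspace_fork(p1->p_vmspace);
 */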
3458 
3459 static
3460 void
3461 vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map,
3462 			  vm_map_entry_t old_entry, int *countp)
3463 {
3464 	vm_map_entry_t new_entry;
3465 	vm_object_t object;
3466 
3467 	switch (old_entry->inheritance) {
3468 	case VM_INHERIT_NONE:
3469 		break;
3470 	case VM_INHERIT_SHARE:
3471 		/*
3472 		 * Clone the entry, creating the shared object if
3473 		 * necessary.
3474 		 */
3475 		if (old_entry->object.vm_object == NULL)
3476 			vm_map_entry_allocate_object(old_entry);
3477 
3478 		if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3479 			/*
3480 			 * Shadow a map_entry which needs a copy,
3481 			 * replacing its object with a new object
3482 			 * that points to the old one.  Ask the
3483 			 * shadow code to automatically add an
3484 			 * additional ref.  We can't do it afterwards
3485 			 * because we might race a collapse.  The call
3486 			 * to vm_map_entry_shadow() will also clear
3487 			 * OBJ_ONEMAPPING.
3488 			 */
3489 			vm_map_entry_shadow(old_entry, 1);
3490 		} else if (old_entry->object.vm_object) {
3491 			/*
3492 			 * We will make a shared copy of the object,
3493 			 * and must clear OBJ_ONEMAPPING.
3494 			 *
3495 			 * Optimize vnode objects.  OBJ_ONEMAPPING
3496 			 * is non-applicable but clear it anyway,
3497 			 * and it's terminal so we don't have to deal
3498 			 * with chains.  Reduces SMP conflicts.
3499 			 *
3500 			 * XXX assert that object.vm_object != NULL
3501 			 *     since we allocate it above.
3502 			 */
3503 			object = old_entry->object.vm_object;
3504 			if (object->type == OBJT_VNODE) {
3505 				vm_object_reference_quick(object);
3506 				vm_object_clear_flag(object,
3507 						     OBJ_ONEMAPPING);
3508 			} else {
3509 				vm_object_hold(object);
3510 				vm_object_chain_wait(object, 0);
3511 				vm_object_reference_locked(object);
3512 				vm_object_clear_flag(object,
3513 						     OBJ_ONEMAPPING);
3514 				vm_object_drop(object);
3515 			}
3516 		}
3517 
3518 		/*
3519 		 * Clone the entry.  We've already bumped the ref on
3520 		 * any vm_object.
3521 		 */
3522 		new_entry = vm_map_entry_create(new_map, countp);
3523 		*new_entry = *old_entry;
3524 		new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3525 		new_entry->wired_count = 0;
3526 
3527 		/*
3528 		 * Insert the entry into the new map -- we know we're
3529 		 * inserting at the end of the new map.
3530 		 */
3531 
3532 		vm_map_entry_link(new_map, new_map->header.prev,
3533 				  new_entry);
3534 
3535 		/*
3536 		 * Update the physical map
3537 		 */
3538 		pmap_copy(new_map->pmap, old_map->pmap,
3539 			  new_entry->start,
3540 			  (old_entry->end - old_entry->start),
3541 			  old_entry->start);
3542 		break;
3543 	case VM_INHERIT_COPY:
3544 		/*
3545 		 * Clone the entry and link into the map.
3546 		 */
3547 		new_entry = vm_map_entry_create(new_map, countp);
3548 		*new_entry = *old_entry;
3549 		new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3550 		new_entry->wired_count = 0;
3551 		new_entry->object.vm_object = NULL;
3552 		vm_map_entry_link(new_map, new_map->header.prev,
3553 				  new_entry);
3554 		vm_map_copy_entry(old_map, new_map, old_entry,
3555 				  new_entry);
3556 		break;
3557 	}
3558 }
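
/*
 * Illustrative sketch (not part of the build): the inheritance modes
 * handled above are normally selected from userland with minherit(2),
 * e.g.
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_ANON | MAP_PRIVATE, -1, 0);
 *	minherit(p, len, MAP_INHERIT_SHARE);	-- VM_INHERIT_SHARE
 *
 * MAP_INHERIT_NONE leaves a hole in the child's address space and
 * MAP_INHERIT_COPY (typically the default for private mappings)
 * takes the VM_INHERIT_COPY path above.
 */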
3559 
3560 /*
3561  * When forking user-kernel shared maps, the map might change in the
3562  * child so do not try to copy the underlying pmap entries.
3563  */
3564 static
3565 void
3566 vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map,
3567 			  vm_map_entry_t old_entry, int *countp)
3568 {
3569 	vm_map_entry_t new_entry;
3570 
3571 	new_entry = vm_map_entry_create(new_map, countp);
3572 	*new_entry = *old_entry;
3573 	new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3574 	new_entry->wired_count = 0;
3575 	vm_map_entry_link(new_map, new_map->header.prev,
3576 			  new_entry);
3577 }
3578 
3579 /*
3580  * Create an auto-grow stack entry
3581  *
3582  * No requirements.
3583  */
3584 int
3585 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3586 	      int flags, vm_prot_t prot, vm_prot_t max, int cow)
3587 {
3588 	vm_map_entry_t	prev_entry;
3589 	vm_map_entry_t	new_stack_entry;
3590 	vm_size_t	init_ssize;
3591 	int		rv;
3592 	int		count;
3593 	vm_offset_t	tmpaddr;
3594 
3595 	cow |= MAP_IS_STACK;
3596 
3597 	if (max_ssize < sgrowsiz)
3598 		init_ssize = max_ssize;
3599 	else
3600 		init_ssize = sgrowsiz;
3601 
3602 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3603 	vm_map_lock(map);
3604 
3605 	/*
3606 	 * Find space for the mapping
3607 	 */
3608 	if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
3609 		if (vm_map_findspace(map, addrbos, max_ssize, 1,
3610 				     flags, &tmpaddr)) {
3611 			vm_map_unlock(map);
3612 			vm_map_entry_release(count);
3613 			return (KERN_NO_SPACE);
3614 		}
3615 		addrbos = tmpaddr;
3616 	}
3617 
3618 	/* If addr is already mapped, no go */
3619 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
3620 		vm_map_unlock(map);
3621 		vm_map_entry_release(count);
3622 		return (KERN_NO_SPACE);
3623 	}
3624 
3625 #if 0
3626 	/* XXX already handled by kern_mmap() */
3627 	/* If we would blow our VMEM resource limit, no go */
3628 	if (map->size + init_ssize >
3629 	    curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3630 		vm_map_unlock(map);
3631 		vm_map_entry_release(count);
3632 		return (KERN_NO_SPACE);
3633 	}
3634 #endif
3635 
3636 	/*
3637 	 * If we can't accommodate max_ssize in the current mapping,
3638 	 * no go.  However, we need to be aware that subsequent user
3639 	 * mappings might map into the space we have reserved for the
3640 	 * stack, and currently this space is not protected.
3641 	 *
3642 	 * Hopefully we will at least detect this condition
3643 	 * when we try to grow the stack.
3644 	 */
3645 	if ((prev_entry->next != &map->header) &&
3646 	    (prev_entry->next->start < addrbos + max_ssize)) {
3647 		vm_map_unlock(map);
3648 		vm_map_entry_release(count);
3649 		return (KERN_NO_SPACE);
3650 	}
3651 
3652 	/*
3653 	 * We initially map a stack of only init_ssize.  We will
3654 	 * grow as needed later.  Since this is to be a grow
3655 	 * down stack, we map at the top of the range.
3656 	 *
3657 	 * Note: we would normally expect prot and max to be
3658 	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
3659 	 * eliminate these as input parameters, and just
3660 	 * pass these values here in the insert call.
3661 	 */
3662 	rv = vm_map_insert(map, &count, NULL, NULL,
3663 			   0, addrbos + max_ssize - init_ssize,
3664 	                   addrbos + max_ssize,
3665 			   VM_MAPTYPE_NORMAL,
3666 			   prot, max, cow);
3667 
3668 	/* Now set the avail_ssize amount */
3669 	if (rv == KERN_SUCCESS) {
3670 		if (prev_entry != &map->header)
3671 			vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize, &count);
3672 		new_stack_entry = prev_entry->next;
3673 		if (new_stack_entry->end   != addrbos + max_ssize ||
3674 		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
3675 			panic ("Bad entry start/end for new stack entry");
3676 		else
3677 			new_stack_entry->aux.avail_ssize = max_ssize - init_ssize;
3678 	}
3679 
3680 	vm_map_unlock(map);
3681 	vm_map_entry_release(count);
3682 	return (rv);
3683 }
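
/*
 * Illustrative sketch (not part of the build; argument values are
 * assumptions): a caller such as the exec path might reserve the
 * whole stack range up front, e.g.
 *
 *	rv = vm_map_stack(&vmspace->vm_map, stack_addr, maxssiz,
 *			  0, VM_PROT_ALL, VM_PROT_ALL, 0);
 *
 * Only the top init_ssize bytes are actually inserted here; the
 * remainder is recorded in aux.avail_ssize and materialized by
 * vm_map_growstack() as faults walk down the stack.
 */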
3684 
3685 /*
3686  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3687  * desired address is already mapped, or if we successfully grow
3688  * the stack.  Also returns KERN_SUCCESS if addr is outside the
3689  * stack range (this is strange, but preserves compatibility with
3690  * the grow function in vm_machdep.c).
3691  *
3692  * No requirements.
3693  */
3694 int
3695 vm_map_growstack (struct proc *p, vm_offset_t addr)
3696 {
3697 	vm_map_entry_t prev_entry;
3698 	vm_map_entry_t stack_entry;
3699 	vm_map_entry_t new_stack_entry;
3700 	struct vmspace *vm = p->p_vmspace;
3701 	vm_map_t map = &vm->vm_map;
3702 	vm_offset_t    end;
3703 	int grow_amount;
3704 	int rv = KERN_SUCCESS;
3705 	int is_procstack;
3706 	int use_read_lock = 1;
3707 	int count;
3708 
3709 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3710 Retry:
3711 	if (use_read_lock)
3712 		vm_map_lock_read(map);
3713 	else
3714 		vm_map_lock(map);
3715 
3716 	/* If addr is already in the entry range, no need to grow. */
3717 	if (vm_map_lookup_entry(map, addr, &prev_entry))
3718 		goto done;
3719 
3720 	if ((stack_entry = prev_entry->next) == &map->header)
3721 		goto done;
3722 	if (prev_entry == &map->header)
3723 		end = stack_entry->start - stack_entry->aux.avail_ssize;
3724 	else
3725 		end = prev_entry->end;
3726 
3727 	/*
3728 	 * This next test mimics the old grow function in vm_machdep.c.
3729 	 * It really doesn't quite make sense, but we do it anyway
3730 	 * for compatibility.
3731 	 *
3732 	 * If the stack is not growable, return success.  This signals
3733 	 * the caller to proceed as it normally would with ordinary vm.
3734 	 */
3735 	if (stack_entry->aux.avail_ssize < 1 ||
3736 	    addr >= stack_entry->start ||
3737 	    addr <  stack_entry->start - stack_entry->aux.avail_ssize) {
3738 		goto done;
3739 	}
3740 
3741 	/* Find the minimum grow amount */
3742 	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
3743 	if (grow_amount > stack_entry->aux.avail_ssize) {
3744 		rv = KERN_NO_SPACE;
3745 		goto done;
3746 	}
3747 
3748 	/*
3749 	 * If there is no longer enough space between the entries,
3750 	 * fail and adjust the available space.  Note: this
3751 	 * should only happen if the user has mapped into the
3752 	 * stack area after the stack was created, and is
3753 	 * probably an error.
3754 	 *
3755 	 * This also effectively destroys any guard page the user
3756 	 * might have intended by limiting the stack size.
3757 	 */
3758 	if (grow_amount > stack_entry->start - end) {
3759 		if (use_read_lock && vm_map_lock_upgrade(map)) {
3760 			/* lost lock */
3761 			use_read_lock = 0;
3762 			goto Retry;
3763 		}
3764 		use_read_lock = 0;
3765 		stack_entry->aux.avail_ssize = stack_entry->start - end;
3766 		rv = KERN_NO_SPACE;
3767 		goto done;
3768 	}
3769 
3770 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
3771 
3772 	/* If this is the main process stack, see if we're over the
3773 	 * stack limit.
3774 	 */
3775 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3776 			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3777 		rv = KERN_NO_SPACE;
3778 		goto done;
3779 	}
3780 
3781 	/* Round up the grow amount to a multiple of sgrowsiz */
3782 	grow_amount = roundup (grow_amount, sgrowsiz);
3783 	if (grow_amount > stack_entry->aux.avail_ssize) {
3784 		grow_amount = stack_entry->aux.avail_ssize;
3785 	}
3786 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3787 	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3788 		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
3789 		              ctob(vm->vm_ssize);
3790 	}
3791 
3792 	/* If we would blow our VMEM resource limit, no go */
3793 	if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3794 		rv = KERN_NO_SPACE;
3795 		goto done;
3796 	}
3797 
3798 	if (use_read_lock && vm_map_lock_upgrade(map)) {
3799 		/* lost lock */
3800 		use_read_lock = 0;
3801 		goto Retry;
3802 	}
3803 	use_read_lock = 0;
3804 
3805 	/* Get the preliminary new entry start value */
3806 	addr = stack_entry->start - grow_amount;
3807 
3808 	/* If this puts us into the previous entry, cut back our growth
3809 	 * to the available space.  Also, see the note above.
3810 	 */
3811 	if (addr < end) {
3812 		stack_entry->aux.avail_ssize = stack_entry->start - end;
3813 		addr = end;
3814 	}
3815 
3816 	rv = vm_map_insert(map, &count, NULL, NULL,
3817 			   0, addr, stack_entry->start,
3818 			   VM_MAPTYPE_NORMAL,
3819 			   VM_PROT_ALL, VM_PROT_ALL, 0);
3820 
3821 	/* Adjust the available stack space by the amount we grew. */
3822 	if (rv == KERN_SUCCESS) {
3823 		if (prev_entry != &map->header)
3824 			vm_map_clip_end(map, prev_entry, addr, &count);
3825 		new_stack_entry = prev_entry->next;
3826 		if (new_stack_entry->end   != stack_entry->start  ||
3827 		    new_stack_entry->start != addr)
3828 			panic ("Bad stack grow start/end in new stack entry");
3829 		else {
3830 			new_stack_entry->aux.avail_ssize =
3831 				stack_entry->aux.avail_ssize -
3832 				(new_stack_entry->end - new_stack_entry->start);
3833 			if (is_procstack)
3834 				vm->vm_ssize += btoc(new_stack_entry->end -
3835 						     new_stack_entry->start);
3836 		}
3837 
3838 		if (map->flags & MAP_WIREFUTURE)
3839 			vm_map_unwire(map, new_stack_entry->start,
3840 				      new_stack_entry->end, FALSE);
3841 	}
3842 
3843 done:
3844 	if (use_read_lock)
3845 		vm_map_unlock_read(map);
3846 	else
3847 		vm_map_unlock(map);
3848 	vm_map_entry_release(count);
3849 	return (rv);
3850 }
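
/*
 * Worked example for the sizing above (values are illustrative):
 * with PAGE_SIZE = 4096 and sgrowsiz = 128K, a fault 5000 bytes
 * below stack_entry->start yields
 *
 *	grow_amount = roundup(5000, PAGE_SIZE) = 8192
 *	grow_amount = roundup(8192, sgrowsiz)  = 131072
 *
 * i.e. the stack grows by a whole sgrowsiz chunk (clipped to
 * aux.avail_ssize, RLIMIT_STACK and RLIMIT_VMEM), not merely by the
 * faulting page.
 */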
3851 
3852 /*
3853  * Unshare the specified VM space for exec.  If other processes are
3854  * mapped to it, then create a new one (empty unless vmcopy is given).
3855  *
3856  * No requirements.
3857  */
3858 void
3859 vmspace_exec(struct proc *p, struct vmspace *vmcopy)
3860 {
3861 	struct vmspace *oldvmspace = p->p_vmspace;
3862 	struct vmspace *newvmspace;
3863 	vm_map_t map = &p->p_vmspace->vm_map;
3864 
3865 	/*
3866 	 * If we are execing a resident vmspace we fork it, otherwise
3867 	 * we create a new vmspace.  Note that exitingcnt is not
3868 	 * copied to the new vmspace.
3869 	 */
3870 	lwkt_gettoken(&oldvmspace->vm_map.token);
3871 	if (vmcopy)  {
3872 		newvmspace = vmspace_fork(vmcopy);
3873 		lwkt_gettoken(&newvmspace->vm_map.token);
3874 	} else {
3875 		newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
3876 		lwkt_gettoken(&newvmspace->vm_map.token);
3877 		bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
3878 		      (caddr_t)&oldvmspace->vm_endcopy -
3879 		       (caddr_t)&oldvmspace->vm_startcopy);
3880 	}
3881 
3882 	/*
3883 	 * Finish initializing the vmspace before assigning it
3884 	 * to the process.  The vmspace will become the current vmspace
3885 	 * if p == curproc.
3886 	 */
3887 	pmap_pinit2(vmspace_pmap(newvmspace));
3888 	pmap_replacevm(p, newvmspace, 0);
3889 	lwkt_reltoken(&newvmspace->vm_map.token);
3890 	lwkt_reltoken(&oldvmspace->vm_map.token);
3891 	vmspace_rel(oldvmspace);
3892 }
3893 
3894 /*
3895  * Unshare the specified VM space for forcing COW.  This
3896  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3897  */
3898 void
3899 vmspace_unshare(struct proc *p)
3900 {
3901 	struct vmspace *oldvmspace = p->p_vmspace;
3902 	struct vmspace *newvmspace;
3903 
3904 	lwkt_gettoken(&oldvmspace->vm_map.token);
3905 	if (vmspace_getrefs(oldvmspace) == 1) {
3906 		lwkt_reltoken(&oldvmspace->vm_map.token);
3907 		return;
3908 	}
3909 	newvmspace = vmspace_fork(oldvmspace);
3910 	lwkt_gettoken(&newvmspace->vm_map.token);
3911 	pmap_pinit2(vmspace_pmap(newvmspace));
3912 	pmap_replacevm(p, newvmspace, 0);
3913 	lwkt_reltoken(&newvmspace->vm_map.token);
3914 	lwkt_reltoken(&oldvmspace->vm_map.token);
3915 	vmspace_rel(oldvmspace);
3916 }
3917 
3918 /*
3919  * vm_map_hint: return the beginning of the best area suitable for
3920  * creating a new mapping with "prot" protection.
3921  *
3922  * No requirements.
3923  */
3924 vm_offset_t
3925 vm_map_hint(struct proc *p, vm_offset_t addr, vm_prot_t prot)
3926 {
3927 	struct vmspace *vms = p->p_vmspace;
3928 
3929 	if (!randomize_mmap || addr != 0) {
3930 		/*
3931 		 * Set a reasonable start point for the hint if it was
3932 		 * not specified or if it falls within the heap space.
3933 		 * Hinted mmap()s do not allocate out of the heap space.
3934 		 */
3935 		if (addr == 0 ||
3936 		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
3937 		     addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))) {
3938 			addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
3939 		}
3940 
3941 		return addr;
3942 	}
3943 
3944 #ifdef notyet
3945 #ifdef __i386__
3946 	/*
3947 	 * If executable skip first two pages, otherwise start
3948 	 * after data + heap region.
3949 	 */
3950 	if ((prot & VM_PROT_EXECUTE) &&
3951 	    ((vm_offset_t)vms->vm_daddr >= I386_MAX_EXE_ADDR)) {
3952 		addr = (PAGE_SIZE * 2) +
3953 		    (karc4random() & (I386_MAX_EXE_ADDR / 2 - 1));
3954 		return (round_page(addr));
3955 	}
3956 #endif /* __i386__ */
3957 #endif /* notyet */
3958 
3959 	addr = (vm_offset_t)vms->vm_daddr + MAXDSIZ;
3960 	addr += karc4random() & (MIN((256 * 1024 * 1024), MAXDSIZ) - 1);
3961 
3962 	return (round_page(addr));
3963 }
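
/*
 * Note (informational): the randomization above relies on the mask
 * operands being powers of two.  With MAXDSIZ >= 256MB,
 *
 *	addr += karc4random() & (256 * 1024 * 1024 - 1);
 *
 * draws a uniformly distributed byte offset in [0, 256MB) which is
 * then rounded up to a page boundary.
 */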
3964 
3965 /*
3966  * Finds the VM object, offset, and protection for a given virtual address
3967  * in the specified map, assuming a page fault of the type specified.
3968  *
3969  * Leaves the map in question locked for read; return values are guaranteed
3970  * until a vm_map_lookup_done call is performed.  Note that the map argument
3971  * is in/out; the returned map must be used in the call to vm_map_lookup_done.
3972  *
3973  * A handle (out_entry) is returned for use in vm_map_lookup_done, to make
3974  * that fast.
3975  *
3976  * If a lookup is requested with "write protection" specified, the map may
3977  * be changed to perform virtual copying operations, although the data
3978  * referenced will remain the same.
3979  *
3980  * No requirements.
3981  */
3982 int
3983 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
3984 	      vm_offset_t vaddr,
3985 	      vm_prot_t fault_typea,
3986 	      vm_map_entry_t *out_entry,	/* OUT */
3987 	      vm_object_t *object,		/* OUT */
3988 	      vm_pindex_t *pindex,		/* OUT */
3989 	      vm_prot_t *out_prot,		/* OUT */
3990 	      boolean_t *wired)			/* OUT */
3991 {
3992 	vm_map_entry_t entry;
3993 	vm_map_t map = *var_map;
3994 	vm_prot_t prot;
3995 	vm_prot_t fault_type = fault_typea;
3996 	int use_read_lock = 1;
3997 	int rv = KERN_SUCCESS;
3998 
3999 RetryLookup:
4000 	if (use_read_lock)
4001 		vm_map_lock_read(map);
4002 	else
4003 		vm_map_lock(map);
4004 
4005 	/*
4006 	 * If the map has an interesting hint, try it before calling the
4007 	 * full-blown lookup routine.
4008 	 */
4009 	entry = map->hint;
4010 	cpu_ccfence();
4011 	*out_entry = entry;
4012 	*object = NULL;
4013 
4014 	if ((entry == &map->header) ||
4015 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
4016 		vm_map_entry_t tmp_entry;
4017 
4018 		/*
4019 		 * Entry was either not a valid hint, or the vaddr was not
4020 		 * contained in the entry, so do a full lookup.
4021 		 */
4022 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
4023 			rv = KERN_INVALID_ADDRESS;
4024 			goto done;
4025 		}
4026 
4027 		entry = tmp_entry;
4028 		*out_entry = entry;
4029 	}
4030 
4031 	/*
4032 	 * Handle submaps.
4033 	 */
4034 	if (entry->maptype == VM_MAPTYPE_SUBMAP) {
4035 		vm_map_t old_map = map;
4036 
4037 		*var_map = map = entry->object.sub_map;
4038 		if (use_read_lock)
4039 			vm_map_unlock_read(old_map);
4040 		else
4041 			vm_map_unlock(old_map);
4042 		use_read_lock = 1;
4043 		goto RetryLookup;
4044 	}
4045 
4046 	/*
4047 	 * Check whether this task is allowed to have this page.
4048 	 * Note the special case for MAP_ENTRY_COW
4049 	 * pages with an override.  This is to implement a forced
4050 	 * COW for debuggers.
4051 	 */
4052 
4053 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
4054 		prot = entry->max_protection;
4055 	else
4056 		prot = entry->protection;
4057 
4058 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
4059 	if ((fault_type & prot) != fault_type) {
4060 		rv = KERN_PROTECTION_FAILURE;
4061 		goto done;
4062 	}
4063 
4064 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
4065 	    (entry->eflags & MAP_ENTRY_COW) &&
4066 	    (fault_type & VM_PROT_WRITE) &&
4067 	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
4068 		rv = KERN_PROTECTION_FAILURE;
4069 		goto done;
4070 	}
4071 
4072 	/*
4073 	 * If this page is not pageable, we have to get it for all possible
4074 	 * accesses.
4075 	 */
4076 	*wired = (entry->wired_count != 0);
4077 	if (*wired)
4078 		prot = fault_type = entry->protection;
4079 
4080 	/*
4081 	 * Virtual page tables may need to update the accessed (A) bit
4082 	 * in a page table entry.  Upgrade the fault to a write fault for
4083 	 * that case if the map will support it.  If the map does not support
4084 	 * it the page table entry simply will not be updated.
4085 	 */
4086 	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
4087 		if (prot & VM_PROT_WRITE)
4088 			fault_type |= VM_PROT_WRITE;
4089 	}
4090 
4091 	if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
4092 	    pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
4093 		if ((prot & VM_PROT_WRITE) == 0)
4094 			fault_type |= VM_PROT_WRITE;
4095 	}
4096 
4097 	/*
4098 	 * Only NORMAL and VPAGETABLE maps are object-based.  UKSMAPs are not.
4099 	 */
4100 	if (entry->maptype != VM_MAPTYPE_NORMAL &&
4101 	    entry->maptype != VM_MAPTYPE_VPAGETABLE) {
4102 		*object = NULL;
4103 		goto skip;
4104 	}
4105 
4106 	/*
4107 	 * If the entry was copy-on-write, we either shadow it or demote access.
4108 	 */
4109 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4110 		/*
4111 		 * If we want to write the page, we may as well handle that
4112 		 * now since we've got the map locked.
4113 		 *
4114 		 * If we don't need to write the page, we just demote the
4115 		 * permissions allowed.
4116 		 */
4117 
4118 		if (fault_type & VM_PROT_WRITE) {
4119 			/*
4120 			 * Make a new object, and place it in the object
4121 			 * chain.  Note that no new references have appeared
4122 			 * -- one just moved from the map to the new
4123 			 * object.
4124 			 */
4125 
4126 			if (use_read_lock && vm_map_lock_upgrade(map)) {
4127 				/* lost lock */
4128 				use_read_lock = 0;
4129 				goto RetryLookup;
4130 			}
4131 			use_read_lock = 0;
4132 
4133 			vm_map_entry_shadow(entry, 0);
4134 		} else {
4135 			/*
4136 			 * We're attempting to read a copy-on-write page --
4137 			 * don't allow writes.
4138 			 */
4139 
4140 			prot &= ~VM_PROT_WRITE;
4141 		}
4142 	}
4143 
4144 	/*
4145 	 * Create an object if necessary.
4146 	 */
4147 	if (entry->object.vm_object == NULL && !map->system_map) {
4148 		if (use_read_lock && vm_map_lock_upgrade(map))  {
4149 			/* lost lock */
4150 			use_read_lock = 0;
4151 			goto RetryLookup;
4152 		}
4153 		use_read_lock = 0;
4154 		vm_map_entry_allocate_object(entry);
4155 	}
4156 
4157 	/*
4158 	 * Return the object/offset from this entry.  If the entry was
4159 	 * copy-on-write or empty, it has been fixed up.
4160 	 */
4161 	*object = entry->object.vm_object;
4162 
4163 skip:
4164 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4165 
4166 	/*
4167 	 * Return whether this is the only map sharing this data.  On
4168 	 * success we return with a read lock held on the map.  On failure
4169 	 * we return with the map unlocked.
4170 	 */
4171 	*out_prot = prot;
4172 done:
4173 	if (rv == KERN_SUCCESS) {
4174 		if (use_read_lock == 0)
4175 			vm_map_lock_downgrade(map);
4176 	} else if (use_read_lock) {
4177 		vm_map_unlock_read(map);
4178 	} else {
4179 		vm_map_unlock(map);
4180 	}
4181 	return (rv);
4182 }
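
/*
 * Illustrative sketch (not part of the build): fault-style consumers
 * pair the lookup with vm_map_lookup_done(), e.g.
 *
 *	rv = vm_map_lookup(&map, vaddr, fault_type, &entry,
 *			   &object, &pindex, &prot, &wired);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 *	... use object/pindex under the read lock ...
 *	vm_map_lookup_done(map, entry, 0);
 *
 * Note that 'map' may have been replaced by a submap during the
 * lookup, so the updated value must be the one handed back to
 * vm_map_lookup_done().
 */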
4183 
4184 /*
4185  * Releases locks acquired by a vm_map_lookup()
4186  * (according to the handle returned by that lookup).
4187  *
4188  * No other requirements.
4189  */
4190 void
4191 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
4192 {
4193 	/*
4194 	 * Unlock the main-level map
4195 	 */
4196 	vm_map_unlock_read(map);
4197 	if (count)
4198 		vm_map_entry_release(count);
4199 }
4200 
4201 #include "opt_ddb.h"
4202 #ifdef DDB
4203 #include <sys/kernel.h>
4204 
4205 #include <ddb/ddb.h>
4206 
4207 /*
4208  * Debugging only
4209  */
4210 DB_SHOW_COMMAND(map, vm_map_print)
4211 {
4212 	static int nlines;
4213 	/* XXX convert args. */
4214 	vm_map_t map = (vm_map_t)addr;
4215 	boolean_t full = have_addr;
4216 
4217 	vm_map_entry_t entry;
4218 
4219 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4220 	    (void *)map,
4221 	    (void *)map->pmap, map->nentries, map->timestamp);
4222 	nlines++;
4223 
4224 	if (!full && db_indent)
4225 		return;
4226 
4227 	db_indent += 2;
4228 	for (entry = map->header.next; entry != &map->header;
4229 	    entry = entry->next) {
4230 		db_iprintf("map entry %p: start=%p, end=%p\n",
4231 		    (void *)entry, (void *)entry->start, (void *)entry->end);
4232 		nlines++;
4233 		{
4234 			static char *inheritance_name[4] =
4235 			{"share", "copy", "none", "donate_copy"};
4236 
4237 			db_iprintf(" prot=%x/%x/%s",
4238 			    entry->protection,
4239 			    entry->max_protection,
4240 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
4241 			if (entry->wired_count != 0)
4242 				db_printf(", wired");
4243 		}
4244 		switch(entry->maptype) {
4245 		case VM_MAPTYPE_SUBMAP:
4246 			/* XXX no %qd in kernel.  Truncate entry->offset. */
4247 			db_printf(", share=%p, offset=0x%lx\n",
4248 			    (void *)entry->object.sub_map,
4249 			    (long)entry->offset);
4250 			nlines++;
4251 			if ((entry->prev == &map->header) ||
4252 			    (entry->prev->object.sub_map !=
4253 				entry->object.sub_map)) {
4254 				db_indent += 2;
4255 				vm_map_print((db_expr_t)(intptr_t)
4256 					     entry->object.sub_map,
4257 					     full, 0, NULL);
4258 				db_indent -= 2;
4259 			}
4260 			break;
4261 		case VM_MAPTYPE_NORMAL:
4262 		case VM_MAPTYPE_VPAGETABLE:
4263 			/* XXX no %qd in kernel.  Truncate entry->offset. */
4264 			db_printf(", object=%p, offset=0x%lx",
4265 			    (void *)entry->object.vm_object,
4266 			    (long)entry->offset);
4267 			if (entry->eflags & MAP_ENTRY_COW)
4268 				db_printf(", copy (%s)",
4269 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4270 			db_printf("\n");
4271 			nlines++;
4272 
4273 			if ((entry->prev == &map->header) ||
4274 			    (entry->prev->object.vm_object !=
4275 				entry->object.vm_object)) {
4276 				db_indent += 2;
4277 				vm_object_print((db_expr_t)(intptr_t)
4278 						entry->object.vm_object,
4279 						full, 0, NULL);
4280 				nlines += 4;
4281 				db_indent -= 2;
4282 			}
4283 			break;
4284 		case VM_MAPTYPE_UKSMAP:
4285 			db_printf(", uksmap=%p, offset=0x%lx",
4286 			    (void *)entry->object.uksmap,
4287 			    (long)entry->offset);
4288 			if (entry->eflags & MAP_ENTRY_COW)
4289 				db_printf(", copy (%s)",
4290 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4291 			db_printf("\n");
4292 			nlines++;
4293 			break;
4294 		default:
4295 			break;
4296 		}
4297 	}
4298 	db_indent -= 2;
4299 	if (db_indent == 0)
4300 		nlines = 0;
4301 }
4302 
4303 /*
4304  * Debugging only
4305  */
4306 DB_SHOW_COMMAND(procvm, procvm)
4307 {
4308 	struct proc *p;
4309 
4310 	if (have_addr) {
4311 		p = (struct proc *) addr;
4312 	} else {
4313 		p = curproc;
4314 	}
4315 
4316 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4317 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4318 	    (void *)vmspace_pmap(p->p_vmspace));
4319 
4320 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
4321 }
4322 
4323 #endif /* DDB */
4324