xref: /dragonfly/sys/vm/vm_map.c (revision 1ab20d67)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  *
64  * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
65  * $DragonFly: src/sys/vm/vm_map.c,v 1.27 2004/05/13 17:40:19 dillon Exp $
66  */
67 
68 /*
69  *	Virtual memory mapping module.
70  */
71 
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/proc.h>
75 #include <sys/lock.h>
76 #include <sys/vmmeter.h>
77 #include <sys/mman.h>
78 #include <sys/vnode.h>
79 #include <sys/resourcevar.h>
80 #include <sys/shm.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_param.h>
84 #include <vm/pmap.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_pager.h>
89 #include <vm/vm_kern.h>
90 #include <vm/vm_extern.h>
91 #include <vm/swap_pager.h>
92 #include <vm/vm_zone.h>
93 
94 #include <sys/thread2.h>
95 
96 /*
97  *	Virtual memory maps provide for the mapping, protection,
98  *	and sharing of virtual memory objects.  In addition,
99  *	this module provides for an efficient virtual copy of
100  *	memory from one map to another.
101  *
102  *	Synchronization is required prior to most operations.
103  *
104  *	Maps consist of an ordered doubly-linked list of simple
105  *	entries; a single hint is used to speed up lookups.
106  *
107  *	Since portions of maps are specified by start/end addresses,
108  *	which may not align with existing map entries, all
109  *	routines merely "clip" entries to these start/end values.
110  *	[That is, an entry is split into two, bordering at a
111  *	start or end value.]  Note that these clippings may not
112  *	always be necessary (as the two resulting entries are then
113  *	not changed); however, the clipping is done for convenience.
114  *
115  *	As mentioned above, virtual copy operations are performed
116  *	by copying VM object references from one map to
117  *	another, and then marking both regions as copy-on-write.
118  */
119 
120 /*
121  *	vm_map_startup:
122  *
123  *	Initialize the vm_map module.  Must be called before
124  *	any other vm_map routines.
125  *
126  *	Map and entry structures are allocated from the general
127  *	purpose memory pool with some exceptions:
128  *
129  *	- The kernel map and kmem submap are allocated statically.
130  *	- Kernel map entries are allocated out of a static pool.
131  *
132  *	These restrictions are necessary since malloc() uses the
133  *	maps and requires map entries.
134  */
135 
136 static struct vm_zone mapentzone_store, mapzone_store;
137 static vm_zone_t mapentzone, mapzone, vmspace_zone;
138 static struct vm_object mapentobj, mapobj;
139 
140 static struct vm_map_entry map_entry_init[MAX_MAPENT];
141 static struct vm_map map_init[MAX_KMAP];
142 
143 static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *);
144 static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
145 static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
146 static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
147 static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
148 static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
149 static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
150 		vm_map_entry_t);
151 static void vm_map_split (vm_map_entry_t);
152 static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry, vm_offset_t start, vm_offset_t end, int *count, int flags);
153 
154 void
155 vm_map_startup(void)
156 {
157 	mapzone = &mapzone_store;
158 	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
159 		map_init, MAX_KMAP);
160 	mapentzone = &mapentzone_store;
161 	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
162 		map_entry_init, MAX_MAPENT);
163 }
164 
165 /*
166  * Allocate a vmspace structure, including a vm_map and pmap,
167  * and initialize those structures.  The refcnt is set to 1.
168  * The remaining fields must be initialized by the caller.
169  */
170 struct vmspace *
171 vmspace_alloc(vm_offset_t min, vm_offset_t max)
172 {
173 	struct vmspace *vm;
174 
175 	vm = zalloc(vmspace_zone);
176 	vm_map_init(&vm->vm_map, min, max);
177 	pmap_pinit(vmspace_pmap(vm));
178 	vm->vm_map.pmap = vmspace_pmap(vm);		/* XXX */
179 	vm->vm_refcnt = 1;
180 	vm->vm_shm = NULL;
181 	vm->vm_exitingcnt = 0;
182 	return (vm);
183 }
184 
185 void
186 vm_init2(void)
187 {
188 	zinitna(mapentzone, &mapentobj, NULL, 0, 0, ZONE_USE_RESERVE, 1);
189 	zinitna(mapzone, &mapobj, NULL, 0, 0, 0, 1);
190 	vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
191 	pmap_init2();
192 	vm_object_init2();
193 }
194 
195 static __inline void
196 vmspace_dofree(struct vmspace *vm)
197 {
198 	int count;
199 
200 	/*
201 	 * Make sure any SysV shm is freed; it might not have been
202 	 * freed in exit1().
203 	 */
204 	shmexit(vm);
205 
206 	KKASSERT(vm->vm_upcalls == NULL);
207 
208 	/*
209 	 * Lock the map, to wait out all other references to it.
210 	 * Delete all of the mappings and pages they hold, then call
211 	 * the pmap module to reclaim anything left.
212 	 */
213 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
214 	vm_map_lock(&vm->vm_map);
215 	vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
216 		vm->vm_map.max_offset, &count);
217 	vm_map_unlock(&vm->vm_map);
218 	vm_map_entry_release(count);
219 
220 	pmap_release(vmspace_pmap(vm));
221 	zfree(vmspace_zone, vm);
222 }
223 
224 void
225 vmspace_free(struct vmspace *vm)
226 {
227 	if (vm->vm_refcnt == 0)
228 		panic("vmspace_free: attempt to free already freed vmspace");
229 
230 	if (--vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
231 		vmspace_dofree(vm);
232 }
233 
234 void
235 vmspace_exitfree(struct proc *p)
236 {
237 	struct vmspace *vm;
238 
239 	vm = p->p_vmspace;
240 	p->p_vmspace = NULL;
241 
242 	/*
243 	 * cleanup by parent process wait()ing on exiting child.  vm_refcnt
244 	 * may not be 0 (e.g. fork() and child exits without exec()ing).
245 	 * exitingcnt may increment above 0 and drop back down to zero
246 	 * several times while vm_refcnt is held non-zero.  vm_refcnt
247 	 * may also increment above 0 and drop back down to zero several
248 	 * times while vm_exitingcnt is held non-zero.
249 	 *
250 	 * The last wait on the exiting child's vmspace will clean up
251 	 * the remainder of the vmspace.
252 	 */
253 	if (--vm->vm_exitingcnt == 0 && vm->vm_refcnt == 0)
254 		vmspace_dofree(vm);
255 }
256 
257 /*
258  * vmspace_swap_count() - count the approximate swap usage in pages for a
259  *			  vmspace.
260  *
261  *	Swap usage is determined by taking the proportional swap used by
262  *	VM objects backing the VM map.  To make up for fractional losses,
263  *	if the VM object has any swap use at all the associated map entries
264  *	count for at least 1 swap page.
265  */
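/*
 * Illustrative example (hypothetical numbers; SWAP_META_PAGES is assumed
 * to be 16 here): an OBJT_SWAP object of 64 pages with swp_bcount == 2,
 * mapped by a 32 page entry, is charged
 *
 *	2 * 16 * 32 / 64 + 1 = 17
 *
 * swap pages by the loop below.
 */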
266 int
267 vmspace_swap_count(struct vmspace *vmspace)
268 {
269 	vm_map_t map = &vmspace->vm_map;
270 	vm_map_entry_t cur;
271 	int count = 0;
272 
273 	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
274 		vm_object_t object;
275 
276 		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
277 		    (object = cur->object.vm_object) != NULL &&
278 		    object->type == OBJT_SWAP
279 		) {
280 			int n = (cur->end - cur->start) / PAGE_SIZE;
281 
282 			if (object->un_pager.swp.swp_bcount) {
283 				count += object->un_pager.swp.swp_bcount *
284 				    SWAP_META_PAGES * n / object->size + 1;
285 			}
286 		}
287 	}
288 	return(count);
289 }
290 
291 
292 /*
293  *	vm_map_create:
294  *
295  *	Creates and returns a new empty VM map with
296  *	the given physical map structure, and having
297  *	the given lower and upper address bounds.
298  */
299 vm_map_t
300 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
301 {
302 	vm_map_t result;
303 
304 	result = zalloc(mapzone);
305 	vm_map_init(result, min, max);
306 	result->pmap = pmap;
307 	return (result);
308 }
309 
310 /*
311  * Initialize an existing vm_map structure
312  * such as that in the vmspace structure.
313  * The pmap is set elsewhere.
314  */
315 void
316 vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max)
317 {
318 	map->header.next = map->header.prev = &map->header;
319 	map->nentries = 0;
320 	map->size = 0;
321 	map->system_map = 0;
322 	map->infork = 0;
323 	map->min_offset = min;
324 	map->max_offset = max;
325 	map->first_free = &map->header;
326 	map->hint = &map->header;
327 	map->timestamp = 0;
328 	lockinit(&map->lock, 0, "thrd_sleep", 0, LK_NOPAUSE);
329 }
330 
331 /*
332  *      vm_map_entry_reserve_cpu_init:
333  *
334  *	Set an initial negative count so the first attempt to reserve
335  *	space preloads a bunch of vm_map_entry's for this cpu.  This
336  *	routine is called in early boot so we cannot just call
337  *	vm_map_entry_reserve().
338  *
339  *	May be called for a gd other than mycpu.
340  */
341 void
342 vm_map_entry_reserve_cpu_init(globaldata_t gd)
343 {
344 	gd->gd_vme_avail -= MAP_RESERVE_COUNT * 2;
345 }
346 
347 /*
348  *	vm_map_entry_reserve:
349  *
350  *	Reserves vm_map_entry structures so code later on can manipulate
351  *	map_entry structures within a locked map without blocking while
352  *	trying to allocate a new vm_map_entry.
353  */
354 int
355 vm_map_entry_reserve(int count)
356 {
357 	struct globaldata *gd = mycpu;
358 	vm_map_entry_t entry;
359 
360 	crit_enter();
361 	gd->gd_vme_avail -= count;
362 
363 	/*
364 	 * Make sure we have enough structures in gd_vme_base to handle
365 	 * the reservation request.
366 	 */
367 	while (gd->gd_vme_avail < 0) {
368 		entry = zalloc(mapentzone);
369 		entry->next = gd->gd_vme_base;
370 		gd->gd_vme_base = entry;
371 		++gd->gd_vme_avail;
372 	}
373 	crit_exit();
374 	return(count);
375 }
376 
377 /*
378  *	vm_map_entry_release:
379  *
380  *	Releases previously reserved vm_map_entry structures that were not
381  *	used.  If we have too much junk in our per-cpu cache clean some of
382  *	it out.
383  */
384 void
385 vm_map_entry_release(int count)
386 {
387 	struct globaldata *gd = mycpu;
388 	vm_map_entry_t entry;
389 
390 	crit_enter();
391 	gd->gd_vme_avail += count;
392 	while (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
393 		entry = gd->gd_vme_base;
394 		KKASSERT(entry != NULL);
395 		gd->gd_vme_base = entry->next;
396 		--gd->gd_vme_avail;
397 		crit_exit();
398 		zfree(mapentzone, entry);
399 		crit_enter();
400 	}
401 	crit_exit();
402 }
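
/*
 * A minimal sketch of the reserve/release pattern, mirroring callers
 * elsewhere in this file (vmspace_dofree(), vm_map_find(), etc.).  The
 * reservation is made before the map is locked so that entry allocation
 * cannot block while the lock is held:
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	... clip, create or delete entries, passing &count ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */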
403 
404 /*
405  *	vm_map_entry_kreserve:
406  *
407  *	Reserve map entry structures for use in kernel_map or (if it exists)
408  *	kmem_map.  These entries have *ALREADY* been reserved on a per-cpu
409  *	basis when the map was inited.  This function is used by zalloc()
410  *	to avoid a recursion when zalloc() itself needs to allocate additional
411  *	kernel memory.
412  *
413  *	This function should only be used when the caller intends to later
414  *	call vm_map_entry_reserve() to 'normalize' the reserve cache.
415  */
416 int
417 vm_map_entry_kreserve(int count)
418 {
419 	struct globaldata *gd = mycpu;
420 
421 	crit_enter();
422 	gd->gd_vme_kdeficit += count;
423 	crit_exit();
424 	KKASSERT(gd->gd_vme_base != NULL);
425 	return(count);
426 }
427 
428 /*
429  *	vm_map_entry_krelease:
430  *
431  *	Release previously reserved map entries for kernel_map or kmem_map
432  *	use.  This routine determines how many entries were actually used and
433  *	replenishes the kernel reserve supply from vme_avail.
434  *
435  *	If there is insufficient supply vme_avail will go negative, which is
436  *	ok.  We cannot safely call zalloc in this function without getting
437  *	into a recursion deadlock.  zalloc() will call vm_map_entry_reserve()
438  *	to regenerate the lost entries.
439  */
440 void
441 vm_map_entry_krelease(int count)
442 {
443 	struct globaldata *gd = mycpu;
444 
445 	crit_enter();
446 	gd->gd_vme_kdeficit -= count;
447 	gd->gd_vme_avail -= gd->gd_vme_kdeficit;	/* can go negative */
448 	gd->gd_vme_kdeficit = 0;
449 	crit_exit();
450 }
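
/*
 * Sketch of the kernel-reservation variant as used by vm_map_wire()
 * below; the release side is assumed to mirror the reserve side:
 *
 *	if (kmflags & KM_KRESERVE)
 *		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
 *	else
 *		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	...
 *	if (kmflags & KM_KRESERVE)
 *		vm_map_entry_krelease(count);
 *	else
 *		vm_map_entry_release(count);
 */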
451 
452 /*
453  *	vm_map_entry_create:	[ internal use only ]
454  *
455  *	Allocates a VM map entry for insertion.  No entry fields are filled
456  *	in.
457  *
458  *	This routine may be called from an interrupt thread but not a FAST
459  *	interrupt.  This routine may recurse the map lock.
460  */
461 static vm_map_entry_t
462 vm_map_entry_create(vm_map_t map, int *countp)
463 {
464 	struct globaldata *gd = mycpu;
465 	vm_map_entry_t entry;
466 
467 	KKASSERT(*countp > 0);
468 	--*countp;
469 	crit_enter();
470 	entry = gd->gd_vme_base;
471 	KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
472 	gd->gd_vme_base = entry->next;
473 	crit_exit();
474 	return(entry);
475 }
476 
477 /*
478  *	vm_map_entry_dispose:	[ internal use only ]
479  *
480  *	Dispose of a vm_map_entry that is no longer being referenced.  This
481  *	function may be called from an interrupt.
482  */
483 static void
484 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
485 {
486 	struct globaldata *gd = mycpu;
487 
488 	++*countp;
489 	crit_enter();
490 	entry->next = gd->gd_vme_base;
491 	gd->gd_vme_base = entry;
492 	crit_exit();
493 }
494 
495 
496 /*
497  *	vm_map_entry_{un,}link:
498  *
499  *	Insert/remove entries from maps.
500  */
501 static __inline void
502 vm_map_entry_link(vm_map_t map,
503 		  vm_map_entry_t after_where,
504 		  vm_map_entry_t entry)
505 {
506 	map->nentries++;
507 	entry->prev = after_where;
508 	entry->next = after_where->next;
509 	entry->next->prev = entry;
510 	after_where->next = entry;
511 }
512 
513 static __inline void
514 vm_map_entry_unlink(vm_map_t map,
515 		    vm_map_entry_t entry)
516 {
517 	vm_map_entry_t prev;
518 	vm_map_entry_t next;
519 
520 	if (entry->eflags & MAP_ENTRY_IN_TRANSITION)
521 		panic("vm_map_entry_unlink: attempt to mess with locked entry! %p", entry);
522 	prev = entry->prev;
523 	next = entry->next;
524 	next->prev = prev;
525 	prev->next = next;
526 	map->nentries--;
527 }
528 
529 /*
530  *	SAVE_HINT:
531  *
532  *	Saves the specified entry as the hint for
533  *	future lookups.
534  */
535 #define	SAVE_HINT(map,value) \
536 		(map)->hint = (value);
537 
538 /*
539  *	vm_map_lookup_entry:	[ internal use only ]
540  *
541  *	Finds the map entry containing (or
542  *	immediately preceding) the specified address
543  *	in the given map; the entry is returned
544  *	in the "entry" parameter.  The boolean
545  *	result indicates whether the address is
546  *	actually contained in the map.
547  */
548 boolean_t
549 vm_map_lookup_entry(vm_map_t map, vm_offset_t address,
550     vm_map_entry_t *entry /* OUT */)
551 {
552 	vm_map_entry_t cur;
553 	vm_map_entry_t last;
554 
555 	/*
556 	 * Start looking either from the head of the list, or from the hint.
557 	 */
558 
559 	cur = map->hint;
560 
561 	if (cur == &map->header)
562 		cur = cur->next;
563 
564 	if (address >= cur->start) {
565 		/*
566 		 * Go from hint to end of list.
567 		 *
568 		 * But first, make a quick check to see if we are already looking
569 		 * at the entry we want (which is usually the case). Note also
570 		 * that we don't need to save the hint here... it is the same
571 		 * hint (unless we are at the header, in which case the hint
572 		 * didn't buy us anything anyway).
573 		 */
574 		last = &map->header;
575 		if ((cur != last) && (cur->end > address)) {
576 			*entry = cur;
577 			return (TRUE);
578 		}
579 	} else {
580 		/*
581 		 * Go from start to hint, *inclusively*
582 		 */
583 		last = cur->next;
584 		cur = map->header.next;
585 	}
586 
587 	/*
588 	 * Search linearly
589 	 */
590 
591 	while (cur != last) {
592 		if (cur->end > address) {
593 			if (address >= cur->start) {
594 				/*
595 				 * Save this lookup for future hints, and
596 				 * return
597 				 */
598 
599 				*entry = cur;
600 				SAVE_HINT(map, cur);
601 				return (TRUE);
602 			}
603 			break;
604 		}
605 		cur = cur->next;
606 	}
607 	*entry = cur->prev;
608 	SAVE_HINT(map, *entry);
609 	return (FALSE);
610 }
611 
612 /*
613  *	vm_map_insert:
614  *
615  *	Inserts the given whole VM object into the target
616  *	map at the specified address range.  The object's
617  *	size should match that of the address range.
618  *
619  *	Requires that the map be locked, and leaves it so.  Requires that
620  *	sufficient vm_map_entry structures have been reserved and tracks
621  *	the use via countp.
622  *
623  *	If object is non-NULL, ref count must be bumped by caller
624  *	prior to making call to account for the new entry.
625  */
626 int
627 vm_map_insert(vm_map_t map, int *countp,
628 	      vm_object_t object, vm_ooffset_t offset,
629 	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
630 	      int cow)
631 {
632 	vm_map_entry_t new_entry;
633 	vm_map_entry_t prev_entry;
634 	vm_map_entry_t temp_entry;
635 	vm_eflags_t protoeflags;
636 
637 	/*
638 	 * Check that the start and end points are not bogus.
639 	 */
640 
641 	if ((start < map->min_offset) || (end > map->max_offset) ||
642 	    (start >= end))
643 		return (KERN_INVALID_ADDRESS);
644 
645 	/*
646 	 * Find the entry prior to the proposed starting address; if it's part
647 	 * of an existing entry, this range is bogus.
648 	 */
649 
650 	if (vm_map_lookup_entry(map, start, &temp_entry))
651 		return (KERN_NO_SPACE);
652 
653 	prev_entry = temp_entry;
654 
655 	/*
656 	 * Assert that the next entry doesn't overlap the end point.
657 	 */
658 
659 	if ((prev_entry->next != &map->header) &&
660 	    (prev_entry->next->start < end))
661 		return (KERN_NO_SPACE);
662 
663 	protoeflags = 0;
664 
665 	if (cow & MAP_COPY_ON_WRITE)
666 		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
667 
668 	if (cow & MAP_NOFAULT) {
669 		protoeflags |= MAP_ENTRY_NOFAULT;
670 
671 		KASSERT(object == NULL,
672 			("vm_map_insert: paradoxical MAP_NOFAULT request"));
673 	}
674 	if (cow & MAP_DISABLE_SYNCER)
675 		protoeflags |= MAP_ENTRY_NOSYNC;
676 	if (cow & MAP_DISABLE_COREDUMP)
677 		protoeflags |= MAP_ENTRY_NOCOREDUMP;
678 
679 	if (object) {
680 		/*
681 		 * When object is non-NULL, it could be shared with another
682 		 * process.  We have to set or clear OBJ_ONEMAPPING
683 		 * appropriately.
684 		 */
685 		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
686 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
687 		}
688 	}
689 	else if ((prev_entry != &map->header) &&
690 		 (prev_entry->eflags == protoeflags) &&
691 		 (prev_entry->end == start) &&
692 		 (prev_entry->wired_count == 0) &&
693 		 ((prev_entry->object.vm_object == NULL) ||
694 		  vm_object_coalesce(prev_entry->object.vm_object,
695 				     OFF_TO_IDX(prev_entry->offset),
696 				     (vm_size_t)(prev_entry->end - prev_entry->start),
697 				     (vm_size_t)(end - prev_entry->end)))) {
698 		/*
699 		 * We were able to extend the object.  Determine if we
700 		 * can extend the previous map entry to include the
701 		 * new range as well.
702 		 */
703 		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
704 		    (prev_entry->protection == prot) &&
705 		    (prev_entry->max_protection == max)) {
706 			map->size += (end - prev_entry->end);
707 			prev_entry->end = end;
708 			vm_map_simplify_entry(map, prev_entry, countp);
709 			return (KERN_SUCCESS);
710 		}
711 
712 		/*
713 		 * If we can extend the object but cannot extend the
714 		 * map entry, we have to create a new map entry.  We
715 		 * must bump the ref count on the extended object to
716 		 * account for it.  object may be NULL.
717 		 */
718 		object = prev_entry->object.vm_object;
719 		offset = prev_entry->offset +
720 			(prev_entry->end - prev_entry->start);
721 		vm_object_reference(object);
722 	}
723 
724 	/*
725 	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
726 	 * in things like the buffer map where we manage kva but do not manage
727 	 * backing objects.
728 	 */
729 
730 	/*
731 	 * Create a new entry
732 	 */
733 
734 	new_entry = vm_map_entry_create(map, countp);
735 	new_entry->start = start;
736 	new_entry->end = end;
737 
738 	new_entry->eflags = protoeflags;
739 	new_entry->object.vm_object = object;
740 	new_entry->offset = offset;
741 	new_entry->avail_ssize = 0;
742 
743 	new_entry->inheritance = VM_INHERIT_DEFAULT;
744 	new_entry->protection = prot;
745 	new_entry->max_protection = max;
746 	new_entry->wired_count = 0;
747 
748 	/*
749 	 * Insert the new entry into the list
750 	 */
751 
752 	vm_map_entry_link(map, prev_entry, new_entry);
753 	map->size += new_entry->end - new_entry->start;
754 
755 	/*
756 	 * Update the free space hint
757 	 */
758 	if ((map->first_free == prev_entry) &&
759 	    (prev_entry->end >= new_entry->start)) {
760 		map->first_free = new_entry;
761 	}
762 
763 #if 0
764 	/*
765 	 * Temporarily removed to avoid MAP_STACK panic, due to
766 	 * MAP_STACK being a huge hack.  Will be added back in
767 	 * when MAP_STACK (and the user stack mapping) is fixed.
768 	 */
769 	/*
770 	 * It may be possible to simplify the entry
771 	 */
772 	vm_map_simplify_entry(map, new_entry, countp);
773 #endif
774 
775 	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
776 		pmap_object_init_pt(map->pmap, start, prot,
777 				    object, OFF_TO_IDX(offset), end - start,
778 				    cow & MAP_PREFAULT_PARTIAL);
779 	}
780 
781 	return (KERN_SUCCESS);
782 }
783 
784 /*
785  * Find sufficient space for `length' bytes in the given map, starting at
786  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
787  *
788  * This function will return an arbitrarily aligned pointer.  If no
789  * particular alignment is required you should pass align as 1.  Note that
790  * the map may return PAGE_SIZE aligned pointers if all the lengths used in
791  * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
792  * argument.
793  *
794  * 'align' should be a power of 2 but is not required to be.
795  */
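/*
 * For example, vm_map_find() below passes align == 1, requesting no
 * particular alignment: the power-of-2 path is taken, align_mask becomes
 * 0, and each candidate start address is used unmodified.
 */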
796 int
797 vm_map_findspace(
798 	vm_map_t map,
799 	vm_offset_t start,
800 	vm_size_t length,
801 	vm_offset_t align,
802 	vm_offset_t *addr)
803 {
804 	vm_map_entry_t entry, next;
805 	vm_offset_t end;
806 	vm_offset_t align_mask;
807 
808 	if (start < map->min_offset)
809 		start = map->min_offset;
810 	if (start > map->max_offset)
811 		return (1);
812 
813 	/*
814 	 * If the alignment is not a power of 2 we will have to use
815 	 * a mod/division, set align_mask to a special value.
816 	 */
817 	if ((align | (align - 1)) + 1 != (align << 1))
818 		align_mask = (vm_offset_t)-1;
819 	else
820 		align_mask = align - 1;
821 
822 retry:
823 	/*
824 	 * Look for the first possible address; if there's already something
825 	 * at this address, we have to start after it.
826 	 */
827 	if (start == map->min_offset) {
828 		if ((entry = map->first_free) != &map->header)
829 			start = entry->end;
830 	} else {
831 		vm_map_entry_t tmp;
832 
833 		if (vm_map_lookup_entry(map, start, &tmp))
834 			start = tmp->end;
835 		entry = tmp;
836 	}
837 
838 	/*
839 	 * Look through the rest of the map, trying to fit a new region in the
840 	 * gap between existing regions, or after the very last region.
841 	 */
842 	for (;; start = (entry = next)->end) {
843 		/*
844 		 * Adjust the proposed start by the requested alignment,
845 		 * be sure that we didn't wrap the address.
846 		 */
847 		if (align_mask == (vm_offset_t)-1)
848 			end = ((start + align - 1) / align) * align;
849 		else
850 			end = (start + align_mask) & ~align_mask;
851 		if (end < start)
852 			return (1);
853 		start = end;
854 		/*
855 		 * Find the end of the proposed new region.  Be sure we didn't
856 		 * go beyond the end of the map, or wrap around the address.
857 		 * Then check to see if this is the last entry or if the
858 		 * proposed end fits in the gap between this and the next
859 		 * entry.
860 		 */
861 		end = start + length;
862 		if (end > map->max_offset || end < start)
863 			return (1);
864 		next = entry->next;
865 		if (next == &map->header || next->start >= end)
866 			break;
867 	}
868 	SAVE_HINT(map, entry);
869 	if (map == kernel_map) {
870 		vm_offset_t ksize;
871 		if ((ksize = round_page(start + length)) > kernel_vm_end) {
872 			pmap_growkernel(ksize);
873 			goto retry;
874 		}
875 	}
876 	*addr = start;
877 	return (0);
878 }
879 
880 /*
881  *	vm_map_find finds an unallocated region in the target address
882  *	map with the given length.  The search is defined to be
883  *	first-fit from the specified address; the region found is
884  *	returned in the same parameter.
885  *
886  *	If object is non-NULL, ref count must be bumped by caller
887  *	prior to making call to account for the new entry.
888  */
889 int
890 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
891 	    vm_offset_t *addr,	/* IN/OUT */
892 	    vm_size_t length, boolean_t find_space, vm_prot_t prot,
893 	    vm_prot_t max, int cow)
894 {
895 	vm_offset_t start;
896 	int result;
897 	int count;
898 
899 	start = *addr;
900 
901 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
902 	vm_map_lock(map);
903 	if (find_space) {
904 		if (vm_map_findspace(map, start, length, 1, addr)) {
905 			vm_map_unlock(map);
906 			vm_map_entry_release(count);
907 			return (KERN_NO_SPACE);
908 		}
909 		start = *addr;
910 	}
911 	result = vm_map_insert(map, &count, object, offset,
912 		start, start + length, prot, max, cow);
913 	vm_map_unlock(map);
914 	vm_map_entry_release(count);
915 
916 	return (result);
917 }
918 
919 /*
920  *	vm_map_simplify_entry:
921  *
922  *	Simplify the given map entry by merging with either neighbor.  This
923  *	routine also has the ability to merge with both neighbors.
924  *
925  *	The map must be locked.
926  *
927  *	This routine guarantees that the passed entry remains valid (though
928  *	possibly extended).  When merging, this routine may delete one or
929  *	both neighbors.  No action is taken on entries which have their
930  *	in-transition flag set.
931  */
932 void
933 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
934 {
935 	vm_map_entry_t next, prev;
936 	vm_size_t prevsize, esize;
937 
938 	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) {
939 		++mycpu->gd_cnt.v_intrans_coll;
940 		return;
941 	}
942 
943 	prev = entry->prev;
944 	if (prev != &map->header) {
945 		prevsize = prev->end - prev->start;
946 		if ( (prev->end == entry->start) &&
947 		     (prev->object.vm_object == entry->object.vm_object) &&
948 		     (!prev->object.vm_object ||
949 			(prev->offset + prevsize == entry->offset)) &&
950 		     (prev->eflags == entry->eflags) &&
951 		     (prev->protection == entry->protection) &&
952 		     (prev->max_protection == entry->max_protection) &&
953 		     (prev->inheritance == entry->inheritance) &&
954 		     (prev->wired_count == entry->wired_count)) {
955 			if (map->first_free == prev)
956 				map->first_free = entry;
957 			if (map->hint == prev)
958 				map->hint = entry;
959 			vm_map_entry_unlink(map, prev);
960 			entry->start = prev->start;
961 			entry->offset = prev->offset;
962 			if (prev->object.vm_object)
963 				vm_object_deallocate(prev->object.vm_object);
964 			vm_map_entry_dispose(map, prev, countp);
965 		}
966 	}
967 
968 	next = entry->next;
969 	if (next != &map->header) {
970 		esize = entry->end - entry->start;
971 		if ((entry->end == next->start) &&
972 		    (next->object.vm_object == entry->object.vm_object) &&
973 		     (!entry->object.vm_object ||
974 			(entry->offset + esize == next->offset)) &&
975 		    (next->eflags == entry->eflags) &&
976 		    (next->protection == entry->protection) &&
977 		    (next->max_protection == entry->max_protection) &&
978 		    (next->inheritance == entry->inheritance) &&
979 		    (next->wired_count == entry->wired_count)) {
980 			if (map->first_free == next)
981 				map->first_free = entry;
982 			if (map->hint == next)
983 				map->hint = entry;
984 			vm_map_entry_unlink(map, next);
985 			entry->end = next->end;
986 			if (next->object.vm_object)
987 				vm_object_deallocate(next->object.vm_object);
988 			vm_map_entry_dispose(map, next, countp);
989 	        }
990 	}
991 }
992 /*
993  *	vm_map_clip_start:	[ internal use only ]
994  *
995  *	Asserts that the given entry begins at or after
996  *	the specified address; if necessary,
997  *	it splits the entry into two.
998  */
999 #define vm_map_clip_start(map, entry, startaddr, countp) \
1000 { \
1001 	if (startaddr > entry->start) \
1002 		_vm_map_clip_start(map, entry, startaddr, countp); \
1003 }
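
/*
 * For example (illustrative addresses): clipping an entry covering
 * [A, C) at start address B, where A < B < C, leaves the existing entry
 * covering [B, C) and links a new entry covering [A, B) in front of it;
 * both reference the same backing object at appropriately adjusted
 * offsets.
 */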
1004 
1005 /*
1006  *	This routine is called only when it is known that
1007  *	the entry must be split.
1008  */
1009 static void
1010 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start, int *countp)
1011 {
1012 	vm_map_entry_t new_entry;
1013 
1014 	/*
1015 	 * Split off the front portion -- note that we must insert the new
1016 	 * entry BEFORE this one, so that this entry has the specified
1017 	 * starting address.
1018 	 */
1019 
1020 	vm_map_simplify_entry(map, entry, countp);
1021 
1022 	/*
1023 	 * If there is no object backing this entry, we might as well create
1024 	 * one now.  If we defer it, an object can get created after the map
1025 	 * is clipped, and individual objects will be created for the split-up
1026 	 * map.  This is a bit of a hack, but is also about the best place to
1027 	 * put this improvement.
1028 	 */
1029 
1030 	if (entry->object.vm_object == NULL && !map->system_map) {
1031 		vm_object_t object;
1032 		object = vm_object_allocate(OBJT_DEFAULT,
1033 				atop(entry->end - entry->start));
1034 		entry->object.vm_object = object;
1035 		entry->offset = 0;
1036 	}
1037 
1038 	new_entry = vm_map_entry_create(map, countp);
1039 	*new_entry = *entry;
1040 
1041 	new_entry->end = start;
1042 	entry->offset += (start - entry->start);
1043 	entry->start = start;
1044 
1045 	vm_map_entry_link(map, entry->prev, new_entry);
1046 
1047 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1048 		vm_object_reference(new_entry->object.vm_object);
1049 	}
1050 }
1051 
1052 /*
1053  *	vm_map_clip_end:	[ internal use only ]
1054  *
1055  *	Asserts that the given entry ends at or before
1056  *	the specified address; if necessary,
1057  *	it splits the entry into two.
1058  */
1059 
1060 #define vm_map_clip_end(map, entry, endaddr, countp) \
1061 { \
1062 	if (endaddr < entry->end) \
1063 		_vm_map_clip_end(map, entry, endaddr, countp); \
1064 }
1065 
1066 /*
1067  *	This routine is called only when it is known that
1068  *	the entry must be split.
1069  */
1070 static void
1071 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end, int *countp)
1072 {
1073 	vm_map_entry_t new_entry;
1074 
1075 	/*
1076 	 * If there is no object backing this entry, we might as well create
1077 	 * one now.  If we defer it, an object can get created after the map
1078 	 * is clipped, and individual objects will be created for the split-up
1079 	 * map.  This is a bit of a hack, but is also about the best place to
1080 	 * put this improvement.
1081 	 */
1082 
1083 	if (entry->object.vm_object == NULL && !map->system_map) {
1084 		vm_object_t object;
1085 		object = vm_object_allocate(OBJT_DEFAULT,
1086 				atop(entry->end - entry->start));
1087 		entry->object.vm_object = object;
1088 		entry->offset = 0;
1089 	}
1090 
1091 	/*
1092 	 * Create a new entry and insert it AFTER the specified entry
1093 	 */
1094 
1095 	new_entry = vm_map_entry_create(map, countp);
1096 	*new_entry = *entry;
1097 
1098 	new_entry->start = entry->end = end;
1099 	new_entry->offset += (end - entry->start);
1100 
1101 	vm_map_entry_link(map, entry, new_entry);
1102 
1103 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1104 		vm_object_reference(new_entry->object.vm_object);
1105 	}
1106 }
1107 
1108 /*
1109  *	VM_MAP_RANGE_CHECK:	[ internal use only ]
1110  *
1111  *	Asserts that the starting and ending region
1112  *	addresses fall within the valid range of the map.
1113  */
1114 #define	VM_MAP_RANGE_CHECK(map, start, end)		\
1115 		{					\
1116 		if (start < vm_map_min(map))		\
1117 			start = vm_map_min(map);	\
1118 		if (end > vm_map_max(map))		\
1119 			end = vm_map_max(map);		\
1120 		if (start > end)			\
1121 			start = end;			\
1122 		}
1123 
1124 /*
1125  *	vm_map_transition_wait:	[ kernel use only ]
1126  *
1127  *	Used to block when an in-transition collision occurs.  The map
1128  *	is unlocked for the sleep and relocked before the return.
1129  */
1130 static
1131 void
1132 vm_map_transition_wait(vm_map_t map)
1133 {
1134 	vm_map_unlock(map);
1135 	tsleep(map, 0, "vment", 0);
1136 	vm_map_lock(map);
1137 }
1138 
1139 /*
1140  * CLIP_CHECK_BACK
1141  * CLIP_CHECK_FWD
1142  *
1143  *	When we do blocking operations with the map lock held it is
1144  *	possible that a clip might have occurred on our in-transit entry,
1145  *	requiring an adjustment to the entry in our loop.  These macros
1146  *	help the pageable and clip_range code deal with the case.  The
1147  *	conditional costs virtually nothing if no clipping has occurred.
1148  */
1149 
1150 #define CLIP_CHECK_BACK(entry, save_start)		\
1151     do {						\
1152 	    while (entry->start != save_start) {	\
1153 		    entry = entry->prev;		\
1154 		    KASSERT(entry != &map->header, ("bad entry clip")); \
1155 	    }						\
1156     } while(0)
1157 
1158 #define CLIP_CHECK_FWD(entry, save_end)			\
1159     do {						\
1160 	    while (entry->end != save_end) {		\
1161 		    entry = entry->next;		\
1162 		    KASSERT(entry != &map->header, ("bad entry clip")); \
1163 	    }						\
1164     } while(0)
1165 
1166 
1167 /*
1168  *	vm_map_clip_range:	[ kernel use only ]
1169  *
1170  *	Clip the specified range and return the base entry.  The
1171  *	range may cover several entries starting at the returned base
1172  *	and the first and last entry in the covering sequence will be
1173  *	properly clipped to the requested start and end address.
1174  *
1175  *	If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
1176  *	flag.
1177  *
1178  *	The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
1179  *	covered by the requested range.
1180  *
1181  *	The map must be exclusively locked on entry and will remain locked
1182  *	on return. If no range exists or the range contains holes and you
1183  *	specified that no holes were allowed, NULL will be returned.  This
1184  *	routine may temporarily unlock the map in order to avoid a deadlock when
1185  *	sleeping.
1186  */
1187 static
1188 vm_map_entry_t
1189 vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end,
1190 	int *countp, int flags)
1191 {
1192 	vm_map_entry_t start_entry;
1193 	vm_map_entry_t entry;
1194 
1195 	/*
1196 	 * Locate the entry and effect initial clipping.  The in-transition
1197 	 * case does not occur very often so do not try to optimize it.
1198 	 */
1199 again:
1200 	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
1201 		return (NULL);
1202 	entry = start_entry;
1203 	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1204 		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1205 		++mycpu->gd_cnt.v_intrans_coll;
1206 		++mycpu->gd_cnt.v_intrans_wait;
1207 		vm_map_transition_wait(map);
1208 		/*
1209 		 * entry and/or start_entry may have been clipped while
1210 		 * we slept, or may have gone away entirely.  We have
1211 		 * to restart from the lookup.
1212 		 */
1213 		goto again;
1214 	}
1215 	/*
1216 	 * Since we hold an exclusive map lock we do not have to restart
1217 	 * after clipping, even though clipping may block in zalloc.
1218 	 */
1219 	vm_map_clip_start(map, entry, start, countp);
1220 	vm_map_clip_end(map, entry, end, countp);
1221 	entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1222 
1223 	/*
1224 	 * Scan entries covered by the range.  When working on the next
1225 	 * entry a restart need only re-loop on the current entry which
1226 	 * we have already locked, since 'next' may have changed.  Also,
1227 	 * even though entry is safe, it may have been clipped so we
1228 	 * have to iterate forwards through the clip after sleeping.
1229 	 */
1230 	while (entry->next != &map->header && entry->next->start < end) {
1231 		vm_map_entry_t next = entry->next;
1232 
1233 		if (flags & MAP_CLIP_NO_HOLES) {
1234 			if (next->start > entry->end) {
1235 				vm_map_unclip_range(map, start_entry,
1236 					start, entry->end, countp, flags);
1237 				return(NULL);
1238 			}
1239 		}
1240 
1241 		if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
1242 			vm_offset_t save_end = entry->end;
1243 			next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1244 			++mycpu->gd_cnt.v_intrans_coll;
1245 			++mycpu->gd_cnt.v_intrans_wait;
1246 			vm_map_transition_wait(map);
1247 
1248 			/*
1249 			 * clips might have occurred while we blocked.
1250 			 */
1251 			CLIP_CHECK_FWD(entry, save_end);
1252 			CLIP_CHECK_BACK(start_entry, start);
1253 			continue;
1254 		}
1255 		/*
1256 		 * No restart necessary even though clip_end may block, we
1257 		 * are holding the map lock.
1258 		 */
1259 		vm_map_clip_end(map, next, end, countp);
1260 		next->eflags |= MAP_ENTRY_IN_TRANSITION;
1261 		entry = next;
1262 	}
1263 	if (flags & MAP_CLIP_NO_HOLES) {
1264 		if (entry->end != end) {
1265 			vm_map_unclip_range(map, start_entry,
1266 				start, entry->end, countp, flags);
1267 			return(NULL);
1268 		}
1269 	}
1270 	return(start_entry);
1271 }
1272 
1273 /*
1274  *	vm_map_unclip_range:	[ kernel use only ]
1275  *
1276  *	Undo the effect of vm_map_clip_range().  You should pass the same
1277  *	flags and the same range that you passed to vm_map_clip_range().
1278  *	This code will clear the in-transition flag on the entries and
1279  *	wake up anyone waiting.  This code will also simplify the sequence
1280  *	and attempt to merge it with entries before and after the sequence.
1281  *
1282  *	The map must be locked on entry and will remain locked on return.
1283  *
1284  *	Note that you should also pass the start_entry returned by
1285  *	vm_map_clip_range().  However, if you block between the two calls
1286  *	with the map unlocked please be aware that the start_entry may
1287  *	have been clipped and you may need to scan it backwards to find
1288  *	the entry corresponding with the original start address.  You are
1289  *	responsible for this, vm_map_unclip_range() expects the correct
1290  *	start_entry to be passed to it and will KASSERT otherwise.
1291  */
1292 static
1293 void
1294 vm_map_unclip_range(
1295 	vm_map_t map,
1296 	vm_map_entry_t start_entry,
1297 	vm_offset_t start,
1298 	vm_offset_t end,
1299 	int *countp,
1300 	int flags)
1301 {
1302 	vm_map_entry_t entry;
1303 
1304 	entry = start_entry;
1305 
1306 	KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
1307 	while (entry != &map->header && entry->start < end) {
1308 		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, ("in-transition flag not set during unclip on: %p", entry));
1309 		KASSERT(entry->end <= end, ("unclip_range: tail wasn't clipped"));
1310 		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1311 		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1312 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1313 			wakeup(map);
1314 		}
1315 		entry = entry->next;
1316 	}
1317 
1318 	/*
1319 	 * Simplification does not block so there is no restart case.
1320 	 */
1321 	entry = start_entry;
1322 	while (entry != &map->header && entry->start < end) {
1323 		vm_map_simplify_entry(map, entry, countp);
1324 		entry = entry->next;
1325 	}
1326 }
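
/*
 * A minimal sketch of the clip/unclip protocol, following the way
 * vm_map_unwire() and vm_map_wire() below use it (the map remains
 * exclusively locked across the sequence):
 *
 *	start_entry = vm_map_clip_range(map, start, end, &count,
 *					MAP_CLIP_NO_HOLES);
 *	if (start_entry == NULL)
 *		... fail, range missing or contains holes ...
 *	... operate on the in-transition entries covering [start, end) ...
 *	vm_map_unclip_range(map, start_entry, start, end, &count,
 *			    MAP_CLIP_NO_HOLES);
 */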
1327 
1328 /*
1329  *	vm_map_submap:		[ kernel use only ]
1330  *
1331  *	Mark the given range as handled by a subordinate map.
1332  *
1333  *	This range must have been created with vm_map_find,
1334  *	and no other operations may have been performed on this
1335  *	range prior to calling vm_map_submap.
1336  *
1337  *	Only a limited number of operations can be performed
1338  *	within this range after calling vm_map_submap:
1339  *		vm_fault
1340  *	[Don't try vm_map_copy!]
1341  *
1342  *	To remove a submapping, one must first remove the
1343  *	range from the superior map, and then destroy the
1344  *	submap (if desired).  [Better yet, don't try it.]
1345  */
1346 int
1347 vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
1348 {
1349 	vm_map_entry_t entry;
1350 	int result = KERN_INVALID_ARGUMENT;
1351 	int count;
1352 
1353 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1354 	vm_map_lock(map);
1355 
1356 	VM_MAP_RANGE_CHECK(map, start, end);
1357 
1358 	if (vm_map_lookup_entry(map, start, &entry)) {
1359 		vm_map_clip_start(map, entry, start, &count);
1360 	} else {
1361 		entry = entry->next;
1362 	}
1363 
1364 	vm_map_clip_end(map, entry, end, &count);
1365 
1366 	if ((entry->start == start) && (entry->end == end) &&
1367 	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1368 	    (entry->object.vm_object == NULL)) {
1369 		entry->object.sub_map = submap;
1370 		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1371 		result = KERN_SUCCESS;
1372 	}
1373 	vm_map_unlock(map);
1374 	vm_map_entry_release(count);
1375 
1376 	return (result);
1377 }
1378 
1379 /*
1380  *	vm_map_protect:
1381  *
1382  *	Sets the protection of the specified address
1383  *	region in the target map.  If "set_max" is
1384  *	specified, the maximum protection is to be set;
1385  *	otherwise, only the current protection is affected.
1386  */
1387 int
1388 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1389 	       vm_prot_t new_prot, boolean_t set_max)
1390 {
1391 	vm_map_entry_t current;
1392 	vm_map_entry_t entry;
1393 	int count;
1394 
1395 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1396 	vm_map_lock(map);
1397 
1398 	VM_MAP_RANGE_CHECK(map, start, end);
1399 
1400 	if (vm_map_lookup_entry(map, start, &entry)) {
1401 		vm_map_clip_start(map, entry, start, &count);
1402 	} else {
1403 		entry = entry->next;
1404 	}
1405 
1406 	/*
1407 	 * Make a first pass to check for protection violations.
1408 	 */
1409 
1410 	current = entry;
1411 	while ((current != &map->header) && (current->start < end)) {
1412 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1413 			vm_map_unlock(map);
1414 			vm_map_entry_release(count);
1415 			return (KERN_INVALID_ARGUMENT);
1416 		}
1417 		if ((new_prot & current->max_protection) != new_prot) {
1418 			vm_map_unlock(map);
1419 			vm_map_entry_release(count);
1420 			return (KERN_PROTECTION_FAILURE);
1421 		}
1422 		current = current->next;
1423 	}
1424 
1425 	/*
1426 	 * Go back and fix up protections. [Note that clipping is not
1427 	 * necessary the second time.]
1428 	 */
1429 	current = entry;
1430 
1431 	while ((current != &map->header) && (current->start < end)) {
1432 		vm_prot_t old_prot;
1433 
1434 		vm_map_clip_end(map, current, end, &count);
1435 
1436 		old_prot = current->protection;
1437 		if (set_max)
1438 			current->protection =
1439 			    (current->max_protection = new_prot) &
1440 			    old_prot;
1441 		else
1442 			current->protection = new_prot;
1443 
1444 		/*
1445 		 * Update physical map if necessary. Worry about copy-on-write
1446 		 * here -- CHECK THIS XXX
1447 		 */
1448 
1449 		if (current->protection != old_prot) {
1450 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1451 							VM_PROT_ALL)
1452 
1453 			pmap_protect(map->pmap, current->start,
1454 			    current->end,
1455 			    current->protection & MASK(current));
1456 #undef	MASK
1457 		}
1458 
1459 		vm_map_simplify_entry(map, current, &count);
1460 
1461 		current = current->next;
1462 	}
1463 
1464 	vm_map_unlock(map);
1465 	vm_map_entry_release(count);
1466 	return (KERN_SUCCESS);
1467 }
1468 
1469 /*
1470  *	vm_map_madvise:
1471  *
1472  * 	This routine traverses a process's map handling the madvise
1473  *	system call.  Advisories are classified as either those affecting
1474  *	the vm_map_entry structure, or those affecting the underlying
1475  *	objects.
1476  */
1477 
1478 int
1479 vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end, int behav)
1480 {
1481 	vm_map_entry_t current, entry;
1482 	int modify_map = 0;
1483 	int count;
1484 
1485 	/*
1486 	 * Some madvise calls directly modify the vm_map_entry, in which case
1487 	 * we need to use an exclusive lock on the map and we need to perform
1488 	 * various clipping operations.  Otherwise we only need a read-lock
1489 	 * on the map.
1490 	 */
1491 
1492 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1493 
1494 	switch(behav) {
1495 	case MADV_NORMAL:
1496 	case MADV_SEQUENTIAL:
1497 	case MADV_RANDOM:
1498 	case MADV_NOSYNC:
1499 	case MADV_AUTOSYNC:
1500 	case MADV_NOCORE:
1501 	case MADV_CORE:
1502 		modify_map = 1;
1503 		vm_map_lock(map);
1504 		break;
1505 	case MADV_WILLNEED:
1506 	case MADV_DONTNEED:
1507 	case MADV_FREE:
1508 		vm_map_lock_read(map);
1509 		break;
1510 	default:
1511 		vm_map_entry_release(count);
1512 		return (KERN_INVALID_ARGUMENT);
1513 	}
1514 
1515 	/*
1516 	 * Locate starting entry and clip if necessary.
1517 	 */
1518 
1519 	VM_MAP_RANGE_CHECK(map, start, end);
1520 
1521 	if (vm_map_lookup_entry(map, start, &entry)) {
1522 		if (modify_map)
1523 			vm_map_clip_start(map, entry, start, &count);
1524 	} else {
1525 		entry = entry->next;
1526 	}
1527 
1528 	if (modify_map) {
1529 		/*
1530 		 * madvise behaviors that are implemented in the vm_map_entry.
1531 		 *
1532 		 * We clip the vm_map_entry so that behavioral changes are
1533 		 * limited to the specified address range.
1534 		 */
1535 		for (current = entry;
1536 		     (current != &map->header) && (current->start < end);
1537 		     current = current->next
1538 		) {
1539 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1540 				continue;
1541 
1542 			vm_map_clip_end(map, current, end, &count);
1543 
1544 			switch (behav) {
1545 			case MADV_NORMAL:
1546 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1547 				break;
1548 			case MADV_SEQUENTIAL:
1549 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1550 				break;
1551 			case MADV_RANDOM:
1552 				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1553 				break;
1554 			case MADV_NOSYNC:
1555 				current->eflags |= MAP_ENTRY_NOSYNC;
1556 				break;
1557 			case MADV_AUTOSYNC:
1558 				current->eflags &= ~MAP_ENTRY_NOSYNC;
1559 				break;
1560 			case MADV_NOCORE:
1561 				current->eflags |= MAP_ENTRY_NOCOREDUMP;
1562 				break;
1563 			case MADV_CORE:
1564 				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
1565 				break;
1566 			default:
1567 				break;
1568 			}
1569 			vm_map_simplify_entry(map, current, &count);
1570 		}
1571 		vm_map_unlock(map);
1572 	} else {
1573 		vm_pindex_t pindex;
1574 		int count;
1575 
1576 		/*
1577 		 * madvise behaviors that are implemented in the underlying
1578 		 * vm_object.
1579 		 *
1580 		 * Since we don't clip the vm_map_entry, we have to clip
1581 		 * the vm_object pindex and count.
1582 		 */
1583 		for (current = entry;
1584 		     (current != &map->header) && (current->start < end);
1585 		     current = current->next
1586 		) {
1587 			vm_offset_t useStart;
1588 
1589 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1590 				continue;
1591 
1592 			pindex = OFF_TO_IDX(current->offset);
1593 			count = atop(current->end - current->start);
1594 			useStart = current->start;
1595 
1596 			if (current->start < start) {
1597 				pindex += atop(start - current->start);
1598 				count -= atop(start - current->start);
1599 				useStart = start;
1600 			}
1601 			if (current->end > end)
1602 				count -= atop(current->end - end);
1603 
1604 			if (count <= 0)
1605 				continue;
1606 
1607 			vm_object_madvise(current->object.vm_object,
1608 					  pindex, count, behav);
1609 			if (behav == MADV_WILLNEED) {
1610 				pmap_object_init_pt(
1611 				    map->pmap,
1612 				    useStart,
1613 				    current->protection,
1614 				    current->object.vm_object,
1615 				    pindex,
1616 				    (count << PAGE_SHIFT),
1617 				    MAP_PREFAULT_MADVISE
1618 				);
1619 			}
1620 		}
1621 		vm_map_unlock_read(map);
1622 	}
1623 	vm_map_entry_release(count);
1624 	return(0);
1625 }
1626 
1627 
1628 /*
1629  *	vm_map_inherit:
1630  *
1631  *	Sets the inheritance of the specified address
1632  *	range in the target map.  Inheritance
1633  *	affects how the map will be shared with
1634  *	child maps at the time of vm_map_fork.
1635  */
1636 int
1637 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1638 	       vm_inherit_t new_inheritance)
1639 {
1640 	vm_map_entry_t entry;
1641 	vm_map_entry_t temp_entry;
1642 	int count;
1643 
1644 	switch (new_inheritance) {
1645 	case VM_INHERIT_NONE:
1646 	case VM_INHERIT_COPY:
1647 	case VM_INHERIT_SHARE:
1648 		break;
1649 	default:
1650 		return (KERN_INVALID_ARGUMENT);
1651 	}
1652 
1653 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1654 	vm_map_lock(map);
1655 
1656 	VM_MAP_RANGE_CHECK(map, start, end);
1657 
1658 	if (vm_map_lookup_entry(map, start, &temp_entry)) {
1659 		entry = temp_entry;
1660 		vm_map_clip_start(map, entry, start, &count);
1661 	} else
1662 		entry = temp_entry->next;
1663 
1664 	while ((entry != &map->header) && (entry->start < end)) {
1665 		vm_map_clip_end(map, entry, end, &count);
1666 
1667 		entry->inheritance = new_inheritance;
1668 
1669 		vm_map_simplify_entry(map, entry, &count);
1670 
1671 		entry = entry->next;
1672 	}
1673 	vm_map_unlock(map);
1674 	vm_map_entry_release(count);
1675 	return (KERN_SUCCESS);
1676 }
1677 
1678 /*
1679  * Implement the semantics of mlock
1680  */
1681 int
1682 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
1683     boolean_t new_pageable)
1684 {
1685 	vm_map_entry_t entry;
1686 	vm_map_entry_t start_entry;
1687 	vm_offset_t end;
1688 	int rv = KERN_SUCCESS;
1689 	int count;
1690 
1691 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1692 	vm_map_lock(map);
1693 	VM_MAP_RANGE_CHECK(map, start, real_end);
1694 	end = real_end;
1695 
1696 	start_entry = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES);
1697 	if (start_entry == NULL) {
1698 		vm_map_unlock(map);
1699 		vm_map_entry_release(count);
1700 		return (KERN_INVALID_ADDRESS);
1701 	}
1702 
1703 	if (new_pageable == 0) {
1704 		entry = start_entry;
1705 		while ((entry != &map->header) && (entry->start < end)) {
1706 			vm_offset_t save_start;
1707 			vm_offset_t save_end;
1708 
1709 			/*
1710 			 * Already user wired or hard wired (trivial cases)
1711 			 */
1712 			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
1713 				entry = entry->next;
1714 				continue;
1715 			}
1716 			if (entry->wired_count != 0) {
1717 				entry->wired_count++;
1718 				entry->eflags |= MAP_ENTRY_USER_WIRED;
1719 				entry = entry->next;
1720 				continue;
1721 			}
1722 
1723 			/*
1724 			 * A new wiring requires instantiation of appropriate
1725 			 * management structures and the faulting in of the
1726 			 * page.
1727 			 */
1728 			if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1729 				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1730 				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
1731 
1732 					vm_object_shadow(&entry->object.vm_object,
1733 					    &entry->offset,
1734 					    atop(entry->end - entry->start));
1735 					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1736 
1737 				} else if (entry->object.vm_object == NULL &&
1738 					   !map->system_map) {
1739 
1740 					entry->object.vm_object =
1741 					    vm_object_allocate(OBJT_DEFAULT,
1742 						atop(entry->end - entry->start));
1743 					entry->offset = (vm_offset_t) 0;
1744 
1745 				}
1746 			}
1747 			entry->wired_count++;
1748 			entry->eflags |= MAP_ENTRY_USER_WIRED;
1749 
1750 			/*
1751 			 * Now fault in the area.  The map lock needs to be
1752 			 * manipulated to avoid deadlocks.  The in-transition
1753 			 * flag protects the entries.
1754 			 */
1755 			save_start = entry->start;
1756 			save_end = entry->end;
1757 			vm_map_unlock(map);
1758 			map->timestamp++;
1759 			rv = vm_fault_user_wire(map, save_start, save_end);
1760 			vm_map_lock(map);
1761 			if (rv) {
1762 				CLIP_CHECK_BACK(entry, save_start);
1763 				for (;;) {
1764 					KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
1765 					entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1766 					entry->wired_count = 0;
1767 					if (entry->end == save_end)
1768 						break;
1769 					entry = entry->next;
1770 					KASSERT(entry != &map->header, ("bad entry clip during backout"));
1771 				}
1772 				end = save_start;	/* unwire the rest */
1773 				break;
1774 			}
1775 			/*
1776 			 * note that even though the entry might have been
1777 			 * clipped, the USER_WIRED flag we set prevents
1778 			 * duplication so we do not have to do a
1779 			 * clip check.
1780 			 */
1781 			entry = entry->next;
1782 		}
1783 
1784 		/*
1785 		 * If we failed fall through to the unwiring section to
1786 		 * unwire what we had wired so far.  'end' has already
1787 		 * been adjusted.
1788 		 */
1789 		if (rv)
1790 			new_pageable = 1;
1791 
1792 		/*
1793 		 * start_entry might have been clipped if we unlocked the
1794 		 * map and blocked.  No matter how clipped it has gotten
1795 		 * there should be a fragment that is on our start boundary.
1796 		 */
1797 		CLIP_CHECK_BACK(start_entry, start);
1798 	}
1799 
1800 	/*
1801 	 * Deal with the unwiring case.
1802 	 */
1803 	if (new_pageable) {
1804 		/*
1805 		 * This is the unwiring case.  We must first ensure that the
1806 		 * range to be unwired is really wired down.  We know there
1807 		 * are no holes.
1808 		 */
1809 		entry = start_entry;
1810 		while ((entry != &map->header) && (entry->start < end)) {
1811 			if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
1812 				rv = KERN_INVALID_ARGUMENT;
1813 				goto done;
1814 			}
1815 			KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
1816 			entry = entry->next;
1817 		}
1818 
1819 		/*
1820 		 * Now decrement the wiring count for each region. If a region
1821 		 * becomes completely unwired, unwire its physical pages and
1822 		 * mappings.
1823 		 */
1824 		/*
1825 		 * Note that the "entry" loop variable must be reset to
1826 		 * start_entry before this second pass.  If the value left
1827 		 * over from the verification loop above were reused it
1828 		 * would already point past the end of the range, the loop
1829 		 * below would never be entered, and the pages backing the
1830 		 * entries would never be unwired, which would leak wired
1831 		 * pages.  Hence the explicit reset of "entry" immediately
1832 		 * below.
1833 		 */
1834 		entry = start_entry;
1835 		while ((entry != &map->header) && (entry->start < end)) {
1836 			KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED, ("expected USER_WIRED on entry %p", entry));
1837 			entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1838 			entry->wired_count--;
1839 			if (entry->wired_count == 0)
1840 				vm_fault_unwire(map, entry->start, entry->end);
1841 			entry = entry->next;
1842 		}
1843 	}
1844 done:
1845 	vm_map_unclip_range(map, start_entry, start, real_end, &count,
1846 		MAP_CLIP_NO_HOLES);
1847 	map->timestamp++;
1848 	vm_map_unlock(map);
1849 	vm_map_entry_release(count);
1850 	return (rv);
1851 }
1852 
1853 /*
1854  *	vm_map_wire:
1855  *
1856  *	Sets the pageability of the specified address
1857  *	range in the target map.  Regions specified
1858  *	as not pageable require locked-down physical
1859  *	memory and physical page maps.
1860  *
1861  *	The map must not be locked, but a reference
1862  *	must remain to the map throughout the call.
1863  *
1864  *	This function may be called via the zalloc path and must properly
1865  *	reserve map entries for kernel_map.
1866  */
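/*
 * Illustrative sketch (not part of this file's logic): a hypothetical
 * caller wiring, and later unwiring, a kernel virtual range.  The
 * variables "addr" and "size" and the error handling are assumptions
 * made for the example; vm_map_wire(), kernel_map, KERN_SUCCESS and
 * KM_PAGEABLE are the interfaces actually used by this routine.
 *
 *	if (vm_map_wire(kernel_map, addr, addr + size, 0) != KERN_SUCCESS)
 *		return (ENOMEM);
 *	... use the wired range, then unwire it again ...
 *	vm_map_wire(kernel_map, addr, addr + size, KM_PAGEABLE);
 */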
1867 int
1868 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
1869 {
1870 	vm_map_entry_t entry;
1871 	vm_map_entry_t start_entry;
1872 	vm_offset_t end;
1873 	int rv = KERN_SUCCESS;
1874 	int count;
1875 	int s;
1876 
1877 	if (kmflags & KM_KRESERVE)
1878 		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
1879 	else
1880 		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1881 	vm_map_lock(map);
1882 	VM_MAP_RANGE_CHECK(map, start, real_end);
1883 	end = real_end;
1884 
1885 	start_entry = vm_map_clip_range(map, start, end, &count, MAP_CLIP_NO_HOLES);
1886 	if (start_entry == NULL) {
1887 		vm_map_unlock(map);
1888 		rv = KERN_INVALID_ADDRESS;
1889 		goto failure;
1890 	}
1891 	if ((kmflags & KM_PAGEABLE) == 0) {
1892 		/*
1893 		 * Wiring.
1894 		 *
1895 		 * 1.  Holding the write lock, we create any shadow or zero-fill
1896 		 * objects that need to be created. Then we clip each map
1897 		 * entry to the region to be wired and increment its wiring
1898 		 * count.  We create objects before clipping the map entries
1899 		 * to avoid object proliferation.
1900 		 *
1901 		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
1902 		 * fault in the pages for any newly wired area (wired_count is
1903 		 * 1).
1904 		 *
1905 		 * Downgrading to a read lock for vm_fault_wire avoids a
1906 		 * possible deadlock with another process that may have faulted
1907 		 * on one of the pages to be wired (it would mark the page busy,
1908 		 * blocking us, then in turn block on the map lock that we
1909 		 * hold).  Because of problems in the recursive lock package,
1910 		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
1911 		 * any actions that require the write lock must be done
1912 		 * beforehand.  Because we keep the read lock on the map, the
1913 		 * copy-on-write status of the entries we modify here cannot
1914 		 * change.
1915 		 */
1916 
1917 		entry = start_entry;
1918 		while ((entry != &map->header) && (entry->start < end)) {
1919 			/*
1920 			 * Trivial case if the entry is already wired
1921 			 */
1922 			if (entry->wired_count) {
1923 				entry->wired_count++;
1924 				entry = entry->next;
1925 				continue;
1926 			}
1927 
1928 			/*
1929 			 * The entry is being newly wired, we have to setup
1930 			 * appropriate management structures.  A shadow
1931 			 * object is required for a copy-on-write region,
1932 			 * or a normal object for a zero-fill region.  We
1933 			 * do not have to do this for entries that point to sub
1934 			 * maps because we won't hold the lock on the sub map.
1935 			 */
1936 			if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1937 				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1938 				if (copyflag &&
1939 				    ((entry->protection & VM_PROT_WRITE) != 0)) {
1940 
1941 					vm_object_shadow(&entry->object.vm_object,
1942 					    &entry->offset,
1943 					    atop(entry->end - entry->start));
1944 					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1945 				} else if (entry->object.vm_object == NULL &&
1946 					   !map->system_map) {
1947 					entry->object.vm_object =
1948 					    vm_object_allocate(OBJT_DEFAULT,
1949 						atop(entry->end - entry->start));
1950 					entry->offset = (vm_offset_t) 0;
1951 				}
1952 			}
1953 
1954 			entry->wired_count++;
1955 			entry = entry->next;
1956 		}
1957 
1958 		/*
1959 		 * Pass 2.
1960 		 */
1961 
1962 		/*
1963 		 * HACK HACK HACK HACK
1964 		 *
1965 		 * Unlock the map to avoid deadlocks.  The in-transit flag
1966 		 * protects us from most changes but note that
1967 		 * clipping may still occur.  To prevent clipping from
1968 		 * occurring after the unlock, except for when we are
1969 		 * blocking in vm_fault_wire, we must run at splvm().
1970 		 * Otherwise our accesses to entry->start and entry->end
1971 		 * could be corrupted.  We have to set splvm() prior to
1972 		 * unlocking so start_entry does not change out from
1973 		 * under us at the very beginning of the loop.
1974 		 *
1975 		 * HACK HACK HACK HACK
1976 		 */
1977 
1978 		s = splvm();
1979 		vm_map_unlock(map);
1980 
1981 		entry = start_entry;
1982 		while (entry != &map->header && entry->start < end) {
1983 			/*
1984 			 * If vm_fault_wire fails for any page we need to undo
1985 			 * what has been done.  We decrement the wiring count
1986 			 * for those pages which have not yet been wired (now)
1987 			 * and unwire those that have (later).
1988 			 */
1989 			vm_offset_t save_start = entry->start;
1990 			vm_offset_t save_end = entry->end;
1991 
1992 			if (entry->wired_count == 1)
1993 				rv = vm_fault_wire(map, entry->start, entry->end);
1994 			if (rv) {
1995 				CLIP_CHECK_BACK(entry, save_start);
1996 				for (;;) {
1997 					KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
1998 					entry->wired_count = 0;
1999 					if (entry->end == save_end)
2000 						break;
2001 					entry = entry->next;
2002 					KASSERT(entry != &map->header, ("bad entry clip during backout"));
2003 				}
2004 				end = save_start;
2005 				break;
2006 			}
2007 			CLIP_CHECK_FWD(entry, save_end);
2008 			entry = entry->next;
2009 		}
2010 		splx(s);
2011 
2012 		/*
2013 		 * relock.  start_entry is still IN_TRANSITION and must
2014 		 * still exist, but may have been clipped (handled just
2015 		 * below).
2016 		 */
2017 		vm_map_lock(map);
2018 
2019 		/*
2020 		 * If a failure occurred, undo everything by falling through
2021 		 * to the unwiring code.  'end' has already been adjusted
2022 		 * appropriately.
2023 		 */
2024 		if (rv)
2025 			kmflags |= KM_PAGEABLE;
2026 
2027 		/*
2028 		 * start_entry might have been clipped if we unlocked the
2029 		 * map and blocked.  No matter how clipped it has gotten
2030 		 * there should be a fragment that is on our start boundary.
2031 		 */
2032 		CLIP_CHECK_BACK(start_entry, start);
2033 	}
2034 
2035 	if (kmflags & KM_PAGEABLE) {
2036 		/*
2037 		 * This is the unwiring case.  We must first ensure that the
2038 		 * range to be unwired is really wired down.  We know there
2039 		 * are no holes.
2040 		 */
2041 		entry = start_entry;
2042 		while ((entry != &map->header) && (entry->start < end)) {
2043 			if (entry->wired_count == 0) {
2044 				rv = KERN_INVALID_ARGUMENT;
2045 				goto done;
2046 			}
2047 			entry = entry->next;
2048 		}
2049 
2050 		/*
2051 		 * Now decrement the wiring count for each region. If a region
2052 		 * becomes completely unwired, unwire its physical pages and
2053 		 * mappings.
2054 		 */
2055 		entry = start_entry;
2056 		while ((entry != &map->header) && (entry->start < end)) {
2057 			entry->wired_count--;
2058 			if (entry->wired_count == 0)
2059 				vm_fault_unwire(map, entry->start, entry->end);
2060 			entry = entry->next;
2061 		}
2062 	}
2063 done:
2064 	vm_map_unclip_range(map, start_entry, start, real_end, &count,
2065 		MAP_CLIP_NO_HOLES);
2066 	map->timestamp++;
2067 	vm_map_unlock(map);
2068 failure:
2069 	if (kmflags & KM_KRESERVE)
2070 		vm_map_entry_krelease(count);
2071 	else
2072 		vm_map_entry_release(count);
2073 	return (rv);
2074 }
2075 
2076 /*
2077  * vm_map_set_wired_quick()
2078  *
2079  *	Mark a newly allocated address range as wired but do not fault in
2080  *	the pages.  The caller is expected to load the pages into the object.
2081  *
2082  *	The map must be locked on entry and will remain locked on return.
2083  */
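/*
 * Illustrative sketch (hypothetical caller): the reserve/lock bracketing
 * that vm_map_set_wired_quick() expects, using the same conventions as
 * the other routines in this file.  "addr" and "size" are assumed
 * example values.
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	vm_map_set_wired_quick(map, addr, size, &count);
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */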
2084 void
2085 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, int *countp)
2086 {
2087 	vm_map_entry_t scan;
2088 	vm_map_entry_t entry;
2089 
2090 	entry = vm_map_clip_range(map, addr, addr + size, countp, MAP_CLIP_NO_HOLES);
2091 	for (scan = entry; scan != &map->header && scan->start < addr + size; scan = scan->next) {
2092 	    KKASSERT(scan->wired_count == 0);
2093 	    scan->wired_count = 1;
2094 	}
2095 	vm_map_unclip_range(map, entry, addr, addr + size, countp, MAP_CLIP_NO_HOLES);
2096 }
2097 
2098 /*
2099  * vm_map_clean
2100  *
2101  * Push any dirty cached pages in the address range to their pager.
2102  * If syncio is TRUE, dirty pages are written synchronously.
2103  * If invalidate is TRUE, any cached pages are freed as well.
2104  *
2105  * Returns an error if any part of the specified range is not mapped.
2106  */
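/*
 * Illustrative sketch (hypothetical msync(2)-style caller): flush a user
 * range synchronously without freeing cached pages.  "addr" and "len"
 * are assumed example values; trunc_page()/round_page() are the usual
 * page rounding macros.
 *
 *	rv = vm_map_clean(map, trunc_page(addr), round_page(addr + len),
 *			  TRUE, FALSE);
 */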
2107 int
2108 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end, boolean_t syncio,
2109     boolean_t invalidate)
2110 {
2111 	vm_map_entry_t current;
2112 	vm_map_entry_t entry;
2113 	vm_size_t size;
2114 	vm_object_t object;
2115 	vm_ooffset_t offset;
2116 
2117 	vm_map_lock_read(map);
2118 	VM_MAP_RANGE_CHECK(map, start, end);
2119 	if (!vm_map_lookup_entry(map, start, &entry)) {
2120 		vm_map_unlock_read(map);
2121 		return (KERN_INVALID_ADDRESS);
2122 	}
2123 	/*
2124 	 * Make a first pass to check for holes.
2125 	 */
2126 	for (current = entry; current->start < end; current = current->next) {
2127 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2128 			vm_map_unlock_read(map);
2129 			return (KERN_INVALID_ARGUMENT);
2130 		}
2131 		if (end > current->end &&
2132 		    (current->next == &map->header ||
2133 			current->end != current->next->start)) {
2134 			vm_map_unlock_read(map);
2135 			return (KERN_INVALID_ADDRESS);
2136 		}
2137 	}
2138 
2139 	if (invalidate)
2140 		pmap_remove(vm_map_pmap(map), start, end);
2141 	/*
2142 	 * Make a second pass, cleaning/uncaching pages from the indicated
2143 	 * objects as we go.
2144 	 */
2145 	for (current = entry; current->start < end; current = current->next) {
2146 		offset = current->offset + (start - current->start);
2147 		size = (end <= current->end ? end : current->end) - start;
2148 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2149 			vm_map_t smap;
2150 			vm_map_entry_t tentry;
2151 			vm_size_t tsize;
2152 
2153 			smap = current->object.sub_map;
2154 			vm_map_lock_read(smap);
2155 			(void) vm_map_lookup_entry(smap, offset, &tentry);
2156 			tsize = tentry->end - offset;
2157 			if (tsize < size)
2158 				size = tsize;
2159 			object = tentry->object.vm_object;
2160 			offset = tentry->offset + (offset - tentry->start);
2161 			vm_map_unlock_read(smap);
2162 		} else {
2163 			object = current->object.vm_object;
2164 		}
2165 		/*
2166 		 * Note that there is absolutely no sense in writing out
2167 		 * anonymous objects, so we track down the vnode object
2168 		 * to write out.
2169 		 * We invalidate (remove) all pages from the address space
2170 		 * anyway, for semantic correctness.
2171 		 *
2172 		 * note: certain anonymous maps, such as MAP_NOSYNC maps,
2173 		 * may start out with a NULL object.
2174 		 */
2175 		while (object && object->backing_object) {
2176 			object = object->backing_object;
2177 			offset += object->backing_object_offset;
2178 			if (object->size < OFF_TO_IDX( offset + size))
2179 				size = IDX_TO_OFF(object->size) - offset;
2180 		}
2181 		if (object && (object->type == OBJT_VNODE) &&
2182 		    (current->protection & VM_PROT_WRITE)) {
2183 			/*
2184 			 * Flush pages if writing is allowed, invalidate them
2185 			 * if invalidation requested.  Pages undergoing I/O
2186 			 * will be ignored by vm_object_page_remove().
2187 			 *
2188 			 * We cannot lock the vnode and then wait for paging
2189 			 * to complete without deadlocking against vm_fault.
2190 			 * Instead we simply call vm_object_page_remove() and
2191 			 * allow it to block internally on a page-by-page
2192 			 * basis when it encounters pages undergoing async
2193 			 * I/O.
2194 			 */
2195 			int flags;
2196 
2197 			vm_object_reference(object);
2198 			vn_lock(object->handle, NULL,
2199 				LK_EXCLUSIVE | LK_RETRY, curthread);
2200 			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2201 			flags |= invalidate ? OBJPC_INVAL : 0;
2202 			vm_object_page_clean(object,
2203 			    OFF_TO_IDX(offset),
2204 			    OFF_TO_IDX(offset + size + PAGE_MASK),
2205 			    flags);
2206 			VOP_UNLOCK(object->handle, NULL, 0, curthread);
2207 			vm_object_deallocate(object);
2208 		}
2209 		if (object && invalidate &&
2210 		   ((object->type == OBJT_VNODE) ||
2211 		    (object->type == OBJT_DEVICE))) {
2212 			vm_object_reference(object);
2213 			vm_object_page_remove(object,
2214 			    OFF_TO_IDX(offset),
2215 			    OFF_TO_IDX(offset + size + PAGE_MASK),
2216 			    TRUE);
2217 			vm_object_deallocate(object);
2218 		}
2219 		start += size;
2220 	}
2221 
2222 	vm_map_unlock_read(map);
2223 	return (KERN_SUCCESS);
2224 }
2225 
2226 /*
2227  *	vm_map_entry_unwire:	[ internal use only ]
2228  *
2229  *	Make the region specified by this entry pageable.
2230  *
2231  *	The map in question should be locked.
2232  *	[This is the reason for this routine's existence.]
2233  */
2234 static void
2235 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2236 {
2237 	vm_fault_unwire(map, entry->start, entry->end);
2238 	entry->wired_count = 0;
2239 }
2240 
2241 /*
2242  *	vm_map_entry_delete:	[ internal use only ]
2243  *
2244  *	Deallocate the given entry from the target map.
2245  */
2246 static void
2247 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
2248 {
2249 	vm_map_entry_unlink(map, entry);
2250 	map->size -= entry->end - entry->start;
2251 
2252 	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2253 		vm_object_deallocate(entry->object.vm_object);
2254 	}
2255 
2256 	vm_map_entry_dispose(map, entry, countp);
2257 }
2258 
2259 /*
2260  *	vm_map_delete:	[ internal use only ]
2261  *
2262  *	Deallocates the given address range from the target
2263  *	map.
2264  */
2265 int
2266 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
2267 {
2268 	vm_object_t object;
2269 	vm_map_entry_t entry;
2270 	vm_map_entry_t first_entry;
2271 
2272 	/*
2273 	 * Find the start of the region, and clip it
2274 	 */
2275 
2276 again:
2277 	if (!vm_map_lookup_entry(map, start, &first_entry))
2278 		entry = first_entry->next;
2279 	else {
2280 		entry = first_entry;
2281 		vm_map_clip_start(map, entry, start, countp);
2282 		/*
2283 		 * Fix the lookup hint now, rather than each time through the
2284 		 * loop.
2285 		 */
2286 		SAVE_HINT(map, entry->prev);
2287 	}
2288 
2289 	/*
2290 	 * Save the free space hint
2291 	 */
2292 
2293 	if (entry == &map->header) {
2294 		map->first_free = &map->header;
2295 	} else if (map->first_free->start >= start) {
2296 		map->first_free = entry->prev;
2297 	}
2298 
2299 	/*
2300 	 * Step through all entries in this region
2301 	 */
2302 
2303 	while ((entry != &map->header) && (entry->start < end)) {
2304 		vm_map_entry_t next;
2305 		vm_offset_t s, e;
2306 		vm_pindex_t offidxstart, offidxend, count;
2307 
2308 		/*
2309 		 * If we hit an in-transition entry we have to sleep and
2310 		 * retry.  It's easier (and not really slower) to just retry
2311 		 * since this case occurs so rarely and the hint is already
2312 		 * pointing at the right place.  We have to reset the
2313 		 * start offset so as not to accidentally delete an entry
2314 		 * another process just created in vacated space.
2315 		 */
2316 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2317 			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2318 			start = entry->start;
2319 			++mycpu->gd_cnt.v_intrans_coll;
2320 			++mycpu->gd_cnt.v_intrans_wait;
2321 			vm_map_transition_wait(map);
2322 			goto again;
2323 		}
2324 		vm_map_clip_end(map, entry, end, countp);
2325 
2326 		s = entry->start;
2327 		e = entry->end;
2328 		next = entry->next;
2329 
2330 		offidxstart = OFF_TO_IDX(entry->offset);
2331 		count = OFF_TO_IDX(e - s);
2332 		object = entry->object.vm_object;
2333 
2334 		/*
2335 		 * Unwire before removing addresses from the pmap; otherwise,
2336 		 * unwiring will put the entries back in the pmap.
2337 		 */
2338 		if (entry->wired_count != 0) {
2339 			vm_map_entry_unwire(map, entry);
2340 		}
2341 
2342 		offidxend = offidxstart + count;
2343 
2344 		if ((object == kernel_object) || (object == kmem_object)) {
2345 			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2346 		} else {
2347 			pmap_remove(map->pmap, s, e);
2348 			if (object != NULL &&
2349 			    object->ref_count != 1 &&
2350 			    (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
2351 			    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2352 				vm_object_collapse(object);
2353 				vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2354 				if (object->type == OBJT_SWAP) {
2355 					swap_pager_freespace(object, offidxstart, count);
2356 				}
2357 				if (offidxend >= object->size &&
2358 				    offidxstart < object->size) {
2359 					object->size = offidxstart;
2360 				}
2361 			}
2362 		}
2363 
2364 		/*
2365 		 * Delete the entry (which may delete the object) only after
2366 		 * removing all pmap entries pointing to its pages.
2367 		 * (Otherwise, its page frames may be reallocated, and any
2368 		 * modify bits will be set in the wrong object!)
2369 		 */
2370 		vm_map_entry_delete(map, entry, countp);
2371 		entry = next;
2372 	}
2373 	return (KERN_SUCCESS);
2374 }
2375 
2376 /*
2377  *	vm_map_remove:
2378  *
2379  *	Remove the given address range from the target map.
2380  *	This is the exported form of vm_map_delete.
2381  */
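/*
 * Illustrative sketch (hypothetical caller): tear down a previously
 * established kernel mapping.  "addr" and "size" are assumed example
 * values.
 *
 *	(void) vm_map_remove(kernel_map, addr, addr + size);
 */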
2382 int
2383 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2384 {
2385 	int result;
2386 	int count;
2387 
2388 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2389 	vm_map_lock(map);
2390 	VM_MAP_RANGE_CHECK(map, start, end);
2391 	result = vm_map_delete(map, start, end, &count);
2392 	vm_map_unlock(map);
2393 	vm_map_entry_release(count);
2394 
2395 	return (result);
2396 }
2397 
2398 /*
2399  *	vm_map_check_protection:
2400  *
2401  *	Assert that the target map allows the specified
2402  *	privilege on the entire address region given.
2403  *	The entire region must be allocated.
2404  */
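/*
 * Illustrative sketch (hypothetical caller): verify that a user range is
 * fully mapped with read/write access before operating on it directly.
 *
 *	if (!vm_map_check_protection(map, start, end,
 *				     VM_PROT_READ | VM_PROT_WRITE))
 *		return (KERN_PROTECTION_FAILURE);
 */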
2405 boolean_t
2406 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2407 			vm_prot_t protection)
2408 {
2409 	vm_map_entry_t entry;
2410 	vm_map_entry_t tmp_entry;
2411 
2412 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2413 		return (FALSE);
2414 	}
2415 	entry = tmp_entry;
2416 
2417 	while (start < end) {
2418 		if (entry == &map->header) {
2419 			return (FALSE);
2420 		}
2421 		/*
2422 		 * No holes allowed!
2423 		 */
2424 
2425 		if (start < entry->start) {
2426 			return (FALSE);
2427 		}
2428 		/*
2429 		 * Check protection associated with entry.
2430 		 */
2431 
2432 		if ((entry->protection & protection) != protection) {
2433 			return (FALSE);
2434 		}
2435 		/* go to next entry */
2436 
2437 		start = entry->end;
2438 		entry = entry->next;
2439 	}
2440 	return (TRUE);
2441 }
2442 
2443 /*
2444  * Split the pages in a map entry into a new object.  This affords
2445  * easier removal of unused pages, and keeps object inheritance from
2446  * having a negative impact on memory usage.
2447  */
2448 static void
2449 vm_map_split(vm_map_entry_t entry)
2450 {
2451 	vm_page_t m;
2452 	vm_object_t orig_object, new_object, source;
2453 	vm_offset_t s, e;
2454 	vm_pindex_t offidxstart, offidxend, idx;
2455 	vm_size_t size;
2456 	vm_ooffset_t offset;
2457 
2458 	orig_object = entry->object.vm_object;
2459 	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
2460 		return;
2461 	if (orig_object->ref_count <= 1)
2462 		return;
2463 
2464 	offset = entry->offset;
2465 	s = entry->start;
2466 	e = entry->end;
2467 
2468 	offidxstart = OFF_TO_IDX(offset);
2469 	offidxend = offidxstart + OFF_TO_IDX(e - s);
2470 	size = offidxend - offidxstart;
2471 
2472 	new_object = vm_pager_allocate(orig_object->type,
2473 		NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
2474 	if (new_object == NULL)
2475 		return;
2476 
2477 	source = orig_object->backing_object;
2478 	if (source != NULL) {
2479 		vm_object_reference(source);	/* Referenced by new_object */
2480 		LIST_INSERT_HEAD(&source->shadow_head,
2481 				  new_object, shadow_list);
2482 		vm_object_clear_flag(source, OBJ_ONEMAPPING);
2483 		new_object->backing_object_offset =
2484 			orig_object->backing_object_offset + IDX_TO_OFF(offidxstart);
2485 		new_object->backing_object = source;
2486 		source->shadow_count++;
2487 		source->generation++;
2488 	}
2489 
2490 	for (idx = 0; idx < size; idx++) {
2491 		vm_page_t m;
2492 		int ss;		/* spl token; 's' is already in use */
2493 
2494 		/*
2495 		 * splvm protection is required to avoid a race between
2496 		 * the lookup and an interrupt/unbusy/free and our busy
2497 		 * check.
2498 		 */
2499 		ss = splvm();
2500 	retry:
2501 		m = vm_page_lookup(orig_object, offidxstart + idx);
2502 		if (m == NULL) {
2503 			splx(ss);
2504 			continue;
2505 		}
2506 
2507 		/*
2508 		 * We must wait for pending I/O to complete before we can
2509 		 * rename the page.
2510 		 *
2511 		 * We do not have to VM_PROT_NONE the page as mappings should
2512 		 * not be changed by this operation.
2513 		 */
2514 		if (vm_page_sleep_busy(m, TRUE, "spltwt"))
2515 			goto retry;
2516 		vm_page_busy(m);
2517 		vm_page_rename(m, new_object, idx);
2518 		/* page automatically made dirty by rename and cache handled */
2519 		vm_page_busy(m);
2520 		splx(ss);
2521 	}
2522 
2523 	if (orig_object->type == OBJT_SWAP) {
2524 		vm_object_pip_add(orig_object, 1);
2525 		/*
2526 		 * copy orig_object pages into new_object
2527 		 * and destroy unneeded pages in
2528 		 * shadow object.
2529 		 */
2530 		swap_pager_copy(orig_object, new_object, offidxstart, 0);
2531 		vm_object_pip_wakeup(orig_object);
2532 	}
2533 
2534 	/*
2535 	 * Wakeup the pages we played with.  No spl protection is needed
2536 	 * for a simple wakeup.
2537 	 */
2538 	for (idx = 0; idx < size; idx++) {
2539 		m = vm_page_lookup(new_object, idx);
2540 		if (m)
2541 			vm_page_wakeup(m);
2542 	}
2543 
2544 	entry->object.vm_object = new_object;
2545 	entry->offset = 0LL;
2546 	vm_object_deallocate(orig_object);
2547 }
2548 
2549 /*
2550  *	vm_map_copy_entry:
2551  *
2552  *	Copies the contents of the source entry to the destination
2553  *	entry.  The entries *must* be aligned properly.
2554  */
2555 static void
2556 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
2557 	vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
2558 {
2559 	vm_object_t src_object;
2560 
2561 	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2562 		return;
2563 
2564 	if (src_entry->wired_count == 0) {
2565 
2566 		/*
2567 		 * If the source entry is marked needs_copy, it is already
2568 		 * write-protected.
2569 		 */
2570 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2571 			pmap_protect(src_map->pmap,
2572 			    src_entry->start,
2573 			    src_entry->end,
2574 			    src_entry->protection & ~VM_PROT_WRITE);
2575 		}
2576 
2577 		/*
2578 		 * Make a copy of the object.
2579 		 */
2580 		if ((src_object = src_entry->object.vm_object) != NULL) {
2581 
2582 			if ((src_object->handle == NULL) &&
2583 				(src_object->type == OBJT_DEFAULT ||
2584 				 src_object->type == OBJT_SWAP)) {
2585 				vm_object_collapse(src_object);
2586 				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2587 					vm_map_split(src_entry);
2588 					src_object = src_entry->object.vm_object;
2589 				}
2590 			}
2591 
2592 			vm_object_reference(src_object);
2593 			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2594 			dst_entry->object.vm_object = src_object;
2595 			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2596 			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2597 			dst_entry->offset = src_entry->offset;
2598 		} else {
2599 			dst_entry->object.vm_object = NULL;
2600 			dst_entry->offset = 0;
2601 		}
2602 
2603 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2604 		    dst_entry->end - dst_entry->start, src_entry->start);
2605 	} else {
2606 		/*
2607 		 * Of course, wired down pages can't be set copy-on-write.
2608 		 * Cause wired pages to be copied into the new map by
2609 		 * simulating faults (the new pages are pageable)
2610 		 */
2611 		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2612 	}
2613 }
2614 
2615 /*
2616  * vmspace_fork:
2617  * Create a new process vmspace structure and vm_map
2618  * based on those of an existing process.  The new map
2619  * is based on the old map, according to the inheritance
2620  * values on the regions in that map.
2621  *
2622  * The source map must not be locked.
2623  */
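/*
 * Illustrative sketch: this is the pattern used by vmspace_exec() and
 * vmspace_unshare() later in this file; the fork(2) path performs the
 * equivalent for the child process.
 *
 *	newvmspace = vmspace_fork(oldvmspace);
 */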
2624 struct vmspace *
2625 vmspace_fork(struct vmspace *vm1)
2626 {
2627 	struct vmspace *vm2;
2628 	vm_map_t old_map = &vm1->vm_map;
2629 	vm_map_t new_map;
2630 	vm_map_entry_t old_entry;
2631 	vm_map_entry_t new_entry;
2632 	vm_object_t object;
2633 	int count;
2634 
2635 	vm_map_lock(old_map);
2636 	old_map->infork = 1;
2637 
2638 	/*
2639 	 * XXX Note: upcalls are not copied.
2640 	 */
2641 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2642 	bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2643 	    (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
2644 	new_map = &vm2->vm_map;	/* XXX */
2645 	new_map->timestamp = 1;
2646 
2647 	count = 0;
2648 	old_entry = old_map->header.next;
2649 	while (old_entry != &old_map->header) {
2650 		++count;
2651 		old_entry = old_entry->next;
2652 	}
2653 
2654 	count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);
2655 
2656 	old_entry = old_map->header.next;
2657 	while (old_entry != &old_map->header) {
2658 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2659 			panic("vm_map_fork: encountered a submap");
2660 
2661 		switch (old_entry->inheritance) {
2662 		case VM_INHERIT_NONE:
2663 			break;
2664 
2665 		case VM_INHERIT_SHARE:
2666 			/*
2667 			 * Clone the entry, creating the shared object if necessary.
2668 			 */
2669 			object = old_entry->object.vm_object;
2670 			if (object == NULL) {
2671 				object = vm_object_allocate(OBJT_DEFAULT,
2672 					atop(old_entry->end - old_entry->start));
2673 				old_entry->object.vm_object = object;
2674 				old_entry->offset = (vm_offset_t) 0;
2675 			}
2676 
2677 			/*
2678 			 * Add the reference before calling vm_object_shadow
2679 			 * to ensure that a shadow object is created.
2680 			 */
2681 			vm_object_reference(object);
2682 			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2683 				vm_object_shadow(&old_entry->object.vm_object,
2684 					&old_entry->offset,
2685 					atop(old_entry->end - old_entry->start));
2686 				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2687 				/* Transfer the second reference too. */
2688 				vm_object_reference(
2689 				    old_entry->object.vm_object);
2690 				vm_object_deallocate(object);
2691 				object = old_entry->object.vm_object;
2692 			}
2693 			vm_object_clear_flag(object, OBJ_ONEMAPPING);
2694 
2695 			/*
2696 			 * Clone the entry, referencing the shared object.
2697 			 */
2698 			new_entry = vm_map_entry_create(new_map, &count);
2699 			*new_entry = *old_entry;
2700 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2701 			new_entry->wired_count = 0;
2702 
2703 			/*
2704 			 * Insert the entry into the new map -- we know we're
2705 			 * inserting at the end of the new map.
2706 			 */
2707 
2708 			vm_map_entry_link(new_map, new_map->header.prev,
2709 			    new_entry);
2710 
2711 			/*
2712 			 * Update the physical map
2713 			 */
2714 
2715 			pmap_copy(new_map->pmap, old_map->pmap,
2716 			    new_entry->start,
2717 			    (old_entry->end - old_entry->start),
2718 			    old_entry->start);
2719 			break;
2720 
2721 		case VM_INHERIT_COPY:
2722 			/*
2723 			 * Clone the entry and link into the map.
2724 			 */
2725 			new_entry = vm_map_entry_create(new_map, &count);
2726 			*new_entry = *old_entry;
2727 			new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2728 			new_entry->wired_count = 0;
2729 			new_entry->object.vm_object = NULL;
2730 			vm_map_entry_link(new_map, new_map->header.prev,
2731 			    new_entry);
2732 			vm_map_copy_entry(old_map, new_map, old_entry,
2733 			    new_entry);
2734 			break;
2735 		}
2736 		old_entry = old_entry->next;
2737 	}
2738 
2739 	new_map->size = old_map->size;
2740 	old_map->infork = 0;
2741 	vm_map_unlock(old_map);
2742 	vm_map_entry_release(count);
2743 
2744 	return (vm2);
2745 }
2746 
2747 int
2748 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
2749 	      vm_prot_t prot, vm_prot_t max, int cow)
2750 {
2751 	vm_map_entry_t prev_entry;
2752 	vm_map_entry_t new_stack_entry;
2753 	vm_size_t      init_ssize;
2754 	int            rv;
2755 	int		count;
2756 
2757 	if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
2758 		return (KERN_NO_SPACE);
2759 
2760 	if (max_ssize < sgrowsiz)
2761 		init_ssize = max_ssize;
2762 	else
2763 		init_ssize = sgrowsiz;
2764 
2765 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2766 	vm_map_lock(map);
2767 
2768 	/* If addr is already mapped, no go */
2769 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
2770 		vm_map_unlock(map);
2771 		vm_map_entry_release(count);
2772 		return (KERN_NO_SPACE);
2773 	}
2774 
2775 	/* If we would blow our VMEM resource limit, no go */
2776 	if (map->size + init_ssize >
2777 	    curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2778 		vm_map_unlock(map);
2779 		vm_map_entry_release(count);
2780 		return (KERN_NO_SPACE);
2781 	}
2782 
2783 	/* If we can't accommodate max_ssize in the current mapping,
2784 	 * no go.  However, we need to be aware that subsequent user
2785 	 * mappings might map into the space we have reserved for
2786 	 * stack, and currently this space is not protected.
2787 	 *
2788 	 * Hopefully we will at least detect this condition
2789 	 * when we try to grow the stack.
2790 	 */
2791 	if ((prev_entry->next != &map->header) &&
2792 	    (prev_entry->next->start < addrbos + max_ssize)) {
2793 		vm_map_unlock(map);
2794 		vm_map_entry_release(count);
2795 		return (KERN_NO_SPACE);
2796 	}
2797 
2798 	/* We initially map a stack of only init_ssize.  We will
2799 	 * grow as needed later.  Since this is to be a grow
2800 	 * down stack, we map at the top of the range.
2801 	 *
2802 	 * Note: we would normally expect prot and max to be
2803 	 * VM_PROT_ALL, and cow to be 0.  Possibly we should
2804 	 * eliminate these as input parameters, and just
2805 	 * pass these values here in the insert call.
2806 	 */
2807 	rv = vm_map_insert(map, &count,
2808 			   NULL, 0, addrbos + max_ssize - init_ssize,
2809 	                   addrbos + max_ssize, prot, max, cow);
2810 
2811 	/* Now set the avail_ssize amount */
2812 	if (rv == KERN_SUCCESS){
2813 		if (prev_entry != &map->header)
2814 			vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize, &count);
2815 		new_stack_entry = prev_entry->next;
2816 		if (new_stack_entry->end   != addrbos + max_ssize ||
2817 		    new_stack_entry->start != addrbos + max_ssize - init_ssize)
2818 			panic ("Bad entry start/end for new stack entry");
2819 		else
2820 			new_stack_entry->avail_ssize = max_ssize - init_ssize;
2821 	}
2822 
2823 	vm_map_unlock(map);
2824 	vm_map_entry_release(count);
2825 	return (rv);
2826 }
2827 
2828 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
2829  * desired address is already mapped, or if we successfully grow
2830  * the stack.  Also returns KERN_SUCCESS if addr is outside the
2831  * stack range (this is strange, but preserves compatibility with
2832  * the grow function in vm_machdep.c).
2833  */
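/*
 * Illustrative sketch (hypothetical fault-path caller): when a faulting
 * address may lie in the reserved stack area, try to grow the stack
 * before treating the fault as an error.  "p" and "va" are assumed
 * example values.
 *
 *	if (vm_map_growstack(p, va) != KERN_SUCCESS)
 *		... handle as a normal invalid-address fault ...
 */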
2834 int
2835 vm_map_growstack (struct proc *p, vm_offset_t addr)
2836 {
2837 	vm_map_entry_t prev_entry;
2838 	vm_map_entry_t stack_entry;
2839 	vm_map_entry_t new_stack_entry;
2840 	struct vmspace *vm = p->p_vmspace;
2841 	vm_map_t map = &vm->vm_map;
2842 	vm_offset_t    end;
2843 	int grow_amount;
2844 	int rv = KERN_SUCCESS;
2845 	int is_procstack;
2846 	int use_read_lock = 1;
2847 	int count;
2848 
2849 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2850 Retry:
2851 	if (use_read_lock)
2852 		vm_map_lock_read(map);
2853 	else
2854 		vm_map_lock(map);
2855 
2856 	/* If addr is already in the entry range, no need to grow.*/
2857 	if (vm_map_lookup_entry(map, addr, &prev_entry))
2858 		goto done;
2859 
2860 	if ((stack_entry = prev_entry->next) == &map->header)
2861 		goto done;
2862 	if (prev_entry == &map->header)
2863 		end = stack_entry->start - stack_entry->avail_ssize;
2864 	else
2865 		end = prev_entry->end;
2866 
2867 	/* This next test mimics the old grow function in vm_machdep.c.
2868 	 * It really doesn't quite make sense, but we do it anyway
2869 	 * for compatibility.
2870 	 *
2871 	 * If the stack is not growable, return success.  This signals
2872 	 * the caller to proceed as it normally would with ordinary vm.
2873 	 */
2874 	if (stack_entry->avail_ssize < 1 ||
2875 	    addr >= stack_entry->start ||
2876 	    addr <  stack_entry->start - stack_entry->avail_ssize) {
2877 		goto done;
2878 	}
2879 
2880 	/* Find the minimum grow amount */
2881 	grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
2882 	if (grow_amount > stack_entry->avail_ssize) {
2883 		rv = KERN_NO_SPACE;
2884 		goto done;
2885 	}
2886 
2887 	/* If there is no longer enough space between the entries,
2888 	 * fail and adjust the available space.  Note: this
2889 	 * should only happen if the user has mapped into the
2890 	 * stack area after the stack was created, and is
2891 	 * probably an error.
2892 	 *
2893 	 * This also effectively destroys any guard page the user
2894 	 * might have intended by limiting the stack size.
2895 	 */
2896 	if (grow_amount > stack_entry->start - end) {
2897 		if (use_read_lock && vm_map_lock_upgrade(map)) {
2898 			use_read_lock = 0;
2899 			goto Retry;
2900 		}
2901 		use_read_lock = 0;
2902 		stack_entry->avail_ssize = stack_entry->start - end;
2903 		rv = KERN_NO_SPACE;
2904 		goto done;
2905 	}
2906 
2907 	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
2908 
2909 	/* If this is the main process stack, see if we're over the
2910 	 * stack limit.
2911 	 */
2912 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2913 			     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2914 		rv = KERN_NO_SPACE;
2915 		goto done;
2916 	}
2917 
2918 	/* Round up the grow amount to a multiple of sgrowsiz */
2919 	grow_amount = roundup (grow_amount, sgrowsiz);
2920 	if (grow_amount > stack_entry->avail_ssize) {
2921 		grow_amount = stack_entry->avail_ssize;
2922 	}
2923 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2924 	                     p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2925 		grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
2926 		              ctob(vm->vm_ssize);
2927 	}
2928 
2929 	/* If we would blow our VMEM resource limit, no go */
2930 	if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2931 		rv = KERN_NO_SPACE;
2932 		goto done;
2933 	}
2934 
2935 	if (use_read_lock && vm_map_lock_upgrade(map)) {
2936 		use_read_lock = 0;
2937 		goto Retry;
2938 	}
2939 	use_read_lock = 0;
2940 
2941 	/* Get the preliminary new entry start value */
2942 	addr = stack_entry->start - grow_amount;
2943 
2944 	/* If this puts us into the previous entry, cut back our growth
2945 	 * to the available space.  Also, see the note above.
2946 	 */
2947 	if (addr < end) {
2948 		stack_entry->avail_ssize = stack_entry->start - end;
2949 		addr = end;
2950 	}
2951 
2952 	rv = vm_map_insert(map, &count,
2953 			   NULL, 0, addr, stack_entry->start,
2954 			   VM_PROT_ALL,
2955 			   VM_PROT_ALL,
2956 			   0);
2957 
2958 	/* Adjust the available stack space by the amount we grew. */
2959 	if (rv == KERN_SUCCESS) {
2960 		if (prev_entry != &map->header)
2961 			vm_map_clip_end(map, prev_entry, addr, &count);
2962 		new_stack_entry = prev_entry->next;
2963 		if (new_stack_entry->end   != stack_entry->start  ||
2964 		    new_stack_entry->start != addr)
2965 			panic ("Bad stack grow start/end in new stack entry");
2966 		else {
2967 			new_stack_entry->avail_ssize = stack_entry->avail_ssize -
2968 							(new_stack_entry->end -
2969 							 new_stack_entry->start);
2970 			if (is_procstack)
2971 				vm->vm_ssize += btoc(new_stack_entry->end -
2972 						     new_stack_entry->start);
2973 		}
2974 	}
2975 
2976 done:
2977 	if (use_read_lock)
2978 		vm_map_unlock_read(map);
2979 	else
2980 		vm_map_unlock(map);
2981 	vm_map_entry_release(count);
2982 	return (rv);
2983 }
2984 
2985 /*
2986  * Unshare the specified VM space for exec.  If other processes are
2987  * mapped to it, then create a new one.  The new vmspace starts out empty.
2988  */
2989 
2990 void
2991 vmspace_exec(struct proc *p, struct vmspace *vmcopy)
2992 {
2993 	struct vmspace *oldvmspace = p->p_vmspace;
2994 	struct vmspace *newvmspace;
2995 	vm_map_t map = &p->p_vmspace->vm_map;
2996 
2997 	/*
2998 	 * If we are execing a resident vmspace we fork it, otherwise
2999 	 * we create a new vmspace.  Note that exitingcnt and upcalls
3000 	 * are not copied to the new vmspace.
3001 	 */
3002 	if (vmcopy)  {
3003 	    newvmspace = vmspace_fork(vmcopy);
3004 	} else {
3005 	    newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
3006 	    bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
3007 		(caddr_t)&oldvmspace->vm_endcopy -
3008 		    (caddr_t)&oldvmspace->vm_startcopy);
3009 	}
3010 
3011 	/*
3012 	 * This code is written like this for prototype purposes.  The
3013 	 * goal is to avoid running down the vmspace here, but let the
3014 	 * other processes that are still using the vmspace finally
3015 	 * run it down.  Even though there is little or no chance of blocking
3016 	 * here, it is a good idea to keep this form for future mods.
3017 	 */
3018 	p->p_vmspace = newvmspace;
3019 	pmap_pinit2(vmspace_pmap(newvmspace));
3020 	if (p == curproc)
3021 		pmap_activate(p);
3022 	vmspace_free(oldvmspace);
3023 }
3024 
3025 /*
3026  * Unshare the specified VM space for forcing COW.  This
3027  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3028  *
3029  * The exitingcnt test is not strictly necessary but has been
3030  * included for code sanity (to make the code a bit more deterministic).
3031  */
3032 
3033 void
3034 vmspace_unshare(struct proc *p)
3035 {
3036 	struct vmspace *oldvmspace = p->p_vmspace;
3037 	struct vmspace *newvmspace;
3038 
3039 	if (oldvmspace->vm_refcnt == 1 && oldvmspace->vm_exitingcnt == 0)
3040 		return;
3041 	newvmspace = vmspace_fork(oldvmspace);
3042 	p->p_vmspace = newvmspace;
3043 	pmap_pinit2(vmspace_pmap(newvmspace));
3044 	if (p == curproc)
3045 		pmap_activate(p);
3046 	vmspace_free(oldvmspace);
3047 }
3048 
3049 /*
3050  *	vm_map_lookup:
3051  *
3052  *	Finds the VM object, offset, and
3053  *	protection for a given virtual address in the
3054  *	specified map, assuming a page fault of the
3055  *	type specified.
3056  *
3057  *	Leaves the map in question locked for read; return
3058  *	values are guaranteed until a vm_map_lookup_done
3059  *	call is performed.  Note that the map argument
3060  *	is in/out; the returned map must be used in
3061  *	the call to vm_map_lookup_done.
3062  *
3063  *	A handle (out_entry) is returned for use in
3064  *	vm_map_lookup_done, to make that fast.
3065  *
3066  *	If a lookup is requested with "write protection"
3067  *	specified, the map may be changed to perform virtual
3068  *	copying operations, although the data referenced will
3069  *	remain the same.
3070  */
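/*
 * Illustrative sketch, modeled on the vm_uiomove() caller further down
 * in this file: look up the object/page behind a user address, use it,
 * then release the lookup.  "va" is an assumed example address; no map
 * entries were reserved here, hence the 0 count passed to
 * vm_map_lookup_done().
 *
 *	if (vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
 *			  &pindex, &prot, &wired) != KERN_SUCCESS)
 *		return (EFAULT);
 *	... use "object" and "pindex" while the map stays read-locked ...
 *	vm_map_lookup_done(map, entry, 0);
 */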
3071 int
3072 vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
3073 	      vm_offset_t vaddr,
3074 	      vm_prot_t fault_typea,
3075 	      vm_map_entry_t *out_entry,	/* OUT */
3076 	      vm_object_t *object,		/* OUT */
3077 	      vm_pindex_t *pindex,		/* OUT */
3078 	      vm_prot_t *out_prot,		/* OUT */
3079 	      boolean_t *wired)			/* OUT */
3080 {
3081 	vm_map_entry_t entry;
3082 	vm_map_t map = *var_map;
3083 	vm_prot_t prot;
3084 	vm_prot_t fault_type = fault_typea;
3085 	int use_read_lock = 1;
3086 	int rv = KERN_SUCCESS;
3087 
3088 RetryLookup:
3089 	if (use_read_lock)
3090 		vm_map_lock_read(map);
3091 	else
3092 		vm_map_lock(map);
3093 
3094 	/*
3095 	 * If the map has an interesting hint, try it before calling the
3096 	 * full-blown lookup routine.
3097 	 */
3098 	entry = map->hint;
3099 	*out_entry = entry;
3100 
3101 	if ((entry == &map->header) ||
3102 	    (vaddr < entry->start) || (vaddr >= entry->end)) {
3103 		vm_map_entry_t tmp_entry;
3104 
3105 		/*
3106 		 * Entry was either not a valid hint, or the vaddr was not
3107 		 * contained in the entry, so do a full lookup.
3108 		 */
3109 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
3110 			rv = KERN_INVALID_ADDRESS;
3111 			goto done;
3112 		}
3113 
3114 		entry = tmp_entry;
3115 		*out_entry = entry;
3116 	}
3117 
3118 	/*
3119 	 * Handle submaps.
3120 	 */
3121 
3122 	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3123 		vm_map_t old_map = map;
3124 
3125 		*var_map = map = entry->object.sub_map;
3126 		if (use_read_lock)
3127 			vm_map_unlock_read(old_map);
3128 		else
3129 			vm_map_unlock(old_map);
3130 		use_read_lock = 1;
3131 		goto RetryLookup;
3132 	}
3133 
3134 	/*
3135 	 * Check whether this task is allowed to have this page.
3136 	 * Note the special case for MAP_ENTRY_COW
3137 	 * pages with an override.  This is to implement a forced
3138 	 * COW for debuggers.
3139 	 */
3140 
3141 	if (fault_type & VM_PROT_OVERRIDE_WRITE)
3142 		prot = entry->max_protection;
3143 	else
3144 		prot = entry->protection;
3145 
3146 	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3147 	if ((fault_type & prot) != fault_type) {
3148 		rv = KERN_PROTECTION_FAILURE;
3149 		goto done;
3150 	}
3151 
3152 	if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3153 	    (entry->eflags & MAP_ENTRY_COW) &&
3154 	    (fault_type & VM_PROT_WRITE) &&
3155 	    (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
3156 		rv = KERN_PROTECTION_FAILURE;
3157 		goto done;
3158 	}
3159 
3160 	/*
3161 	 * If this page is not pageable, we have to get it for all possible
3162 	 * accesses.
3163 	 */
3164 
3165 	*wired = (entry->wired_count != 0);
3166 	if (*wired)
3167 		prot = fault_type = entry->protection;
3168 
3169 	/*
3170 	 * If the entry was copy-on-write, we either shadow it or demote access.
3171 	 */
3172 
3173 	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3174 		/*
3175 		 * If we want to write the page, we may as well handle that
3176 		 * now since we've got the map locked.
3177 		 *
3178 		 * If we don't need to write the page, we just demote the
3179 		 * permissions allowed.
3180 		 */
3181 
3182 		if (fault_type & VM_PROT_WRITE) {
3183 			/*
3184 			 * Make a new object, and place it in the object
3185 			 * chain.  Note that no new references have appeared
3186 			 * -- one just moved from the map to the new
3187 			 * object.
3188 			 */
3189 
3190 			if (use_read_lock && vm_map_lock_upgrade(map)) {
3191 				use_read_lock = 0;
3192 				goto RetryLookup;
3193 			}
3194 			use_read_lock = 0;
3195 
3196 			vm_object_shadow(
3197 			    &entry->object.vm_object,
3198 			    &entry->offset,
3199 			    atop(entry->end - entry->start));
3200 
3201 			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3202 		} else {
3203 			/*
3204 			 * We're attempting to read a copy-on-write page --
3205 			 * don't allow writes.
3206 			 */
3207 
3208 			prot &= ~VM_PROT_WRITE;
3209 		}
3210 	}
3211 
3212 	/*
3213 	 * Create an object if necessary.
3214 	 */
3215 	if (entry->object.vm_object == NULL &&
3216 	    !map->system_map) {
3217 		if (use_read_lock && vm_map_lock_upgrade(map))  {
3218 			use_read_lock = 0;
3219 			goto RetryLookup;
3220 		}
3221 		use_read_lock = 0;
3222 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
3223 		    atop(entry->end - entry->start));
3224 		entry->offset = 0;
3225 	}
3226 
3227 	/*
3228 	 * Return the object/offset from this entry.  If the entry was
3229 	 * copy-on-write or empty, it has been fixed up.
3230 	 */
3231 
3232 	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3233 	*object = entry->object.vm_object;
3234 
3235 	/*
3236 	 * Return whether this is the only map sharing this data.  On
3237 	 * success we return with a read lock held on the map.  On failure
3238 	 * we return with the map unlocked.
3239 	 */
3240 	*out_prot = prot;
3241 done:
3242 	if (rv == KERN_SUCCESS) {
3243 		if (use_read_lock == 0)
3244 			vm_map_lock_downgrade(map);
3245 	} else if (use_read_lock) {
3246 		vm_map_unlock_read(map);
3247 	} else {
3248 		vm_map_unlock(map);
3249 	}
3250 	return (rv);
3251 }
3252 
3253 /*
3254  *	vm_map_lookup_done:
3255  *
3256  *	Releases locks acquired by a vm_map_lookup
3257  *	(according to the handle returned by that lookup).
3258  */
3259 
3260 void
3261 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
3262 {
3263 	/*
3264 	 * Unlock the main-level map
3265 	 */
3266 	vm_map_unlock_read(map);
3267 	if (count)
3268 		vm_map_entry_release(count);
3269 }
3270 
3271 #ifdef ENABLE_VFS_IOOPT
3272 
3273 /*
3274  * Implement uiomove with VM operations.  This handles (and its collateral
3275  * changes support) every combination of source object modification and
3276  * COW-type operations.
3277  *
3278  * XXX this is extremely dangerous; enabling this option is NOT recommended.
3279  */
3280 int
3281 vm_uiomove(vm_map_t mapa, vm_object_t srcobject, off_t cp, int cnta,
3282     vm_offset_t uaddra, int *npages)
3283 {
3284 	vm_map_t map;
3285 	vm_object_t first_object, oldobject, object;
3286 	vm_map_entry_t entry;
3287 	vm_prot_t prot;
3288 	boolean_t wired;
3289 	int tcnt, rv;
3290 	vm_offset_t uaddr, start, end, tend;
3291 	vm_pindex_t first_pindex, osize, oindex;
3292 	off_t ooffset;
3293 	int cnt;
3294 	int count;
3295 	int s;
3296 
3297 	if (npages)
3298 		*npages = 0;
3299 
3300 	cnt = cnta;
3301 	uaddr = uaddra;
3302 
3303 	while (cnt > 0) {
3304 		map = mapa;
3305 
3306 		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3307 
3308 		if ((vm_map_lookup(&map, uaddr,
3309 			VM_PROT_READ, &entry, &first_object,
3310 			&first_pindex, &prot, &wired)) != KERN_SUCCESS) {
3311 			return EFAULT;
3312 		}
3313 
3314 		vm_map_clip_start(map, entry, uaddr, &count);
3315 
3316 		tcnt = cnt;
3317 		tend = uaddr + tcnt;
3318 		if (tend > entry->end) {
3319 			tcnt = entry->end - uaddr;
3320 			tend = entry->end;
3321 		}
3322 
3323 		vm_map_clip_end(map, entry, tend, &count);
3324 
3325 		start = entry->start;
3326 		end = entry->end;
3327 
3328 		osize = atop(tcnt);
3329 
3330 		oindex = OFF_TO_IDX(cp);
3331 		if (npages) {
3332 			vm_pindex_t idx;
3333 
3334 			/*
3335 			 * spl protection is needed to avoid a race between
3336 			 * the lookup and an interrupt/unbusy/free occurring
3337 			 * prior to our busy check.
3338 			 */
3339 			s = splvm();
3340 			for (idx = 0; idx < osize; idx++) {
3341 				vm_page_t m;
3342 				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
3343 					splx(s);
3344 					vm_map_lookup_done(map, entry, count);
3345 					return 0;
3346 				}
3347 				/*
3348 				 * disallow busy or invalid pages, but allow
3349 				 * m->busy pages if they are entirely valid.
3350 				 */
3351 				if ((m->flags & PG_BUSY) ||
3352 					((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
3353 					splx(s);
3354 					vm_map_lookup_done(map, entry, count);
3355 					return 0;
3356 				}
3357 			}
3358 			splx(s);
3359 		}
3360 
3361 /*
3362  * If we are changing an existing map entry, just redirect
3363  * the object, and change mappings.
3364  */
3365 		if ((first_object->type == OBJT_VNODE) &&
3366 			((oldobject = entry->object.vm_object) == first_object)) {
3367 
3368 			if ((entry->offset != cp) || (oldobject != srcobject)) {
3369 				/*
3370 				 * Remove old window into the file
3371 				 */
3372 				pmap_remove (map->pmap, uaddr, tend);
3373 
3374 				/*
3375 				 * Force copy on write for mmaped regions
3376 				 */
3377 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
3378 
3379 				/*
3380 				 * Point the object appropriately
3381 				 */
3382 				if (oldobject != srcobject) {
3383 
3384 					/*
3385 					 * Set the object optimization hint flag
3386 					 */
3387 					vm_object_set_flag(srcobject, OBJ_OPT);
3388 					vm_object_reference(srcobject);
3389 					entry->object.vm_object = srcobject;
3390 
3391 					if (oldobject) {
3392 						vm_object_deallocate(oldobject);
3393 					}
3394 				}
3395 
3396 				entry->offset = cp;
3397 				map->timestamp++;
3398 			} else {
3399 				pmap_remove (map->pmap, uaddr, tend);
3400 			}
3401 
3402 		} else if ((first_object->ref_count == 1) &&
3403 			(first_object->size == osize) &&
3404 			((first_object->type == OBJT_DEFAULT) ||
3405 				(first_object->type == OBJT_SWAP)) ) {
3406 
3407 			oldobject = first_object->backing_object;
3408 
3409 			if ((first_object->backing_object_offset != cp) ||
3410 				(oldobject != srcobject)) {
3411 				/*
3412 				 * Remove old window into the file
3413 				 */
3414 				pmap_remove (map->pmap, uaddr, tend);
3415 
3416 				/*
3417 				 * Remove unneeded old pages
3418 				 */
3419 				vm_object_page_remove(first_object, 0, 0, 0);
3420 
3421 				/*
3422 				 * Invalidate swap space
3423 				 */
3424 				if (first_object->type == OBJT_SWAP) {
3425 					swap_pager_freespace(first_object,
3426 						0,
3427 						first_object->size);
3428 				}
3429 
3430 				/*
3431 				 * Force copy on write for mmaped regions
3432 				 */
3433 				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
3434 
3435 				/*
3436 				 * Point the object appropriately
3437 				 */
3438 				if (oldobject != srcobject) {
3439 
3440 					/*
3441 					 * Set the object optimization hint flag
3442 					 */
3443 					vm_object_set_flag(srcobject, OBJ_OPT);
3444 					vm_object_reference(srcobject);
3445 
3446 					if (oldobject) {
3447 						LIST_REMOVE(
3448 							first_object, shadow_list);
3449 						oldobject->shadow_count--;
3450 						/* XXX bump generation? */
3451 						vm_object_deallocate(oldobject);
3452 					}
3453 
3454 					LIST_INSERT_HEAD(&srcobject->shadow_head,
3455 						first_object, shadow_list);
3456 					srcobject->shadow_count++;
3457 					/* XXX bump generation? */
3458 
3459 					first_object->backing_object = srcobject;
3460 				}
3461 				first_object->backing_object_offset = cp;
3462 				map->timestamp++;
3463 			} else {
3464 				pmap_remove (map->pmap, uaddr, tend);
3465 			}
3466 /*
3467  * Otherwise, we have to do a logical mmap.
3468  */
3469 		} else {
3470 
3471 			vm_object_set_flag(srcobject, OBJ_OPT);
3472 			vm_object_reference(srcobject);
3473 
3474 			pmap_remove (map->pmap, uaddr, tend);
3475 
3476 			vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
3477 			vm_map_lock_upgrade(map);
3478 
3479 			if (entry == &map->header) {
3480 				map->first_free = &map->header;
3481 			} else if (map->first_free->start >= start) {
3482 				map->first_free = entry->prev;
3483 			}
3484 
3485 			SAVE_HINT(map, entry->prev);
3486 			vm_map_entry_delete(map, entry, &count);
3487 
3488 			object = srcobject;
3489 			ooffset = cp;
3490 
3491 			rv = vm_map_insert(map, &count,
3492 				object, ooffset, start, tend,
3493 				VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
3494 
3495 			if (rv != KERN_SUCCESS)
3496 				panic("vm_uiomove: could not insert new entry: %d", rv);
3497 		}
3498 
3499 /*
3500  * Map the window directly, if it is already in memory
3501  */
3502 		pmap_object_init_pt(map->pmap, uaddr, entry->protection,
3503 			srcobject, oindex, tcnt, 0);
3504 
3505 		map->timestamp++;
3506 		vm_map_unlock(map);
3507 		vm_map_entry_release(count);
3508 
3509 		cnt -= tcnt;
3510 		uaddr += tcnt;
3511 		cp += tcnt;
3512 		if (npages)
3513 			*npages += osize;
3514 	}
3515 	return 0;
3516 }
3517 
3518 #endif
3519 
3520 /*
3521  * Performs the copy_on_write operations necessary to allow the virtual copies
3522  * into user space to work.  This has to be called for write(2) system calls
3523  * from other processes, file unlinking, and file size shrinkage.
3524  */
3525 void
3526 vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
3527 {
3528 	int rv;
3529 	vm_object_t robject;
3530 	vm_pindex_t idx;
3531 
3532 	if ((object == NULL) ||
3533 		((object->flags & OBJ_OPT) == 0))
3534 		return;
3535 
3536 	if (object->shadow_count > object->ref_count)
3537 		panic("vm_freeze_copyopts: sc > rc");
3538 
3539 	while ((robject = LIST_FIRST(&object->shadow_head)) != NULL) {
3540 		vm_pindex_t bo_pindex;
3541 		vm_page_t m_in, m_out;
3542 
3543 		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
3544 
3545 		vm_object_reference(robject);
3546 
3547 		vm_object_pip_wait(robject, "objfrz");
3548 
3549 		if (robject->ref_count == 1) {
3550 			vm_object_deallocate(robject);
3551 			continue;
3552 		}
3553 
3554 		vm_object_pip_add(robject, 1);
3555 
3556 		for (idx = 0; idx < robject->size; idx++) {
3557 
3558 			m_out = vm_page_grab(robject, idx,
3559 					    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
3560 
3561 			if (m_out->valid == 0) {
3562 				m_in = vm_page_grab(object, bo_pindex + idx,
3563 					    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
3564 				if (m_in->valid == 0) {
3565 					rv = vm_pager_get_pages(object, &m_in, 1, 0);
3566 					if (rv != VM_PAGER_OK) {
3567 						printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex);
3568 						continue;
3569 					}
3570 					vm_page_deactivate(m_in);
3571 				}
3572 
3573 				vm_page_protect(m_in, VM_PROT_NONE);
3574 				pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
3575 				m_out->valid = m_in->valid;
3576 				vm_page_dirty(m_out);
3577 				vm_page_activate(m_out);
3578 				vm_page_wakeup(m_in);
3579 			}
3580 			vm_page_wakeup(m_out);
3581 		}
3582 
3583 		object->shadow_count--;
3584 		object->ref_count--;
3585 		LIST_REMOVE(robject, shadow_list);
3586 		robject->backing_object = NULL;
3587 		robject->backing_object_offset = 0;
3588 
3589 		vm_object_pip_wakeup(robject);
3590 		vm_object_deallocate(robject);
3591 	}
3592 
3593 	vm_object_clear_flag(object, OBJ_OPT);
3594 }
3595 
3596 #include "opt_ddb.h"
3597 #ifdef DDB
3598 #include <sys/kernel.h>
3599 
3600 #include <ddb/ddb.h>
3601 
3602 /*
3603  *	vm_map_print:	[ debug ]
3604  */
3605 DB_SHOW_COMMAND(map, vm_map_print)
3606 {
3607 	static int nlines;
3608 	/* XXX convert args. */
3609 	vm_map_t map = (vm_map_t)addr;
3610 	boolean_t full = have_addr;
3611 
3612 	vm_map_entry_t entry;
3613 
3614 	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3615 	    (void *)map,
3616 	    (void *)map->pmap, map->nentries, map->timestamp);
3617 	nlines++;
3618 
3619 	if (!full && db_indent)
3620 		return;
3621 
3622 	db_indent += 2;
3623 	for (entry = map->header.next; entry != &map->header;
3624 	    entry = entry->next) {
3625 		db_iprintf("map entry %p: start=%p, end=%p\n",
3626 		    (void *)entry, (void *)entry->start, (void *)entry->end);
3627 		nlines++;
3628 		{
3629 			static char *inheritance_name[4] =
3630 			{"share", "copy", "none", "donate_copy"};
3631 
3632 			db_iprintf(" prot=%x/%x/%s",
3633 			    entry->protection,
3634 			    entry->max_protection,
3635 			    inheritance_name[(int)(unsigned char)entry->inheritance]);
3636 			if (entry->wired_count != 0)
3637 				db_printf(", wired");
3638 		}
3639 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3640 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3641 			db_printf(", share=%p, offset=0x%lx\n",
3642 			    (void *)entry->object.sub_map,
3643 			    (long)entry->offset);
3644 			nlines++;
3645 			if ((entry->prev == &map->header) ||
3646 			    (entry->prev->object.sub_map !=
3647 				entry->object.sub_map)) {
3648 				db_indent += 2;
3649 				vm_map_print((db_expr_t)(intptr_t)
3650 					     entry->object.sub_map,
3651 					     full, 0, (char *)0);
3652 				db_indent -= 2;
3653 			}
3654 		} else {
3655 			/* XXX no %qd in kernel.  Truncate entry->offset. */
3656 			db_printf(", object=%p, offset=0x%lx",
3657 			    (void *)entry->object.vm_object,
3658 			    (long)entry->offset);
3659 			if (entry->eflags & MAP_ENTRY_COW)
3660 				db_printf(", copy (%s)",
3661 				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3662 			db_printf("\n");
3663 			nlines++;
3664 
3665 			if ((entry->prev == &map->header) ||
3666 			    (entry->prev->object.vm_object !=
3667 				entry->object.vm_object)) {
3668 				db_indent += 2;
3669 				vm_object_print((db_expr_t)(intptr_t)
3670 						entry->object.vm_object,
3671 						full, 0, (char *)0);
3672 				nlines += 4;
3673 				db_indent -= 2;
3674 			}
3675 		}
3676 	}
3677 	db_indent -= 2;
3678 	if (db_indent == 0)
3679 		nlines = 0;
3680 }
3681 
3682 
3683 DB_SHOW_COMMAND(procvm, procvm)
3684 {
3685 	struct proc *p;
3686 
3687 	if (have_addr) {
3688 		p = (struct proc *) addr;
3689 	} else {
3690 		p = curproc;
3691 	}
3692 
3693 	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3694 	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3695 	    (void *)vmspace_pmap(p->p_vmspace));
3696 
3697 	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3698 }
3699 
3700 #endif /* DDB */
3701