/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.9 (Berkeley) 5/17/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.h,v 1.54.2.5 2003/01/13 22:51:17 dillon Exp $
 * $DragonFly: src/sys/vm/vm_map.h,v 1.30 2007/04/29 18:25:41 dillon Exp $
 */

/*
 *	Virtual memory map module definitions.
 */

#ifndef	_VM_VM_MAP_H_
#define	_VM_VM_MAP_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifdef _KERNEL
#ifndef _SYS_KERNEL_H_
#include <sys/kernel.h>	/* ticks */
#endif
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _SYS_SYSREF_H_
#include <sys/sysref.h>
#endif
#ifndef _SYS_LOCK_H_
#include <sys/lock.h>
#endif
#ifndef _SYS_VKERNEL_H_
#include <sys/vkernel.h>
#endif
#ifndef _VM_VM_H_
#include <vm/vm.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_VM_OBJECT_H_
#include <vm/vm_object.h>
#endif
#ifndef _SYS_NULL_H_
#include <sys/_null.h>
#endif

struct vm_map_rb_tree;
RB_PROTOTYPE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 */

typedef u_int vm_flags_t;
typedef u_int vm_eflags_t;

/*
 *	Objects which live in maps may be either VM objects, or
 *	another map (called a "sharing map") which denotes read-write
 *	sharing with other maps.
 */
union vm_map_object {
	struct vm_object *vm_object;	/* the VM object */
	struct vm_map *sub_map;		/* belongs to another map */
};
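
/*
 * Illustrative sketch (not part of this header): code inspecting a map
 * entry selects the union arm based on the entry's maptype.  This assumes
 * a VM_MAPTYPE_SUBMAP constant from <vm/vm.h> alongside the VM_MAPTYPE_*
 * values used later in this file:
 *
 *	if (entry->maptype == VM_MAPTYPE_SUBMAP)
 *		submap = entry->object.sub_map;
 *	else
 *		object = entry->object.vm_object;
 */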

union vm_map_aux {
	vm_offset_t avail_ssize;	/* amount the stack can grow, if a stack */
	vpte_t master_pde;		/* virtual page table root */
};

/*
 *	Address map entries consist of start and end addresses,
 *	a VM object (or sharing map) and offset into that object,
 *	and user-exported inheritance and protection information.
 *	Also included is control information for virtual copy operations.
 *
 *	When used with MAP_STACK, avail_ssize is used to determine the
 *	limits of stack growth.
 *
 *	When used with VM_MAPTYPE_VPAGETABLE, avail_ssize stores the
 *	page directory index.
 */
struct vm_map_entry {
	struct vm_map_entry *prev;	/* previous entry */
	struct vm_map_entry *next;	/* next entry */
	RB_ENTRY(vm_map_entry) rb_entry;
	vm_offset_t start;		/* start address */
	vm_offset_t end;		/* end address */
	union vm_map_aux aux;		/* auxiliary data */
	union vm_map_object object;	/* object I point to */
	vm_ooffset_t offset;		/* offset into object */
	vm_eflags_t eflags;		/* map entry flags */
	vm_maptype_t maptype;		/* type of VM mapping */
	vm_prot_t protection;		/* protection code */
	vm_prot_t max_protection;	/* maximum protection */
	vm_inherit_t inheritance;	/* inheritance */
	int wired_count;		/* can be paged if == 0 */
};

#define MAP_ENTRY_NOSYNC		0x0001
#define MAP_ENTRY_STACK			0x0002
#define MAP_ENTRY_COW			0x0004
#define MAP_ENTRY_NEEDS_COPY		0x0008
#define MAP_ENTRY_NOFAULT		0x0010
#define MAP_ENTRY_USER_WIRED		0x0020

#define MAP_ENTRY_BEHAV_NORMAL		0x0000	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL	0x0040	/* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM		0x0080	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED	0x00C0	/* future use */

#define MAP_ENTRY_BEHAV_MASK		0x00C0

#define MAP_ENTRY_IN_TRANSITION		0x0100	/* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP		0x0200	/* waiters in transition */
#define MAP_ENTRY_NOCOREDUMP		0x0400	/* don't include in a core */
#define MAP_ENTRY_KSTACK		0x0800	/* guarded kernel stack */

/*
 * Flags for vm_map_[un]clip_range()
 */
#define MAP_CLIP_NO_HOLES		0x0001

/*
 * This reserve count for vm_map_entry_reserve() should cover all nominal
 * single-insertion operations, including any necessary clipping.
 */
#define MAP_RESERVE_COUNT	4
#define MAP_RESERVE_SLOP	32
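
/*
 * Illustrative sketch (not part of this header, error handling omitted):
 * a typical single-insertion caller reserves entries up front, performs
 * the locked operation, then returns the unused reservation:
 *
 *	int count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *
 *	vm_map_lock(map);
 *	... insert and possibly clip entries using the reserved pool ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */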

static __inline u_char
vm_map_entry_behavior(struct vm_map_entry *entry)
{
	return entry->eflags & MAP_ENTRY_BEHAV_MASK;
}

static __inline void
vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
		(behavior & MAP_ENTRY_BEHAV_MASK);
}
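
/*
 * Illustrative sketch (not part of this header): an madvise-style path
 * might update and verify the access-pattern hint on an entry of a
 * locked map:
 *
 *	vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_SEQUENTIAL);
 *	KKASSERT(vm_map_entry_behavior(entry) == MAP_ENTRY_BEHAV_SEQUENTIAL);
 */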

/*
 * Maps are doubly-linked lists of map entries, kept sorted by address.
 * A single hint is provided to start searches again from the last
 * successful search, insertion, or removal.
 *
 * NOTE: The lock structure cannot be the first element of vm_map
 *	 because doing so can produce a lockup between two or more system
 *	 processes trying to kmem_alloc_wait(): kmem_alloc_wait() and the
 *	 corresponding free path tsleep on and wake up 'map', while the
 *	 underlying lockmgr lock also sleeps and wakes up on 'map'.  The
 *	 lockup occurs when the map fills up.  The 'exec' map, for example.
 *
 * NOTE: The vm_map structure can be hard-locked with the lockmgr lock
 *	 or soft-serialized with the token, or both.
 */
struct vm_map {
	struct vm_map_entry header;	/* List of entries */
	RB_HEAD(vm_map_rb_tree, vm_map_entry) rb_root;
	struct lock lock;		/* Lock for map data */
	int nentries;			/* Number of entries */
	vm_size_t size;			/* virtual size */
	u_char system_map;		/* Am I a system map? */
	vm_map_entry_t hint;		/* hint for quick lookups */
	unsigned int timestamp;		/* Version number */
	vm_map_entry_t first_free;	/* First free space hint */
	vm_flags_t flags;		/* flags for this vm_map */
	struct pmap *pmap;		/* Physical map */
	u_int president_cache;		/* Remember president count */
	u_int president_ticks;		/* Save ticks for cache */
	struct lwkt_token token;	/* Soft serializer */
#define	min_offset		header.start
#define max_offset		header.end
};
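
/*
 * Illustrative sketch (not part of this header): the entry list is
 * circular through 'header', so a read-locked traversal looks like the
 * loop used by vmspace_president_count() below:
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	for (entry = map->header.next; entry != &map->header;
 *	     entry = entry->next) {
 *		... examine entry->start, entry->end, etc ...
 *	}
 *	vm_map_unlock_read(map);
 */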

/*
 * vm_flags_t values
 */
#define MAP_WIREFUTURE		0x0001	/* wire all future pages */

/*
 * Registered upcall
 */
struct upcall;

struct vmupcall {
	struct vmupcall	*vu_next;
	void		*vu_func;	/* user upcall function */
	void		*vu_data;	/* user data */
	void		*vu_ctx;	/* user context function */
	struct lwp	*vu_lwp;	/* lwp that registered the upcall */
	int		vu_id;		/* upcall identifier */
	int		vu_pending;	/* upcall request pending */
};

/*
 * Shareable process virtual address space.
 *
 * Ref'd pointers from vmresident, proc
 */
struct vmspace {
	struct vm_map vm_map;	/* VM address map */
	struct pmap vm_pmap;	/* private physical map */
	int vm_flags;
	caddr_t vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_ssize;	/* stack size (pages) */
	caddr_t vm_taddr;	/* user virtual address of text XXX */
	caddr_t vm_daddr;	/* user virtual address of data XXX */
	caddr_t vm_maxsaddr;	/* user VA at max stack growth */
	caddr_t vm_minsaddr;	/* user VA at top of stack */
#define vm_endcopy	vm_exitingcnt
	int	vm_exitingcnt;  /* exit/wait context reaping */
	int	vm_upccount;	/* number of registered upcalls */
	int	vm_pagesupply;
	u_int	vm_holdcount;
	struct vmupcall *vm_upcalls;	/* registered upcalls */
	struct sysref vm_sysref;	/* sysref, refcnt, etc */
};

#define VMSPACE_EXIT1	0x0001	/* partial exit */
#define VMSPACE_EXIT2	0x0002	/* full exit */

/*
 * Resident executable holding structure.  A user program can take a
 * snapshot of just its VM address space (typically done just after
 * dynamic link libraries have completed loading) and register it as a
 * resident executable associated with the program binary's vnode, which
 * is also locked into memory.  Future execs of the vnode will start with
 * a copy of the resident vmspace instead of running the binary from
 * scratch, avoiding both the kernel ELF loader *AND* all shared library
 * mapping and relocation code.  Such execs call a different entry point,
 * supplied when the vmspace was registered, with the stack pointer reset
 * to the top of the stack.
 */
struct vmresident {
	struct vnode	*vr_vnode;		/* associated vnode */
	TAILQ_ENTRY(vmresident) vr_link;	/* list of resident executables */
	struct vmspace	*vr_vmspace;		/* vmspace to fork */
	intptr_t	vr_entry_addr;		/* registered entry point */
	struct sysentvec *vr_sysent;		/* system call vectors */
	int		vr_id;			/* registration id */
	int		vr_refs;		/* temporary refs */
};

#ifdef _KERNEL
/*
 *	Macros:		vm_map_lock, etc.
 *	Function:
 *		Perform locking on the data portion of a map.  Note that
 *		these macros mimic procedure calls returning void.  The
 *		semicolon is supplied by the user of these macros, not
 *		by the macros themselves.  The macros can safely be used
 *		as unbraced elements in a higher level statement.
 */

#define ASSERT_VM_MAP_LOCKED(map)	KKASSERT(lockowned(&(map)->lock))

#ifdef DIAGNOSTIC
/* #define MAP_LOCK_DIAGNOSTIC 1 */
#ifdef MAP_LOCK_DIAGNOSTIC
#define	vm_map_lock(map) \
	do { \
		kprintf("locking map LK_EXCLUSIVE: %p\n", (map)); \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while (0)
#else
#define	vm_map_lock(map) \
	do { \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while (0)
#endif
#else
#define	vm_map_lock(map) \
	do { \
		lockmgr(&(map)->lock, LK_EXCLUSIVE); \
		(map)->timestamp++; \
	} while (0)
#endif /* DIAGNOSTIC */

#if defined(MAP_LOCK_DIAGNOSTIC)
#define	vm_map_unlock(map) \
	do { \
		kprintf("locking map LK_RELEASE: %p\n", (map)); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#define	vm_map_lock_read(map) \
	do { \
		kprintf("locking map LK_SHARED: %p\n", (map)); \
		lockmgr(&(map)->lock, LK_SHARED); \
	} while (0)
#define	vm_map_unlock_read(map) \
	do { \
		kprintf("locking map LK_RELEASE: %p\n", (map)); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#else
#define	vm_map_unlock(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#define	vm_map_lock_read(map) \
	lockmgr(&(map)->lock, LK_SHARED)
#define	vm_map_unlock_read(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#endif

#define vm_map_lock_read_try(map) \
	lockmgr(&(map)->lock, LK_SHARED | LK_NOWAIT)

static __inline__ int
vm_map_lock_read_to(vm_map_t map)
{
	int error;

#if defined(MAP_LOCK_DIAGNOSTIC)
	kprintf("locking map LK_SHARED: %p\n", map);
#endif
	error = lockmgr(&map->lock, LK_SHARED | LK_TIMELOCK);
	return error;
}

static __inline__ int
vm_map_lock_upgrade(vm_map_t map)
{
	int error;

#if defined(MAP_LOCK_DIAGNOSTIC)
	kprintf("locking map LK_EXCLUPGRADE: %p\n", map);
#endif
	error = lockmgr(&map->lock, LK_EXCLUPGRADE);
	if (error == 0)
		map->timestamp++;
	return error;
}

#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_lock_downgrade(map) \
	do { \
		kprintf("locking map LK_DOWNGRADE: %p\n", (map)); \
		lockmgr(&(map)->lock, LK_DOWNGRADE); \
	} while (0)
#else
#define vm_map_lock_downgrade(map) \
	lockmgr(&(map)->lock, LK_DOWNGRADE)
#endif

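/*
 * Illustrative sketch (not part of this header): the canonical usage
 * pattern for the macros above.  Writers take the exclusive lock, which
 * also bumps the map timestamp; readers take the shared lock:
 *
 *	vm_map_lock(map);
 *	... modify entries ...
 *	vm_map_unlock(map);
 *
 *	vm_map_lock_read(map);
 *	... lookups only ...
 *	vm_map_unlock_read(map);
 */
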
#endif /* _KERNEL */

/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

/*
 * Must not block
 */
static __inline struct pmap *
vmspace_pmap(struct vmspace *vmspace)
{
	return &vmspace->vm_pmap;
}

/*
 * Caller must hold the vmspace->vm_map.token
 */
static __inline long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}
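
/*
 * Illustrative sketch (not part of this header): converting the resident
 * page count to bytes, assuming the caller holds vmspace->vm_map.token
 * as required above and that PAGE_SIZE comes from the machine headers:
 *
 *	size_t rss_bytes = (size_t)vmspace_resident_count(vm) * PAGE_SIZE;
 */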

/*
 * Calculates the proportional RSS, returning the accrued result.  This
 * is a loose value intended for statistics/display purposes only, and it
 * is only updated when the map's read lock can be acquired without
 * blocking.
 *
 * (used by userland or the kernel)
 */
static __inline u_int
vmspace_president_count(struct vmspace *vmspace)
{
	vm_map_t map = &vmspace->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	u_int count = 0;
	u_int n;

#ifdef _KERNEL
	if (map->president_ticks == ticks / hz || vm_map_lock_read_try(map))
		return(map->president_cache);
#endif

	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		switch(cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->object.vm_object) == NULL)
				break;
			if (object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP) {
				break;
			}
			/*
			 * synchronize non-zero case, contents of field
			 * can change at any time due to pmap ops.
			 */
			if ((n = object->agg_pv_list_count) != 0) {
#ifdef _KERNEL
				cpu_ccfence();
#endif
				count += object->resident_page_count / n;
			}
			break;
		default:
			break;
		}
	}
#ifdef _KERNEL
	map->president_cache = count;
	map->president_ticks = ticks / hz;
	vm_map_unlock_read(map);
#endif

	return(count);
}

/*
 * Number of kernel maps and entries to statically allocate, required
 * during boot to bootstrap the VM system.
 */
#define MAX_KMAP	10
#define	MAX_MAPENT	2048	/* required to support up to 64 cpus */

/*
 * Copy-on-write flags for vm_map operations
 */
#define MAP_UNUSED_01		0x0001
#define MAP_COPY_ON_WRITE	0x0002
#define MAP_NOFAULT		0x0004
#define MAP_PREFAULT		0x0008
#define MAP_PREFAULT_PARTIAL	0x0010
#define MAP_DISABLE_SYNCER	0x0020
#define MAP_IS_STACK		0x0040
#define MAP_IS_KSTACK		0x0080
#define MAP_DISABLE_COREDUMP	0x0100
#define MAP_PREFAULT_MADVISE	0x0200	/* from (user) madvise request */

/*
 * vm_fault option flags
 */
#define VM_FAULT_NORMAL		0x00	/* Nothing special */
#define VM_FAULT_CHANGE_WIRING	0x01	/* Change the wiring as appropriate */
#define VM_FAULT_USER_WIRE	0x02	/* Likewise, but for user purposes */
#define VM_FAULT_BURST		0x04	/* Burst fault can be done */
#define VM_FAULT_DIRTY		0x08	/* Dirty the page */
#define VM_FAULT_UNSWAP		0x10	/* Remove backing store from the page */
#define VM_FAULT_BURST_QUICK	0x20	/* Special case shared vm_object */
#define VM_FAULT_WIRE_MASK	(VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
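
/*
 * Illustrative sketch (not part of this header): these flags form the
 * fault_flags argument of vm_fault(), which is declared elsewhere
 * (e.g. <vm/vm_extern.h>).  A wiring operation might fault a page in
 * roughly like this:
 *
 *	error = vm_fault(map, va, VM_PROT_READ, VM_FAULT_CHANGE_WIRING);
 */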

#ifdef _KERNEL

extern struct sysref_class vmspace_sysref_class;

boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t,
		vm_prot_t, boolean_t);
struct pmap;
struct globaldata;
void vm_map_entry_allocate_object(vm_map_entry_t);
void vm_map_entry_reserve_cpu_init(struct globaldata *gd);
int vm_map_entry_reserve(int);
int vm_map_entry_kreserve(int);
void vm_map_entry_release(int);
void vm_map_entry_krelease(int);
vm_map_t vm_map_create (vm_map_t, struct pmap *, vm_offset_t, vm_offset_t);
int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t, int *);
int vm_map_find (vm_map_t, vm_object_t, vm_ooffset_t,
		 vm_offset_t *, vm_size_t, vm_size_t,
		 boolean_t, vm_maptype_t,
		 vm_prot_t, vm_prot_t,
		 int);
int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_size_t,
		      int, vm_offset_t *);
vm_offset_t vm_map_hint(struct proc *, vm_offset_t, vm_prot_t);
int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t, pmap_t);
int vm_map_insert (vm_map_t, int *, vm_object_t, vm_ooffset_t,
		   vm_offset_t, vm_offset_t,
		   vm_maptype_t,
		   vm_prot_t, vm_prot_t,
		   int);
int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *,
		   vm_object_t *, vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t, int);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
int vm_map_wire (vm_map_t, vm_offset_t, vm_offset_t, int);
int vm_map_unwire (vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int, off_t);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t, int *);
void vm_init2 (void);
int vm_uiomove (vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *);
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, int,
		  vm_prot_t, vm_prot_t, int);
int vm_map_growstack (struct proc *p, vm_offset_t addr);
int vmspace_swap_count (struct vmspace *vmspace);
int vmspace_anonymous_count (struct vmspace *vmspace);
void vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size, int *);
void vm_map_transition_wait(vm_map_t map);

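/*
 * Illustrative sketch (not part of this header): an anonymous mapping
 * request via vm_map_find(), letting the kernel pick an address starting
 * at the hint in *addr.  The argument values here are only an example
 * and match the prototype above (object, offset, addr, length, align,
 * fitit, maptype, prot, max_prot, cow):
 *
 *	vm_offset_t addr = hint;
 *	int error;
 *
 *	error = vm_map_find(map, NULL, 0, &addr, size, PAGE_SIZE,
 *			    TRUE, VM_MAPTYPE_NORMAL,
 *			    VM_PROT_ALL, VM_PROT_ALL, 0);
 */
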
#if defined(__amd64__) && defined(_KERNEL_VIRTUAL)
int vkernel_module_memory_alloc(vm_offset_t *, size_t);
void vkernel_module_memory_free(vm_offset_t, size_t);
#endif

#endif
#endif				/* _VM_VM_MAP_H_ */