/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.9 (Berkeley) 5/17/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.h,v 1.54.2.5 2003/01/13 22:51:17 dillon Exp $
 */

/*
 *	Virtual memory map module definitions.
 */

#ifndef	_VM_VM_MAP_H_
#define	_VM_VM_MAP_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifdef _KERNEL
#ifndef _SYS_KERNEL_H_
#include <sys/kernel.h>	/* ticks */
#endif
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _SYS_SYSREF_H_
#include <sys/sysref.h>
#endif
#ifndef _SYS_LOCK_H_
#include <sys/lock.h>
#endif
#ifndef _SYS_VKERNEL_H_
#include <sys/vkernel.h>
#endif
#ifndef _VM_VM_H_
#include <vm/vm.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_VM_OBJECT_H_
#include <vm/vm_object.h>
#endif
#ifndef _SYS_NULL_H_
#include <sys/_null.h>
#endif

struct vm_map_rb_tree;
RB_PROTOTYPE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 */

typedef u_int vm_flags_t;
typedef u_int vm_eflags_t;
/*
 * A vm_map_entry may reference a VM object, a submap, or a uksmap
 * callback (a direct user-kernel shared map).
 */
union vm_map_object {
	struct vm_object *vm_object;	/* VM object */
	struct vm_map *sub_map;		/* belongs to another map */
	int	(*uksmap)(struct cdev *dev, vm_page_t fake);
	void	*map_object;		/* generic */
};

union vm_map_aux {
	vm_offset_t avail_ssize;	/* amount the stack can grow */
	vpte_t master_pde;		/* virtual page table root */
	struct cdev *dev;
	void	*map_aux;
};

/*
 * vm_map_entry identifiers, used as a debugging aid
 */
typedef enum {
	VM_SUBSYS_UNKNOWN,
	VM_SUBSYS_KMALLOC,
	VM_SUBSYS_STACK,
	VM_SUBSYS_IMGACT,
	VM_SUBSYS_EFI,
	VM_SUBSYS_RESERVED,
	VM_SUBSYS_INIT,
	VM_SUBSYS_PIPE,
	VM_SUBSYS_PROC,
	VM_SUBSYS_SHMEM,
	VM_SUBSYS_SYSMAP,
	VM_SUBSYS_MMAP,
	VM_SUBSYS_BRK,
	VM_SUBSYS_BOGUS,
	VM_SUBSYS_BUF,
	VM_SUBSYS_BUFDATA,
	VM_SUBSYS_GD,
	VM_SUBSYS_IPIQ,
	VM_SUBSYS_PVENTRY,
	VM_SUBSYS_PML4,
	VM_SUBSYS_MAPDEV,
	VM_SUBSYS_ZALLOC,

	VM_SUBSYS_DM,
	VM_SUBSYS_CONTIG,
	VM_SUBSYS_DRM,
	VM_SUBSYS_DRM_GEM,
	VM_SUBSYS_DRM_SCAT,
	VM_SUBSYS_DRM_VMAP,
	VM_SUBSYS_DRM_TTM,
	VM_SUBSYS_HAMMER,

	VM_SUBSYS_LIMIT		/* end of list */
} vm_subsys_t;

/*
 *	Address map entries consist of start and end addresses,
 *	a VM object (or sharing map) and offset into that object,
 *	and user-exported inheritance and protection information.
 *	Also included is control information for virtual copy operations.
 *
 *	When used with MAP_STACK, aux.avail_ssize is used to determine the
 *	limits of stack growth.
 *
 *	When used with VM_MAPTYPE_VPAGETABLE, the same aux storage instead
 *	holds the virtual page table root (aux.master_pde).
 */
struct vm_map_entry {
	struct vm_map_entry *prev;	/* previous entry */
	struct vm_map_entry *next;	/* next entry */
	RB_ENTRY(vm_map_entry) rb_entry;
	vm_offset_t start;		/* start address */
	vm_offset_t end;		/* end address */
	union vm_map_aux aux;		/* auxiliary data */
	union vm_map_object object;	/* object this entry references */
	vm_ooffset_t offset;		/* offset into object */
	vm_eflags_t eflags;		/* map entry flags */
	vm_maptype_t maptype;		/* type of VM mapping */
	vm_prot_t protection;		/* protection code */
	vm_prot_t max_protection;	/* maximum protection */
	vm_inherit_t inheritance;	/* inheritance */
	int wired_count;		/* can be paged if = 0 */
	vm_subsys_t id;			/* subsystem id */
};
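
/*
 * Example (illustrative sketch only, not part of the API proper):
 * walking a map's entry list under the shared map lock.  The list is
 * circular with map->header acting as the sentinel; the same pattern
 * appears in vmspace_president_count() below.  vm_map_lock_read() and
 * vm_map_unlock_read() are defined later in this file.
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	for (entry = map->header.next;
 *	     entry != &map->header;
 *	     entry = entry->next) {
 *		(inspect entry->start, entry->end, entry->protection, ...)
 *	}
 *	vm_map_unlock_read(map);
 */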

#define MAP_ENTRY_NOSYNC		0x0001
#define MAP_ENTRY_STACK			0x0002
#define MAP_ENTRY_COW			0x0004
#define MAP_ENTRY_NEEDS_COPY		0x0008
#define MAP_ENTRY_NOFAULT		0x0010
#define MAP_ENTRY_USER_WIRED		0x0020

#define MAP_ENTRY_BEHAV_NORMAL		0x0000	/* default behavior */
#define MAP_ENTRY_BEHAV_SEQUENTIAL	0x0040	/* expect sequential access */
#define MAP_ENTRY_BEHAV_RANDOM		0x0080	/* expect random access */
#define MAP_ENTRY_BEHAV_RESERVED	0x00C0	/* future use */

#define MAP_ENTRY_BEHAV_MASK		0x00C0

#define MAP_ENTRY_IN_TRANSITION		0x0100	/* entry being changed */
#define MAP_ENTRY_NEEDS_WAKEUP		0x0200	/* waiters in transition */
#define MAP_ENTRY_NOCOREDUMP		0x0400	/* don't include in a core */
#define MAP_ENTRY_KSTACK		0x0800	/* guarded kernel stack */

/*
 * flags for vm_map_[un]clip_range()
 */
#define MAP_CLIP_NO_HOLES		0x0001

/*
 * This reserve count for vm_map_entry_reserve() should cover all nominal
 * single-insertion operations, including any necessary clipping.
 */
#define MAP_RESERVE_COUNT	4
#define MAP_RESERVE_SLOP	32

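/*
 * Example (hedged sketch of the intended usage pattern): callers
 * reserve map entries up front so entry allocation cannot block while
 * the map is locked, then hand the returned count back when done.
 * vm_map_entry_reserve() and vm_map_entry_release() are declared near
 * the end of this file.
 *
 *	int count;
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	(insert and/or clip entries...)
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */
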
static __inline u_char
vm_map_entry_behavior(struct vm_map_entry *entry)
{
	return entry->eflags & MAP_ENTRY_BEHAV_MASK;
}

static __inline void
vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
		(behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 * VA interlock for map (VPAGETABLE / vkernel support)
 */
struct vm_map_ilock {
	struct vm_map_ilock *next;
	int	flags;
	vm_offset_t ran_beg;
	vm_offset_t ran_end;	/* non-inclusive */
};

#define ILOCK_WAITING	0x00000001

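/*
 * Example (illustrative sketch): interlocking a VA range across an
 * operation that may block.  vm_map_interlock() and vm_map_deinterlock()
 * are declared near the end of this file; the ilock structure is
 * typically allocated on the caller's stack.
 *
 *	struct vm_map_ilock ilock;
 *
 *	vm_map_interlock(map, &ilock, ran_beg, ran_end);
 *	(operate on the interlocked range...)
 *	vm_map_deinterlock(map, &ilock);
 */
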
/*
 * Hinting mechanism used by vm_map_findspace() to figure out where to start
 * an iteration looking for a hole big enough for the requested allocation.
 * This can be important when large amounts of kernel memory are being
 * managed, for example when the system is running tens of thousands of
 * processes or threads.
 *
 * If a hint is present it guarantees that no compatible hole exists prior
 * to the (start) address.  The (start) address itself is not necessarily
 * a hole.
 */
#define VM_MAP_FFCOUNT	4
#define VM_MAP_FFMASK	(VM_MAP_FFCOUNT - 1)

struct vm_map_freehint {
	vm_offset_t	start;
	vm_offset_t	length;
	vm_offset_t	align;
	int		unused01;
};
typedef struct vm_map_freehint vm_map_freehint_t;

/*
 * Maps are doubly-linked lists of map entries, kept sorted by address.
 * A single hint is provided to start searches again from the last
 * successful search, insertion, or removal.
 *
 * NOTE: The lock structure cannot be the first element of vm_map
 *	 because doing so can produce a lockup between two or more system
 *	 processes trying to kmem_alloc_wait(): the kmem allocation and
 *	 free paths tsleep on and wake up 'map' while the underlying
 *	 lockmgr lock also sleeps on and wakes up 'map'.  The lockup
 *	 occurs when a map fills up, the 'exec' map for example.
 *
 * NOTE: The vm_map structure can be hard-locked with the lockmgr lock
 *	 or soft-serialized with the token, or both.
 */
struct vm_map {
	struct vm_map_entry header;	/* List of entries */
	RB_HEAD(vm_map_rb_tree, vm_map_entry) rb_root;
	struct lock lock;		/* Lock for map data */
	int nentries;			/* Number of entries */
	unsigned int timestamp;		/* Version number */
	vm_size_t size;			/* virtual size */
	u_char system_map;		/* Am I a system map? */
	u_char freehint_newindex;
	u_char unused02;
	u_char unused03;
	vm_flags_t flags;		/* flags for this vm_map */
	vm_map_freehint_t freehint[VM_MAP_FFCOUNT];
	struct pmap *pmap;		/* Physical map */
	u_int president_cache;		/* cached proportional RSS count */
	u_int president_ticks;		/* cache timestamp (ticks / hz) */
	struct vm_map_ilock *ilock_base;/* interlocks */
	struct spinlock ilock_spin;	/* spinlock for interlocks */
	struct lwkt_token token;	/* Soft serializer */
	vm_offset_t pgout_offset;	/* for RLIMIT_RSS scans */
#define	min_offset		header.start
#define max_offset		header.end
};

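/*
 * Example (sketch, assuming the standard LWKT token API): soft-serializing
 * access to a map with its token instead of, or in addition to, the
 * lockmgr lock, per the NOTE above.
 *
 *	lwkt_gettoken(&map->token);
 *	(examine or adjust token-serialized map state...)
 *	lwkt_reltoken(&map->token);
 */
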
/*
 * vm_flags_t values
 */
#define MAP_WIREFUTURE		0x0001	/* wire all future pages */

/*
 * Shareable process virtual address space.
 *
 * Referenced by pointers from vmresident and proc.
 */
struct vmspace {
	struct vm_map vm_map;	/* VM address map */
	struct pmap vm_pmap;	/* private physical map */
	int vm_flags;
	caddr_t vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages */
	segsz_t vm_swrss;	/* resident set size before last swap */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_ssize;	/* stack size (pages) */
	caddr_t vm_taddr;	/* user virtual address of text XXX */
	caddr_t vm_daddr;	/* user virtual address of data XXX */
	caddr_t vm_maxsaddr;	/* user VA at max stack growth */
	caddr_t vm_minsaddr;	/* user VA at top of stack */
#define vm_endcopy	vm_unused01
	int	vm_unused01;
	int	vm_unused02;
	int	vm_pagesupply;
	u_int	vm_holdcnt;	/* temporary hold count and exit sequencing */
	u_int	vm_refcnt;	/* normal ref count */
};

#define VM_REF_DELETED		0x80000000U

#define VMSPACE_EXIT1		0x0001	/* partial exit */
#define VMSPACE_EXIT2		0x0002	/* full exit */

#define VMSPACE_HOLDEXIT	0x80000000

/*
 * Resident executable holding structure.  A user program can take a snapshot
 * of just its VM address space (typically done just after dynamic link
 * libraries have completed loading) and register it as a resident
 * executable associated with the program binary's vnode, which is also
 * locked into memory.  Future execs of the vnode will start with a copy
 * of the resident vmspace instead of running the binary from scratch,
 * avoiding both the kernel ELF loader *AND* all shared library mapping and
 * relocation code, and will call a different entry point (the stack pointer
 * is reset to the top of the stack) supplied when the vmspace was registered.
 */
struct vmresident {
	struct vnode	*vr_vnode;		/* associated vnode */
	TAILQ_ENTRY(vmresident) vr_link;	/* list of resident executables */
	struct vmspace	*vr_vmspace;		/* vmspace to fork */
	intptr_t	vr_entry_addr;		/* registered entry point */
	struct sysentvec *vr_sysent;		/* system call vectors */
	int		vr_id;			/* registration id */
	int		vr_refs;		/* temporary refs */
};

#ifdef _KERNEL
/*
 *	Macros:		vm_map_lock, etc.
 *	Function:
 *		Perform locking on the data portion of a map.  Note that
 *		these macros mimic procedure calls returning void.  The
 *		semicolon is supplied by the user of these macros, not
 *		by the macros themselves.  The macros can safely be used
 *		as unbraced elements in a higher level statement.
 */
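
/*
 * Example (illustrative only): because the macros behave like void
 * procedure calls, they can be used unbraced:
 *
 *	if (exclusive)
 *		vm_map_lock(map);
 *	else
 *		vm_map_lock_read(map);
 */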

#define ASSERT_VM_MAP_LOCKED(map)	KKASSERT(lockowned(&(map)->lock))

#ifdef DIAGNOSTIC
/* #define MAP_LOCK_DIAGNOSTIC 1 */
#ifdef MAP_LOCK_DIAGNOSTIC
#define	vm_map_lock(map) \
	do { \
		kprintf("locking map LK_EXCLUSIVE: %p\n", (void *)(map)); \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#else
#define	vm_map_lock(map) \
	do { \
		if (lockmgr(&(map)->lock, LK_EXCLUSIVE) != 0) { \
			panic("vm_map_lock: failed to get lock"); \
		} \
		(map)->timestamp++; \
	} while(0)
#endif
#else
#define	vm_map_lock(map) \
	do { \
		lockmgr(&(map)->lock, LK_EXCLUSIVE); \
		(map)->timestamp++; \
	} while(0)
#endif /* DIAGNOSTIC */

#if defined(MAP_LOCK_DIAGNOSTIC)
#define	vm_map_unlock(map) \
	do { \
		kprintf("locking map LK_RELEASE: %p\n", (void *)(map)); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#define	vm_map_lock_read(map) \
	do { \
		kprintf("locking map LK_SHARED: %p\n", (void *)(map)); \
		lockmgr(&(map)->lock, LK_SHARED); \
	} while (0)
#define	vm_map_unlock_read(map) \
	do { \
		kprintf("locking map LK_RELEASE: %p\n", (void *)(map)); \
		lockmgr(&(map)->lock, LK_RELEASE); \
	} while (0)
#else
#define	vm_map_unlock(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#define	vm_map_lock_read(map) \
	lockmgr(&(map)->lock, LK_SHARED)
#define	vm_map_unlock_read(map) \
	lockmgr(&(map)->lock, LK_RELEASE)
#endif

#define vm_map_lock_read_try(map) \
	lockmgr(&(map)->lock, LK_SHARED | LK_NOWAIT)

static __inline__ int
vm_map_lock_read_to(vm_map_t map)
{
	int error;

#if defined(MAP_LOCK_DIAGNOSTIC)
	kprintf("locking map LK_SHARED: %p\n", (void *)map);
#endif
	error = lockmgr(&(map)->lock, LK_SHARED | LK_TIMELOCK);
	return error;
}

static __inline__ int
vm_map_lock_upgrade(vm_map_t map)
{
	int error;

#if defined(MAP_LOCK_DIAGNOSTIC)
	kprintf("locking map LK_EXCLUPGRADE: %p\n", (void *)map);
#endif
	error = lockmgr(&map->lock, LK_EXCLUPGRADE);
	if (error == 0)
		map->timestamp++;
	return error;
}

#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_lock_downgrade(map) \
	do { \
		kprintf("locking map LK_DOWNGRADE: %p\n", (void *)(map)); \
		lockmgr(&(map)->lock, LK_DOWNGRADE); \
	} while (0)
#else
#define vm_map_lock_downgrade(map) \
	lockmgr(&(map)->lock, LK_DOWNGRADE)
#endif

#endif /* _KERNEL */

/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

/*
 * Must not block
 */
static __inline struct pmap *
vmspace_pmap(struct vmspace *vmspace)
{
	return &vmspace->vm_pmap;
}

/*
 * Caller must hold the vmspace->vm_map.token
 */
static __inline long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 * Calculates the proportional RSS and returns the accrued result.
 * This is a loose value for statistics/display purposes only and is
 * only updated if we can acquire a non-blocking map lock.
 *
 * (used by userland or the kernel)
 */
static __inline u_int
vmspace_president_count(struct vmspace *vmspace)
{
	vm_map_t map = &vmspace->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	u_int count = 0;

#ifdef _KERNEL
	if (map->president_ticks == ticks / hz || vm_map_lock_read_try(map))
		return(map->president_cache);
#endif

	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		switch(cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->object.vm_object) == NULL)
				break;
			if (object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP) {
				break;
			}

#if 0
			/*
			 * synchronize non-zero case, contents of field
			 * can change at any time due to pmap ops.
			 */
			{
				u_int n;

				if ((n = object->agg_pv_list_count) != 0) {
#ifdef _KERNEL
					cpu_ccfence();
#endif
					count += object->resident_page_count / n;
				}
			}
#endif
			break;
		default:
			break;
		}
	}
#ifdef _KERNEL
	map->president_cache = count;
	map->president_ticks = ticks / hz;
	vm_map_unlock_read(map);
#endif

	return(count);
}

/*
 * Number of kernel maps and entries to statically allocate, required
 * during boot to bootstrap the VM system.
 */
#define MAX_KMAP	10
#define MAX_MAPENT	(SMP_MAXCPU * 32 + 1024)

/*
 * Copy-on-write flags for vm_map operations
 */
#define MAP_UNUSED_01		0x0001
#define MAP_COPY_ON_WRITE	0x0002
#define MAP_NOFAULT		0x0004
#define MAP_PREFAULT		0x0008
#define MAP_PREFAULT_PARTIAL	0x0010
#define MAP_DISABLE_SYNCER	0x0020
#define MAP_IS_STACK		0x0040
#define MAP_IS_KSTACK		0x0080
#define MAP_DISABLE_COREDUMP	0x0100
#define MAP_PREFAULT_MADVISE	0x0200	/* from (user) madvise request */
#define MAP_PREFAULT_RELOCK	0x0200	/* (shares the bit with MADVISE) */

/*
 * vm_fault option flags
 */
#define VM_FAULT_NORMAL		0x00	/* Nothing special */
#define VM_FAULT_CHANGE_WIRING	0x01	/* Change the wiring as appropriate */
#define VM_FAULT_USER_WIRE	0x02	/* Likewise, but for user purposes */
#define VM_FAULT_BURST		0x04	/* Burst fault can be done */
#define VM_FAULT_DIRTY		0x08	/* Dirty the page */
#define VM_FAULT_UNSWAP		0x10	/* Remove backing store from the page */
#define VM_FAULT_BURST_QUICK	0x20	/* Special case shared vm_object */
#define VM_FAULT_WIRE_MASK	(VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
#define VM_FAULT_USERMODE	0x40

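/*
 * Example (hedged sketch; vm_fault() itself is declared elsewhere, in
 * vm/vm_extern.h): resolving a fault for write access and marking the
 * page dirty in one call.
 *
 *	rv = vm_fault(map, trunc_page(va), VM_PROT_WRITE,
 *		      VM_FAULT_NORMAL | VM_FAULT_DIRTY);
 *	if (rv != KERN_SUCCESS)
 *		(handle the failure...)
 */
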
#ifdef _KERNEL

boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t,
		vm_prot_t, boolean_t);
struct pmap;
struct globaldata;
void vm_map_entry_allocate_object(vm_map_entry_t);
void vm_map_entry_reserve_cpu_init(struct globaldata *gd);
int vm_map_entry_reserve(int);
int vm_map_entry_kreserve(int);
void vm_map_entry_release(int);
void vm_map_entry_krelease(int);
int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t, int *);
int vm_map_find (vm_map_t, void *, void *,
		 vm_ooffset_t, vm_offset_t *, vm_size_t,
		 vm_size_t, boolean_t,
		 vm_maptype_t, vm_subsys_t id,
		 vm_prot_t, vm_prot_t, int);
int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_size_t,
		      int, vm_offset_t *);
vm_offset_t vm_map_hint(struct proc *, vm_offset_t, vm_prot_t);
int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t, pmap_t);
int vm_map_insert (vm_map_t, int *, void *, void *,
		   vm_ooffset_t, vm_offset_t, vm_offset_t,
		   vm_maptype_t, vm_subsys_t id,
		   vm_prot_t, vm_prot_t, int);
int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *,
		   vm_object_t *, vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t, int);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
int vm_map_wire (vm_map_t, vm_offset_t, vm_offset_t, int);
int vm_map_unwire (vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int, off_t);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t, int *);
void vm_init2 (void);
int vm_uiomove (vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *);
int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, int,
		  vm_prot_t, vm_prot_t, int);
int vm_map_growstack (vm_map_t map, vm_offset_t addr);
vm_offset_t vmspace_swap_count (struct vmspace *vmspace);
vm_offset_t vmspace_anonymous_count (struct vmspace *vmspace);
void vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size,
			int *countp);
void vm_map_transition_wait(vm_map_t map);

void vm_map_interlock(vm_map_t map, struct vm_map_ilock *ilock,
			vm_offset_t ran_beg, vm_offset_t ran_end);
void vm_map_deinterlock(vm_map_t map, struct vm_map_ilock *ilock);

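/*
 * Example (illustrative sketch of the lookup protocol): vm_map_lookup()
 * returns the map/entry/object backing an address with the map held,
 * and every successful lookup must be paired with vm_map_lookup_done().
 *
 *	vm_map_entry_t entry;
 *	vm_object_t object;
 *	vm_pindex_t pindex;
 *	vm_prot_t prot;
 *	boolean_t wired;
 *	int rv;
 *
 *	rv = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry,
 *			   &object, &pindex, &prot, &wired);
 *	if (rv == KERN_SUCCESS) {
 *		(use object/pindex...)
 *		vm_map_lookup_done(map, entry, 0);
 *	}
 */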

#if defined(__x86_64__) && defined(_KERNEL_VIRTUAL)
int vkernel_module_memory_alloc(vm_offset_t *, size_t);
void vkernel_module_memory_free(vm_offset_t, size_t);
#endif

#endif				/* _KERNEL */
#endif				/* _VM_VM_MAP_H_ */