xref: /original-bsd/sys/vm/vm_map.h (revision f17085de)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_map.h	8.9 (Berkeley) 05/17/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory map module definitions.
 */

#ifndef	_VM_MAP_
#define	_VM_MAP_

/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 */

/*
 *	Objects which live in maps may be either VM objects, or
 *	another map (called a "sharing map") which denotes read-write
 *	sharing with other maps.
 */

union vm_map_object {
	struct vm_object	*vm_object;	/* VM object */
	struct vm_map		*share_map;	/* share map */
	struct vm_map		*sub_map;	/* belongs to another map */
};

/*
 *	Address map entries consist of start and end addresses,
 *	a VM object (or sharing map) and offset into that object,
 *	and user-exported inheritance and protection information.
 *	Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vm_offset_t		start;		/* start address */
	vm_offset_t		end;		/* end address */
	union vm_map_object	object;		/* object I point to */
	vm_offset_t		offset;		/* offset into object */
	boolean_t		is_a_map;	/* Is "object" a map? */
	boolean_t		is_sub_map;	/* Is "object" a submap? */
		/* Only in sharing maps: */
	boolean_t		copy_on_write;	/* is data copy-on-write */
	boolean_t		needs_copy;	/* does object need to be copied */
		/* Only in task maps: */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if = 0 */
};
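
/*
 * Illustrative sketch, not part of the original interface: the object
 * union above carries no tag of its own, so callers discriminate it
 * with the is_a_map/is_sub_map flags before touching object.vm_object.
 * The helper name below is hypothetical; only the fields it reads come
 * from the structure above.
 */
#ifdef notdef
static struct vm_object *
example_entry_object(struct vm_map_entry *entry)
{
	/* When either flag is set, "object" really holds a vm_map pointer. */
	if (entry->is_a_map || entry->is_sub_map)
		return (NULL);
	return (entry->object.vm_object);
}
#endif /* notdef */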

/*
 *	Maps are doubly-linked lists of map entries, kept sorted
 *	by address.  A single hint is provided to start
 *	searches again from the last successful search,
 *	insertion, or removal.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	lock_data_t		lock;		/* Lock for map data */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vm_size_t		size;		/* virtual size */
	boolean_t		is_main_map;	/* Am I a main map? */
	int			ref_count;	/* Reference count */
	simple_lock_data_t	ref_lock;	/* Lock for ref_count field */
	vm_map_entry_t		hint;		/* hint for quick lookups */
	simple_lock_data_t	hint_lock;	/* lock for hint storage */
	vm_map_entry_t		first_free;	/* First free space hint */
	boolean_t		entries_pageable; /* are map entries pageable? */
	unsigned int		timestamp;	/* Version number */
#define	min_offset		header.start
#define max_offset		header.end
};
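
/*
 * Illustrative sketch, not part of the original interface: the hinted
 * forward scan that the sorted entry list supports.  It follows the
 * spirit of vm_map_lookup_entry() in vm_map.c, but this simplified,
 * hypothetical rendering ignores hint_lock and the backward search the
 * real routine performs.
 */
#ifdef notdef
static boolean_t
example_lookup_entry(vm_map_t map, vm_offset_t addr, vm_map_entry_t *out)
{
	vm_map_entry_t cur;

	/* Restart from the list head if the hint is unset or lies past addr. */
	cur = map->hint;
	if (cur == &map->header || addr < cur->start)
		cur = map->header.next;

	/* Entries are sorted and non-overlapping, so a forward scan suffices. */
	for (; cur != &map->header; cur = cur->next) {
		if (addr < cur->start)
			break;			/* addr falls in a hole */
		if (addr < cur->end) {
			*out = map->hint = cur;	/* remember for next time */
			return (TRUE);
		}
	}
	*out = cur->prev;			/* entry preceding the hole */
	return (FALSE);
}
#endif /* notdef */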

/*
 *	Map versions are used to validate a previous lookup attempt.
 *
 *	Since lookup operations may involve both a main map and
 *	a sharing map, it is necessary to have a timestamp from each.
 *	[If the main map timestamp has changed, the share_map and
 *	associated timestamp are no longer valid; the map version
 *	does not include a reference for the embedded share_map.]
 */
typedef struct {
	int		main_timestamp;
	vm_map_t	share_map;
	int		share_timestamp;
} vm_map_version_t;
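
/*
 * Illustrative sketch, not part of the original interface: capturing a
 * map version before dropping the lock and checking it later, following
 * the rule stated above (a changed main timestamp invalidates the saved
 * share_map pair).  Both helper names are hypothetical.
 */
#ifdef notdef
static void
example_save_version(vm_map_t map, vm_map_t share_map, vm_map_version_t *v)
{
	v->main_timestamp = map->timestamp;
	v->share_map = share_map;
	v->share_timestamp = (share_map != NULL) ? share_map->timestamp : 0;
}

static boolean_t
example_version_valid(vm_map_t map, vm_map_version_t *v)
{
	if (v->main_timestamp != map->timestamp)
		return (FALSE);		/* main map changed; lookup is stale */
	if (v->share_map != NULL &&
	    v->share_timestamp != v->share_map->timestamp)
		return (FALSE);		/* sharing map changed */
	return (TRUE);
}
#endif /* notdef */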

/*
 *	Macros:		vm_map_lock, etc.
 *	Function:
 *		Perform locking on the data portion of a map.
 */

#include <sys/proc.h>	/* XXX for curproc and p_pid */

#define	vm_map_lock_drain_interlock(map) { \
	lockmgr(&(map)->lock, LK_DRAIN|LK_INTERLOCK, \
		&(map)->ref_lock, curproc); \
	(map)->timestamp++; \
}
#ifdef DIAGNOSTIC
#define	vm_map_lock(map) { \
	if (lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc) != 0) { \
		panic("vm_map_lock: failed to get lock"); \
	} \
	(map)->timestamp++; \
}
#else
#define	vm_map_lock(map) { \
	lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc); \
	(map)->timestamp++; \
}
#endif /* DIAGNOSTIC */
#define	vm_map_unlock(map) \
		lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)
#define	vm_map_lock_read(map) \
		lockmgr(&(map)->lock, LK_SHARED, (void *)0, curproc)
#define	vm_map_unlock_read(map) \
		lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)
#define vm_map_set_recursive(map) { \
	simple_lock(&(map)->lock.lk_interlock); \
	(map)->lock.lk_flags |= LK_CANRECURSE; \
	simple_unlock(&(map)->lock.lk_interlock); \
}
#define vm_map_clear_recursive(map) { \
	simple_lock(&(map)->lock.lk_interlock); \
	(map)->lock.lk_flags &= ~LK_CANRECURSE; \
	simple_unlock(&(map)->lock.lk_interlock); \
}
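
/*
 * Illustrative sketch, not part of the original interface: the usual
 * bracket around a destructive map operation.  vm_map_lock() takes the
 * exclusive lock and bumps the timestamp; vm_map_unlock() releases it.
 * The wrapper name is hypothetical, though it matches the shape of
 * vm_map_remove() in vm_map.c.
 */
#ifdef notdef
static int
example_remove_range(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int result;

	vm_map_lock(map);		/* exclusive; increments timestamp */
	result = vm_map_delete(map, start, end);
	vm_map_unlock(map);
	return (result);
}
#endif /* notdef */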
/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

/* XXX: number of kernel maps and entries to statically allocate */
#define MAX_KMAP	10
#define	MAX_KMAPENT	500

#ifdef KERNEL
boolean_t	 vm_map_check_protection __P((vm_map_t,
		    vm_offset_t, vm_offset_t, vm_prot_t));
int		 vm_map_copy __P((vm_map_t, vm_map_t, vm_offset_t,
		    vm_size_t, vm_offset_t, boolean_t, boolean_t));
void		 vm_map_copy_entry __P((vm_map_t,
		    vm_map_t, vm_map_entry_t, vm_map_entry_t));
struct pmap;
vm_map_t	 vm_map_create __P((struct pmap *,
		    vm_offset_t, vm_offset_t, boolean_t));
void		 vm_map_deallocate __P((vm_map_t));
int		 vm_map_delete __P((vm_map_t, vm_offset_t, vm_offset_t));
vm_map_entry_t	 vm_map_entry_create __P((vm_map_t));
void		 vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
void		 vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
void		 vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
int		 vm_map_find __P((vm_map_t, vm_object_t,
		    vm_offset_t, vm_offset_t *, vm_size_t, boolean_t));
int		 vm_map_findspace __P((vm_map_t,
		    vm_offset_t, vm_size_t, vm_offset_t *));
int		 vm_map_inherit __P((vm_map_t,
		    vm_offset_t, vm_offset_t, vm_inherit_t));
void		 vm_map_init __P((struct vm_map *,
		    vm_offset_t, vm_offset_t, boolean_t));
int		 vm_map_insert __P((vm_map_t,
		    vm_object_t, vm_offset_t, vm_offset_t, vm_offset_t));
int		 vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t,
		    vm_map_entry_t *, vm_object_t *, vm_offset_t *, vm_prot_t *,
		    boolean_t *, boolean_t *));
void		 vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
boolean_t	 vm_map_lookup_entry __P((vm_map_t,
		    vm_offset_t, vm_map_entry_t *));
int		 vm_map_pageable __P((vm_map_t,
		    vm_offset_t, vm_offset_t, boolean_t));
int		 vm_map_clean __P((vm_map_t,
		    vm_offset_t, vm_offset_t, boolean_t, boolean_t));
void		 vm_map_print __P((vm_map_t, boolean_t));
int		 vm_map_protect __P((vm_map_t,
		    vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
void		 vm_map_reference __P((vm_map_t));
int		 vm_map_remove __P((vm_map_t, vm_offset_t, vm_offset_t));
void		 vm_map_simplify __P((vm_map_t, vm_offset_t));
void		 vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
void		 vm_map_startup __P((void));
int		 vm_map_submap __P((vm_map_t,
		    vm_offset_t, vm_offset_t, vm_map_t));
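
/*
 * Illustrative sketch, not part of the original interface: a typical
 * use of vm_map_find() to carve out pageable, zero-fill space, in the
 * style of kmem_alloc_pageable() in vm_kern.c.  The wrapper name is
 * hypothetical; a NULL object requests anonymous memory, and passing
 * TRUE for find_space asks the map to locate a free range at or above
 * the starting address.
 */
#ifdef notdef
static vm_offset_t
example_alloc_pageable(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);		/* search from the bottom of the map */
	result = vm_map_find(map, NULL, (vm_offset_t)0, &addr, size, TRUE);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}
#endif /* notdef */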
#endif /* KERNEL */
#endif /* _VM_MAP_ */