/*	$OpenBSD: uvm_map.h,v 1.93 2024/10/31 05:00:00 dlg Exp $	*/
/*	$NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $	*/

/*
 * Copyright (c) 2011 Ariane van der Steldt <ariane@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

#include <sys/mutex.h>
#include <sys/rwlock.h>

#ifdef _KERNEL

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(_map, _entry, _addr)				\
	do {								\
		KASSERT((_entry)->end + (_entry)->fspace > (_addr));	\
		if ((_entry)->start < (_addr))				\
			uvm_map_clip_start((_map), (_entry), (_addr));	\
	} while (0)

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(_map, _entry, _addr)				\
	do {								\
		KASSERT((_entry)->start < (_addr));			\
		if ((_entry)->end > (_addr))				\
			uvm_map_clip_end((_map), (_entry), (_addr));	\
	} while (0)
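
/*
 * Illustrative sketch (not part of the original header): a typical
 * range operation clips the entry found at `start' so that it covers
 * exactly the span being modified.  The map pointer, the lookup and
 * the [start, end) range below are hypothetical; per the notes above,
 * the caller must hold the map write-locked.
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock(map);
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		UVM_MAP_CLIP_START(map, entry, start);
 *		UVM_MAP_CLIP_END(map, entry, end);
 *	}
 *	vm_map_unlock(map);
 *
 * After both clips, `entry' begins at `start' and, if it previously
 * extended past `end', now ends at `end'.
 */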
/*
 * extract flags
 */
#define UVM_EXTRACT_FIXPROT	0x8	/* set prot to maxprot as we go */

#endif /* _KERNEL */

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	union {
		RBT_ENTRY(vm_map_entry)	addr_entry; /* address tree */
		SLIST_ENTRY(vm_map_entry) addr_kentry;
	} daddrs;

	union {
		RBT_ENTRY(vm_map_entry)	rbtree;	/* Link freespace tree. */
		TAILQ_ENTRY(vm_map_entry) tailq; /* Link freespace queue. */
		TAILQ_ENTRY(vm_map_entry) deadq; /* dead entry queue */
	} dfree;

#define uvm_map_entry_start_copy start
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */

	vsize_t			guard;		/* bytes in guard */
	vsize_t			fspace;		/* free space */

	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	struct vm_aref		aref;		/* anonymous overlay */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define	UVM_MAP_STATIC		0x01	/* static map entry */
#define	UVM_MAP_KMEM		0x02	/* from kmem entry pool */

	vsize_t			fspace_augment;	/* max(fspace) in subtree */
};

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)

TAILQ_HEAD(uvm_map_deadq, vm_map_entry);	/* dead entry queue */
RBT_HEAD(uvm_map_addr, vm_map_entry);
#ifdef _KERNEL
RBT_PROTOTYPE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
    uvm_mapentry_addrcmp);
#endif
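
/*
 * Illustrative sketch (not part of the original header): entries can
 * be walked in ascending address order with RBT_FOREACH from
 * <sys/tree.h>.  The map pointer is hypothetical; the map should be
 * at least read-locked while the tree is walked.
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock_read(map);
 *	RBT_FOREACH(entry, uvm_map_addr, &map->addr)
 *		printf("entry %p: 0x%lx-0x%lx\n", entry,
 *		    entry->start, entry->end);
 *	vm_map_unlock_read(map);
 */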
/*
 * A map is an rbtree of map entries, kept sorted by address.
 * In addition, free space entries are also kept in an rbtree,
 * indexed by free size.
 *
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  However, it is sometimes required
 * to unlock a VM map (to prevent lock ordering issues) without
 * allowing any other thread to modify it.
 *
 * To support this, we introduce the notion of a `busy' map.  A `busy'
 * map is unlocked, but other threads attempting to write-lock wait
 * for this flag to clear before entering the lock manager.  A map may
 * only be marked busy when the map is write-locked and may only be
 * marked unbusy by the thread which marked it busy.
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * mutex.  Note that some flags are static (set once at map creation
 * time, and never changed), and thus require no locking to check
 * those flags.  All flags which are r/w must be set or cleared while
 * the `flags_lock' is held.  Additional locking requirements are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked; may be tested
 *				without holding `flags_lock'.
 *
 *	VM_MAP_GUARDPAGES	r/o; must be specified at map
 *				initialization time.
 *				If set, guards will appear between
 *				automatic allocations.
 *				No locking required.
 *
 *	VM_MAP_ISVMSPACE	r/o; set by uvmspace_alloc.
 *				Signifies that this map is a vmspace.
 *				(The implementation treats all maps
 *				without this bit as kernel maps.)
 *				No locking required.
 *
 *
 * All automatic allocations (uvm_map without MAP_FIXED) will allocate
 * from vm_map.free.
 * If that allocation fails:
 * - vmspace maps will spill over into vm_map.bfree,
 * - all other maps will call uvm_map_kmem_grow() to increase the arena.
 *
 * vmspace maps have their data, brk() and stack arenas automatically
 * updated when uvm_map() is invoked without MAP_FIXED.
 * The spill-over arena (vm_map.bfree) will contain the space in the brk()
 * and stack ranges.
 * Kernel maps never have a bfree arena and this tree will always be empty.
 *
 *
 * read_locks and write_locks are used in lock debugging code.
 *
 * Locks used to protect struct members in this file:
 *	a	atomic operations
 *	I	immutable after creation or exec(2)
 *	v	`vm_map_lock' (this map `lock' or `mtx')
 *	f	flags_lock
 */
struct vm_map {
	struct pmap		*pmap;		/* [I] Physical map */
	u_long			sserial;	/* [v] # stack changes */

	struct uvm_map_addr	addr;		/* [v] Entry tree, by addr */

	vsize_t			size;		/* virtual size */
	int			ref_count;	/* [a] Reference count */
	int			flags;		/* [f] flags */
	unsigned int		timestamp;	/* Version number */
	struct proc		*busy;		/* [f] thread holding map busy */
	unsigned int		nbusy;		/* [f] waiters for busy */

	vaddr_t			min_offset;	/* [I] First address in map. */
	vaddr_t			max_offset;	/* [I] Last address in map. */

	/*
	 * Allocation overflow regions.
	 */
	vaddr_t			b_start;	/* [v] Start for brk() alloc. */
	vaddr_t			b_end;		/* [v] End for brk() alloc. */
	vaddr_t			s_start;	/* [v] Start for stack alloc. */
	vaddr_t			s_end;		/* [v] End for stack alloc. */

	/*
	 * Special address selectors.
	 *
	 * The uaddr_exe mapping is used if:
	 * - protX is selected,
	 * - the pointer is not NULL.
	 *
	 * If uaddr_exe is not used, the other mappings are checked in
	 * order of appearance.
	 * If a hint is given, the selection will only be used if the hint
	 * falls in the range described by the mapping.
	 *
	 * The states are pointers because:
	 * - they may not all be in use,
	 * - the struct size for different schemes is variable.
	 *
	 * The uaddr_brk_stack selector will select addresses that are in
	 * the brk/stack area of the map.
	 */
	struct uvm_addr_state	*uaddr_exe;	/* Executable selector. */
	struct uvm_addr_state	*uaddr_any[4];	/* More selectors. */
	struct uvm_addr_state	*uaddr_brk_stack; /* Brk/stack selector. */

#define UVM_MAP_CHECK_COPYIN_MAX 4	/* main, sigtramp, ld.so, libc.so */
	struct uvm_check_copyin {
		vaddr_t		start, end;
	} check_copyin[UVM_MAP_CHECK_COPYIN_MAX];
	int			check_copyin_count;

	/*
	 * XXX struct mutex changes size because of compile options, so
	 * place after fields which are inspected by libkvm / procmap(8)
	 */
	struct rwlock		lock;		/* Non-intrsafe lock */
	struct mutex		mtx;		/* Intrsafe lock */
	struct mutex		flags_lock;	/* flags lock */
};

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01	/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02	/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04	/* rw: wire future mappings */
#define	VM_MAP_GUARDPAGES	0x20	/* rw: add guard pgs to map */
#define	VM_MAP_ISVMSPACE	0x40	/* ro: map is a vmspace */
#define	VM_MAP_PINSYSCALL_ONCE	0x100	/* rw: pinsyscall done */
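
/*
 * Illustrative sketch (not part of the original header): setting a
 * r/w flag such as VM_MAP_WIREFUTURE per the protocol above.  The
 * map pointer is hypothetical; the map is held write-locked and the
 * flag word itself is covered by `flags_lock'.
 *
 *	vm_map_lock(map);
 *	mtx_enter(&map->flags_lock);
 *	map->flags |= VM_MAP_WIREFUTURE;
 *	mtx_leave(&map->flags_lock);
 *	vm_map_unlock(map);
 */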
/* Number of kernel maps and entries to statically allocate */
#define	MAX_KMAPENT	1024	/* Sufficient to make it to the scheduler. */

#ifdef _KERNEL
/*
 * globals:
 */

extern vaddr_t		uvm_maxkaddr;

/*
 * protos: the following prototypes define the interface to vm_map
 */

void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, int);
struct vm_map	*uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
vaddr_t		uvm_map_pie(vaddr_t);
vaddr_t		uvm_map_hint(struct vmspace *, vm_prot_t, vaddr_t, vaddr_t);
int		uvm_map_check_copyin_add(struct vm_map *, vaddr_t, vaddr_t);
int		uvm_map_immutable(struct vm_map *, vaddr_t, vaddr_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
boolean_t	uvm_map_lookup_entry(struct vm_map *, vaddr_t,
		    vm_map_entry_t *);
boolean_t	uvm_map_is_stack_remappable(struct vm_map *, vaddr_t,
		    vsize_t, int);
int		uvm_map_remap_as_stack(struct proc *, vaddr_t, vsize_t);
void		uvm_map_setup(struct vm_map *, pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
void		uvm_unmap(struct vm_map *, vaddr_t, vaddr_t);
void		uvm_unmap_detach(struct uvm_map_deadq *, int);
int		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct uvm_map_deadq *, boolean_t, boolean_t, boolean_t);
void		uvm_map_set_uaddr(struct vm_map *, struct uvm_addr_state **,
		    struct uvm_addr_state *);
int		uvm_map_mquery(struct vm_map *, vaddr_t *, vsize_t, voff_t,
		    int);

struct p_inentry;

int		uvm_map_inentry_sp(vm_map_entry_t);
boolean_t	uvm_map_inentry(struct proc *, struct p_inentry *,
		    vaddr_t addr, const char *fmt, int (*fn)(vm_map_entry_t),
		    u_long serial);

struct kinfo_vmentry;

int		uvm_map_fill_vmmap(struct vm_map *, struct kinfo_vmentry *,
		    size_t *);
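
/*
 * Illustrative sketch (not part of the original header): dead entries
 * are collected on a uvm_map_deadq while the map is locked and are
 * only disposed of once the lock has been dropped, which is roughly
 * the shape of uvm_unmap().  The map pointer, range and boolean
 * arguments below are assumptions for the sketch.
 *
 *	struct uvm_map_deadq dead;
 *
 *	TAILQ_INIT(&dead);
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead, FALSE, TRUE, FALSE);
 *	vm_map_unlock(map);
 *	uvm_unmap_detach(&dead, 0);
 */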
/*
 * VM map locking operations:
 *
 *	These operations perform locking on the data portion of the
 *	map.
 *
 *	vm_map_lock_try: try to lock a map, failing if it is already locked.
 *
 *	vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 *	vm_map_lock_read: acquire a shared (read) lock on a map.
 *
 *	vm_map_unlock: release an exclusive lock on a map.
 *
 *	vm_map_unlock_read: release a shared lock on a map.
 *
 *	vm_map_busy: mark a map as busy.
 *
 *	vm_map_unbusy: clear busy status on a map.
 */

boolean_t	vm_map_lock_try_ln(struct vm_map *, char *, int);
void		vm_map_lock_ln(struct vm_map *, char *, int);
void		vm_map_lock_read_ln(struct vm_map *, char *, int);
void		vm_map_unlock_ln(struct vm_map *, char *, int);
void		vm_map_unlock_read_ln(struct vm_map *, char *, int);
void		vm_map_busy_ln(struct vm_map *, char *, int);
void		vm_map_unbusy_ln(struct vm_map *, char *, int);
void		vm_map_assert_anylock_ln(struct vm_map *, char *, int);
void		vm_map_assert_wrlock_ln(struct vm_map *, char *, int);

#ifdef DIAGNOSTIC
#define vm_map_lock_try(map)	vm_map_lock_try_ln(map, __FILE__, __LINE__)
#define vm_map_lock(map)	vm_map_lock_ln(map, __FILE__, __LINE__)
#define vm_map_lock_read(map)	vm_map_lock_read_ln(map, __FILE__, __LINE__)
#define vm_map_unlock(map)	vm_map_unlock_ln(map, __FILE__, __LINE__)
#define vm_map_unlock_read(map)	vm_map_unlock_read_ln(map, __FILE__, __LINE__)
#define vm_map_busy(map)	vm_map_busy_ln(map, __FILE__, __LINE__)
#define vm_map_unbusy(map)	vm_map_unbusy_ln(map, __FILE__, __LINE__)
#define vm_map_assert_anylock(map)	\
		vm_map_assert_anylock_ln(map, __FILE__, __LINE__)
#define vm_map_assert_wrlock(map)	\
		vm_map_assert_wrlock_ln(map, __FILE__, __LINE__)
#else
#define vm_map_lock_try(map)	vm_map_lock_try_ln(map, NULL, 0)
#define vm_map_lock(map)	vm_map_lock_ln(map, NULL, 0)
#define vm_map_lock_read(map)	vm_map_lock_read_ln(map, NULL, 0)
#define vm_map_unlock(map)	vm_map_unlock_ln(map, NULL, 0)
#define vm_map_unlock_read(map)	vm_map_unlock_read_ln(map, NULL, 0)
#define vm_map_busy(map)	vm_map_busy_ln(map, NULL, 0)
#define vm_map_unbusy(map)	vm_map_unbusy_ln(map, NULL, 0)
#define vm_map_assert_anylock(map)	vm_map_assert_anylock_ln(map, NULL, 0)
#define vm_map_assert_wrlock(map)	vm_map_assert_wrlock_ln(map, NULL, 0)
#endif

void		uvm_map_lock_entry(struct vm_map_entry *);
void		uvm_map_unlock_entry(struct vm_map_entry *);

#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */