/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2003-2017 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Resident memory system definitions.
 */

#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_PMAP_H_
#include <vm/pmap.h>
#endif
#include <machine/atomic.h>

#ifdef _KERNEL

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif

#ifdef __x86_64__
#include <machine/vmparam.h>
#endif

#endif

/*
 * vm_page structure
 *
 * hard-busy: (PBUSY_LOCKED)
 *
 *	Hard-busying a page allows major manipulation of the page structure.
 *	No new soft-busies can accumulate while a page is hard-busied.  The
 *	page busying code typically waits for all soft-busies to drop before
 *	allowing the hard-busy.
 *
 * soft-busy: (PBUSY_MASK)
 *
 *	Soft-busying a page typically indicates I/O or read-only use of
 *	the content.  A page can have multiple soft-busies on it.  New
 *	soft-busies block on any hard-busied page (wait for the hard-busy
 *	to go away).
 *
 * hold_count
 *
 *	This prevents a page from being freed.  This does not prevent any
 *	other operation.  The page may still be disassociated from its
 *	object and essentially scrapped.  It just won't be reused while
 *	a non-zero hold_count is present.
 *
 * wire_count
 *
 *	This indicates that the page has been wired into memory somewhere
 *	(typically a buffer cache buffer, or a user wire).  The pageout
 *	daemon will skip wired pages.
 */
TAILQ_HEAD(pglist, vm_page);

struct vm_object;

int rb_vm_page_compare(struct vm_page *, struct vm_page *);

struct vm_page_rb_tree;
RB_PROTOTYPE2(vm_page_rb_tree, vm_page, rb_entry,
	      rb_vm_page_compare, vm_pindex_t);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list (P) */
	RB_ENTRY(vm_page) rb_entry;	/* Red-Black tree based at object */
	struct spinlock spin;
	struct vm_object *object;	/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	uint16_t queue;			/* page queue index */
	uint16_t pc;			/* page color */
	uint8_t act_count;		/* page usage count */
	uint8_t pat_mode;		/* hardware page attribute */
	uint8_t valid;			/* map of valid DEV_BSIZE chunks */
	uint8_t dirty;			/* map of dirty DEV_BSIZE chunks */
	uint32_t flags;			/* see below */
	uint32_t wire_count;		/* wired down maps refs (P) */
	uint32_t busy_count;		/* soft-busy and hard-busy */
	int	hold_count;		/* page hold count */
	int	ku_pagecnt;		/* kmalloc helper */
#ifdef VM_PAGE_DEBUG
	const char *busy_func;
	int	busy_line;
#endif
};

#define PBUSY_LOCKED		0x80000000U
#define PBUSY_WANTED		0x40000000U
#define PBUSY_SWAPINPROG	0x20000000U
#define PBUSY_MASK		0x1FFFFFFFU

#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif
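
/*
 * Illustrative sketch only (not part of the vm_page API): how a soft-busy
 * acquisition can be layered on top of the busy_count encoding above.  The
 * helper name is hypothetical and the kernel's real implementation
 * (vm_page_sbusy_try() in vm_page.c) is more involved; for instance, this
 * sketch does not guard against the soft-busy count overflowing PBUSY_MASK.
 */
static __inline int
example_sbusy_try(vm_page_t m)
{
	uint32_t count = m->busy_count;

	if (count & PBUSY_LOCKED)	/* hard-busied, would have to block */
		return (0);
	/*
	 * Atomically bump the PBUSY_MASK counter.  atomic_cmpset_int()
	 * returns non-zero on success and fails if we raced another cpu.
	 */
	return (atomic_cmpset_int(&m->busy_count, count, count + 1));
}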

/*
 * Page coloring parameters.  We use generous parameters designed to
 * statistically spread pages over available cpu cache space.  This has
 * become less important over time as cache associativity has increased,
 * but we still use the core algorithm to help reduce lock contention
 * between cpus.
 *
 * Page coloring cannot be disabled.
 *
 * In today's world of many-core systems, we must be able to provide enough VM
 * page queues for each logical cpu thread to cover the L1/L2/L3 cache set
 * associativity.  If we don't, the cpu caches will not be properly utilized.
 * Using 2048 allows 8-way set-assoc with 256 logical cpus.
 */
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 2048	/* Must be enough for maximal ncpus x hw set-assoc */
#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#define PQ_NONE		0
#define PQ_FREE		(1 + 0*PQ_L2_SIZE)
#define PQ_INACTIVE	(1 + 1*PQ_L2_SIZE)
#define PQ_ACTIVE	(1 + 2*PQ_L2_SIZE)
#define PQ_CACHE	(1 + 3*PQ_L2_SIZE)
#define PQ_HOLD		(1 + 4*PQ_L2_SIZE)
#define PQ_COUNT	(1 + 5*PQ_L2_SIZE)

/*
 * Scan support
 */
struct vm_map;

struct rb_vm_page_scan_info {
	vm_pindex_t	start_pindex;
	vm_pindex_t	end_pindex;
	int		limit;
	int		desired;
	int		error;
	int		pagerflags;
	int		count;
	int		unused01;
	vm_offset_t	addr;
	vm_pindex_t	backing_offset_index;
	struct vm_object *object;
	struct vm_object *backing_object;
	struct vm_page	*mpte;
	struct pmap	*pmap;
	struct vm_map	*map;
};

int rb_vm_page_scancmp(struct vm_page *, void *);

struct vpgqueues {
	struct spinlock spin;
	struct pglist pl;
	long	lcnt;
	long	adds;		/* heuristic, add operations */
	int	cnt_offset;	/* offset into vmstats structure (int) */
} __aligned(64);

extern struct vpgqueues vm_page_queues[PQ_COUNT];
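
/*
 * Illustrative sketch only: how a queue index is typically formed from a
 * base queue and the page color.  Each queue type (PQ_FREE, PQ_ACTIVE, etc)
 * is really PQ_L2_SIZE separate queues selected by the color, which spreads
 * pages, and spinlock contention, across cpus and cache sets.  The helper
 * name is hypothetical, not part of the vm_page API.
 */
static __inline struct vpgqueues *
example_pick_queue(int basequeue, vm_page_t m)
{
	/* select one of the PQ_L2_SIZE queues of the given type by color */
	return (&vm_page_queues[basequeue + (m->pc & PQ_L2_MASK)]);
}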

/*
 * These are the flags defined for vm_page.
 *
 * PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 * not under PV management but otherwise should be treated as a
 * normal page.  Pages not under PV management cannot be paged out
 * via the object/vm_page_t because there is no knowledge of their
 * pte mappings, nor can they be removed from their objects via
 * the object, and such pages are also not on any PQ queue.  The
 * PG_MAPPED and PG_WRITEABLE flags are not applicable.
 *
 * PG_MAPPED only applies to managed pages, indicating whether the page
 * is mapped onto one or more pmaps.  A page might still be mapped to
 * special pmaps in an unmanaged fashion, for example when mapped into a
 * buffer cache buffer, without setting PG_MAPPED.
 *
 * PG_WRITEABLE indicates that there may be a writeable managed pmap entry
 * somewhere, and that the page can be dirtied by hardware at any time
 * and may have to be tested for that.  The modified bit in unmanaged
 * mappings or in the special clean map is not tested.
 *
 * PG_SWAPPED indicates that the page is backed by a swap block.  Any
 * VM object type other than OBJT_DEFAULT can have swap-backed pages now.
 */
#define PG_UNUSED0001	0x00000001
#define PG_UNUSED0002	0x00000002
#define PG_WINATCFLS	0x00000004	/* flush dirty page on inactive q */
#define PG_FICTITIOUS	0x00000008	/* physical page doesn't exist (O) */
#define PG_WRITEABLE	0x00000010	/* page is writeable */
#define PG_MAPPED	0x00000020	/* page is mapped (managed) */
#define PG_UNUSED0040	0x00000040
#define PG_REFERENCED	0x00000080	/* page has been referenced */
#define PG_CLEANCHK	0x00000100	/* page will be checked for cleaning */
#define PG_UNUSED0200	0x00000200
#define PG_NOSYNC	0x00000400	/* do not collect for syncer */
#define PG_UNMANAGED	0x00000800	/* No PV management for page */
#define PG_MARKER	0x00001000	/* special queue marker page */
#define PG_RAM		0x00002000	/* read ahead mark */
#define PG_SWAPPED	0x00004000	/* backed by swap */
#define PG_NOTMETA	0x00008000	/* do not back with swap */
#define PG_UNUSED10000	0x00010000
#define PG_UNUSED20000	0x00020000
#define PG_NEED_COMMIT	0x00040000	/* clean page requires commit */

#define PG_KEEP_NEWPAGE_MASK	(0)

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef VM_PAGE_DEBUG
#define VM_PAGE_DEBUG_EXT(name)	name ## _debug
#define VM_PAGE_DEBUG_ARGS	, const char *func, int lineno
#else
#define VM_PAGE_DEBUG_EXT(name)	name
#define VM_PAGE_DEBUG_ARGS
#endif

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of four lists:
 *
 *	free
 *		Available for allocation now.
 *
 * The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 */

extern struct vm_page *vm_page_array;	/* First resident page in table */
extern vm_pindex_t vm_page_array_size;	/* number of vm_page_t's */
extern vm_pindex_t first_page;		/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)	\
	(&vm_page_array[atop(pa) - first_page])

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif
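
/*
 * Illustrative sketch only: how a (base, size) byte range within a page
 * maps to bits in the valid/dirty fields above.  Each bit covers one
 * DEV_BSIZE (512 byte) chunk, so a 4096-byte page uses 8 bits and
 * VM_PAGE_BITS_ALL is 0xff.  The helper name is hypothetical; the real
 * conversion is vm_page_bits(), declared below.
 */
static __inline int
example_page_bits(int base, int size)
{
	int first = base / 512;			/* 512 == DEV_BSIZE */
	int last = (base + size - 1) / 512;	/* assumes size > 0 */

	/* contiguous run of bits covering chunks first..last inclusive */
	return (((1 << (last - first + 1)) - 1) << first);
}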

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.
 */
#define VM_ALLOC_NORMAL		0x0001	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x0002	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x0004	/* ok to exhaust entire free list */
#define VM_ALLOC_ZERO		0x0008	/* req pre-zero'd memory if avail */
#define VM_ALLOC_QUICK		0x0010	/* like NORMAL but do not use cache */
#define VM_ALLOC_FORCE_ZERO	0x0020	/* zero page even if already valid */
#define VM_ALLOC_NULL_OK	0x0040	/* ok to return NULL on collision */
#define VM_ALLOC_RETRY		0x0080	/* indefinite block (vm_page_grab()) */
#define VM_ALLOC_USE_GD		0x0100	/* use per-gd cache */
#define VM_ALLOC_CPU_SPEC	0x0200

#define VM_ALLOC_CPU_SHIFT	16
#define VM_ALLOC_CPU(n)		(((n) << VM_ALLOC_CPU_SHIFT) | \
				 VM_ALLOC_CPU_SPEC)
#define VM_ALLOC_GETCPU(flags)	((flags) >> VM_ALLOC_CPU_SHIFT)
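
/*
 * Illustrative sketch only: composing an allocation request targeted at a
 * particular cpu's queues.  The cpu id rides in the high bits via
 * VM_ALLOC_CPU() and is recovered on the other side with VM_ALLOC_GETCPU().
 * The helper name is hypothetical, not part of the vm_page API; note that
 * at least one of NORMAL/SYSTEM/INTERRUPT must always be passed.
 */
static __inline int
example_alloc_flags(int cpuid)
{
	return (VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_CPU(cpuid));
}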

void vm_page_queue_spin_lock(vm_page_t);
void vm_page_queues_spin_lock(u_short);
void vm_page_and_queue_spin_lock(vm_page_t);

void vm_page_queue_spin_unlock(vm_page_t);
void vm_page_queues_spin_unlock(u_short);
void vm_page_and_queue_spin_unlock(vm_page_t m);

void vm_page_init(vm_page_t m);
void vm_page_io_finish(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_need_commit(vm_page_t m);
void vm_page_clear_commit(vm_page_t m);
void vm_page_wakeup(vm_page_t m);
void vm_page_hold(vm_page_t);
void vm_page_unhold(vm_page_t);
void vm_page_activate (vm_page_t);

vm_size_t vm_contig_avail_pages(void);
vm_page_t vm_page_alloc (struct vm_object *, vm_pindex_t, int);
vm_page_t vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high,
			unsigned long alignment, unsigned long boundary,
			unsigned long size, vm_memattr_t memattr);

vm_page_t vm_page_grab (struct vm_object *, vm_pindex_t, int);
void vm_page_cache (vm_page_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_deactivate_locked (vm_page_t);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert (vm_page_t, struct vm_object *, vm_pindex_t);

vm_page_t vm_page_hash_get(vm_object_t object, vm_pindex_t pindex);

vm_page_t vm_page_lookup (struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup_sbusy_try(struct vm_object *object,
			vm_pindex_t pindex, int pgoff, int pgbytes);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_wait)(
			struct vm_object *, vm_pindex_t, int, const char *
			VM_PAGE_DEBUG_ARGS);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_try)(
			struct vm_object *, vm_pindex_t, int, int *
			VM_PAGE_DEBUG_ARGS);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, struct vm_object *, vm_pindex_t);
void vm_page_startup (void);
void vm_numa_organize(vm_paddr_t ran_beg, vm_paddr_t bytes, int physid);
void vm_numa_organize_finalize(void);
void vm_page_unmanage (vm_page_t);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_unqueue (vm_page_t);
void vm_page_unqueue_nowakeup (vm_page_t);
vm_page_t vm_page_next (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_set_validdirty (vm_page_t, int, int);
void vm_page_set_valid (vm_page_t, int, int);
void vm_page_set_dirty (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
vm_page_t vm_page_list_find(int basequeue, int index);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_free_contig(vm_page_t m, unsigned long size);
vm_page_t vm_page_free_fromq_fast(void);
void vm_page_dirty(vm_page_t m);
void vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg);
int vm_page_sbusy_try(vm_page_t m);
void VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m,
			int also_m_busy, const char *wmsg VM_PAGE_DEBUG_ARGS);
int VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m,
			int also_m_busy VM_PAGE_DEBUG_ARGS);
u_short vm_get_pg_color(int cpuid, vm_object_t object, vm_pindex_t pindex);

#ifdef VM_PAGE_DEBUG

#define vm_page_lookup_busy_wait(object, pindex, alsob, msg)		\
	vm_page_lookup_busy_wait_debug(object, pindex, alsob, msg,	\
				       __func__, __LINE__)

#define vm_page_lookup_busy_try(object, pindex, alsob, errorp)		\
	vm_page_lookup_busy_try_debug(object, pindex, alsob, errorp,	\
				      __func__, __LINE__)

#define vm_page_busy_wait(m, alsob, msg)				\
	vm_page_busy_wait_debug(m, alsob, msg, __func__, __LINE__)

#define vm_page_busy_try(m, alsob)					\
	vm_page_busy_try_debug(m, alsob, __func__, __LINE__)

#endif

#endif				/* _KERNEL */
#endif				/* !_VM_VM_PAGE_H_ */
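
/*
 * Illustrative usage sketch (hypothetical caller, kept in a comment so it
 * stays out of the compiled header): the common lookup/hard-busy/wakeup
 * pattern built from the declarations above.  The object, pindex, and
 * wmsg values are placeholders.
 *
 *	vm_page_t m;
 *
 *	m = vm_page_lookup_busy_wait(obj, pindex, TRUE, "vmpgl");
 *	if (m) {
 *		... operate on the hard-busied page ...
 *		vm_page_wakeup(m);	release the hard-busy
 *	}
 */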