/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_page.h,v 1.75.2.8 2002/03/06 01:07:09 dillon Exp $
 * $DragonFly: src/sys/vm/vm_page.h,v 1.22 2005/08/27 00:56:57 dillon Exp $
 */

/*
 * Resident memory system definitions.
 */

#ifndef _VM_PAGE_H_
#define _VM_PAGE_H_

#if !defined(KLD_MODULE) && defined(_KERNEL)
#include "opt_vmpage.h"
#endif

#include <vm/pmap.h>
#include <machine/atomic.h>
#ifdef _KERNEL
#include <sys/thread2.h>
#endif

/*
 * Management of resident (logical) pages.
 *
 * A small structure is kept for each resident page, indexed by page
 * number.  Each structure is an element of several lists:
 *
 *	A hash table bucket used to quickly perform object/offset lookups.
 *
 *	A list of all pages for a given object, so they can be quickly
 *	deactivated at time of deallocation.
 *
 *	An ordered list of pages due for pageout.
 *
 * In addition, the structure contains the object and offset to which this
 * page belongs (for pageout), and sundry status bits.
 *
 * Fields in this structure are locked either by the lock on the object
 * that the page belongs to (O) or by the lock on the page queues (P).
 *
 * The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 * bits set without having associated valid bits set.  This is used by
 * NFS to implement piecemeal writes.
 */

TAILQ_HEAD(pglist, vm_page);

struct msf_buf;
struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list (P)	*/
	struct vm_page	*hnext;		/* hash table link (O,P)	*/
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O)	*/

	vm_object_t object;		/* which object am I in (O,P)	*/
	vm_pindex_t pindex;		/* offset into object (O,P)	*/
	vm_paddr_t phys_addr;		/* physical address of page	*/
	struct md_page md;		/* machine dependent stuff	*/
	u_short	queue;			/* page queue index		*/
	u_short	flags;			/* see below			*/
	u_short	pc;			/* page color			*/
	u_short	wire_count;		/* wired down maps refs (P)	*/
	short	hold_count;		/* page hold count		*/
	u_char	act_count;		/* page usage count		*/
	u_char	busy;			/* page busy count		*/

	/*
	 * NOTE: these fields must support one bit per DEV_BSIZE chunk in
	 * a page, so on normal x86 kernels (4K pages, 512-byte DEV_BSIZE)
	 * they must be at least 8 bits wide (see the sketch below).
	 */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
	u_char	unused1;
	u_char	unused2;
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
	struct msf_buf *msf_hint;	/* first page of an msfbuf map */
};
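
/*
 * Illustrative sketch (not compiled): how a (base, size) byte range
 * within a page converts to the per-DEV_BSIZE chunk bits stored in the
 * 'valid' and 'dirty' fields above.  vm_page_bits(), declared later in
 * this file, is the real routine; 'example_page_bits' is a hypothetical
 * name used only for illustration.
 */
#if 0
static __inline int
example_page_bits(int base, int size)
{
	/* One bit for each DEV_BSIZE chunk overlapped by [base, base+size) */
	return ((2 << (((base + size) - 1) / DEV_BSIZE)) -
		(1 << (base / DEV_BSIZE)));
}
#endif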

/*
 * Note: SWAPBLK_NONE is currently used as an absolute value rather than
 * as a flag bit.
 */
#define SWAPBLK_MASK	((daddr_t)((u_daddr_t)-1 >> 1))		/* mask */
#define SWAPBLK_NONE	((daddr_t)((u_daddr_t)SWAPBLK_MASK + 1))/* flag */

/*
 * Page coloring parameters.  We default to a middle of the road optimization.
 * Larger selections would not really hurt us but if a machine does not have
 * a lot of memory it could cause vm_page_alloc() to eat more cpu cycles
 * looking for free pages.
 *
 * Page coloring cannot be disabled.  Modules do not have access to most PQ
 * constants because they can change between builds.  (A sketch of how a
 * color selects a queue follows the queue declarations below.)
 */
#if defined(_KERNEL) && !defined(KLD_MODULE)

#if !defined(PQ_CACHESIZE)
#define PQ_CACHESIZE 256	/* max is 1024 (MB) */
#endif

#if PQ_CACHESIZE >= 1024
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 256	/* A number of colors opt for 1M cache */

#elif PQ_CACHESIZE >= 512
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 128	/* A number of colors opt for 512K cache */

#elif PQ_CACHESIZE >= 256
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 64	/* A number of colors opt for 256K cache */

#elif PQ_CACHESIZE >= 128
#define PQ_PRIME1 9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 32	/* A number of colors opt for 128k cache */

#else
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors (opt for 64K cache) */

#endif

#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#endif /* KERNEL && !KLD_MODULE */

/*
 * The queue array is always based on PQ_MAXL2_SIZE regardless of the actual
 * cache size chosen in order to present a uniform interface for modules.
 */
#define PQ_MAXL2_SIZE	256	/* fixed maximum (in pages) / module compat */

#if PQ_L2_SIZE > PQ_MAXL2_SIZE
#error "Illegal PQ_L2_SIZE"
#endif

#define PQ_NONE		0
#define PQ_FREE		1
#define PQ_INACTIVE	(1 + 1*PQ_MAXL2_SIZE)
#define PQ_ACTIVE	(2 + 1*PQ_MAXL2_SIZE)
#define PQ_CACHE	(3 + 1*PQ_MAXL2_SIZE)
#define PQ_HOLD		(3 + 2*PQ_MAXL2_SIZE)
#define PQ_COUNT	(4 + 2*PQ_MAXL2_SIZE)

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
	int	lcnt;
	int	flipflop;	/* probably not the best place */
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];
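
/*
 * Illustrative sketch (not compiled): the free and cache queues are
 * replicated once per page color, so a queue index combines a base queue
 * with the page's color.  'example_colored_free_queue' is a hypothetical
 * helper showing how a colored free list would be selected.
 */
#if 0
static __inline struct vpgqueues *
example_colored_free_queue(u_short pc)
{
	/* PQ_FREE is the base index; the color picks the sub-queue. */
	return (&vm_page_queues[PQ_FREE + (pc & PQ_L2_MASK)]);
}
#endif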

/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 *	 not under PV management but otherwise should be treated as a
 *	 normal page.  Pages not under PV management cannot be paged out
 *	 via the object/vm_page_t because there is no knowledge of their
 *	 pte mappings, nor can they be removed from their objects via
 *	 the object, and such pages are also not on any PQ queue.
 */
#define	PG_BUSY		0x0001		/* page is in transit (O) */
#define	PG_WANTED	0x0002		/* someone is waiting for page (O) */
#define	PG_WINATCFLS	0x0004		/* flush dirty page on inactive q */
#define	PG_FICTITIOUS	0x0008		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x0010		/* page is mapped writeable */
#define	PG_MAPPED	0x0020		/* page is mapped */
#define	PG_ZERO		0x0040		/* page is zeroed */
#define	PG_REFERENCED	0x0080		/* page has been referenced */
#define	PG_CLEANCHK	0x0100		/* page will be checked for cleaning */
#define	PG_SWAPINPROG	0x0200		/* swap I/O in progress on page */
#define	PG_NOSYNC	0x0400		/* do not collect for syncer */
#define	PG_UNMANAGED	0x0800		/* No PV management for page */
#define	PG_MARKER	0x1000		/* special queue marker page */

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of the following lists:
 *
 *	free
 *		Available for allocation now.
 *
 * The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an object, but
 *		clean and immediately freeable at non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.  This is the list
 *		of pages that should be paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been recently
 *		referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed (a
 *		subset of the free list, marked PG_ZERO).
 */

extern int vm_page_zero_count;
extern vm_page_t vm_page_array;		/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa) \
	(&vm_page_array[atop(pa) - first_page])

/*
 * Functions implemented as macros
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_short(&(m)->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_short(&(m)->flags, bits);
}

static __inline void
vm_page_busy(vm_page_t m)
{
	KASSERT((m->flags & PG_BUSY) == 0,
		("vm_page_busy: page already busy!!!"));
	vm_page_flag_set(m, PG_BUSY);
}

/*
 * vm_page_flash:
 *
 *	Wake up anyone waiting for the page.
 */
static __inline void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}

/*
 * Clear the PG_BUSY flag and wake up anyone waiting for the page.  This
 * is typically the last call you make on a page before moving on to
 * other things.
 */
static __inline void
vm_page_wakeup(vm_page_t m)
{
	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
	vm_page_flag_clear(m, PG_BUSY);
	vm_page_flash(m);
}
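
/*
 * Illustrative sketch (not compiled) of the hard-busy protocol: look the
 * page up, wait if it is already busy (vm_page_sleep_busy() is defined
 * later in this file), busy it for exclusive access, and release it with
 * vm_page_wakeup() when done.  'example_busy_protocol' is a hypothetical
 * name used only for illustration.
 */
#if 0
static __inline void
example_busy_protocol(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	while ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (vm_page_sleep_busy(m, FALSE, "pgwait"))
			continue;	/* slept; must re-lookup the page */
		vm_page_busy(m);	/* exclusive access */
		/* ... manipulate the page ... */
		vm_page_wakeup(m);	/* clear PG_BUSY, wake waiters */
		break;
	}
}
#endif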

/*
 * These routines manipulate the 'soft busy' count for a page.  A soft busy
 * is almost like PG_BUSY except that it allows certain compatible operations
 * to occur on the page while it is busy.  For example, a page undergoing a
 * write can still be mapped read-only.
 */
static __inline void
vm_page_io_start(vm_page_t m)
{
	atomic_add_char(&(m)->busy, 1);
}

static __inline void
vm_page_io_finish(vm_page_t m)
{
	atomic_subtract_char(&m->busy, 1);
	if (m->busy == 0)
		vm_page_flash(m);
}
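
/*
 * Illustrative sketch (not compiled): bracketing an I/O operation with
 * the soft-busy count.  Unlike PG_BUSY, a soft-busied page can still be
 * looked up and, for example, mapped read-only while the I/O proceeds.
 * 'example_page_write' and 'example_start_write' are hypothetical.
 */
#if 0
static __inline void
example_page_write(vm_page_t m)
{
	vm_page_io_start(m);	/* bump m->busy */
	/* example_start_write(m); ... issue and complete the write ... */
	vm_page_io_finish(m);	/* drop m->busy, flash waiters at zero */
}
#endif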

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

#if PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.  (See the sketch after the prototypes below.)
 */
#define VM_ALLOC_NORMAL		0x01	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x02	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x04	/* ok to exhaust entire free list */
#define	VM_ALLOC_ZERO		0x08	/* req pre-zero'd memory if avail */
#define	VM_ALLOC_RETRY		0x80	/* indefinite block (vm_page_grab()) */

void vm_page_unhold(vm_page_t mem);
void vm_page_activate (vm_page_t);
vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
void vm_page_cache (vm_page_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
vm_offset_t vm_page_startup (vm_offset_t);
vm_page_t vm_add_new_page (vm_paddr_t pa);
void vm_page_unmanage (vm_page_t);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_unqueue (vm_page_t);
void vm_page_unqueue_nowakeup (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_set_dirty (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
vm_page_t vm_page_list_find(int basequeue, int index, boolean_t prefer_zero);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
vm_offset_t vm_contig_pg_kmap(int, u_long, vm_map_t, int);
void vm_contig_pg_free(int, u_long);
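
/*
 * Illustrative sketch (not compiled): allocating a page with the flags
 * above.  vm_page_grab() with VM_ALLOC_RETRY (valid only together with
 * VM_ALLOC_NORMAL) blocks until a busied page can be returned; the
 * caller releases it with vm_page_wakeup().  'example_grab', 'object',
 * and 'pindex' are hypothetical.
 */
#if 0
static __inline void
example_grab(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	/* ... the returned page is PG_BUSY; initialize or use it ... */
	vm_page_wakeup(m);
}
#endif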

/*
 * Holding a page keeps it from being reused.  Other parts of the system
 * can still disassociate the page from its current object and free it, or
 * perform read or write I/O on it and/or otherwise manipulate the page,
 * but if the page is held the VM system will leave the page and its data
 * intact and not reuse the page for other purposes until the last hold
 * reference is released.  (See vm_page_wire() if you want to prevent the
 * page from being disassociated from its object too.)
 *
 * This routine must be called while at splvm() or better.
 *
 * The caller must still validate the contents of the page and, if necessary,
 * wait for any pending I/O (e.g. a vm_page_sleep_busy() loop) to complete
 * before manipulating the page.
 */
static __inline void
vm_page_hold(vm_page_t mem)
{
	mem->hold_count++;
}
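
/*
 * Illustrative sketch (not compiled): holding a page across a potentially
 * blocking operation.  The hold keeps the page and its data from being
 * reused, but the caller must still revalidate the page afterwards.
 * 'example_hold' and 'example_blocking_op' are hypothetical.
 */
#if 0
static __inline void
example_hold(vm_page_t m)
{
	vm_page_hold(m);	/* must be at splvm() or better */
	/* example_blocking_op(m); ... page cannot be reused meanwhile ... */
	vm_page_unhold(m);
}
#endif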

/*
 * Reduce the protection of a page.  This routine never raises the
 * protection and therefore can be safely called if the page is already
 * at VM_PROT_NONE (it will effectively be a NOP).
 *
 * VM_PROT_NONE will remove all user mappings of a page.  This is often
 * necessary when a page changes state (for example, turns into a copy-on-write
 * page or needs to be frozen for write I/O) in order to force a fault, or
 * to force a page's dirty bits to be synchronized and avoid hardware
 * (modified/accessed) bit update races with pmap changes.
 *
 * Since 'prot' is usually a constant, this inline usually winds up optimizing
 * out the primary conditional.
 */
static __inline void
vm_page_protect(vm_page_t mem, int prot)
{
	if (prot == VM_PROT_NONE) {
		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(mem, VM_PROT_NONE);
			vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
		}
	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
		pmap_page_protect(mem, VM_PROT_READ);
		vm_page_flag_clear(mem, PG_WRITEABLE);
	}
}

/*
 * Zero-fill the specified page.  The entire contents of the page will be
 * zeroed out.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 * Copy the contents of src_m to dest_m.  The pages must be stable but spl
 * and other protections depend on context.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}

/*
 * Free a page.  The page must be marked BUSY.
 *
 * The clearing of PG_ZERO is a temporary safety until the code can be
 * reviewed to determine that PG_ZERO is being properly cleared on
 * write faults or maps.  PG_ZERO was previously cleared in
 * vm_page_alloc().
 */
static __inline void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Free a page to the zeroed-pages queue.
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Wait until the page is no longer PG_BUSY, or (if also_m_busy is TRUE)
 * until m->busy is zero.  Returns TRUE if it had to sleep (including if
 * it almost had to sleep and made temporary spl*() mods), FALSE
 * otherwise.
 *
 * This routine assumes that interrupts can only remove the busy
 * status from a page, not set the busy status or change it from
 * PG_BUSY to m->busy or vice versa (which would create a timing
 * window).
 *
 * Note: as an inline, 'also_m_busy' is usually a constant and well
 * optimized.
 */
static __inline int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
		crit_enter();
		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
			/*
			 * Page is busy.  Wait and retry.
			 */
			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
			tsleep(m, 0, msg, 0);
		}
		crit_exit();
		return(TRUE);
		/* not reached */
	}
	return(FALSE);
}

/*
 * Make the page all dirty.
 */
static __inline void
_vm_page_dirty(vm_page_t m, const char *info)
{
#ifdef INVARIANTS
	int pqtype = m->queue - m->pc;
#endif
	KASSERT(pqtype != PQ_CACHE && pqtype != PQ_FREE,
		("vm_page_dirty: page in free/cache queue!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

#define vm_page_dirty(m)	_vm_page_dirty(m, __FUNCTION__)

/*
 * Set the page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_H_ */