/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_page.h,v 1.75.2.8 2002/03/06 01:07:09 dillon Exp $
 * $DragonFly: src/sys/vm/vm_page.h,v 1.20 2005/06/14 17:16:00 dillon Exp $
 */

/*
 * Resident memory system definitions.
 */
#ifndef _VM_PAGE_H_
#define _VM_PAGE_H_

#if !defined(KLD_MODULE) && defined(_KERNEL)
#include "opt_vmpage.h"
#endif

#include <vm/pmap.h>
#include <machine/atomic.h>
#include <sys/thread2.h>

/*
 * Management of resident (logical) pages.
 *
 * A small structure is kept for each resident page, indexed by page
 * number.  Each structure is an element of several lists:
 *
 *	A hash table bucket used to quickly
 *	perform object/offset lookups.
 *
 *	A list of all pages for a given object,
 *	so they can be quickly deactivated at
 *	time of deallocation.
 *
 *	An ordered list of pages due for pageout.
 *
 * In addition, the structure contains the object and offset to which
 * this page belongs (for pageout), and sundry status bits.
 *
 * Fields in this structure are locked either by the lock on the
 * object that the page belongs to (O) or by the lock on the page
 * queues (P).
 *
 * The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 * bits set without having associated valid bits set.  This is used by
 * NFS to implement piecemeal writes.
 */

TAILQ_HEAD(pglist, vm_page);

struct msf_buf;
struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list (P)	*/
	struct vm_page *hnext;		/* hash table link (O,P)	*/
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O)	*/

	vm_object_t object;		/* which object am I in (O,P)	*/
	vm_pindex_t pindex;		/* offset into object (O,P)	*/
	vm_paddr_t phys_addr;		/* physical address of page	*/
	struct md_page md;		/* machine dependent stuff	*/
	u_short	queue;			/* page queue index		*/
	u_short	flags;			/* see below			*/
	u_short	pc;			/* page color			*/
	u_short	wire_count;		/* wired down maps refs (P)	*/
	short	hold_count;		/* page hold count		*/
	u_char	act_count;		/* page usage count		*/
	u_char	busy;			/* page busy count		*/

	/*
	 * NOTE: these fields must support one bit per DEV_BSIZE chunk in
	 * a page, so on normal x86 kernels they must be at least 8 bits
	 * wide.
	 */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
	u_char	unused1;
	u_char	unused2;
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
	struct msf_buf *msf_hint;	/* first page of an msfbuf map */
};

/*
 * Note: SWAPBLK_NONE is currently used as an absolute value rather than
 * a flag bit.
 */
#define SWAPBLK_MASK	((daddr_t)((u_daddr_t)-1 >> 1))		/* mask */
#define SWAPBLK_NONE	((daddr_t)((u_daddr_t)SWAPBLK_MASK + 1))/* flag */
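
/*
 * Example (illustrative sketch, compiled out; the example_* name is
 * hypothetical and not part of this header): how a byte range within a
 * page maps onto the per-DEV_BSIZE-chunk 'valid'/'dirty' bitmaps above.
 * The kernel's real conversion is vm_page_bits(), declared later in this
 * file; this sketch assumes PAGE_SIZE == 4096 and DEV_BSIZE == 512
 * (8 chunks, one bit each) and a non-zero size.
 */
#if 0
static __inline u_char
example_page_bits(int base, int size)
{
	int first = base / DEV_BSIZE;			/* first chunk touched */
	int last = (base + size - 1) / DEV_BSIZE;	/* last chunk touched */

	/* one bit per DEV_BSIZE chunk the byte range overlaps */
	return (((2 << last) - 1) & ~((1 << first) - 1));
}

/* e.g. marking bytes 512-1535 valid: m->valid |= example_page_bits(512, 1024); */
#endif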

/*
 * Page coloring parameters.  We default to a middle of the road optimization.
 * Larger selections would not really hurt us but if a machine does not have
 * a lot of memory it could cause vm_page_alloc() to eat more cpu cycles
 * looking for free pages.
 *
 * Page coloring cannot be disabled.  Modules do not have access to most PQ
 * constants because they can change between builds.
 */
#if defined(_KERNEL) && !defined(KLD_MODULE)

#if !defined(PQ_CACHESIZE)
#define PQ_CACHESIZE 256	/* max is 1024 (MB) */
#endif

#if PQ_CACHESIZE >= 1024
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 256	/* Number of colors, optimized for a 1MB cache */

#elif PQ_CACHESIZE >= 512
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 128	/* Number of colors, optimized for a 512KB cache */

#elif PQ_CACHESIZE >= 256
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 64	/* Number of colors, optimized for a 256KB cache */

#elif PQ_CACHESIZE >= 128
#define PQ_PRIME1 9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 32	/* Number of colors, optimized for a 128KB cache */

#else
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors, optimized for a 64KB cache */

#endif

#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#endif /* _KERNEL && !KLD_MODULE */

/*
 * The queue array is always sized by PQ_MAXL2_SIZE, regardless of the
 * actual cache size chosen, in order to present a uniform interface to
 * modules.
 */
#define PQ_MAXL2_SIZE	256	/* fixed maximum (in pages) / module compat */

#if PQ_L2_SIZE > PQ_MAXL2_SIZE
#error "Illegal PQ_L2_SIZE"
#endif

#define PQ_NONE		0
#define PQ_FREE		1
#define PQ_INACTIVE	(1 + 1*PQ_MAXL2_SIZE)
#define PQ_ACTIVE	(2 + 1*PQ_MAXL2_SIZE)
#define PQ_CACHE	(3 + 1*PQ_MAXL2_SIZE)
#define PQ_HOLD		(3 + 2*PQ_MAXL2_SIZE)
#define PQ_COUNT	(4 + 2*PQ_MAXL2_SIZE)

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
	int	lcnt;
	int	flipflop;	/* probably not the best place */
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];
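
/*
 * Example (illustrative sketch, compiled out; the example_* helper is
 * hypothetical): PQ_FREE and PQ_CACHE are the bases of arrays of
 * PQ_MAXL2_SIZE per-color sub-queues, which is why PQ_INACTIVE starts at
 * 1 + 1*PQ_MAXL2_SIZE.  A page on one of the colored queues has
 * m->queue == base + m->pc, so subtracting the page color recovers the
 * base queue type; _vm_page_dirty() later in this file uses exactly that
 * to detect free/cache pages.
 */
#if 0
static __inline struct vpgqueues *
example_colored_queue(vm_page_t m, int base)
{
	/* base is e.g. PQ_FREE or PQ_CACHE; m->pc selects the color */
	return (&vm_page_queues[base + (m->pc & PQ_L2_MASK)]);
}
#endif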

/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 * not under PV management but otherwise should be treated as a
 * normal page.  Pages not under PV management cannot be paged out
 * via the object/vm_page_t because there is no knowledge of their
 * pte mappings, nor can they be removed from their objects via
 * the object, and such pages are also not on any PQ queue.
 */
#define	PG_BUSY		0x0001		/* page is in transit (O) */
#define	PG_WANTED	0x0002		/* someone is waiting for page (O) */
#define	PG_WINATCFLS	0x0004		/* flush dirty page on inactive q */
#define	PG_FICTITIOUS	0x0008		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x0010		/* page is mapped writeable */
#define	PG_MAPPED	0x0020		/* page is mapped */
#define	PG_ZERO		0x0040		/* page is zeroed */
#define	PG_REFERENCED	0x0080		/* page has been referenced */
#define	PG_CLEANCHK	0x0100		/* page will be checked for cleaning */
#define	PG_SWAPINPROG	0x0200		/* swap I/O in progress on page */
#define	PG_NOSYNC	0x0400		/* do not collect for syncer */
#define	PG_UNMANAGED	0x0800		/* no PV management for page */
#define	PG_MARKER	0x1000		/* special queue marker page */

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of the following lists:
 *
 *	free
 *		Available for allocation now.
 *
 * The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 *
 */

extern int vm_page_zero_count;
extern vm_page_t vm_page_array;		/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)	\
	(&vm_page_array[atop(pa) - first_page])

/*
 * Functions implemented as macros
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_short(&(m)->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_short(&(m)->flags, bits);
}

static __inline void
vm_page_busy(vm_page_t m)
{
	KASSERT((m->flags & PG_BUSY) == 0,
		("vm_page_busy: page already busy!!!"));
	vm_page_flag_set(m, PG_BUSY);
}

/*
 * vm_page_flash:
 *
 *	Wake up anyone waiting for the page.
 */
static __inline void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}

/*
 * Clear the PG_BUSY flag and wake up anyone waiting for the page.  This
 * is typically the last call you make on a page before moving on to
 * other things.
 */
static __inline void
vm_page_wakeup(vm_page_t m)
{
	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
	vm_page_flag_clear(m, PG_BUSY);
	vm_page_flash(m);
}
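
/*
 * Example (illustrative sketch, compiled out; example_touch_page() is
 * hypothetical): the typical hard-busy protocol built from the inlines
 * above.  The page is marked PG_BUSY while it is operated on, then
 * released with vm_page_wakeup(), which also wakes anyone who set
 * PG_WANTED in the meantime.  Object locking and spl/critical-section
 * handling are omitted for brevity.
 */
#if 0
static void
example_touch_page(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	if ((m = vm_page_lookup(object, pindex)) == NULL)
		return;
	vm_page_busy(m);	/* asserts PG_BUSY was clear, then sets it */
	/* ... operate on the page while it is in transit ... */
	vm_page_wakeup(m);	/* clears PG_BUSY, wakes PG_WANTED waiters */
}
#endif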

/*
 * These routines manipulate the 'soft busy' count for a page.  A soft busy
 * is almost like PG_BUSY except that it allows certain compatible operations
 * to occur on the page while it is busy.  For example, a page undergoing a
 * write can still be mapped read-only.
 */
static __inline void
vm_page_io_start(vm_page_t m)
{
	atomic_add_char(&(m)->busy, 1);
}

static __inline void
vm_page_io_finish(vm_page_t m)
{
	atomic_subtract_char(&m->busy, 1);
	if (m->busy == 0)
		vm_page_flash(m);
}

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

#if PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.
 */
#define VM_ALLOC_NORMAL		0x01	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x02	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x04	/* ok to exhaust entire free list */
#define	VM_ALLOC_ZERO		0x08	/* req pre-zero'd memory if avail */
#define	VM_ALLOC_RETRY		0x80	/* indefinite block (vm_page_grab()) */

void vm_page_unhold(vm_page_t mem);
void vm_page_activate (vm_page_t);
vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
void vm_page_cache (vm_page_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
vm_offset_t vm_page_startup (vm_offset_t);
vm_page_t vm_add_new_page (vm_paddr_t pa);
void vm_page_unmanage (vm_page_t);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_unqueue (vm_page_t);
void vm_page_unqueue_nowakeup (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_set_dirty (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
vm_page_t vm_page_list_find(int basequeue, int index, boolean_t prefer_zero);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
vm_offset_t vm_contig_pg_kmap(int, u_long, vm_map_t, int);
void vm_contig_pg_free(int, u_long);
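
/*
 * Example (illustrative sketch, compiled out; example_grab_zeroed() is
 * hypothetical): combining the VM_ALLOC_* flags above with vm_page_grab().
 * VM_ALLOC_RETRY (legal only together with VM_ALLOC_NORMAL) makes the
 * call block indefinitely instead of failing, and VM_ALLOC_ZERO only
 * *requests* a pre-zeroed page, so PG_ZERO must still be checked before
 * the contents are relied upon.
 */
#if 0
static vm_page_t
example_grab_zeroed(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	m = vm_page_grab(object, pindex,
			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
	if ((m->flags & PG_ZERO) == 0)
		vm_page_zero_fill(m);	/* was not pre-zeroed; zero it now */
	return (m);	/* returned busied; release with vm_page_wakeup() */
}
#endif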

/*
 * Holding a page keeps it from being reused.  Other parts of the system
 * can still disassociate the page from its current object and free it, or
 * perform read or write I/O on it and/or otherwise manipulate the page,
 * but if the page is held the VM system will leave the page and its data
 * intact and not reuse the page for other purposes until the last hold
 * reference is released.  (see vm_page_wire() if you want to prevent the
 * page from being disassociated from its object too).
 *
 * This routine must be called while at splvm() or better.
 *
 * The caller must still validate the contents of the page and, if necessary,
 * wait for any pending I/O (e.g. vm_page_sleep_busy() loop) to complete
 * before manipulating the page.
 */
static __inline void
vm_page_hold(vm_page_t mem)
{
	mem->hold_count++;
}

/*
 * Reduce the protection of a page.  This routine never raises the
 * protection and therefore can be safely called if the page is already
 * at VM_PROT_NONE (it is effectively a NOP).
 *
 * VM_PROT_NONE will remove all user mappings of a page.  This is often
 * necessary when a page changes state (for example, turns into a copy-on-write
 * page or needs to be frozen for write I/O) in order to force a fault, or
 * to force a page's dirty bits to be synchronized and avoid hardware
 * (modified/accessed) bit update races with pmap changes.
 *
 * Since 'prot' is usually a constant, this inline usually winds up optimizing
 * out the primary conditional.
 */
static __inline void
vm_page_protect(vm_page_t mem, int prot)
{
	if (prot == VM_PROT_NONE) {
		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(mem, VM_PROT_NONE);
			vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
		}
	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
		pmap_page_protect(mem, VM_PROT_READ);
		vm_page_flag_clear(mem, PG_WRITEABLE);
	}
}
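
/*
 * Example (illustrative sketch, compiled out; example_freeze_for_write()
 * is hypothetical): using vm_page_protect() as described above to force
 * faults on a page that is becoming copy-on-write or being frozen for
 * write I/O.
 */
#if 0
static void
example_freeze_for_write(vm_page_t m)
{
	/*
	 * Downgrade user mappings to read-only so the next write faults
	 * into the VM system.  Harmless if the page is already read-only
	 * or unmapped.
	 */
	vm_page_protect(m, VM_PROT_READ);

	/*
	 * Fold any pmap-level modified bits into m->dirty before the
	 * page's cleanliness is acted upon (vm_page_test_dirty() is
	 * declared earlier in this file).
	 */
	vm_page_test_dirty(m);
}
#endif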

/*
 * Zero-fill the specified page.  The entire contents of the page will be
 * zero'd out.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 * Copy the contents of src_m to dest_m.  The pages must be stable but spl
 * and other protections depend on context.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}

/*
 * Free a page.  The page must be marked BUSY.
 *
 * The clearing of PG_ZERO is a temporary safety until the code can be
 * reviewed to determine that PG_ZERO is being properly cleared on
 * write faults or maps.  PG_ZERO was previously cleared in
 * vm_page_alloc().
 */
static __inline void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Free a page to the zeroed-pages queue.
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Wait until the page is no longer PG_BUSY, or (if also_m_busy is TRUE)
 * until m->busy is zero.  Returns TRUE if it had to sleep (including if
 * it almost had to sleep and made temporary spl*() mods), FALSE
 * otherwise.
 *
 * This routine assumes that interrupts can only remove the busy
 * status from a page, not set the busy status or change it from
 * PG_BUSY to m->busy or vice versa (which would create a timing
 * window).
 *
 * Note: as an inline, 'also_m_busy' is usually a constant and well
 * optimized.
 */
static __inline int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
		crit_enter();
		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
			/*
			 * Page is busy.  Wait and retry.
			 */
			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
			tsleep(m, 0, msg, 0);
		}
		crit_exit();
		return(TRUE);
		/* not reached */
	}
	return(FALSE);
}

/*
 * Make the page fully dirty.
 */
static __inline void
_vm_page_dirty(vm_page_t m, const char *info)
{
	int pqtype = m->queue - m->pc;

	KASSERT(pqtype != PQ_CACHE && pqtype != PQ_FREE,
		("vm_page_dirty: page in free/cache queue!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

#define vm_page_dirty(m)	_vm_page_dirty(m, __FUNCTION__)

/*
 * Set the page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_H_ */