/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_page.h,v 1.75.2.8 2002/03/06 01:07:09 dillon Exp $
 */

/*
 * Resident memory system definitions.
 */
#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_PMAP_H_
#include <vm/pmap.h>
#endif
#include <machine/atomic.h>

#ifdef _KERNEL

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif

#ifdef __x86_64__
#include <machine/vmparam.h>
#endif

#endif

typedef enum vm_page_event { VMEVENT_NONE, VMEVENT_COW } vm_page_event_t;

struct vm_page_action {
	LIST_ENTRY(vm_page_action) entry;
	struct vm_page		*m;
	vm_page_event_t		event;
	void			(*func)(struct vm_page *,
					struct vm_page_action *);
	void			*data;
};

typedef struct vm_page_action *vm_page_action_t;

/*
 * Management of resident (logical) pages.
 *
 * A small structure is kept for each resident page, indexed by page
 * number.  Each structure is an element of several lists:
 *
 *	A hash table bucket used to quickly perform object/offset lookups.
 *
 *	A list of all pages for a given object, so they can be quickly
 *	deactivated at time of deallocation.
 *
 *	An ordered list of pages due for pageout.
 *
 * In addition, the structure contains the object and offset to which
 * this page belongs (for pageout), and sundry status bits.
 *
 * Fields in this structure are locked either by the lock on the object
 * that the page belongs to (O) or by the lock on the page queues (P).
 *
 * The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 * bits set without having associated valid bits set.  This is used by
 * NFS to implement piecemeal writes.
 */

TAILQ_HEAD(pglist, vm_page);

struct vm_object;

int rb_vm_page_compare(struct vm_page *, struct vm_page *);

struct vm_page_rb_tree;
RB_PROTOTYPE2(vm_page_rb_tree, vm_page, rb_entry,
	      rb_vm_page_compare, vm_pindex_t);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list (P) */
	RB_ENTRY(vm_page) rb_entry;	/* Red-Black tree based at object */

	struct vm_object *object;	/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	u_short	queue;			/* page queue index */
	u_short	pc;			/* page color */
	u_char	act_count;		/* page usage count */
	u_char	busy;			/* page busy count */
	u_char	pat_mode;		/* hardware page attribute */
	u_char	unused02;
	u_int32_t flags;		/* see below */
	u_int	wire_count;		/* wired down maps refs (P) */
	int	hold_count;		/* page hold count */

	/*
	 * NOTE: These fields must support one bit per DEV_BSIZE chunk
	 * in a page, so on standard x86 kernels (4K pages, 512-byte
	 * DEV_BSIZE) they must be at least 8 bits wide.
	 */
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */

	int	ku_pagecnt;		/* kmalloc helper */
#ifdef VM_PAGE_DEBUG
	const char *busy_func;
	int	busy_line;
#endif
};

#ifdef VM_PAGE_DEBUG
#define VM_PAGE_DEBUG_EXT(name)	name ## _debug
#define VM_PAGE_DEBUG_ARGS	, const char *func, int lineno
#else
#define VM_PAGE_DEBUG_EXT(name)	name
#define VM_PAGE_DEBUG_ARGS
#endif

#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif
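/*
 * Illustrative sketch (not part of this header): how the per-DEV_BSIZE
 * 'valid'/'dirty' bitmaps are addressed.  A mask covering the byte range
 * [base, base + size) within a page sets one bit per DEV_BSIZE chunk
 * touched, which is roughly what vm_page_bits() (declared later in this
 * file) computes.  The helper name is hypothetical.
 */
#if 0
static __inline int
example_vm_page_bits(int base, int size)
{
	int first_bit = base / DEV_BSIZE;
	int last_bit = (base + size - 1) / DEV_BSIZE;

	/*
	 * (2 << last) - (1 << first) == bits first..last inclusive.
	 * For base 0, size PAGE_SIZE on 4K pages this yields 0xff,
	 * matching VM_PAGE_BITS_ALL below.
	 */
	return ((2 << last_bit) - (1 << first_bit));
}
#endif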
/*
 * Page coloring parameters.  We use generous parameters designed to
 * statistically spread pages over available cpu cache space.  This has
 * become less important over time as cache associativity is higher
 * in modern times but we still use the core algorithm to help reduce
 * lock contention between cpus.
 *
 * Page coloring cannot be disabled.
 */

#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 256	/* A number of colors opt for 1M cache */

#if 0
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 128	/* A number of colors opt for 512K cache */

#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 64	/* A number of colors opt for 256K cache */

#define PQ_PRIME1 9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 32	/* A number of colors opt for 128k cache */

#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors (opt for 64K cache) */
#endif

#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#define PQ_NONE		0
#define PQ_FREE		(1 + 0*PQ_L2_SIZE)
#define PQ_INACTIVE	(1 + 1*PQ_L2_SIZE)
#define PQ_ACTIVE	(1 + 2*PQ_L2_SIZE)
#define PQ_CACHE	(1 + 3*PQ_L2_SIZE)
#define PQ_HOLD		(1 + 4*PQ_L2_SIZE)
#define PQ_COUNT	(1 + 5*PQ_L2_SIZE)

/*
 * Scan support
 */
struct vm_map;

struct rb_vm_page_scan_info {
	vm_pindex_t	start_pindex;
	vm_pindex_t	end_pindex;
	int		limit;
	int		desired;
	int		error;
	int		pagerflags;
	vm_offset_t	addr;
	vm_pindex_t	backing_offset_index;
	struct vm_object *object;
	struct vm_object *backing_object;
	struct vm_page	*mpte;
	struct pmap	*pmap;
	struct vm_map	*map;
};

int rb_vm_page_scancmp(struct vm_page *, void *);

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
	int	lcnt;
	int	flipflop;	/* probably not the best place */
	struct spinlock spin;
	char	unused[64 - sizeof(struct pglist) - sizeof(int *) -
		       sizeof(int) * 2];
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];

#define PA_LOCKPTR(pa)	&pa_lock[pa_index((pa)) % PA_LOCK_COUNT].data

#define vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
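/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * each base queue (PQ_FREE, PQ_INACTIVE, ...) is really an array of
 * PQ_L2_SIZE per-color sub-queues, which is why the PQ_* bases above
 * are spaced PQ_L2_SIZE apart.  A page's sub-queue is presumably
 * selected by adding its color to the base index.
 */
#if 0
static __inline struct vpgqueues *
example_free_subqueue(vm_page_t m)
{
	/* base queue plus the page color, masked to a valid range */
	return (&vm_page_queues[PQ_FREE + (m->pc & PQ_L2_MASK)]);
}
#endif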
/*
 * These are the flags defined for vm_page.
 *
 * PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 * not under PV management but otherwise should be treated as a
 * normal page.  Pages not under PV management cannot be paged out
 * via the object/vm_page_t because there is no knowledge of their
 * pte mappings, nor can they be removed from their objects via
 * the object, and such pages are also not on any PQ queue.  The
 * PG_MAPPED and PG_WRITEABLE flags are not applicable.
 *
 * PG_MAPPED only applies to managed pages, indicating whether the page
 * is mapped onto one or more pmaps.  A page might still be mapped to
 * special pmaps in an unmanaged fashion, for example when mapped into a
 * buffer cache buffer, without setting PG_MAPPED.
 *
 * PG_WRITEABLE indicates that there may be a writeable managed pmap entry
 * somewhere, and that the page can be dirtied by hardware at any time
 * and may have to be tested for that.  The modified bit in unmanaged
 * mappings or in the special clean map is not tested.
 *
 * PG_SWAPPED indicates that the page is backed by a swap block.  Any
 * VM object type other than OBJT_DEFAULT can now have swap-backed pages.
 *
 * PG_SBUSY is set when m->busy != 0.  PG_SBUSY and m->busy are only
 * modified when the page is PG_BUSY.
 */
#define PG_BUSY		0x00000001	/* page is in transit (O) */
#define PG_WANTED	0x00000002	/* someone is waiting for page (O) */
#define PG_WINATCFLS	0x00000004	/* flush dirty page on inactive q */
#define PG_FICTITIOUS	0x00000008	/* physical page doesn't exist (O) */
#define PG_WRITEABLE	0x00000010	/* page is writeable */
#define PG_MAPPED	0x00000020	/* page is mapped (managed) */
#define PG_ZERO		0x00000040	/* page is zeroed */
#define PG_REFERENCED	0x00000080	/* page has been referenced */
#define PG_CLEANCHK	0x00000100	/* page will be checked for cleaning */
#define PG_SWAPINPROG	0x00000200	/* swap I/O in progress on page */
#define PG_NOSYNC	0x00000400	/* do not collect for syncer */
#define PG_UNMANAGED	0x00000800	/* no PV management for page */
#define PG_MARKER	0x00001000	/* special queue marker page */
#define PG_RAM		0x00002000	/* read ahead mark */
#define PG_SWAPPED	0x00004000	/* backed by swap */
#define PG_NOTMETA	0x00008000	/* do not back with swap */
#define PG_ACTIONLIST	0x00010000	/* lookaside action list present */
#define PG_SBUSY	0x00020000	/* soft-busy also set */
#define PG_NEED_COMMIT	0x00040000	/* clean page requires commit */

/*
 * Misc constants.
 */
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of the following lists:
 *
 *	free
 *		Available for allocation now.
 *
 * The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 */
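/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * PG_WRITEABLE is what makes a pmap-level dirty check worthwhile.  Only
 * a writeable managed mapping can set the hardware modified bit behind
 * the kernel's back, and unmanaged pages have no tracked ptes at all,
 * so both cases can be filtered before calling vm_page_test_dirty()
 * (declared later in this file).
 */
#if 0
static __inline void
example_sync_dirty(vm_page_t m)
{
	if ((m->flags & PG_UNMANAGED) == 0 && (m->flags & PG_WRITEABLE))
		vm_page_test_dirty(m);	/* may set m->dirty from the pmap */
}
#endif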
extern int vm_page_zero_count;
extern struct vm_page *vm_page_array;	/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)	\
	(&vm_page_array[atop(pa) - first_page])

/*
 * Functions implemented as macros
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_int(&(m)->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_int(&(m)->flags, bits);
}

/*
 * Wake up anyone waiting for the page after potentially unbusying
 * (hard or soft) or doing other work on a page that might make a
 * waiter ready.  The setting of PG_WANTED is integrated into the
 * related flags and it can't be set once the flags are already
 * clear, so there should be no races here.
 */
static __inline void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if
 * VM_ALLOC_NORMAL is also specified.
 */
#define VM_ALLOC_NORMAL		0x0001	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x0002	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x0004	/* ok to exhaust entire free list */
#define VM_ALLOC_ZERO		0x0008	/* req pre-zero'd memory if avail */
#define VM_ALLOC_QUICK		0x0010	/* like NORMAL but do not use cache */
#define VM_ALLOC_FORCE_ZERO	0x0020	/* zero page even if already valid */
#define VM_ALLOC_NULL_OK	0x0040	/* ok to return NULL on collision */
#define VM_ALLOC_RETRY		0x0080	/* indefinite block (vm_page_grab()) */
#define VM_ALLOC_USE_GD		0x0100	/* use per-gd cache */
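/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * a typical allocation that is willing to sleep.  Per the note above,
 * VM_ALLOC_RETRY is only legal together with VM_ALLOC_NORMAL, and with
 * it vm_page_grab() (declared below) blocks indefinitely rather than
 * returning NULL.
 */
#if 0
static vm_page_t
example_grab_page(struct vm_object *object, vm_pindex_t pindex)
{
	vm_page_t m;

	/* the returned page is busied; release it with vm_page_wakeup() */
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	return (m);
}
#endif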
void vm_page_queue_spin_lock(vm_page_t);
void vm_page_queues_spin_lock(u_short);
void vm_page_and_queue_spin_lock(vm_page_t);

void vm_page_queue_spin_unlock(vm_page_t);
void vm_page_queues_spin_unlock(u_short);
void vm_page_and_queue_spin_unlock(vm_page_t m);

void vm_page_io_finish(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_need_commit(vm_page_t m);
void vm_page_clear_commit(vm_page_t m);
void vm_page_wakeup(vm_page_t m);
void vm_page_hold(vm_page_t);
void vm_page_unhold(vm_page_t);
void vm_page_activate(vm_page_t);
void vm_page_pcpu_cache(void);
vm_page_t vm_page_alloc(struct vm_object *, vm_pindex_t, int);
vm_page_t vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high,
		unsigned long alignment, unsigned long boundary,
		unsigned long size);
vm_page_t vm_page_grab(struct vm_object *, vm_pindex_t, int);
void vm_page_cache(vm_page_t);
int vm_page_try_to_cache(vm_page_t);
int vm_page_try_to_free(vm_page_t);
void vm_page_dontneed(vm_page_t);
void vm_page_deactivate(vm_page_t);
void vm_page_deactivate_locked(vm_page_t);
int vm_page_insert(vm_page_t, struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup(struct vm_object *, vm_pindex_t);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_wait)(
		struct vm_object *, vm_pindex_t, int, const char *
		VM_PAGE_DEBUG_ARGS);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_try)(
		struct vm_object *, vm_pindex_t, int, int *
		VM_PAGE_DEBUG_ARGS);
void vm_page_remove(vm_page_t);
void vm_page_rename(vm_page_t, struct vm_object *, vm_pindex_t);
void vm_page_startup(void);
void vm_page_unmanage(vm_page_t);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unwire(vm_page_t, int);
void vm_page_wire(vm_page_t);
void vm_page_unqueue(vm_page_t);
void vm_page_unqueue_nowakeup(vm_page_t);
vm_page_t vm_page_next(vm_page_t);
void vm_page_set_validclean(vm_page_t, int, int);
void vm_page_set_validdirty(vm_page_t, int, int);
void vm_page_set_valid(vm_page_t, int, int);
void vm_page_set_dirty(vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
int vm_page_bits(int, int);
vm_page_t vm_page_list_find(int basequeue, int index, boolean_t prefer_zero);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_free_contig(vm_page_t m, unsigned long size);
vm_page_t vm_page_free_fromq_fast(void);
void vm_page_event_internal(vm_page_t, vm_page_event_t);
void vm_page_dirty(vm_page_t m);
void vm_page_register_action(vm_page_action_t action, vm_page_event_t event);
void vm_page_unregister_action(vm_page_action_t action);
void vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg);
void VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m,
		int also_m_busy, const char *wmsg VM_PAGE_DEBUG_ARGS);
int VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m,
		int also_m_busy VM_PAGE_DEBUG_ARGS);

#ifdef VM_PAGE_DEBUG

#define vm_page_lookup_busy_wait(object, pindex, alsob, msg)		\
	vm_page_lookup_busy_wait_debug(object, pindex, alsob, msg,	\
					__func__, __LINE__)

#define vm_page_lookup_busy_try(object, pindex, alsob, errorp)		\
	vm_page_lookup_busy_try_debug(object, pindex, alsob, errorp,	\
					__func__, __LINE__)

#define vm_page_busy_wait(m, alsob, msg)				\
	vm_page_busy_wait_debug(m, alsob, msg, __func__, __LINE__)

#define vm_page_busy_try(m, alsob)					\
	vm_page_busy_try_debug(m, alsob, __func__, __LINE__)

#endif
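/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * the usual hard-busy bracket around page manipulation, assumed from
 * the prototypes above.  Under VM_PAGE_DEBUG the wrapper macros record
 * __func__/__LINE__ for the busy holder automatically.
 */
#if 0
static void
example_with_page_busied(vm_page_t m)
{
	vm_page_busy_wait(m, 0, "pgexam"); /* block until PG_BUSY is won */
	/* ... inspect or modify the page while exclusively busied ... */
	vm_page_wakeup(m);		   /* drop PG_BUSY, wake waiters */
}
#endif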
/*
 * Reduce the protection of a page.  This routine never raises the
 * protection and therefore can be safely called if the page is already
 * at VM_PROT_NONE (it is effectively a NOP).
 *
 * VM_PROT_NONE will remove all user mappings of a page.  This is often
 * necessary when a page changes state (for example, turns into a
 * copy-on-write page or needs to be frozen for write I/O) in order to
 * force a fault, or to force a page's dirty bits to be synchronized
 * and avoid hardware (modified/accessed) bit update races with pmap
 * changes.
 *
 * Since 'prot' is usually a constant, this inline usually winds up
 * optimizing out the primary conditional.
 *
 * WARNING: VM_PROT_NONE can block, but will loop until all mappings
 * have been cleared.  Callers should be aware that other page related
 * elements might have changed, however.
 */
static __inline void
vm_page_protect(vm_page_t m, int prot)
{
	KKASSERT(m->flags & PG_BUSY);
	if (prot == VM_PROT_NONE) {
		if (m->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(m, VM_PROT_NONE);
			/* PG_WRITEABLE & PG_MAPPED cleared by call */
		}
	} else if ((prot == VM_PROT_READ) && (m->flags & PG_WRITEABLE)) {
		pmap_page_protect(m, VM_PROT_READ);
		/* PG_WRITEABLE cleared by call */
	}
}

/*
 * Zero-fill the specified page.  The entire contents of the page will
 * be zero'd out.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 * Copy the contents of src_m to dest_m.  The pages must be stable but
 * spl and other protections depend on context.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
	dest_m->dirty = VM_PAGE_BITS_ALL;
}

/*
 * Free a page.  The page must be marked BUSY.
 *
 * Always clear PG_ZERO when freeing a page, which ensures the flag is
 * not set unless we are absolutely certain the page is zeroed.  This is
 * particularly important when the vm_page_alloc*() code moves pages
 * from PQ_CACHE to PQ_FREE.
 */
static __inline void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Free a page to the zeroed-pages queue.  The caller must ensure that
 * the page has been zeroed.
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
#ifdef PMAP_DEBUG
#ifdef PHYS_TO_DMAP
	char *p = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	int i;

	for (i = 0; i < PAGE_SIZE; i++) {
		if (p[i] != 0) {
			panic("non-zero page in vm_page_free_zero()");
		}
	}
#endif
#endif
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Set page to not be dirty.  Note: this does not clear the pmap
 * modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#endif				/* _KERNEL */
#endif				/* !_VM_VM_PAGE_H_ */