/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_page.h,v 1.75.2.8 2002/03/06 01:07:09 dillon Exp $
 * $DragonFly: src/sys/vm/vm_page.h,v 1.28 2008/05/09 07:24:48 dillon Exp $
 */

/*
 * Resident memory system definitions.
 */
#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#if !defined(KLD_MODULE) && defined(_KERNEL)
#include "opt_vmpage.h"
#endif

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_PMAP_H_
#include <vm/pmap.h>
#endif
#ifndef _MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif

#ifdef _KERNEL

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif

#ifdef __x86_64__
#include <machine/vmparam.h>
#endif

#endif	/* _KERNEL */

typedef enum vm_page_event { VMEVENT_NONE, VMEVENT_COW } vm_page_event_t;

struct vm_page_action {
	LIST_ENTRY(vm_page_action) entry;
	struct vm_page	*m;
	vm_page_event_t	event;
	void		(*func)(struct vm_page *, struct vm_page_action *);
	void		*data;
};

typedef struct vm_page_action *vm_page_action_t;

/*
 * Management of resident (logical) pages.
 *
 * A small structure is kept for each resident page, indexed by page
 * number.  Each structure is an element of several lists:
 *
 *	A hash table bucket used to quickly perform object/offset
 *	lookups.
 *
 *	A list of all pages for a given object, so they can be quickly
 *	deactivated at time of deallocation.
 *
 *	An ordered list of pages due for pageout.
 *
 * In addition, the structure contains the object and offset to which
 * this page belongs (for pageout), and sundry status bits.
 *
 * Fields in this structure are locked either by the lock on the object
 * that the page belongs to (O) or by the lock on the page queues (P).
 *
 * The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 * bits set without having associated valid bits set.  This is used by
 * NFS to implement piecemeal writes.
 */

TAILQ_HEAD(pglist, vm_page);

struct vm_object;

int rb_vm_page_compare(struct vm_page *, struct vm_page *);

struct vm_page_rb_tree;
RB_PROTOTYPE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare,
	      vm_pindex_t);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list (P) */
	RB_ENTRY(vm_page) rb_entry;	/* Red-Black tree based at object */

	struct vm_object *object;	/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	u_short	queue;			/* page queue index */
	u_short	pc;			/* page color */
	u_char	act_count;		/* page usage count */
	u_char	busy;			/* page busy count */
	u_char	unused01;
	u_char	unused02;
	u_int32_t flags;		/* see below */
	u_int	wire_count;		/* wired down maps refs (P) */
	int	hold_count;		/* page hold count */

	/*
	 * NOTE: these fields must support one bit per DEV_BSIZE chunk
	 * in a page, so on standard x86 kernels (PAGE_SIZE 4096,
	 * DEV_BSIZE 512) they must be at least 8 bits wide.
	 */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
	int	ku_pagecnt;		/* kmalloc helper */
};

#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif
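/*
 * Worked example (editor's sketch, not part of the original header):
 * each bit of 'valid' and 'dirty' covers one DEV_BSIZE chunk, so with
 * PAGE_SIZE 4096 and DEV_BSIZE 512 a page has 8 chunks.  vm_page_bits()
 * (prototyped below) reduces a byte range to a chunk mask, roughly:
 *
 *	first_bit = base / DEV_BSIZE;
 *	last_bit  = (base + size - 1) / DEV_BSIZE;
 *	mask      = (2 << last_bit) - (1 << first_bit);
 *
 * For base 512, size 1024 this covers chunks 1 and 2 and yields 0x06;
 * a fully valid 4K page has valid == 0xff (VM_PAGE_BITS_ALL below).
 */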
/*
 * Page coloring parameters.  We default to a middle of the road
 * optimization.  Larger selections would not really hurt us but if a
 * machine does not have a lot of memory it could cause vm_page_alloc()
 * to eat more cpu cycles looking for free pages.
 *
 * Page coloring cannot be disabled.  Modules do not have access to
 * most PQ constants because they can change between builds.
 */
#if defined(_KERNEL) && !defined(KLD_MODULE)

#if !defined(PQ_CACHESIZE)
#define PQ_CACHESIZE	256	/* max is 1024 (MB) */
#endif

#if PQ_CACHESIZE >= 1024
#define PQ_PRIME1	31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2	23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE	256	/* A number of colors opt for 1M cache */

#elif PQ_CACHESIZE >= 512
#define PQ_PRIME1	31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2	23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE	128	/* A number of colors opt for 512K cache */

#elif PQ_CACHESIZE >= 256
#define PQ_PRIME1	13	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2	7	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE	64	/* A number of colors opt for 256K cache */

#elif PQ_CACHESIZE >= 128
#define PQ_PRIME1	9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2	5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE	32	/* A number of colors opt for 128k cache */

#else
#define PQ_PRIME1	5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2	3	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE	16	/* A reasonable number of colors (opt for 64K cache) */

#endif

#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#endif /* _KERNEL && !KLD_MODULE */

/*
 * The queue array is always based on PQ_MAXL2_SIZE regardless of the
 * actual cache size chosen, in order to present a uniform interface
 * for modules.
 */
#define PQ_MAXL2_SIZE	256	/* fixed maximum (in pages) / module compat */

#if PQ_L2_SIZE > PQ_MAXL2_SIZE
#error "Illegal PQ_L2_SIZE"
#endif

#define PQ_NONE		0
#define PQ_FREE		1
#define PQ_INACTIVE	(1 + 1*PQ_MAXL2_SIZE)
#define PQ_ACTIVE	(2 + 1*PQ_MAXL2_SIZE)
#define PQ_CACHE	(3 + 1*PQ_MAXL2_SIZE)
#define PQ_HOLD		(3 + 2*PQ_MAXL2_SIZE)
#define PQ_COUNT	(4 + 2*PQ_MAXL2_SIZE)
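/*
 * Worked example (editor's sketch, inferred only from the constants
 * above): PQ_FREE and PQ_CACHE each span PQ_MAXL2_SIZE slots, one per
 * page color, while PQ_INACTIVE, PQ_ACTIVE and PQ_HOLD are single
 * queues.  A free page of color 'pc' therefore lives at:
 *
 *	m->queue = PQ_FREE + m->pc;	(0 <= m->pc <= PQ_L2_MASK)
 *
 * and the matching queue head is vm_page_queues[m->queue] (declared
 * below).
 */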
/*
 * Scan support
 */
struct vm_map;

struct rb_vm_page_scan_info {
	vm_pindex_t	start_pindex;
	vm_pindex_t	end_pindex;
	int		limit;
	int		desired;
	int		error;
	int		pagerflags;
	vm_offset_t	addr;
	vm_pindex_t	backing_offset_index;
	struct vm_object *object;
	struct vm_object *backing_object;
	struct vm_page	*mpte;
	struct pmap	*pmap;
	struct vm_map	*map;
};

int rb_vm_page_scancmp(struct vm_page *, void *);

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
	int	lcnt;
	int	flipflop;	/* probably not the best place */
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];

/*
 * These are the flags defined for vm_page.
 *
 * PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is not under
 * PV management but otherwise should be treated as a normal page.
 * Pages not under PV management cannot be paged out via the
 * object/vm_page_t because there is no knowledge of their pte mappings,
 * nor can they be removed from their objects via the object, and such
 * pages are also not on any PQ queue.  The PG_MAPPED and PG_WRITEABLE
 * flags are not applicable.
 *
 * PG_MAPPED only applies to managed pages, indicating whether the page
 * is mapped onto one or more pmaps.  A page might still be mapped to
 * special pmaps in an unmanaged fashion, for example when mapped into a
 * buffer cache buffer, without setting PG_MAPPED.
 *
 * PG_WRITEABLE indicates that there may be a writeable managed pmap
 * entry somewhere, and that the page can be dirtied by hardware at any
 * time and may have to be tested for that.  The modified bit in
 * unmanaged mappings or in the special clean map is not tested.
 *
 * PG_SWAPPED indicates that the page is backed by a swap block.  Any
 * VM object type other than OBJT_DEFAULT can have swap-backed pages
 * now.
 */
#define PG_BUSY		0x00000001	/* page is in transit (O) */
#define PG_WANTED	0x00000002	/* someone is waiting for page (O) */
#define PG_WINATCFLS	0x00000004	/* flush dirty page on inactive q */
#define PG_FICTITIOUS	0x00000008	/* physical page doesn't exist (O) */
#define PG_WRITEABLE	0x00000010	/* page is writeable */
#define PG_MAPPED	0x00000020	/* page is mapped (managed) */
#define PG_ZERO		0x00000040	/* page is zeroed */
#define PG_REFERENCED	0x00000080	/* page has been referenced */
#define PG_CLEANCHK	0x00000100	/* page will be checked for cleaning */
#define PG_SWAPINPROG	0x00000200	/* swap I/O in progress on page */
#define PG_NOSYNC	0x00000400	/* do not collect for syncer */
#define PG_UNMANAGED	0x00000800	/* No PV management for page */
#define PG_MARKER	0x00001000	/* special queue marker page */
#define PG_RAM		0x00002000	/* read ahead mark */
#define PG_SWAPPED	0x00004000	/* backed by swap */
#define PG_NOTMETA	0x00008000	/* do not back with swap */
#define PG_ACTIONLIST	0x00010000	/* lookaside action list present */
				/* 'flags' is u_int32_t; bits 17-31 unused */
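/*
 * Usage sketch (editor's illustration, not part of the original
 * header): flag bits are tested directly but modified only through the
 * atomic helpers defined below, since other cpus may race on 'flags'.
 * A typical hard-busy sequence (vm_page_busy() additionally requires
 * the caller to hold vm_token):
 *
 *	vm_page_busy(m);		sets PG_BUSY
 *	vm_page_flag_set(m, PG_REFERENCED);
 *	...operate on the page...
 *	vm_page_wakeup(m);		clears PG_BUSY, wakes waiters
 */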
/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an object,
 *		but clean and immediately freeable at non-interrupt
 *		times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.  This is the
 *		list of pages that should be paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been recently
 *		referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 */

extern int vm_page_zero_count;
extern struct vm_page *vm_page_array;	/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)	\
	(&vm_page_array[atop(pa) - first_page])

/*
 * Functions implemented as macros
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_int(&(m)->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_int(&(m)->flags, bits);
}

static __inline void
vm_page_busy(vm_page_t m)
{
	ASSERT_LWKT_TOKEN_HELD(&vm_token);
	KASSERT((m->flags & PG_BUSY) == 0,
		("vm_page_busy: page already busy!!!"));
	vm_page_flag_set(m, PG_BUSY);
}

/*
 * vm_page_flash:
 *
 *	Wake up anyone waiting for the page.
 */
static __inline void
vm_page_flash(vm_page_t m)
{
	lwkt_gettoken(&vm_token);
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
	lwkt_reltoken(&vm_token);
}

/*
 * Clear the PG_BUSY flag and wake up anyone waiting for the page.
 * This is typically the last call you make on a page before moving on
 * to other things.
 */
static __inline void
vm_page_wakeup(vm_page_t m)
{
	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
	vm_page_flag_clear(m, PG_BUSY);
	vm_page_flash(m);
}

/*
 * These routines manipulate the 'soft busy' count for a page.  A soft
 * busy is almost like PG_BUSY except that it allows certain compatible
 * operations to occur on the page while it is busy.  For example, a
 * page undergoing a write can still be mapped read-only.
 */
static __inline void
vm_page_io_start(vm_page_t m)
{
	atomic_add_char(&(m)->busy, 1);
}

static __inline void
vm_page_io_finish(vm_page_t m)
{
	atomic_subtract_char(&m->busy, 1);
	if (m->busy == 0)
		vm_page_flash(m);
}

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

#if PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif

/*
 * Note: the code will always use nominally free pages from the free
 * list before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if
 * VM_ALLOC_NORMAL is also specified.
 */
#define VM_ALLOC_NORMAL		0x01	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x02	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x04	/* ok to exhaust entire free list */
#define VM_ALLOC_ZERO		0x08	/* req pre-zero'd memory if avail */
#define VM_ALLOC_QUICK		0x10	/* like NORMAL but do not use cache */
#define VM_ALLOC_RETRY		0x80	/* indefinite block (vm_page_grab()) */
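/*
 * Usage sketch (editor's illustration, not part of the original
 * header): a typical blocking allocation that may reuse cache pages
 * combines VM_ALLOC_NORMAL with VM_ALLOC_RETRY:
 *
 *	vm_page_t m;
 *
 *	m = vm_page_grab(object, pindex,
 *			 VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_ZERO);
 *
 * With VM_ALLOC_RETRY the call is assumed to block until it can return
 * a busied page; release it with vm_page_wakeup() when done.
 */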
void vm_page_hold(vm_page_t);
void vm_page_unhold(vm_page_t);
void vm_page_activate(vm_page_t);
vm_page_t vm_page_alloc(struct vm_object *, vm_pindex_t, int);
vm_page_t vm_page_grab(struct vm_object *, vm_pindex_t, int);
void vm_page_cache(vm_page_t);
int vm_page_try_to_cache(vm_page_t);
int vm_page_try_to_free(vm_page_t);
void vm_page_dontneed(vm_page_t);
void vm_page_deactivate(vm_page_t);
void vm_page_insert(vm_page_t, struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup(struct vm_object *, vm_pindex_t);
void vm_page_remove(vm_page_t);
void vm_page_rename(vm_page_t, struct vm_object *, vm_pindex_t);
vm_offset_t vm_page_startup(vm_offset_t);
vm_page_t vm_add_new_page(vm_paddr_t pa);
void vm_page_unmanage(vm_page_t);
void vm_page_unwire(vm_page_t, int);
void vm_page_wire(vm_page_t);
void vm_page_unqueue(vm_page_t);
void vm_page_unqueue_nowakeup(vm_page_t);
void vm_page_set_validclean(vm_page_t, int, int);
void vm_page_set_validdirty(vm_page_t, int, int);
void vm_page_set_valid(vm_page_t, int, int);
void vm_page_set_dirty(vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
int vm_page_bits(int, int);
vm_page_t vm_page_list_find(int basequeue, int index, boolean_t prefer_zero);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
vm_page_t vm_page_free_fromq_fast(void);
vm_offset_t vm_contig_pg_kmap(int, u_long, vm_map_t, int);
void vm_contig_pg_free(int, u_long);
void vm_page_event_internal(vm_page_t, vm_page_event_t);
void vm_page_dirty(vm_page_t m);
void vm_page_register_action(vm_page_action_t action, vm_page_event_t event);
void vm_page_unregister_action(vm_page_action_t action);

/*
 * Reduce the protection of a page.  This routine never raises the
 * protection and therefore can be safely called if the page is already
 * at VM_PROT_NONE (it is then effectively a NOP).
 *
 * VM_PROT_NONE will remove all user mappings of a page.  This is often
 * necessary when a page changes state (for example, turns into a
 * copy-on-write page or needs to be frozen for write I/O) in order to
 * force a fault, or to force a page's dirty bits to be synchronized
 * and avoid hardware (modified/accessed) bit update races with pmap
 * changes.
 *
 * Since 'prot' is usually a constant, this inline usually winds up
 * optimizing out the primary conditional.
 *
 * WARNING: VM_PROT_NONE can block, but will loop until all mappings
 * have been cleared.  Callers should be aware that other page related
 * elements might have changed, however.
 */
static __inline void
vm_page_protect(vm_page_t mem, int prot)
{
	if (prot == VM_PROT_NONE) {
		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(mem, VM_PROT_NONE);
			/* PG_WRITEABLE & PG_MAPPED cleared by call */
		}
	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
		pmap_page_protect(mem, VM_PROT_READ);
		/* PG_WRITEABLE cleared by call */
	}
}
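/*
 * Usage sketch (editor's illustration, not part of the original
 * header): a pager about to clean-write a page typically
 * write-protects it first, so any later store refaults and re-dirties
 * the page, then collects the current dirty state:
 *
 *	vm_page_protect(m, VM_PROT_READ);
 *	vm_page_test_dirty(m);
 *
 * A page being frozen or invalidated is fully unmapped with
 * vm_page_protect(m, VM_PROT_NONE) instead.
 */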
/*
 * Zero-fill the specified page.  The entire contents of the page will
 * be zeroed out.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 * Copy the contents of src_m to dest_m.  The pages must be stable but
 * spl and other protections depend on context.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
	dest_m->dirty = VM_PAGE_BITS_ALL;
}

/*
 * Free a page.  The page must be marked BUSY.
 *
 * The clearing of PG_ZERO is a temporary safety until the code can be
 * reviewed to determine that PG_ZERO is being properly cleared on
 * write faults or maps.  PG_ZERO was previously cleared in
 * vm_page_alloc().
 */
static __inline void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Free a page to the zeroed-pages queue.
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
#ifdef __x86_64__
	/* JG DEBUG64 We check if the page is really zeroed. */
	char *p = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	int i;

	for (i = 0; i < PAGE_SIZE; i++) {
		if (p[i] != 0)
			panic("non-zero page in vm_page_free_zero()");
	}
#endif
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Wait until the page is no longer PG_BUSY, or (if also_m_busy is TRUE)
 * until m->busy is zero.  Returns TRUE if it had to sleep (including if
 * it almost had to sleep and made temporary spl*() mods), FALSE
 * otherwise.
 *
 * This routine assumes that interrupts can only remove the busy status
 * from a page, not set the busy status or change it from PG_BUSY to
 * m->busy or vice versa (which would create a timing window).
 *
 * Note: as an inline, 'also_m_busy' is usually a constant and well
 * optimized.
 */
static __inline int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
		lwkt_gettoken(&vm_token);
		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
			/*
			 * Page is busy.  Wait and retry.
			 */
			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
			tsleep(m, 0, msg, 0);
		}
		lwkt_reltoken(&vm_token);
		return(TRUE);
	}
	return(FALSE);
}

/*
 * Set page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#endif				/* _KERNEL */
#endif				/* !_VM_VM_PAGE_H_ */