/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_page.h,v 1.75.2.8 2002/03/06 01:07:09 dillon Exp $
 * $DragonFly: src/sys/vm/vm_page.h,v 1.11 2004/05/13 17:40:19 dillon Exp $
 */

/*
 * Resident memory system definitions.
 */
#ifndef _VM_PAGE_
#define _VM_PAGE_

#if !defined(KLD_MODULE)
#include "opt_vmpage.h"
#endif

#include <vm/pmap.h>
#include <machine/atomic.h>

/*
 * Management of resident (logical) pages.
 *
 * A small structure is kept for each resident
 * page, indexed by page number.  Each structure
 * is an element of several lists:
 *
 *	A hash table bucket used to quickly
 *	perform object/offset lookups
 *
 *	A list of all pages for a given object,
 *	so they can be quickly deactivated at
 *	time of deallocation.
 *
 *	An ordered list of pages due for pageout.
 *
 * In addition, the structure contains the object
 * and offset to which this page belongs (for pageout),
 * and sundry status bits.
 *
 * Fields in this structure are locked either by the lock on the
 * object that the page belongs to (O) or by the lock on the page
 * queues (P).
 *
 * The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 * bits set without having associated valid bits set.  This is used by
 * NFS to implement piecemeal writes.
 */

TAILQ_HEAD(pglist, vm_page);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (P) */
	struct vm_page *hnext;		/* hash table link (O,P) */
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */

	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	u_short queue;			/* page queue index */
	u_short flags,			/* see below */
		pc;			/* page color */
	u_short wire_count;		/* wired down maps refs (P) */
	short hold_count;		/* page hold count */
	u_char act_count;		/* page usage count */
	u_char busy;			/* page busy count */
	/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
#if PAGE_SIZE == 4096
	u_char valid;			/* map of valid DEV_BSIZE chunks */
	u_char dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 8192
	u_short valid;			/* map of valid DEV_BSIZE chunks */
	u_short dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
};

/*
 * note: currently use SWAPBLK_NONE as an absolute value rather than
 * a flag bit.
 */

#define SWAPBLK_MASK	((daddr_t)((u_daddr_t)-1 >> 1))		/* mask */
#define SWAPBLK_NONE	((daddr_t)((u_daddr_t)SWAPBLK_MASK + 1))/* flag */
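/*
 * Illustrative sketch (not part of the original header): the 'valid'
 * and 'dirty' fields above are bitmaps with one bit per DEV_BSIZE
 * chunk of the page.  Assuming DEV_BSIZE is 512 and PAGE_SIZE is 4096,
 * bit N covers bytes [N * 512, N * 512 + 511].  vm_page_bits()
 * (declared below) converts a byte range into such a mask, so a
 * hypothetical helper marking the first 1K of a page valid might look
 * like this:
 */
#if 0
static __inline void
example_set_valid_1k(vm_page_t m)
{
	/* sets bits 0 and 1 of m->valid (the first two 512-byte chunks) */
	m->valid |= vm_page_bits(0, 1024);
}
#endif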
#if !defined(KLD_MODULE)

/*
 * Page coloring parameters
 */
/* Each of PQ_FREE and PQ_CACHE has PQ_L2_SIZE entries */

/* Backward compatibility for existing PQ_*CACHE config options. */
#if !defined(PQ_CACHESIZE)
#if defined(PQ_HUGECACHE)
#define PQ_CACHESIZE 1024
#elif defined(PQ_LARGECACHE)
#define PQ_CACHESIZE 512
#elif defined(PQ_MEDIUMCACHE)
#define PQ_CACHESIZE 256
#elif defined(PQ_NORMALCACHE)
#define PQ_CACHESIZE 64
#elif defined(PQ_NOOPT)
#define PQ_CACHESIZE 0
#else
#define PQ_CACHESIZE 128
#endif
#endif

#if PQ_CACHESIZE >= 1024
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 256	/* A number of colors opt for 1M cache */

#elif PQ_CACHESIZE >= 512
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 128	/* A number of colors opt for 512K cache */

#elif PQ_CACHESIZE >= 256
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 64	/* A number of colors opt for 256K cache */

#elif PQ_CACHESIZE >= 128
#define PQ_PRIME1 9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 32	/* A number of colors opt for 128k cache */

#elif PQ_CACHESIZE >= 64
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors (opt for 64K cache) */

#else
#define PQ_PRIME1 1	/* Disable page coloring. */
#define PQ_PRIME2 1
#define PQ_L2_SIZE 1

#endif

#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#define PQ_NONE		0
#define PQ_FREE		1
#define PQ_INACTIVE	(1 + 1*PQ_L2_SIZE)
#define PQ_ACTIVE	(2 + 1*PQ_L2_SIZE)
#define PQ_CACHE	(3 + 1*PQ_L2_SIZE)
#define PQ_HOLD		(3 + 2*PQ_L2_SIZE)
#define PQ_COUNT	(4 + 2*PQ_L2_SIZE)

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
	int	lcnt;
	int	flipflop;	/* probably not the best place */
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];

#endif
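/*
 * Illustrative sketch (not part of the original header): PQ_FREE and
 * PQ_CACHE are really arrays of PQ_L2_SIZE color-indexed sub-queues,
 * which is why PQ_INACTIVE starts at 1 + 1*PQ_L2_SIZE.  A page's
 * actual queue index is its base queue plus its color, as in this
 * hypothetical helper:
 */
#if 0
static __inline struct vpgqueues *
example_free_queue(vm_page_t m)
{
	/* select the free sub-queue matching the page's color */
	return (&vm_page_queues[PQ_FREE + (m->pc & PQ_L2_MASK)]);
}
#endif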
/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_FILLED and PG_DIRTY are added for the filesystems.
 *
 * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 *	 not under PV management but otherwise should be treated as a
 *	 normal page.  Pages not under PV management cannot be paged out
 *	 via the object/vm_page_t because there is no knowledge of their
 *	 pte mappings, nor can they be removed from their objects via
 *	 the object, and such pages are also not on any PQ queue.
 */
#define	PG_BUSY		0x0001		/* page is in transit (O) */
#define	PG_WANTED	0x0002		/* someone is waiting for page (O) */
#define	PG_WINATCFLS	0x0004		/* flush dirty page on inactive q */
#define	PG_FICTITIOUS	0x0008		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x0010		/* page is mapped writeable */
#define	PG_MAPPED	0x0020		/* page is mapped */
#define	PG_ZERO		0x0040		/* page is zeroed */
#define	PG_REFERENCED	0x0080		/* page has been referenced */
#define	PG_CLEANCHK	0x0100		/* page will be checked for cleaning */
#define	PG_SWAPINPROG	0x0200		/* swap I/O in progress on page */
#define	PG_NOSYNC	0x0400		/* do not collect for syncer */
#define	PG_UNMANAGED	0x0800		/* No PV management for page */
#define	PG_MARKER	0x1000		/* special queue marker page */

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64
#define PFCLUSTER_BEHIND	3
#define PFCLUSTER_AHEAD		3

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of the following lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 *
 */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa) \
		(&vm_page_array[atop(pa) - first_page])

/*
 * Functions implemented as macros
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_short(&(m)->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_short(&(m)->flags, bits);
}

#if 0
static __inline void
vm_page_assert_wait(vm_page_t m, int interruptible)
{
	vm_page_flag_set(m, PG_WANTED);
	assert_wait((int) m, interruptible);
}
#endif

static __inline void
vm_page_busy(vm_page_t m)
{
	KASSERT((m->flags & PG_BUSY) == 0,
		("vm_page_busy: page already busy!!!"));
	vm_page_flag_set(m, PG_BUSY);
}

/*
 * vm_page_flash:
 *
 *	wakeup anyone waiting for the page.
 */

static __inline void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}
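/*
 * Illustrative sketch (not part of the original header): PG_BUSY and
 * PG_WANTED form a simple hand-off protocol.  The owner of the page
 * sets PG_BUSY; anyone who finds the page busy sets PG_WANTED and
 * sleeps, and vm_page_flash()/vm_page_wakeup() (below) complete the
 * hand-off.  A hypothetical owner-side sequence:
 */
#if 0
static void
example_busy_handoff(vm_page_t m)
{
	vm_page_busy(m);	/* take exclusive ownership */
	/* ... manipulate the page ... */
	vm_page_wakeup(m);	/* drop PG_BUSY and wake any waiters */
}
#endif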
/*
 * vm_page_wakeup:
 *
 *	clear the PG_BUSY flag and wakeup anyone waiting for the
 *	page.
 */

static __inline void
vm_page_wakeup(vm_page_t m)
{
	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
	vm_page_flag_clear(m, PG_BUSY);
	vm_page_flash(m);
}

/*
 * vm_page_io_start/vm_page_io_finish:
 *
 *	Track minor (m->busy) I/O on the page.  vm_page_io_finish()
 *	wakes up anyone waiting on the page once the busy count
 *	drops to zero.
 */

static __inline void
vm_page_io_start(vm_page_t m)
{
	atomic_add_char(&(m)->busy, 1);
}

static __inline void
vm_page_io_finish(vm_page_t m)
{
	atomic_subtract_char(&m->busy, 1);
	if (m->busy == 0)
		vm_page_flash(m);
}


#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

#if PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.
 */
#define VM_ALLOC_NORMAL		0x01	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x02	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x04	/* ok to exhaust entire free list */
#define	VM_ALLOC_ZERO		0x08	/* req pre-zero'd memory if avail */
#define	VM_ALLOC_RETRY		0x80	/* indefinite block (vm_page_grab()) */

void vm_page_unhold(vm_page_t mem);

void vm_page_activate (vm_page_t);
vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
void vm_page_cache (vm_page_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (vm_page_t);
static __inline void vm_page_copy (vm_page_t, vm_page_t);
static __inline void vm_page_free (vm_page_t);
static __inline void vm_page_free_zero (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
vm_offset_t vm_page_startup (vm_offset_t, vm_offset_t, vm_offset_t);
vm_page_t vm_add_new_page (vm_paddr_t pa);
void vm_page_unmanage (vm_page_t);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_unqueue (vm_page_t);
void vm_page_unqueue_nowakeup (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_set_dirty (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
static __inline boolean_t vm_page_zero_fill (vm_page_t);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
vm_page_t _vm_page_list_find (int, int);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);

int vm_contig_pg_alloc(u_long, vm_paddr_t, vm_paddr_t, u_long, u_long);
vm_offset_t vm_contig_pg_kmap(int, u_long, vm_map_t, int);
void vm_contig_pg_free(int, u_long);
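/*
 * Illustrative sketch (not part of the original header): a typical
 * allocation uses vm_page_grab() with VM_ALLOC_NORMAL, adding
 * VM_ALLOC_RETRY to block until a page can be returned.  The page
 * comes back busied, so the caller wakes it up when done:
 */
#if 0
static vm_page_t
example_grab(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	/* ... fill or validate the page ... */
	vm_page_wakeup(m);	/* release PG_BUSY */
	return (m);
}
#endif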
/*
 * Holding a page keeps it from being reused.  Other parts of the system
 * can still disassociate the page from its current object and free it, or
 * perform read or write I/O on it and/or otherwise manipulate the page,
 * but if the page is held the VM system will leave the page and its data
 * intact and not reuse the page for other purposes until the last hold
 * reference is released.  (see vm_page_wire() if you want to prevent the
 * page from being disassociated from its object too).
 *
 * This routine must be called while at splvm() or better.
 *
 * The caller must still validate the contents of the page and, if necessary,
 * wait for any pending I/O (e.g. vm_page_sleep_busy() loop) to complete
 * before manipulating the page.
 */
static __inline void
vm_page_hold(vm_page_t mem)
{
	mem->hold_count++;
}

/*
 * Reduce the protection of a page.  This routine never raises the
 * protection and therefore can be safely called if the page is already
 * at VM_PROT_NONE (it will effectively be a NOP).
 *
 * VM_PROT_NONE will remove all user mappings of a page.  This is often
 * necessary when a page changes state (for example, turns into a copy-on-write
 * page or needs to be frozen for write I/O) in order to force a fault, or
 * to force a page's dirty bits to be synchronized and avoid hardware
 * (modified/accessed) bit update races with pmap changes.
 *
 * Since 'prot' is usually a constant, this inline usually winds up optimizing
 * out the primary conditional.
 */
static __inline void
vm_page_protect(vm_page_t mem, int prot)
{
	if (prot == VM_PROT_NONE) {
		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(mem, VM_PROT_NONE);
			vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
		}
	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
		pmap_page_protect(mem, VM_PROT_READ);
		vm_page_flag_clear(mem, PG_WRITEABLE);
	}
}

/*
 * Zero-fill the specified page.  The entire contents of the page will be
 * zeroed out.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 * Copy the contents of src_m to dest_m.  The pages must be stable but spl
 * and other protections depend on context.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}

/*
 * vm_page_free:
 *
 *	Free a page.
 *
 *	The clearing of PG_ZERO is a temporary safety until the code can be
 *	reviewed to determine that PG_ZERO is being properly cleared on
 *	write faults or maps.  PG_ZERO was previously cleared in
 *	vm_page_alloc().
 */
static __inline void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue.
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}
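/*
 * Illustrative sketch (not part of the original header): before starting
 * write I/O on a dirty page, callers commonly downgrade it to read-only
 * so that any modification made during the write forces a fault and
 * re-dirties the page, e.g.:
 */
#if 0
static void
example_freeze_for_write(vm_page_t m)
{
	vm_page_io_start(m);			/* mark minor I/O in progress */
	vm_page_protect(m, VM_PROT_READ);	/* catch writes during the I/O */
	/* ... issue the write, then vm_page_io_finish(m) on completion ... */
}
#endif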
/*
 * vm_page_sleep_busy:
 *
 *	Wait until page is no longer PG_BUSY or (if also_m_busy is TRUE)
 *	m->busy is zero.  Returns TRUE if it had to sleep (including if
 *	it almost had to sleep and made temporary spl*() mods), FALSE
 *	otherwise.
 *
 *	This routine assumes that interrupts can only remove the busy
 *	status from a page, not set the busy status or change it from
 *	PG_BUSY to m->busy or vice versa (which would create a timing
 *	window).
 *
 *	Note that being an inline, this code will be well optimized.
 */

static __inline int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
		int s = splvm();
		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
			/*
			 * Page is busy. Wait and retry.
			 */
			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
			tsleep(m, 0, msg, 0);
		}
		splx(s);
		return(TRUE);
		/* not reached */
	}
	return(FALSE);
}

/*
 * vm_page_dirty:
 *
 *	make page all dirty
 */

static __inline void
vm_page_dirty(vm_page_t m)
{
#if !defined(KLD_MODULE)
	KASSERT(m->queue - m->pc != PQ_CACHE,
		("vm_page_dirty: page in cache!"));
#endif
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 * vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits.
 */

static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#if !defined(KLD_MODULE)

static __inline vm_page_t
vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	vm_page_t m;

#if PQ_L2_SIZE > 1
	if (prefer_zero) {
		m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
	} else {
		m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
	}
	if (m == NULL)
		m = _vm_page_list_find(basequeue, index);
#else
	if (prefer_zero) {
		m = TAILQ_LAST(&vm_page_queues[basequeue].pl, pglist);
	} else {
		m = TAILQ_FIRST(&vm_page_queues[basequeue].pl);
	}
#endif
	return(m);
}

#endif

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */
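/*
 * Usage sketch (illustrative, not part of the original header): callers
 * that need exclusive ownership of a page typically loop on
 * vm_page_sleep_busy() and re-check after every wakeup, since the page
 * can change state while they slept:
 *
 *	while (vm_page_sleep_busy(m, TRUE, "pgwait"))
 *		;
 *	vm_page_busy(m);
 */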