/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
 * $DragonFly: src/sys/vm/vm_page.c,v 1.25 2004/05/27 00:38:58 dillon Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * Resident memory management module.  The module manipulates 'VM pages'.
 * A VM page is the core building block for memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>

static void vm_page_queue_init(void);
static void vm_page_free_wakeup(void);
static vm_page_t vm_page_select_cache(vm_object_t, vm_pindex_t);
static vm_page_t _vm_page_list_find2(int basequeue, int index);

static int vm_page_bucket_count;		/* How big is array? */
static int vm_page_hash_mask;			/* Mask for hash function */
static struct vm_page **vm_page_buckets;	/* Array of buckets */
static volatile int vm_page_bucket_generation;
struct vpgqueues vm_page_queues[PQ_COUNT];	/* Array of tailq lists */

static void
vm_page_queue_init(void)
{
	int i;

	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_FREE+i].cnt = &vmstats.v_free_count;
	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_CACHE+i].cnt = &vmstats.v_cache_count;

	vm_page_queues[PQ_INACTIVE].cnt = &vmstats.v_inactive_count;
	vm_page_queues[PQ_ACTIVE].cnt = &vmstats.v_active_count;
	vm_page_queues[PQ_HOLD].cnt = &vmstats.v_active_count;
	/* PQ_NONE has no queue */

	for (i = 0; i < PQ_COUNT; i++)
		TAILQ_INIT(&vm_page_queues[i].pl);
}

/*
 * note: place in initialized data section?  Is this necessary?
 */
long first_page = 0;
int vm_page_array_size = 0;
int vm_page_zero_count = 0;
vm_page_t vm_page_array = 0;

/*
 * (low level boot)
 *
 * Sets the page size, perhaps based upon the memory size.
 * Must be called before any use of page-size dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vmstats.v_page_size == 0)
		vmstats.v_page_size = PAGE_SIZE;
	if (((vmstats.v_page_size - 1) & vmstats.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 * (low level boot)
 *
 * Add a new page to the freelist for use by the system.  New pages
 * are added to both the head and tail of the associated free page
 * queue in a bottom-up fashion, so both zero'd and non-zero'd page
 * requests pull 'recent' adds (higher physical addresses) first.
 *
 * Must be called at splhigh().
 */
vm_page_t
vm_add_new_page(vm_paddr_t pa)
{
	struct vpgqueues *vpq;
	vm_page_t m;

	++vmstats.v_page_count;
	++vmstats.v_free_count;
	m = PHYS_TO_VM_PAGE(pa);
	m->phys_addr = pa;
	m->flags = 0;
	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
	m->queue = m->pc + PQ_FREE;

	vpq = &vm_page_queues[m->queue];
	if (vpq->flipflop)
		TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
	else
		TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
	vpq->flipflop = 1 - vpq->flipflop;

	vm_page_queues[m->queue].lcnt++;
	return (m);
}

/*
 * (low level boot)
 *
 * Initializes the resident memory module.
 *
 * Allocates memory for the page cells, and for the object/offset-to-page
 * hash table headers.  Each page cell is initialized and placed on the
 * free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
{
	vm_offset_t mapped;
	struct vm_page **bucket;
	vm_size_t npages;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	int nblocks;
	vm_paddr_t last_pa;

	/* the biggest memory array is the second group of pages */
	vm_paddr_t end;
	vm_paddr_t biggestone, biggestsize;

	vm_paddr_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	end = phys_avail[biggestone+1];

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */

	vm_page_queue_init();

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * We make the hash table approximately 2x the number of pages to
	 * reduce the chain length.  This is about the same size using the
	 * singly-linked list as the 1x hash table we were using before
	 * using TAILQ but the chain length will be smaller.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct vm_page **)vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_bucket_count <<= 1;
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */
	new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
	new_end = trunc_page(new_end);
	mapped = round_page(vaddr);
	vaddr = pmap_map(mapped, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vaddr = round_page(vaddr);
	bzero((caddr_t) mapped, vaddr - mapped);

	for (i = 0; i < vm_page_bucket_count; i++) {
		*bucket = NULL;
		bucket++;
	}

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = phys_avail[0] / PAGE_SIZE;
	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (end - new_end)) / PAGE_SIZE;

	end = new_end;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	/*
	 * Construct the free queue(s) in ascending order (by physical
	 * address) so that the first 16MB of physical memory is allocated
	 * last rather than first.  On large-memory machines, this avoids
	 * the exhaustion of low physical memory before isa_dmainit has run.
	 */
	vmstats.v_page_count = 0;
	vmstats.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		pa = phys_avail[i];
		if (i == biggestone)
			last_pa = new_end;
		else
			last_pa = phys_avail[i + 1];
		while (pa < last_pa && npages-- > 0) {
			vm_add_new_page(pa);
			pa += PAGE_SIZE;
		}
	}
	return (mapped);
}

/*
 * Distributes the object/offset key pair among hash buckets.
 *
 * NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 * This routine may not block.
 *
 * We try to randomize the hash based on the object to spread the pages
 * out in the hash table without it costing us too much.
 */
static __inline int
vm_page_hash(vm_object_t object, vm_pindex_t pindex)
{
	int i = ((uintptr_t)object + pindex) ^ object->hash_rand;

	return(i & vm_page_hash_mask);
}

/*
 * The opposite of vm_page_hold().  A page can be freed while being held,
 * which places it on the PQ_HOLD queue.  We must call vm_page_free_toq()
 * in this case to actually free it once the hold count drops to 0.
 *
 * This routine must be called at splvm().
 */
void
vm_page_unhold(vm_page_t mem)
{
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
		vm_page_free_toq(mem);
}

/*
 * Inserts the given mem entry into the object and object list.
 *
 * The pagetables are not updated but will presumably fault the page
 * in if necessary, or if a kernel page the caller will at some point
 * enter the page into the kernel's pmap.  We are not allowed to block
 * here so we *can't* do this anyway.
 *
 * This routine may not block.
 * This routine must be called at splvm().
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	struct vm_page **bucket;

	if (m->object != NULL)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object/offset hash table
	 */
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	m->hnext = *bucket;
	*bucket = m;
	vm_page_bucket_generation++;

	/*
	 * Now link into the object's list of backed pages.
	 */
	TAILQ_INSERT_TAIL(&object->memq, m, listq);
	object->generation++;

	/*
	 * show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
	 */
	if (m->flags & PG_WRITEABLE)
		vm_object_set_writeable_dirty(object);
}

/*
 * Removes the given mem entry from the object/offset-page table and
 * the object page list, but does not invalidate/terminate the backing store.
 *
 * This routine must be called at splvm().
 * The underlying pmap entry (if any) is NOT removed here.
 * This routine may not block.
 * The page must be BUSY.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;

	if (m->object == NULL)
		return;

	if ((m->flags & PG_BUSY) == 0)
		panic("vm_page_remove: page not busy");

	/*
	 * Basically destroy the page.
	 */
	vm_page_wakeup(m);

	object = m->object;

	/*
	 * Remove from the object/offset hash table.  The object
	 * must be on the hash queue; we will panic if it isn't.
	 *
	 * Note: we must NULL-out m->hnext to prevent loops in detached
	 * buffers with vm_page_lookup().
	 */
	{
		struct vm_page **bucket;

		bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
		while (*bucket != m) {
			if (*bucket == NULL)
				panic("vm_page_remove(): page not found in hash");
			bucket = &(*bucket)->hnext;
		}
		*bucket = m->hnext;
		m->hnext = NULL;
		vm_page_bucket_generation++;
	}

	/*
	 * Now remove from the object's list of backed pages.
	 */
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;
	object->generation++;

	m->object = NULL;
}

/*
 * Locate and return the page at (object, pindex), or NULL if the
 * page could not be found.
 *
 * This routine will operate properly without spl protection, but
 * the returned page could be in flux if it is busy.  Because an
 * interrupt can race a caller's busy check (unbusying and freeing the
 * page we return before the caller is able to check the busy bit),
 * the caller should generally call this routine at splvm().
 *
 * Callers may call this routine without spl protection if they know
 * 'for sure' that the page will not be ripped out from under them
 * by an interrupt.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;
	struct vm_page **bucket;
	int generation;

	/*
	 * Search the hash table for this object/offset pair
	 */
retry:
	generation = vm_page_bucket_generation;
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	for (m = *bucket; m != NULL; m = m->hnext) {
		if ((m->object == object) && (m->pindex == pindex)) {
			if (vm_page_bucket_generation != generation)
				goto retry;
			return (m);
		}
	}
	if (vm_page_bucket_generation != generation)
		goto retry;
	return (NULL);
}

/*
 * vm_page_rename()
 *
 * Move the given memory entry from its current object to the specified
 * target object/offset.
 *
 * The object must be locked.
 * This routine may not block.
 *
 * Note: This routine will raise itself to splvm(), the caller need not.
 *
 * Note: Swap associated with the page must be invalidated by the move.  We
 *       have to do this for several reasons: (1) we aren't freeing the
 *       page, (2) we are dirtying the page, (3) the VM system is probably
 *       moving the page from object A to B, and will then later move
 *       the backing store from A to B and we can't have a conflict.
 *
 * Note: We *always* dirty the page.  It is necessary both for the
 *       fact that we moved it, and because we may be invalidating
 *       swap.  If the page is on the cache, we have to deactivate it
 *       or vm_page_dirty() will panic.  Dirty pages are not allowed
 *       on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	int s;

	s = splvm();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	if (m->queue - m->pc == PQ_CACHE)
		vm_page_deactivate(m);
	vm_page_dirty(m);
	splx(s);
}

/*
 * vm_page_unqueue() without any wakeup.  This routine is used when a page
 * is being moved between queues or otherwise is to remain busied by the
 * caller.
 *
 * This routine must be called at splhigh().
 * This routine may not block.
 */
void
vm_page_unqueue_nowakeup(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
	}
}

/*
 * vm_page_unqueue() - Remove a page from its queue, wakeup the pagedaemon
 * if necessary.
 *
 * This routine must be called at splhigh().
 * This routine may not block.
 */
void
vm_page_unqueue(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
		if ((queue - m->pc) == PQ_CACHE) {
			if (vm_paging_needed())
				pagedaemon_wakeup();
		}
	}
}

/*
 * vm_page_list_find()
 *
 * Find a page on the specified queue with color optimization.
 *
 * The page coloring optimization attempts to locate a page that does
 * not overload other nearby pages in the object in the cpu's L1 or L2
 * caches.  We need this optimization because cpu caches tend to be
 * physical caches, while object spaces tend to be virtual.
 *
 * This routine must be called at splvm().
 * This routine may not block.
 *
 * Note that this routine is carefully inlined.  A non-inlined version
 * is available for outside callers but the only critical path is
 * from within this source file.
 */
static __inline
vm_page_t
_vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	vm_page_t m;

	if (prefer_zero)
		m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
	else
		m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
	if (m == NULL)
		m = _vm_page_list_find2(basequeue, index);
	return(m);
}

static vm_page_t
_vm_page_list_find2(int basequeue, int index)
{
	int i;
	vm_page_t m = NULL;
	struct vpgqueues *pq;

	pq = &vm_page_queues[basequeue];

	/*
	 * Note that for the first loop, index+i and index-i wind up at the
	 * same place.  Even though this is not totally optimal, we've already
	 * blown it by missing the cache case so we do not care.
	 */

	for (i = PQ_L2_SIZE / 2; i > 0; --i) {
		if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
			break;

		if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
			break;
	}
	return(m);
}

vm_page_t
vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	return(_vm_page_list_find(basequeue, index, prefer_zero));
}

/*
 * Find a page on the cache queue with color optimization.  As pages
 * might be found, but not applicable, they are deactivated.  This
 * keeps us from using potentially busy cached pages.
 *
 * This routine must be called at splvm().
 * This routine may not block.
 */
vm_page_t
vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	while (TRUE) {
		m = _vm_page_list_find(
		    PQ_CACHE,
		    (pindex + object->pg_color) & PQ_L2_MASK,
		    FALSE
		);
		if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
		    m->hold_count || m->wire_count)) {
			vm_page_deactivate(m);
			continue;
		}
		return m;
	}
	/* not reached */
}

/*
 * Find a free or zero page, with specified preference.  We attempt to
 * inline the nominal case and fall back to _vm_page_list_find()
 * otherwise.
 *
 * This routine must be called at splvm().
 * This routine may not block.
 */
static __inline vm_page_t
vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
{
	vm_page_t m;

	m = _vm_page_list_find(
	    PQ_FREE,
	    (pindex + object->pg_color) & PQ_L2_MASK,
	    prefer_zero
	);
	return(m);
}

/*
 * vm_page_alloc()
 *
 * Allocate and return a memory cell associated with this VM object/offset
 * pair.
 *
 * page_req classes:
 *
 *	VM_ALLOC_NORMAL		allow use of cache pages, nominal free drain
 *	VM_ALLOC_SYSTEM		greater free drain
 *	VM_ALLOC_INTERRUPT	allow free list to be completely drained
 *	VM_ALLOC_ZERO		advisory request for pre-zero'd page
 *
 * The object must be locked.
 * This routine may not block.
 *
 * Additional special handling is required when called from an interrupt
 * (VM_ALLOC_INTERRUPT).  We are not allowed to mess with the page cache
 * in this case.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
{
	vm_page_t m = NULL;
	int s;

	KASSERT(!vm_page_lookup(object, pindex),
		("vm_page_alloc: page already allocated"));
	KKASSERT(page_req &
		(VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));

	/*
	 * The pager is allowed to eat deeper into the free page list.
	 */
	if (curthread == pagethread)
		page_req |= VM_ALLOC_SYSTEM;

	s = splvm();
loop:
	if (vmstats.v_free_count > vmstats.v_free_reserved ||
	    ((page_req & VM_ALLOC_INTERRUPT) && vmstats.v_free_count > 0) ||
	    ((page_req & VM_ALLOC_SYSTEM) && vmstats.v_cache_count == 0 &&
		vmstats.v_free_count > vmstats.v_interrupt_free_min)
	) {
		/*
		 * The free queue has sufficient free pages to take one out.
		 */
		if (page_req & VM_ALLOC_ZERO)
			m = vm_page_select_free(object, pindex, TRUE);
		else
			m = vm_page_select_free(object, pindex, FALSE);
	} else if (page_req & VM_ALLOC_NORMAL) {
		/*
		 * Allocatable from the cache (non-interrupt only).  On
		 * success, we must free the page and try again, thus
		 * ensuring that vmstats.v_*_free_min counters are replenished.
		 */
#ifdef INVARIANTS
		if (curthread->td_preempted) {
			printf("vm_page_alloc(): warning, attempt to allocate"
				" cache page from preempting interrupt\n");
			m = NULL;
		} else {
			m = vm_page_select_cache(object, pindex);
		}
#else
		m = vm_page_select_cache(object, pindex);
#endif
		/*
		 * On success move the page into the free queue and loop.
		 */
		if (m != NULL) {
			KASSERT(m->dirty == 0,
				("Found dirty cache page %p", m));
			vm_page_busy(m);
			vm_page_protect(m, VM_PROT_NONE);
			vm_page_free(m);
			goto loop;
		}

		/*
		 * On failure return NULL
		 */
		splx(s);
#if defined(DIAGNOSTIC)
		if (vmstats.v_cache_count > 0)
			printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", vmstats.v_cache_count);
#endif
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	} else {
		/*
		 * No pages available, wakeup the pageout daemon and give up.
		 */
		splx(s);
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 * Good page found.
	 */
	KASSERT(m != NULL, ("vm_page_alloc(): missing page on free queue\n"));

	/*
	 * Remove from free queue
	 */
	vm_page_unqueue_nowakeup(m);

	/*
	 * Initialize structure.  Only the PG_ZERO flag is inherited.
	 */
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		m->flags = PG_ZERO | PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}
	m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;
	KASSERT(m->dirty == 0,
		("vm_page_alloc: free/cache page %p was dirty", m));

	/*
	 * vm_page_insert() is safe prior to the splx().  Note also that
	 * inserting a page here does not insert it into the pmap (which
	 * could cause us to block allocating memory).  We cannot block
	 * anywhere.
	 */
	vm_page_insert(m, object, pindex);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	splx(s);
	return (m);
}

/*
 * Block until free pages are available for allocation, called in various
 * places before memory allocations.
 */
void
vm_wait(void)
{
	int s;

	s = splvm();
	if (curthread == pagethread) {
		vm_pageout_pages_needed = 1;
		tsleep(&vm_pageout_pages_needed, 0, "VMWait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		tsleep(&vmstats.v_free_count, 0, "vmwait", 0);
	}
	splx(s);
}

/*
 * Block until free pages are available for allocation
 *
 * Called only in vm_fault so that processes page faulting can be
 * easily tracked.
 *
 * Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 * processes will be able to grab memory first.  Do not change
 * this balance without careful testing first.
 */
void
vm_waitpfault(void)
{
	int s;

	s = splvm();
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	tsleep(&vmstats.v_free_count, 0, "pfault", 0);
	splx(s);
}
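
/*
 * Illustrative sketch (not part of this module): the usual way callers
 * pair vm_page_alloc() with vm_wait().  If the allocation fails because
 * free memory is low, the caller blocks in vm_wait() and retries.  The
 * object, pindex and the splvm()/object locking context are assumed to be
 * set up by the caller; the loop below only demonstrates the retry idiom
 * and is not a verbatim copy of any specific caller in the tree.
 *
 *	do {
 *		m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 *		if (m == NULL)
 *			vm_wait();
 *	} while (m == NULL);
 */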

/*
 * Put the specified page on the active list (if appropriate).  Ensure
 * that act_count is at least ACT_INIT but do not otherwise mess with it.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
	int s;

	s = splvm();
	if (m->queue != PQ_ACTIVE) {
		if ((m->queue - m->pc) == PQ_CACHE)
			mycpu->gd_cnt.v_reactivated++;

		vm_page_unqueue(m);

		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
			m->queue = PQ_ACTIVE;
			vm_page_queues[PQ_ACTIVE].lcnt++;
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl,
			    m, pageq);
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			vmstats.v_active_count++;
		}
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}

	splx(s);
}

/*
 * Helper routine for vm_page_free_toq() and vm_page_cache().  This
 * routine is called when a page has been added to the cache or free
 * queues.
 *
 * This routine may not block.
 * This routine must be called at splvm()
 */
static __inline void
vm_page_free_wakeup(void)
{
	/*
	 * if pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed &&
	    vmstats.v_cache_count + vmstats.v_free_count >=
	    vmstats.v_pageout_free_min
	) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}

	/*
	 * Wakeup processes that are waiting on memory if we hit a
	 * high water mark, and wakeup the scheduler process if we have
	 * lots of memory.  That process will swap in other processes.
	 */
	if (vm_pages_needed && !vm_page_count_min()) {
		vm_pages_needed = 0;
		wakeup(&vmstats.v_free_count);
	}
}

/*
 * vm_page_free_toq:
 *
 * Returns the given page to the PQ_FREE list,
 * disassociating it from any VM object.
 *
 * Object and page must be locked prior to entry.
 * This routine may not block.
 */
void
vm_page_free_toq(vm_page_t m)
{
	int s;
	struct vpgqueues *pq;

	s = splvm();
	mycpu->gd_cnt.v_tfree++;

	if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
		printf(
		"vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
		    (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
		    m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}

	/*
	 * unqueue, then remove page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */
	vm_page_unqueue_nowakeup(m);
	vm_page_remove(m);

	/*
	 * No further management of fictitious pages occurs beyond object
	 * and queue removal.
	 */
	if ((m->flags & PG_FICTITIOUS) != 0) {
		splx(s);
		return;
	}

	m->valid = 0;
	vm_page_undirty(m);

	if (m->wire_count != 0) {
		if (m->wire_count > 1) {
			panic(
			"vm_page_free: invalid wire count (%d), pindex: 0x%lx",
			    m->wire_count, (long)m->pindex);
		}
		panic("vm_page_free: freeing wired page");
	}

	/*
	 * Clear the UNMANAGED flag when freeing an unmanaged page.
	 */
	if (m->flags & PG_UNMANAGED) {
		m->flags &= ~PG_UNMANAGED;
	} else {
#ifdef __alpha__
		pmap_page_is_free(m);
#endif
	}

	if (m->hold_count != 0) {
		m->flags &= ~PG_ZERO;
		m->queue = PQ_HOLD;
	} else {
		m->queue = PQ_FREE + m->pc;
	}
	pq = &vm_page_queues[m->queue];
	pq->lcnt++;
	++(*pq->cnt);

	/*
	 * Put zero'd pages at the end (where we look for zero'd pages
	 * first) and non-zero'd pages at the head.
	 */
	if (m->flags & PG_ZERO) {
		TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
		++vm_page_zero_count;
	} else {
		TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
	}

	vm_page_free_wakeup();
	splx(s);
}

/*
 * vm_page_unmanage()
 *
 * Prevent PV management from being done on the page.  The page is
 * removed from the paging queues as if it were wired, and as a
 * consequence of no longer being managed the pageout daemon will not
 * touch it (since there is no way to locate the pte mappings for the
 * page).  madvise() calls that mess with the pmap will also no longer
 * operate on the page.
 *
 * Beyond that the page is still reasonably 'normal'.  Freeing the page
 * will clear the flag.
 *
 * This routine is used by OBJT_PHYS objects - objects using unswappable
 * physical memory as backing store rather than swap-backed memory and
 * will eventually be extended to support 4MB unmanaged physical
 * mappings.
 */
void
vm_page_unmanage(vm_page_t m)
{
	int s;

	s = splvm();
	if ((m->flags & PG_UNMANAGED) == 0) {
		if (m->wire_count == 0)
			vm_page_unqueue(m);
	}
	vm_page_flag_set(m, PG_UNMANAGED);
	splx(s);
}

/*
 * Mark this page as wired down by yet another map, removing it from
 * paging queues as necessary.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{
	int s;

	/*
	 * Only bump the wire statistics if the page is not already wired,
	 * and only unqueue the page if it is on some queue (if it is unmanaged
	 * it is already off the queues).  Don't do anything with fictitious
	 * pages because they are always wired.
	 */
	s = splvm();
	if ((m->flags & PG_FICTITIOUS) == 0) {
		if (m->wire_count == 0) {
			if ((m->flags & PG_UNMANAGED) == 0)
				vm_page_unqueue(m);
			vmstats.v_wire_count++;
		}
		m->wire_count++;
		KASSERT(m->wire_count != 0,
		    ("vm_page_wire: wire_count overflow m=%p", m));
	}
	splx(s);
	vm_page_flag_set(m, PG_MAPPED);
}

/*
 * Release one wiring of this page, potentially enabling it to be paged again.
 *
 * Many pages placed on the inactive queue should actually go
 * into the cache, but it is difficult to figure out which.  What
 * we do instead, if the inactive target is well met, is to put
 * clean pages at the head of the inactive queue instead of the tail.
 * This will cause them to be moved to the cache more quickly and
 * if not actively re-referenced, freed more quickly.  If we just
 * stick these pages at the end of the inactive queue, heavy filesystem
 * meta-data accesses can cause an unnecessary paging load on memory bound
 * processes.  This optimization causes one-time-use metadata to be
 * reused more quickly.
 *
 * BUT, if we are in a low-memory situation we have no choice but to
 * put clean pages on the cache queue.
 *
 * A number of routines use vm_page_unwire() to guarantee that the page
 * will go into either the inactive or active queues, and will NEVER
 * be placed in the cache - for example, just after dirtying a page.
 * Dirty pages in the cache are not allowed.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{
	int s;

	s = splvm();
	if (m->flags & PG_FICTITIOUS) {
		/* do nothing */
	} else if (m->wire_count <= 0) {
		panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
	} else {
		if (--m->wire_count == 0) {
			--vmstats.v_wire_count;
			if (m->flags & PG_UNMANAGED) {
				;
			} else if (activate) {
				TAILQ_INSERT_TAIL(
				    &vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				m->queue = PQ_ACTIVE;
				vm_page_queues[PQ_ACTIVE].lcnt++;
				vmstats.v_active_count++;
			} else {
				vm_page_flag_clear(m, PG_WINATCFLS);
				TAILQ_INSERT_TAIL(
				    &vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				m->queue = PQ_INACTIVE;
				vm_page_queues[PQ_INACTIVE].lcnt++;
				vmstats.v_inactive_count++;
			}
		}
	}
	splx(s);
}


/*
 * Move the specified page to the inactive queue.  If the page has
 * any associated swap, the swap is deallocated.
 *
 * Normally athead is 0 resulting in LRU operation.  athead is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * This routine may not block.
 */
static __inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
	int s;

	/*
	 * Ignore if already inactive.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	s = splvm();
	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			mycpu->gd_cnt.v_reactivated++;
		vm_page_flag_clear(m, PG_WINATCFLS);
		vm_page_unqueue(m);
		if (athead)
			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		else
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		m->queue = PQ_INACTIVE;
		vm_page_queues[PQ_INACTIVE].lcnt++;
		vmstats.v_inactive_count++;
	}
	splx(s);
}

void
vm_page_deactivate(vm_page_t m)
{
	_vm_page_deactivate(m, 0);
}

/*
 * vm_page_try_to_cache:
 *
 * Returns 0 on failure, 1 on success
 */
int
vm_page_try_to_cache(vm_page_t m)
{
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty)
		return(0);
	vm_page_cache(m);
	return(1);
}
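
/*
 * Illustrative sketch (not part of this module): how a release path might
 * combine vm_page_unwire() and vm_page_try_to_cache(), along the lines of
 * the buffer cache's page release.  The helper name is made up for this
 * example and the caller is assumed to hold the appropriate spl and page
 * queue protection; consult the real callers before copying this pattern.
 *
 *	static void
 *	example_release_page(vm_page_t m)
 *	{
 *		int s = splvm();
 *
 *		vm_page_unwire(m, 0);
 *		if (m->wire_count == 0 && m->valid != 0)
 *			vm_page_try_to_cache(m);
 *		splx(s);
 *	}
 */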

/*
 * Attempt to free the page.  If we cannot free it, we do nothing.
 * 1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty)
		return(0);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	return(1);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 *
 * This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{
	int s;

	if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
	    m->wire_count || m->hold_count) {
		printf("vm_page_cache: attempting to cache busy/held page\n");
		return;
	}
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	/*
	 * Remove all pmaps and indicate that the page is not
	 * writeable or mapped.
	 */

	vm_page_protect(m, VM_PROT_NONE);
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %ld",
			(long)m->pindex);
	}
	s = splvm();
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	vm_page_queues[m->queue].lcnt++;
	TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
	vmstats.v_cache_count++;
	vm_page_free_wakeup();
	splx(s);
}

/*
 * vm_page_dontneed()
 *
 * Cache, deactivate, or do nothing as appropriate.  This routine
 * is typically used by madvise() MADV_DONTNEED.
 *
 * Generally speaking we want to move the page into the cache so
 * it gets reused quickly.  However, this can result in a silly syndrome
 * due to the page recycling too quickly.  Small objects will not be
 * fully cached.  On the other hand, if we move the page to the inactive
 * queue we wind up with a problem whereby very large objects
 * unnecessarily blow away our inactive and cache queues.
 *
 * The solution is to move the pages based on a fixed weighting.  We
 * either leave them alone, deactivate them, or move them to the cache,
 * where moving them to the cache has the highest weighting.
 * By forcing some pages into other queues we eventually force the
 * system to balance the queues, potentially recovering other unrelated
 * space from active.  The idea is to not force this to happen too
 * often.
 */
void
vm_page_dontneed(vm_page_t m)
{
	static int dnweight;
	int dnw;
	int head;

	dnw = ++dnweight;

	/*
	 * Occasionally leave the page alone.
	 */

	if ((dnw & 0x01F0) == 0 ||
	    m->queue == PQ_INACTIVE ||
	    m->queue - m->pc == PQ_CACHE
	) {
		if (m->act_count >= ACT_INIT)
			--m->act_count;
		return;
	}

	if (m->dirty == 0)
		vm_page_test_dirty(m);

	if (m->dirty || (dnw & 0x0070) == 0) {
		/*
		 * Deactivate the page 3 times out of 32.
		 */
		head = 0;
	} else {
		/*
		 * Cache the page 28 times out of every 32.  Note that
		 * the page is deactivated instead of cached, but placed
		 * at the head of the queue instead of the tail.
		 */
		head = 1;
	}
	_vm_page_deactivate(m, head);
}

/*
 * Grab a page, blocking if it is busy and allocating a page if necessary.
 * A busy page is returned or NULL.
 *
 * If VM_ALLOC_RETRY is specified VM_ALLOC_NORMAL must also be specified.
 * If VM_ALLOC_RETRY is not specified the caller must be prepared for a
 * NULL return.
 *
 * This routine may block, but if VM_ALLOC_RETRY is not set then NULL is
 * always returned if we had blocked.
 * This routine will never return NULL if VM_ALLOC_RETRY is set.
 * This routine may not be called from an interrupt.
 * The returned page may not be entirely valid.
 *
 * This routine may be called from mainline code without spl protection and
 * be guaranteed a busied page associated with the object at the specified
 * index.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;
	int s, generation;

	KKASSERT(allocflags &
		(VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));
	s = splvm();
retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (m->busy || (m->flags & PG_BUSY)) {
			generation = object->generation;

			while ((object->generation == generation) &&
			    (m->busy || (m->flags & PG_BUSY))) {
				vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
				tsleep(m, 0, "pgrbwt", 0);
				if ((allocflags & VM_ALLOC_RETRY) == 0) {
					m = NULL;
					goto done;
				}
			}
			goto retrylookup;
		} else {
			vm_page_busy(m);
			goto done;
		}
	}
	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
	if (m == NULL) {
		vm_wait();
		if ((allocflags & VM_ALLOC_RETRY) == 0)
			goto done;
		goto retrylookup;
	}
done:
	splx(s);
	return(m);
}

/*
 * Mapping function for valid bits or for dirty bits in
 * a page.  May not block.
 *
 * Inputs are required to range within a page.
 */
__inline int
vm_page_bits(int base, int size)
{
	int first_bit;
	int last_bit;

	KASSERT(
	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)
	);

	if (size == 0)		/* handle degenerate case */
		return(0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return ((2 << last_bit) - (1 << first_bit));
}
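
/*
 * Worked example (illustrative, assuming DEV_BSIZE is 512 so DEV_BSHIFT
 * is 9): vm_page_bits(512, 1024) covers the second and third 512-byte
 * chunks of the page.  first_bit = 512 >> 9 = 1, last_bit = 1535 >> 9 = 2,
 * so the result is (2 << 2) - (1 << 1) = 8 - 2 = 0x06, i.e. bits 1 and 2
 * set in the valid/dirty bitmap.
 */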

/*
 * Sets portions of a page valid and clean.  The arguments are expected
 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 * of any partial chunks touched by the range.  The invalid portion of
 * such chunks will be zero'd.
 *
 * This routine may not block.
 *
 * (base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
	int pagebits;
	int frag;
	int endoff;

	if (size == 0)		/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */

	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    frag,
		    base - frag
		);
	}

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */

	endoff = base + size;

	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
		);
	}

	/*
	 * Set valid, clear dirty bits.  If validating the entire
	 * page we can safely clear the pmap modify bit.  We also
	 * use this opportunity to clear the PG_NOSYNC flag.  If a process
	 * takes a write fault on a MAP_NOSYNC memory area the flag will
	 * be set again.
	 *
	 * We set valid bits inclusive of any overlap, but we can only
	 * clear dirty bits for DEV_BSIZE chunks that are fully within
	 * the range.
	 */

	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
#if 0	/* NOT YET */
	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
		frag = DEV_BSIZE - frag;
		base += frag;
		size -= frag;
		if (size < 0)
			size = 0;
	}
	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
#endif
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE) {
		pmap_clear_modify(m);
		vm_page_flag_clear(m, PG_NOSYNC);
	}
}

void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{
	m->dirty &= ~vm_page_bits(base, size);
}

/*
 * Invalidates DEV_BSIZE'd chunks within a page.  Both the
 * valid and dirty bits for the affected areas are cleared.
 *
 * May not block.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
	int bits;

	bits = vm_page_bits(base, size);
	m->valid &= ~bits;
	m->dirty &= ~bits;
	m->object->generation++;
}

/*
 * The kernel assumes that the invalid portions of a page contain
 * garbage, but such pages can be mapped into memory by user code.
 * When this occurs, we must zero out the non-valid portions of the
 * page so user code sees what it expects.
 *
 * Pages are most often semi-valid when the end of a file is mapped
 * into memory and the file's size is not page aligned.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
	int b;
	int i;

	/*
	 * Scan the valid bits looking for invalid sections that
	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
	 * valid bit may be set) have already been zeroed by
	 * vm_page_set_validclean().
	 */
	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & (1 << i))
		) {
			if (i > b) {
				pmap_zero_page_area(
				    VM_PAGE_TO_PHYS(m),
				    b << DEV_BSHIFT,
				    (i - b) << DEV_BSHIFT
				);
			}
			b = i + 1;
		}
	}

	/*
	 * setvalid is TRUE when we can safely set the zero'd areas
	 * as being valid.  We can do this if there are no cache consistency
	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
	 */
	if (setvalid)
		m->valid = VM_PAGE_BITS_ALL;
}

/*
 * Is a (partial) page valid?  Note that the case where size == 0
 * will return FALSE in the degenerate case where the page is entirely
 * invalid, and TRUE otherwise.
 *
 * May not block.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

/*
 * update dirty bits from pmap/mmu.  May not block.
 */
void
vm_page_test_dirty(vm_page_t m)
{
	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
		vm_page_dirty(m);
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("vmstats.v_free_count: %d\n", vmstats.v_free_count);
	db_printf("vmstats.v_cache_count: %d\n", vmstats.v_cache_count);
	db_printf("vmstats.v_inactive_count: %d\n", vmstats.v_inactive_count);
	db_printf("vmstats.v_active_count: %d\n", vmstats.v_active_count);
	db_printf("vmstats.v_wire_count: %d\n", vmstats.v_wire_count);
	db_printf("vmstats.v_free_reserved: %d\n", vmstats.v_free_reserved);
	db_printf("vmstats.v_free_min: %d\n", vmstats.v_free_min);
	db_printf("vmstats.v_free_target: %d\n", vmstats.v_free_target);
	db_printf("vmstats.v_cache_min: %d\n", vmstats.v_cache_min);
	db_printf("vmstats.v_inactive_target: %d\n", vmstats.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;

	db_printf("PQ_FREE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_CACHE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
	    vm_page_queues[PQ_ACTIVE].lcnt,
	    vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */