/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
 * $DragonFly: src/sys/vm/vm_page.c,v 1.15 2003/11/03 17:11:23 dillon Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Resident memory management module.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>

static void vm_page_queue_init (void);
static vm_page_t vm_page_select_cache (vm_object_t, vm_pindex_t);

/*
 * Associated with each page of user-allocatable memory is a
 * page structure.
 */

static struct vm_page **vm_page_buckets; /* Array of buckets */
static int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */
static volatile int vm_page_bucket_generation;

struct vpgqueues vm_page_queues[PQ_COUNT];

static void
vm_page_queue_init(void)
{
	int i;

	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_FREE+i].cnt = &vmstats.v_free_count;
	}
	vm_page_queues[PQ_INACTIVE].cnt = &vmstats.v_inactive_count;
	vm_page_queues[PQ_ACTIVE].cnt = &vmstats.v_active_count;
	vm_page_queues[PQ_HOLD].cnt = &vmstats.v_active_count;
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_CACHE+i].cnt = &vmstats.v_cache_count;
	}
	for (i = 0; i < PQ_COUNT; i++) {
		TAILQ_INIT(&vm_page_queues[i].pl);
	}
}

vm_page_t vm_page_array = 0;
int vm_page_array_size = 0;
long first_page = 0;
int vm_page_zero_count = 0;

static __inline int vm_page_hash (vm_object_t object, vm_pindex_t pindex);
static void vm_page_free_wakeup (void);

/*
 * vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vmstats.v_page_size == 0)
		vmstats.v_page_size = PAGE_SIZE;
	if (((vmstats.v_page_size - 1) & vmstats.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 * vm_add_new_page:
 *
 *	Add a new page to the freelist for use by the system.  New pages
 *	are added to both the head and tail of the associated free page
 *	queue in a bottom-up fashion, so both zero'd and non-zero'd page
 *	requests pull 'recent' adds (higher physical addresses) first.
 *
 *	Must be called at splhigh().
 */
vm_page_t
vm_add_new_page(vm_paddr_t pa)
{
	vm_page_t m;
	struct vpgqueues *vpq;

	++vmstats.v_page_count;
	++vmstats.v_free_count;
	m = PHYS_TO_VM_PAGE(pa);
	m->phys_addr = pa;
	m->flags = 0;
	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
	m->queue = m->pc + PQ_FREE;
	vpq = &vm_page_queues[m->queue];
	if (vpq->flipflop)
		TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
	else
		TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
	vpq->flipflop = 1 - vpq->flipflop;
	vm_page_queues[m->queue].lcnt++;
	return (m);
}
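/*
 * Page coloring note: vm_add_new_page() above derives a page's color
 * directly from its physical address, so physically consecutive pages
 * cycle through the PQ_FREE[0..PQ_L2_SIZE-1] queues.  For example,
 * with 4K pages (PAGE_SHIFT == 12) and a hypothetical PQ_L2_MASK of
 * 0x3F, pa 0x0000 colors to queue 0, pa 0x1000 to queue 1, and
 * pa 0x40000 wraps back around to queue 0.  The actual mask is set by
 * the kernel's cache-size configuration options.
 */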
/*
 * vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
{
	vm_offset_t mapped;
	struct vm_page **bucket;
	vm_size_t npages;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	int nblocks;
	vm_paddr_t last_pa;

	/* the biggest memory array is the second group of pages */
	vm_paddr_t end;
	vm_paddr_t biggestone, biggestsize;

	vm_paddr_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	end = phys_avail[biggestone+1];

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */
	vm_page_queue_init();

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * We make the hash table approximately 2x the number of pages to
	 * reduce the chain length.  This is about the same size using the
	 * singly-linked list as the 1x hash table we were using before
	 * using TAILQ but the chain length will be smaller.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct vm_page **)vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_bucket_count <<= 1;
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */
	new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
	new_end = trunc_page(new_end);
	mapped = round_page(vaddr);
	vaddr = pmap_map(mapped, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vaddr = round_page(vaddr);
	bzero((caddr_t) mapped, vaddr - mapped);

	for (i = 0; i < vm_page_bucket_count; i++) {
		*bucket = NULL;
		bucket++;
	}

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = phys_avail[0] / PAGE_SIZE;
	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (end - new_end)) / PAGE_SIZE;

	end = new_end;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;
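	/*
	 * Layout note: the vm_page structures are carved out of the top of
	 * the largest contiguous chunk of physical memory (whose 'end' was
	 * already trimmed above to make room for the hash buckets) and then
	 * mapped at 'vaddr', so vm_page_array indexes them through the
	 * kernel virtual mapping.  Taking them from high physical addresses
	 * also helps keep low memory free for ISA DMA; see the
	 * queue-construction comment below.
	 */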
	/*
	 * Validate these addresses.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	/*
	 * Construct the free queue(s) in ascending order (by physical
	 * address) so that the first 16MB of physical memory is allocated
	 * last rather than first.  On large-memory machines, this avoids
	 * the exhaustion of low physical memory before isa_dmainit has run.
	 */
	vmstats.v_page_count = 0;
	vmstats.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		pa = phys_avail[i];
		if (i == biggestone)
			last_pa = new_end;
		else
			last_pa = phys_avail[i + 1];
		while (pa < last_pa && npages-- > 0) {
			vm_add_new_page(pa);
			pa += PAGE_SIZE;
		}
	}
	return (mapped);
}

/*
 * vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 *	This routine may not block.
 *
 *	We try to randomize the hash based on the object to spread the pages
 *	out in the hash table without it costing us too much.
 */
static __inline int
vm_page_hash(vm_object_t object, vm_pindex_t pindex)
{
	int i = ((uintptr_t)object + pindex) ^ object->hash_rand;

	return(i & vm_page_hash_mask);
}

void
vm_page_unhold(vm_page_t mem)
{
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
		vm_page_free_toq(mem);
}

/*
 * vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The pagetables are not updated but will presumably fault the page
 *	in if necessary, or if a kernel page the caller will at some point
 *	enter the page into the kernel's pmap.  We are not allowed to block
 *	here so we *can't* do this anyway.
 *
 *	The object and page must be locked, and must be splhigh.
 *	This routine may not block.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	struct vm_page **bucket;

	if (m->object != NULL)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object_object/offset hash table
	 */
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	m->hnext = *bucket;
	*bucket = m;
	vm_page_bucket_generation++;

	/*
	 * Now link into the object's list of backed pages.
	 */
	TAILQ_INSERT_TAIL(&object->memq, m, listq);
	object->generation++;

	/*
	 * show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
	 */
	if (m->flags & PG_WRITEABLE)
		vm_object_set_writeable_dirty(object);
}
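/*
 * Note on the hash protocol shared by vm_page_insert(), vm_page_remove()
 * and vm_page_lookup(): insertion pushes the page onto the head of its
 * bucket's singly-linked hnext chain in O(1), and every insert or remove
 * bumps vm_page_bucket_generation.  Lookups run without a lock and simply
 * restart if they observe the generation change mid-scan.
 */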
/*
 * vm_page_remove:
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list, but does not invalidate/terminate
 *	the backing store.
 *
 *	The object and page must be locked, and at splhigh.
 *	The underlying pmap entry (if any) is NOT removed here.
 *	This routine may not block.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;

	if (m->object == NULL)
		return;

	if ((m->flags & PG_BUSY) == 0) {
		panic("vm_page_remove: page not busy");
	}

	/*
	 * Basically destroy the page.
	 */
	vm_page_wakeup(m);

	object = m->object;

	/*
	 * Remove from the object_object/offset hash table.  The object
	 * must be on the hash queue, we will panic if it isn't
	 *
	 * Note: we must NULL-out m->hnext to prevent loops in detached
	 * buffers with vm_page_lookup().
	 */
	{
		struct vm_page **bucket;

		bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
		while (*bucket != m) {
			if (*bucket == NULL)
				panic("vm_page_remove(): page not found in hash");
			bucket = &(*bucket)->hnext;
		}
		*bucket = m->hnext;
		m->hnext = NULL;
		vm_page_bucket_generation++;
	}

	/*
	 * Now remove from the object's list of backed pages.
	 */
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;
	object->generation++;

	m->object = NULL;
}

/*
 * vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	NOTE: the code below does not lock.  It will operate properly if
 *	an interrupt makes a change, but the generation algorithm will not
 *	operate properly in an SMP environment where both cpu's are able to run
 *	kernel code simultaneously.
 *
 *	The object must be locked.  No side effects.
 *	This routine may not block.
 *	This is a critical path routine
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;
	struct vm_page **bucket;
	int generation;

	/*
	 * Search the hash table for this object/offset pair
	 */
retry:
	generation = vm_page_bucket_generation;
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	for (m = *bucket; m != NULL; m = m->hnext) {
		if ((m->object == object) && (m->pindex == pindex)) {
			if (vm_page_bucket_generation != generation)
				goto retry;
			return (m);
		}
	}
	if (vm_page_bucket_generation != generation)
		goto retry;
	return (NULL);
}
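/*
 * The retry loop in vm_page_lookup() above is effectively the read side
 * of a sequence lock.  A sketch of the pattern, assuming a single writer
 * running at elevated spl on a UP system, is
 *
 *	do {
 *		gen = generation;
 *		result = scan_bucket();
 *	} while (gen != generation);
 *
 * which is why the routine is safe against interrupt-driven updates but,
 * as the NOTE above says, not against true SMP concurrency.
 */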
/*
 * vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 *	This routine may not block.
 *
 *	Note: this routine will raise itself to splvm(), the caller need not.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons: (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.  If the page is on the cache, we have to deactivate it
 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	      on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	int s;

	s = splvm();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	if (m->queue - m->pc == PQ_CACHE)
		vm_page_deactivate(m);
	vm_page_dirty(m);
	splx(s);
}

/*
 * vm_page_unqueue_nowakeup:
 *
 *	vm_page_unqueue() without any wakeup
 *
 *	This routine must be called at splhigh().
 *	This routine may not block.
 */
void
vm_page_unqueue_nowakeup(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
	}
}

/*
 * vm_page_unqueue:
 *
 *	Remove a page from its queue.
 *
 *	This routine must be called at splhigh().
 *	This routine may not block.
 */
void
vm_page_unqueue(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
		if ((queue - m->pc) == PQ_CACHE) {
			if (vm_paging_needed())
				pagedaemon_wakeup();
		}
	}
}

#if PQ_L2_SIZE > 1

/*
 * vm_page_list_find:
 *
 *	Find a page on the specified queue with color optimization.
 *
 *	The page coloring optimization attempts to locate a page
 *	that does not overload other nearby pages in the object in
 *	the cpu's L1 or L2 caches.  We need this optimization because
 *	cpu caches tend to be physical caches, while object spaces tend
 *	to be virtual.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 *
 *	This routine may only be called from the vm_page_list_find() macro
 *	in vm_page.h
 */
vm_page_t
_vm_page_list_find(int basequeue, int index)
{
	int i;
	vm_page_t m = NULL;
	struct vpgqueues *pq;

	pq = &vm_page_queues[basequeue];

	/*
	 * Note that for the first loop, index+i and index-i wind up at the
	 * same place.  Even though this is not totally optimal, we've already
	 * blown it by missing the cache case so we do not care.
	 */
	for (i = PQ_L2_SIZE / 2; i > 0; --i) {
		if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
			break;

		if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
			break;
	}
	return(m);
}

#endif

/*
 * vm_page_select_cache:
 *
 *	Find a page on the cache queue with color optimization.  Pages
 *	that are found but cannot be used (busy, held, wired, or
 *	unmanaged) are deactivated rather than returned.  This keeps us
 *	from using potentially busy cached pages.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 */
vm_page_t
vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	while (TRUE) {
		m = vm_page_list_find(
		    PQ_CACHE,
		    (pindex + object->pg_color) & PQ_L2_MASK,
		    FALSE
		);
		if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
			  m->hold_count || m->wire_count)) {
			vm_page_deactivate(m);
			continue;
		}
		return m;
	}
}
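/*
 * Both select routines start their search at
 * (pindex + object->pg_color) & PQ_L2_MASK, so pages within a single
 * object tend to spread across distinct colors, and therefore across
 * distinct sets of a physically-indexed CPU cache.
 */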
/*
 * vm_page_select_free:
 *
 *	Find a free or zero page, with specified preference.  We attempt to
 *	inline the nominal case and fall back to _vm_page_list_find()
 *	otherwise.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 */
static __inline vm_page_t
vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
{
	vm_page_t m;

	m = vm_page_list_find(
	    PQ_FREE,
	    (pindex + object->pg_color) & PQ_L2_MASK,
	    prefer_zero
	);
	return(m);
}

/*
 * vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	VM_ALLOC_ZERO		zero page
 *
 *	Object must be locked.
 *	This routine may not block.
 *
 *	Additional special handling is required when called from an
 *	interrupt (VM_ALLOC_INTERRUPT).  We are not allowed to mess with
 *	the page cache in this case.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
{
	vm_page_t m = NULL;
	int s;

	KASSERT(!vm_page_lookup(object, pindex),
	    ("vm_page_alloc: page already allocated"));

	/*
	 * The pager is allowed to eat deeper into the free page list.
	 */
	if ((curthread == pagethread) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	s = splvm();

loop:
	if (vmstats.v_free_count > vmstats.v_free_reserved) {
		/*
		 * Allocate from the free queue if there are plenty of pages
		 * in it.
		 */
		if (page_req == VM_ALLOC_ZERO)
			m = vm_page_select_free(object, pindex, TRUE);
		else
			m = vm_page_select_free(object, pindex, FALSE);
	} else if (
	    (page_req == VM_ALLOC_SYSTEM &&
	     vmstats.v_cache_count == 0 &&
	     vmstats.v_free_count > vmstats.v_interrupt_free_min) ||
	    (page_req == VM_ALLOC_INTERRUPT && vmstats.v_free_count > 0)
	) {
		/*
		 * Interrupt or system, dig deeper into the free list.
		 */
		m = vm_page_select_free(object, pindex, FALSE);
	} else if (page_req != VM_ALLOC_INTERRUPT) {
		/*
		 * Allocatable from cache (non-interrupt only).  On success,
		 * we must free the page and try again, thus ensuring that
		 * vmstats.v_*_free_min counters are replenished.
		 */
		m = vm_page_select_cache(object, pindex);
		if (m == NULL) {
			splx(s);
#if defined(DIAGNOSTIC)
			if (vmstats.v_cache_count > 0)
				printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", vmstats.v_cache_count);
#endif
			vm_pageout_deficit++;
			pagedaemon_wakeup();
			return (NULL);
		}
		KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
		vm_page_busy(m);
		vm_page_protect(m, VM_PROT_NONE);
		vm_page_free(m);
		goto loop;
	} else {
		/*
		 * Not allocatable from cache from interrupt, give up.
		 */
		splx(s);
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 * At this point we had better have found a good page.
	 */
	KASSERT(
	    m != NULL,
	    ("vm_page_alloc(): missing page on free queue\n")
	);

	/*
	 * Remove from free queue
	 */
	vm_page_unqueue_nowakeup(m);
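	/*
	 * Policy recap: by the time we get here the page came from one of
	 * three tiers -- the per-color free queues when v_free_count is
	 * above v_free_reserved, the deeper free reserve for SYSTEM and
	 * INTERRUPT requests, or a clean cache page that was freed and
	 * re-pulled via the 'goto loop' above.
	 */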
	/*
	 * Initialize structure.  Only the PG_ZERO flag is inherited.
	 */
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		m->flags = PG_ZERO | PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}
	m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;
	KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));

	/*
	 * vm_page_insert() is safe prior to the splx().  Note also that
	 * inserting a page here does not insert it into the pmap (which
	 * could cause us to block allocating memory).  We cannot block
	 * anywhere.
	 */
	vm_page_insert(m, object, pindex);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	splx(s);

	return (m);
}

/*
 * vm_wait:	(also see VM_WAIT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called in various places before memory allocations.
 */
void
vm_wait(void)
{
	int s;

	s = splvm();
	if (curthread == pagethread) {
		vm_pageout_pages_needed = 1;
		tsleep(&vm_pageout_pages_needed, 0, "VMWait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		tsleep(&vmstats.v_free_count, 0, "vmwait", 0);
	}
	splx(s);
}

/*
 * vm_waitpfault:	(also see VM_WAITPFAULT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called only in vm_fault so that processes page faulting
 *	  can be easily tracked.
 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 *	  processes will be able to grab memory first.  Do not change
 *	  this balance without careful testing first.
 */
void
vm_waitpfault(void)
{
	int s;

	s = splvm();
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	tsleep(&vmstats.v_free_count, 0, "pfault", 0);
	splx(s);
}

/*
 * vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *	Ensure that act_count is at least ACT_INIT but do not otherwise
 *	mess with it.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
	int s;

	s = splvm();
	if (m->queue != PQ_ACTIVE) {
		if ((m->queue - m->pc) == PQ_CACHE)
			mycpu->gd_cnt.v_reactivated++;

		vm_page_unqueue(m);

		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
			m->queue = PQ_ACTIVE;
			vm_page_queues[PQ_ACTIVE].lcnt++;
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			vmstats.v_active_count++;
		}
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}

	splx(s);
}
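/*
 * Sleep/wakeup channels: vm_wait() and vm_waitpfault() above block on
 * &vmstats.v_free_count (and the pagedaemon itself blocks on
 * &vm_pageout_pages_needed); vm_page_free_wakeup() below is the
 * matching wakeup side, invoked whenever a page reaches the free or
 * cache queues.
 */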
/*
 * vm_page_free_wakeup:
 *
 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *	routine is called when a page has been added to the cache or free
 *	queues.
 *
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static __inline void
vm_page_free_wakeup(void)
{
	/*
	 * If the pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed &&
	    vmstats.v_cache_count + vmstats.v_free_count >= vmstats.v_pageout_free_min) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * Wakeup processes that are waiting on memory if we hit a
	 * high water mark, and wakeup the scheduler process if we have
	 * lots of memory; that process will swap in other processes.
	 */
	if (vm_pages_needed && !vm_page_count_min()) {
		vm_pages_needed = 0;
		wakeup(&vmstats.v_free_count);
	}
}

/*
 * vm_page_free_toq:
 *
 *	Returns the given page to the PQ_FREE list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 *	This routine may not block.
 */
void
vm_page_free_toq(vm_page_t m)
{
	int s;
	struct vpgqueues *pq;
	vm_object_t object = m->object;

	s = splvm();

	mycpu->gd_cnt.v_tfree++;

	if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
		printf(
		    "vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
		    (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
		    m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}

	/*
	 * unqueue, then remove page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */
	vm_page_unqueue_nowakeup(m);
	vm_page_remove(m);

	/*
	 * If fictitious remove object association and
	 * return, otherwise delay object association removal.
	 */
	if ((m->flags & PG_FICTITIOUS) != 0) {
		splx(s);
		return;
	}

	m->valid = 0;
	vm_page_undirty(m);

	if (m->wire_count != 0) {
		if (m->wire_count > 1) {
			panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
			    m->wire_count, (long)m->pindex);
		}
		panic("vm_page_free: freeing wired page\n");
	}

	/*
	 * If we've exhausted the object's resident pages we want to free
	 * it up.
	 */
	if (object &&
	    (object->type == OBJT_VNODE) &&
	    ((object->flags & OBJ_DEAD) == 0)
	) {
		struct vnode *vp = (struct vnode *)object->handle;

		if (vp && VSHOULDFREE(vp))
			vfree(vp);
	}

	/*
	 * Clear the UNMANAGED flag when freeing an unmanaged page.
	 */
	if (m->flags & PG_UNMANAGED) {
		m->flags &= ~PG_UNMANAGED;
	} else {
#ifdef __alpha__
		pmap_page_is_free(m);
#endif
	}

	if (m->hold_count != 0) {
		m->flags &= ~PG_ZERO;
		m->queue = PQ_HOLD;
	} else {
		m->queue = PQ_FREE + m->pc;
	}
	pq = &vm_page_queues[m->queue];
	pq->lcnt++;
	++(*pq->cnt);

	/*
	 * Put zero'd pages on the end ( where we look for zero'd pages
	 * first ) and non-zero'd pages at the head.
	 */
	if (m->flags & PG_ZERO) {
		TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
		++vm_page_zero_count;
	} else {
		TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
	}

	vm_page_free_wakeup();

	splx(s);
}
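/*
 * Note that vm_page_free_toq() parks held pages on PQ_HOLD rather than
 * freeing them outright; vm_page_unhold() completes the free once the
 * last hold is released.
 */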
/*
 * vm_page_unmanage:
 *
 *	Prevent PV management from being done on the page.  The page is
 *	removed from the paging queues as if it were wired, and as a
 *	consequence of no longer being managed the pageout daemon will not
 *	touch it (since there is no way to locate the pte mappings for the
 *	page).  madvise() calls that mess with the pmap will also no longer
 *	operate on the page.
 *
 *	Beyond that the page is still reasonably 'normal'.  Freeing the page
 *	will clear the flag.
 *
 *	This routine is used by OBJT_PHYS objects - objects using unswappable
 *	physical memory as backing store rather than swap-backed memory and
 *	will eventually be extended to support 4MB unmanaged physical
 *	mappings.
 */
void
vm_page_unmanage(vm_page_t m)
{
	int s;

	s = splvm();
	if ((m->flags & PG_UNMANAGED) == 0) {
		if (m->wire_count == 0)
			vm_page_unqueue(m);
	}
	vm_page_flag_set(m, PG_UNMANAGED);
	splx(s);
}

/*
 * vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{
	int s;

	/*
	 * Only bump the wire statistics if the page is not already wired,
	 * and only unqueue the page if it is on some queue (if it is unmanaged
	 * it is already off the queues).
	 */
	s = splvm();
	if (m->wire_count == 0) {
		if ((m->flags & PG_UNMANAGED) == 0)
			vm_page_unqueue(m);
		vmstats.v_wire_count++;
	}
	m->wire_count++;
	KASSERT(m->wire_count != 0,
	    ("vm_page_wire: wire_count overflow m=%p", m));

	splx(s);
	vm_page_flag_set(m, PG_MAPPED);
}
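/*
 * Wiring bookkeeping: vmstats.v_wire_count counts wired pages, not
 * wirings, so vm_page_wire() bumps it only on the 0->1 transition and
 * vm_page_unwire() drops it only on 1->0.  A wired page sits on no
 * paging queue at all, which is what keeps the pagedaemon away from it.
 */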
/*
 * vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	Many pages placed on the inactive queue should actually go
 *	into the cache, but it is difficult to figure out which.  What
 *	we do instead, if the inactive target is well met, is to put
 *	clean pages at the head of the inactive queue instead of the tail.
 *	This will cause them to be moved to the cache more quickly and
 *	if not actively re-referenced, freed more quickly.  If we just
 *	stick these pages at the end of the inactive queue, heavy filesystem
 *	meta-data accesses can cause an unnecessary paging load on memory bound
 *	processes.  This optimization causes one-time-use metadata to be
 *	reused more quickly.
 *
 *	BUT, if we are in a low-memory situation we have no choice but to
 *	put clean pages on the cache queue.
 *
 *	A number of routines use vm_page_unwire() to guarantee that the page
 *	will go into either the inactive or active queues, and will NEVER
 *	be placed in the cache - for example, just after dirtying a page.
 *	Dirty pages in the cache are not allowed.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{
	int s;

	s = splvm();

	if (m->wire_count > 0) {
		m->wire_count--;
		if (m->wire_count == 0) {
			vmstats.v_wire_count--;
			if (m->flags & PG_UNMANAGED) {
				;
			} else if (activate) {
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				m->queue = PQ_ACTIVE;
				vm_page_queues[PQ_ACTIVE].lcnt++;
				vmstats.v_active_count++;
			} else {
				vm_page_flag_clear(m, PG_WINATCFLS);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				m->queue = PQ_INACTIVE;
				vm_page_queues[PQ_INACTIVE].lcnt++;
				vmstats.v_inactive_count++;
			}
		}
	} else {
		panic("vm_page_unwire: invalid wire count: %d\n", m->wire_count);
	}
	splx(s);
}


/*
 * Move the specified page to the inactive queue.  If the page has
 * any associated swap, the swap is deallocated.
 *
 * Normally athead is 0 resulting in LRU operation.  athead is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * This routine may not block.
 */
static __inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
	int s;

	/*
	 * Ignore if already inactive.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	s = splvm();
	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			mycpu->gd_cnt.v_reactivated++;
		vm_page_flag_clear(m, PG_WINATCFLS);
		vm_page_unqueue(m);
		if (athead)
			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		else
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		m->queue = PQ_INACTIVE;
		vm_page_queues[PQ_INACTIVE].lcnt++;
		vmstats.v_inactive_count++;
	}
	splx(s);
}

void
vm_page_deactivate(vm_page_t m)
{
	_vm_page_deactivate(m, 0);
}

/*
 * vm_page_try_to_cache:
 *
 *	Returns 0 on failure, 1 on success
 */
int
vm_page_try_to_cache(vm_page_t m)
{
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty)
		return(0);
	vm_page_cache(m);
	return(1);
}

/*
 * vm_page_try_to_free()
 *
 *	Attempt to free the page.  If we cannot free it, we do nothing.
 *	1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty)
		return(0);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	return(1);
}
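/*
 * vm_page_try_to_cache() and vm_page_try_to_free() above share the same
 * guard: a page that is dirty, held, busy, wired, PG_BUSY, or unmanaged
 * is left alone.  Both also re-check the pmap modified bit via
 * vm_page_test_dirty() so a page dirtied through a mapping is not
 * mistaken for clean.
 */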
/*
 * vm_page_cache
 *
 *	Put the specified page onto the page cache queue (if appropriate).
 *
 *	This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{
	int s;

	if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy || m->wire_count) {
		printf("vm_page_cache: attempting to cache busy page\n");
		return;
	}
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	/*
	 * Remove all pmaps and indicate that the page is not
	 * writeable or mapped.
	 */
	vm_page_protect(m, VM_PROT_NONE);
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %ld",
		    (long)m->pindex);
	}
	s = splvm();
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	vm_page_queues[m->queue].lcnt++;
	TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
	vmstats.v_cache_count++;
	vm_page_free_wakeup();
	splx(s);
}

/*
 * vm_page_dontneed
 *
 *	Cache, deactivate, or do nothing as appropriate.  This routine
 *	is typically used by madvise() MADV_DONTNEED.
 *
 *	Generally speaking we want to move the page into the cache so
 *	it gets reused quickly.  However, this can result in a silly syndrome
 *	due to the page recycling too quickly.  Small objects will not be
 *	fully cached.  On the other hand, if we move the page to the inactive
 *	queue we wind up with a problem whereby very large objects
 *	unnecessarily blow away our inactive and cache queues.
 *
 *	The solution is to move the pages based on a fixed weighting.  We
 *	either leave them alone, deactivate them, or move them to the cache,
 *	where moving them to the cache has the highest weighting.
 *	By forcing some pages into other queues we eventually force the
 *	system to balance the queues, potentially recovering other unrelated
 *	space from active.  The idea is to not force this to happen too
 *	often.
 */
void
vm_page_dontneed(vm_page_t m)
{
	static int dnweight;
	int dnw;
	int head;

	dnw = ++dnweight;

	/*
	 * Occasionally leave the page alone.
	 */
	if ((dnw & 0x01F0) == 0 ||
	    m->queue == PQ_INACTIVE ||
	    m->queue - m->pc == PQ_CACHE
	) {
		if (m->act_count >= ACT_INIT)
			--m->act_count;
		return;
	}

	if (m->dirty == 0)
		vm_page_test_dirty(m);

	if (m->dirty || (dnw & 0x0070) == 0) {
		/*
		 * Deactivate the page 3 times out of 32.
		 */
		head = 0;
	} else {
		/*
		 * Cache the page 28 times out of every 32.  Note that
		 * the page is deactivated instead of cached, but placed
		 * at the head of the queue instead of the tail.
		 */
		head = 1;
	}
	_vm_page_deactivate(m, head);
}
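/*
 * The dnweight arithmetic above works out as follows for a clean page
 * that is not already inactive or cached: out of every 512 consecutive
 * values of dnw, 16 (1 in 32) satisfy (dnw & 0x01F0) == 0 and take the
 * leave-alone path, another 48 (3 in 32) satisfy (dnw & 0x0070) == 0
 * and deactivate normally, and the remaining 448 (28 in 32) go to the
 * head of the inactive queue as pseudo-cached -- matching the 3/32 and
 * 28/32 figures in the comments.
 */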
/*
 * Grab a page, waiting until we are woken up due to the page
 * changing state.  We keep on waiting as long as the page remains
 * busy in the object.  If the page doesn't exist, allocate it.
 *
 * This routine may block.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;
	int s, generation;

retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (m->busy || (m->flags & PG_BUSY)) {
			generation = object->generation;

			s = splvm();
			while ((object->generation == generation) &&
			       (m->busy || (m->flags & PG_BUSY))) {
				vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
				tsleep(m, 0, "pgrbwt", 0);
				if ((allocflags & VM_ALLOC_RETRY) == 0) {
					splx(s);
					return NULL;
				}
			}
			splx(s);
			goto retrylookup;
		} else {
			vm_page_busy(m);
			return m;
		}
	}

	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
	if (m == NULL) {
		VM_WAIT;
		if ((allocflags & VM_ALLOC_RETRY) == 0)
			return NULL;
		goto retrylookup;
	}

	return m;
}

/*
 * Mapping function for valid bits or for dirty bits in
 * a page.  May not block.
 *
 * Inputs are required to range within a page.
 */
__inline int
vm_page_bits(int base, int size)
{
	int first_bit;
	int last_bit;

	KASSERT(
	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)
	);

	if (size == 0)		/* handle degenerate case */
		return(0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return ((2 << last_bit) - (1 << first_bit));
}
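/*
 * Worked example, assuming the usual DEV_BSIZE of 512 (DEV_BSHIFT == 9):
 * vm_page_bits(0, 1024) covers blocks 0 and 1 and yields
 * (2 << 1) - (1 << 0) == 0x03, while vm_page_bits(512, 512) covers only
 * block 1 and yields (2 << 1) - (1 << 1) == 0x02.
 */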
/*
 * vm_page_set_validclean:
 *
 *	Sets portions of a page valid and clean.  The arguments are expected
 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 *	of any partial chunks touched by the range.  The invalid portion of
 *	such chunks will be zero'd.
 *
 *	This routine may not block.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
	int pagebits;
	int frag;
	int endoff;

	if (size == 0)		/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    frag,
		    base - frag
		);
	}

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;

	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
		);
	}

	/*
	 * Set valid, clear dirty bits.  If validating the entire
	 * page we can safely clear the pmap modify bit.  We also
	 * use this opportunity to clear the PG_NOSYNC flag.  If a process
	 * takes a write fault on a MAP_NOSYNC memory area the flag will
	 * be set again.
	 *
	 * We set valid bits inclusive of any overlap, but we can only
	 * clear dirty bits for DEV_BSIZE chunks that are fully within
	 * the range.
	 */
	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
#if 0	/* NOT YET */
	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
		frag = DEV_BSIZE - frag;
		base += frag;
		size -= frag;
		if (size < 0)
			size = 0;
	}
	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
#endif
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE) {
		pmap_clear_modify(m);
		vm_page_flag_clear(m, PG_NOSYNC);
	}
}

#if 0

void
vm_page_set_dirty(vm_page_t m, int base, int size)
{
	m->dirty |= vm_page_bits(base, size);
}

#endif

void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{
	m->dirty &= ~vm_page_bits(base, size);
}

/*
 * vm_page_set_invalid:
 *
 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
 *	valid and dirty bits for the affected areas are cleared.
 *
 *	May not block.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
	int bits;

	bits = vm_page_bits(base, size);
	m->valid &= ~bits;
	m->dirty &= ~bits;
	m->object->generation++;
}

/*
 * vm_page_zero_invalid()
 *
 *	The kernel assumes that the invalid portions of a page contain
 *	garbage, but such pages can be mapped into memory by user code.
 *	When this occurs, we must zero out the non-valid portions of the
 *	page so user code sees what it expects.
 *
 *	Pages are most often semi-valid when the end of a file is mapped
 *	into memory and the file's size is not page aligned.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
	int b;
	int i;

	/*
	 * Scan the valid bits looking for invalid sections that
	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas ( where the
	 * valid bit may be set ) have already been zeroed by
	 * vm_page_set_validclean().
	 */
	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & (1 << i))
		) {
			if (i > b) {
				pmap_zero_page_area(
				    VM_PAGE_TO_PHYS(m),
				    b << DEV_BSHIFT,
				    (i - b) << DEV_BSHIFT
				);
			}
			b = i + 1;
		}
	}

	/*
	 * setvalid is TRUE when we can safely set the zero'd areas
	 * as being valid.  We can do this if there are no cache consistency
	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
	 */
	if (setvalid)
		m->valid = VM_PAGE_BITS_ALL;
}

/*
 * vm_page_is_valid:
 *
 *	Is (partial) page valid?  Note that the case where size == 0
 *	will return FALSE in the degenerate case where the page is
 *	entirely invalid, and TRUE otherwise.
 *
 *	May not block.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}
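/*
 * Granularity note: with 4K pages and the usual 512-byte DEV_BSIZE
 * there are eight DEV_BSIZE chunks per page, so m->valid and m->dirty
 * each use the low eight bits of their field, one bit per chunk, and
 * VM_PAGE_BITS_ALL is the fully-set mask.
 */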
/*
 * update dirty bits from pmap/mmu.  May not block.
 */
void
vm_page_test_dirty(vm_page_t m)
{
	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
		vm_page_dirty(m);
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("vmstats.v_free_count: %d\n", vmstats.v_free_count);
	db_printf("vmstats.v_cache_count: %d\n", vmstats.v_cache_count);
	db_printf("vmstats.v_inactive_count: %d\n", vmstats.v_inactive_count);
	db_printf("vmstats.v_active_count: %d\n", vmstats.v_active_count);
	db_printf("vmstats.v_wire_count: %d\n", vmstats.v_wire_count);
	db_printf("vmstats.v_free_reserved: %d\n", vmstats.v_free_reserved);
	db_printf("vmstats.v_free_min: %d\n", vmstats.v_free_min);
	db_printf("vmstats.v_free_target: %d\n", vmstats.v_free_target);
	db_printf("vmstats.v_cache_min: %d\n", vmstats.v_cache_min);
	db_printf("vmstats.v_inactive_target: %d\n", vmstats.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;

	db_printf("PQ_FREE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_CACHE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
	    vm_page_queues[PQ_ACTIVE].lcnt,
	    vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */