/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Resident memory management module.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

static void vm_page_queue_init (void);
static vm_page_t vm_page_select_cache (vm_object_t, vm_pindex_t);

/*
 * Associated with each page of user-allocatable memory is a
 * page structure.
 */

static struct vm_page **vm_page_buckets; /* Array of buckets */
static int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */
static volatile int vm_page_bucket_generation;

struct vpgqueues vm_page_queues[PQ_COUNT];

static void
vm_page_queue_init(void)
{
	int i;

	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_FREE + i].cnt = &cnt.v_free_count;
	}
	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;

	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;
	for (i = 0; i < PQ_L2_SIZE; i++) {
		vm_page_queues[PQ_CACHE + i].cnt = &cnt.v_cache_count;
	}
	for (i = 0; i < PQ_COUNT; i++) {
		TAILQ_INIT(&vm_page_queues[i].pl);
	}
}

vm_page_t vm_page_array = 0;
int vm_page_array_size = 0;
long first_page = 0;
int vm_page_zero_count = 0;

static __inline int vm_page_hash (vm_object_t object, vm_pindex_t pindex);
static void vm_page_free_wakeup (void);

/*
 * vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (cnt.v_page_size == 0)
		cnt.v_page_size = PAGE_SIZE;
	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 * vm_add_new_page:
 *
 *	Add a new page to the freelist for use by the system.
 *	Must be called at splhigh().
 */
vm_page_t
vm_add_new_page(vm_offset_t pa)
{
	vm_page_t m;

	++cnt.v_page_count;
	++cnt.v_free_count;
	m = PHYS_TO_VM_PAGE(pa);
	m->phys_addr = pa;
	m->flags = 0;
	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
	m->queue = m->pc + PQ_FREE;
	TAILQ_INSERT_HEAD(&vm_page_queues[m->queue].pl, m, pageq);
	vm_page_queues[m->queue].lcnt++;
	return (m);
}
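/*
 * Illustrative example (not part of the original source; the constant
 * values below are assumptions for a typical i386 configuration):
 * with PAGE_SHIFT == 12 and PQ_L2_MASK == 0x3f, a page at physical
 * address 0x0123f000 gets pc = (0x0123f000 >> 12) & 0x3f = 0x3f, so
 * vm_add_new_page() links it onto the PQ_FREE + 0x3f sub-queue.  The
 * power-of-two test in vm_set_page_size() relies on the usual trick
 * that ((x - 1) & x) == 0 only when x has a single bit set.
 */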
/*
 * vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
{
	vm_offset_t mapped;
	struct vm_page **bucket;
	vm_size_t npages, page_range;
	vm_offset_t new_end;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t last_pa;

	/* the biggest memory array is the second group of pages */
	vm_offset_t end;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	end = phys_avail[biggestone+1];

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */

	vm_page_queue_init();

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * We make the hash table approximately 2x the number of pages to
	 * reduce the chain length.  This is about the same size using the
	 * singly-linked list as the 1x hash table we were using before
	 * using TAILQ but the chain length will be smaller.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct vm_page **)vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_bucket_count <<= 1;
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */
	new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
	new_end = trunc_page(new_end);
	mapped = round_page(vaddr);
	vaddr = pmap_map(mapped, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vaddr = round_page(vaddr);
	bzero((caddr_t) mapped, vaddr - mapped);

	for (i = 0; i < vm_page_bucket_count; i++) {
		*bucket = NULL;
		bucket++;
	}

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */

	first_page = phys_avail[0] / PAGE_SIZE;

	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (end - new_end)) / PAGE_SIZE;

	end = new_end;
	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */
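	/*
	 * Illustrative sizing only (the figures are assumptions, not taken
	 * from this file): with 128 MB of managed memory and 4 KB pages,
	 * page_range is roughly 32768 entries; at about 64 bytes per
	 * struct vm_page the array mapped below consumes around 2 MB,
	 * carved out of the top of the largest physical segment just like
	 * the hash buckets above.
	 */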
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	/*
	 * Construct the free queue(s) in descending order (by physical
	 * address) so that the first 16MB of physical memory is allocated
	 * last rather than first.  On large-memory machines, this avoids
	 * the exhaustion of low physical memory before isa_dmainit has run.
	 */
	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		pa = phys_avail[i];
		if (i == biggestone)
			last_pa = new_end;
		else
			last_pa = phys_avail[i + 1];
		while (pa < last_pa && npages-- > 0) {
			vm_add_new_page(pa);
			pa += PAGE_SIZE;
		}
	}
	return (mapped);
}

/*
 * vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This routine depends on vm_page_bucket_count being a power of 2.
 *	This routine may not block.
 *
 *	We try to randomize the hash based on the object to spread the pages
 *	out in the hash table without it costing us too much.
 */
static __inline int
vm_page_hash(vm_object_t object, vm_pindex_t pindex)
{
	int i = ((uintptr_t)object + pindex) ^ object->hash_rand;

	return(i & vm_page_hash_mask);
}
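/*
 * Illustrative example only (the numbers are assumptions): with
 * vm_page_hash_mask == 0x3ff, an object at 0xc1234000 whose hash_rand
 * happens to be 0x1b5 hashes pindex 7 to
 * ((0xc1234000 + 7) ^ 0x1b5) & 0x3ff = 0x1b2, so the page is chained
 * into vm_page_buckets[0x1b2] by vm_page_insert() below.
 */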
void
vm_page_unhold(vm_page_t mem)
{
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
		vm_page_free_toq(mem);
}

/*
 * vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The pagetables are not updated but will presumably fault the page
 *	in if necessary, or if a kernel page the caller will at some point
 *	enter the page into the kernel's pmap.  We are not allowed to block
 *	here so we *can't* do this anyway.
 *
 *	The object and page must be locked, and the caller must be at
 *	splhigh().
 *	This routine may not block.
 */

void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	struct vm_page **bucket;

	if (m->object != NULL)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */

	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	m->hnext = *bucket;
	*bucket = m;
	vm_page_bucket_generation++;

	/*
	 * Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, m, listq);
	object->generation++;

	/*
	 * Show that the object has one more resident page.
	 */

	object->resident_page_count++;

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
	 */
	if (m->flags & PG_WRITEABLE)
		vm_object_set_writeable_dirty(object);
}

/*
 * vm_page_remove:
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list, but does not invalidate/terminate
 *	the backing store.
 *
 *	The object and page must be locked, and the caller must be at
 *	splhigh().
 *	The underlying pmap entry (if any) is NOT removed here.
 *	This routine may not block.
 */

void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;

	if (m->object == NULL)
		return;

	if ((m->flags & PG_BUSY) == 0) {
		panic("vm_page_remove: page not busy");
	}

	/*
	 * Basically destroy the page.
	 */

	vm_page_wakeup(m);

	object = m->object;

	/*
	 * Remove from the object_object/offset hash table.  The object
	 * must be on the hash queue; we will panic if it isn't.
	 *
	 * Note: we must NULL-out m->hnext to prevent loops in detached
	 * buffers with vm_page_lookup().
	 */

	{
		struct vm_page **bucket;

		bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
		while (*bucket != m) {
			if (*bucket == NULL)
				panic("vm_page_remove(): page not found in hash");
			bucket = &(*bucket)->hnext;
		}
		*bucket = m->hnext;
		m->hnext = NULL;
		vm_page_bucket_generation++;
	}

	/*
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	object->resident_page_count--;
	object->generation++;

	m->object = NULL;
}

/*
 * vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	NOTE: the code below does not lock.  It will operate properly if
 *	an interrupt makes a change, but the generation algorithm will not
 *	operate properly in an SMP environment where both CPUs are able to run
 *	kernel code simultaneously.
 *
 *	The object must be locked.  No side effects.
 *	This routine may not block.
 *	This is a critical path routine.
 */

vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;
	struct vm_page **bucket;
	int generation;

	/*
	 * Search the hash table for this object/offset pair
	 */

retry:
	generation = vm_page_bucket_generation;
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	for (m = *bucket; m != NULL; m = m->hnext) {
		if ((m->object == object) && (m->pindex == pindex)) {
			if (vm_page_bucket_generation != generation)
				goto retry;
			return (m);
		}
	}
	if (vm_page_bucket_generation != generation)
		goto retry;
	return (NULL);
}
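/*
 * Illustrative usage only (assumed caller pattern, not part of this
 * file): most callers first try vm_page_lookup() and fall back to
 * allocation when it returns NULL, e.g.
 *
 *	if ((m = vm_page_lookup(object, pindex)) == NULL)
 *		m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 *
 * vm_page_grab() further below packages exactly this pattern together
 * with the busy/retry handling.
 */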
/*
 * vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 *	This routine may not block.
 *
 *	Note: this routine will raise itself to splvm(), the caller need not.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons: (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.  If the page is on the cache, we have to deactivate it
 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	      on the cache.
 */

void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	int s;

	s = splvm();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	if (m->queue - m->pc == PQ_CACHE)
		vm_page_deactivate(m);
	vm_page_dirty(m);
	splx(s);
}

/*
 * vm_page_unqueue_nowakeup:
 *
 *	vm_page_unqueue() without any wakeup
 *
 *	This routine must be called at splhigh().
 *	This routine may not block.
 */

void
vm_page_unqueue_nowakeup(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
	}
}

/*
 * vm_page_unqueue:
 *
 *	Remove a page from its queue.
 *
 *	This routine must be called at splhigh().
 *	This routine may not block.
 */

void
vm_page_unqueue(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
		if ((queue - m->pc) == PQ_CACHE) {
			if (vm_paging_needed())
				pagedaemon_wakeup();
		}
	}
}

#if PQ_L2_SIZE > 1

/*
 * vm_page_list_find:
 *
 *	Find a page on the specified queue with color optimization.
 *
 *	The page coloring optimization attempts to locate a page
 *	that does not overload other nearby pages in the object in
 *	the cpu's L1 or L2 caches.  We need this optimization because
 *	cpu caches tend to be physical caches, while object spaces tend
 *	to be virtual.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 *
 *	This routine may only be called from the vm_page_list_find() macro
 *	in vm_page.h
 */
vm_page_t
_vm_page_list_find(int basequeue, int index)
{
	int i;
	vm_page_t m = NULL;
	struct vpgqueues *pq;

	pq = &vm_page_queues[basequeue];

	/*
	 * Note that for the first loop, index+i and index-i wind up at the
	 * same place.  Even though this is not totally optimal, we've already
	 * blown it by missing the cache case so we do not care.
	 */

	for (i = PQ_L2_SIZE / 2; i > 0; --i) {
		if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
			break;

		if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
			break;
	}
	return(m);
}

#endif

/*
 * vm_page_select_cache:
 *
 *	Find a page on the cache queue with color optimization.  Pages that
 *	are found but not usable are deactivated; this keeps us from using
 *	potentially busy cached pages.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 */
vm_page_t
vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	while (TRUE) {
		m = vm_page_list_find(
		    PQ_CACHE,
		    (pindex + object->pg_color) & PQ_L2_MASK,
		    FALSE
		);
		if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
		    m->hold_count || m->wire_count)) {
			vm_page_deactivate(m);
			continue;
		}
		return m;
	}
}
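/*
 * Illustrative note (assumed values, not from this file): the color
 * index passed to vm_page_list_find() above is
 * (pindex + object->pg_color) & PQ_L2_MASK, so with PQ_L2_MASK == 0x3f
 * consecutive pages of an object (pindex, pindex + 1, ...) are drawn
 * from consecutive free/cache sub-queues.  Since each sub-queue holds
 * pages whose physical addresses map to the same cache color, adjacent
 * object pages tend not to collide in a physically indexed L1/L2 cache.
 */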
/*
 * vm_page_select_free:
 *
 *	Find a free or zero page, with specified preference.  We attempt to
 *	inline the nominal case and fall back to _vm_page_select_free()
 *	otherwise.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 */

static __inline vm_page_t
vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
{
	vm_page_t m;

	m = vm_page_list_find(
	    PQ_FREE,
	    (pindex + object->pg_color) & PQ_L2_MASK,
	    prefer_zero
	);
	return(m);
}

/*
 * vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	VM_ALLOC_ZERO		zero page
 *
 *	Object must be locked.
 *	This routine may not block.
 *
 *	Additional special handling is required when called from an
 *	interrupt (VM_ALLOC_INTERRUPT).  We are not allowed to mess with
 *	the page cache in this case.
 */

vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
{
	vm_page_t m = NULL;
	int s;

	KASSERT(!vm_page_lookup(object, pindex),
	    ("vm_page_alloc: page already allocated"));

	/*
	 * The pager is allowed to eat deeper into the free page list.
	 */

	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	s = splvm();

loop:
	if (cnt.v_free_count > cnt.v_free_reserved) {
		/*
		 * Allocate from the free queue if there are plenty of pages
		 * in it.
		 */
		if (page_req == VM_ALLOC_ZERO)
			m = vm_page_select_free(object, pindex, TRUE);
		else
			m = vm_page_select_free(object, pindex, FALSE);
	} else if (
	    (page_req == VM_ALLOC_SYSTEM &&
	     cnt.v_cache_count == 0 &&
	     cnt.v_free_count > cnt.v_interrupt_free_min) ||
	    (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)
	) {
		/*
		 * Interrupt or system, dig deeper into the free list.
		 */
		m = vm_page_select_free(object, pindex, FALSE);
	} else if (page_req != VM_ALLOC_INTERRUPT) {
		/*
		 * Allocatable from cache (non-interrupt only).  On success,
		 * we must free the page and try again, thus ensuring that
		 * cnt.v_*_free_min counters are replenished.
		 */
		m = vm_page_select_cache(object, pindex);
		if (m == NULL) {
			splx(s);
#if defined(DIAGNOSTIC)
			if (cnt.v_cache_count > 0)
				printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
			vm_pageout_deficit++;
			pagedaemon_wakeup();
			return (NULL);
		}
		KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
		vm_page_busy(m);
		vm_page_protect(m, VM_PROT_NONE);
		vm_page_free(m);
		goto loop;
	} else {
		/*
		 * Not allocatable from cache from interrupt, give up.
		 */
		splx(s);
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 * At this point we had better have found a good page.
	 */

	KASSERT(
	    m != NULL,
	    ("vm_page_alloc(): missing page on free queue\n")
	);

	/*
	 * Remove from free queue
	 */

	vm_page_unqueue_nowakeup(m);

	/*
	 * Initialize structure.  Only the PG_ZERO flag is inherited.
	 */
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		m->flags = PG_ZERO | PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}
	m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;
	KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));

	/*
	 * vm_page_insert() is safe prior to the splx().  Note also that
	 * inserting a page here does not insert it into the pmap (which
	 * could cause us to block allocating memory).  We cannot block
	 * anywhere.
	 */

	vm_page_insert(m, object, pindex);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	splx(s);

	return (m);
}

/*
 * vm_wait:	(also see VM_WAIT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called in various places before memory allocations.
 */

void
vm_wait(void)
{
	int s;

	s = splvm();
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		tsleep(&vm_pageout_pages_needed, PSWP, "VMWait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
	}
	splx(s);
}

/*
 * vm_waitpfault:	(also see VM_WAITPFAULT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called only in vm_fault so that processes page faulting
 *	  can be easily tracked.
 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 *	  processes will be able to grab memory first.  Do not change
 *	  this balance without careful testing first.
 */

void
vm_waitpfault(void)
{
	int s;

	s = splvm();
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	tsleep(&cnt.v_free_count, PUSER, "pfault", 0);
	splx(s);
}

/*
 * vm_await:	(also see VM_AWAIT macro)
 *
 *	asleep on an event that will signal when free pages are available
 *	for allocation.
 */

void
vm_await(void)
{
	int s;

	s = splvm();
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		asleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed++;
			wakeup(&vm_pages_needed);
		}
		asleep(&cnt.v_free_count, PVM, "vmwait", 0);
	}
	splx(s);
}

#if 0
/*
 *	vm_page_sleep:
 *
 *	Block until page is no longer busy.
 */

int
vm_page_sleep(vm_page_t m, char *msg, char *busy)
{
	int slept = 0;
	if ((busy && *busy) || (m->flags & PG_BUSY)) {
		int s;
		s = splvm();
		if ((busy && *busy) || (m->flags & PG_BUSY)) {
			vm_page_flag_set(m, PG_WANTED);
			tsleep(m, PVM, msg, 0);
			slept = 1;
		}
		splx(s);
	}
	return slept;
}

#endif
#if 0

/*
 *	vm_page_asleep:
 *
 *	Similar to vm_page_sleep(), but does not block.  Returns 0 if
 *	the page is not busy, or 1 if the page is busy.
 *
 *	This routine has the side effect of calling asleep() if the page
 *	was busy (1 returned).
 */

int
vm_page_asleep(vm_page_t m, char *msg, char *busy)
{
	int slept = 0;
	if ((busy && *busy) || (m->flags & PG_BUSY)) {
		int s;
		s = splvm();
		if ((busy && *busy) || (m->flags & PG_BUSY)) {
			vm_page_flag_set(m, PG_WANTED);
			asleep(m, PVM, msg, 0);
			slept = 1;
		}
		splx(s);
	}
	return slept;
}

#endif

/*
 * vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *	Ensure that act_count is at least ACT_INIT but do not otherwise
 *	mess with it.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
	int s;

	s = splvm();
	if (m->queue != PQ_ACTIVE) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;

		vm_page_unqueue(m);

		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
			m->queue = PQ_ACTIVE;
			vm_page_queues[PQ_ACTIVE].lcnt++;
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			cnt.v_active_count++;
		}
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}

	splx(s);
}

/*
 * vm_page_free_wakeup:
 *
 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *	routine is called when a page has been added to the cache or free
 *	queues.
 *
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static __inline void
vm_page_free_wakeup(void)
{
	/*
	 * If the pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed &&
	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * Wake up processes that are waiting on memory if we hit a
	 * high water mark.  And wake up the scheduler process if we have
	 * lots of memory; that process will swap in other processes.
	 */
	if (vm_pages_needed && !vm_page_count_min()) {
		vm_pages_needed = 0;
		wakeup(&cnt.v_free_count);
	}
}
/*
 * vm_page_free_toq:
 *
 *	Returns the given page to the PQ_FREE list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 *	This routine may not block.
 */

void
vm_page_free_toq(vm_page_t m)
{
	int s;
	struct vpgqueues *pq;
	vm_object_t object = m->object;

	s = splvm();

	cnt.v_tfree++;

	if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
		printf(
		    "vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
		    (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
		    m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}

	/*
	 * Unqueue, then remove the page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */

	vm_page_unqueue_nowakeup(m);
	vm_page_remove(m);

	/*
	 * If the page is fictitious, remove the object association and
	 * return; otherwise delay the object association removal.
	 */

	if ((m->flags & PG_FICTITIOUS) != 0) {
		splx(s);
		return;
	}

	m->valid = 0;
	vm_page_undirty(m);

	if (m->wire_count != 0) {
		if (m->wire_count > 1) {
			panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
			    m->wire_count, (long)m->pindex);
		}
		panic("vm_page_free: freeing wired page\n");
	}

	/*
	 * If we've exhausted the object's resident pages we want to free
	 * it up.
	 */

	if (object &&
	    (object->type == OBJT_VNODE) &&
	    ((object->flags & OBJ_DEAD) == 0)
	) {
		struct vnode *vp = (struct vnode *)object->handle;

		if (vp && VSHOULDFREE(vp))
			vfree(vp);
	}

	/*
	 * Clear the UNMANAGED flag when freeing an unmanaged page.
	 */

	if (m->flags & PG_UNMANAGED) {
		m->flags &= ~PG_UNMANAGED;
	} else {
#ifdef __alpha__
		pmap_page_is_free(m);
#endif
	}

	if (m->hold_count != 0) {
		m->flags &= ~PG_ZERO;
		m->queue = PQ_HOLD;
	} else
		m->queue = PQ_FREE + m->pc;
	pq = &vm_page_queues[m->queue];
	pq->lcnt++;
	++(*pq->cnt);

	/*
	 * Put zero'd pages on the end (where we look for zero'd pages
	 * first) and non-zero'd pages at the head.
	 */

	if (m->flags & PG_ZERO) {
		TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
		++vm_page_zero_count;
	} else {
		TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
	}

	vm_page_free_wakeup();

	splx(s);
}

/*
 * vm_page_unmanage:
 *
 *	Prevent PV management from being done on the page.  The page is
 *	removed from the paging queues as if it were wired, and as a
 *	consequence of no longer being managed the pageout daemon will not
 *	touch it (since there is no way to locate the pte mappings for the
 *	page).  madvise() calls that mess with the pmap will also no longer
 *	operate on the page.
 *
 *	Beyond that the page is still reasonably 'normal'.  Freeing the page
 *	will clear the flag.
 *
 *	This routine is used by OBJT_PHYS objects - objects using unswappable
 *	physical memory as backing store rather than swap-backed memory and
 *	will eventually be extended to support 4MB unmanaged physical
 *	mappings.
 */

void
vm_page_unmanage(vm_page_t m)
{
	int s;

	s = splvm();
	if ((m->flags & PG_UNMANAGED) == 0) {
		if (m->wire_count == 0)
			vm_page_unqueue(m);
	}
	vm_page_flag_set(m, PG_UNMANAGED);
	splx(s);
}

/*
 * vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{
	int s;

	/*
	 * Only bump the wire statistics if the page is not already wired,
	 * and only unqueue the page if it is on some queue (if it is unmanaged
	 * it is already off the queues).
	 */
	s = splvm();
	if (m->wire_count == 0) {
		if ((m->flags & PG_UNMANAGED) == 0)
			vm_page_unqueue(m);
		cnt.v_wire_count++;
	}
	m->wire_count++;
	KASSERT(m->wire_count != 0,
	    ("vm_page_wire: wire_count overflow m=%p", m));

	splx(s);
	vm_page_flag_set(m, PG_MAPPED);
}
/*
 * vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	Many pages placed on the inactive queue should actually go
 *	into the cache, but it is difficult to figure out which.  What
 *	we do instead, if the inactive target is well met, is to put
 *	clean pages at the head of the inactive queue instead of the tail.
 *	This will cause them to be moved to the cache more quickly and
 *	if not actively re-referenced, freed more quickly.  If we just
 *	stick these pages at the end of the inactive queue, heavy filesystem
 *	meta-data accesses can cause an unnecessary paging load on memory bound
 *	processes.  This optimization causes one-time-use metadata to be
 *	reused more quickly.
 *
 *	BUT, if we are in a low-memory situation we have no choice but to
 *	put clean pages on the cache queue.
 *
 *	A number of routines use vm_page_unwire() to guarantee that the page
 *	will go into either the inactive or active queues, and will NEVER
 *	be placed in the cache - for example, just after dirtying a page.
 *	Dirty pages in the cache are not allowed.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{
	int s;

	s = splvm();

	if (m->wire_count > 0) {
		m->wire_count--;
		if (m->wire_count == 0) {
			cnt.v_wire_count--;
			if (m->flags & PG_UNMANAGED) {
				;
			} else if (activate) {
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				m->queue = PQ_ACTIVE;
				vm_page_queues[PQ_ACTIVE].lcnt++;
				cnt.v_active_count++;
			} else {
				vm_page_flag_clear(m, PG_WINATCFLS);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				m->queue = PQ_INACTIVE;
				vm_page_queues[PQ_INACTIVE].lcnt++;
				cnt.v_inactive_count++;
			}
		}
	} else {
		panic("vm_page_unwire: invalid wire count: %d\n", m->wire_count);
	}
	splx(s);
}
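/*
 * Illustrative pairing only (assumed caller pattern, not part of this
 * file): code that must keep a page resident across a blocking
 * operation wires it and later drops the wiring, choosing whether the
 * page returns to the active or the inactive queue:
 *
 *	vm_page_wire(m);
 *	... possibly blocking work that relies on the page ...
 *	vm_page_unwire(m, 1);	(1 reactivates, 0 deactivates)
 */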
/*
 * Move the specified page to the inactive queue.  If the page has
 * any associated swap, the swap is deallocated.
 *
 * Normally athead is 0 resulting in LRU operation.  athead is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * This routine may not block.
 */
static __inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
	int s;

	/*
	 * Ignore if already inactive.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	s = splvm();
	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;
		vm_page_flag_clear(m, PG_WINATCFLS);
		vm_page_unqueue(m);
		if (athead)
			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		else
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		m->queue = PQ_INACTIVE;
		vm_page_queues[PQ_INACTIVE].lcnt++;
		cnt.v_inactive_count++;
	}
	splx(s);
}

void
vm_page_deactivate(vm_page_t m)
{
	_vm_page_deactivate(m, 0);
}

/*
 * vm_page_try_to_cache:
 *
 *	Returns 0 on failure, 1 on success
 */
int
vm_page_try_to_cache(vm_page_t m)
{
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty)
		return(0);
	vm_page_cache(m);
	return(1);
}

/*
 * vm_page_try_to_free()
 *
 *	Attempt to free the page.  If we cannot free it, we do nothing.
 *	1 is returned on success, 0 on failure.
 */

int
vm_page_try_to_free(vm_page_t m)
{
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty)
		return(0);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	return(1);
}

/*
 * vm_page_cache
 *
 *	Put the specified page onto the page cache queue (if appropriate).
 *
 *	This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{
	int s;

	if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy || m->wire_count) {
		printf("vm_page_cache: attempting to cache busy page\n");
		return;
	}
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	/*
	 * Remove all pmaps and indicate that the page is not
	 * writeable or mapped.
	 */

	vm_page_protect(m, VM_PROT_NONE);
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %ld",
		    (long)m->pindex);
	}
	s = splvm();
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	vm_page_queues[m->queue].lcnt++;
	TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
	cnt.v_cache_count++;
	vm_page_free_wakeup();
	splx(s);
}

/*
 * vm_page_dontneed
 *
 *	Cache, deactivate, or do nothing as appropriate.  This routine
 *	is typically used by madvise() MADV_DONTNEED.
 *
 *	Generally speaking we want to move the page into the cache so
 *	it gets reused quickly.  However, this can result in a silly syndrome
 *	due to the page recycling too quickly.  Small objects will not be
 *	fully cached.  On the other hand, if we move the page to the inactive
 *	queue we wind up with a problem whereby very large objects
 *	unnecessarily blow away our inactive and cache queues.
 *
 *	The solution is to move the pages based on a fixed weighting.  We
 *	either leave them alone, deactivate them, or move them to the cache,
 *	where moving them to the cache has the highest weighting.
 *	By forcing some pages into other queues we eventually force the
 *	system to balance the queues, potentially recovering other unrelated
 *	space from active.  The idea is to not force this to happen too
 *	often.
 */

void
vm_page_dontneed(vm_page_t m)
{
	static int dnweight;
	int dnw;
	int head;

	dnw = ++dnweight;

	/*
	 * Occasionally leave the page alone.
	 */

	if ((dnw & 0x01F0) == 0 ||
	    m->queue == PQ_INACTIVE ||
	    m->queue - m->pc == PQ_CACHE
	) {
		if (m->act_count >= ACT_INIT)
			--m->act_count;
		return;
	}

	if (m->dirty == 0)
		vm_page_test_dirty(m);

	if (m->dirty || (dnw & 0x0070) == 0) {
		/*
		 * Deactivate the page 3 times out of 32.
		 */
		head = 0;
	} else {
		/*
		 * Cache the page 28 times out of every 32.  Note that
		 * the page is deactivated instead of cached, but placed
		 * at the head of the queue instead of the tail.
		 */
		head = 1;
	}
	_vm_page_deactivate(m, head);
}
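/*
 * Illustrative arithmetic only (not part of the original code): for a
 * call where dnw happens to be 0x0123, (dnw & 0x01F0) == 0x0120 and
 * (dnw & 0x0070) == 0x0020, so neither the "leave alone" nor the
 * "deactivate" case triggers; a clean page is handed to
 * _vm_page_deactivate() with athead = 1, i.e. queued at the head of the
 * inactive list so it is reclaimed almost as quickly as a cached page.
 */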
/*
 * Grab a page, waiting until we are woken up due to the page
 * changing state.  We keep on waiting as long as the page continues
 * to be in the object.  If the page doesn't exist, allocate it.
 *
 * This routine may block.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;
	int s, generation;

retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (m->busy || (m->flags & PG_BUSY)) {
			generation = object->generation;

			s = splvm();
			while ((object->generation == generation) &&
			    (m->busy || (m->flags & PG_BUSY))) {
				vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
				tsleep(m, PVM, "pgrbwt", 0);
				if ((allocflags & VM_ALLOC_RETRY) == 0) {
					splx(s);
					return NULL;
				}
			}
			splx(s);
			goto retrylookup;
		} else {
			vm_page_busy(m);
			return m;
		}
	}

	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
	if (m == NULL) {
		VM_WAIT;
		if ((allocflags & VM_ALLOC_RETRY) == 0)
			return NULL;
		goto retrylookup;
	}

	return m;
}

/*
 * Mapping function for valid bits or for dirty bits in
 * a page.  May not block.
 *
 * Inputs are required to range within a page.
 */

__inline int
vm_page_bits(int base, int size)
{
	int first_bit;
	int last_bit;

	KASSERT(
	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)
	);

	if (size == 0)		/* handle degenerate case */
		return(0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return ((2 << last_bit) - (1 << first_bit));
}
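/*
 * Illustrative arithmetic only (assumes DEV_BSIZE == 512, DEV_BSHIFT == 9):
 * vm_page_bits(512, 1024) gives first_bit = 1 and last_bit = 2, so the
 * result is (2 << 2) - (1 << 1) = 8 - 2 = 0x06, i.e. a mask covering the
 * 512-byte blocks 1 and 2 of the page.
 */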
/*
 * vm_page_set_validclean:
 *
 *	Sets portions of a page valid and clean.  The arguments are expected
 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 *	of any partial chunks touched by the range.  The invalid portion of
 *	such chunks will be zero'd.
 *
 *	This routine may not block.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
	int pagebits;
	int frag;
	int endoff;

	if (size == 0)	/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */

	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    frag,
		    base - frag
		);
	}

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */

	endoff = base + size;

	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
		);
	}

	/*
	 * Set valid, clear dirty bits.  If validating the entire
	 * page we can safely clear the pmap modify bit.  We also
	 * use this opportunity to clear the PG_NOSYNC flag.  If a process
	 * takes a write fault on a MAP_NOSYNC memory area the flag will
	 * be set again.
	 *
	 * We set valid bits inclusive of any overlap, but we can only
	 * clear dirty bits for DEV_BSIZE chunks that are fully within
	 * the range.
	 */

	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
#if 0	/* NOT YET */
	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
		frag = DEV_BSIZE - frag;
		base += frag;
		size -= frag;
		if (size < 0)
			size = 0;
	}
	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
#endif
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE) {
		pmap_clear_modify(m);
		vm_page_flag_clear(m, PG_NOSYNC);
	}
}

#if 0

void
vm_page_set_dirty(vm_page_t m, int base, int size)
{
	m->dirty |= vm_page_bits(base, size);
}

#endif

void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{
	m->dirty &= ~vm_page_bits(base, size);
}

/*
 * vm_page_set_invalid:
 *
 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
 *	valid and dirty bits for the affected areas are cleared.
 *
 *	May not block.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
	int bits;

	bits = vm_page_bits(base, size);
	m->valid &= ~bits;
	m->dirty &= ~bits;
	m->object->generation++;
}
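/*
 * Illustrative arithmetic only (assumes DEV_BSIZE == 512):
 * vm_page_set_invalid(m, 0, 1024) computes vm_page_bits(0, 1024) == 0x03
 * and therefore clears the valid and dirty bits of the first two
 * 512-byte blocks, leaving the rest of the page untouched.
 */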
/*
 * vm_page_zero_invalid()
 *
 *	The kernel assumes that the invalid portions of a page contain
 *	garbage, but such pages can be mapped into memory by user code.
 *	When this occurs, we must zero out the non-valid portions of the
 *	page so user code sees what it expects.
 *
 *	Pages are most often semi-valid when the end of a file is mapped
 *	into memory and the file's size is not page aligned.
 */

void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
	int b;
	int i;

	/*
	 * Scan the valid bits looking for invalid sections that
	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
	 * valid bit may be set) have already been zeroed by
	 * vm_page_set_validclean().
	 */

	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & (1 << i))
		) {
			if (i > b) {
				pmap_zero_page_area(
				    VM_PAGE_TO_PHYS(m),
				    b << DEV_BSHIFT,
				    (i - b) << DEV_BSHIFT
				);
			}
			b = i + 1;
		}
	}

	/*
	 * setvalid is TRUE when we can safely set the zero'd areas
	 * as being valid.  We can do this if there are no cache consistency
	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
	 */

	if (setvalid)
		m->valid = VM_PAGE_BITS_ALL;
}

/*
 * vm_page_is_valid:
 *
 *	Is (partial) page valid?  Note that the case where size == 0
 *	will return FALSE in the degenerate case where the page is
 *	entirely invalid, and TRUE otherwise.
 *
 *	May not block.
 */

int
vm_page_is_valid(vm_page_t m, int base, int size)
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

/*
 * Update dirty bits from pmap/mmu.  May not block.
 */

void
vm_page_test_dirty(vm_page_t m)
{
	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
		vm_page_dirty(m);
	}
}

/*
 * This interface is for merging with malloc() someday.
 * Even if we never implement compaction so that contiguous allocation
 * works after initialization time, malloc()'s data structures are good
 * for statistics and for allocations of less than a page.
 */
void *
contigmalloc1(
	unsigned long size,	/* should be size_t here and for malloc() */
	struct malloc_type *type,
	int flags,
	unsigned long low,
	unsigned long high,
	unsigned long alignment,
	unsigned long boundary,
	vm_map_t map)
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	int pass;
	vm_page_t pga = vm_page_array;

	size = round_page(size);
	if (size == 0)
		panic("contigmalloc1: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("contigmalloc1: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("contigmalloc1: boundary must be a power of 2");

	start = 0;
	for (pass = 0; pass <= 1; pass++) {
		s = splvm();
again:
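		/*
		 * Illustrative arithmetic only (the numbers are assumptions):
		 * the boundary test in the scan below,
		 * ((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0,
		 * is nonzero exactly when the run crosses a boundary.  For
		 * boundary = 0x100000 (1 MB) and size = 0x4000 (16 KB), a run
		 * starting at phys = 0x0fff000 spans 0x0fff000-0x1002fff and
		 * crosses the 1 MB mark, so the XOR leaves high bits set and
		 * the candidate is rejected; a run starting at 0x1000000
		 * stays below 0x1004000 and passes.
		 */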
		/*
		 * Find first page in array that is free, within range, aligned, and
		 * such that the boundary won't be crossed.
		 */
		for (i = start; i < cnt.v_page_count; i++) {
			int pqtype;
			phys = VM_PAGE_TO_PHYS(&pga[i]);
			pqtype = pga[i].queue - pga[i].pc;
			if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
			    (phys >= low) && (phys < high) &&
			    ((phys & (alignment - 1)) == 0) &&
			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
				break;
		}

		/*
		 * If the above failed or we will exceed the upper bound, fail.
		 */
		if ((i == cnt.v_page_count) ||
		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
			vm_page_t m, next;

again1:
			for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
			    m != NULL;
			    m = next) {

				KASSERT(m->queue == PQ_INACTIVE,
				    ("contigmalloc1: page %p is not PQ_INACTIVE", m));

				next = TAILQ_NEXT(m, pageq);
				if (vm_page_sleep_busy(m, TRUE, "vpctw0"))
					goto again1;
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
						vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
						VOP_UNLOCK(m->object->handle, 0, curproc);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
					    m->object->type == OBJT_DEFAULT) {
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
					vm_page_cache(m);
			}

			for (m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
			    m != NULL;
			    m = next) {

				KASSERT(m->queue == PQ_ACTIVE,
				    ("contigmalloc1: page %p is not PQ_ACTIVE", m));

				next = TAILQ_NEXT(m, pageq);
				if (vm_page_sleep_busy(m, TRUE, "vpctw1"))
					goto again1;
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
						vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
						VOP_UNLOCK(m->object->handle, 0, curproc);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
					    m->object->type == OBJT_DEFAULT) {
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
					vm_page_cache(m);
			}

			splx(s);
			continue;
		}
		start = i;

		/*
		 * Check successive pages for contiguous and free.
		 */
		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
			int pqtype;
			pqtype = pga[i].queue - pga[i].pc;
			if ((VM_PAGE_TO_PHYS(&pga[i]) !=
			    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
			    ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
				start++;
				goto again;
			}
		}

		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			int pqtype;
			vm_page_t m = &pga[i];

			pqtype = m->queue - m->pc;
			if (pqtype == PQ_CACHE) {
				vm_page_busy(m);
				vm_page_free(m);
			}
			vm_page_unqueue_nowakeup(m);
			m->valid = VM_PAGE_BITS_ALL;
			if (m->flags & PG_ZERO)
				vm_page_zero_count--;
			m->flags = 0;
			KASSERT(m->dirty == 0, ("contigmalloc1: page %p was dirty", m));
			m->wire_count = 0;
			m->busy = 0;
			m->object = NULL;
		}

		/*
		 * We've found a contiguous chunk that meets our requirements.
		 * Allocate kernel VM, unfree and assign the physical pages to it and
		 * return kernel VM pointer.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) !=
		    KERN_SUCCESS) {
			/*
			 * XXX We almost never run out of kernel virtual
			 * space, so we don't make the allocated memory
			 * above available.
			 */
			vm_map_unlock(map);
			splx(s);
			return (NULL);
		}
		vm_object_reference(kernel_object);
		vm_map_insert(map, kernel_object, addr - VM_MIN_KERNEL_ADDRESS,
		    addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
		vm_map_unlock(map);

		tmp_addr = addr;
		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			vm_page_t m = &pga[i];
			vm_page_insert(m, kernel_object,
			    OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
			tmp_addr += PAGE_SIZE;
		}
		vm_map_pageable(map, addr, addr + size, FALSE);

		splx(s);
		return ((void *)addr);
	}
	return NULL;
}

void *
contigmalloc(
	unsigned long size,	/* should be size_t here and for malloc() */
	struct malloc_type *type,
	int flags,
	unsigned long low,
	unsigned long high,
	unsigned long alignment,
	unsigned long boundary)
{
	return contigmalloc1(size, type, flags, low, high, alignment, boundary,
	    kernel_map);
}

void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
	kmem_free(kernel_map, (vm_offset_t)addr, size);
}

vm_offset_t
vm_page_alloc_contig(
	vm_offset_t size,
	vm_offset_t low,
	vm_offset_t high,
	vm_offset_t alignment)
{
	return ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
	    alignment, 0ul, kernel_map));
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;

	db_printf("PQ_FREE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_CACHE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
	    vm_page_queues[PQ_ACTIVE].lcnt,
	    vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */
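#if 0
/*
 * Illustrative usage only (assumed caller, not part of this file): a
 * driver needing a physically contiguous, page-aligned 64 KB DMA buffer
 * below 16 MB could use the contigmalloc() interface defined above.
 */
static void *
example_dma_buffer_alloc(void)
{
	void *buf;

	buf = contigmalloc(64 * 1024, M_DEVBUF, M_NOWAIT,
	    0, 16 * 1024 * 1024, PAGE_SIZE, 0);
	return (buf);		/* release later with contigfree() */
}
#endif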