/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.c  7.4 (Berkeley) 5/7/91
 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
 * $DragonFly: src/sys/vm/vm_page.c,v 1.40 2008/08/25 17:01:42 dillon Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * Resident memory management module.  The module manipulates 'VM pages'.
 * A VM page is the core building block for memory management.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>

static void vm_page_queue_init(void);
static void vm_page_free_wakeup(void);
static vm_page_t vm_page_select_cache(vm_object_t, vm_pindex_t);
static vm_page_t _vm_page_list_find2(int basequeue, int index);

struct vpgqueues vm_page_queues[PQ_COUNT]; /* Array of tailq lists */

#define ASSERT_IN_CRIT_SECTION()        KKASSERT(crit_test(curthread));

RB_GENERATE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare,
             vm_pindex_t, pindex);

static void
vm_page_queue_init(void)
{
        int i;

        for (i = 0; i < PQ_L2_SIZE; i++)
                vm_page_queues[PQ_FREE+i].cnt = &vmstats.v_free_count;
        for (i = 0; i < PQ_L2_SIZE; i++)
                vm_page_queues[PQ_CACHE+i].cnt = &vmstats.v_cache_count;

        vm_page_queues[PQ_INACTIVE].cnt = &vmstats.v_inactive_count;
        vm_page_queues[PQ_ACTIVE].cnt = &vmstats.v_active_count;
        vm_page_queues[PQ_HOLD].cnt = &vmstats.v_active_count;
        /* PQ_NONE has no queue */

        for (i = 0; i < PQ_COUNT; i++)
                TAILQ_INIT(&vm_page_queues[i].pl);
}

/*
 * note: place in initialized data section?  Is this necessary?
 */
long first_page = 0;
int vm_page_array_size = 0;
int vm_page_zero_count = 0;
vm_page_t vm_page_array = 0;

/*
 * (low level boot)
 *
 * Sets the page size, perhaps based upon the memory size.
 * Must be called before any use of page-size dependent functions.
 */
void
vm_set_page_size(void)
{
        if (vmstats.v_page_size == 0)
                vmstats.v_page_size = PAGE_SIZE;
        if (((vmstats.v_page_size - 1) & vmstats.v_page_size) != 0)
                panic("vm_set_page_size: page size not a power of two");
}

/*
 * (low level boot)
 *
 * Add a new page to the freelist for use by the system.  New pages
 * are added to both the head and tail of the associated free page
 * queue in a bottom-up fashion, so both zero'd and non-zero'd page
 * requests pull 'recent' adds (higher physical addresses) first.
 *
 * Must be called in a critical section.
 */
vm_page_t
vm_add_new_page(vm_paddr_t pa)
{
        struct vpgqueues *vpq;
        vm_page_t m;

        ++vmstats.v_page_count;
        ++vmstats.v_free_count;
        m = PHYS_TO_VM_PAGE(pa);
        m->phys_addr = pa;
        m->flags = 0;
        m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
        m->queue = m->pc + PQ_FREE;
        KKASSERT(m->dirty == 0);

        vpq = &vm_page_queues[m->queue];
        if (vpq->flipflop)
                TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
        else
                TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
        vpq->flipflop = 1 - vpq->flipflop;

        vm_page_queues[m->queue].lcnt++;
        return (m);
}
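/*
 * A minimal standalone sketch (kept under #if 0, not part of the kernel
 * build) of the free-queue coloring computed by vm_add_new_page() above.
 * DEMO_PAGE_SHIFT and DEMO_PQ_L2_MASK are assumed demo values, not this
 * kernel's actual PAGE_SHIFT/PQ_L2_MASK.
 */
#if 0
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12      /* assumed 4KB pages */
#define DEMO_PQ_L2_MASK 255     /* assumed 256 colors */

int
main(void)
{
        unsigned long long pa;

        /*
         * Successive physical pages land on successive colors, so pages
         * that would collide in a physically-indexed cache sit on
         * different free queues.
         */
        for (pa = 0; pa < (5ULL << DEMO_PAGE_SHIFT);
             pa += 1ULL << DEMO_PAGE_SHIFT) {
                printf("pa=%#llx color=%llu\n",
                       pa, (pa >> DEMO_PAGE_SHIFT) & DEMO_PQ_L2_MASK);
        }
        return (0);
}
#endif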
/*
 * (low level boot)
 *
 * Initializes the resident memory module.
 *
 * Allocates memory for the vm_page array itself; each page cell is
 * initialized and placed on the free list.
 *
 * The range of physical memory available for use is taken from
 * phys_avail[], which has already had the memory in use by the
 * kernel mapped out of it.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
        vm_offset_t mapped;
        vm_size_t npages;
        vm_paddr_t page_range;
        vm_paddr_t new_end;
        int i;
        vm_paddr_t pa;
        int nblocks;
        vm_paddr_t last_pa;
        vm_paddr_t end;
        vm_paddr_t biggestone, biggestsize;
        vm_paddr_t total;

        total = 0;
        biggestsize = 0;
        biggestone = 0;
        nblocks = 0;
        vaddr = round_page(vaddr);

        for (i = 0; phys_avail[i + 1]; i += 2) {
                phys_avail[i] = round_page(phys_avail[i]);
                phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
        }

        for (i = 0; phys_avail[i + 1]; i += 2) {
                vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

                if (size > biggestsize) {
                        biggestone = i;
                        biggestsize = size;
                }
                ++nblocks;
                total += size;
        }

        end = phys_avail[biggestone+1];
        end = trunc_page(end);

        /*
         * Initialize the queue headers for the free queue, the active queue
         * and the inactive queue.
         */
        vm_page_queue_init();

        /*
         * Compute the number of pages of memory that will be available for
         * use (taking into account the overhead of a page structure per
         * page).
         */
        first_page = phys_avail[0] / PAGE_SIZE;
        page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
        npages = (total - (page_range * sizeof(struct vm_page))) / PAGE_SIZE;

        /*
         * Initialize the mem entry structures now, and put them in the free
         * queue.
         */
        vm_page_array = (vm_page_t) vaddr;
        mapped = vaddr;

        /*
         * Validate these addresses.
         */
        new_end = trunc_page(end - page_range * sizeof(struct vm_page));
        mapped = pmap_map(mapped, new_end, end,
                          VM_PROT_READ | VM_PROT_WRITE);
#ifdef __amd64__
        /* pmap_map() returns an address in the DMAP region */
        vm_page_array = (vm_page_t) mapped;
        mapped = vaddr;
#endif

        /*
         * Clear all of the page structures
         */
        bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
        vm_page_array_size = page_range;

        /*
         * Construct the free queue(s) in ascending order (by physical
         * address) so that the first 16MB of physical memory is allocated
         * last rather than first.  On large-memory machines, this avoids
         * the exhaustion of low physical memory before isa_dmainit has run.
         */
        vmstats.v_page_count = 0;
        vmstats.v_free_count = 0;
        for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
                pa = phys_avail[i];
                if (i == biggestone)
                        last_pa = new_end;
                else
                        last_pa = phys_avail[i + 1];
                while (pa < last_pa && npages-- > 0) {
                        vm_add_new_page(pa);
                        pa += PAGE_SIZE;
                }
        }
        return (mapped);
}
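/*
 * A standalone sketch (under #if 0) of the sizing arithmetic above:
 * page_range covers the span of managed physical pages and npages is
 * what remains after subtracting the vm_page array overhead.  The
 * segment bounds and sizeof(struct vm_page) are assumed demo values.
 */
#if 0
#include <stdio.h>

#define DEMO_PAGE_SIZE    4096ULL       /* assumed */
#define DEMO_VM_PAGE_SIZE 120ULL        /* assumed sizeof(struct vm_page) */

int
main(void)
{
        /* One hypothetical phys_avail[] segment: 16MB..256MB. */
        unsigned long long first = 16ULL << 20;
        unsigned long long last = 256ULL << 20;
        unsigned long long total = last - first;
        unsigned long long first_page = first / DEMO_PAGE_SIZE;
        unsigned long long page_range = last / DEMO_PAGE_SIZE - first_page;
        unsigned long long npages =
            (total - page_range * DEMO_VM_PAGE_SIZE) / DEMO_PAGE_SIZE;

        /* Prints page_range=61440 npages=59640 (1800 pages of overhead). */
        printf("page_range=%llu npages=%llu (overhead %llu pages)\n",
               page_range, npages, page_range - npages);
        return (0);
}
#endif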
/*
 * Scan comparison function for Red-Black tree scans.  An inclusive
 * (start,end) is expected.  Other fields are not used.
 */
int
rb_vm_page_scancmp(struct vm_page *p, void *data)
{
        struct rb_vm_page_scan_info *info = data;

        if (p->pindex < info->start_pindex)
                return(-1);
        if (p->pindex > info->end_pindex)
                return(1);
        return(0);
}

int
rb_vm_page_compare(struct vm_page *p1, struct vm_page *p2)
{
        if (p1->pindex < p2->pindex)
                return(-1);
        if (p1->pindex > p2->pindex)
                return(1);
        return(0);
}

/*
 * The opposite of vm_page_hold().  A page can be freed while being held,
 * which places it on the PQ_HOLD queue.  We must call vm_page_free_toq()
 * in this case to actually free it once the hold count drops to 0.
 *
 * This routine must be called at splvm().
 */
void
vm_page_unhold(vm_page_t mem)
{
        --mem->hold_count;
        KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
        if (mem->hold_count == 0 && mem->queue == PQ_HOLD) {
                vm_page_busy(mem);
                vm_page_free_toq(mem);
        }
}

/*
 * Inserts the given mem entry into the object and object list.
 *
 * The pagetables are not updated but will presumably fault the page
 * in if necessary, or if a kernel page the caller will at some point
 * enter the page into the kernel's pmap.  We are not allowed to block
 * here so we *can't* do this anyway.
 *
 * This routine may not block.
 * This routine must be called with a critical section held.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
        ASSERT_IN_CRIT_SECTION();
        if (m->object != NULL)
                panic("vm_page_insert: already inserted");

        /*
         * Record the object/offset pair in this page
         */
        m->object = object;
        m->pindex = pindex;

        /*
         * Insert it into the object.
         */
        vm_page_rb_tree_RB_INSERT(&object->rb_memq, m);
        object->generation++;

        /*
         * Show that the object has one more resident page.
         */
        object->resident_page_count++;

        /*
         * Since we are inserting a new and possibly dirty page,
         * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
         */
        if ((m->valid & m->dirty) || (m->flags & PG_WRITEABLE))
                vm_object_set_writeable_dirty(object);
}

/*
 * Removes the given vm_page_t from its object's (object,index) RB tree
 * lookup structure and updates the object's resident page count.
 *
 * The underlying pmap entry (if any) is NOT removed here.
 * This routine may not block.
 *
 * The page must be BUSY and will remain BUSY on return.  No spl needs to be
 * held on call to this routine.
 *
 * note: FreeBSD side effect was to unbusy the page on return.  We leave
 * it busy.
 */
void
vm_page_remove(vm_page_t m)
{
        vm_object_t object;

        crit_enter();
        if (m->object == NULL) {
                crit_exit();
                return;
        }

        if ((m->flags & PG_BUSY) == 0)
                panic("vm_page_remove: page not busy");

        object = m->object;

        /*
         * Remove the page from the object and update the object.
         */
        vm_page_rb_tree_RB_REMOVE(&object->rb_memq, m);
        object->resident_page_count--;
        object->generation++;
        m->object = NULL;

        crit_exit();
}
/*
 * Locate and return the page at (object, pindex), or NULL if the
 * page could not be found.
 *
 * This routine will operate properly without spl protection, but
 * the returned page could be in flux if it is busy.  Because an
 * interrupt can race a caller's busy check (unbusying and freeing the
 * page we return before the caller is able to check the busy bit),
 * the caller should generally call this routine with a critical
 * section held.
 *
 * Callers may call this routine without spl protection if they know
 * 'for sure' that the page will not be ripped out from under them
 * by an interrupt.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
        vm_page_t m;

        /*
         * Search the object's RB tree for this object/offset pair
         */
        crit_enter();
        m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
        crit_exit();
        KKASSERT(m == NULL || (m->object == object && m->pindex == pindex));
        return(m);
}

/*
 * vm_page_rename()
 *
 * Move the given memory entry from its current object to the specified
 * target object/offset.
 *
 * The object must be locked.
 * This routine may not block.
 *
 * Note: This routine will raise itself to splvm(), the caller need not.
 *
 * Note: Swap associated with the page must be invalidated by the move.  We
 *       have to do this for several reasons: (1) we aren't freeing the
 *       page, (2) we are dirtying the page, (3) the VM system is probably
 *       moving the page from object A to B, and will then later move
 *       the backing store from A to B and we can't have a conflict.
 *
 * Note: We *always* dirty the page.  It is necessary both for the
 *       fact that we moved it, and because we may be invalidating
 *       swap.  If the page is on the cache, we have to deactivate it
 *       or vm_page_dirty() will panic.  Dirty pages are not allowed
 *       on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
        crit_enter();
        vm_page_remove(m);
        vm_page_insert(m, new_object, new_pindex);
        if (m->queue - m->pc == PQ_CACHE)
                vm_page_deactivate(m);
        vm_page_dirty(m);
        vm_page_wakeup(m);
        crit_exit();
}

/*
 * vm_page_unqueue() without any wakeup.  This routine is used when a page
 * is being moved between queues or otherwise is to remain busied by the
 * caller.
 *
 * This routine must be called at splhigh().
 * This routine may not block.
 */
void
vm_page_unqueue_nowakeup(vm_page_t m)
{
        int queue = m->queue;
        struct vpgqueues *pq;

        if (queue != PQ_NONE) {
                pq = &vm_page_queues[queue];
                m->queue = PQ_NONE;
                TAILQ_REMOVE(&pq->pl, m, pageq);
                (*pq->cnt)--;
                pq->lcnt--;
        }
}

/*
 * vm_page_unqueue() - Remove a page from its queue, wakeup the pagedaemon
 * if necessary.
 *
 * This routine must be called at splhigh().
 * This routine may not block.
 */
void
vm_page_unqueue(vm_page_t m)
{
        int queue = m->queue;
        struct vpgqueues *pq;

        if (queue != PQ_NONE) {
                m->queue = PQ_NONE;
                pq = &vm_page_queues[queue];
                TAILQ_REMOVE(&pq->pl, m, pageq);
                (*pq->cnt)--;
                pq->lcnt--;
                if ((queue - m->pc) == PQ_CACHE || (queue - m->pc) == PQ_FREE)
                        pagedaemon_wakeup();
        }
}
/*
 * vm_page_list_find()
 *
 * Find a page on the specified queue with color optimization.
 *
 * The page coloring optimization attempts to locate a page that does
 * not overload other nearby pages in the object in the cpu's L1 or L2
 * caches.  We need this optimization because cpu caches tend to be
 * physical caches, while object spaces tend to be virtual.
 *
 * This routine must be called at splvm().
 * This routine may not block.
 *
 * Note that this routine is carefully inlined.  A non-inlined version
 * is available for outside callers but the only critical path is
 * from within this source file.
 */
static __inline
vm_page_t
_vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
        vm_page_t m;

        if (prefer_zero)
                m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
        else
                m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
        if (m == NULL)
                m = _vm_page_list_find2(basequeue, index);
        return(m);
}

static vm_page_t
_vm_page_list_find2(int basequeue, int index)
{
        int i;
        vm_page_t m = NULL;
        struct vpgqueues *pq;

        pq = &vm_page_queues[basequeue];

        /*
         * Note that for the first loop, index+i and index-i wind up at the
         * same place.  Even though this is not totally optimal, we've already
         * blown it by missing the cache case so we do not care.
         */
        for (i = PQ_L2_SIZE / 2; i > 0; --i) {
                if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
                        break;

                if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
                        break;
        }
        return(m);
}

vm_page_t
vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
        return(_vm_page_list_find(basequeue, index, prefer_zero));
}

/*
 * Find a page on the cache queue with color optimization.  As pages
 * might be found, but not applicable, they are deactivated.  This
 * keeps us from using potentially busy cached pages.
 *
 * This routine must be called with a critical section held.
 * This routine may not block.
 */
vm_page_t
vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
{
        vm_page_t m;

        while (TRUE) {
                m = _vm_page_list_find(
                    PQ_CACHE,
                    (pindex + object->pg_color) & PQ_L2_MASK,
                    FALSE
                );
                if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
                          m->hold_count || m->wire_count)) {
                        vm_page_deactivate(m);
                        continue;
                }
                return m;
        }
        /* not reached */
}

/*
 * Find a free or zero page, with specified preference.  We attempt to
 * inline the nominal case and fall back to _vm_page_list_find2()
 * otherwise.
 *
 * This routine must be called with a critical section held.
 * This routine may not block.
 */
static __inline vm_page_t
vm_page_select_free(vm_object_t object, vm_pindex_t pindex,
                    boolean_t prefer_zero)
{
        vm_page_t m;

        m = _vm_page_list_find(
            PQ_FREE,
            (pindex + object->pg_color) & PQ_L2_MASK,
            prefer_zero
        );
        return(m);
}
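/*
 * A standalone sketch (under #if 0) of the fallback probe order used by
 * _vm_page_list_find2() above when the preferred color's queue is empty:
 * the scan starts at the color diametrically opposite 'index' (where
 * index+i and index-i coincide) and works inward toward the neighboring
 * colors.  DEMO_PQ_L2_SIZE is an assumed demo value.
 */
#if 0
#include <stdio.h>

#define DEMO_PQ_L2_SIZE 256
#define DEMO_PQ_L2_MASK (DEMO_PQ_L2_SIZE - 1)

int
main(void)
{
        int index = 10;         /* hypothetical preferred color */
        int i;

        for (i = DEMO_PQ_L2_SIZE / 2; i > 0; --i) {
                /* Print only the first and last few probes. */
                if (i > DEMO_PQ_L2_SIZE / 2 - 2 || i <= 2) {
                        printf("probe %d then %d\n",
                               (index + i) & DEMO_PQ_L2_MASK,
                               (index - i) & DEMO_PQ_L2_MASK);
                }
        }
        return (0);
}
#endif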
/*
 * vm_page_alloc()
 *
 * Allocate and return a memory cell associated with this VM object/offset
 * pair.
 *
 * page_req classes:
 *
 *      VM_ALLOC_NORMAL         allow use of cache pages, nominal free drain
 *      VM_ALLOC_SYSTEM         greater free drain
 *      VM_ALLOC_INTERRUPT      allow free list to be completely drained
 *      VM_ALLOC_ZERO           advisory request for pre-zero'd page
 *
 * The object must be locked.
 * This routine may not block.
 * The returned page will be marked PG_BUSY.
 *
 * Additional special handling is required when called from an interrupt
 * (VM_ALLOC_INTERRUPT).  We are not allowed to mess with the page cache
 * in this case.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
{
        vm_page_t m = NULL;

        KKASSERT(object != NULL);
        KASSERT(!vm_page_lookup(object, pindex),
                ("vm_page_alloc: page already allocated"));
        KKASSERT(page_req &
                 (VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));

        /*
         * Certain system threads (pageout daemon, buf_daemons) are
         * allowed to eat deeper into the free page list.
         */
        if (curthread->td_flags & TDF_SYSTHREAD)
                page_req |= VM_ALLOC_SYSTEM;

        crit_enter();
loop:
        if (vmstats.v_free_count > vmstats.v_free_reserved ||
            ((page_req & VM_ALLOC_INTERRUPT) && vmstats.v_free_count > 0) ||
            ((page_req & VM_ALLOC_SYSTEM) && vmstats.v_cache_count == 0 &&
             vmstats.v_free_count > vmstats.v_interrupt_free_min)
        ) {
                /*
                 * The free queue has sufficient free pages to take one out.
                 */
                if (page_req & VM_ALLOC_ZERO)
                        m = vm_page_select_free(object, pindex, TRUE);
                else
                        m = vm_page_select_free(object, pindex, FALSE);
        } else if (page_req & VM_ALLOC_NORMAL) {
                /*
                 * Allocatable from the cache (non-interrupt only).  On
                 * success, we must free the page and try again, thus
                 * ensuring that vmstats.v_*_free_min counters are replenished.
                 */
#ifdef INVARIANTS
                if (curthread->td_preempted) {
                        kprintf("vm_page_alloc(): warning, attempt to allocate"
                                " cache page from preempting interrupt\n");
                        m = NULL;
                } else {
                        m = vm_page_select_cache(object, pindex);
                }
#else
                m = vm_page_select_cache(object, pindex);
#endif
                /*
                 * On success move the page into the free queue and loop.
                 */
                if (m != NULL) {
                        KASSERT(m->dirty == 0,
                                ("Found dirty cache page %p", m));
                        vm_page_busy(m);
                        vm_page_protect(m, VM_PROT_NONE);
                        vm_page_free(m);
                        goto loop;
                }

                /*
                 * On failure return NULL
                 */
                crit_exit();
#if defined(DIAGNOSTIC)
                if (vmstats.v_cache_count > 0)
                        kprintf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", vmstats.v_cache_count);
#endif
                vm_pageout_deficit++;
                pagedaemon_wakeup();
                return (NULL);
        } else {
                /*
                 * No pages available, wakeup the pageout daemon and give up.
                 */
                crit_exit();
                vm_pageout_deficit++;
                pagedaemon_wakeup();
                return (NULL);
        }

        /*
         * Good page found.  The page has not yet been busied.  We are in
         * a critical section.
         */
        KASSERT(m != NULL, ("vm_page_alloc(): missing page on free queue\n"));
        KASSERT(m->dirty == 0,
                ("vm_page_alloc: free/cache page %p was dirty", m));

        /*
         * Remove from free queue
         */
        vm_page_unqueue_nowakeup(m);

        /*
         * Initialize structure.  Only the PG_ZERO flag is inherited.  Set
         * the page PG_BUSY
         */
        if (m->flags & PG_ZERO) {
                vm_page_zero_count--;
                m->flags = PG_ZERO | PG_BUSY;
        } else {
                m->flags = PG_BUSY;
        }
        m->wire_count = 0;
        m->hold_count = 0;
        m->act_count = 0;
        m->busy = 0;
        m->valid = 0;

        /*
         * vm_page_insert() is safe prior to the crit_exit().  Note also that
         * inserting a page here does not insert it into the pmap (which
         * could cause us to block allocating memory).  We cannot block
         * anywhere.
         */
        vm_page_insert(m, object, pindex);

        /*
         * Don't wakeup too often - wakeup the pageout daemon when
         * we would be nearly out of memory.
         */
        pagedaemon_wakeup();

        crit_exit();

        /*
         * A PG_BUSY page is returned.
         */
        return (m);
}
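/*
 * A standalone sketch (under #if 0) mirroring the free-vs-cache decision
 * at the top of vm_page_alloc().  The struct, flag values, and the
 * vmstats snapshot are all hypothetical demo stand-ins, not the real
 * kernel definitions.
 */
#if 0
#include <stdio.h>

struct demo_vmstats {
        int v_free_count;
        int v_cache_count;
        int v_free_reserved;
        int v_interrupt_free_min;
};

#define DEMO_ALLOC_NORMAL    0x01
#define DEMO_ALLOC_SYSTEM    0x02
#define DEMO_ALLOC_INTERRUPT 0x04

/* Same threshold logic as the if/else ladder in vm_page_alloc(). */
static const char *
demo_source(const struct demo_vmstats *vs, int req)
{
        if (vs->v_free_count > vs->v_free_reserved ||
            ((req & DEMO_ALLOC_INTERRUPT) && vs->v_free_count > 0) ||
            ((req & DEMO_ALLOC_SYSTEM) && vs->v_cache_count == 0 &&
             vs->v_free_count > vs->v_interrupt_free_min))
                return ("free queue");
        if (req & DEMO_ALLOC_NORMAL)
                return ("cache queue (page freed, then retry)");
        return ("fail, wake the pageout daemon");
}

int
main(void)
{
        /* Low free memory: 10 free pages against a reserve of 128. */
        struct demo_vmstats vs = { 10, 50, 128, 2 };

        printf("NORMAL: %s\n", demo_source(&vs, DEMO_ALLOC_NORMAL));
        printf("SYSTEM: %s\n", demo_source(&vs, DEMO_ALLOC_SYSTEM));
        printf("INTR:   %s\n", demo_source(&vs, DEMO_ALLOC_INTERRUPT));
        return (0);
}
#endif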
/*
 * Block until free pages are available for allocation, called in various
 * places before memory allocations.
 */
void
vm_wait(int timo)
{
        crit_enter();
        if (curthread == pagethread) {
                vm_pageout_pages_needed = 1;
                tsleep(&vm_pageout_pages_needed, 0, "VMWait", timo);
        } else {
                if (vm_pages_needed == 0) {
                        vm_pages_needed = 1;
                        wakeup(&vm_pages_needed);
                }
                tsleep(&vmstats.v_free_count, 0, "vmwait", timo);
        }
        crit_exit();
}

/*
 * Block until free pages are available for allocation
 *
 * Called only in vm_fault so that processes page faulting can be
 * easily tracked.
 */
void
vm_waitpfault(void)
{
        crit_enter();
        if (vm_pages_needed == 0) {
                vm_pages_needed = 1;
                wakeup(&vm_pages_needed);
        }
        tsleep(&vmstats.v_free_count, 0, "pfault", 0);
        crit_exit();
}

/*
 * Put the specified page on the active list (if appropriate).  Ensure
 * that act_count is at least ACT_INIT but do not otherwise mess with it.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
        crit_enter();
        if (m->queue != PQ_ACTIVE) {
                if ((m->queue - m->pc) == PQ_CACHE)
                        mycpu->gd_cnt.v_reactivated++;

                vm_page_unqueue(m);

                if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
                        m->queue = PQ_ACTIVE;
                        vm_page_queues[PQ_ACTIVE].lcnt++;
                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl,
                                          m, pageq);
                        if (m->act_count < ACT_INIT)
                                m->act_count = ACT_INIT;
                        vmstats.v_active_count++;
                }
        } else {
                if (m->act_count < ACT_INIT)
                        m->act_count = ACT_INIT;
        }
        crit_exit();
}

/*
 * Helper routine for vm_page_free_toq() and vm_page_cache().  This
 * routine is called when a page has been added to the cache or free
 * queues.
 *
 * This routine may not block.
 * This routine must be called at splvm()
 */
static __inline void
vm_page_free_wakeup(void)
{
        /*
         * If the pageout daemon needs pages, then tell it that there are
         * some free.
         */
        if (vm_pageout_pages_needed &&
            vmstats.v_cache_count + vmstats.v_free_count >=
            vmstats.v_pageout_free_min
        ) {
                wakeup(&vm_pageout_pages_needed);
                vm_pageout_pages_needed = 0;
        }

        /*
         * Wakeup processes that are waiting on memory if we hit a
         * high water mark.  And wakeup the scheduler process if we have
         * lots of memory; this process will swapin processes.
         */
        if (vm_pages_needed && !vm_page_count_min(0)) {
                vm_pages_needed = 0;
                wakeup(&vmstats.v_free_count);
        }
}
/*
 * vm_page_free_toq:
 *
 * Returns the given page to the PQ_FREE list, disassociating it from
 * any VM object.
 *
 * The vm_page must be PG_BUSY on entry.  PG_BUSY will be released on
 * return (the page will have been freed).  No particular spl is required
 * on entry.
 *
 * This routine may not block.
 */
void
vm_page_free_toq(vm_page_t m)
{
        struct vpgqueues *pq;

        crit_enter();
        mycpu->gd_cnt.v_tfree++;

        KKASSERT((m->flags & PG_MAPPED) == 0);

        if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
                kprintf("vm_page_free: pindex(%lu), busy(%d), "
                        "PG_BUSY(%d), hold(%d)\n",
                        (u_long)m->pindex, m->busy,
                        ((m->flags & PG_BUSY) ? 1 : 0), m->hold_count);
                if ((m->queue - m->pc) == PQ_FREE)
                        panic("vm_page_free: freeing free page");
                else
                        panic("vm_page_free: freeing busy page");
        }

        /*
         * Unqueue, then remove page.  Note that we cannot destroy
         * the page here because we do not want to call the pager's
         * callback routine until after we've put the page on the
         * appropriate free queue.
         */
        vm_page_unqueue_nowakeup(m);
        vm_page_remove(m);

        /*
         * No further management of fictitious pages occurs beyond object
         * and queue removal.
         */
        if ((m->flags & PG_FICTITIOUS) != 0) {
                vm_page_wakeup(m);
                crit_exit();
                return;
        }

        m->valid = 0;
        vm_page_undirty(m);

        if (m->wire_count != 0) {
                if (m->wire_count > 1) {
                        panic("vm_page_free: invalid wire count (%d), "
                              "pindex: 0x%lx",
                              m->wire_count, (long)m->pindex);
                }
                panic("vm_page_free: freeing wired page");
        }

        /*
         * Clear the UNMANAGED flag when freeing an unmanaged page.
         */
        if (m->flags & PG_UNMANAGED) {
                m->flags &= ~PG_UNMANAGED;
        }

        if (m->hold_count != 0) {
                m->flags &= ~PG_ZERO;
                m->queue = PQ_HOLD;
        } else {
                m->queue = PQ_FREE + m->pc;
        }
        pq = &vm_page_queues[m->queue];
        pq->lcnt++;
        ++(*pq->cnt);

        /*
         * Put zero'd pages on the end ( where we look for zero'd pages
         * first ) and non-zero'd pages at the head.
         */
        if (m->flags & PG_ZERO) {
                TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
                ++vm_page_zero_count;
        } else {
                TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
        }
        vm_page_wakeup(m);
        vm_page_free_wakeup();
        crit_exit();
}

/*
 * vm_page_unmanage()
 *
 * Prevent PV management from being done on the page.  The page is
 * removed from the paging queues as if it were wired, and as a
 * consequence of no longer being managed the pageout daemon will not
 * touch it (since there is no way to locate the pte mappings for the
 * page).  madvise() calls that mess with the pmap will also no longer
 * operate on the page.
 *
 * Beyond that the page is still reasonably 'normal'.  Freeing the page
 * will clear the flag.
 *
 * This routine is used by OBJT_PHYS objects - objects using unswappable
 * physical memory as backing store rather than swap-backed memory and
 * will eventually be extended to support 4MB unmanaged physical
 * mappings.
 *
 * Must be called with a critical section held.
 */
void
vm_page_unmanage(vm_page_t m)
{
        ASSERT_IN_CRIT_SECTION();
        if ((m->flags & PG_UNMANAGED) == 0) {
                if (m->wire_count == 0)
                        vm_page_unqueue(m);
        }
        vm_page_flag_set(m, PG_UNMANAGED);
}
/*
 * Mark this page as wired down by yet another map, removing it from
 * paging queues as necessary.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{
        /*
         * Only bump the wire statistics if the page is not already wired,
         * and only unqueue the page if it is on some queue (if it is unmanaged
         * it is already off the queues).  Don't do anything with fictitious
         * pages because they are always wired.
         */
        crit_enter();
        if ((m->flags & PG_FICTITIOUS) == 0) {
                if (m->wire_count == 0) {
                        if ((m->flags & PG_UNMANAGED) == 0)
                                vm_page_unqueue(m);
                        vmstats.v_wire_count++;
                }
                m->wire_count++;
                KASSERT(m->wire_count != 0,
                        ("vm_page_wire: wire_count overflow m=%p", m));
        }
        crit_exit();
}

/*
 * Release one wiring of this page, potentially enabling it to be paged again.
 *
 * Many pages placed on the inactive queue should actually go
 * into the cache, but it is difficult to figure out which.  What
 * we do instead, if the inactive target is well met, is to put
 * clean pages at the head of the inactive queue instead of the tail.
 * This will cause them to be moved to the cache more quickly and
 * if not actively re-referenced, freed more quickly.  If we just
 * stick these pages at the end of the inactive queue, heavy filesystem
 * meta-data accesses can cause an unnecessary paging load on memory bound
 * processes.  This optimization causes one-time-use metadata to be
 * reused more quickly.
 *
 * BUT, if we are in a low-memory situation we have no choice but to
 * put clean pages on the cache queue.
 *
 * A number of routines use vm_page_unwire() to guarantee that the page
 * will go into either the inactive or active queues, and will NEVER
 * be placed in the cache - for example, just after dirtying a page.
 * Dirty pages are not allowed on the cache queue.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{
        crit_enter();
        if (m->flags & PG_FICTITIOUS) {
                /* do nothing */
        } else if (m->wire_count <= 0) {
                panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
        } else {
                if (--m->wire_count == 0) {
                        --vmstats.v_wire_count;
                        if (m->flags & PG_UNMANAGED) {
                                ;
                        } else if (activate) {
                                TAILQ_INSERT_TAIL(
                                    &vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                                m->queue = PQ_ACTIVE;
                                vm_page_queues[PQ_ACTIVE].lcnt++;
                                vmstats.v_active_count++;
                        } else {
                                vm_page_flag_clear(m, PG_WINATCFLS);
                                TAILQ_INSERT_TAIL(
                                    &vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                                m->queue = PQ_INACTIVE;
                                vm_page_queues[PQ_INACTIVE].lcnt++;
                                vmstats.v_inactive_count++;
                        }
                }
        }
        crit_exit();
}
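/*
 * A standalone sketch (under #if 0) of the head-vs-tail queue placement
 * used by vm_page_unwire() and _vm_page_deactivate() below: the pageout
 * scan consumes the inactive queue from the head, so head insertion
 * ('as if cached') makes a page eligible for reclaim first.  The struct
 * and page ids are hypothetical; <sys/queue.h> is the real macro set.
 */
#if 0
#include <sys/queue.h>
#include <stdio.h>

struct demo_page {
        int id;
        TAILQ_ENTRY(demo_page) pageq;
};
TAILQ_HEAD(demo_pglist, demo_page);

int
main(void)
{
        struct demo_pglist inactive = TAILQ_HEAD_INITIALIZER(inactive);
        struct demo_page pages[4] = { { 0 }, { 1 }, { 2 }, { 3 } };
        struct demo_page *p;
        int i;

        /* Pages 0-2 age normally (tail); page 3 is treated as cached. */
        for (i = 0; i < 3; ++i)
                TAILQ_INSERT_TAIL(&inactive, &pages[i], pageq);
        TAILQ_INSERT_HEAD(&inactive, &pages[3], pageq);

        /* Page 3 prints first: it is reclaimed ahead of the others. */
        TAILQ_FOREACH(p, &inactive, pageq)
                printf("scan order: page %d\n", p->id);
        return (0);
}
#endif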
/*
 * Move the specified page to the inactive queue.  If the page has
 * any associated swap, the swap is deallocated.
 *
 * Normally athead is 0 resulting in LRU operation.  athead is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * This routine may not block.
 */
static __inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
        /*
         * Ignore if already inactive.
         */
        if (m->queue == PQ_INACTIVE)
                return;

        if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
                if ((m->queue - m->pc) == PQ_CACHE)
                        mycpu->gd_cnt.v_reactivated++;
                vm_page_flag_clear(m, PG_WINATCFLS);
                vm_page_unqueue(m);
                if (athead) {
                        TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl,
                                          m, pageq);
                } else {
                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl,
                                          m, pageq);
                }
                m->queue = PQ_INACTIVE;
                vm_page_queues[PQ_INACTIVE].lcnt++;
                vmstats.v_inactive_count++;
        }
}

void
vm_page_deactivate(vm_page_t m)
{
        crit_enter();
        _vm_page_deactivate(m, 0);
        crit_exit();
}

/*
 * vm_page_try_to_cache:
 *
 * Returns 0 on failure, 1 on success
 */
int
vm_page_try_to_cache(vm_page_t m)
{
        crit_enter();
        if (m->dirty || m->hold_count || m->busy || m->wire_count ||
            (m->flags & (PG_BUSY|PG_UNMANAGED))) {
                crit_exit();
                return(0);
        }
        vm_page_test_dirty(m);
        if (m->dirty) {
                crit_exit();
                return(0);
        }
        vm_page_cache(m);
        crit_exit();
        return(1);
}

/*
 * Attempt to free the page.  If we cannot free it, we do nothing.
 * 1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{
        crit_enter();
        if (m->dirty || m->hold_count || m->busy || m->wire_count ||
            (m->flags & (PG_BUSY|PG_UNMANAGED))) {
                crit_exit();
                return(0);
        }
        vm_page_test_dirty(m);
        if (m->dirty) {
                crit_exit();
                return(0);
        }
        vm_page_busy(m);
        vm_page_protect(m, VM_PROT_NONE);
        vm_page_free(m);
        crit_exit();
        return(1);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 *
 * This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{
        ASSERT_IN_CRIT_SECTION();

        if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
            m->wire_count || m->hold_count) {
                kprintf("vm_page_cache: attempting to cache busy/held page\n");
                return;
        }

        /*
         * Already in the cache (and thus not mapped)
         */
        if ((m->queue - m->pc) == PQ_CACHE) {
                KKASSERT((m->flags & PG_MAPPED) == 0);
                return;
        }

        /*
         * Caller is required to test m->dirty, but note that the act of
         * removing the page from its maps can cause it to become dirty
         * on an SMP system due to another cpu running in usermode.
         */
        if (m->dirty) {
                panic("vm_page_cache: caching a dirty page, pindex: %ld",
                      (long)m->pindex);
        }

        /*
         * Remove all pmaps and indicate that the page is not
         * writeable or mapped.  Our vm_page_protect() call may
         * have blocked (especially w/ VM_PROT_NONE), so recheck
         * everything.
         */
        vm_page_busy(m);
        vm_page_protect(m, VM_PROT_NONE);
        vm_page_wakeup(m);
        if ((m->flags & (PG_BUSY|PG_UNMANAGED|PG_MAPPED)) || m->busy ||
            m->wire_count || m->hold_count) {
                /* do nothing */
        } else if (m->dirty) {
                vm_page_deactivate(m);
        } else {
                vm_page_unqueue_nowakeup(m);
                m->queue = PQ_CACHE + m->pc;
                vm_page_queues[m->queue].lcnt++;
                TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
                vmstats.v_cache_count++;
                vm_page_free_wakeup();
        }
}
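/*
 * A standalone sketch (under #if 0) of the queue/color encoding used by
 * vm_page_cache() above and by tests like (m->queue - m->pc) == PQ_CACHE
 * throughout this file.  The DEMO_PQ_* values are assumed for the demo;
 * the real constants come from <vm/vm_page.h>.
 */
#if 0
#include <stdio.h>

#define DEMO_PQ_L2_SIZE 256
#define DEMO_PQ_FREE    1
#define DEMO_PQ_CACHE   (DEMO_PQ_FREE + DEMO_PQ_L2_SIZE)

int
main(void)
{
        int pc = 37;                    /* hypothetical page color */
        int queue = DEMO_PQ_CACHE + pc; /* as set by vm_page_cache() */

        /*
         * Subtracting the color recovers the base queue, which is why
         * the membership test works for any color.
         */
        printf("queue=%d base=%d is_cache=%d\n",
               queue, queue - pc, (queue - pc) == DEMO_PQ_CACHE);
        return (0);
}
#endif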
/*
 * vm_page_dontneed()
 *
 * Cache, deactivate, or do nothing as appropriate.  This routine
 * is typically used by madvise() MADV_DONTNEED.
 *
 * Generally speaking we want to move the page into the cache so
 * it gets reused quickly.  However, this can result in a silly syndrome
 * due to the page recycling too quickly.  Small objects will not be
 * fully cached.  On the other hand, if we move the page to the inactive
 * queue we wind up with a problem whereby very large objects
 * unnecessarily blow away our inactive and cache queues.
 *
 * The solution is to move the pages based on a fixed weighting.  We
 * either leave them alone, deactivate them, or move them to the cache,
 * where moving them to the cache has the highest weighting.
 * By forcing some pages into other queues we eventually force the
 * system to balance the queues, potentially recovering other unrelated
 * space from active.  The idea is to not force this to happen too
 * often.
 */
void
vm_page_dontneed(vm_page_t m)
{
        static int dnweight;
        int dnw;
        int head;

        dnw = ++dnweight;

        /*
         * Occasionally leave the page alone.
         */
        crit_enter();
        if ((dnw & 0x01F0) == 0 ||
            m->queue == PQ_INACTIVE ||
            m->queue - m->pc == PQ_CACHE
        ) {
                if (m->act_count >= ACT_INIT)
                        --m->act_count;
                crit_exit();
                return;
        }

        if (m->dirty == 0)
                vm_page_test_dirty(m);

        if (m->dirty || (dnw & 0x0070) == 0) {
                /*
                 * Deactivate the page 3 times out of 32.
                 */
                head = 0;
        } else {
                /*
                 * Cache the page 28 times out of every 32.  Note that
                 * the page is deactivated instead of cached, but placed
                 * at the head of the queue instead of the tail.
                 */
                head = 1;
        }
        _vm_page_deactivate(m, head);
        crit_exit();
}
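/*
 * A standalone sketch (under #if 0) tallying the vm_page_dontneed()
 * weighting masks over one full period, assuming a clean page so the
 * m->dirty branch never forces the tail path.  It prints none=16
 * deact=48 cache=448, i.e. 1, 3 and 28 out of every 32 calls, matching
 * the comments in the routine above.
 */
#if 0
#include <stdio.h>

int
main(void)
{
        int dnw, none = 0, deact = 0, cache = 0;

        for (dnw = 0; dnw < 0x200; ++dnw) {
                if ((dnw & 0x01F0) == 0)
                        ++none;         /* leave the page alone */
                else if ((dnw & 0x0070) == 0)
                        ++deact;        /* deactivate at the tail (LRU) */
                else
                        ++cache;        /* head insert, 'as if cached' */
        }
        printf("none=%d deact=%d cache=%d\n", none, deact, cache);
        return (0);
}
#endif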
/*
 * Grab a page, blocking if it is busy and allocating a page if necessary.
 * A busy page is returned or NULL.
 *
 * If VM_ALLOC_RETRY is specified VM_ALLOC_NORMAL must also be specified.
 *
 * This routine may block, but if VM_ALLOC_RETRY is not set then NULL is
 * always returned if we had blocked.
 * This routine will never return NULL if VM_ALLOC_RETRY is set.
 * This routine may not be called from an interrupt.
 * The returned page may not be entirely valid.
 *
 * This routine may be called from mainline code without spl protection and
 * be guaranteed a busied page associated with the object at the specified
 * index.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
        vm_page_t m;
        int generation;

        KKASSERT(allocflags &
                 (VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));
        crit_enter();
retrylookup:
        if ((m = vm_page_lookup(object, pindex)) != NULL) {
                if (m->busy || (m->flags & PG_BUSY)) {
                        generation = object->generation;

                        while ((object->generation == generation) &&
                               (m->busy || (m->flags & PG_BUSY))) {
                                vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
                                tsleep(m, 0, "pgrbwt", 0);
                                if ((allocflags & VM_ALLOC_RETRY) == 0) {
                                        m = NULL;
                                        goto done;
                                }
                        }
                        goto retrylookup;
                } else {
                        vm_page_busy(m);
                        goto done;
                }
        }
        m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
        if (m == NULL) {
                vm_wait(0);
                if ((allocflags & VM_ALLOC_RETRY) == 0)
                        goto done;
                goto retrylookup;
        }
done:
        crit_exit();
        return(m);
}

/*
 * Mapping function for valid bits or for dirty bits in
 * a page.  May not block.
 *
 * Inputs are required to range within a page.
 */
__inline int
vm_page_bits(int base, int size)
{
        int first_bit;
        int last_bit;

        KASSERT(
            base + size <= PAGE_SIZE,
            ("vm_page_bits: illegal base/size %d/%d", base, size)
        );

        if (size == 0)          /* handle degenerate case */
                return(0);

        first_bit = base >> DEV_BSHIFT;
        last_bit = (base + size - 1) >> DEV_BSHIFT;

        return ((2 << last_bit) - (1 << first_bit));
}

/*
 * Helper for the vm_page_set_valid*() functions below.  The arguments
 * are expected to be DEV_BSIZE aligned, but if they aren't the valid
 * bitmap is inclusive of any partial chunks touched by the range, so
 * the invalid portion of such chunks is zero'd here.
 *
 * NOTE: When truncating a buffer vnode_pager_setsize() will automatically
 *       align base to DEV_BSIZE so as not to mark clean a partially
 *       truncated device block.  Otherwise the dirty page status might be
 *       lost.
 *
 * This routine may not block.
 *
 * (base + size) must be less than or equal to PAGE_SIZE.
 */
static void
_vm_page_zero_valid(vm_page_t m, int base, int size)
{
        int frag;
        int endoff;

        if (size == 0)  /* handle degenerate case */
                return;

        /*
         * If the base is not DEV_BSIZE aligned and the valid
         * bit is clear, we have to zero out a portion of the
         * first block.
         */
        if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
            (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
        ) {
                pmap_zero_page_area(
                    VM_PAGE_TO_PHYS(m),
                    frag,
                    base - frag
                );
        }

        /*
         * If the ending offset is not DEV_BSIZE aligned and the
         * valid bit is clear, we have to zero out a portion of
         * the last block.
         */
        endoff = base + size;

        if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
            (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
        ) {
                pmap_zero_page_area(
                    VM_PAGE_TO_PHYS(m),
                    endoff,
                    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
                );
        }
}
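/*
 * A standalone sketch (under #if 0) of the vm_page_bits() math above,
 * assuming DEV_BSIZE of 512 (DEMO_DEV_BSHIFT 9) and a 4K page: each bit
 * represents one 512-byte chunk, and any chunk partially covered by the
 * range has its bit set.
 */
#if 0
#include <stdio.h>

#define DEMO_DEV_BSHIFT 9       /* assumed DEV_BSIZE of 512 */

/* Same bit math as vm_page_bits() above. */
static int
demo_page_bits(int base, int size)
{
        int first_bit, last_bit;

        if (size == 0)
                return (0);
        first_bit = base >> DEMO_DEV_BSHIFT;
        last_bit = (base + size - 1) >> DEMO_DEV_BSHIFT;
        return ((2 << last_bit) - (1 << first_bit));
}

int
main(void)
{
        /* Whole 4K page: all eight chunks -> 0xff. */
        printf("0x%x\n", demo_page_bits(0, 4096));
        /* Bytes 512-1535: chunks 1 and 2 -> 0x6. */
        printf("0x%x\n", demo_page_bits(512, 1024));
        /* Bytes 100-199: partially covers chunk 0 -> 0x1. */
        printf("0x%x\n", demo_page_bits(100, 100));
        return (0);
}
#endif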
/*
 * Set portions of a page valid.  The invalid portions of any partially
 * covered DEV_BSIZE chunks are zero'd, and the valid bits are set
 * inclusive of any such partial chunks.  The dirty bits are not changed.
 */
void
vm_page_set_valid(vm_page_t m, int base, int size)
{
        _vm_page_zero_valid(m, base, size);
        m->valid |= vm_page_bits(base, size);
}

/*
 * Set valid bits and clear dirty bits.  We set valid bits inclusive of
 * any overlap, but we can only clear dirty bits for DEV_BSIZE chunks
 * that are fully within the range.
 *
 * When validating the entire page we also take the opportunity to clear
 * the PG_NOSYNC flag.  If a process takes a write fault on a MAP_NOSYNC
 * memory area the flag will be set again.
 *
 * NOTE: This function does not clear the pmap modified bit.
 *       Also note that e.g. NFS may use a byte-granular base
 *       and size.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
        int pagebits;

        _vm_page_zero_valid(m, base, size);
        pagebits = vm_page_bits(base, size);
        m->valid |= pagebits;
        m->dirty &= ~pagebits;
        if (base == 0 && size == PAGE_SIZE) {
                /*pmap_clear_modify(m);*/
                vm_page_flag_clear(m, PG_NOSYNC);
        }
}

/*
 * Clear dirty bits.
 *
 * NOTE: This function does not clear the pmap modified bit.
 *       Also note that e.g. NFS may use a byte-granular base
 *       and size.
 */
void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{
        m->dirty &= ~vm_page_bits(base, size);
        if (base == 0 && size == PAGE_SIZE) {
                /*pmap_clear_modify(m);*/
                vm_page_flag_clear(m, PG_NOSYNC);
        }
}

/*
 * Make the page all-dirty.
 *
 * Also make sure the related object and vnode reflect the fact that the
 * object may now contain a dirty page.
 */
void
vm_page_dirty(vm_page_t m)
{
#ifdef INVARIANTS
        int pqtype = m->queue - m->pc;
#endif
        KASSERT(pqtype != PQ_CACHE && pqtype != PQ_FREE,
                ("vm_page_dirty: page in free/cache queue!"));
        if (m->dirty != VM_PAGE_BITS_ALL) {
                m->dirty = VM_PAGE_BITS_ALL;
                if (m->object)
                        vm_object_set_writeable_dirty(m->object);
        }
}

/*
 * Invalidates DEV_BSIZE'd chunks within a page.  Both the
 * valid and dirty bits for the affected areas are cleared.
 *
 * May not block.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
        int bits;

        bits = vm_page_bits(base, size);
        m->valid &= ~bits;
        m->dirty &= ~bits;
        m->object->generation++;
}
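/*
 * A standalone sketch (under #if 0) of the run-detection loop used by
 * vm_page_zero_invalid() below: walk the valid bitmap one chunk past the
 * end, and every time a valid bit (or the end sentinel) is hit, zero the
 * preceding run of invalid chunks.  DEMO_CHUNKS and the bitmap value are
 * assumed demo inputs.
 */
#if 0
#include <stdio.h>

#define DEMO_CHUNKS 8           /* assumed PAGE_SIZE/DEV_BSIZE = 4096/512 */

int
main(void)
{
        int valid = 0x1D;       /* hypothetical: chunks 0,2,3,4 valid */
        int b = 0, i;

        /* Prints "zero chunks [1,2)" and "zero chunks [5,8)". */
        for (i = 0; i <= DEMO_CHUNKS; ++i) {
                if (i == DEMO_CHUNKS || (valid & (1 << i))) {
                        if (i > b)
                                printf("zero chunks [%d,%d)\n", b, i);
                        b = i + 1;
                }
        }
        return (0);
}
#endif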
/*
 * The kernel assumes that the invalid portions of a page contain
 * garbage, but such pages can be mapped into memory by user code.
 * When this occurs, we must zero out the non-valid portions of the
 * page so user code sees what it expects.
 *
 * Pages are most often semi-valid when the end of a file is mapped
 * into memory and the file's size is not page aligned.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
        int b;
        int i;

        /*
         * Scan the valid bits looking for invalid sections that
         * must be zero'd.  Invalid sub-DEV_BSIZE'd areas ( where the
         * valid bit may be set ) have already been zero'd by
         * vm_page_set_validclean().
         */
        for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
                if (i == (PAGE_SIZE / DEV_BSIZE) ||
                    (m->valid & (1 << i))
                ) {
                        if (i > b) {
                                pmap_zero_page_area(
                                    VM_PAGE_TO_PHYS(m),
                                    b << DEV_BSHIFT,
                                    (i - b) << DEV_BSHIFT
                                );
                        }
                        b = i + 1;
                }
        }

        /*
         * setvalid is TRUE when we can safely set the zero'd areas
         * as being valid.  We can do this if there are no cache consistency
         * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
         */
        if (setvalid)
                m->valid = VM_PAGE_BITS_ALL;
}

/*
 * Is a (partial) page valid?  Note that the case where size == 0
 * will return FALSE in the degenerate case where the page is entirely
 * invalid, and TRUE otherwise.
 *
 * May not block.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
        int bits = vm_page_bits(base, size);

        if (m->valid && ((m->valid & bits) == bits))
                return 1;
        else
                return 0;
}

/*
 * Update dirty bits from pmap/mmu.  May not block.
 */
void
vm_page_test_dirty(vm_page_t m)
{
        if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
                vm_page_dirty(m);
        }
}

/*
 * Issue an event on a VM page.  Corresponding action structures are
 * removed from the page's list and called.
 */
void
vm_page_event_internal(vm_page_t m, vm_page_event_t event)
{
        struct vm_page_action *scan, *next;

        LIST_FOREACH_MUTABLE(scan, &m->action_list, entry, next) {
                if (scan->event == event) {
                        scan->event = VMEVENT_NONE;
                        LIST_REMOVE(scan, entry);
                        scan->func(m, scan);
                }
        }
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
        db_printf("vmstats.v_free_count: %d\n", vmstats.v_free_count);
        db_printf("vmstats.v_cache_count: %d\n", vmstats.v_cache_count);
        db_printf("vmstats.v_inactive_count: %d\n", vmstats.v_inactive_count);
        db_printf("vmstats.v_active_count: %d\n", vmstats.v_active_count);
        db_printf("vmstats.v_wire_count: %d\n", vmstats.v_wire_count);
        db_printf("vmstats.v_free_reserved: %d\n", vmstats.v_free_reserved);
        db_printf("vmstats.v_free_min: %d\n", vmstats.v_free_min);
        db_printf("vmstats.v_free_target: %d\n", vmstats.v_free_target);
        db_printf("vmstats.v_cache_min: %d\n", vmstats.v_cache_min);
        db_printf("vmstats.v_inactive_target: %d\n", vmstats.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
        int i;

        db_printf("PQ_FREE:");
        for (i = 0; i < PQ_L2_SIZE; i++) {
                db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
        }
        db_printf("\n");

        db_printf("PQ_CACHE:");
        for (i = 0; i < PQ_L2_SIZE; i++) {
                db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
        }
        db_printf("\n");

        db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
                  vm_page_queues[PQ_ACTIVE].lcnt,
                  vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */