/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
 * $DragonFly: src/sys/vm/vm_object.c,v 1.22 2005/06/02 20:57:21 swildner Exp $
 */

/*
 * Virtual memory object module.
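 *
 * This module implements the vm_object type: allocation and reference
 * counting, termination, page cleaning for msync(), madvise handling,
 * shadow (copy-on-write) chains, collapse/bypass of backing objects,
 * coalescing of adjoining anonymous objects, and the DDB debugging
 * commands at the end of the file.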
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#define EASY_SCAN_FACTOR	8

#define MSYNC_FLUSH_HARDSEQ	0x01
#define MSYNC_FLUSH_SOFTSEQ	0x02

static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ;
SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags,
	CTLFLAG_RW, &msync_flush_flags, 0, "");

static void	vm_object_qcollapse (vm_object_t object);
static int	vm_object_page_collect_flush (vm_object_t object, vm_page_t p,
					      int curgeneration, int pagerflags);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
static long vm_object_count;		/* count of all objects */
vm_object_t kernel_object;
vm_object_t kmem_object;
static struct vm_object kernel_object_store;
static struct vm_object kmem_object_store;
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;
static vm_zone_t obj_zone;
static struct vm_zone obj_zone_store;
static int object_hash_rand;
#define VM_OBJECTS_INIT 256
static struct vm_object vm_objects_init[VM_OBJECTS_INIT];

void
_vm_object_allocate(objtype_t type, vm_size_t size, vm_object_t object)
{
	int incr;

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->flags = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		vm_object_set_flag(object, OBJ_ONEMAPPING);
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->pg_color = next_index;
	if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	next_index = (next_index + incr) & PQ_L2_MASK;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
	/*
	 * Try to generate a number that will spread objects out in the
	 * hash table.
	 * We 'wipe' new objects across the hash in 128 page increments
	 * plus 1 more to offset it a little more by the time it wraps
	 * around.
	 */
	object->hash_rand = object_hash_rand - 129;

	object->generation++;

	crit_enter();
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	object_hash_rand = object->hash_rand;
	crit_exit();
}

/*
 * vm_object_init:
 *
 * Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);

	kernel_object = &kernel_object_store;
	_vm_object_allocate(OBJT_DEFAULT,
	    OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(OBJT_DEFAULT,
	    OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);

	obj_zone = &obj_zone_store;
	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
	    vm_objects_init, VM_OBJECTS_INIT);
}

void
vm_object_init2(void)
{
	zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL, 1);
}

/*
 * vm_object_allocate:
 *
 * Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_size_t size)
{
	vm_object_t result;

	result = (vm_object_t) zalloc(obj_zone);

	_vm_object_allocate(type, size, result);

	return (result);
}

/*
 * vm_object_reference:
 *
 * Gets another reference to the given object.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object == NULL)
		return;

#if 0
	/* object can be re-referenced during final cleaning */
	KASSERT(!(object->flags & OBJ_DEAD),
	    ("vm_object_reference: attempting to reference dead obj"));
#endif

	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vref(object->handle);
		/* XXX what if the vnode is being destroyed? */
#if 0
		while (vget((struct vnode *) object->handle,
		    LK_RETRY|LK_NOOBJ, curthread)) {
			printf("vm_object_reference: delay in getting object\n");
		}
#endif
	}
}

void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		vp->v_flag &= ~VTEXT;
		vm_object_clear_flag(object, OBJ_OPT);
	}
	vrele(vp);
}

/*
 * vm_object_deallocate:
 *
 * Release a reference to the specified object,
 * gained either through a vm_object_allocate
 * or a vm_object_reference call.  When all references
 * are gone, storage associated with this object
 * may be relinquished.
 *
 * No object may be locked.
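 *
 * Note: reference counts of one and two receive special handling below,
 * so that shadow chains left with a single live reference can be
 * collapsed or terminated rather than leaked.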
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;

	while (object != NULL) {

		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			return;
		}

		if (object->ref_count == 0) {
			panic("vm_object_deallocate: object deallocated too many times: %d", object->type);
		} else if (object->ref_count > 2) {
			object->ref_count--;
			return;
		}

		/*
		 * Here on ref_count of one or two, which are special cases for
		 * objects.
		 */
		if ((object->ref_count == 2) && (object->shadow_count == 0)) {
			vm_object_set_flag(object, OBJ_ONEMAPPING);
			object->ref_count--;
			return;
		} else if ((object->ref_count == 2) && (object->shadow_count == 1)) {
			object->ref_count--;
			if ((object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				     object->ref_count,
				     object->shadow_count));
				if ((robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;

					while (
					    robject->paging_in_progress ||
					    object->paging_in_progress
					) {
						vm_object_pip_sleep(robject, "objde1");
						vm_object_pip_sleep(object, "objde2");
					}

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}

					object = robject;
					vm_object_collapse(object);
					continue;
				}
			}

			return;

		} else {
			object->ref_count--;
			if (object->ref_count != 0)
				return;
		}

doterm:
		temp = object->backing_object;
		if (temp) {
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			if (temp->ref_count == 0)
				vm_object_clear_flag(temp, OBJ_OPT);
			temp->generation++;
			object->backing_object = NULL;
		}

		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		object = temp;
	}
}

/*
 * vm_object_terminate actually destroys the specified object, freeing
 * up all previously used resources.
 *
 * The object must be locked.
 * This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
	vm_page_t p;

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp;

		/*
		 * Freeze optimized copies.
		 */
		vm_freeze_copyopts(object, 0, object->size);

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);

		vp = (struct vnode *) object->handle;
		vinvalbuf(vp, V_SAVE, NULL, 0, 0);
	}

	/*
	 * Wait for any I/O to complete, after which there had better not
	 * be any references left on the object.
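	 *
	 * The vnode cleaning and buffer flushing above may have initiated
	 * additional paging activity, which is why the paging-in-progress
	 * count is waited on a second time here.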
	 */
	vm_object_pip_wait(object, "objtrm");

	if (object->ref_count != 0)
		panic("vm_object_terminate: object with references, ref_count=%d", object->ref_count);

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	crit_enter();
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		if (p->busy || (p->flags & PG_BUSY))
			panic("vm_object_terminate: freeing busy page %p", p);
		if (p->wire_count == 0) {
			vm_page_busy(p);
			vm_page_free(p);
			mycpu->gd_cnt.v_pfree++;
		} else {
			vm_page_busy(p);
			vm_page_remove(p);
			vm_page_wakeup(p);
		}
	}
	crit_exit();

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	/*
	 * Remove the object from the global object list.
	 */
	crit_enter();
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	crit_exit();

	wakeup(object);
	if (object->ref_count != 0)
		panic("vm_object_terminate2: object with references, ref_count=%d", object->ref_count);

	/*
	 * Free the space for the object.
	 */
	zfree(obj_zone, object);
}

/*
 * vm_object_page_clean
 *
 * Clean all dirty pages in the specified range of object.  Leaves page
 * on whatever queue it is currently on.  If NOSYNC is set then do not
 * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
 * leaving the object dirty.
 *
 * When stuffing pages asynchronously, allow clustering.  XXX we need a
 * synchronous clustering mode implementation.
 *
 * Odd semantics: if start == end, we clean everything.
 */
void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    int flags)
{
	vm_page_t p, np;
	vm_offset_t tstart, tend;
	vm_pindex_t pi;
	struct vnode *vp;
	int clearobjflags;
	int pagerflags;
	int curgeneration;

	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vp = object->handle;

	vm_object_set_flag(object, OBJ_CLEANING);

	/*
	 * Handle 'entire object' case
	 */
	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}

	/*
	 * If the caller is smart and only msync()s a range he knows is
	 * dirty, we may be able to avoid an object scan.  This results in
	 * a phenomenal improvement in performance.  We cannot do this
	 * as a matter of course because the object may be huge - e.g.
	 * the size might be in the gigabytes or terabytes.
	 */
	if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) {
		vm_offset_t tscan;
		int scanlimit;
		int scanreset;

		scanreset = object->resident_page_count / EASY_SCAN_FACTOR;
		if (scanreset < 16)
			scanreset = 16;
		pagerflags |= VM_PAGER_IGNORE_CLEANCHK;

		scanlimit = scanreset;
		tscan = tstart;

		/*
		 * spl protection is required despite the obj generation
		 * tracking because we cannot safely call vm_page_test_dirty()
		 * or avoid page field tests against an interrupt unbusy/free
		 * race that might occur prior to the busy check in
		 * vm_object_page_collect_flush().
		 */
		crit_enter();
		while (tscan < tend) {
			curgeneration = object->generation;
			p = vm_page_lookup(object, tscan);
			if (p == NULL || p->valid == 0 ||
			    (p->queue - p->pc) == PQ_CACHE) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			vm_page_test_dirty(p);
			if ((p->dirty & p->valid) == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			/*
			 * If we have been asked to skip nosync pages and
			 * this is a nosync page, we can't continue.
			 */
			if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			scanlimit = scanreset;

			/*
			 * This returns 0 if it was unable to busy the first
			 * page (i.e. had to sleep).
			 */
			tscan += vm_object_page_collect_flush(object, p,
			    curgeneration, pagerflags);
		}
		crit_exit();

		/*
		 * If everything was dirty and we flushed it successfully,
		 * and the requested range is not the entire object, we
		 * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can
		 * return immediately.
		 */
		if (tscan >= tend && (tstart || tend < object->size)) {
			vm_object_clear_flag(object, OBJ_CLEANING);
			return;
		}
		pagerflags &= ~VM_PAGER_IGNORE_CLEANCHK;
	}

	/*
	 * Generally set CLEANCHK interlock and make the page read-only so
	 * we can then clear the object flags.
	 *
	 * However, if this is a nosync mmap then the object is likely to
	 * stay dirty so do not mess with the page and do not clear the
	 * object flags.
	 *
	 * spl protection is required because an interrupt can remove page
	 * from the object.
	 */
	clearobjflags = 1;

	crit_enter();
	for (p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq)) {
		vm_page_flag_set(p, PG_CLEANCHK);
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
			clearobjflags = 0;
		else
			vm_page_protect(p, VM_PROT_READ);
	}
	crit_exit();

	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
		struct vnode *vp;

		vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
		if (object->type == OBJT_VNODE &&
		    (vp = (struct vnode *)object->handle) != NULL) {
			if (vp->v_flag & VOBJDIRTY)
				vclrflags(vp, VOBJDIRTY);
		}
	}

	/*
	 * spl protection is required both to avoid an interrupt unbusy/free
	 * race against a vm_page_lookup(), and also to ensure that the
	 * memq is consistent.  We do not want a busy page to be ripped out
	 * from under us.
	 */
	crit_enter();
rescan:
	crit_exit();
	crit_enter();
	curgeneration = object->generation;

	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		int n;

		np = TAILQ_NEXT(p, listq);

again:
		pi = p->pindex;
		if (((p->flags & PG_CLEANCHK) == 0) ||
		    (pi < tstart) || (pi >= tend) ||
		    (p->valid == 0) ||
		    ((p->queue - p->pc) == PQ_CACHE)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		/*
		 * If we have been asked to skip nosync pages and this is a
		 * nosync page, skip it.  Note that the object flags were
		 * not cleared in this case so we do not have to set them.
		 */
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		n = vm_object_page_collect_flush(object, p,
		    curgeneration, pagerflags);
		if (n == 0)
			goto rescan;
		if (object->generation != curgeneration)
			goto rescan;

		/*
		 * Try to optimize the next page.  If we can't we pick up
		 * our (random) scan where we left off.
		 */
		if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) {
			if ((p = vm_page_lookup(object, pi + n)) != NULL)
				goto again;
		}
	}
	crit_exit();

#if 0
	VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc);
#endif

	vm_object_clear_flag(object, OBJ_CLEANING);
	return;
}

/*
 * This routine must be called within a critical section to properly avoid
 * an interrupt unbusy/free race that can occur prior to the busy check.
 *
 * Using the object generation number here to detect page ripout is not
 * the best idea in the world. XXX
 *
 * NOTE: we operate under the assumption that a page found to not be busy
 * will not be ripped out from under us by an interrupt.  XXX we should
 * recode this to explicitly busy the pages.
 */
static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags)
{
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	vm_pindex_t pi;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];

	pi = p->pindex;
	while (vm_page_sleep_busy(p, TRUE, "vpcwai")) {
		if (object->generation != curgeneration) {
			return(0);
		}
	}

	maxf = 0;
	for (i = 1; i < vm_pageout_page_count; i++) {
		vm_page_t tp;

		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
			if ((tp->flags & PG_BUSY) ||
			    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
			     (tp->flags & PG_CLEANCHK) == 0) ||
			    (tp->busy != 0))
				break;
			if ((tp->queue - tp->pc) == PQ_CACHE) {
				vm_page_flag_clear(tp, PG_CLEANCHK);
				break;
			}
			vm_page_test_dirty(tp);
			if ((tp->dirty & tp->valid) == 0) {
				vm_page_flag_clear(tp, PG_CLEANCHK);
				break;
			}
			maf[i - 1] = tp;
			maxf++;
			continue;
		}
		break;
	}

	maxb = 0;
	chkb = vm_pageout_page_count - maxf;
	if (chkb) {
		for (i = 1; i < chkb; i++) {
			vm_page_t tp;

			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
				if ((tp->flags & PG_BUSY) ||
				    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
				     (tp->flags & PG_CLEANCHK) == 0) ||
				    (tp->busy != 0))
					break;
				if ((tp->queue - tp->pc) == PQ_CACHE) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				mab[i - 1] = tp;
				maxb++;
				continue;
			}
			break;
		}
	}

	for (i = 0; i < maxb; i++) {
		int index = (maxb - i) - 1;
		ma[index] = mab[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	vm_page_flag_clear(p, PG_CLEANCHK);
	ma[maxb] = p;
	for (i = 0; i < maxf; i++) {
		int index = (maxb + i) + 1;
		ma[index] = maf[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	runlen = maxb + maxf + 1;

	vm_pageout_flush(ma, runlen, pagerflags);
	for (i = 0; i < runlen; i++) {
		if (ma[i]->valid & ma[i]->dirty) {
			vm_page_protect(ma[i], VM_PROT_READ);
			vm_page_flag_set(ma[i],
			    PG_CLEANCHK);

			/*
			 * maxf will end up being the actual number of pages
			 * we wrote out contiguously, non-inclusive of the
			 * first page.  We do not count look-behind pages.
			 */
			if (i >= maxb + 1 && (maxf > i - maxb - 1))
				maxf = i - maxb - 1;
		}
	}
	return(maxf + 1);
}

#ifdef not_used
/* XXX I cannot tell if this should be an exported symbol */
/*
 * vm_object_deactivate_pages
 *
 * Deactivate all pages in the specified object.  (Keep its pages
 * in memory even though it is no longer referenced.)
 *
 * The object must be locked.
 */
static void
vm_object_deactivate_pages(vm_object_t object)
{
	vm_page_t p, next;

	crit_enter();
	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
		next = TAILQ_NEXT(p, listq);
		vm_page_deactivate(p);
	}
	crit_exit();
}
#endif

/*
 * Same as vm_object_pmap_copy, except range checking really
 * works, and is meant for small sections of an object.
 *
 * This code protects resident pages by making them read-only
 * and is typically called on a fork or split when a page
 * is converted to copy-on-write.
 *
 * NOTE: If the page is already at VM_PROT_NONE, calling
 * vm_page_protect will have no effect.
 */
void
vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_pindex_t idx;
	vm_page_t p;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;

	/*
	 * spl protection needed to prevent races between the lookup,
	 * an interrupt unbusy/free, and our protect call.
	 */
	crit_enter();
	for (idx = start; idx < end; idx++) {
		p = vm_page_lookup(object, idx);
		if (p == NULL)
			continue;
		vm_page_protect(p, VM_PROT_READ);
	}
	crit_exit();
}

/*
 * vm_object_pmap_remove:
 *
 * Removes all physical pages in the specified
 * object range from all physical maps.
 *
 * The object must *not* be locked.
 */
void
vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t p;

	if (object == NULL)
		return;

	/*
	 * spl protection is required because an interrupt can unbusy/free
	 * a page.
	 */
	crit_enter();
	for (p = TAILQ_FIRST(&object->memq);
	    p != NULL;
	    p = TAILQ_NEXT(p, listq)
	) {
		if (p->pindex >= start && p->pindex < end)
			vm_page_protect(p, VM_PROT_NONE);
	}
	crit_exit();
	if ((start == 0) && (object->size == end))
		vm_object_clear_flag(object, OBJ_WRITEABLE);
}

/*
 * vm_object_madvise:
 *
 * Implements the madvise function at the object/page level.
 *
 * MADV_WILLNEED	(any object)
 *
 *	Activate the specified pages if they are resident.
 *
 * MADV_DONTNEED	(any object)
 *
 *	Deactivate the specified pages if they are resident.
 *
 * MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *		 OBJ_ONEMAPPING only)
 *
 *	Deactivate and clean the specified pages if they are
 *	resident.  This permits the process to reuse the pages
 *	without faulting or the kernel to reclaim the pages
 *	without I/O.
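 *
 *	Pages handled here are marked clean, deactivated via
 *	vm_page_dontneed(), and any swap space backing them is
 *	released.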
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t tobject;
	vm_page_t m;

	if (object == NULL)
		return;

	end = pindex + count;

	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				continue;
			}
		}

		/*
		 * spl protection is required to avoid a race between the
		 * lookup, an interrupt unbusy/free, and our busy check.
		 */
		crit_enter();
		m = vm_page_lookup(tobject, tpindex);

		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);

			/*
			 * next object
			 */
			crit_exit();
			if (tobject->backing_object == NULL)
				continue;
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			tobject = tobject->backing_object;
			goto shadowlookup;
		}

		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  If the page is not managed there are no
		 * page queues to mess with.  Things can break if we mess
		 * with pages in any of the below states.
		 */
		if (
		    m->hold_count ||
		    m->wire_count ||
		    (m->flags & PG_UNMANAGED) ||
		    m->valid != VM_PAGE_BITS_ALL
		) {
			crit_exit();
			continue;
		}

		if (vm_page_sleep_busy(m, TRUE, "madvpo")) {
			crit_exit();
			goto relookup;
		}
		crit_exit();

		/*
		 * Theoretically once a page is known not to be busy, an
		 * interrupt cannot come along and rip it out from under us.
		 */
		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
			if (tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
		}
	}
}

/*
 * vm_object_shadow:
 *
 * Create a new object which is backed by the
 * specified existing object range.  The source
 * object reference is deallocated.
 *
 * The new object and offset into that object
 * are returned in the source parameters.
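 *
 * The new object starts with no resident pages of its own; it simply
 * shadows the source object through its backing_object pointer.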
 */
void
vm_object_shadow(vm_object_t *object,	/* IN/OUT */
		 vm_ooffset_t *offset,	/* IN/OUT */
		 vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	if (source != NULL &&
	    source->ref_count == 1 &&
	    source->handle == NULL &&
	    (source->type == OBJT_DEFAULT ||
	     source->type == OBJT_SWAP))
		return;

	/*
	 * Allocate a new object with the given length
	 */
	if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	if (source) {
		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & PQ_L2_MASK;
	}

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;

	/*
	 * Return the new things
	 */
	*offset = 0;
	*object = result;
}

#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static __inline int
vm_object_backing_scan(vm_object_t object, int op)
{
	int r = 1;
	vm_page_t p;
	vm_object_t backing_object;
	vm_pindex_t backing_offset_index;

	/*
	 * spl protection is required to avoid races between the memq/lookup,
	 * an interrupt doing an unbusy/free, and our busy check.  Among
	 * other things.
	 */
	crit_enter();

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of
		 * swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed! XXX
		 */
		if (backing_object->type != OBJT_DEFAULT) {
			crit_exit();
			return(0);
		}
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		KKASSERT((backing_object->flags & OBJ_DEAD) == 0);
		vm_object_set_flag(backing_object, OBJ_DEAD);
	}

	/*
	 * Our scan
	 */
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next = TAILQ_NEXT(p, listq);
		vm_pindex_t new_pindex = p->pindex - backing_offset_index;

		if (op & OBSC_TEST_ALL_SHADOWED) {
			vm_page_t pp;

			/*
			 * Ignore pages outside the parent object's range
			 * and outside the parent object's mapping of the
			 * backing object.
			 *
			 * note that we do not busy the backing object's
			 * page.
			 */
			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				p = next;
				continue;
			}

			/*
			 * See if the parent has the page or if the parent's
			 * object pager has the page.  If the parent has the
			 * page but the page is not valid, the parent's
			 * object pager must have the page.
			 *
			 * If this fails, the parent does not completely shadow
			 * the object and we might as well give up now.
			 */
			pp = vm_page_lookup(object, new_pindex);
			if (
			    (pp == NULL || pp->valid == 0) &&
			    !vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				r = 0;
				break;
			}
		}

		/*
		 * Check for busy page
		 */
		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
			vm_page_t pp;

			if (op & OBSC_COLLAPSE_NOWAIT) {
				if (
				    (p->flags & PG_BUSY) ||
				    !p->valid ||
				    p->hold_count ||
				    p->wire_count ||
				    p->busy
				) {
					p = next;
					continue;
				}
			} else if (op & OBSC_COLLAPSE_WAIT) {
				if (vm_page_sleep_busy(p, TRUE, "vmocol")) {
					/*
					 * If we slept, anything could have
					 * happened.  Since the object is
					 * marked dead, the backing offset
					 * should not have changed so we
					 * just restart our scan.
					 */
					p = TAILQ_FIRST(&backing_object->memq);
					continue;
				}
			}

			/*
			 * Busy the page
			 */
			vm_page_busy(p);

			KASSERT(
			    p->object == backing_object,
			    ("vm_object_qcollapse(): object mismatch")
			);

			/*
			 * Destroy any associated swap
			 */
			if (backing_object->type == OBJT_SWAP) {
				swap_pager_freespace(
				    backing_object,
				    p->pindex,
				    1
				);
			}

			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				/*
				 * Page is out of the parent object's range, we
				 * can simply destroy it.
				 */
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
				p = next;
				continue;
			}

			pp = vm_page_lookup(object, new_pindex);
			if (
			    pp != NULL ||
			    vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				/*
				 * page already exists in parent OR swap exists
				 * for this location in the parent.  Destroy
				 * the original page from the backing object.
				 *
				 * Leave the parent's page alone
				 */
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
				p = next;
				continue;
			}

			/*
			 * Page does not exist in parent, rename the
			 * page from the backing object to the main object.
			 *
			 * If the page was mapped to a process, it can remain
			 * mapped through the rename.
			 */
			if ((p->queue - p->pc) == PQ_CACHE)
				vm_page_deactivate(p);

			vm_page_rename(p, object, new_pindex);
			/* page automatically made dirty by rename */
		}
		p = next;
	}
	crit_exit();
	return(r);
}

/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
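 *
 * It is invoked from vm_object_collapse() when a full collapse cannot be
 * performed because paging is in progress on either object.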
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	if (backing_object->ref_count != 1)
		return;

	backing_object->ref_count += 2;

	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);

	backing_object->ref_count -= 2;
}

/*
 * vm_object_collapse:
 *
 * Collapse an object with the object backing it.
 * Pages in the backing object are moved into the
 * parent, and the backing object is deallocated.
 */
void
vm_object_collapse(vm_object_t object)
{
	while (TRUE) {
		vm_object_t backing_object;

		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if (object == NULL)
			break;

		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsible.
		 */
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			break;
		}

		if (
		    object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0
		) {
			vm_object_qcollapse(object);
			break;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				vm_object_pip_add(backing_object, 1);

				/*
				 * scrap the paging_offset junk and do a
				 * discrete copy.  This also removes major
				 * assumptions about how the swap-pager
				 * works from where it doesn't belong.  The
				 * new swapper is able to optimize the
				 * destroy-source case.
				 */
				vm_object_pip_add(object, 1);
				swap_pager_copy(
				    backing_object,
				    object,
				    OFF_TO_IDX(object->backing_object_offset), TRUE);
				vm_object_pip_wakeup(object);

				vm_object_pip_wakeup(backing_object);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			LIST_REMOVE(object, shadow_list);
			object->backing_object->shadow_count--;
			object->backing_object->generation++;
			if (backing_object->backing_object) {
				LIST_REMOVE(backing_object, shadow_list);
				backing_object->backing_object->shadow_count--;
				backing_object->backing_object->generation++;
			}
			object->backing_object = backing_object->backing_object;
			if (object->backing_object) {
				LIST_INSERT_HEAD(
				    &object->backing_object->shadow_head,
				    object,
				    shadow_list
				);
				object->backing_object->shadow_count++;
				object->backing_object->generation++;
			}

			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1,
			    ("backing_object %p was somehow re-referenced during collapse!",
			     backing_object));
			KASSERT(TAILQ_FIRST(&backing_object->memq) == NULL,
			    ("backing_object %p somehow has left over pages during collapse!",
			     backing_object));
			crit_enter();
			TAILQ_REMOVE(
			    &vm_object_list,
			    backing_object,
			    object_list
			);
			vm_object_count--;
			crit_exit();

			zfree(obj_zone, backing_object);

			object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				vm_object_reference(new_backing_object);
				LIST_INSERT_HEAD(
				    &new_backing_object->shadow_head,
				    object,
				    shadow_list
				);
				new_backing_object->shadow_count++;
				new_backing_object->generation++;
				object->backing_object_offset +=
				    backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object. Since
			 * its ref_count was at least 2, it will not vanish;
			 * so we don't need to call vm_object_deallocate, but
			 * we do anyway.
			 */
			vm_object_deallocate(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 * vm_object_page_remove: [internal]
 *
 * Removes all physical pages in the specified
 * object range from the object's list of pages.
 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    boolean_t clean_only)
{
	vm_page_t p, next;
	unsigned int size;
	int all;

	if (object == NULL || object->resident_page_count == 0)
		return;

	all = ((end == 0) && (start == 0));

	/*
	 * Since physically-backed objects do not use managed pages, we can't
	 * remove pages from the object (we must instead remove the page
	 * references, and then destroy the object).
	 */
	KASSERT(object->type != OBJT_PHYS,
	    ("attempt to remove pages from a physical object"));

	/*
	 * Indicate that the object is undergoing paging.
	 *
	 * spl protection is required to avoid a race between the memq scan,
	 * an interrupt unbusy/free, and the busy check.
	 */
	vm_object_pip_add(object, 1);
	crit_enter();
again:
	size = end - start;
	if (all || size > object->resident_page_count / 4) {
		for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
			next = TAILQ_NEXT(p, listq);
			if (all || ((start <= p->pindex) && (p->pindex < end))) {
				if (p->wire_count != 0) {
					vm_page_protect(p, VM_PROT_NONE);
					if (!clean_only)
						p->valid = 0;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if (vm_page_sleep_busy(p, TRUE, "vmopar"))
					goto again;

				if (clean_only && p->valid) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty)
						continue;
				}

				vm_page_busy(p);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			}
		}
	} else {
		while (size > 0) {
			if ((p = vm_page_lookup(object, start)) != 0) {
				if (p->wire_count != 0) {
					vm_page_protect(p, VM_PROT_NONE);
					if (!clean_only)
						p->valid = 0;
					start += 1;
					size -= 1;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if (vm_page_sleep_busy(p, TRUE, "vmopar"))
					goto again;

				if (clean_only && p->valid) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty) {
						start += 1;
						size -= 1;
						continue;
					}
				}

				vm_page_busy(p);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			}
			start += 1;
			size -= 1;
		}
	}
	crit_exit();
	vm_object_pip_wakeup(object);
}

/*
 * Routine:	vm_object_coalesce
 * Function:	Coalesces two objects backing up adjoining
 *		regions of memory into a single object.
 *
 * returns TRUE if objects were combined.
 *
 * NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 * Parameters:
 *	prev_object	First object to coalesce
 *	prev_offset	Offset into prev_object
 *	next_object	Second object to coalesce
 *	next_offset	Offset into next_object
 *
 *	prev_size	Size of reference to prev_object
 *	next_size	Size of reference to next_object
 *
 * Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
    vm_size_t prev_size, vm_size_t next_size)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL) {
		return (TRUE);
	}

	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 * . more than one reference
	 * . paged out
	 * . shadows another object
	 * . has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry
	 * may be in use anyway)
	 */
	if (prev_object->backing_object != NULL) {
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = prev_pindex + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
				      next_pindex,
				      next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
					     next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	return (TRUE);
}

void
vm_object_set_writeable_dirty(vm_object_t object)
{
	struct vnode *vp;

	vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	if (object->type == OBJT_VNODE &&
	    (vp = (struct vnode *)object->handle) != NULL) {
		if ((vp->v_flag & VOBJDIRTY) == 0) {
			vsetflags(vp, VOBJDIRTY);
		}
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

static int	_vm_object_in_map (vm_map_t map, vm_object_t object,
				   vm_map_entry_t entry);
static int	vm_object_in_map (vm_object_t object);

static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

static int
vm_object_in_map(vm_object_t object)
{
	struct proc *p;

	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0))
			return 1;
	}
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	if (_vm_object_in_map(pager_map, object, 0))
		return 1;
	if (_vm_object_in_map(buffer_map, object, 0))
		return 1;
	return 0;
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
					(long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
			"vmochk: internal obj is not in a map: "
			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}

/*
 * vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
	    object, (int)object->type, (u_long)object->size,
	    object->resident_page_count, object->ref_count, object->flags);
	/*
	 * XXX no %qd in kernel.  Truncate object->backing_object_offset.
	 */
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (long)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print.
*/ 1923 void 1924 vm_object_print(/* db_expr_t */ long addr, 1925 boolean_t have_addr, 1926 /* db_expr_t */ long count, 1927 char *modif) 1928 { 1929 vm_object_print_static(addr, have_addr, count, modif); 1930 } 1931 1932 DB_SHOW_COMMAND(vmopag, vm_object_print_pages) 1933 { 1934 vm_object_t object; 1935 int nl = 0; 1936 int c; 1937 for (object = TAILQ_FIRST(&vm_object_list); 1938 object != NULL; 1939 object = TAILQ_NEXT(object, object_list)) { 1940 vm_pindex_t idx, fidx; 1941 vm_pindex_t osize; 1942 vm_paddr_t pa = -1, padiff; 1943 int rcount; 1944 vm_page_t m; 1945 1946 db_printf("new object: %p\n", (void *)object); 1947 if ( nl > 18) { 1948 c = cngetc(); 1949 if (c != ' ') 1950 return; 1951 nl = 0; 1952 } 1953 nl++; 1954 rcount = 0; 1955 fidx = 0; 1956 osize = object->size; 1957 if (osize > 128) 1958 osize = 128; 1959 for (idx = 0; idx < osize; idx++) { 1960 m = vm_page_lookup(object, idx); 1961 if (m == NULL) { 1962 if (rcount) { 1963 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 1964 (long)fidx, rcount, (long)pa); 1965 if ( nl > 18) { 1966 c = cngetc(); 1967 if (c != ' ') 1968 return; 1969 nl = 0; 1970 } 1971 nl++; 1972 rcount = 0; 1973 } 1974 continue; 1975 } 1976 1977 1978 if (rcount && 1979 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { 1980 ++rcount; 1981 continue; 1982 } 1983 if (rcount) { 1984 padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m); 1985 padiff >>= PAGE_SHIFT; 1986 padiff &= PQ_L2_MASK; 1987 if (padiff == 0) { 1988 pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE; 1989 ++rcount; 1990 continue; 1991 } 1992 db_printf(" index(%ld)run(%d)pa(0x%lx)", 1993 (long)fidx, rcount, (long)pa); 1994 db_printf("pd(%ld)\n", (long)padiff); 1995 if ( nl > 18) { 1996 c = cngetc(); 1997 if (c != ' ') 1998 return; 1999 nl = 0; 2000 } 2001 nl++; 2002 } 2003 fidx = idx; 2004 pa = VM_PAGE_TO_PHYS(m); 2005 rcount = 1; 2006 } 2007 if (rcount) { 2008 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2009 (long)fidx, rcount, (long)pa); 2010 if ( nl > 18) { 2011 c = cngetc(); 2012 if (c != ' ') 2013 return; 2014 nl = 0; 2015 } 2016 nl++; 2017 } 2018 } 2019 } 2020 #endif /* DDB */ 2021