/*
 * Copyright (c) 1991, 1993, 2013
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
 */

/*
 * Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/thread.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/refcount.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <vm/vm_page2.h>

#include <machine/specialreg.h>

#define EASY_SCAN_FACTOR	8

static void	vm_object_qcollapse(vm_object_t object,
				    vm_object_t backing_object);
static void	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
					     int pagerflags);
static void	vm_object_lock_init(vm_object_t);


/*
 * Virtual memory objects maintain the actual data
 * associated with allocated virtual memory.  A given
 * page of memory exists within exactly one object.
 *
 * An object is only deallocated when all "references"
 * are given up.  Only one "reference" to a given
 * region of an object should be writeable.
 *
 * Associated with each object is a list of all resident
 * memory pages belonging to that object; this list is
 * maintained by the "vm_page" module, and locked by the object's
 * lock.
 *
 * Each object also records a "pager" routine which is
 * used to retrieve (and store) pages to the proper backing
 * storage.  In addition, objects may be backed by other
 * objects from which they were virtual-copied.
 *
 * The only items within the object structure which are
 * modified after time of creation are:
 *	reference count		locked by object's lock
 *	pager routine		locked by object's lock
 *
 */

struct vm_object kernel_object;

static long vm_object_count;

static long object_collapses;
static long object_bypasses;
static vm_zone_t obj_zone;
static struct vm_zone obj_zone_store;
#define VM_OBJECTS_INIT 256
static struct vm_object vm_objects_init[VM_OBJECTS_INIT];

struct object_q vm_object_lists[VMOBJ_HSIZE];
struct lwkt_token vmobj_tokens[VMOBJ_HSIZE];

#if defined(DEBUG_LOCKS)

#define vm_object_vndeallocate(obj, vpp)	\
	debugvm_object_vndeallocate(obj, vpp, __FILE__, __LINE__)

/*
 * Debug helper to track hold/drop/ref/deallocate calls.
 */
static void
debugvm_object_add(vm_object_t obj, char *file, int line, int addrem)
{
	int i;

	i = atomic_fetchadd_int(&obj->debug_index, 1);
	i = i & (VMOBJ_DEBUG_ARRAY_SIZE - 1);
	ksnprintf(obj->debug_hold_thrs[i],
		  sizeof(obj->debug_hold_thrs[i]),
		  "%c%d:(%d):%s",
		  (addrem == -1 ? '-' : (addrem == 1 ? '+' : '=')),
		  (curthread->td_proc ? curthread->td_proc->p_pid : -1),
		  obj->ref_count,
		  curthread->td_comm);
	obj->debug_hold_file[i] = file;
	obj->debug_hold_line[i] = line;
#if 0
	/* Uncomment for debugging obj refs/derefs in reproducible cases */
	if (strcmp(curthread->td_comm, "sshd") == 0) {
		kprintf("%d %p refs=%d ar=%d file: %s/%d\n",
			(curthread->td_proc ?
			 curthread->td_proc->p_pid : -1),
			obj, obj->ref_count, addrem, file, line);
	}
#endif
}

#endif

/*
 * Misc low level routines
 */
static void
vm_object_lock_init(vm_object_t obj)
{
#if defined(DEBUG_LOCKS)
	int i;

	obj->debug_index = 0;
	for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
		obj->debug_hold_thrs[i][0] = 0;
		obj->debug_hold_file[i] = NULL;
		obj->debug_hold_line[i] = 0;
	}
#endif
}

void
vm_object_lock_swap(void)
{
	lwkt_token_swap();
}

void
vm_object_lock(vm_object_t obj)
{
	lwkt_gettoken(&obj->token);
}

/*
 * Returns TRUE on success
 */
static int
vm_object_lock_try(vm_object_t obj)
{
	return(lwkt_trytoken(&obj->token));
}

void
vm_object_lock_shared(vm_object_t obj)
{
	lwkt_gettoken_shared(&obj->token);
}

void
vm_object_unlock(vm_object_t obj)
{
	lwkt_reltoken(&obj->token);
}

void
vm_object_upgrade(vm_object_t obj)
{
	lwkt_reltoken(&obj->token);
	lwkt_gettoken(&obj->token);
}

void
vm_object_downgrade(vm_object_t obj)
{
	lwkt_reltoken(&obj->token);
	lwkt_gettoken_shared(&obj->token);
}

static __inline void
vm_object_assert_held(vm_object_t obj)
{
	ASSERT_LWKT_TOKEN_HELD(&obj->token);
}

static __inline int
vm_quickcolor(void)
{
	globaldata_t gd = mycpu;
	int pg_color;

	pg_color = (int)(intptr_t)gd->gd_curthread >> 10;
	pg_color += gd->gd_quick_color;
	gd->gd_quick_color += PQ_PRIME2;

	return pg_color;
}

void
VMOBJDEBUG(vm_object_hold)(vm_object_t obj VMOBJDBARGS)
{
	KKASSERT(obj != NULL);

	/*
	 * Object must be held (object allocation is stable due to the
	 * caller's context, typically already holding the token on a
	 * parent object) prior to potentially blocking on the lock,
	 * otherwise the object can get ripped away from us.
	 */
	refcount_acquire(&obj->hold_count);
	vm_object_lock(obj);

#if defined(DEBUG_LOCKS)
	debugvm_object_add(obj, file, line, 1);
#endif
}

int
VMOBJDEBUG(vm_object_hold_try)(vm_object_t obj VMOBJDBARGS)
{
	KKASSERT(obj != NULL);

	/*
	 * Object must be held (object allocation is stable due to the
	 * caller's context, typically already holding the token on a
	 * parent object) prior to potentially blocking on the lock,
	 * otherwise the object can get ripped away from us.
	 */
	refcount_acquire(&obj->hold_count);
	if (vm_object_lock_try(obj) == 0) {
		if (refcount_release(&obj->hold_count)) {
			if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD))
				zfree(obj_zone, obj);
		}
		return(0);
	}

#if defined(DEBUG_LOCKS)
	debugvm_object_add(obj, file, line, 1);
#endif
	return(1);
}

void
VMOBJDEBUG(vm_object_hold_shared)(vm_object_t obj VMOBJDBARGS)
{
	KKASSERT(obj != NULL);

	/*
	 * Object must be held (object allocation is stable due to the
	 * caller's context, typically already holding the token on a
	 * parent object) prior to potentially blocking on the lock,
	 * otherwise the object can get ripped away from us.
	 */
	refcount_acquire(&obj->hold_count);
	vm_object_lock_shared(obj);

#if defined(DEBUG_LOCKS)
	debugvm_object_add(obj, file, line, 1);
#endif
}

/*
 * Drop the token and hold_count on the object.
 *
 * WARNING!
Token might be shared. 333 */ 334 void 335 VMOBJDEBUG(vm_object_drop)(vm_object_t obj VMOBJDBARGS) 336 { 337 if (obj == NULL) 338 return; 339 340 /* 341 * No new holders should be possible once we drop hold_count 1->0 as 342 * there is no longer any way to reference the object. 343 */ 344 KKASSERT(obj->hold_count > 0); 345 if (refcount_release(&obj->hold_count)) { 346 #if defined(DEBUG_LOCKS) 347 debugvm_object_add(obj, file, line, -1); 348 #endif 349 350 if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD)) { 351 vm_object_unlock(obj); 352 zfree(obj_zone, obj); 353 } else { 354 vm_object_unlock(obj); 355 } 356 } else { 357 #if defined(DEBUG_LOCKS) 358 debugvm_object_add(obj, file, line, -1); 359 #endif 360 vm_object_unlock(obj); 361 } 362 } 363 364 /* 365 * Initialize a freshly allocated object, returning a held object. 366 * 367 * Used only by vm_object_allocate() and zinitna(). 368 * 369 * No requirements. 370 */ 371 void 372 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object) 373 { 374 int n; 375 376 RB_INIT(&object->rb_memq); 377 LIST_INIT(&object->shadow_head); 378 lwkt_token_init(&object->token, "vmobj"); 379 380 object->type = type; 381 object->size = size; 382 object->ref_count = 1; 383 object->memattr = VM_MEMATTR_DEFAULT; 384 object->hold_count = 0; 385 object->flags = 0; 386 if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP)) 387 vm_object_set_flag(object, OBJ_ONEMAPPING); 388 object->paging_in_progress = 0; 389 object->resident_page_count = 0; 390 object->agg_pv_list_count = 0; 391 object->shadow_count = 0; 392 /* cpu localization twist */ 393 object->pg_color = vm_quickcolor(); 394 object->handle = NULL; 395 object->backing_object = NULL; 396 object->backing_object_offset = (vm_ooffset_t)0; 397 398 object->generation++; 399 object->swblock_count = 0; 400 RB_INIT(&object->swblock_root); 401 vm_object_lock_init(object); 402 pmap_object_init(object); 403 404 vm_object_hold(object); 405 406 n = VMOBJ_HASH(object); 407 atomic_add_long(&vm_object_count, 1); 408 lwkt_gettoken(&vmobj_tokens[n]); 409 TAILQ_INSERT_TAIL(&vm_object_lists[n], object, object_list); 410 lwkt_reltoken(&vmobj_tokens[n]); 411 } 412 413 /* 414 * Initialize the VM objects module. 415 * 416 * Called from the low level boot code only. 417 */ 418 void 419 vm_object_init(void) 420 { 421 int i; 422 423 for (i = 0; i < VMOBJ_HSIZE; ++i) { 424 TAILQ_INIT(&vm_object_lists[i]); 425 lwkt_token_init(&vmobj_tokens[i], "vmobjlst"); 426 } 427 428 _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd), 429 &kernel_object); 430 vm_object_drop(&kernel_object); 431 432 obj_zone = &obj_zone_store; 433 zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object), 434 vm_objects_init, VM_OBJECTS_INIT); 435 } 436 437 void 438 vm_object_init2(void) 439 { 440 zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL, 1); 441 } 442 443 /* 444 * Allocate and return a new object of the specified type and size. 445 * 446 * No requirements. 447 */ 448 vm_object_t 449 vm_object_allocate(objtype_t type, vm_pindex_t size) 450 { 451 vm_object_t result; 452 453 result = (vm_object_t) zalloc(obj_zone); 454 455 _vm_object_allocate(type, size, result); 456 vm_object_drop(result); 457 458 return (result); 459 } 460 461 /* 462 * This version returns a held object, allowing further atomic initialization 463 * of the object. 
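 *
 * Illustrative usage (a sketch added for clarity, not taken from the
 * original comment): the caller can finish setting up the object before
 * publishing it, then drop the hold:
 *
 *	obj = vm_object_allocate_hold(OBJT_DEFAULT, size);
 *	... further initialization while obj is still held ...
 *	vm_object_drop(obj);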
464 */ 465 vm_object_t 466 vm_object_allocate_hold(objtype_t type, vm_pindex_t size) 467 { 468 vm_object_t result; 469 470 result = (vm_object_t) zalloc(obj_zone); 471 472 _vm_object_allocate(type, size, result); 473 474 return (result); 475 } 476 477 /* 478 * Add an additional reference to a vm_object. The object must already be 479 * held. The original non-lock version is no longer supported. The object 480 * must NOT be chain locked by anyone at the time the reference is added. 481 * 482 * Referencing a chain-locked object can blow up the fairly sensitive 483 * ref_count and shadow_count tests in the deallocator. Most callers 484 * will call vm_object_chain_wait() prior to calling 485 * vm_object_reference_locked() to avoid the case. 486 * 487 * The object must be held, but may be held shared if desired (hence why 488 * we use an atomic op). 489 */ 490 void 491 VMOBJDEBUG(vm_object_reference_locked)(vm_object_t object VMOBJDBARGS) 492 { 493 KKASSERT(object != NULL); 494 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 495 KKASSERT((object->chainlk & (CHAINLK_EXCL | CHAINLK_MASK)) == 0); 496 atomic_add_int(&object->ref_count, 1); 497 if (object->type == OBJT_VNODE) { 498 vref(object->handle); 499 /* XXX what if the vnode is being destroyed? */ 500 } 501 #if defined(DEBUG_LOCKS) 502 debugvm_object_add(object, file, line, 1); 503 #endif 504 } 505 506 /* 507 * This version is only allowed for vnode objects. 508 */ 509 void 510 VMOBJDEBUG(vm_object_reference_quick)(vm_object_t object VMOBJDBARGS) 511 { 512 KKASSERT(object->type == OBJT_VNODE); 513 atomic_add_int(&object->ref_count, 1); 514 vref(object->handle); 515 #if defined(DEBUG_LOCKS) 516 debugvm_object_add(object, file, line, 1); 517 #endif 518 } 519 520 /* 521 * Object OBJ_CHAINLOCK lock handling. 522 * 523 * The caller can chain-lock backing objects recursively and then 524 * use vm_object_chain_release_all() to undo the whole chain. 525 * 526 * Chain locks are used to prevent collapses and are only applicable 527 * to OBJT_DEFAULT and OBJT_SWAP objects. Chain locking operations 528 * on other object types are ignored. This is also important because 529 * it allows e.g. the vnode underlying a memory mapping to take concurrent 530 * faults. 531 * 532 * The object must usually be held on entry, though intermediate 533 * objects need not be held on release. The object must be held exclusively, 534 * NOT shared. Note that the prefault path checks the shared state and 535 * avoids using the chain functions. 
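 *
 * Typical pairing (an illustrative sketch, not part of the original
 * comment): the caller holds the object exclusively, chain-locks it
 * around the collapse-sensitive work, then releases in reverse order:
 *
 *	vm_object_hold(object);
 *	vm_object_chain_acquire(object, 0);
 *	... work that must not race a collapse ...
 *	vm_object_chain_release(object);
 *	vm_object_drop(object);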
536 */ 537 void 538 vm_object_chain_wait(vm_object_t object, int shared) 539 { 540 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 541 for (;;) { 542 uint32_t chainlk = object->chainlk; 543 544 cpu_ccfence(); 545 if (shared) { 546 if (chainlk & (CHAINLK_EXCL | CHAINLK_EXCLREQ)) { 547 tsleep_interlock(object, 0); 548 if (atomic_cmpset_int(&object->chainlk, 549 chainlk, 550 chainlk | CHAINLK_WAIT)) { 551 tsleep(object, PINTERLOCKED, 552 "objchns", 0); 553 } 554 /* retry */ 555 } else { 556 break; 557 } 558 /* retry */ 559 } else { 560 if (chainlk & (CHAINLK_MASK | CHAINLK_EXCL)) { 561 tsleep_interlock(object, 0); 562 if (atomic_cmpset_int(&object->chainlk, 563 chainlk, 564 chainlk | CHAINLK_WAIT)) 565 { 566 tsleep(object, PINTERLOCKED, 567 "objchnx", 0); 568 } 569 /* retry */ 570 } else { 571 if (atomic_cmpset_int(&object->chainlk, 572 chainlk, 573 chainlk & ~CHAINLK_WAIT)) 574 { 575 if (chainlk & CHAINLK_WAIT) 576 wakeup(object); 577 break; 578 } 579 /* retry */ 580 } 581 } 582 /* retry */ 583 } 584 } 585 586 void 587 vm_object_chain_acquire(vm_object_t object, int shared) 588 { 589 if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP) 590 return; 591 if (vm_shared_fault == 0) 592 shared = 0; 593 594 for (;;) { 595 uint32_t chainlk = object->chainlk; 596 597 cpu_ccfence(); 598 if (shared) { 599 if (chainlk & (CHAINLK_EXCL | CHAINLK_EXCLREQ)) { 600 tsleep_interlock(object, 0); 601 if (atomic_cmpset_int(&object->chainlk, 602 chainlk, 603 chainlk | CHAINLK_WAIT)) { 604 tsleep(object, PINTERLOCKED, 605 "objchns", 0); 606 } 607 /* retry */ 608 } else if (atomic_cmpset_int(&object->chainlk, 609 chainlk, chainlk + 1)) { 610 break; 611 } 612 /* retry */ 613 } else { 614 if (chainlk & (CHAINLK_MASK | CHAINLK_EXCL)) { 615 tsleep_interlock(object, 0); 616 if (atomic_cmpset_int(&object->chainlk, 617 chainlk, 618 chainlk | 619 CHAINLK_WAIT | 620 CHAINLK_EXCLREQ)) { 621 tsleep(object, PINTERLOCKED, 622 "objchnx", 0); 623 } 624 /* retry */ 625 } else { 626 if (atomic_cmpset_int(&object->chainlk, 627 chainlk, 628 (chainlk | CHAINLK_EXCL) & 629 ~(CHAINLK_EXCLREQ | 630 CHAINLK_WAIT))) { 631 if (chainlk & CHAINLK_WAIT) 632 wakeup(object); 633 break; 634 } 635 /* retry */ 636 } 637 } 638 /* retry */ 639 } 640 } 641 642 void 643 vm_object_chain_release(vm_object_t object) 644 { 645 /*ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));*/ 646 if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP) 647 return; 648 KKASSERT(object->chainlk & (CHAINLK_MASK | CHAINLK_EXCL)); 649 for (;;) { 650 uint32_t chainlk = object->chainlk; 651 652 cpu_ccfence(); 653 if (chainlk & CHAINLK_MASK) { 654 if ((chainlk & CHAINLK_MASK) == 1 && 655 atomic_cmpset_int(&object->chainlk, 656 chainlk, 657 (chainlk - 1) & ~CHAINLK_WAIT)) { 658 if (chainlk & CHAINLK_WAIT) 659 wakeup(object); 660 break; 661 } 662 if ((chainlk & CHAINLK_MASK) > 1 && 663 atomic_cmpset_int(&object->chainlk, 664 chainlk, chainlk - 1)) { 665 break; 666 } 667 /* retry */ 668 } else { 669 KKASSERT(chainlk & CHAINLK_EXCL); 670 if (atomic_cmpset_int(&object->chainlk, 671 chainlk, 672 chainlk & ~(CHAINLK_EXCL | 673 CHAINLK_WAIT))) { 674 if (chainlk & CHAINLK_WAIT) 675 wakeup(object); 676 break; 677 } 678 } 679 } 680 } 681 682 /* 683 * Release the chain from first_object through and including stopobj. 684 * The caller is typically holding the first and last object locked 685 * (shared or exclusive) to prevent destruction races. 686 * 687 * We release stopobj first as an optimization as this object is most 688 * likely to be shared across multiple processes. 
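 *
 * Illustrative sketch (not from the original comment): each object the
 * caller chain-locked while walking backing_object links from
 * first_object down to stopobj, e.g. via vm_object_chain_acquire(obj, 0),
 * is then released by a single call:
 *
 *	vm_object_chain_release_all(first_object, stopobj);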
 */
void
vm_object_chain_release_all(vm_object_t first_object, vm_object_t stopobj)
{
	vm_object_t backing_object;
	vm_object_t object;

	vm_object_chain_release(stopobj);
	object = first_object;

	while (object != stopobj) {
		KKASSERT(object);
		backing_object = object->backing_object;
		vm_object_chain_release(object);
		object = backing_object;
	}
}

/*
 * Dereference an object and its underlying vnode.  The object may be
 * held shared.  On return the object will remain held.
 *
 * This function may return a vnode in *vpp which the caller must release
 * after the caller drops its own lock.  If vpp is NULL, we assume that
 * the caller was holding an exclusive lock on the object and we vrele()
 * the vp ourselves.
 */
static void
VMOBJDEBUG(vm_object_vndeallocate)(vm_object_t object, struct vnode **vpp
				   VMOBJDBARGS)
{
	struct vnode *vp = (struct vnode *) object->handle;

	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif
	for (;;) {
		int count = object->ref_count;
		cpu_ccfence();
		if (count == 1) {
			vm_object_upgrade(object);
			if (atomic_cmpset_int(&object->ref_count, count, 0)) {
				vclrflags(vp, VTEXT);
				break;
			}
		} else {
			if (atomic_cmpset_int(&object->ref_count,
					      count, count - 1)) {
				break;
			}
		}
		/* retry */
	}
#if defined(DEBUG_LOCKS)
	debugvm_object_add(object, file, line, -1);
#endif

	/*
	 * vrele or return the vp to vrele.  We can only safely vrele(vp)
	 * if the object was locked exclusively.  But there are two races
	 * here.
	 *
	 * We had to upgrade the object above to safely clear VTEXT
	 * but the alternative path where the shared lock is retained
	 * can STILL race to 0 in other paths and cause our own vrele()
	 * to terminate the vnode.  We can't allow that if the VM object
	 * is still locked shared.
	 */
	if (vpp)
		*vpp = vp;
	else
		vrele(vp);
}

/*
 * Release a reference to the specified object, gained either through a
 * vm_object_allocate or a vm_object_reference call.  When all references
 * are gone, storage associated with this object may be relinquished.
 *
 * The caller does not have to hold the object locked but must have control
 * over the reference in question in order to guarantee that the object
 * does not get ripped out from under us.
 *
 * XXX Currently all deallocations require an exclusive lock.
 */
void
VMOBJDEBUG(vm_object_deallocate)(vm_object_t object VMOBJDBARGS)
{
	struct vnode *vp;
	int count;

	if (object == NULL)
		return;

	for (;;) {
		count = object->ref_count;
		cpu_ccfence();

		/*
		 * If decrementing the count enters into special handling
		 * territory (0, 1, or 2) we have to do it the hard way.
		 * Fortunately though, objects with only a few refs like this
		 * are not likely to be heavily contended anyway.
		 *
		 * For vnode objects we only care about 1->0 transitions.
801 */ 802 if (count <= 3 || (object->type == OBJT_VNODE && count <= 1)) { 803 #if defined(DEBUG_LOCKS) 804 debugvm_object_add(object, file, line, 0); 805 #endif 806 vm_object_hold(object); 807 vm_object_deallocate_locked(object); 808 vm_object_drop(object); 809 break; 810 } 811 812 /* 813 * Try to decrement ref_count without acquiring a hold on 814 * the object. This is particularly important for the exec*() 815 * and exit*() code paths because the program binary may 816 * have a great deal of sharing and an exclusive lock will 817 * crowbar performance in those circumstances. 818 */ 819 if (object->type == OBJT_VNODE) { 820 vp = (struct vnode *)object->handle; 821 if (atomic_cmpset_int(&object->ref_count, 822 count, count - 1)) { 823 #if defined(DEBUG_LOCKS) 824 debugvm_object_add(object, file, line, -1); 825 #endif 826 827 vrele(vp); 828 break; 829 } 830 /* retry */ 831 } else { 832 if (atomic_cmpset_int(&object->ref_count, 833 count, count - 1)) { 834 #if defined(DEBUG_LOCKS) 835 debugvm_object_add(object, file, line, -1); 836 #endif 837 break; 838 } 839 /* retry */ 840 } 841 /* retry */ 842 } 843 } 844 845 void 846 VMOBJDEBUG(vm_object_deallocate_locked)(vm_object_t object VMOBJDBARGS) 847 { 848 struct vm_object_dealloc_list *dlist = NULL; 849 struct vm_object_dealloc_list *dtmp; 850 vm_object_t temp; 851 int must_drop = 0; 852 853 /* 854 * We may chain deallocate object, but additional objects may 855 * collect on the dlist which also have to be deallocated. We 856 * must avoid a recursion, vm_object chains can get deep. 857 */ 858 859 again: 860 while (object != NULL) { 861 /* 862 * vnode case, caller either locked the object exclusively 863 * or this is a recursion with must_drop != 0 and the vnode 864 * object will be locked shared. 865 * 866 * If locked shared we have to drop the object before we can 867 * call vrele() or risk a shared/exclusive livelock. 868 */ 869 if (object->type == OBJT_VNODE) { 870 ASSERT_LWKT_TOKEN_HELD(&object->token); 871 if (must_drop) { 872 struct vnode *tmp_vp; 873 874 vm_object_vndeallocate(object, &tmp_vp); 875 vm_object_drop(object); 876 must_drop = 0; 877 object = NULL; 878 vrele(tmp_vp); 879 } else { 880 vm_object_vndeallocate(object, NULL); 881 } 882 break; 883 } 884 ASSERT_LWKT_TOKEN_HELD_EXCL(&object->token); 885 886 /* 887 * Normal case (object is locked exclusively) 888 */ 889 if (object->ref_count == 0) { 890 panic("vm_object_deallocate: object deallocated " 891 "too many times: %d", object->type); 892 } 893 if (object->ref_count > 2) { 894 atomic_add_int(&object->ref_count, -1); 895 #if defined(DEBUG_LOCKS) 896 debugvm_object_add(object, file, line, -1); 897 #endif 898 break; 899 } 900 901 /* 902 * Here on ref_count of one or two, which are special cases for 903 * objects. 904 * 905 * Nominal ref_count > 1 case if the second ref is not from 906 * a shadow. 907 * 908 * (ONEMAPPING only applies to DEFAULT AND SWAP objects) 909 */ 910 if (object->ref_count == 2 && object->shadow_count == 0) { 911 if (object->type == OBJT_DEFAULT || 912 object->type == OBJT_SWAP) { 913 vm_object_set_flag(object, OBJ_ONEMAPPING); 914 } 915 atomic_add_int(&object->ref_count, -1); 916 #if defined(DEBUG_LOCKS) 917 debugvm_object_add(object, file, line, -1); 918 #endif 919 break; 920 } 921 922 /* 923 * If the second ref is from a shadow we chain along it 924 * upwards if object's handle is exhausted. 
925 * 926 * We have to decrement object->ref_count before potentially 927 * collapsing the first shadow object or the collapse code 928 * will not be able to handle the degenerate case to remove 929 * object. However, if we do it too early the object can 930 * get ripped out from under us. 931 */ 932 if (object->ref_count == 2 && object->shadow_count == 1 && 933 object->handle == NULL && (object->type == OBJT_DEFAULT || 934 object->type == OBJT_SWAP)) { 935 temp = LIST_FIRST(&object->shadow_head); 936 KKASSERT(temp != NULL); 937 vm_object_hold(temp); 938 939 /* 940 * Wait for any paging to complete so the collapse 941 * doesn't (or isn't likely to) qcollapse. pip 942 * waiting must occur before we acquire the 943 * chainlock. 944 */ 945 while ( 946 temp->paging_in_progress || 947 object->paging_in_progress 948 ) { 949 vm_object_pip_wait(temp, "objde1"); 950 vm_object_pip_wait(object, "objde2"); 951 } 952 953 /* 954 * If the parent is locked we have to give up, as 955 * otherwise we would be acquiring locks in the 956 * wrong order and potentially deadlock. 957 */ 958 if (temp->chainlk & (CHAINLK_EXCL | CHAINLK_MASK)) { 959 vm_object_drop(temp); 960 goto skip; 961 } 962 vm_object_chain_acquire(temp, 0); 963 964 /* 965 * Recheck/retry after the hold and the paging 966 * wait, both of which can block us. 967 */ 968 if (object->ref_count != 2 || 969 object->shadow_count != 1 || 970 object->handle || 971 LIST_FIRST(&object->shadow_head) != temp || 972 (object->type != OBJT_DEFAULT && 973 object->type != OBJT_SWAP)) { 974 vm_object_chain_release(temp); 975 vm_object_drop(temp); 976 continue; 977 } 978 979 /* 980 * We can safely drop object's ref_count now. 981 */ 982 KKASSERT(object->ref_count == 2); 983 atomic_add_int(&object->ref_count, -1); 984 #if defined(DEBUG_LOCKS) 985 debugvm_object_add(object, file, line, -1); 986 #endif 987 988 /* 989 * If our single parent is not collapseable just 990 * decrement ref_count (2->1) and stop. 991 */ 992 if (temp->handle || (temp->type != OBJT_DEFAULT && 993 temp->type != OBJT_SWAP)) { 994 vm_object_chain_release(temp); 995 vm_object_drop(temp); 996 break; 997 } 998 999 /* 1000 * At this point we have already dropped object's 1001 * ref_count so it is possible for a race to 1002 * deallocate obj out from under us. Any collapse 1003 * will re-check the situation. We must not block 1004 * until we are able to collapse. 1005 * 1006 * Bump temp's ref_count to avoid an unwanted 1007 * degenerate recursion (can't call 1008 * vm_object_reference_locked() because it asserts 1009 * that CHAINLOCK is not set). 1010 */ 1011 atomic_add_int(&temp->ref_count, 1); 1012 KKASSERT(temp->ref_count > 1); 1013 1014 /* 1015 * Collapse temp, then deallocate the extra ref 1016 * formally. 1017 */ 1018 vm_object_collapse(temp, &dlist); 1019 vm_object_chain_release(temp); 1020 if (must_drop) { 1021 vm_object_lock_swap(); 1022 vm_object_drop(object); 1023 } 1024 object = temp; 1025 must_drop = 1; 1026 continue; 1027 } 1028 1029 /* 1030 * Drop the ref and handle termination on the 1->0 transition. 1031 * We may have blocked above so we have to recheck. 1032 */ 1033 skip: 1034 KKASSERT(object->ref_count != 0); 1035 if (object->ref_count >= 2) { 1036 atomic_add_int(&object->ref_count, -1); 1037 #if defined(DEBUG_LOCKS) 1038 debugvm_object_add(object, file, line, -1); 1039 #endif 1040 break; 1041 } 1042 KKASSERT(object->ref_count == 1); 1043 1044 /* 1045 * 1->0 transition. Chain through the backing_object. 
1046 * Maintain the ref until we've located the backing object, 1047 * then re-check. 1048 */ 1049 while ((temp = object->backing_object) != NULL) { 1050 if (temp->type == OBJT_VNODE) 1051 vm_object_hold_shared(temp); 1052 else 1053 vm_object_hold(temp); 1054 if (temp == object->backing_object) 1055 break; 1056 vm_object_drop(temp); 1057 } 1058 1059 /* 1060 * 1->0 transition verified, retry if ref_count is no longer 1061 * 1. Otherwise disconnect the backing_object (temp) and 1062 * clean up. 1063 */ 1064 if (object->ref_count != 1) { 1065 vm_object_drop(temp); 1066 continue; 1067 } 1068 1069 /* 1070 * It shouldn't be possible for the object to be chain locked 1071 * if we're removing the last ref on it. 1072 * 1073 * Removing object from temp's shadow list requires dropping 1074 * temp, which we will do on loop. 1075 * 1076 * NOTE! vnodes do not use the shadow list, but still have 1077 * the backing_object reference. 1078 */ 1079 KKASSERT((object->chainlk & (CHAINLK_EXCL|CHAINLK_MASK)) == 0); 1080 1081 if (temp) { 1082 if (object->flags & OBJ_ONSHADOW) { 1083 LIST_REMOVE(object, shadow_list); 1084 temp->shadow_count--; 1085 temp->generation++; 1086 vm_object_clear_flag(object, OBJ_ONSHADOW); 1087 } 1088 object->backing_object = NULL; 1089 } 1090 1091 atomic_add_int(&object->ref_count, -1); 1092 if ((object->flags & OBJ_DEAD) == 0) 1093 vm_object_terminate(object); 1094 if (must_drop && temp) 1095 vm_object_lock_swap(); 1096 if (must_drop) 1097 vm_object_drop(object); 1098 object = temp; 1099 must_drop = 1; 1100 } 1101 1102 if (must_drop && object) 1103 vm_object_drop(object); 1104 1105 /* 1106 * Additional tail recursion on dlist. Avoid a recursion. Objects 1107 * on the dlist have a hold count but are not locked. 1108 */ 1109 if ((dtmp = dlist) != NULL) { 1110 dlist = dtmp->next; 1111 object = dtmp->object; 1112 kfree(dtmp, M_TEMP); 1113 1114 vm_object_lock(object); /* already held, add lock */ 1115 must_drop = 1; /* and we're responsible for it */ 1116 goto again; 1117 } 1118 } 1119 1120 /* 1121 * Destroy the specified object, freeing up related resources. 1122 * 1123 * The object must have zero references. 1124 * 1125 * The object must held. The caller is responsible for dropping the object 1126 * after terminate returns. Terminate does NOT drop the object. 1127 */ 1128 static int vm_object_terminate_callback(vm_page_t p, void *data); 1129 1130 void 1131 vm_object_terminate(vm_object_t object) 1132 { 1133 struct rb_vm_page_scan_info info; 1134 int n; 1135 1136 /* 1137 * Make sure no one uses us. Once we set OBJ_DEAD we should be 1138 * able to safely block. 1139 */ 1140 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 1141 KKASSERT((object->flags & OBJ_DEAD) == 0); 1142 vm_object_set_flag(object, OBJ_DEAD); 1143 1144 /* 1145 * Wait for the pageout daemon to be done with the object 1146 */ 1147 vm_object_pip_wait(object, "objtrm1"); 1148 1149 KASSERT(!object->paging_in_progress, 1150 ("vm_object_terminate: pageout in progress")); 1151 1152 /* 1153 * Clean and free the pages, as appropriate. All references to the 1154 * object are gone, so we don't need to lock it. 1155 */ 1156 if (object->type == OBJT_VNODE) { 1157 struct vnode *vp; 1158 1159 /* 1160 * Clean pages and flush buffers. 1161 * 1162 * NOTE! TMPFS buffer flushes do not typically flush the 1163 * actual page to swap as this would be highly 1164 * inefficient, and normal filesystems usually wrap 1165 * page flushes with buffer cache buffers. 
1166 * 1167 * To deal with this we have to call vinvalbuf() both 1168 * before and after the vm_object_page_clean(). 1169 */ 1170 vp = (struct vnode *) object->handle; 1171 vinvalbuf(vp, V_SAVE, 0, 0); 1172 vm_object_page_clean(object, 0, 0, OBJPC_SYNC); 1173 vinvalbuf(vp, V_SAVE, 0, 0); 1174 } 1175 1176 /* 1177 * Wait for any I/O to complete, after which there had better not 1178 * be any references left on the object. 1179 */ 1180 vm_object_pip_wait(object, "objtrm2"); 1181 1182 if (object->ref_count != 0) { 1183 panic("vm_object_terminate: object with references, " 1184 "ref_count=%d", object->ref_count); 1185 } 1186 1187 /* 1188 * Cleanup any shared pmaps associated with this object. 1189 */ 1190 pmap_object_free(object); 1191 1192 /* 1193 * Now free any remaining pages. For internal objects, this also 1194 * removes them from paging queues. Don't free wired pages, just 1195 * remove them from the object. 1196 */ 1197 info.count = 0; 1198 info.object = object; 1199 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL, 1200 vm_object_terminate_callback, &info); 1201 1202 /* 1203 * Let the pager know object is dead. 1204 */ 1205 vm_pager_deallocate(object); 1206 1207 /* 1208 * Wait for the object hold count to hit 1, clean out pages as 1209 * we go. vmobj_token interlocks any race conditions that might 1210 * pick the object up from the vm_object_list after we have cleared 1211 * rb_memq. 1212 */ 1213 for (;;) { 1214 if (RB_ROOT(&object->rb_memq) == NULL) 1215 break; 1216 kprintf("vm_object_terminate: Warning, object %p " 1217 "still has %d pages\n", 1218 object, object->resident_page_count); 1219 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL, 1220 vm_object_terminate_callback, &info); 1221 } 1222 1223 /* 1224 * There had better not be any pages left 1225 */ 1226 KKASSERT(object->resident_page_count == 0); 1227 1228 /* 1229 * Remove the object from the global object list. 1230 */ 1231 n = VMOBJ_HASH(object); 1232 lwkt_gettoken(&vmobj_tokens[n]); 1233 TAILQ_REMOVE(&vm_object_lists[n], object, object_list); 1234 lwkt_reltoken(&vmobj_tokens[n]); 1235 atomic_add_long(&vm_object_count, -1); 1236 1237 if (object->ref_count != 0) { 1238 panic("vm_object_terminate2: object with references, " 1239 "ref_count=%d", object->ref_count); 1240 } 1241 1242 /* 1243 * NOTE: The object hold_count is at least 1, so we cannot zfree() 1244 * the object here. See vm_object_drop(). 1245 */ 1246 } 1247 1248 /* 1249 * The caller must hold the object. 1250 */ 1251 static int 1252 vm_object_terminate_callback(vm_page_t p, void *data) 1253 { 1254 struct rb_vm_page_scan_info *info = data; 1255 vm_object_t object; 1256 1257 if ((++info->count & 63) == 0) 1258 lwkt_user_yield(); 1259 object = p->object; 1260 if (object != info->object) { 1261 kprintf("vm_object_terminate_callback: obj/pg race %p/%p\n", 1262 info->object, p); 1263 return(0); 1264 } 1265 vm_page_busy_wait(p, TRUE, "vmpgtrm"); 1266 if (object != p->object) { 1267 kprintf("vm_object_terminate: Warning: Encountered " 1268 "busied page %p on queue %d\n", p, p->queue); 1269 vm_page_wakeup(p); 1270 } else if (p->wire_count == 0) { 1271 /* 1272 * NOTE: p->dirty and PG_NEED_COMMIT are ignored. 1273 */ 1274 vm_page_free(p); 1275 mycpu->gd_cnt.v_pfree++; 1276 } else { 1277 if (p->queue != PQ_NONE) 1278 kprintf("vm_object_terminate: Warning: Encountered " 1279 "wired page %p on queue %d\n", p, p->queue); 1280 vm_page_remove(p); 1281 vm_page_wakeup(p); 1282 } 1283 return(0); 1284 } 1285 1286 /* 1287 * Clean all dirty pages in the specified range of object. 
Leaves page 1288 * on whatever queue it is currently on. If NOSYNC is set then do not 1289 * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC), 1290 * leaving the object dirty. 1291 * 1292 * When stuffing pages asynchronously, allow clustering. XXX we need a 1293 * synchronous clustering mode implementation. 1294 * 1295 * Odd semantics: if start == end, we clean everything. 1296 * 1297 * The object must be locked? XXX 1298 */ 1299 static int vm_object_page_clean_pass1(struct vm_page *p, void *data); 1300 static int vm_object_page_clean_pass2(struct vm_page *p, void *data); 1301 1302 void 1303 vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, 1304 int flags) 1305 { 1306 struct rb_vm_page_scan_info info; 1307 struct vnode *vp; 1308 int wholescan; 1309 int pagerflags; 1310 int generation; 1311 1312 vm_object_hold(object); 1313 if (object->type != OBJT_VNODE || 1314 (object->flags & OBJ_MIGHTBEDIRTY) == 0) { 1315 vm_object_drop(object); 1316 return; 1317 } 1318 1319 pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? 1320 VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK; 1321 pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0; 1322 1323 vp = object->handle; 1324 1325 /* 1326 * Interlock other major object operations. This allows us to 1327 * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY. 1328 */ 1329 vm_object_set_flag(object, OBJ_CLEANING); 1330 1331 /* 1332 * Handle 'entire object' case 1333 */ 1334 info.start_pindex = start; 1335 if (end == 0) { 1336 info.end_pindex = object->size - 1; 1337 } else { 1338 info.end_pindex = end - 1; 1339 } 1340 wholescan = (start == 0 && info.end_pindex == object->size - 1); 1341 info.limit = flags; 1342 info.pagerflags = pagerflags; 1343 info.object = object; 1344 1345 /* 1346 * If cleaning the entire object do a pass to mark the pages read-only. 1347 * If everything worked out ok, clear OBJ_WRITEABLE and 1348 * OBJ_MIGHTBEDIRTY. 1349 */ 1350 if (wholescan) { 1351 info.error = 0; 1352 info.count = 0; 1353 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp, 1354 vm_object_page_clean_pass1, &info); 1355 if (info.error == 0) { 1356 vm_object_clear_flag(object, 1357 OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY); 1358 if (object->type == OBJT_VNODE && 1359 (vp = (struct vnode *)object->handle) != NULL) { 1360 /* 1361 * Use new-style interface to clear VISDIRTY 1362 * because the vnode is not necessarily removed 1363 * from the syncer list(s) as often as it was 1364 * under the old interface, which can leave 1365 * the vnode on the syncer list after reclaim. 1366 */ 1367 vclrobjdirty(vp); 1368 } 1369 } 1370 } 1371 1372 /* 1373 * Do a pass to clean all the dirty pages we find. 1374 */ 1375 do { 1376 info.error = 0; 1377 info.count = 0; 1378 generation = object->generation; 1379 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp, 1380 vm_object_page_clean_pass2, &info); 1381 } while (info.error || generation != object->generation); 1382 1383 vm_object_clear_flag(object, OBJ_CLEANING); 1384 vm_object_drop(object); 1385 } 1386 1387 /* 1388 * The caller must hold the object. 
1389 */ 1390 static 1391 int 1392 vm_object_page_clean_pass1(struct vm_page *p, void *data) 1393 { 1394 struct rb_vm_page_scan_info *info = data; 1395 1396 if ((++info->count & 63) == 0) 1397 lwkt_user_yield(); 1398 if (p->object != info->object || 1399 p->pindex < info->start_pindex || 1400 p->pindex > info->end_pindex) { 1401 kprintf("vm_object_page_clean_pass1: obj/pg race %p/%p\n", 1402 info->object, p); 1403 return(0); 1404 } 1405 vm_page_flag_set(p, PG_CLEANCHK); 1406 if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) { 1407 info->error = 1; 1408 } else if (vm_page_busy_try(p, FALSE) == 0) { 1409 if (p->object == info->object) 1410 vm_page_protect(p, VM_PROT_READ); 1411 vm_page_wakeup(p); 1412 } else { 1413 info->error = 1; 1414 } 1415 return(0); 1416 } 1417 1418 /* 1419 * The caller must hold the object 1420 */ 1421 static 1422 int 1423 vm_object_page_clean_pass2(struct vm_page *p, void *data) 1424 { 1425 struct rb_vm_page_scan_info *info = data; 1426 int generation; 1427 1428 if (p->object != info->object || 1429 p->pindex < info->start_pindex || 1430 p->pindex > info->end_pindex) { 1431 kprintf("vm_object_page_clean_pass2: obj/pg race %p/%p\n", 1432 info->object, p); 1433 return(0); 1434 } 1435 1436 /* 1437 * Do not mess with pages that were inserted after we started 1438 * the cleaning pass. 1439 */ 1440 if ((p->flags & PG_CLEANCHK) == 0) 1441 goto done; 1442 1443 generation = info->object->generation; 1444 vm_page_busy_wait(p, TRUE, "vpcwai"); 1445 1446 if (p->object != info->object || 1447 p->pindex < info->start_pindex || 1448 p->pindex > info->end_pindex || 1449 info->object->generation != generation) { 1450 info->error = 1; 1451 vm_page_wakeup(p); 1452 goto done; 1453 } 1454 1455 /* 1456 * Before wasting time traversing the pmaps, check for trivial 1457 * cases where the page cannot be dirty. 1458 */ 1459 if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) { 1460 KKASSERT((p->dirty & p->valid) == 0 && 1461 (p->flags & PG_NEED_COMMIT) == 0); 1462 vm_page_wakeup(p); 1463 goto done; 1464 } 1465 1466 /* 1467 * Check whether the page is dirty or not. The page has been set 1468 * to be read-only so the check will not race a user dirtying the 1469 * page. 1470 */ 1471 vm_page_test_dirty(p); 1472 if ((p->dirty & p->valid) == 0 && (p->flags & PG_NEED_COMMIT) == 0) { 1473 vm_page_flag_clear(p, PG_CLEANCHK); 1474 vm_page_wakeup(p); 1475 goto done; 1476 } 1477 1478 /* 1479 * If we have been asked to skip nosync pages and this is a 1480 * nosync page, skip it. Note that the object flags were 1481 * not cleared in this case (because pass1 will have returned an 1482 * error), so we do not have to set them. 1483 */ 1484 if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) { 1485 vm_page_flag_clear(p, PG_CLEANCHK); 1486 vm_page_wakeup(p); 1487 goto done; 1488 } 1489 1490 /* 1491 * Flush as many pages as we can. PG_CLEANCHK will be cleared on 1492 * the pages that get successfully flushed. Set info->error if 1493 * we raced an object modification. 1494 */ 1495 vm_object_page_collect_flush(info->object, p, info->pagerflags); 1496 /* vm_wait_nominal(); this can deadlock the system in syncer/pageout */ 1497 done: 1498 if ((++info->count & 63) == 0) 1499 lwkt_user_yield(); 1500 1501 return(0); 1502 } 1503 1504 /* 1505 * Collect the specified page and nearby pages and flush them out. 1506 * The number of pages flushed is returned. The passed page is busied 1507 * by the caller and we are responsible for its disposition. 1508 * 1509 * The caller must hold the object. 
1510 */ 1511 static void 1512 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags) 1513 { 1514 int error; 1515 int is; 1516 int ib; 1517 int i; 1518 int page_base; 1519 vm_pindex_t pi; 1520 vm_page_t ma[BLIST_MAX_ALLOC]; 1521 1522 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); 1523 1524 pi = p->pindex; 1525 page_base = pi % BLIST_MAX_ALLOC; 1526 ma[page_base] = p; 1527 ib = page_base - 1; 1528 is = page_base + 1; 1529 1530 while (ib >= 0) { 1531 vm_page_t tp; 1532 1533 tp = vm_page_lookup_busy_try(object, pi - page_base + ib, 1534 TRUE, &error); 1535 if (error) 1536 break; 1537 if (tp == NULL) 1538 break; 1539 if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 && 1540 (tp->flags & PG_CLEANCHK) == 0) { 1541 vm_page_wakeup(tp); 1542 break; 1543 } 1544 if ((tp->queue - tp->pc) == PQ_CACHE) { 1545 vm_page_flag_clear(tp, PG_CLEANCHK); 1546 vm_page_wakeup(tp); 1547 break; 1548 } 1549 vm_page_test_dirty(tp); 1550 if ((tp->dirty & tp->valid) == 0 && 1551 (tp->flags & PG_NEED_COMMIT) == 0) { 1552 vm_page_flag_clear(tp, PG_CLEANCHK); 1553 vm_page_wakeup(tp); 1554 break; 1555 } 1556 ma[ib] = tp; 1557 --ib; 1558 } 1559 ++ib; /* fixup */ 1560 1561 while (is < BLIST_MAX_ALLOC && 1562 pi - page_base + is < object->size) { 1563 vm_page_t tp; 1564 1565 tp = vm_page_lookup_busy_try(object, pi - page_base + is, 1566 TRUE, &error); 1567 if (error) 1568 break; 1569 if (tp == NULL) 1570 break; 1571 if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 && 1572 (tp->flags & PG_CLEANCHK) == 0) { 1573 vm_page_wakeup(tp); 1574 break; 1575 } 1576 if ((tp->queue - tp->pc) == PQ_CACHE) { 1577 vm_page_flag_clear(tp, PG_CLEANCHK); 1578 vm_page_wakeup(tp); 1579 break; 1580 } 1581 vm_page_test_dirty(tp); 1582 if ((tp->dirty & tp->valid) == 0 && 1583 (tp->flags & PG_NEED_COMMIT) == 0) { 1584 vm_page_flag_clear(tp, PG_CLEANCHK); 1585 vm_page_wakeup(tp); 1586 break; 1587 } 1588 ma[is] = tp; 1589 ++is; 1590 } 1591 1592 /* 1593 * All pages in the ma[] array are busied now 1594 */ 1595 for (i = ib; i < is; ++i) { 1596 vm_page_flag_clear(ma[i], PG_CLEANCHK); 1597 vm_page_hold(ma[i]); /* XXX need this any more? */ 1598 } 1599 vm_pageout_flush(&ma[ib], is - ib, pagerflags); 1600 for (i = ib; i < is; ++i) /* XXX need this any more? */ 1601 vm_page_unhold(ma[i]); 1602 } 1603 1604 /* 1605 * Same as vm_object_pmap_copy, except range checking really 1606 * works, and is meant for small sections of an object. 1607 * 1608 * This code protects resident pages by making them read-only 1609 * and is typically called on a fork or split when a page 1610 * is converted to copy-on-write. 1611 * 1612 * NOTE: If the page is already at VM_PROT_NONE, calling 1613 * vm_page_protect will have no effect. 1614 */ 1615 void 1616 vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 1617 { 1618 vm_pindex_t idx; 1619 vm_page_t p; 1620 1621 if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0) 1622 return; 1623 1624 vm_object_hold(object); 1625 for (idx = start; idx < end; idx++) { 1626 p = vm_page_lookup(object, idx); 1627 if (p == NULL) 1628 continue; 1629 vm_page_protect(p, VM_PROT_READ); 1630 } 1631 vm_object_drop(object); 1632 } 1633 1634 /* 1635 * Removes all physical pages in the specified object range from all 1636 * physical maps. 1637 * 1638 * The object must *not* be locked. 
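 *
 * Illustrative call (sketch): removing every mapping of an entire
 * object, which also permits OBJ_WRITEABLE to be cleared below:
 *
 *	vm_object_pmap_remove(object, 0, object->size);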
1639 */ 1640 1641 static int vm_object_pmap_remove_callback(vm_page_t p, void *data); 1642 1643 void 1644 vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 1645 { 1646 struct rb_vm_page_scan_info info; 1647 1648 if (object == NULL) 1649 return; 1650 info.start_pindex = start; 1651 info.end_pindex = end - 1; 1652 info.count = 0; 1653 info.object = object; 1654 1655 vm_object_hold(object); 1656 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp, 1657 vm_object_pmap_remove_callback, &info); 1658 if (start == 0 && end == object->size) 1659 vm_object_clear_flag(object, OBJ_WRITEABLE); 1660 vm_object_drop(object); 1661 } 1662 1663 /* 1664 * The caller must hold the object 1665 */ 1666 static int 1667 vm_object_pmap_remove_callback(vm_page_t p, void *data) 1668 { 1669 struct rb_vm_page_scan_info *info = data; 1670 1671 if ((++info->count & 63) == 0) 1672 lwkt_user_yield(); 1673 1674 if (info->object != p->object || 1675 p->pindex < info->start_pindex || 1676 p->pindex > info->end_pindex) { 1677 kprintf("vm_object_pmap_remove_callback: obj/pg race %p/%p\n", 1678 info->object, p); 1679 return(0); 1680 } 1681 1682 vm_page_protect(p, VM_PROT_NONE); 1683 1684 return(0); 1685 } 1686 1687 /* 1688 * Implements the madvise function at the object/page level. 1689 * 1690 * MADV_WILLNEED (any object) 1691 * 1692 * Activate the specified pages if they are resident. 1693 * 1694 * MADV_DONTNEED (any object) 1695 * 1696 * Deactivate the specified pages if they are resident. 1697 * 1698 * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects, OBJ_ONEMAPPING only) 1699 * 1700 * Deactivate and clean the specified pages if they are 1701 * resident. This permits the process to reuse the pages 1702 * without faulting or the kernel to reclaim the pages 1703 * without I/O. 1704 * 1705 * No requirements. 1706 */ 1707 void 1708 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise) 1709 { 1710 vm_pindex_t end, tpindex; 1711 vm_object_t tobject; 1712 vm_object_t xobj; 1713 vm_page_t m; 1714 int error; 1715 1716 if (object == NULL) 1717 return; 1718 1719 end = pindex + count; 1720 1721 vm_object_hold(object); 1722 tobject = object; 1723 1724 /* 1725 * Locate and adjust resident pages 1726 */ 1727 for (; pindex < end; pindex += 1) { 1728 relookup: 1729 if (tobject != object) 1730 vm_object_drop(tobject); 1731 tobject = object; 1732 tpindex = pindex; 1733 shadowlookup: 1734 /* 1735 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages 1736 * and those pages must be OBJ_ONEMAPPING. 
1737 */ 1738 if (advise == MADV_FREE) { 1739 if ((tobject->type != OBJT_DEFAULT && 1740 tobject->type != OBJT_SWAP) || 1741 (tobject->flags & OBJ_ONEMAPPING) == 0) { 1742 continue; 1743 } 1744 } 1745 1746 m = vm_page_lookup_busy_try(tobject, tpindex, TRUE, &error); 1747 1748 if (error) { 1749 vm_page_sleep_busy(m, TRUE, "madvpo"); 1750 goto relookup; 1751 } 1752 if (m == NULL) { 1753 /* 1754 * There may be swap even if there is no backing page 1755 */ 1756 if (advise == MADV_FREE && tobject->type == OBJT_SWAP) 1757 swap_pager_freespace(tobject, tpindex, 1); 1758 1759 /* 1760 * next object 1761 */ 1762 while ((xobj = tobject->backing_object) != NULL) { 1763 KKASSERT(xobj != object); 1764 vm_object_hold(xobj); 1765 if (xobj == tobject->backing_object) 1766 break; 1767 vm_object_drop(xobj); 1768 } 1769 if (xobj == NULL) 1770 continue; 1771 tpindex += OFF_TO_IDX(tobject->backing_object_offset); 1772 if (tobject != object) { 1773 vm_object_lock_swap(); 1774 vm_object_drop(tobject); 1775 } 1776 tobject = xobj; 1777 goto shadowlookup; 1778 } 1779 1780 /* 1781 * If the page is not in a normal active state, we skip it. 1782 * If the page is not managed there are no page queues to 1783 * mess with. Things can break if we mess with pages in 1784 * any of the below states. 1785 */ 1786 if (m->wire_count || 1787 (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) || 1788 m->valid != VM_PAGE_BITS_ALL 1789 ) { 1790 vm_page_wakeup(m); 1791 continue; 1792 } 1793 1794 /* 1795 * Theoretically once a page is known not to be busy, an 1796 * interrupt cannot come along and rip it out from under us. 1797 */ 1798 1799 if (advise == MADV_WILLNEED) { 1800 vm_page_activate(m); 1801 } else if (advise == MADV_DONTNEED) { 1802 vm_page_dontneed(m); 1803 } else if (advise == MADV_FREE) { 1804 /* 1805 * Mark the page clean. This will allow the page 1806 * to be freed up by the system. However, such pages 1807 * are often reused quickly by malloc()/free() 1808 * so we do not do anything that would cause 1809 * a page fault if we can help it. 1810 * 1811 * Specifically, we do not try to actually free 1812 * the page now nor do we try to put it in the 1813 * cache (which would cause a page fault on reuse). 1814 * 1815 * But we do make the page is freeable as we 1816 * can without actually taking the step of unmapping 1817 * it. 1818 */ 1819 pmap_clear_modify(m); 1820 m->dirty = 0; 1821 m->act_count = 0; 1822 vm_page_dontneed(m); 1823 if (tobject->type == OBJT_SWAP) 1824 swap_pager_freespace(tobject, tpindex, 1); 1825 } 1826 vm_page_wakeup(m); 1827 } 1828 if (tobject != object) 1829 vm_object_drop(tobject); 1830 vm_object_drop(object); 1831 } 1832 1833 /* 1834 * Create a new object which is backed by the specified existing object 1835 * range. Replace the pointer and offset that was pointing at the existing 1836 * object with the pointer/offset for the new object. 1837 * 1838 * If addref is non-zero the returned object is given an additional reference. 1839 * This mechanic exists to avoid the situation where refs might be 1 and 1840 * race against a collapse when the caller intends to bump it. So the 1841 * caller cannot add the ref after the fact. Used when the caller is 1842 * duplicating a vm_map_entry. 1843 * 1844 * No other requirements. 
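 *
 * Illustrative usage (a sketch; the names entry_obj, entry_off and len
 * are hypothetical stand-ins for a caller such as a vm_map_entry
 * duplication path):
 *
 *	vm_object_t  obj = entry_obj;
 *	vm_ooffset_t off = entry_off;
 *
 *	vm_object_shadow(&obj, &off, len, 1);
 *
 * On return obj and off describe the new shadowing object.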
 */
void
vm_object_shadow(vm_object_t *objectp, vm_ooffset_t *offset, vm_size_t length,
		 int addref)
{
	vm_object_t source;
	vm_object_t result;
	int useshadowlist;

	source = *objectp;

	/*
	 * Don't create the new object if the old object isn't shared.
	 * We have to chain wait before adding the reference to avoid
	 * racing a collapse or deallocation.
	 *
	 * Clear OBJ_ONEMAPPING flag when shadowing.
	 *
	 * The caller owns a ref on source via *objectp which we are going
	 * to replace.  This ref is inherited by the backing_object
	 * assignment and does not need to be incremented here.
	 *
	 * However, we add a temporary extra reference to the original source
	 * prior to holding the new object in case we block, to avoid races
	 * where someone else might believe that the source can be collapsed.
	 */
	useshadowlist = 0;
	if (source) {
		if (source->type != OBJT_VNODE) {
			useshadowlist = 1;
			vm_object_hold(source);
			vm_object_chain_wait(source, 0);
			if (source->ref_count == 1 &&
			    source->handle == NULL &&
			    (source->type == OBJT_DEFAULT ||
			     source->type == OBJT_SWAP)) {
				if (addref) {
					vm_object_reference_locked(source);
					vm_object_clear_flag(source,
							     OBJ_ONEMAPPING);
				}
				vm_object_drop(source);
				return;
			}
			vm_object_reference_locked(source);
			vm_object_clear_flag(source, OBJ_ONEMAPPING);
		} else {
			vm_object_reference_quick(source);
			vm_object_clear_flag(source, OBJ_ONEMAPPING);
		}
	}

	/*
	 * Allocate a new object with the given length.  The new object
	 * is returned referenced but we may have to add another one.
	 * If we are adding a second reference we must clear OBJ_ONEMAPPING
	 * (typically because the caller is about to clone a vm_map_entry).
	 *
	 * The source object currently has an extra reference to prevent
	 * collapses into it while we mess with its shadow list, which
	 * we will remove later in this routine.
	 *
	 * The target object may require a second reference if asked for one
	 * by the caller.
	 */
	result = vm_object_allocate(OBJT_DEFAULT, length);
	if (result == NULL)
		panic("vm_object_shadow: no object for shadowing");
	vm_object_hold(result);
	if (addref) {
		vm_object_reference_locked(result);
		vm_object_clear_flag(result, OBJ_ONEMAPPING);
	}

	/*
	 * The new object shadows the source object.  Chain wait before
	 * adjusting shadow_count or the shadow list to avoid races.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 *
	 * The backing_object reference to source requires adding a ref to
	 * source.  We simply inherit the ref from the original *objectp
	 * (which we are replacing) so no additional refs need to be added.
	 * (we must still clean up the extra ref we had to prevent collapse
	 * races).
1932 * 1933 * SHADOWING IS NOT APPLICABLE TO OBJT_VNODE OBJECTS 1934 */ 1935 KKASSERT(result->backing_object == NULL); 1936 result->backing_object = source; 1937 if (source) { 1938 if (useshadowlist) { 1939 vm_object_chain_wait(source, 0); 1940 LIST_INSERT_HEAD(&source->shadow_head, 1941 result, shadow_list); 1942 source->shadow_count++; 1943 source->generation++; 1944 vm_object_set_flag(result, OBJ_ONSHADOW); 1945 } 1946 /* cpu localization twist */ 1947 result->pg_color = vm_quickcolor(); 1948 } 1949 1950 /* 1951 * Adjust the return storage. Drop the ref on source before 1952 * returning. 1953 */ 1954 result->backing_object_offset = *offset; 1955 vm_object_drop(result); 1956 *offset = 0; 1957 if (source) { 1958 if (useshadowlist) { 1959 vm_object_deallocate_locked(source); 1960 vm_object_drop(source); 1961 } else { 1962 vm_object_deallocate(source); 1963 } 1964 } 1965 1966 /* 1967 * Return the new things 1968 */ 1969 *objectp = result; 1970 } 1971 1972 #define OBSC_TEST_ALL_SHADOWED 0x0001 1973 #define OBSC_COLLAPSE_NOWAIT 0x0002 1974 #define OBSC_COLLAPSE_WAIT 0x0004 1975 1976 static int vm_object_backing_scan_callback(vm_page_t p, void *data); 1977 1978 /* 1979 * The caller must hold the object. 1980 */ 1981 static __inline int 1982 vm_object_backing_scan(vm_object_t object, vm_object_t backing_object, int op) 1983 { 1984 struct rb_vm_page_scan_info info; 1985 int n; 1986 1987 vm_object_assert_held(object); 1988 vm_object_assert_held(backing_object); 1989 1990 KKASSERT(backing_object == object->backing_object); 1991 info.backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 1992 1993 /* 1994 * Initial conditions 1995 */ 1996 if (op & OBSC_TEST_ALL_SHADOWED) { 1997 /* 1998 * We do not want to have to test for the existence of 1999 * swap pages in the backing object. XXX but with the 2000 * new swapper this would be pretty easy to do. 2001 * 2002 * XXX what about anonymous MAP_SHARED memory that hasn't 2003 * been ZFOD faulted yet? If we do not test for this, the 2004 * shadow test may succeed! XXX 2005 */ 2006 if (backing_object->type != OBJT_DEFAULT) 2007 return(0); 2008 } 2009 if (op & OBSC_COLLAPSE_WAIT) { 2010 KKASSERT((backing_object->flags & OBJ_DEAD) == 0); 2011 vm_object_set_flag(backing_object, OBJ_DEAD); 2012 2013 n = VMOBJ_HASH(backing_object); 2014 lwkt_gettoken(&vmobj_tokens[n]); 2015 TAILQ_REMOVE(&vm_object_lists[n], backing_object, object_list); 2016 lwkt_reltoken(&vmobj_tokens[n]); 2017 atomic_add_long(&vm_object_count, -1); 2018 } 2019 2020 /* 2021 * Our scan. We have to retry if a negative error code is returned, 2022 * otherwise 0 or 1 will be returned in info.error. 0 Indicates that 2023 * the scan had to be stopped because the parent does not completely 2024 * shadow the child. 2025 */ 2026 info.object = object; 2027 info.backing_object = backing_object; 2028 info.limit = op; 2029 info.count = 0; 2030 do { 2031 info.error = 1; 2032 vm_page_rb_tree_RB_SCAN(&backing_object->rb_memq, NULL, 2033 vm_object_backing_scan_callback, 2034 &info); 2035 } while (info.error < 0); 2036 2037 return(info.error); 2038 } 2039 2040 /* 2041 * The caller must hold the object. 
2042 */ 2043 static int 2044 vm_object_backing_scan_callback(vm_page_t p, void *data) 2045 { 2046 struct rb_vm_page_scan_info *info = data; 2047 vm_object_t backing_object; 2048 vm_object_t object; 2049 vm_pindex_t pindex; 2050 vm_pindex_t new_pindex; 2051 vm_pindex_t backing_offset_index; 2052 int op; 2053 2054 pindex = p->pindex; 2055 new_pindex = pindex - info->backing_offset_index; 2056 op = info->limit; 2057 object = info->object; 2058 backing_object = info->backing_object; 2059 backing_offset_index = info->backing_offset_index; 2060 2061 if (op & OBSC_TEST_ALL_SHADOWED) { 2062 vm_page_t pp; 2063 2064 /* 2065 * Ignore pages outside the parent object's range 2066 * and outside the parent object's mapping of the 2067 * backing object. 2068 * 2069 * note that we do not busy the backing object's 2070 * page. 2071 */ 2072 if (pindex < backing_offset_index || 2073 new_pindex >= object->size 2074 ) { 2075 return(0); 2076 } 2077 2078 /* 2079 * See if the parent has the page or if the parent's 2080 * object pager has the page. If the parent has the 2081 * page but the page is not valid, the parent's 2082 * object pager must have the page. 2083 * 2084 * If this fails, the parent does not completely shadow 2085 * the object and we might as well give up now. 2086 */ 2087 pp = vm_page_lookup(object, new_pindex); 2088 if ((pp == NULL || pp->valid == 0) && 2089 !vm_pager_has_page(object, new_pindex) 2090 ) { 2091 info->error = 0; /* problemo */ 2092 return(-1); /* stop the scan */ 2093 } 2094 } 2095 2096 /* 2097 * Check for busy page. Note that we may have lost (p) when we 2098 * possibly blocked above. 2099 */ 2100 if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) { 2101 vm_page_t pp; 2102 2103 if (vm_page_busy_try(p, TRUE)) { 2104 if (op & OBSC_COLLAPSE_NOWAIT) { 2105 return(0); 2106 } else { 2107 /* 2108 * If we slept, anything could have 2109 * happened. Ask that the scan be restarted. 2110 * 2111 * Since the object is marked dead, the 2112 * backing offset should not have changed. 2113 */ 2114 vm_page_sleep_busy(p, TRUE, "vmocol"); 2115 info->error = -1; 2116 return(-1); 2117 } 2118 } 2119 2120 /* 2121 * If (p) is no longer valid restart the scan. 2122 */ 2123 if (p->object != backing_object || p->pindex != pindex) { 2124 kprintf("vm_object_backing_scan: Warning: page " 2125 "%p ripped out from under us\n", p); 2126 vm_page_wakeup(p); 2127 info->error = -1; 2128 return(-1); 2129 } 2130 2131 if (op & OBSC_COLLAPSE_NOWAIT) { 2132 if (p->valid == 0 || 2133 p->wire_count || 2134 (p->flags & PG_NEED_COMMIT)) { 2135 vm_page_wakeup(p); 2136 return(0); 2137 } 2138 } else { 2139 /* XXX what if p->valid == 0 , hold_count, etc? */ 2140 } 2141 2142 KASSERT( 2143 p->object == backing_object, 2144 ("vm_object_qcollapse(): object mismatch") 2145 ); 2146 2147 /* 2148 * Destroy any associated swap 2149 */ 2150 if (backing_object->type == OBJT_SWAP) 2151 swap_pager_freespace(backing_object, p->pindex, 1); 2152 2153 if ( 2154 p->pindex < backing_offset_index || 2155 new_pindex >= object->size 2156 ) { 2157 /* 2158 * Page is out of the parent object's range, we 2159 * can simply destroy it. 2160 */ 2161 vm_page_protect(p, VM_PROT_NONE); 2162 vm_page_free(p); 2163 return(0); 2164 } 2165 2166 pp = vm_page_lookup(object, new_pindex); 2167 if (pp != NULL || vm_pager_has_page(object, new_pindex)) { 2168 /* 2169 * page already exists in parent OR swap exists 2170 * for this location in the parent. Destroy 2171 * the original page from the backing object. 
2172 * 2173 * Leave the parent's page alone 2174 */ 2175 vm_page_protect(p, VM_PROT_NONE); 2176 vm_page_free(p); 2177 return(0); 2178 } 2179 2180 /* 2181 * Page does not exist in parent, rename the 2182 * page from the backing object to the main object. 2183 * 2184 * If the page was mapped to a process, it can remain 2185 * mapped through the rename. 2186 */ 2187 if ((p->queue - p->pc) == PQ_CACHE) 2188 vm_page_deactivate(p); 2189 2190 vm_page_rename(p, object, new_pindex); 2191 vm_page_wakeup(p); 2192 /* page automatically made dirty by rename */ 2193 } 2194 return(0); 2195 } 2196 2197 /* 2198 * This version of collapse allows the operation to occur earlier and 2199 * when paging_in_progress is true for an object... This is not a complete 2200 * operation, but should plug 99.9% of the rest of the leaks. 2201 * 2202 * The caller must hold the object and backing_object and both must be 2203 * chainlocked. 2204 * 2205 * (only called from vm_object_collapse) 2206 */ 2207 static void 2208 vm_object_qcollapse(vm_object_t object, vm_object_t backing_object) 2209 { 2210 if (backing_object->ref_count == 1) { 2211 atomic_add_int(&backing_object->ref_count, 2); 2212 #if defined(DEBUG_LOCKS) 2213 debugvm_object_add(backing_object, "qcollapse", 1, 2); 2214 #endif 2215 vm_object_backing_scan(object, backing_object, 2216 OBSC_COLLAPSE_NOWAIT); 2217 atomic_add_int(&backing_object->ref_count, -2); 2218 #if defined(DEBUG_LOCKS) 2219 debugvm_object_add(backing_object, "qcollapse", 2, -2); 2220 #endif 2221 } 2222 } 2223 2224 /* 2225 * Collapse an object with the object backing it. Pages in the backing 2226 * object are moved into the parent, and the backing object is deallocated. 2227 * Any conflict is resolved in favor of the parent's existing pages. 2228 * 2229 * object must be held and chain-locked on call. 2230 * 2231 * The caller must have an extra ref on object to prevent a race from 2232 * destroying it during the collapse. 2233 */ 2234 void 2235 vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp) 2236 { 2237 struct vm_object_dealloc_list *dlist = NULL; 2238 vm_object_t backing_object; 2239 2240 /* 2241 * Only one thread is attempting a collapse at any given moment. 2242 * There are few restrictions for (object) that callers of this 2243 * function check so reentrancy is likely. 2244 */ 2245 KKASSERT(object != NULL); 2246 vm_object_assert_held(object); 2247 KKASSERT(object->chainlk & (CHAINLK_MASK | CHAINLK_EXCL)); 2248 2249 for (;;) { 2250 vm_object_t bbobj; 2251 int dodealloc; 2252 2253 /* 2254 * We can only collapse a DEFAULT/SWAP object with a 2255 * DEFAULT/SWAP object. 2256 */ 2257 if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP) { 2258 backing_object = NULL; 2259 break; 2260 } 2261 2262 backing_object = object->backing_object; 2263 if (backing_object == NULL) 2264 break; 2265 if (backing_object->type != OBJT_DEFAULT && 2266 backing_object->type != OBJT_SWAP) { 2267 backing_object = NULL; 2268 break; 2269 } 2270 2271 /* 2272 * Hold the backing_object and check for races 2273 */ 2274 vm_object_hold(backing_object); 2275 if (backing_object != object->backing_object || 2276 (backing_object->type != OBJT_DEFAULT && 2277 backing_object->type != OBJT_SWAP)) { 2278 vm_object_drop(backing_object); 2279 continue; 2280 } 2281 2282 /* 2283 * Chain-lock the backing object too because if we 2284 * successfully merge its pages into the top object we 2285 * will collapse backing_object->backing_object as the 2286 * new backing_object. 
Re-check that it is still our 2287 * backing object. 2288 */ 2289 vm_object_chain_acquire(backing_object, 0); 2290 if (backing_object != object->backing_object) { 2291 vm_object_chain_release(backing_object); 2292 vm_object_drop(backing_object); 2293 continue; 2294 } 2295 2296 /* 2297 * we check the backing object first, because it is most likely 2298 * not collapsable. 2299 */ 2300 if (backing_object->handle != NULL || 2301 (backing_object->type != OBJT_DEFAULT && 2302 backing_object->type != OBJT_SWAP) || 2303 (backing_object->flags & OBJ_DEAD) || 2304 object->handle != NULL || 2305 (object->type != OBJT_DEFAULT && 2306 object->type != OBJT_SWAP) || 2307 (object->flags & OBJ_DEAD)) { 2308 break; 2309 } 2310 2311 /* 2312 * If paging is in progress we can't do a normal collapse. 2313 */ 2314 if ( 2315 object->paging_in_progress != 0 || 2316 backing_object->paging_in_progress != 0 2317 ) { 2318 vm_object_qcollapse(object, backing_object); 2319 break; 2320 } 2321 2322 /* 2323 * We know that we can either collapse the backing object (if 2324 * the parent is the only reference to it) or (perhaps) have 2325 * the parent bypass the object if the parent happens to shadow 2326 * all the resident pages in the entire backing object. 2327 * 2328 * This is ignoring pager-backed pages such as swap pages. 2329 * vm_object_backing_scan fails the shadowing test in this 2330 * case. 2331 */ 2332 if (backing_object->ref_count == 1) { 2333 /* 2334 * If there is exactly one reference to the backing 2335 * object, we can collapse it into the parent. 2336 */ 2337 KKASSERT(object->backing_object == backing_object); 2338 vm_object_backing_scan(object, backing_object, 2339 OBSC_COLLAPSE_WAIT); 2340 2341 /* 2342 * Move the pager from backing_object to object. 2343 */ 2344 if (backing_object->type == OBJT_SWAP) { 2345 vm_object_pip_add(backing_object, 1); 2346 2347 /* 2348 * scrap the paging_offset junk and do a 2349 * discrete copy. This also removes major 2350 * assumptions about how the swap-pager 2351 * works from where it doesn't belong. The 2352 * new swapper is able to optimize the 2353 * destroy-source case. 2354 */ 2355 vm_object_pip_add(object, 1); 2356 swap_pager_copy(backing_object, object, 2357 OFF_TO_IDX(object->backing_object_offset), 2358 TRUE); 2359 vm_object_pip_wakeup(object); 2360 vm_object_pip_wakeup(backing_object); 2361 } 2362 2363 /* 2364 * Object now shadows whatever backing_object did. 2365 * Remove object from backing_object's shadow_list. 2366 * 2367 * Removing object from backing_objects shadow list 2368 * requires releasing object, which we will do below. 2369 */ 2370 KKASSERT(object->backing_object == backing_object); 2371 if (object->flags & OBJ_ONSHADOW) { 2372 LIST_REMOVE(object, shadow_list); 2373 backing_object->shadow_count--; 2374 backing_object->generation++; 2375 vm_object_clear_flag(object, OBJ_ONSHADOW); 2376 } 2377 2378 /* 2379 * backing_object->backing_object moves from within 2380 * backing_object to within object. 2381 * 2382 * OBJT_VNODE bbobj's should have empty shadow lists. 2383 */ 2384 while ((bbobj = backing_object->backing_object) != NULL) { 2385 if (bbobj->type == OBJT_VNODE) 2386 vm_object_hold_shared(bbobj); 2387 else 2388 vm_object_hold(bbobj); 2389 if (bbobj == backing_object->backing_object) 2390 break; 2391 vm_object_drop(bbobj); 2392 } 2393 2394 /* 2395 * We are removing backing_object from bbobj's 2396 * shadow list and adding object to bbobj's shadow 2397 * list, so the ref_count on bbobj is unchanged. 
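 *
 * In other words the chain (object -> backing_object -> bbobj) becomes
 * (object -> bbobj); backing_object is torn down below once its pages
 * and pager have been moved into object.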
2398 */ 2399 if (bbobj) { 2400 if (backing_object->flags & OBJ_ONSHADOW) { 2401 /* not locked exclusively if vnode */ 2402 KKASSERT(bbobj->type != OBJT_VNODE); 2403 LIST_REMOVE(backing_object, 2404 shadow_list); 2405 bbobj->shadow_count--; 2406 bbobj->generation++; 2407 vm_object_clear_flag(backing_object, 2408 OBJ_ONSHADOW); 2409 } 2410 backing_object->backing_object = NULL; 2411 } 2412 object->backing_object = bbobj; 2413 if (bbobj) { 2414 if (bbobj->type != OBJT_VNODE) { 2415 LIST_INSERT_HEAD(&bbobj->shadow_head, 2416 object, shadow_list); 2417 bbobj->shadow_count++; 2418 bbobj->generation++; 2419 vm_object_set_flag(object, 2420 OBJ_ONSHADOW); 2421 } 2422 } 2423 2424 object->backing_object_offset += 2425 backing_object->backing_object_offset; 2426 2427 vm_object_drop(bbobj); 2428 2429 /* 2430 * Discard the old backing_object. Nothing should be 2431 * able to ref it, other than a vm_map_split(), 2432 * and vm_map_split() will stall on our chain lock. 2433 * And we control the parent so it shouldn't be 2434 * possible for it to go away either. 2435 * 2436 * Since the backing object has no pages, no pager 2437 * left, and no object references within it, all 2438 * that is necessary is to dispose of it. 2439 */ 2440 KASSERT(backing_object->ref_count == 1, 2441 ("backing_object %p was somehow " 2442 "re-referenced during collapse!", 2443 backing_object)); 2444 KASSERT(RB_EMPTY(&backing_object->rb_memq), 2445 ("backing_object %p somehow has left " 2446 "over pages during collapse!", 2447 backing_object)); 2448 2449 /* 2450 * The object can be destroyed. 2451 * 2452 * XXX just fall through and dodealloc instead 2453 * of forcing destruction? 2454 */ 2455 atomic_add_int(&backing_object->ref_count, -1); 2456 #if defined(DEBUG_LOCKS) 2457 debugvm_object_add(backing_object, "collapse", 1, -1); 2458 #endif 2459 if ((backing_object->flags & OBJ_DEAD) == 0) 2460 vm_object_terminate(backing_object); 2461 object_collapses++; 2462 dodealloc = 0; 2463 } else { 2464 /* 2465 * If we do not entirely shadow the backing object, 2466 * there is nothing we can do so we give up. 2467 */ 2468 if (vm_object_backing_scan(object, backing_object, 2469 OBSC_TEST_ALL_SHADOWED) == 0) { 2470 break; 2471 } 2472 2473 /* 2474 * bbobj is backing_object->backing_object. Since 2475 * object completely shadows backing_object we can 2476 * bypass it and become backed by bbobj instead. 2477 * 2478 * The shadow list for vnode backing objects is not 2479 * used and a shared hold is allowed. 2480 */ 2481 while ((bbobj = backing_object->backing_object) != NULL) { 2482 if (bbobj->type == OBJT_VNODE) 2483 vm_object_hold_shared(bbobj); 2484 else 2485 vm_object_hold(bbobj); 2486 if (bbobj == backing_object->backing_object) 2487 break; 2488 vm_object_drop(bbobj); 2489 } 2490 2491 /* 2492 * Make object shadow bbobj instead of backing_object. 2493 * Remove object from backing_object's shadow list. 2494 * 2495 * Deallocating backing_object will not remove 2496 * it, since its reference count is at least 2. 2497 * 2498 * Removing object from backing_object's shadow 2499 * list requires releasing a ref, which we do 2500 * below by setting dodealloc to 1. 2501 */ 2502 KKASSERT(object->backing_object == backing_object); 2503 if (object->flags & OBJ_ONSHADOW) { 2504 LIST_REMOVE(object, shadow_list); 2505 backing_object->shadow_count--; 2506 backing_object->generation++; 2507 vm_object_clear_flag(object, OBJ_ONSHADOW); 2508 } 2509 2510 /* 2511 * Add a ref to bbobj, bbobj now shadows object. 
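 *
 * Rough picture of the bypass (illustrative):
 *
 *	before:	object -> backing_object -> bbobj
 *	after:	object -> bbobj, with backing_object keeping its own
 *		reference to bbobj (see the NOTE below)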
2512 * 2513 * NOTE: backing_object->backing_object still points 2514 * to bbobj. That relationship remains intact 2515 * because backing_object has > 1 ref, so 2516 * someone else is pointing to it (hence why 2517 * we can't collapse it into object and can 2518 * only handle the all-shadowed bypass case). 2519 */ 2520 if (bbobj) { 2521 if (bbobj->type != OBJT_VNODE) { 2522 vm_object_chain_wait(bbobj, 0); 2523 vm_object_reference_locked(bbobj); 2524 LIST_INSERT_HEAD(&bbobj->shadow_head, 2525 object, shadow_list); 2526 bbobj->shadow_count++; 2527 bbobj->generation++; 2528 vm_object_set_flag(object, 2529 OBJ_ONSHADOW); 2530 } else { 2531 vm_object_reference_quick(bbobj); 2532 } 2533 object->backing_object_offset += 2534 backing_object->backing_object_offset; 2535 object->backing_object = bbobj; 2536 vm_object_drop(bbobj); 2537 } else { 2538 object->backing_object = NULL; 2539 } 2540 2541 /* 2542 * Drop the reference count on backing_object. To 2543 * handle ref_count races properly we can't assume 2544 * that the ref_count is still at least 2 so we 2545 * have to actually call vm_object_deallocate() 2546 * (after clearing the chainlock). 2547 */ 2548 object_bypasses++; 2549 dodealloc = 1; 2550 } 2551 2552 /* 2553 * Ok, we want to loop on the new object->bbobj association, 2554 * possibly collapsing it further. However if dodealloc is 2555 * non-zero we have to deallocate the backing_object which 2556 * itself can potentially undergo a collapse, creating a 2557 * recursion depth issue with the LWKT token subsystem. 2558 * 2559 * In the case where we must deallocate the backing_object 2560 * it is possible now that the backing_object has a single 2561 * shadow count on some other object (not represented here 2562 * as yet), since it no longer shadows us. Thus when we 2563 * call vm_object_deallocate() it may attempt to collapse 2564 * itself into its remaining parent. 2565 */ 2566 if (dodealloc) { 2567 struct vm_object_dealloc_list *dtmp; 2568 2569 vm_object_chain_release(backing_object); 2570 vm_object_unlock(backing_object); 2571 /* backing_object remains held */ 2572 2573 /* 2574 * Auto-deallocation list for caller convenience. 2575 */ 2576 if (dlistp == NULL) 2577 dlistp = &dlist; 2578 2579 dtmp = kmalloc(sizeof(*dtmp), M_TEMP, M_WAITOK); 2580 dtmp->object = backing_object; 2581 dtmp->next = *dlistp; 2582 *dlistp = dtmp; 2583 } else { 2584 vm_object_chain_release(backing_object); 2585 vm_object_drop(backing_object); 2586 } 2587 /* backing_object = NULL; not needed */ 2588 /* loop */ 2589 } 2590 2591 /* 2592 * Clean up any left over backing_object 2593 */ 2594 if (backing_object) { 2595 vm_object_chain_release(backing_object); 2596 vm_object_drop(backing_object); 2597 } 2598 2599 /* 2600 * Clean up any auto-deallocation list. This is a convenience 2601 * for top-level callers so they don't have to pass &dlist. 2602 * Do not clean up any caller-passed dlistp, the caller will 2603 * do that. 2604 */ 2605 if (dlist) 2606 vm_object_deallocate_list(&dlist); 2607 2608 } 2609 2610 /* 2611 * vm_object_collapse() may collect additional objects in need of 2612 * deallocation. This routine deallocates these objects. The 2613 * deallocation itself can trigger additional collapses (which the 2614 * deallocate function takes care of). This procedure is used to 2615 * reduce procedural recursion since these vm_object shadow chains 2616 * can become quite long. 
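 *
 * Typical top-level usage, shown here only as an illustrative sketch
 * (the hold and chain-lock requirements are those documented above for
 * vm_object_collapse()):
 *
 *	struct vm_object_dealloc_list *dlist = NULL;
 *
 *	vm_object_hold(object);
 *	vm_object_chain_acquire(object, 0);
 *	vm_object_collapse(object, &dlist);
 *	vm_object_chain_release(object);
 *	vm_object_drop(object);
 *	vm_object_deallocate_list(&dlist);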
2617 */ 2618 void 2619 vm_object_deallocate_list(struct vm_object_dealloc_list **dlistp) 2620 { 2621 struct vm_object_dealloc_list *dlist; 2622 2623 while ((dlist = *dlistp) != NULL) { 2624 *dlistp = dlist->next; 2625 vm_object_lock(dlist->object); 2626 vm_object_deallocate_locked(dlist->object); 2627 vm_object_drop(dlist->object); 2628 kfree(dlist, M_TEMP); 2629 } 2630 }
2631 2632 /* 2633 * Removes all physical pages in the specified object range from the 2634 * object's list of pages. 2635 * 2636 * No requirements. 2637 */ 2638 static int vm_object_page_remove_callback(vm_page_t p, void *data); 2639 2640 void 2641 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, 2642 boolean_t clean_only) 2643 { 2644 struct rb_vm_page_scan_info info; 2645 int all; 2646 2647 /* 2648 * Degenerate cases and assertions 2649 */ 2650 vm_object_hold(object); 2651 if (object == NULL || 2652 (object->resident_page_count == 0 && object->swblock_count == 0)) { 2653 vm_object_drop(object); 2654 return; 2655 } 2656 KASSERT(object->type != OBJT_PHYS, 2657 ("attempt to remove pages from a physical object")); 2658 2659 /* 2660 * Indicate that paging is occurring on the object 2661 */ 2662 vm_object_pip_add(object, 1);
2663 2664 /* 2665 * Figure out the actual removal range and whether we are removing 2666 * the entire contents of the object or not. If removing the entire 2667 * contents, be sure to get all pages, even those that might be 2668 * beyond the end of the object. 2669 */ 2670 info.object = object; 2671 info.start_pindex = start; 2672 if (end == 0) 2673 info.end_pindex = (vm_pindex_t)-1; 2674 else 2675 info.end_pindex = end - 1; 2676 info.limit = clean_only; 2677 info.count = 0; 2678 all = (start == 0 && info.end_pindex >= object->size - 1);
2679 2680 /* 2681 * Loop until we are sure we have gotten them all. 2682 */ 2683 do { 2684 info.error = 0; 2685 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp, 2686 vm_object_page_remove_callback, &info); 2687 } while (info.error);
2688 2689 /* 2690 * Remove any related swap if throwing away pages, or for 2691 * non-swap objects (the swap is a clean copy in that case). 2692 */ 2693 if (object->type != OBJT_SWAP || clean_only == FALSE) { 2694 if (all) 2695 swap_pager_freespace_all(object); 2696 else 2697 swap_pager_freespace(object, info.start_pindex, 2698 info.end_pindex - info.start_pindex + 1); 2699 }
2700 2701 /* 2702 * Cleanup 2703 */ 2704 vm_object_pip_wakeup(object); 2705 vm_object_drop(object); 2706 }
2707 2708 /* 2709 * The caller must hold the object. 2710 * 2711 * NOTE: User yields are allowed when removing more than one page, but not 2712 * allowed if only removing one page (the path for single page removals 2713 * might hold a spinlock).
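 *
 * The callback itself always returns 0; when it must sleep on a busy page
 * it sets info->error to 1 so that the scan loop in vm_object_page_remove()
 * restarts from the beginning.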
2714 */ 2715 static int 2716 vm_object_page_remove_callback(vm_page_t p, void *data) 2717 { 2718 struct rb_vm_page_scan_info *info = data; 2719 2720 if ((++info->count & 63) == 0) 2721 lwkt_user_yield(); 2722 2723 if (info->object != p->object || 2724 p->pindex < info->start_pindex || 2725 p->pindex > info->end_pindex) { 2726 kprintf("vm_object_page_remove_callbackA: obj/pg race %p/%p\n", 2727 info->object, p); 2728 return(0); 2729 } 2730 if (vm_page_busy_try(p, TRUE)) { 2731 vm_page_sleep_busy(p, TRUE, "vmopar"); 2732 info->error = 1; 2733 return(0); 2734 } 2735 if (info->object != p->object) { 2736 /* this should never happen */ 2737 kprintf("vm_object_page_remove_callbackB: obj/pg race %p/%p\n", 2738 info->object, p); 2739 vm_page_wakeup(p); 2740 return(0); 2741 }
2742 2743 /* 2744 * Wired pages cannot be destroyed, but they can be invalidated 2745 * and we do so if clean_only (limit) is not set. 2746 * 2747 * WARNING! The page may be wired due to being part of a buffer 2748 * cache buffer, and the buffer might be marked B_CACHE. 2749 * This is fine as part of a truncation but VFSs must be 2750 * sure to fix the buffer up when re-extending the file. 2751 * 2752 * NOTE! PG_NEED_COMMIT is ignored. 2753 */ 2754 if (p->wire_count != 0) { 2755 vm_page_protect(p, VM_PROT_NONE); 2756 if (info->limit == 0) 2757 p->valid = 0; 2758 vm_page_wakeup(p); 2759 return(0); 2760 }
2761 2762 /* 2763 * limit is our clean_only flag. If set and the page is dirty or 2764 * requires a commit, do not free it. If set and the page is being 2765 * held by someone, do not free it. 2766 */ 2767 if (info->limit && p->valid) { 2768 vm_page_test_dirty(p); 2769 if ((p->valid & p->dirty) || (p->flags & PG_NEED_COMMIT)) { 2770 vm_page_wakeup(p); 2771 return(0); 2772 } 2773 }
2774 2775 /* 2776 * Destroy the page 2777 */ 2778 vm_page_protect(p, VM_PROT_NONE); 2779 vm_page_free(p); 2780 2781 return(0); 2782 }
2783 2784 /* 2785 * Coalesces two objects backing up adjoining regions of memory into a 2786 * single object. 2787 * 2788 * Returns TRUE if objects were combined. 2789 * 2790 * NOTE: Only works at the moment if the second object is NULL - 2791 * if it's not, which object do we lock first? 2792 * 2793 * Parameters: 2794 * prev_object First object to coalesce 2795 * prev_offset Offset into prev_object 2796 * next_object Second object to coalesce 2797 * next_offset Offset into next_object 2798 * 2799 * prev_size Size of reference to prev_object 2800 * next_size Size of reference to next_object 2801 * 2802 * The caller does not need to hold (prev_object) but must have a stable 2803 * pointer to it (typically by holding the vm_map locked). 2804 */ 2805 boolean_t 2806 vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex, 2807 vm_size_t prev_size, vm_size_t next_size) 2808 { 2809 vm_pindex_t next_pindex; 2810 2811 if (prev_object == NULL) 2812 return (TRUE); 2813 2814 vm_object_hold(prev_object); 2815 2816 if (prev_object->type != OBJT_DEFAULT && 2817 prev_object->type != OBJT_SWAP) { 2818 vm_object_drop(prev_object); 2819 return (FALSE); 2820 }
2821 2822 /* 2823 * Try to collapse the object first 2824 */ 2825 vm_object_chain_acquire(prev_object, 0); 2826 vm_object_collapse(prev_object, NULL);
2827 2828 /* 2829 * Can't coalesce if: more than one reference, paged out, shadows 2830 * another object, or
has a copy elsewhere (any of which mean that the 2831 * pages not mapped to prev_entry may be in use anyway) 2832 */ 2833 2834 if (prev_object->backing_object != NULL) { 2835 vm_object_chain_release(prev_object); 2836 vm_object_drop(prev_object); 2837 return (FALSE); 2838 } 2839 2840 prev_size >>= PAGE_SHIFT; 2841 next_size >>= PAGE_SHIFT; 2842 next_pindex = prev_pindex + prev_size; 2843 2844 if ((prev_object->ref_count > 1) && 2845 (prev_object->size != next_pindex)) { 2846 vm_object_chain_release(prev_object); 2847 vm_object_drop(prev_object); 2848 return (FALSE); 2849 }
2850 2851 /* 2852 * Remove any pages that may still be in the object from a previous 2853 * deallocation. 2854 */ 2855 if (next_pindex < prev_object->size) { 2856 vm_object_page_remove(prev_object, 2857 next_pindex, 2858 next_pindex + next_size, FALSE); 2859 if (prev_object->type == OBJT_SWAP) 2860 swap_pager_freespace(prev_object, 2861 next_pindex, next_size); 2862 }
2863 2864 /* 2865 * Extend the object if necessary. 2866 */ 2867 if (next_pindex + next_size > prev_object->size) 2868 prev_object->size = next_pindex + next_size; 2869 2870 vm_object_chain_release(prev_object); 2871 vm_object_drop(prev_object); 2872 return (TRUE); 2873 }
2874 2875 /* 2876 * Make the object writable and flag it as being possibly dirty. 2877 * 2878 * The object might not be held (or might be held but held shared), 2879 * the related vnode is probably not held either. Object and vnode are 2880 * stable by virtue of the vm_page busied by the caller preventing 2881 * destruction. 2882 * 2883 * If the related mount is flagged MNTK_THR_SYNC we need to call 2884 * vsetobjdirty(). Filesystems using this option usually shortcut 2885 * synchronization by only scanning the syncer list. 2886 */ 2887 void 2888 vm_object_set_writeable_dirty(vm_object_t object) 2889 { 2890 struct vnode *vp; 2891 2892 /*vm_object_assert_held(object);*/ 2893 /* 2894 * Avoid contention in vm fault path by checking the state before 2895 * issuing an atomic op on it. 2896 */ 2897 if ((object->flags & (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) != 2898 (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) { 2899 vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY); 2900 } 2901 if (object->type == OBJT_VNODE && 2902 (vp = (struct vnode *)object->handle) != NULL) { 2903 if ((vp->v_flag & VOBJDIRTY) == 0) { 2904 if (vp->v_mount && 2905 (vp->v_mount->mnt_kern_flag & MNTK_THR_SYNC)) { 2906 /* 2907 * New style THR_SYNC places vnodes on the 2908 * syncer list more deterministically. 2909 */ 2910 vsetobjdirty(vp); 2911 } else { 2912 /* 2913 * Old style scan would not necessarily place 2914 * a vnode on the syncer list when possibly 2915 * modified via mmap. 2916 */ 2917 vsetflags(vp, VOBJDIRTY); 2918 } 2919 } 2920 } 2921 }
2922 2923 #include "opt_ddb.h" 2924 #ifdef DDB 2925 #include <sys/kernel.h> 2926 2927 #include <sys/cons.h> 2928 2929 #include <ddb/ddb.h> 2930 2931 static int _vm_object_in_map (vm_map_t map, vm_object_t object, 2932 vm_map_entry_t entry); 2933 static int vm_object_in_map (vm_object_t object); 2934 2935 /* 2936 * The caller must hold the object.
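 *
 * Returns 1 if (object) is reachable from the given map entry, or from
 * any entry in the map when entry is NULL, and 0 otherwise.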
2937 */ 2938 static int 2939 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) 2940 { 2941 vm_map_t tmpm; 2942 vm_map_entry_t tmpe; 2943 vm_object_t obj, nobj; 2944 int entcount; 2945 2946 if (map == 0) 2947 return 0; 2948 if (entry == 0) { 2949 tmpe = map->header.next; 2950 entcount = map->nentries; 2951 while (entcount-- && (tmpe != &map->header)) { 2952 if( _vm_object_in_map(map, object, tmpe)) { 2953 return 1; 2954 } 2955 tmpe = tmpe->next; 2956 } 2957 return (0); 2958 } 2959 switch(entry->maptype) { 2960 case VM_MAPTYPE_SUBMAP: 2961 tmpm = entry->object.sub_map; 2962 tmpe = tmpm->header.next; 2963 entcount = tmpm->nentries; 2964 while (entcount-- && tmpe != &tmpm->header) { 2965 if( _vm_object_in_map(tmpm, object, tmpe)) { 2966 return 1; 2967 } 2968 tmpe = tmpe->next; 2969 } 2970 break; 2971 case VM_MAPTYPE_NORMAL: 2972 case VM_MAPTYPE_VPAGETABLE: 2973 obj = entry->object.vm_object; 2974 while (obj) { 2975 if (obj == object) { 2976 if (obj != entry->object.vm_object) 2977 vm_object_drop(obj); 2978 return 1; 2979 } 2980 while ((nobj = obj->backing_object) != NULL) { 2981 vm_object_hold(nobj); 2982 if (nobj == obj->backing_object) 2983 break; 2984 vm_object_drop(nobj); 2985 } 2986 if (obj != entry->object.vm_object) { 2987 if (nobj) 2988 vm_object_lock_swap(); 2989 vm_object_drop(obj); 2990 } 2991 obj = nobj; 2992 } 2993 break; 2994 default: 2995 break; 2996 } 2997 return 0; 2998 } 2999 3000 static int vm_object_in_map_callback(struct proc *p, void *data); 3001 3002 struct vm_object_in_map_info { 3003 vm_object_t object; 3004 int rv; 3005 }; 3006 3007 /* 3008 * Debugging only 3009 */ 3010 static int 3011 vm_object_in_map(vm_object_t object) 3012 { 3013 struct vm_object_in_map_info info; 3014 3015 info.rv = 0; 3016 info.object = object; 3017 3018 allproc_scan(vm_object_in_map_callback, &info); 3019 if (info.rv) 3020 return 1; 3021 if( _vm_object_in_map(&kernel_map, object, 0)) 3022 return 1; 3023 if( _vm_object_in_map(&pager_map, object, 0)) 3024 return 1; 3025 if( _vm_object_in_map(&buffer_map, object, 0)) 3026 return 1; 3027 return 0; 3028 } 3029 3030 /* 3031 * Debugging only 3032 */ 3033 static int 3034 vm_object_in_map_callback(struct proc *p, void *data) 3035 { 3036 struct vm_object_in_map_info *info = data; 3037 3038 if (p->p_vmspace) { 3039 if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) { 3040 info->rv = 1; 3041 return -1; 3042 } 3043 } 3044 return (0); 3045 } 3046 3047 DB_SHOW_COMMAND(vmochk, vm_object_check) 3048 { 3049 vm_object_t object; 3050 int n; 3051 3052 /* 3053 * make sure that internal objs are in a map somewhere 3054 * and none have zero ref counts. 
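 *
 * Invoked from the DDB prompt as "show vmochk" (DB_SHOW_COMMAND() hangs
 * the command off the "show" table).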
3055 */ 3056 for (n = 0; n < VMOBJ_HSIZE; ++n) { 3057 for (object = TAILQ_FIRST(&vm_object_lists[n]); 3058 object != NULL; 3059 object = TAILQ_NEXT(object, object_list)) { 3060 if (object->type == OBJT_MARKER) 3061 continue; 3062 if (object->handle != NULL || 3063 (object->type != OBJT_DEFAULT && 3064 object->type != OBJT_SWAP)) { 3065 continue; 3066 } 3067 if (object->ref_count == 0) { 3068 db_printf("vmochk: internal obj has " 3069 "zero ref count: %ld\n", 3070 (long)object->size); 3071 } 3072 if (vm_object_in_map(object)) 3073 continue; 3074 db_printf("vmochk: internal obj is not in a map: " 3075 "ref: %d, size: %lu: 0x%lx, " 3076 "backing_object: %p\n", 3077 object->ref_count, (u_long)object->size, 3078 (u_long)object->size, 3079 (void *)object->backing_object); 3080 } 3081 } 3082 } 3083 3084 /* 3085 * Debugging only 3086 */ 3087 DB_SHOW_COMMAND(object, vm_object_print_static) 3088 { 3089 /* XXX convert args. */ 3090 vm_object_t object = (vm_object_t)addr; 3091 boolean_t full = have_addr; 3092 3093 vm_page_t p; 3094 3095 /* XXX count is an (unused) arg. Avoid shadowing it. */ 3096 #define count was_count 3097 3098 int count; 3099 3100 if (object == NULL) 3101 return; 3102 3103 db_iprintf( 3104 "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n", 3105 object, (int)object->type, (u_long)object->size, 3106 object->resident_page_count, object->ref_count, object->flags); 3107 /* 3108 * XXX no %qd in kernel. Truncate object->backing_object_offset. 3109 */ 3110 db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n", 3111 object->shadow_count, 3112 object->backing_object ? object->backing_object->ref_count : 0, 3113 object->backing_object, (long)object->backing_object_offset); 3114 3115 if (!full) 3116 return; 3117 3118 db_indent += 2; 3119 count = 0; 3120 RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) { 3121 if (count == 0) 3122 db_iprintf("memory:="); 3123 else if (count == 6) { 3124 db_printf("\n"); 3125 db_iprintf(" ..."); 3126 count = 0; 3127 } else 3128 db_printf(","); 3129 count++; 3130 3131 db_printf("(off=0x%lx,page=0x%lx)", 3132 (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p)); 3133 } 3134 if (count != 0) 3135 db_printf("\n"); 3136 db_indent -= 2; 3137 } 3138 3139 /* XXX. */ 3140 #undef count 3141 3142 /* 3143 * XXX need this non-static entry for calling from vm_map_print. 
3144 * 3145 * Debugging only 3146 */ 3147 void 3148 vm_object_print(/* db_expr_t */ long addr, 3149 boolean_t have_addr, 3150 /* db_expr_t */ long count, 3151 char *modif) 3152 { 3153 vm_object_print_static(addr, have_addr, count, modif); 3154 } 3155 3156 /* 3157 * Debugging only 3158 */ 3159 DB_SHOW_COMMAND(vmopag, vm_object_print_pages) 3160 { 3161 vm_object_t object; 3162 int nl = 0; 3163 int c; 3164 int n; 3165 3166 for (n = 0; n < VMOBJ_HSIZE; ++n) { 3167 for (object = TAILQ_FIRST(&vm_object_lists[n]); 3168 object != NULL; 3169 object = TAILQ_NEXT(object, object_list)) { 3170 vm_pindex_t idx, fidx; 3171 vm_pindex_t osize; 3172 vm_paddr_t pa = -1, padiff; 3173 int rcount; 3174 vm_page_t m; 3175 3176 if (object->type == OBJT_MARKER) 3177 continue; 3178 db_printf("new object: %p\n", (void *)object); 3179 if ( nl > 18) { 3180 c = cngetc(); 3181 if (c != ' ') 3182 return; 3183 nl = 0; 3184 } 3185 nl++; 3186 rcount = 0; 3187 fidx = 0; 3188 osize = object->size; 3189 if (osize > 128) 3190 osize = 128; 3191 for (idx = 0; idx < osize; idx++) { 3192 m = vm_page_lookup(object, idx); 3193 if (m == NULL) { 3194 if (rcount) { 3195 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 3196 (long)fidx, rcount, (long)pa); 3197 if ( nl > 18) { 3198 c = cngetc(); 3199 if (c != ' ') 3200 return; 3201 nl = 0; 3202 } 3203 nl++; 3204 rcount = 0; 3205 } 3206 continue; 3207 } 3208 3209 if (rcount && 3210 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { 3211 ++rcount; 3212 continue; 3213 } 3214 if (rcount) { 3215 padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m); 3216 padiff >>= PAGE_SHIFT; 3217 padiff &= PQ_L2_MASK; 3218 if (padiff == 0) { 3219 pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE; 3220 ++rcount; 3221 continue; 3222 } 3223 db_printf(" index(%ld)run(%d)pa(0x%lx)", 3224 (long)fidx, rcount, (long)pa); 3225 db_printf("pd(%ld)\n", (long)padiff); 3226 if ( nl > 18) { 3227 c = cngetc(); 3228 if (c != ' ') 3229 return; 3230 nl = 0; 3231 } 3232 nl++; 3233 } 3234 fidx = idx; 3235 pa = VM_PAGE_TO_PHYS(m); 3236 rcount = 1; 3237 } 3238 if (rcount) { 3239 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 3240 (long)fidx, rcount, (long)pa); 3241 if ( nl > 18) { 3242 c = cngetc(); 3243 if (c != ' ') 3244 return; 3245 nl = 0; 3246 } 3247 nl++; 3248 } 3249 } 3250 } 3251 } 3252 #endif /* DDB */ 3253